async_openai/types/assistant.rs
use std::collections::HashMap;

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

use super::{FunctionName, FunctionObject, ResponseFormat};

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct AssistantToolCodeInterpreterResources {
    /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
    pub file_ids: Vec<String>, // maxItems: 20
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct AssistantToolFileSearchResources {
    /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
    pub vector_store_ids: Vec<String>,
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct AssistantToolResources {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<AssistantToolFileSearchResources>,
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct CreateAssistantToolResources {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<CreateAssistantToolFileSearchResources>,
}

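/// File search tool resources for a new assistant.
///
/// A minimal sketch of the two ways to populate this struct: attaching an
/// existing vector store by ID, or asking the API to create one from files.
/// The IDs below are hypothetical placeholders (doc-test ignored because it
/// needs the surrounding crate):
///
/// ```ignore
/// // Attach an existing vector store:
/// let existing = CreateAssistantToolFileSearchResources {
///     vector_store_ids: Some(vec!["vs_abc123".to_string()]),
///     vector_stores: None,
/// };
///
/// // Or have the API build a vector store from uploaded files:
/// let created = CreateAssistantToolFileSearchResources {
///     vector_store_ids: None,
///     vector_stores: Some(vec![AssistantVectorStore {
///         file_ids: vec!["file-abc123".to_string()],
///         chunking_strategy: None,
///         metadata: None,
///     }]),
/// };
/// ```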
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct CreateAssistantToolFileSearchResources {
    /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
    pub vector_store_ids: Option<Vec<String>>,
    /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.
    pub vector_stores: Option<Vec<AssistantVectorStore>>,
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct AssistantVectorStore {
    /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10,000 files in a vector store.
    pub file_ids: Vec<String>,

    /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
    pub chunking_strategy: Option<AssistantVectorStoreChunkingStrategy>,

    /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    pub metadata: Option<HashMap<String, String>>,
}

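/// The chunking strategy used to chunk the file(s).
///
/// A rough sketch of the JSON shapes this enum maps to, assuming
/// `serde_json` is available (it is not imported in this module; doc-test
/// ignored for that reason):
///
/// ```ignore
/// // `Auto` serializes to {"type":"auto"}.
/// let auto = AssistantVectorStoreChunkingStrategy::Auto;
///
/// // `Static` nests its parameters under a `static` key:
/// // {"type":"static","static":{"max_chunk_size_tokens":800,"chunk_overlap_tokens":400}}
/// let fixed = AssistantVectorStoreChunkingStrategy::Static {
///     r#static: StaticChunkingStrategy {
///         max_chunk_size_tokens: 800,
///         chunk_overlap_tokens: 400,
///     },
/// };
/// ```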
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
#[serde(tag = "type")]
pub enum AssistantVectorStoreChunkingStrategy {
    /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
    #[default]
    #[serde(rename = "auto")]
    Auto,
    #[serde(rename = "static")]
    Static { r#static: StaticChunkingStrategy },
}

/// Static Chunking Strategy
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct StaticChunkingStrategy {
    /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`.
    pub max_chunk_size_tokens: u16,
    /// The number of tokens that overlap between chunks. The default value is `400`.
    ///
    /// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
    pub chunk_overlap_tokens: u16,
}

/// Represents an `assistant` that can call the model and use tools.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct AssistantObject {
    /// The identifier, which can be referenced in API endpoints.
    pub id: String,
    /// The object type, which is always `assistant`.
    pub object: String,
    /// The Unix timestamp (in seconds) for when the assistant was created.
    pub created_at: i32,
    /// The name of the assistant. The maximum length is 256 characters.
    pub name: Option<String>,
    /// The description of the assistant. The maximum length is 512 characters.
    pub description: Option<String>,
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
    pub model: String,
    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    pub instructions: Option<String>,
    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(default)]
    pub tools: Vec<AssistantTools>,
    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    pub tool_resources: Option<AssistantToolResources>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    pub metadata: Option<HashMap<String, String>>,
    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    pub temperature: Option<f32>,
    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
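///
/// A rough sketch of the wire shapes, assuming `serde_json` and a
/// `ResponseFormat::JsonObject` variant as defined elsewhere in this crate
/// (doc-test ignored since it needs the full crate context):
///
/// ```ignore
/// // `Auto` serializes to the bare string "auto":
/// let auto = AssistantsApiResponseFormatOption::Auto;
///
/// // `Format` is untagged, so it serializes as the inner `ResponseFormat`,
/// // e.g. {"type":"json_object"}:
/// let json_mode = AssistantsApiResponseFormatOption::Format(ResponseFormat::JsonObject);
/// ```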
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub enum AssistantsApiResponseFormatOption {
    #[default]
    #[serde(rename = "auto")]
    Auto,
    #[serde(untagged)]
    Format(ResponseFormat),
}

/// File search tool
#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
pub struct AssistantToolsFileSearch {
    /// Overrides for the file search tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<AssistantToolsFileSearchOverrides>,
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct AssistantToolsFileSearchOverrides {
    /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
    ///
    /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.
    pub max_num_results: Option<u8>,
    pub ranking_options: Option<FileSearchRankingOptions>,
}

#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub enum FileSearchRanker {
    #[serde(rename = "auto")]
    Auto,
    #[serde(rename = "default_2024_08_21")]
    Default2024_08_21,
}

/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
///
/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.
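///
/// A minimal sketch of raising the score threshold so only closer matches
/// are returned; the `0.5` cutoff is an arbitrary illustration (doc-test
/// ignored since it needs the full crate context):
///
/// ```ignore
/// let options = FileSearchRankingOptions {
///     ranker: Some(FileSearchRanker::Auto),
///     score_threshold: 0.5,
/// };
/// ```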
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FileSearchRankingOptions {
    /// The ranker to use for the file search. If not specified, the `auto` ranker will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranker: Option<FileSearchRanker>,

    /// The score threshold for the file search. All values must be a floating point number between 0 and 1.
    pub score_threshold: f32,
}

/// Function tool
#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
pub struct AssistantToolsFunction {
    pub function: FunctionObject,
}

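/// A tool enabled on an assistant.
///
/// A rough sketch of the internally tagged JSON this enum maps to, assuming
/// `serde_json` (not imported in this module; doc-test ignored):
///
/// ```ignore
/// // Unit variant: {"type":"code_interpreter"}
/// let code = AssistantTools::CodeInterpreter;
///
/// // Newtype variants flatten their fields next to the tag, so a default
/// // file search tool serializes as just {"type":"file_search"}:
/// let search = AssistantTools::FileSearch(AssistantToolsFileSearch::default());
/// ```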
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum AssistantTools {
    CodeInterpreter,
    FileSearch(AssistantToolsFileSearch),
    Function(AssistantToolsFunction),
}

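/// Request to create an assistant.
///
/// A minimal sketch of building a request with the generated
/// `CreateAssistantRequestArgs` builder; the model name and instructions are
/// illustrative placeholders (doc-test ignored since it needs the full
/// crate context):
///
/// ```ignore
/// let request = CreateAssistantRequestArgs::default()
///     .model("gpt-4o")
///     .name("math-tutor")
///     .instructions("You are a personal math tutor.")
///     .tools(vec![AssistantTools::CodeInterpreter])
///     .build()?;
/// ```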
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CreateAssistantRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateAssistantRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    pub model: String,

    /// The name of the assistant. The maximum length is 256 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// The description of the assistant. The maximum length is 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<AssistantTools>>,

    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<CreateAssistantToolResources>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}

#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "ModifyAssistantRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ModifyAssistantRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// The name of the assistant. The maximum length is 256 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// The description of the assistant. The maximum length is 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<AssistantTools>>,

    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<AssistantToolResources>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}

#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct DeleteAssistantResponse {
    pub id: String,
    pub deleted: bool,
    pub object: String,
}

#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct ListAssistantsResponse {
    pub object: String,
    pub data: Vec<AssistantObject>,
    pub first_id: Option<String>,
    pub last_id: Option<String>,
    pub has_more: bool,
}

/// Controls which (if any) tool is called by the model.
/// `none` means the model will not call any tools and instead generates a message.
/// `auto` is the default value and means the model can pick between generating a message or calling one or more tools.
/// `required` means the model must call one or more tools before responding to the user.
/// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
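///
/// A rough sketch of the corresponding enum values, with `my_function` as a
/// hypothetical function name and `FunctionName` assumed to be the simple
/// `{ name }` struct used elsewhere in this crate (doc-test ignored since it
/// needs the full crate context):
///
/// ```ignore
/// // Serializes to the bare string "auto":
/// let auto = AssistantsApiToolChoiceOption::Auto;
///
/// // Serializes to {"type":"function","function":{"name":"my_function"}}:
/// let named = AssistantsApiToolChoiceOption::Named(AssistantsNamedToolChoice {
///     r#type: AssistantToolType::Function,
///     function: Some(FunctionName { name: "my_function".to_string() }),
/// });
/// ```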
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum AssistantsApiToolChoiceOption {
    #[default]
    None,
    Auto,
    Required,
    #[serde(untagged)]
    Named(AssistantsNamedToolChoice),
}

/// Specifies a tool the model should use. Use to force the model to call a specific tool.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct AssistantsNamedToolChoice {
    /// The type of the tool. If type is `function`, the function name must be set.
    pub r#type: AssistantToolType,

    pub function: Option<FunctionName>,
}

#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum AssistantToolType {
    #[default]
    Function,
    CodeInterpreter,
    FileSearch,
}