// open_ai/resources/beta/threads/threads.rs
1use std::cell::RefCell;
2use std::collections::HashMap;
3use std::error::Error;
4use std::rc::Rc;
5use futures::executor::block_on;
6use serde::{Deserialize, Serialize};
7use serde_json::Value;
8use crate::resource::APIResource;
9// use crate::library::{AssistantStream,ThreadCreateAndRunParamsBaseStream};
10// use crate::core::{self, is_request_options, APIPromise};
11// use crate::resources::beta::threads::threads;
12use crate::resources::beta::assistants as assistants_api;
13use crate::resources::beta::threads::messages as messages_api;
14use crate::resources::beta::threads::runs::runs as runs_api;
15use crate::resources::beta::threads as threads_api;
16use crate::resources::beta::threads::runs::runs;
17use crate::{OpenAI, OpenAIObject, streaming};
18use crate::core;
19use crate::core::{APIClient, Headers};
20use crate::core::streaming::APIFuture;
21// use crate::streaming::Stream; // from '../../../streaming';
22
/// The Threads API resource. Provides CRUD operations on threads plus
/// access to the nested `runs` and `messages` sub-resources.
#[derive(Debug, Clone)]
pub struct Threads {
    // Shared API client handle; `None` until `set_client` is called.
    pub client: Option<APIResource>,
    // Sub-resource for runs within a thread.
    pub runs: runs_api::Runs,
    // Sub-resource for messages within a thread.
    pub messages: messages_api::Messages,
}
29
30impl Threads {
31 pub fn new() -> Self {
32 Threads {
33 client: None,
34 runs: runs_api::Runs::new(),
35 messages: messages_api::Messages::new(),
36 }
37 }
38
39 pub fn set_client(&mut self, client: APIResource) {
40 self.messages.client = Some(client.clone());
41 self.runs.client = Some(client.clone());
42 self.client = Some(client);
43 }
44
45 /// Create a thread.
46 pub fn create<'a>(&self, body: ThreadCreateParams) -> APIFuture<ThreadCreateParams, Thread, Thread> {
47 let mut headers: Headers = HashMap::new();
48 headers.insert("OpenAI-Beta".to_string(), Some("assistants=v2".to_string()));
49
50 self.client.clone().unwrap().lock().unwrap().post(
51 "/threads",
52 Some(core::RequestOptions {
53 body: Some(body),
54 headers: Some(headers),
55 ..Default::default()
56 }),
57 )
58 }
59
60 /// Retrieves a thread.
61 pub fn retrieve(
62 &self,
63 thread_id: &str,
64 options: Option<core::RequestOptions<()>>,
65 ) -> APIFuture<String, Thread, ()> {
66 let mut headers: Headers = HashMap::new();
67 headers.insert("OpenAI-Beta".to_string(), Some("assistants=v2".to_string()));
68
69 if let Some(opts) = options {
70 if let Some(hdrs) = opts.headers {
71 for (key, value) in hdrs {
72 headers.insert(key, value);
73 }
74 }
75 }
76
77 self.client.clone().unwrap().clone().lock().unwrap().get(
78 &format!("/threads/{thread_id}"),
79 Some(core::RequestOptions {
80 body: Some(thread_id.to_string()),
81 headers: Some(headers),
82 ..Default::default()
83 }),
84 )
85 }
86
87 /// Modifies a thread.
88 pub fn update(
89 &self,
90 thread_id: &str,
91 body: ThreadUpdateParams,
92 options: Option<core::RequestOptions<ThreadUpdateParams>>,
93 ) -> APIFuture<ThreadUpdateParams, Thread, ()> {
94 let mut headers: Headers = HashMap::new();
95 // headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
96 headers.insert("OpenAI-Beta".to_string(), Some("assistants=v2".to_string()));
97 if let Some(opts) = options {
98 if let Some(hdrs) = opts.headers {
99 for (key, value) in hdrs {
100 headers.insert(key, value);
101 }
102 }
103 }
104
105 self.client.clone().unwrap().lock().unwrap().post(
106 &format!("/threads/{thread_id}"),
107 Some(core::RequestOptions {
108 body: Some(body),
109 headers: Some(headers),
110 ..Default::default()
111 }),
112 )
113 }
114
115 /// Delete a thread.
116 pub fn del(
117 &self,
118 thread_id: &str,
119 options: Option<core::RequestOptions<()>>,
120 ) -> APIFuture<(), Thread, ()> {
121 let mut headers: Headers = HashMap::new();
122 // headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
123 headers.insert("OpenAI-Beta".to_string(), Some("assistants=v2".to_string()));
124 if let Some(opts) = options {
125 if let Some(hdrs) = opts.headers {
126 for (key, value) in hdrs {
127 headers.insert(key, value);
128 }
129 }
130 }
131 self.client.clone().unwrap().lock().unwrap().delete(
132 &format!("/threads/{thread_id}"),
133 Some(core::RequestOptions::<()> {
134 headers: Some(headers),
135 ..Default::default()
136 }),
137 )
138 }
139
140 // createAndRun(
141 // body: ThreadCreateAndRunParamsNonStreaming,
142 // options: Option<Core.RequestOptions>,
143 // ): APIPromise<RunsAPI.Run>,
144 // createAndRun(
145 // body: ThreadCreateAndRunParamsStreaming,
146 // options: Option<Core.RequestOptions>,
147 // ): APIPromise<Stream<assistants_api::AssistantStreamEvent>>,
148 // createAndRun(
149 // body: ThreadCreateAndRunParamsBase,
150 // options: Option<Core.RequestOptions>,
151 // ): APIPromise<Stream<assistants_api::AssistantStreamEvent> | RunsAPI.Run>,
152 // createAndRun(
153 // body: ThreadCreateAndRunParams,
154 // options: Option<Core.RequestOptions>,
155 // ): APIPromise<RunsAPI.Run> | APIPromise<Stream<assistants_api::AssistantStreamEvent>> {
156 // return this._client.post('/threads/runs', {
157 // body,
158 // ...options,
159 // headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
160 // stream: body.stream ?? false,
161 // }) as APIPromise<RunsAPI.Run> | APIPromise<Stream<assistants_api::AssistantStreamEvent>>,
162 /// Create a thread and run it in one request.
163 pub fn create_and_run(
164 &self,
165 body: ThreadCreateAndRunParams,
166 options: Option<core::RequestOptions<ThreadCreateAndRunParams>>,
167 ) -> APIFuture<ThreadCreateAndRunParams, Thread, ()> {
168 let stream = body.stream.unwrap_or(false);
169 let mut headers: Headers = HashMap::new();
170 // headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
171 headers.insert("OpenAI-Beta".to_string(), Some("assistants=v2".to_string()));
172 if let Some(opts) = options {
173 if let Some(hdrs) = opts.headers {
174 for (key, value) in hdrs {
175 headers.insert(key, value);
176 }
177 }
178 }
179
180 self.client.clone().unwrap().lock().unwrap().post(
181 "/threads/runs",
182 Some(core::RequestOptions {
183 body: Some(body),
184 stream: Some(stream),
185 headers: Some(headers),
186 ..Default::default()
187 }),
188 )
189 }
190
191 // /// A helper to create a thread, start a run and then poll for a terminal state.
192 // /// More information on Run lifecycles can be found here:
193 // /// https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
194 // // async create_and_run_poll(
195 // // body: ThreadCreateAndRunParamsNonStreaming,
196 // // options: Option<Core.RequestOptions & { pollIntervalMs: Option<number }>,
197 // // ): Promise<Threads.Run> {
198 // // const run = await this.createAndRun(body, options),
199 // // return await this.runs.poll(run.thread_id, run.id, options),
200 // // }
201 // pub async fn create_and_run_poll(
202 // &self,
203 // body: ThreadCreateAndRunParams,
204 // options: Option<core::RequestOptions<ThreadCreateAndRunParams>>,
205 // ) -> Result<threads::Runs, Box<dyn Error>> {
206 // let run = self.create_and_run(body, options).await?;
207 // // self.runs.poll
208 // runs_api::Runs::poll(
209 // &self.openai.as_ref().unwrap().borrow().runs,
210 // run.thread_id,
211 // run.id,
212 // options,
213 // ).await
214 // }
215
216 // /// Create a thread and stream the run back
217 // // createAndRunStream(
218 // // body: ThreadCreateAndRunParamsBaseStream,
219 // // options: Option<Core.RequestOptions>,
220 // // ): AssistantStream {
221 // // return AssistantStream.createThreadAssistantStream(body, this._client.beta.threads, options),
222 // // }
223 // pub async fn create_and_run_stream(
224 // &self,
225 // body: ThreadCreateAndRunParams,
226 // options: Option<core::RequestOptions>,
227 // ) -> Result<AssistantStream, Box<dyn Error>> {
228 // streaming::Stream::create_thread_assistant_stream(
229 // body,
230 // &self.client.as_ref().unwrap().borrow().client.beta.threads,
231 // options,
232 // )
233 // }
234}
235
236// #[derive(Debug, Deserialize, Serialize)]
237// pub enum CreateAndRunResponse {
238// Run(runs_api::Run),
239// Stream(Stream::<assistants_api::AssistantStreamEvent>)
240// }
241//
242// impl Default for CreateAndRunResponse {
243// fn default() -> Self {
244// CreateAndRunResponse::Run(runs_api::Run::default())
245// }
246// }
247
/// An object describing the expected output of the model. If `json_object` only
/// `function` type `tools` are allowed to be passed to the Run. If `text` the model
/// can return text or any value needed.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct AssistantResponseFormat {
    /// Must be one of `text` or `json_object`.
    /// Serialized as the JSON field `"type"`; omitted entirely when `None`.
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub format_type: Option<AssistantResponseFormatType>,
}
257
/// The concrete response format: serialized as `"text"` or `"json_object"`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AssistantResponseFormatType {
    /// Free-form text output.
    Text,
    /// JSON mode: the model is constrained to emit valid JSON.
    JsonObject,
}
264
/// Specifies the format that the model must output. Compatible with
/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
/// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
/// message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to
/// produce JSON yourself via a system or user message. Without this, the model may
/// generate an unending stream of whitespace until the generation reaches the token
/// limit, resulting in a long-running and seemingly "stuck" request. Also note that
/// the message content may be partially cut off if `finish_reason="length"`, which
/// indicates the generation exceeded `max_tokens` or the conversation exceeded the
/// max context length.
// pub type AssistantResponseFormatOption = "none" | "auto" | AssistantResponseFormat;
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AssistantResponseFormatOption {
    /// No format constraint.
    #[default]
    None,
    /// Let the model pick the format.
    Auto,
    /// An explicit response-format object.
    // NOTE(review): with the derived (externally tagged) representation this
    // variant serializes as {"assistant_response_format": {...}} rather than the
    // bare object the API's union type suggests — verify against the wire format.
    AssistantResponseFormat(AssistantResponseFormat),
}
289
/// Specifies a tool the model should use. Use to force the model to call a specific
/// tool.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct AssistantToolChoice {
    /// The type of the tool. If type is `function`, the function name must be set
    /// (via the `function` field below). Serialized as the JSON field `"type"`.
    #[serde(rename = "type")]
    pub choice_type: AssistantToolChoiceType,

    /// The function to force when `choice_type` is `Function`; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub function: Option<AssistantToolChoiceFunction>,
}
301
302#[derive(Default, Debug, Clone, Deserialize, Serialize)]
303pub struct AssistantToolChoiceFunction {
304 /// The name of the function to call.
305 name: String,
306}
307
/// The kind of tool being forced; serialized in `snake_case`
/// (`"function"`, `"code_interpreter"`, `"file_search"`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AssistantToolChoiceType {
    #[default]
    Function,
    CodeInterpreter,
    FileSearch,
}
316
/// Controls which (if any) tool is called by the model. `none` means the model will
/// not call any tools and instead generates a message. `auto` is the default value
/// and means the model can pick between generating a message or calling one or more
/// tools. `required` means the model must call one or more tools before responding
/// to the user. Specifying a particular tool like `{"type": "file_search"}` or
/// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
/// call that tool.
// export type AssistantToolChoiceOption = 'none' | 'auto' | 'required' | AssistantToolChoice,
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AssistantToolChoiceOption {
    #[default]
    None,
    Auto,
    Required,
    /// A specific forced tool.
    // NOTE(review): the derived externally-tagged representation serializes this
    // as {"assistant_tool_choice": {...}} rather than the bare object shown in
    // the doc comment above — verify against the wire format.
    AssistantToolChoice(AssistantToolChoice),
}
334
/// Represents a thread that contains
/// [messages](https://platform.openai.com/docs/api-reference/messages).
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct Thread {
    /// The identifier, which can be referenced in API endpoints.
    pub id: String,

    /// The Unix timestamp (in seconds) for when the thread was created.
    pub created_at: u64,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format. Keys
    /// can be a maximum of 64 characters long and values can be a maximum of 512
    /// characters long.
    pub metadata: Option<Value>,

    /// The object type, which is always `thread`.
    pub object: OpenAIObject,

    /// A set of resources that are made available to the assistant's tools in this
    /// thread. The resources are specific to the type of tool. For example, the
    /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
    /// tool requires a list of vector store IDs.
    pub tool_resources: Option<thread::ToolResources>,
}
360
361pub mod thread {
362 use super::*;
363 /// A set of resources that are made available to the assistant's tools in this
364 /// thread. The resources are specific to the type of tool. For example, the
365 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
366 /// tool requires a list of vector store IDs.
367 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
368 pub struct ToolResources {
369 #[serde(skip_serializing_if = "Option::is_none")]
370 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
371
372 #[serde(skip_serializing_if = "Option::is_none")]
373 pub file_search: Option<tool_resources::FileSearch>,
374 }
375
376 pub mod tool_resources {
377 use super::*;
378 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
379 pub struct CodeInterpreter {
380 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
381 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
382 /// associated with the tool.
383 #[serde(skip_serializing_if = "Option::is_none")]
384 file_ids: Option<Vec<String>>,
385 }
386 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
387 pub struct FileSearch {
388 /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
389 /// attached to this thread. There can be a maximum of 1 vector store attached to
390 /// the thread.
391 #[serde(skip_serializing_if = "Option::is_none")]
392 vector_store_ids: Option<Vec<String>>,
393 }
394 }
395}
396
397pub struct ThreadDeleted {
398 pub id: String,
399 pub deleted: bool,
400 pub object: ThreadDeletedObject,
401}
402
/// The object-type tag of a deletion response; serialized as `"thread.deleted"`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ThreadDeletedObject {
    #[serde(rename = "thread.deleted")]
    ThreadDeleted,
}
408
/// Request body for creating a thread (`POST /threads`).
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ThreadCreateParams {
    /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
    /// start the thread with.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<thread_create_params::Message>>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format. Keys
    /// can be a maximum of 64 characters long and values can be a maximum of 512
    /// characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Value>,

    /// A set of resources that are made available to the assistant's tools in this
    /// thread. The resources are specific to the type of tool. For example, the
    /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
    /// tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<thread_create_params::ToolResources>,
}
430
431pub mod thread_create_params {
432 use super::*;
433
434 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
435 pub struct Message {
436 /// The text contents of the message.
437 pub content: message::Content,
438
439 /// The role of the entity that is creating the message. Allowed values include:
440 ///
441 /// - `user`: Indicates the message is sent by an actual user and should be used in
442 /// most cases to represent user-generated messages.
443 /// - `assistant`: Indicates the message is generated by the assistant. Use this
444 /// value to insert messages from the assistant into the conversation.
445 pub role: message::Role,
446
447 /// A list of files attached to the message, and the tools they should be added to.
448 #[serde(skip_serializing_if = "Option::is_none")]
449 pub attachments: Option<Vec<message::Attachment>>,
450
451 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
452 /// for storing additional information about the object in a structured format. Keys
453 /// can be a maximum of 64 characters long and values can be a maxium of 512
454 /// characters long.
455 #[serde(skip_serializing_if = "Option::is_none")]
456 pub metadata: Option<Value>,
457 }
458
459 pub mod message {
460 use super::*;
461
462 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
463 pub enum Role {
464 #[default]
465 User,
466 Assistant,
467 }
468
469 #[derive(Debug, Clone, Serialize, Deserialize)]
470 #[serde(untagged)]
471 pub enum Content {
472 Text(String),
473 Multiple(messages_api::MessageContent), // String | Vec<messages_api::MessageContentPartParam>
474 }
475
476 impl Default for Content {
477 fn default() -> Self {
478 Content::Text(String::default())
479 }
480 }
481
482 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
483 pub struct Attachment {
484 /// The ID of the file to attach to the message.
485 #[serde(skip_serializing_if = "Option::is_none")]
486 file_id: Option<String>,
487
488 /// The tools to add this file to.
489 #[serde(skip_serializing_if = "Option::is_none")]
490 tools: Option<Vec<attachment::Tool>>,
491 }
492
493 pub mod attachment {
494 use super::*;
495
496 #[derive(Debug, Clone, Serialize, Deserialize)]
497 #[serde(untagged)]
498 pub enum Tool {
499 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
500 FileSearch(SearchTool),
501 }
502
503 impl Default for Tool {
504 fn default() -> Self {
505 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
506 }
507 }
508
509 #[derive(Debug, Clone, Serialize, Deserialize)]
510 #[serde(tag = "type")]
511 pub enum SearchTool {
512 #[serde(rename = "file_search")]
513 FileSearch
514 }
515 }
516 }
517
518 /// A set of resources that are made available to the assistant's tools in this
519 /// thread. The resources are specific to the type of tool. For example, the
520 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
521 /// tool requires a list of vector store IDs.
522 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
523 pub struct ToolResources {
524 #[serde(skip_serializing_if = "Option::is_none")]
525 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
526
527 #[serde(skip_serializing_if = "Option::is_none")]
528 pub file_search: Option<tool_resources::FileSearch>,
529 }
530
531 pub mod tool_resources {
532 use super::*;
533 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
534 pub struct CodeInterpreter {
535 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
536 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
537 /// associated with the tool.
538 #[serde(skip_serializing_if = "Option::is_none")]
539 pub file_ids: Option<Vec<String>>,
540 }
541
542 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
543 pub struct FileSearch {
544 /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
545 /// attached to this thread. There can be a maximum of 1 vector store attached to
546 /// the thread.
547 #[serde(skip_serializing_if = "Option::is_none")]
548 pub vector_store_ids: Option<Vec<String>>,
549
550 /// A helper to create a
551 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
552 /// with file_ids and attach it to this thread. There can be a maximum of 1 vector
553 /// store attached to the thread.
554 #[serde(skip_serializing_if = "Option::is_none")]
555 pub vector_stores: Option<Vec<file_search::VectorStore>>,
556 }
557
558 pub mod file_search {
559 use super::*;
560 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
561 pub struct VectorStore {
562 /// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
563 /// strategy.
564 #[serde(skip_serializing_if = "Option::is_none")]
565 pub chunking_strategy: Option<vector_store::ChunkingStrategy>,
566
567 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
568 /// add to the vector store. There can be a maximum of 10000 files in a vector
569 /// store.
570 #[serde(skip_serializing_if = "Option::is_none")]
571 pub file_ids: Option<Vec<String>>,
572
573 /// Set of 16 key-value pairs that can be attached to a vector store. This can be
574 /// useful for storing additional information about the vector store in a structured
575 /// format. Keys can be a maximum of 64 characters long and values can be a maxium
576 /// of 512 characters long.
577 #[serde(skip_serializing_if = "Option::is_none")]
578 pub metadata: Option<Value>,
579 }
580
581 pub mod vector_store {
582 use super::*;
583
584 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
585 #[serde(tag = "type", rename_all = "snake_case")]
586 pub enum ChunkingStrategy {
587 /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
588 /// `800` and `chunk_overlap_tokens` of `400`.
589 ///
590 /// Always `auto`.
591 #[default]
592 Auto,
593
594 /// Always `static`.
595 Static {
596 #[serde(rename = "static")]
597 detail: vector_store_static::Static,
598 },
599 }
600
601 pub mod vector_store_static {
602 use super::*;
603 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
604 pub struct Static {
605 /// The number of tokens that overlap between chunks. The default value is `400`.
606 ///
607 /// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
608 pub chunk_overlap_tokens: u32,
609
610 /// The maximum number of tokens in each chunk. The default value is `800`. The
611 /// minimum value is `100` and the maximum value is `4096`.
612 pub max_chunk_size_tokens: u32,
613 }
614 }
615 }
616 }
617 }
618}
619
/// Request body for modifying a thread (`POST /threads/{thread_id}`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct ThreadUpdateParams {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format. Keys
    /// can be a maximum of 64 characters long and values can be a maximum of 512
    /// characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Value>,

    /// A set of resources that are made available to the assistant's tools in this
    /// thread. The resources are specific to the type of tool. For example, the
    /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
    /// tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<thread_update_params::ToolResources>,
}
636
/// Nested types for [`ThreadUpdateParams`].
pub mod thread_update_params {
    use serde::{Deserialize, Serialize};

    /// A set of resources that are made available to the assistant's tools in this
    /// thread. The resources are specific to the type of tool. For example, the
    /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
    /// tool requires a list of vector store IDs.
    #[derive(Default, Debug, Clone, Serialize, Deserialize)]
    pub struct ToolResources {
        #[serde(skip_serializing_if = "Option::is_none")]
        pub code_interpreter: Option<tool_resources::CodeInterpreter>,

        #[serde(skip_serializing_if = "Option::is_none")]
        pub file_search: Option<tool_resources::FileSearch>,
    }

    pub mod tool_resources {
        use super::*;
        #[derive(Default, Debug, Clone, Deserialize, Serialize)]
        pub struct CodeInterpreter {
            /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
            /// available to the `code_interpreter` tool. There can be a maximum of 20 files
            /// associated with the tool.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub file_ids: Option<Vec<String>>,
        }

        #[derive(Default, Debug, Clone, Deserialize, Serialize)]
        pub struct FileSearch {
            /// The
            /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
            /// attached to this thread. There can be a maximum of 1 vector store attached to
            /// the thread.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub vector_store_ids: Option<Vec<String>>,
        }
    }
}
675
676// export type ThreadCreateAndRunParams =
677// | ThreadCreateAndRunParamsNonStreaming
678// | ThreadCreateAndRunParamsStreaming,
679
/// Request body for creating a thread and running it in one request
/// (`POST /threads/runs`).
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ThreadCreateAndRunParams {
    /// The ID of the
    /// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
    /// execute this run.
    pub assistant_id: String,

    /// Override the default system message of the assistant. This is useful for
    /// modifying the behavior on a per-run basis.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// The maximum number of completion tokens that may be used over the course of the
    /// run. The run will make a best effort to use only the number of completion tokens
    /// specified, across multiple turns of the run. If the run exceeds the number of
    /// completion tokens specified, the run will end with status `incomplete`. See
    /// `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_completion_tokens: Option<u32>,

    /// The maximum number of prompt tokens that may be used over the course of the run.
    /// The run will make a best effort to use only the number of prompt tokens
    /// specified, across multiple turns of the run. If the run exceeds the number of
    /// prompt tokens specified, the run will end with status `incomplete`. See
    /// `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_prompt_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format. Keys
    /// can be a maximum of 64 characters long and values can be a maximum of 512
    /// characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Value>,

    /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
    /// be used to execute this run. If a value is provided here, it will override the
    /// model associated with the assistant. If not, the model associated with the
    /// assistant will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    // | (string & {}>,
    // | 'gpt-4o'
    // | 'gpt-4o-2024-05-13'
    // | 'gpt-4-turbo'
    // | 'gpt-4-turbo-2024-04-09'
    // | 'gpt-4-0125-preview'
    // | 'gpt-4-turbo-preview'
    // | 'gpt-4-1106-preview'
    // | 'gpt-4-vision-preview'
    // | 'gpt-4'
    // | 'gpt-4-0314'
    // | 'gpt-4-0613'
    // | 'gpt-4-32k'
    // | 'gpt-4-32k-0314'
    // | 'gpt-4-32k-0613'
    // | 'gpt-3.5-turbo'
    // | 'gpt-3.5-turbo-16k'
    // | 'gpt-3.5-turbo-0613'
    // | 'gpt-3.5-turbo-1106'
    // | 'gpt-3.5-turbo-0125'
    // | 'gpt-3.5-turbo-16k-0613'
    // | null,

    /// Whether to enable
    /// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
    /// during tool use.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// Specifies the format that the model must output. Compatible with
    /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
    /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
    /// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
    ///
    /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    /// message the model generates is valid JSON.
    ///
    /// **Important:** when using JSON mode, you **must** also instruct the model to
    /// produce JSON yourself via a system or user message. Without this, the model may
    /// generate an unending stream of whitespace until the generation reaches the token
    /// limit, resulting in a long-running and seemingly "stuck" request. Also note that
    /// the message content may be partially cut off if `finish_reason="length"`, which
    /// indicates the generation exceeded `max_tokens` or the conversation exceeded the
    /// max context length.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantResponseFormatOption>,

    /// If `true`, returns a stream of events that happen during the Run as server-sent
    /// events, terminating when the Run enters a terminal state with a `data: [DONE]`
    /// message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
    /// make the output more random, while lower values like 0.2 will make it more
    /// focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// If no thread is provided, an empty thread will be created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thread: Option<thread_create_and_run_params::Thread>,

    /// Controls which (if any) tool is called by the model. `none` means the model will
    /// not call any tools and instead generates a message. `auto` is the default value
    /// and means the model can pick between generating a message or calling one or more
    /// tools. `required` means the model must call one or more tools before responding
    /// to the user. Specifying a particular tool like `{"type": "file_search"}` or
    /// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
    /// call that tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<AssistantToolChoiceOption>,

    /// A set of resources that are used by the assistant's tools. The resources are
    /// specific to the type of tool. For example, the `code_interpreter` tool requires
    /// a list of file IDs, while the `file_search` tool requires a list of vector store
    /// IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<thread_create_and_run_params::ToolResources>,

    /// Override the tools the assistant can use for this run. This is useful for
    /// modifying the behavior on a per-run basis.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<thread_create_and_run_params::Tool>>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the
    /// model considers the results of the tokens with top_p probability mass. So 0.1
    /// means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Controls for how a thread will be truncated prior to the run. Use this to
    /// control the initial context window of the run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation_strategy: Option<thread_create_and_run_params::TruncationStrategy>,
}
819
820pub mod thread_create_and_run_params {
821 use super::*;
822
823 /// If no thread is provided, an empty thread will be created.
824 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
825 pub struct Thread {
826 /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
827 /// start the thread with.
828 #[serde(skip_serializing_if = "Option::is_none")]
829 pub messages: Option<Vec<thread::Message>>,
830
831 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
832 /// for storing additional information about the object in a structured format. Keys
833 /// can be a maximum of 64 characters long and values can be a maxium of 512
834 /// characters long.
835 #[serde(skip_serializing_if = "Option::is_none")]
836 pub metadata: Option<Value>,
837
838 /// A set of resources that are made available to the assistant's tools in this
839 /// thread. The resources are specific to the type of tool. For example, the
840 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
841 /// tool requires a list of vector store IDs.
842 #[serde(skip_serializing_if = "Option::is_none")]
843 pub tool_resources: Option<thread::ToolResources>,
844 }
845
846 pub mod thread {
847 use super::*;
848
849 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
850 pub struct Message {
851 /// The text contents of the message.
852 pub content: message::Content,
853
854 /// The role of the entity that is creating the message. Allowed values include:
855 ///
856 /// - `user`: Indicates the message is sent by an actual user and should be used in
857 /// most cases to represent user-generated messages.
858 /// - `assistant`: Indicates the message is generated by the assistant. Use this
859 /// value to insert messages from the assistant into the conversation.
860 pub role: message::Role,
861 /// A list of files attached to the message, and the tools they should be added to.
862 #[serde(skip_serializing_if = "Option::is_none")]
863 pub attachments: Option<Vec<message::Attachment>>,
864
865 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
866 /// for storing additional information about the object in a structured format. Keys
867 /// can be a maximum of 64 characters long and values can be a maxium of 512
868 /// characters long.
869 #[serde(skip_serializing_if = "Option::is_none")]
870 pub metadata: Option<Value>,
871 }
872
873 pub mod message {
874 use super::*;
875
876 #[derive(Debug, Clone, Serialize, Deserialize)]
877 #[serde(untagged)]
878 pub enum Content {
879 Text(String),
880 Multiple(messages_api::MessageContent), // String | Vec<messages_api::MessageContentPartParam>
881 }
882
883 impl Default for Content {
884 fn default() -> Self {
885 Content::Text(String::default())
886 }
887 }
888
889 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
890 #[serde(untagged, rename_all = "snake_case")]
891 pub enum Role {
892 #[default]
893 User,
894 Assistant,
895 }
896
897 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
898 pub struct Attachment {
899 /// The ID of the file to attach to the message.
900 #[serde(skip_serializing_if = "Option::is_none")]
901 pub file_id: Option<String>,
902
903 /// The tools to add this file to.
904 #[serde(skip_serializing_if = "Option::is_none")]
905 pub tools: Option<Vec<attachment::Tool>>,
906 }
907
908 pub mod attachment {
909 use super::*;
910
911 #[derive(Debug, Clone, Serialize, Deserialize)]
912 #[serde(untagged)]
913 pub enum Tool {
914 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
915 FileSearch(FileSearch),
916 }
917
918 impl Default for Tool {
919 fn default() -> Self {
920 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
921 }
922 }
923
924 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
925 #[serde(tag = "type", rename_all = "snake_case")]
926 pub enum FileSearch {
927 #[default]
928 FileSearch,
929 }
930 }
931 }
932
933
934 #[derive(Debug, Clone, Serialize, Deserialize)]
935 #[serde(untagged)]
936 pub enum Tools {
937 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
938 FileSearchTool(assistants_api::FileSearchTool),
939 FunctionTool(assistants_api::FunctionTool),
940 }
941
942 /// A set of resources that are made available to the assistant's tools in this
943 /// thread. The resources are specific to the type of tool. For example, the
944 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
945 /// tool requires a list of vector store IDs.
946 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
947 pub struct ToolResources {
948 #[serde(skip_serializing_if = "Option::is_none")]
949 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
950
951 #[serde(skip_serializing_if = "Option::is_none")]
952 pub file_search: Option<tool_resources::FileSearch>,
953 }
954
955 pub mod tool_resources {
956 use super::*;
957 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
958 pub struct CodeInterpreter {
959 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
960 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
961 /// associated with the tool.
962 #[serde(skip_serializing_if = "Option::is_none")]
963 pub file_ids: Option<Vec<String>>,
964 }
965
966 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
967 pub struct FileSearch {
968 /// The
969 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
970 /// attached to this thread. There can be a maximum of 1 vector store attached to
971 /// the thread.
972 #[serde(skip_serializing_if = "Option::is_none")]
973 pub vector_store_ids: Option<Vec<String>>,
974
975 /// A helper to create a
976 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
977 /// with file_ids and attach it to this thread. There can be a maximum of 1 vector
978 /// store attached to the thread.
979 #[serde(skip_serializing_if = "Option::is_none")]
980 pub vector_stores: Option<Vec<file_search::VectorStore>>,
981 }
982
983 pub mod file_search {
984 use super::*;
985
986 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
987 pub struct VectorStore {
988 /// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
989 /// strategy.
990 #[serde(skip_serializing_if = "Option::is_none")]
991 pub chunking_strategy: Option<vector_store::ChunkingStrategy>,
992
993 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
994 /// add to the vector store. There can be a maximum of 10000 files in a vector
995 /// store.
996 #[serde(skip_serializing_if = "Option::is_none")]
997 pub file_ids: Option<Vec<String>>,
998
999 /// Set of 16 key-value pairs that can be attached to a vector store. This can be
1000 /// useful for storing additional information about the vector store in a structured
1001 /// format. Keys can be a maximum of 64 characters long and values can be a maxium
1002 /// of 512 characters long.
1003 #[serde(skip_serializing_if = "Option::is_none")]
1004 pub metadata: Option<Value>,
1005 }
1006
1007 pub mod vector_store {
1008 use super::*;
1009
1010 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1011 #[serde(tag = "type", rename_all = "snake_case")]
1012 pub enum ChunkingStrategy {
1013 /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
1014 /// `800` and `chunk_overlap_tokens` of `400`.
1015 ///
1016 /// Always `auto`.
1017 #[default]
1018 Auto,
1019
1020 /// Always `static`.
1021 Static {
1022 #[serde(rename = "static")]
1023 detail: vector_store_static::Static,
1024 },
1025 }
1026
1027 pub mod vector_store_static {
1028 use super::*;
1029
1030 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1031 pub struct Static {
1032 /// The number of tokens that overlap between chunks. The default value is `400`.
1033 ///
1034 /// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
1035 pub chunk_overlap_tokens: u32,
1036
1037 /// The maximum number of tokens in each chunk. The default value is `800`. The
1038 /// minimum value is `100` and the maximum value is `4096`.
1039 pub max_chunk_size_tokens: u32,
1040 }
1041 }
1042 }
1043 }
1044 }
1045 }
1046
1047 #[derive(Debug, Clone, Serialize, Deserialize)]
1048 #[serde(untagged)]
1049 pub enum Tool {
1050 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
1051 FileSearchTool(assistants_api::FileSearchTool),
1052 FunctionTool(assistants_api::FunctionTool),
1053 }
1054
1055 impl Default for Tool {
1056 fn default() -> Self {
1057 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
1058 }
1059 }
1060
1061 /// A set of resources that are used by the assistant's tools. The resources are
1062 /// specific to the type of tool. For example, the `code_interpreter` tool requires
1063 /// a list of file IDs, while the `file_search` tool requires a list of vector store
1064 /// IDs.
1065 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1066 pub struct ToolResources {
1067 #[serde(skip_serializing_if = "Option::is_none")]
1068 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
1069
1070 #[serde(skip_serializing_if = "Option::is_none")]
1071 pub file_search: Option<tool_resources::FileSearch>,
1072 }
1073
1074 pub mod tool_resources {
1075 use super::*;
1076 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1077 pub struct CodeInterpreter {
1078 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
1079 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
1080 /// associated with the tool.
1081 #[serde(skip_serializing_if = "Option::is_none")]
1082 pub file_ids: Option<Vec<String>>,
1083 }
1084
1085 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1086 pub struct FileSearch {
1087 /// The ID of the
1088 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1089 /// attached to this assistant. There can be a maximum of 1 vector store attached to
1090 /// the assistant.
1091 #[serde(skip_serializing_if = "Option::is_none")]
1092 pub vector_store_ids: Option<Vec<String>>,
1093 }
1094 }
1095
1096 /// Controls for how a thread will be truncated prior to the run. Use this to
1097 /// control the intial context window of the run.
1098 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1099 pub struct TruncationStrategy {
1100 /// The truncation strategy to use for the thread. The default is `auto`. If set to
1101 /// `last_messages`, the thread will be truncated to the n most recent messages in
1102 /// the thread. When set to `auto`, messages in the middle of the thread will be
1103 /// dropped to fit the context length of the model, `max_prompt_tokens`.
1104 #[serde(rename = "type")]
1105 pub truncation_strategy_type: truncation_strategy::TruncationStrategyType,
1106 /// The number of most recent messages from the thread when constructing the context
1107 /// for the run.
1108 #[serde(skip_serializing_if = "Option::is_none")]
1109 pub last_messages: Option<u32>,
1110 }
1111
1112 pub mod truncation_strategy {
1113 use super::*;
1114
1115 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1116 #[serde(tag = "type", rename_all = "snake_case")]
1117 pub enum TruncationStrategyType {
1118 #[default]
1119 Auto,
1120 LastMessages,
1121 }
1122 }
1123}
1124
/// Parameters for creating a thread and running it in one request (poll variant).
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ThreadCreateAndRunPollParams {
    /// The ID of the
    /// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
    /// execute this run.
    pub assistant_id: String,
    /// Override the default system message of the assistant. This is useful for
    /// modifying the behavior on a per-run basis.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// The maximum number of completion tokens that may be used over the course of the
    /// run. The run will make a best effort to use only the number of completion tokens
    /// specified, across multiple turns of the run. If the run exceeds the number of
    /// completion tokens specified, the run will end with status `incomplete`. See
    /// `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_completion_tokens: Option<u32>,

    /// The maximum number of prompt tokens that may be used over the course of the run.
    /// The run will make a best effort to use only the number of prompt tokens
    /// specified, across multiple turns of the run. If the run exceeds the number of
    /// prompt tokens specified, the run will end with status `incomplete`. See
    /// `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_prompt_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format. Keys
    /// can be a maximum of 64 characters long and values can be a maximum of 512
    /// characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Value>,

    /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
    /// be used to execute this run. If a value is provided here, it will override the
    /// model associated with the assistant. If not, the model associated with the
    /// assistant will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    // Known values at the time of writing:
    // | 'gpt-4o'
    // | 'gpt-4o-2024-05-13'
    // | 'gpt-4-turbo'
    // | 'gpt-4-turbo-2024-04-09'
    // | 'gpt-4-0125-preview'
    // | 'gpt-4-turbo-preview'
    // | 'gpt-4-1106-preview'
    // | 'gpt-4-vision-preview'
    // | 'gpt-4'
    // | 'gpt-4-0314'
    // | 'gpt-4-0613'
    // | 'gpt-4-32k'
    // | 'gpt-4-32k-0314'
    // | 'gpt-4-32k-0613'
    // | 'gpt-3.5-turbo'
    // | 'gpt-3.5-turbo-16k'
    // | 'gpt-3.5-turbo-0613'
    // | 'gpt-3.5-turbo-1106'
    // | 'gpt-3.5-turbo-0125'
    // | 'gpt-3.5-turbo-16k-0613'

    /// Specifies the format that the model must output. Compatible with
    /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
    /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
    /// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
    ///
    /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    /// message the model generates is valid JSON.
    ///
    /// **Important:** when using JSON mode, you **must** also instruct the model to
    /// produce JSON yourself via a system or user message. Without this, the model may
    /// generate an unending stream of whitespace until the generation reaches the token
    /// limit, resulting in a long-running and seemingly "stuck" request. Also note that
    /// the message content may be partially cut off if `finish_reason="length"`, which
    /// indicates the generation exceeded `max_tokens` or the conversation exceeded the
    /// max context length.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantResponseFormatOption>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
    /// make the output more random, while lower values like 0.2 will make it more
    /// focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// If no thread is provided, an empty thread will be created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thread: Option<thread_create_and_run_poll_params::Thread>,

    /// Controls which (if any) tool is called by the model. `none` means the model will
    /// not call any tools and instead generates a message. `auto` is the default value
    /// and means the model can pick between generating a message or calling one or more
    /// tools. `required` means the model must call one or more tools before responding
    /// to the user. Specifying a particular tool like `{"type": "file_search"}` or
    /// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
    /// call that tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<AssistantToolChoiceOption>,

    /// A set of resources that are used by the assistant's tools. The resources are
    /// specific to the type of tool. For example, the `code_interpreter` tool requires
    /// a list of file IDs, while the `file_search` tool requires a list of vector store
    /// IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<thread_create_and_run_poll_params::ToolResources>,

    /// Override the tools the assistant can use for this run. This is useful for
    /// modifying the behavior on a per-run basis.
    // NOTE(review): this reuses `Tool` from the sibling `thread_create_and_run_params`
    // module — `thread_create_and_run_poll_params` defines no top-level `Tool`.
    // Confirm the cross-module reference is intentional.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<thread_create_and_run_params::Tool>>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the
    /// model considers the results of the tokens with top_p probability mass. So 0.1
    /// means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Controls for how a thread will be truncated prior to the run. Use this to
    /// control the initial context window of the run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation_strategy: Option<thread_create_and_run_poll_params::TruncationStrategy>,
}
1249
1250pub mod thread_create_and_run_poll_params {
1251 use super::*;
1252
1253 /// If no thread is provided, an empty thread will be created.
1254 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1255 pub struct Thread {
1256 /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
1257 /// start the thread with.
1258 #[serde(skip_serializing_if = "Option::is_none")]
1259 pub messages: Option<Vec<thread::Message>>,
1260
1261 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
1262 /// for storing additional information about the object in a structured format. Keys
1263 /// can be a maximum of 64 characters long and values can be a maxium of 512
1264 /// characters long.
1265 #[serde(skip_serializing_if = "Option::is_none")]
1266 pub metadata: Option<Value>,
1267
1268 /// A set of resources that are made available to the assistant's tools in this
1269 /// thread. The resources are specific to the type of tool. For example, the
1270 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
1271 /// tool requires a list of vector store IDs.
1272 #[serde(skip_serializing_if = "Option::is_none")]
1273 pub tool_resources: Option<thread::ToolResources>,
1274 }
1275
1276 pub mod thread {
1277 use super::*;
1278
1279 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1280 pub struct Message {
1281 /// The text contents of the message.
1282 pub content: message::Content,
1283
1284 /// The role of the entity that is creating the message. Allowed values include:
1285 ///
1286 /// - `user`: Indicates the message is sent by an actual user and should be used in
1287 /// most cases to represent user-generated messages.
1288 /// - `assistant`: Indicates the message is generated by the assistant. Use this
1289 /// value to insert messages from the assistant into the conversation.
1290 pub role: message::Role,
1291
1292 /// A list of files attached to the message, and the tools they should be added to.
1293 #[serde(skip_serializing_if = "Option::is_none")]
1294 pub attachments: Option<Vec<message::Attachment>>,
1295
1296 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
1297 /// for storing additional information about the object in a structured format. Keys
1298 /// can be a maximum of 64 characters long and values can be a maxium of 512
1299 /// characters long.
1300 #[serde(skip_serializing_if = "Option::is_none")]
1301 pub metadata: Option<Value>,
1302 }
1303
1304 pub mod message {
1305 use super::*;
1306
1307 #[derive(Debug, Clone, Serialize, Deserialize)]
1308 #[serde(untagged)]
1309 pub enum Content {
1310 Text(String),
1311 Multiple(messages_api::MessageContent), // String | Vec<messages_api::MessageContentPartParam>
1312 }
1313
1314 impl Default for Content {
1315 fn default() -> Self {
1316 Content::Text(String::default())
1317 }
1318 }
1319
1320 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1321 #[serde(untagged, rename_all = "snake_case")]
1322 pub enum Role {
1323 #[default]
1324 User,
1325 Assistant,
1326 }
1327
1328
1329 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1330 pub struct Attachment {
1331 /// The ID of the file to attach to the message.
1332 #[serde(skip_serializing_if = "Option::is_none")]
1333 pub file_id: Option<String>,
1334
1335 /// The tools to add this file to.
1336 #[serde(skip_serializing_if = "Option::is_none")]
1337 pub tools: Option<Vec<attachment::Tool>>,
1338 }
1339
1340 pub mod attachment {
1341 use super::*;
1342
1343 #[derive(Debug, Clone, Serialize, Deserialize)]
1344 #[serde(untagged)]
1345 pub enum Tool {
1346 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
1347 FileSearchTool(assistants_api::FileSearchTool),
1348 }
1349
1350 impl Default for Tool {
1351 fn default() -> Self {
1352 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
1353 }
1354 }
1355 }
1356 }
1357
1358 /// A set of resources that are made available to the assistant's tools in this
1359 /// thread. The resources are specific to the type of tool. For example, the
1360 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
1361 /// tool requires a list of vector store IDs.
1362 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1363 pub struct ToolResources {
1364 #[serde(skip_serializing_if = "Option::is_none")]
1365 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
1366
1367 #[serde(skip_serializing_if = "Option::is_none")]
1368 pub file_search: Option<tool_resources::FileSearch>,
1369 }
1370
1371 pub mod tool_resources {
1372 use super::*;
1373 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1374 pub struct CodeInterpreter {
1375 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
1376 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
1377 /// associated with the tool.
1378 #[serde(skip_serializing_if = "Option::is_none")]
1379 pub file_ids: Option<Vec<String>>,
1380 }
1381
1382 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1383 pub struct FileSearch {
1384 /// The
1385 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1386 /// attached to this thread. There can be a maximum of 1 vector store attached to
1387 /// the thread.
1388 #[serde(skip_serializing_if = "Option::is_none")]
1389 pub vector_store_ids: Option<Vec<String>>,
1390
1391 /// A helper to create a
1392 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1393 /// with file_ids and attach it to this thread. There can be a maximum of 1 vector
1394 /// store attached to the thread.
1395 #[serde(skip_serializing_if = "Option::is_none")]
1396 pub vector_stores: Option<Vec<file_search::VectorStore>>,
1397 }
1398
1399 pub mod file_search {
1400 use super::*;
1401 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1402 pub struct VectorStore {
1403 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
1404 /// add to the vector store. There can be a maximum of 10000 files in a vector
1405 /// store.
1406 #[serde(skip_serializing_if = "Option::is_none")]
1407 pub file_ids: Option<Vec<String>>,
1408
1409 /// Set of 16 key-value pairs that can be attached to a vector store. This can be
1410 /// useful for storing additional information about the vector store in a structured
1411 /// format. Keys can be a maximum of 64 characters long and values can be a maxium
1412 /// of 512 characters long.
1413 #[serde(skip_serializing_if = "Option::is_none")]
1414 pub metadata: Option<Value>,
1415 }
1416 }
1417 }
1418 }
1419
1420 /// A set of resources that are used by the assistant's tools. The resources are
1421 /// specific to the type of tool. For example, the `code_interpreter` tool requires
1422 /// a list of file IDs, while the `file_search` tool requires a list of vector store
1423 /// IDs.
1424 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1425 pub struct ToolResources {
1426 #[serde(skip_serializing_if = "Option::is_none")]
1427 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
1428
1429 #[serde(skip_serializing_if = "Option::is_none")]
1430 pub file_search: Option<tool_resources::FileSearch>,
1431 }
1432
1433 pub mod tool_resources {
1434 use super::*;
1435 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1436 pub struct CodeInterpreter {
1437 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
1438 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
1439 /// associated with the tool.
1440 #[serde(skip_serializing_if = "Option::is_none")]
1441 pub file_ids: Option<Vec<String>>,
1442 }
1443
1444 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1445 pub struct FileSearch {
1446 /// The ID of the
1447 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1448 /// attached to this assistant. There can be a maximum of 1 vector store attached to
1449 /// the assistant.
1450 #[serde(skip_serializing_if = "Option::is_none")]
1451 pub vector_store_ids: Option<Vec<String>>,
1452 }
1453 }
1454
1455 /// Controls for how a thread will be truncated prior to the run. Use this to
1456 /// control the intial context window of the run.
1457 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1458 pub struct TruncationStrategy {
1459 /// The truncation strategy to use for the thread. The default is `auto`. If set to
1460 /// `last_messages`, the thread will be truncated to the n most recent messages in
1461 /// the thread. When set to `auto`, messages in the middle of the thread will be
1462 /// dropped to fit the context length of the model, `max_prompt_tokens`.
1463 #[serde(rename = "type")]
1464 pub truncation_strategy_type: truncation_strategy::TruncationStrategyType,
1465
1466 /// The number of most recent messages from the thread when constructing the context
1467 /// for the run.
1468 #[serde(skip_serializing_if = "Option::is_none")]
1469 pub last_messages: Option<u32>,
1470 }
1471
1472 pub mod truncation_strategy {
1473 use super::*;
1474
1475 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1476 #[serde(tag = "type", rename_all = "snake_case")]
1477 pub enum TruncationStrategyType {
1478 #[default]
1479 Auto,
1480 LastMessages,
1481 }
1482 }
1483}
1484
1485#[derive(Default, Debug, Clone, Deserialize, Serialize)]
1486pub struct ThreadCreateAndRunStreamParams {
1487 /// The ID of the
1488 /// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
1489 /// execute this run.
1490 assistant_id: String,
1491 /// Override the default system message of the assistant. This is useful for
1492 /// modifying the behavior on a per-run basis.
1493 #[serde(skip_serializing_if = "Option::is_none")]
1494 pub instructions: Option<String>,
1495
1496 /// The maximum number of completion tokens that may be used over the course of the
1497 /// run. The run will make a best effort to use only the number of completion tokens
1498 /// specified, across multiple turns of the run. If the run exceeds the number of
1499 /// completion tokens specified, the run will end with status `incomplete`. See
1500 /// `incomplete_details` for more info.
1501 #[serde(skip_serializing_if = "Option::is_none")]
1502 pub max_completion_tokens: Option<u32>,
1503
1504 /// The maximum number of prompt tokens that may be used over the course of the run.
1505 /// The run will make a best effort to use only the number of prompt tokens
1506 /// specified, across multiple turns of the run. If the run exceeds the number of
1507 /// prompt tokens specified, the run will end with status `incomplete`. See
1508 /// `incomplete_details` for more info.
1509 #[serde(skip_serializing_if = "Option::is_none")]
1510 pub max_prompt_tokens: Option<u32>,
1511
1512 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
1513 /// for storing additional information about the object in a structured format. Keys
1514 /// can be a maximum of 64 characters long and values can be a maxium of 512
1515 /// characters long.
1516 #[serde(skip_serializing_if = "Option::is_none")]
1517 pub metadata: Option<Value>,
1518
1519 /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
1520 /// be used to execute this run. If a value is provided here, it will override the
1521 /// model associated with the assistant. If not, the model associated with the
1522 /// assistant will be used.
1523 #[serde(skip_serializing_if = "Option::is_none")]
1524 pub model: Option<String>,
1525 // | 'gpt-4o'
1526 // | 'gpt-4o-2024-05-13'
1527 // | 'gpt-4-turbo'
1528 // | 'gpt-4-turbo-2024-04-09'
1529 // | 'gpt-4-0125-preview'
1530 // | 'gpt-4-turbo-preview'
1531 // | 'gpt-4-1106-preview'
1532 // | 'gpt-4-vision-preview'
1533 // | 'gpt-4'
1534 // | 'gpt-4-0314'
1535 // | 'gpt-4-0613'
1536 // | 'gpt-4-32k'
1537 // | 'gpt-4-32k-0314'
1538 // | 'gpt-4-32k-0613'
1539 // | 'gpt-3.5-turbo'
1540 // | 'gpt-3.5-turbo-16k'
1541 // | 'gpt-3.5-turbo-0613'
1542 // | 'gpt-3.5-turbo-1106'
1543 // | 'gpt-3.5-turbo-0125'
1544 // | 'gpt-3.5-turbo-16k-0613'
1545
1546 /// Specifies the format that the model must output. Compatible with
1547 /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1548 /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1549 /// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1550 ///
1551 /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
1552 /// message the model generates is valid JSON.
1553 ///
1554 /// **Important:** when using JSON mode, you **must** also instruct the model to
1555 /// produce JSON yourself via a system or user message. Without this, the model may
1556 /// generate an unending stream of whitespace until the generation reaches the token
1557 /// limit, resulting in a long-running and seemingly "stuck" request. Also note that
1558 /// the message content may be partially cut off if `finish_reason="length"`, which
1559 /// indicates the generation exceeded `max_tokens` or the conversation exceeded the
1560 /// max context length.
1561 #[serde(skip_serializing_if = "Option::is_none")]
1562 pub response_format: Option<AssistantResponseFormatOption>,
1563
1564 /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1565 /// make the output more random, while lower values like 0.2 will make it more
1566 /// focused and deterministic.
1567 #[serde(skip_serializing_if = "Option::is_none")]
1568 pub temperature: Option<f32>,
1569
1570 /// If no thread is provided, an empty thread will be created.
1571 #[serde(skip_serializing_if = "Option::is_none")]
1572 pub thread: Option<thread_create_and_run_stream_params::Thread>,
1573
1574 /// Controls which (if any) tool is called by the model. `none` means the model will
1575 /// not call any tools and instead generates a message. `auto` is the default value
1576 /// and means the model can pick between generating a message or calling one or more
1577 /// tools. `required` means the model must call one or more tools before responding
1578 /// to the user. Specifying a particular tool like `{"type": "file_search"}` or
1579 /// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
1580 /// call that tool.
1581 #[serde(skip_serializing_if = "Option::is_none")]
1582 pub tool_choice: Option<AssistantToolChoiceOption>,
1583
1584 /// A set of resources that are used by the assistant's tools. The resources are
1585 /// specific to the type of tool. For example, the `code_interpreter` tool requires
1586 /// a list of file IDs, while the `file_search` tool requires a list of vector store
1587 /// IDs.
1588 #[serde(skip_serializing_if = "Option::is_none")]
1589 pub tool_resources: Option<thread_create_and_run_stream_params::ToolResources>,
1590
1591 /// Override the tools the assistant can use for this run. This is useful for
1592 /// modifying the behavior on a per-run basis.
1593 #[serde(skip_serializing_if = "Option::is_none")]
1594 pub tools: Option<Vec<thread_create_and_run_stream_params::Tool>>,
1595
1596 /// An alternative to sampling with temperature, called nucleus sampling, where the
1597 /// model considers the results of the tokens with top_p probability mass. So 0.1
1598 /// means only the tokens comprising the top 10% probability mass are considered.
1599 ///
1600 /// We generally recommend altering this or temperature but not both.
1601 #[serde(skip_serializing_if = "Option::is_none")]
1602 pub top_p: Option<f32>,
1603
1604 /// Controls for how a thread will be truncated prior to the run. Use this to
    /// control the initial context window of the run.
1606 #[serde(skip_serializing_if = "Option::is_none")]
1607 pub truncation_strategy: Option<thread_create_and_run_stream_params::TruncationStrategy>,
1608}
1609
1610pub mod thread_create_and_run_stream_params {
1611 use super::*;
1612 /// If no thread is provided, an empty thread will be created.
1613 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1614 pub struct Thread {
1615 /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
1616 /// start the thread with.
1617 #[serde(skip_serializing_if = "Option::is_none")]
1618 pub messages: Option<Vec<thread::Message> >,
1619
1620 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
1621 /// for storing additional information about the object in a structured format. Keys
1622 /// can be a maximum of 64 characters long and values can be a maxium of 512
1623 /// characters long.
1624 #[serde(skip_serializing_if = "Option::is_none")]
1625 pub metadata: Option<Value>,
1626
1627 /// A set of resources that are made available to the assistant's tools in this
1628 /// thread. The resources are specific to the type of tool. For example, the
1629 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
1630 /// tool requires a list of vector store IDs.
1631 #[serde(skip_serializing_if = "Option::is_none")]
1632 pub tool_resources: Option<thread::ToolResources>,
1633 }
1634
1635 pub mod thread {
1636 use super::*;
1637 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1638 pub struct Message {
1639 /// The text contents of the message.
1640 pub content: message::Content,
1641
1642 /// The role of the entity that is creating the message. Allowed values include:
1643 ///
1644 /// - `user`: Indicates the message is sent by an actual user and should be used in
1645 /// most cases to represent user-generated messages.
1646 /// - `assistant`: Indicates the message is generated by the assistant. Use this
1647 /// value to insert messages from the assistant into the conversation.
1648 pub role: message::Role,
1649
1650 /// A list of files attached to the message, and the tools they should be added to.
1651 #[serde(skip_serializing_if = "Option::is_none")]
1652 pub attachments: Option<Vec<message::Attachment> >,
1653
1654 /// Set of 16 key-value pairs that can be attached to an object. This can be useful
1655 /// for storing additional information about the object in a structured format. Keys
1656 /// can be a maximum of 64 characters long and values can be a maxium of 512
1657 /// characters long.
1658 #[serde(skip_serializing_if = "Option::is_none")]
1659 pub metadata: Option<Value>,
1660 }
1661
1662 pub mod message {
1663 use super::*;
1664
1665 #[derive(Debug, Clone, Serialize, Deserialize)]
1666 #[serde(untagged)]
1667 pub enum Content {
1668 Text(String),
1669 Multiple(messages_api::MessageContent), // String | Vec<messages_api::MessageContentPartParam>
1670 }
1671
1672 impl Default for Content {
1673 fn default() -> Self {
1674 Content::Text(String::default())
1675 }
1676 }
1677
1678 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1679 pub enum Role {
1680 #[default]
1681 User,
1682 Assistant,
1683 }
1684
1685 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1686 pub struct Attachment {
1687 /// The ID of the file to attach to the message.
1688 #[serde(skip_serializing_if = "Option::is_none")]
1689 pub file_id: Option<String>,
1690
1691 /// The tools to add this file to.
1692 #[serde(skip_serializing_if = "Option::is_none")]
1693 pub tools: Option<Vec<attachment::Tool>>,
1694 // pub tools: Option<Vec<assistants_api::CodeInterpreterTool | assistants_api::FileSearchTool> >,
1695 }
1696
1697 pub mod attachment {
1698 use super::*;
1699
1700 #[derive(Debug, Clone, Serialize, Deserialize)]
1701 #[serde(untagged)]
1702 pub enum Tool {
1703 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
1704 FileSearchTool(assistants_api::FileSearchTool),
1705 }
1706
1707 impl Default for Tool {
1708 fn default() -> Self {
1709 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
1710 }
1711 }
1712
1713 #[derive(Debug, Clone, Serialize, Deserialize)]
1714 #[serde(tag = "type")]
1715 pub enum SearchTool {
1716 #[serde(rename = "file_search")]
1717 FileSearch
1718 }
1719 }
1720 }
1721
1722 /// A set of resources that are made available to the assistant's tools in this
1723 /// thread. The resources are specific to the type of tool. For example, the
1724 /// `code_interpreter` tool requires a list of file IDs, while the `file_search`
1725 /// tool requires a list of vector store IDs.
1726 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1727 pub struct ToolResources {
1728 #[serde(skip_serializing_if = "Option::is_none")]
1729 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
1730
1731 #[serde(skip_serializing_if = "Option::is_none")]
1732 pub file_search: Option<tool_resources::FileSearch>,
1733 }
1734
1735 pub mod tool_resources {
1736 use super::*;
1737 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1738 pub struct CodeInterpreter {
1739 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
1740 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
1741 /// associated with the tool.
1742 #[serde(skip_serializing_if = "Option::is_none")]
1743 pub file_ids: Option<Vec<String>>,
1744 }
1745
1746 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1747 pub struct FileSearch {
1748 /// The
1749 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1750 /// attached to this thread. There can be a maximum of 1 vector store attached to
1751 /// the thread.
1752 #[serde(skip_serializing_if = "Option::is_none")]
1753 pub vector_store_ids: Option<Vec<String>>,
1754
1755 /// A helper to create a
1756 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1757 /// with file_ids and attach it to this thread. There can be a maximum of 1 vector
1758 /// store attached to the thread.
1759 #[serde(skip_serializing_if = "Option::is_none")]
1760 pub vector_stores: Option<Vec<file_search::VectorStore>>,
1761 }
1762
1763 pub mod file_search {
1764 use super::*;
1765 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1766 pub struct VectorStore {
1767 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
1768 /// add to the vector store. There can be a maximum of 10000 files in a vector
1769 /// store.
1770 #[serde(skip_serializing_if = "Option::is_none")]
1771 pub file_ids: Option<Vec<String>>,
1772
1773 /// Set of 16 key-value pairs that can be attached to a vector store. This can be
1774 /// useful for storing additional information about the vector store in a structured
1775 /// format. Keys can be a maximum of 64 characters long and values can be a maxium
1776 /// of 512 characters long.
1777 #[serde(skip_serializing_if = "Option::is_none")]
1778 pub metadata: Option<Value>,
1779 }
1780 }
1781 }
1782 }
1783
1784 #[derive(Debug, Clone, Serialize, Deserialize)]
1785 #[serde(untagged)]
1786 pub enum Tool {
1787 CodeInterpreterTool(assistants_api::CodeInterpreterTool),
1788 FileSearchTool(assistants_api::FileSearchTool),
1789 FunctionTool(assistants_api::FunctionTool),
1790 }
1791
1792 impl Default for Tool {
1793 fn default() -> Self {
1794 Tool::CodeInterpreterTool(assistants_api::CodeInterpreterTool::default())
1795 }
1796 }
1797
1798 /// A set of resources that are used by the assistant's tools. The resources are
1799 /// specific to the type of tool. For example, the `code_interpreter` tool requires
1800 /// a list of file IDs, while the `file_search` tool requires a list of vector store
1801 /// IDs.
1802 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1803 pub struct ToolResources {
1804 #[serde(skip_serializing_if = "Option::is_none")]
1805 pub code_interpreter: Option<tool_resources::CodeInterpreter>,
1806
1807 #[serde(skip_serializing_if = "Option::is_none")]
1808 pub file_search: Option<tool_resources::FileSearch>,
1809 }
1810
1811 pub mod tool_resources {
1812 use super::*;
1813 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1814 pub struct CodeInterpreter {
1815 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
1816 /// available to the `code_interpreter` tool. There can be a maximum of 20 files
1817 /// associated with the tool.
1818 #[serde(skip_serializing_if = "Option::is_none")]
1819 pub file_ids: Option<Vec<String>>,
1820 }
1821
1822 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1823 pub struct FileSearch {
1824 /// The ID of the
1825 /// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
1826 /// attached to this assistant. There can be a maximum of 1 vector store attached to
1827 /// the assistant.
1828 #[serde(skip_serializing_if = "Option::is_none")]
1829 pub vector_store_ids: Option<Vec<String>>,
1830 }
1831 }
1832
1833 /// Controls for how a thread will be truncated prior to the run. Use this to
1834 /// control the intial context window of the run.
1835 #[derive(Default, Debug, Clone, Deserialize, Serialize)]
1836 pub struct TruncationStrategy {
1837 /// The truncation strategy to use for the thread. The default is `auto`. If set to
1838 /// `last_messages`, the thread will be truncated to the n most recent messages in
1839 /// the thread. When set to `auto`, messages in the middle of the thread will be
1840 /// dropped to fit the context length of the model, `max_prompt_tokens`.
1841 #[serde(rename = "type")]
1842 pub truncation_strategy_type: truncation_strategy::TruncationStrategyType,
1843 /// The number of most recent messages from the thread when constructing the context
1844 /// for the run.
1845 #[serde(skip_serializing_if = "Option::is_none")]
1846 pub last_messages: Option<u32>,
1847 }
1848
1849 pub mod truncation_strategy {
1850 use super::*;
1851
1852 #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1853 #[serde(tag = "type", rename_all = "snake_case")]
1854 pub enum TruncationStrategyType {
1855 #[default]
1856 Auto,
1857 LastMessages,
1858 }
1859 }
1860}
1861
1862pub mod threads {
1863 use super::*;
1864
1865 pub use threads_api::AssistantResponseFormat;
1866 pub use threads_api::AssistantResponseFormatOption;
1867 pub use threads_api::AssistantToolChoice;
1868 pub use threads_api::AssistantToolChoiceFunction;
1869 pub use threads_api::AssistantToolChoiceOption;
1870 pub use threads_api::Thread;
1871 pub use threads_api::ThreadDeleted;
1872 pub use threads_api::ThreadCreateParams;
1873 pub use threads_api::ThreadUpdateParams;
1874 pub use threads_api::ThreadCreateAndRunParams;
1875 pub use threads_api::ThreadCreateAndRunPollParams;
1876 pub use threads_api::ThreadCreateAndRunStreamParams;
1877 pub use runs_api::Runs;
1878 pub use runs_api::RequiredActionFunctionToolCall;
1879 pub use runs_api::Run;
1880 pub use runs_api::RunStatus;
1881 pub use runs_api::RunCreateParams;
1882 pub use runs_api::RunUpdateParams;
1883 pub use runs_api::RunListParams;
1884 pub use runs_api::RunCreateAndPollParams;
1885 pub use runs_api::RunCreateAndStreamParams;
1886 pub use runs_api::RunStreamParams;
1887 pub use runs_api::RunSubmitToolOutputsParams;
1888 pub use runs_api::RunSubmitToolOutputsAndPollParams;
1889 pub use runs_api::RunSubmitToolOutputsStreamParams;
1890 pub use messages_api::Messages;
1891 pub use messages_api::Annotation;
1892 pub use messages_api::AnnotationDelta;
1893 pub use messages_api::FileCitationAnnotation;
1894 pub use messages_api::FileCitationDeltaAnnotation;
1895 pub use messages_api::FilePathAnnotation;
1896 pub use messages_api::FilePathDeltaAnnotation;
1897 pub use messages_api::ImageFile;
1898 pub use messages_api::ImageFileDelta;
1899 pub use messages_api::ImageFileDeltaBlock;
1900 pub use messages_api::ImageURL;
1901 pub use messages_api::ImageURLDelta;
1902 pub use messages_api::ImageURLDeltaBlock;
1903 pub use messages_api::Message;
1904 pub use messages_api::MessageContent;
1905 pub use messages_api::MessageContentDelta;
1906 pub use messages_api::MessageDeleted;
1907 pub use messages_api::MessageDelta;
1908 pub use messages_api::MessageDeltaEvent;
1909 pub use messages_api::Text;
1910 pub use messages_api::TextContentBlockParam;
1911 pub use messages_api::TextDelta;
1912 pub use messages_api::TextDeltaBlock;
1913 pub use messages_api::MessageCreateParams;
1914 pub use messages_api::MessageUpdateParams;
1915 pub use messages_api::MessageListParams;
1916}