openai_struct/models/modify_assistant_request.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

#[allow(unused_imports)]
use serde_json::Value;

#[derive(Debug, Serialize, Deserialize)]
pub struct ModifyAssistantRequest {
    /// The description of the assistant. The maximum length is 512 characters.
    #[serde(rename = "description")]
    pub description: Option<String>,
    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    #[serde(rename = "instructions")]
    pub instructions: Option<String>,
    #[serde(rename = "metadata")]
    pub metadata: Option<crate::models::Metadata>,
    /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
    #[serde(rename = "model")]
    pub model: Option<Value>,
    /// The name of the assistant. The maximum length is 256 characters.
    #[serde(rename = "name")]
    pub name: Option<String>,
    #[serde(rename = "reasoning_effort")]
    pub reasoning_effort: Option<crate::models::ReasoningEffort>,
    #[serde(rename = "response_format")]
    pub response_format: Option<crate::models::AssistantsApiResponseFormatOption>,
    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(rename = "temperature")]
    pub temperature: Option<f32>,
    #[serde(rename = "tool_resources")]
    pub tool_resources: Option<crate::models::ModifyAssistantRequestToolResources>,
    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(rename = "tools")]
    pub tools: Option<Vec<Value>>,
    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
    #[serde(rename = "top_p")]
    pub top_p: Option<f32>,
}
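
// A minimal usage sketch (not part of the generated model): it builds a request that
// updates an assistant's instructions, name, model, and sampling settings while leaving
// every other field unset, then serializes it to the JSON body sent to the
// modify-assistant endpoint. The model id and field values below are illustrative
// assumptions, not defaults taken from the API.
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::Value;

    #[test]
    fn serializes_modify_assistant_request() {
        let request = ModifyAssistantRequest {
            description: None,
            instructions: Some("You are a concise coding assistant.".to_string()),
            metadata: None,
            // `model` is an untyped JSON value in this struct, so a plain string id
            // (an assumed example) is supplied here.
            model: Some(Value::String("gpt-4o".to_string())),
            name: Some("code-helper".to_string()),
            reasoning_effort: None,
            response_format: None,
            temperature: Some(0.2),
            tool_resources: None,
            tools: None,
            top_p: None,
        };

        let body = serde_json::to_string(&request).expect("request should serialize");
        // Unset Option fields serialize as explicit nulls, since the struct does not
        // use `skip_serializing_if`; the populated fields appear under their renamed keys.
        assert!(body.contains("\"name\":\"code-helper\""));
    }
}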