openai_struct/models/truncation_object.rs

/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

/// TruncationObject : Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

#[allow(unused_imports)]
use serde_json::Value;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct TruncationObject {
    /// The number of most recent messages from the thread when constructing the context for the run.
    #[serde(rename = "last_messages")]
    pub last_messages: Option<i32>,
    /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.
    #[serde(rename = "type")]
    pub _type: String,
}
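
// A minimal usage sketch, not part of the generated model: it assumes the
// crate's existing `serde_json` dependency (2018+ edition paths) and shows how
// `#[serde(rename = "type")]` maps the Rust field `_type` onto the JSON key
// `type` when a run is truncated to the most recent messages.
#[cfg(test)]
mod tests {
    use super::TruncationObject;

    #[test]
    fn truncation_object_round_trips_through_json() {
        // Keep only the 10 most recent messages when building the run's context.
        let truncation = TruncationObject {
            last_messages: Some(10),
            _type: "last_messages".to_string(),
        };

        // Serialization emits the wire name `type`, not the Rust field name `_type`.
        let json = serde_json::to_string(&truncation).expect("serialization should succeed");
        assert!(json.contains("\"type\":\"last_messages\""));
        assert!(json.contains("\"last_messages\":10"));

        // Deserializing the wire format restores the Rust-side field names and values.
        let parsed: TruncationObject =
            serde_json::from_str(&json).expect("deserialization should succeed");
        assert_eq!(parsed.last_messages, Some(10));
        assert_eq!(parsed._type, "last_messages");
    }
}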