// trieve_client/models/generate_off_chunks_req_payload.rs

/*
 * Trieve API
 *
 * Trieve OpenAPI Specification. This document describes all of the operations available through the Trieve API.
 *
 * The version of the OpenAPI document: 0.11.7
 * Contact: developers@trieve.ai
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

13#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
14pub struct GenerateOffChunksReqPayload {
15    /// The ids of the chunks to be retrieved and injected into the context window for RAG.
16    #[serde(rename = "chunk_ids")]
17    pub chunk_ids: Vec<uuid::Uuid>,
18    /// Frequency penalty is a number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Default is 0.7.
19    #[serde(rename = "frequency_penalty", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
20    pub frequency_penalty: Option<Option<f32>>,
21    /// Set highlight_results to false for a slight latency improvement (1-10ms). If not specified, this defaults to true. This will add `<b><mark>` tags to the chunk_html of the chunks to highlight matching splits.
22    #[serde(rename = "highlight_results", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
23    pub highlight_results: Option<Option<bool>>,
24    /// The maximum number of tokens to generate in the chat completion. Default is None.
25    #[serde(rename = "max_tokens", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
26    pub max_tokens: Option<Option<i32>>,
27    /// Presence penalty is a number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Default is 0.7.
28    #[serde(rename = "presence_penalty", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
29    pub presence_penalty: Option<Option<f32>>,
30    /// The previous messages to be placed into the chat history. There must be at least one previous message.
31    #[serde(rename = "prev_messages")]
32    pub prev_messages: Vec<models::ChatMessageProxy>,
33    /// Prompt will be used to tell the model what to generate in the next message in the chat. The default is 'Respond to the previous instruction and include the doc numbers that you used in square brackets at the end of the sentences that you used the docs for:'. You can also specify an empty string to leave the final message alone such that your user's final message can be used as the prompt. See docs.trieve.ai or contact us for more information.
34    #[serde(rename = "prompt", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
35    pub prompt: Option<Option<String>>,
36    /// Stop tokens are up to 4 sequences where the API will stop generating further tokens. Default is None.
37    #[serde(rename = "stop_tokens", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
38    pub stop_tokens: Option<Option<Vec<String>>>,
39    /// Whether or not to stream the response. If this is set to true or not included, the response will be a stream. If this is set to false, the response will be a normal JSON response. Default is true.
40    #[serde(rename = "stream_response", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
41    pub stream_response: Option<Option<bool>>,
42    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default is 0.5.
43    #[serde(rename = "temperature", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
44    pub temperature: Option<Option<f32>>,
45    /// User ID is the id of the user who is making the request. This is used to track user interactions with the RAG results.
46    #[serde(rename = "user_id", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
47    pub user_id: Option<Option<String>>,
48}
49
50impl GenerateOffChunksReqPayload {
51    pub fn new(chunk_ids: Vec<uuid::Uuid>, prev_messages: Vec<models::ChatMessageProxy>) -> GenerateOffChunksReqPayload {
52        GenerateOffChunksReqPayload {
53            chunk_ids,
54            frequency_penalty: None,
55            highlight_results: None,
56            max_tokens: None,
57            presence_penalty: None,
58            prev_messages,
59            prompt: None,
60            stop_tokens: None,
61            stream_response: None,
62            temperature: None,
63            user_id: None,
64        }
65    }
66}
67