cohere_rust/api/chat.rs

use serde::{Deserialize, Serialize};

use super::GenerateModel;

#[derive(Serialize, Default, Debug)]
pub struct ChatRequest<'input> {
    /// The chat message from the user to the model.
    pub message: &'input str,
    /// optional - The model to use for text generation. Custom models can also be supplied with their full ID. Defaults to 'command'.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<GenerateModel>,
    /// optional - Dictates how the prompt will be constructed. When set to 'AUTO', some parts of the chat history and documents will be dropped
    /// to construct a prompt that fits within the model's context length limit.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_truncation: Option<PromptTruncation>,
    /// optional - A non-negative float that tunes the degree of randomness in generation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// optional - Previous conversations can be stored and resumed by providing the conversation's identifier.
    /// If a conversation with this id does not already exist, a new conversation will be created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation_id: Option<String>,
    /// optional - A list of previous messages between the user and the model,
    /// meant to give the model conversational context for responding to the user's message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub chat_history: Option<&'input Vec<ChatMessage>>,
    /// optional - When specified, the default Cohere preamble will be replaced with the provided one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub preamble: Option<String>,
    /// optional - The maximum number of tokens the model will generate as part of the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u64>,
    /// optional - Ensures only the top k most likely tokens are considered for generation at each step.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub k: Option<u64>,
}
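
// Illustrative sketch, not part of the original file: because `ChatRequest`
// derives `Default` (and `&str` defaults to the empty string), callers can
// set only the fields they care about and fill in the rest with struct
// update syntax:
//
//     let request = ChatRequest {
//         message: "Hello!",
//         temperature: Some(0.3),
//         ..Default::default()
//     };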

#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(tag = "role")]
pub enum ChatMessage {
    #[serde(rename = "CHATBOT")]
    Chatbot { message: String },
    #[serde(rename = "USER")]
    User { message: String },
}
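
// With the internally tagged representation above (`#[serde(tag = "role")]`
// plus the variant renames), a user turn serializes to JSON of the shape
// {"role":"USER","message":"..."} and a model turn to
// {"role":"CHATBOT","message":"..."}.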

/// Internal wrapper that flattens a `ChatRequest` into the top level of the
/// request body and adds the `stream` flag for streaming requests.
#[derive(Serialize, Debug)]
pub(crate) struct ChatStreamRequest<'input> {
    #[serde(flatten)]
    pub request: &'input ChatRequest<'input>,
    pub stream: bool,
}

#[derive(strum_macros::Display, Serialize, Debug)]
pub enum PromptTruncation {
    #[strum(serialize = "AUTO")]
    #[serde(rename = "AUTO")]
    Auto,
    #[strum(serialize = "OFF")]
    #[serde(rename = "OFF")]
    Off,
}

#[derive(strum_macros::Display, Serialize, Debug)]
pub enum CitationQuality {
    #[strum(serialize = "accurate")]
    #[serde(rename = "accurate")]
    Accurate,
    #[strum(serialize = "fast")]
    #[serde(rename = "fast")]
    Fast,
}

/// The model's reply to a chat request.
#[derive(Deserialize, Debug, PartialEq)]
pub struct ChatResponse {
    pub generation_id: String,
    pub response_id: String,
    pub text: String,
}

/// Events emitted over the course of a streamed chat response, tagged by
/// their `event_type` field.
#[derive(Deserialize, Debug, PartialEq)]
#[serde(tag = "event_type")]
pub enum ChatStreamResponse {
    #[serde(rename = "stream-start")]
    ChatStreamStart {
        generation_id: String,
        is_finished: bool,
    },
    #[serde(rename = "text-generation")]
    ChatTextGeneration { is_finished: bool, text: String },
    #[serde(rename = "stream-end")]
    ChatStreamEnd {
        finish_reason: String,
        is_finished: bool,
        response: ChatResponse,
    },
}
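
// A minimal test sketch of the wire format implied by the serde attributes
// above. It assumes `serde_json` is available as a dev-dependency; the
// message contents are placeholders.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn chat_message_serializes_with_role_tag() {
        let msg = ChatMessage::User {
            message: "Hello!".to_string(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert_eq!(json, r#"{"role":"USER","message":"Hello!"}"#);
    }

    #[test]
    fn stream_request_flattens_base_request() {
        // `None` optional fields are skipped, so only `message` and the
        // flattened-in `stream` flag appear in the serialized body.
        let request = ChatRequest {
            message: "Hello!",
            ..Default::default()
        };
        let stream_request = ChatStreamRequest {
            request: &request,
            stream: true,
        };
        let json = serde_json::to_string(&stream_request).unwrap();
        assert_eq!(json, r#"{"message":"Hello!","stream":true}"#);
    }
}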