pub struct ChatRequest<'input> {
pub message: &'input str,
pub model: Option<GenerateModel>,
pub prompt_truncation: Option<PromptTruncation>,
pub temperature: Option<f64>,
pub conversation_id: Option<String>,
pub chat_history: Option<&'input Vec<ChatMessage>>,
pub preamble: Option<String>,
pub max_tokens: Option<u64>,
pub k: Option<u64>,
}Fields§
`message: &'input str` — The chat message from the user to the model.
`model: Option<GenerateModel>` — optional. The model to use for text generation. Custom models can also be supplied with their full ID. Defaults to 'command'.
`prompt_truncation: Option<PromptTruncation>` — optional. Dictates how the prompt will be constructed. When set to 'AUTO', some parts of chat history and documents will be dropped to construct a prompt that fits within the model's context length limit.
`temperature: Option<f64>` — optional. A non-negative float that tunes the degree of randomness in generation.
`conversation_id: Option<String>` — optional. Previous conversations can be stored and resumed by providing the conversation's identifier. If a conversation with this id does not already exist, a new conversation will be created.
`chat_history: Option<&'input Vec<ChatMessage>>` — optional. A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.
`preamble: Option<String>`
`max_tokens: Option<u64>`
`k: Option<u64>`