//! Chat history handling for the llama3 prompt format.
//!
//! File: cake_core/models/llama3/history.rs

1use crate::models::chat::Message;
2
3/// Chat history.
4pub struct History(Vec<Message>);
5
6// Adapted from https://github.com/meta-llama/llama3/blob/main/llama/tokenizer.py#L202
7impl History {
8    fn encode_header(message: &Message) -> String {
9        format!("<|start_header_id|>{}<|end_header_id|>\n\n", message.role)
10    }
11
12    fn encode_message(message: &Message) -> String {
13        Self::encode_header(message) + message.content.trim() + "<|eot_id|>"
14    }
15
16    /// Create a new instance of this object.
17    pub fn new() -> Self {
18        Self(vec![])
19    }
20
21    /// Encode the dialog to llama3 prompt format.
22    pub fn encode_dialog_to_prompt(&self) -> String {
23        let mut encoded = "<|begin_of_text|>".to_string();
24
25        for message in self.iter() {
26            encoded += &Self::encode_message(message);
27        }
28
29        //  Add the start of an assistant message for the model to complete.
30        encoded += &Self::encode_header(&Message::assistant("".to_string()));
31
32        encoded
33    }
34}
35
36impl std::ops::Deref for History {
37    type Target = Vec<Message>;
38    fn deref(&self) -> &Vec<Message> {
39        &self.0
40    }
41}
42
43impl std::ops::DerefMut for History {
44    fn deref_mut(&mut self) -> &mut Self::Target {
45        &mut self.0
46    }
47}