tiktoken_rs/
api.rs

use anyhow::{anyhow, Result};

use crate::{
    cl100k_base,
    model::get_context_size,
    o200k_base, o200k_harmony, p50k_base, p50k_edit, r50k_base,
    tokenizer::{get_tokenizer, Tokenizer},
    CoreBPE,
};

/// Calculates the maximum number of tokens available for completion based on the model and prompt provided.
///
/// This function determines the number of tokens left for a completion task, given the model and a prompt string.
/// It first retrieves the context size for the given model and the `CoreBPE` instance for tokenization.
/// Then, it calculates the number of tokens in the prompt using the appropriate tokenizer.
///
/// # Arguments
///
/// * `model` - A string slice representing the model name, e.g., "gpt-3.5-turbo".
/// * `prompt` - A string slice containing the prompt text.
///
/// # Errors
///
/// This function returns an error in the following cases:
///
/// * If no tokenizer is found for the specified model.
/// * If there is a failure in creating a `CoreBPE` instance for the specified model.
///
/// # Examples
///
/// ```
/// use tiktoken_rs::get_completion_max_tokens;
///
/// let model = "gpt-3.5-turbo";
/// let prompt = "Translate the following English text to French: '";
/// let max_tokens = get_completion_max_tokens(model, prompt).unwrap();
/// ```
///
/// # Returns
///
/// If successful, the function returns a `Result` containing the maximum number of tokens available for completion,
/// based on the given model and prompt.
pub fn get_completion_max_tokens(model: &str, prompt: &str) -> Result<usize> {
    let context_size = get_context_size(model);
    let bpe = get_bpe_from_model(model)?;
    let prompt_tokens = bpe.encode_with_special_tokens(prompt).len();
    Ok(context_size.saturating_sub(prompt_tokens))
}

/// The name and arguments of a function that should be called, as generated by the model.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct FunctionCall {
    /// The name of the function to call.
    pub name: String,
    /// The arguments to call the function with, as generated by the model in JSON format.
    /// Note that the model does not always generate valid JSON, and may hallucinate parameters
    /// not defined by your function schema. Validate the arguments in your code before calling
    /// your function.
    pub arguments: String,
}

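/// A chat message in the shape accepted by OpenAI's chat completion API, used here for token counting.
///
/// # Examples
///
/// A minimal construction sketch (the field values are illustrative only, and it assumes
/// `FunctionCall` is re-exported at the crate root alongside `ChatCompletionRequestMessage`):
///
/// ```
/// use tiktoken_rs::{ChatCompletionRequestMessage, FunctionCall};
///
/// // An assistant message that carries a function call instead of text content.
/// let message = ChatCompletionRequestMessage {
///     role: "assistant".to_string(),
///     content: None,
///     name: None,
///     function_call: Some(FunctionCall {
///         name: "get_weather".to_string(),
///         arguments: "{\"location\": \"Paris\"}".to_string(),
///     }),
/// };
/// assert!(message.content.is_none());
/// ```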
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct ChatCompletionRequestMessage {
    /// The role of the message's author. One of `system`, `user`, `assistant`, or `function`.
    pub role: String,
    /// The contents of the message.
    /// `content` is required for all messages except assistant messages with function calls.
    pub content: Option<String>,
    /// The name of the author of this message. `name` is required if the role is `function`,
    /// and it should be the name of the function whose response is in the `content`.
    /// May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
    pub name: Option<String>,
    /// The name and arguments of a function that should be called, as generated by the model.
    pub function_call: Option<FunctionCall>,
}

/// Based on <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>
///
/// num_tokens_from_messages returns the number of tokens required to encode the given messages
/// for the given model. This is used to estimate the number of tokens that will be used for chat
/// completion.
///
/// # Arguments
///
/// * `model`: A string slice containing the model name (e.g. "gpt-3.5-turbo").
/// * `messages`: A slice of `ChatCompletionRequestMessage` structs representing chat messages.
///
/// # Returns
///
/// * `Result<usize>`: A `Result` containing the total number of tokens needed to encode the messages
///   for the specified model, or an error if the tokenizer for the model is not found or not supported.
///
/// # Errors
///
/// This function will return an error if:
///
/// * The tokenizer for the specified model is not found.
/// * The tokenizer is not one used by a supported chat model (i.e., not `Tokenizer::Cl100kBase`,
///   `Tokenizer::O200kBase`, or `Tokenizer::O200kHarmony`).
///
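/// # Examples
///
/// A minimal usage sketch (the exact count depends on the model's per-message framing; this
/// assumes the function is re-exported at the crate root like the other helpers in this file):
///
/// ```
/// use tiktoken_rs::{num_tokens_from_messages, ChatCompletionRequestMessage};
///
/// let messages = vec![ChatCompletionRequestMessage {
///     role: "user".to_string(),
///     content: Some("Hello, how are you?".to_string()),
///     name: None,
///     function_call: None,
/// }];
/// let num_tokens = num_tokens_from_messages("gpt-4-0314", &messages).unwrap();
/// assert!(num_tokens > 0);
/// ```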
pub fn num_tokens_from_messages(
    model: &str,
    messages: &[ChatCompletionRequestMessage],
) -> Result<usize> {
    let tokenizer =
        get_tokenizer(model).ok_or_else(|| anyhow!("No tokenizer found for model {}", model))?;
    if tokenizer != Tokenizer::Cl100kBase
        && tokenizer != Tokenizer::O200kBase
        && tokenizer != Tokenizer::O200kHarmony
    {
        anyhow::bail!("Chat completion is only supported for chat models")
    }
    let bpe = get_bpe_from_tokenizer(tokenizer)?;

    let (tokens_per_message, tokens_per_name) = if model.starts_with("gpt-3.5") {
        (
            4,  // every message follows <im_start>{role/name}\n{content}<im_end>\n
            -1, // if there's a name, the role is omitted
        )
    } else {
        (3, 1)
    };

    let mut num_tokens: i32 = 0;
    for message in messages {
        num_tokens += tokens_per_message;
        num_tokens += bpe.encode_with_special_tokens(&message.role).len() as i32;
        num_tokens += bpe
            .encode_with_special_tokens(message.content.as_deref().unwrap_or_default())
            .len() as i32;
        if let Some(name) = &message.name {
            num_tokens += bpe.encode_with_special_tokens(name).len() as i32;
            num_tokens += tokens_per_name;
        }
    }
    num_tokens += 3; // every reply is primed with <|start|>assistant<|message|>
    Ok(num_tokens as usize)
}

/// Calculates the maximum number of tokens available for chat completion based on the model and messages provided.
///
/// This function determines the number of tokens left for a chat completion task, given the model and a slice of
/// chat completion request messages. It first retrieves the tokenizer for the given model and checks if chat completion
/// is supported. Then, it calculates the number of tokens in the existing messages using the appropriate tokenizer.
///
/// # Arguments
///
/// * `model` - A string slice representing the model name, e.g., "gpt-3.5-turbo".
/// * `messages` - A slice of `ChatCompletionRequestMessage` instances containing the chat context.
///
/// # Errors
///
/// This function returns an error in the following cases:
///
/// * If there is no tokenizer found for the specified model.
/// * If chat completion is not supported for the specified model.
/// * If there is a failure in creating a `CoreBPE` instance for the specified tokenizer.
///
/// # Example
///
/// ```
/// use tiktoken_rs::{get_chat_completion_max_tokens, ChatCompletionRequestMessage};
///
/// let model = "gpt-3.5-turbo";
/// let messages = vec![
///     ChatCompletionRequestMessage {
///         content: Some("You are a helpful assistant that only speaks French.".to_string()),
///         role: "system".to_string(),
///         name: None,
///         function_call: None,
///     },
///     ChatCompletionRequestMessage {
///         content: Some("Hello, how are you?".to_string()),
///         role: "user".to_string(),
///         name: None,
///         function_call: None,
///     },
///     ChatCompletionRequestMessage {
///         content: Some("Parlez-vous francais?".to_string()),
///         role: "system".to_string(),
///         name: None,
///         function_call: None,
///     },
/// ];
/// let max_tokens = get_chat_completion_max_tokens(model, &messages).unwrap();
/// ```
///
/// # Returns
///
/// If successful, the function returns a `Result` containing the maximum number of tokens available for chat completion,
/// based on the given model and messages.
pub fn get_chat_completion_max_tokens(
    model: &str,
    messages: &[ChatCompletionRequestMessage],
) -> Result<usize> {
    let context_size = get_context_size(model);
    let prompt_tokens = num_tokens_from_messages(model, messages)?;
    Ok(context_size.saturating_sub(prompt_tokens))
}

/// Returns a `CoreBPE` instance corresponding to the tokenizer used by the given model.
///
/// This function first retrieves the tokenizer associated with the specified model name
/// and then maps the tokenizer to the appropriate `CoreBPE` instance, which is used for
/// tokenization in different models.
///
/// # Arguments
///
/// * `model` - A string slice representing the model name for which a `CoreBPE` instance should be retrieved.
///
/// # Errors
///
/// This function returns an error if:
/// * No tokenizer is found for the given model.
/// * There is a failure in creating a `CoreBPE` instance for the tokenizer.
///
/// # Examples
///
/// ```
/// use tiktoken_rs::get_bpe_from_model;
///
/// let model = "gpt-4-0314";
/// let bpe = get_bpe_from_model(model).unwrap();
/// ```
///
/// # Returns
///
/// If successful, the function returns a `Result` containing the `CoreBPE` instance corresponding to the tokenizer used by the given model.
pub fn get_bpe_from_model(model: &str) -> Result<CoreBPE> {
    let tokenizer =
        get_tokenizer(model).ok_or_else(|| anyhow!("No tokenizer found for model {}", model))?;
    let bpe = get_bpe_from_tokenizer(tokenizer)?;
    Ok(bpe)
}

/// Returns a `CoreBPE` instance corresponding to the given tokenizer.
///
/// This function is responsible for mapping a `Tokenizer` enum variant to the appropriate
/// `CoreBPE` instance, which is used for tokenization in different models.
///
/// # Arguments
///
/// * `tokenizer` - A `Tokenizer` enum variant representing the tokenizer for which a `CoreBPE` instance should be retrieved.
///
/// # Errors
///
/// This function returns an error if there is a failure in creating a `CoreBPE` instance for the specified tokenizer.
///
/// # Examples
///
/// ```
/// use tiktoken_rs::get_bpe_from_tokenizer;
/// use tiktoken_rs::tokenizer::Tokenizer;
///
/// let tokenizer = Tokenizer::Cl100kBase;
/// let bpe = get_bpe_from_tokenizer(tokenizer).unwrap();
/// ```
///
/// # Returns
///
/// If successful, the function returns a `Result` containing the `CoreBPE` instance corresponding to the given tokenizer.
pub fn get_bpe_from_tokenizer(tokenizer: Tokenizer) -> Result<CoreBPE> {
    match tokenizer {
        Tokenizer::O200kHarmony => o200k_harmony(),
        Tokenizer::O200kBase => o200k_base(),
        Tokenizer::Cl100kBase => cl100k_base(),
        Tokenizer::R50kBase => r50k_base(),
        Tokenizer::P50kBase => p50k_base(),
        Tokenizer::P50kEdit => p50k_edit(),
        Tokenizer::Gpt2 => r50k_base(),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_bpe_from_tokenizer() {
        let bpe = get_bpe_from_tokenizer(Tokenizer::Cl100kBase).unwrap();
        assert_eq!(bpe.decode(vec![15339]).unwrap(), "hello");
    }

    #[test]
    fn test_num_tokens_from_messages() {
        let messages = vec![
            ChatCompletionRequestMessage {
                role: "system".to_string(),
                name: None,
                content: Some("You are a helpful, pattern-following assistant that translates corporate jargon into plain English.".to_string()),
                function_call: None,
            },
            ChatCompletionRequestMessage {
                role: "system".to_string(),
                name: Some("example_user".to_string()),
                content: Some("New synergies will help drive top-line growth.".to_string()),
                function_call: None,
            },
            ChatCompletionRequestMessage {
                role: "system".to_string(),
                name: Some("example_assistant".to_string()),
                content: Some("Things working well together will increase revenue.".to_string()),
                function_call: None,
            },
            ChatCompletionRequestMessage {
                role: "system".to_string(),
                name: Some("example_user".to_string()),
                content: Some("Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.".to_string()),
                function_call: None,
            },
            ChatCompletionRequestMessage {
                role: "system".to_string(),
                name: Some("example_assistant".to_string()),
                content: Some("Let's talk later when we're less busy about how to do better.".to_string()),
                function_call: None,
            },
            ChatCompletionRequestMessage {
                role: "user".to_string(),
                name: None,
                content: Some("This late pivot means we don't have time to boil the ocean for the client deliverable.".to_string()),
                function_call: None,
            },
        ];
        let num_tokens = num_tokens_from_messages("gpt-3.5-turbo-0301", &messages).unwrap();
        assert_eq!(num_tokens, 127);

        let num_tokens = num_tokens_from_messages("gpt-4-0314", &messages).unwrap();
        assert_eq!(num_tokens, 129);

        let num_tokens = num_tokens_from_messages("gpt-4o-2024-05-13", &messages).unwrap();
        assert_eq!(num_tokens, 124);
    }

    #[test]
    fn test_get_chat_completion_max_tokens() {
        let model = "gpt-3.5-turbo";
        let messages = vec![
            ChatCompletionRequestMessage {
                content: Some("You are a helpful assistant that only speaks French.".to_string()),
                role: "system".to_string(),
                name: None,
                function_call: None,
            },
            ChatCompletionRequestMessage {
                content: Some("Hello, how are you?".to_string()),
                role: "user".to_string(),
                name: None,
                function_call: None,
            },
            ChatCompletionRequestMessage {
                content: Some("Parlez-vous francais?".to_string()),
                role: "system".to_string(),
                name: None,
                function_call: None,
            },
        ];
        let max_tokens = get_chat_completion_max_tokens(model, &messages).unwrap();
        assert!(max_tokens > 0);
    }

    #[test]
    fn test_get_completion_max_tokens() {
        let model = "gpt-3.5-turbo";
        let prompt = "Translate the following English text to French: '";
        let max_tokens = get_completion_max_tokens(model, prompt).unwrap();
        assert!(max_tokens > 0);
    }
}

/// This module provides support for working with the `async_openai` crate.
#[cfg(feature = "async-openai")]
pub mod async_openai {
    use anyhow::Result;

    impl From<&async_openai::types::FunctionCall> for super::FunctionCall {
        fn from(f: &async_openai::types::FunctionCall) -> Self {
            Self {
                name: f.name.clone(),
                arguments: f.arguments.clone(),
            }
        }
    }

    impl From<&async_openai::types::ChatCompletionRequestMessage>
        for super::ChatCompletionRequestMessage
    {
        fn from(m: &async_openai::types::ChatCompletionRequestMessage) -> Self {
            Self {
                role: m.role.to_string(),
                name: m.name.clone(),
                content: m.content.clone(),
                function_call: m.function_call.as_ref().map(|f| f.into()),
            }
        }
    }

    /// Calculates the total number of tokens for the given list of messages.
    ///
    /// # Arguments
    ///
    /// * `model` - A string slice representing the name of the model.
    /// * `messages` - A slice of `async_openai::types::ChatCompletionRequestMessage` instances.
    ///
    /// # Returns
    ///
    /// * A `Result` containing the total number of tokens (`usize`) or an error if the calculation fails.
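    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring the struct-literal form used in this module's tests below
    /// (it assumes the `async-openai` feature is enabled and that this module is reachable as
    /// `tiktoken_rs::async_openai`):
    ///
    /// ```
    /// use tiktoken_rs::async_openai::num_tokens_from_messages;
    ///
    /// let messages = [async_openai::types::ChatCompletionRequestMessage {
    ///     role: async_openai::types::Role::System,
    ///     name: None,
    ///     content: Some("You are a helpful assistant.".to_string()),
    ///     function_call: None,
    /// }];
    /// let num_tokens = num_tokens_from_messages("gpt-3.5-turbo", &messages).unwrap();
    /// assert!(num_tokens > 0);
    /// ```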
    pub fn num_tokens_from_messages(
        model: &str,
        messages: &[async_openai::types::ChatCompletionRequestMessage],
    ) -> Result<usize> {
        let messages = messages.iter().map(|m| m.into()).collect::<Vec<_>>();
        super::num_tokens_from_messages(model, &messages)
    }

    /// Retrieves the maximum token limit for chat completions.
    ///
    /// # Arguments
    ///
    /// * `model` - A string slice representing the name of the model.
    /// * `messages` - A slice of `async_openai::types::ChatCompletionRequestMessage` instances.
    ///
    /// # Returns
    ///
    /// * A `Result` containing the maximum number of tokens (`usize`) allowed for chat completions or an error if the retrieval fails.
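    ///
    /// # Examples
    ///
    /// A minimal sketch under the same assumptions as the example above (the `async-openai`
    /// feature is enabled and this module is reachable as `tiktoken_rs::async_openai`):
    ///
    /// ```
    /// use tiktoken_rs::async_openai::get_chat_completion_max_tokens;
    ///
    /// let messages = [async_openai::types::ChatCompletionRequestMessage {
    ///     role: async_openai::types::Role::System,
    ///     name: None,
    ///     content: Some("You are a helpful assistant that only speaks French.".to_string()),
    ///     function_call: None,
    /// }];
    /// let max_tokens = get_chat_completion_max_tokens("gpt-3.5-turbo", &messages).unwrap();
    /// assert!(max_tokens > 0);
    /// ```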
    pub fn get_chat_completion_max_tokens(
        model: &str,
        messages: &[async_openai::types::ChatCompletionRequestMessage],
    ) -> Result<usize> {
        let messages = messages.iter().map(|m| m.into()).collect::<Vec<_>>();
        super::get_chat_completion_max_tokens(model, &messages)
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn test_num_tokens_from_messages() {
            let model = "gpt-3.5-turbo-0301";
            let messages = &[async_openai::types::ChatCompletionRequestMessage {
                role: async_openai::types::Role::System,
                name: None,
                content: Some("You are a helpful, pattern-following assistant that translates corporate jargon into plain English.".to_string()),
                function_call: None,
            }];
            let num_tokens = num_tokens_from_messages(model, messages).unwrap();
            assert!(num_tokens > 0);
        }

        #[test]
        fn test_get_chat_completion_max_tokens() {
            let model = "gpt-3.5-turbo";
            let messages = &[async_openai::types::ChatCompletionRequestMessage {
                content: Some("You are a helpful assistant that only speaks French.".to_string()),
                role: async_openai::types::Role::System,
                name: None,
                function_call: None,
            }];
            let max_tokens = get_chat_completion_max_tokens(model, messages).unwrap();
            assert!(max_tokens > 0);
        }
    }
}