text_splitter/chunk_size/huggingface.rs

use tokenizers::{Encoding, Tokenizer};

use crate::ChunkSizer;

/// Compute the number of tokens that exist within an entire [`Encoding`] object.
///
/// Take into account [`Encoding::get_overflowing`] for cases where the [`Tokenizer`] producing the [`Encoding`] has truncation parameters set.
fn num_tokens_with_overflow(encoding: &Encoding, pad_id: Option<u32>) -> usize {
    let base = encoding
        .get_ids()
        .iter()
        // Skip padding tokens at beginning and end so they don't count towards the chunk size
        .skip_while(|&id| pad_id.is_some_and(|pad_id| id == &pad_id))
        .take_while(|&id| pad_id.is_none_or(|pad_id| id != &pad_id))
        .count();

    // If the [`Tokenizer`] has truncation, need to check overflow encodings to determine overall size.
    let overflow: usize = encoding
        .get_overflowing()
        .iter()
        .map(|enc| num_tokens_with_overflow(enc, pad_id))
        .sum();

    base + overflow
}

impl ChunkSizer for Tokenizer {
    /// Returns the number of tokens in a given text after tokenization.
    ///
    /// # Panics
    ///
    /// Will panic if you don't have a byte-level tokenizer and the splitter
    /// encounters text it can't tokenize.
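    ///
    /// # Example
    ///
    /// A sketch mirroring the `returns_size` test below (assumes `ChunkSizer`
    /// is re-exported at the crate root; fetching `bert-base-cased` requires
    /// network access, hence `no_run`):
    ///
    /// ```no_run
    /// use text_splitter::ChunkSizer;
    /// use tokenizers::Tokenizer;
    ///
    /// let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None).unwrap();
    /// assert_eq!(tokenizer.size(" An apple a"), 3);
    /// ```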
    fn size(&self, chunk: &str) -> usize {
        let encoding = self
            .encode_fast(chunk, false)
            // Use `panic!` rather than `expect` so `{chunk}` is actually
            // interpolated into the error message.
            .unwrap_or_else(|_| panic!("Unable to tokenize the following string: {chunk}"));

        let pad_id = self.get_padding().map(|params| params.pad_id);
        num_tokens_with_overflow(&encoding, pad_id)
    }
}
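
// Usage sketch (assumes this crate's public `TextSplitter` and `ChunkConfig`
// types, which are not defined in this file): because `Tokenizer` implements
// `ChunkSizer`, it can be handed straight to the splitter as the sizer.
//
//     let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
//     let splitter = TextSplitter::new(ChunkConfig::new(512).with_sizer(tokenizer));
//     let chunks: Vec<_> = splitter.chunks("your document text").collect();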

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn returns_size() {
        let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None).unwrap();
        let size = tokenizer.size(" An apple a");
        assert_eq!(size, 3);
    }

    #[test]
    fn returns_size_handles_prefix() {
        let tokenizer =
            tokenizers::Tokenizer::from_file("./tests/tokenizers/huggingface.json").unwrap();

        let size = tokenizer.size("An apple a");
        assert_eq!(size, 3);
    }

    #[test]
    fn handles_padding() {
        let tokenizer = Tokenizer::from_pretrained("thenlper/gte-small", None).unwrap();
        let size = tokenizer.size("An apple a");
        assert_eq!(size, 3);
    }

    #[test]
    fn handle_truncation() {
        let tokenizer = Tokenizer::from_pretrained("sentence-transformers/all-MiniLM-L6-v2", None)
            .expect("Could not load tokenizer 'sentence-transformers/all-MiniLM-L6-v2'");

        // Need to ensure chunk is large enough to cause Encoding overflows.
        assert_eq!(
            tokenizer.size("An apple a day keeps the doctor away.".repeat(100).as_str()),
            900
        );
    }
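
    // A hand-built `Encoding` (hypothetical token ids, with 0 as the pad id)
    // exercising both branches of `num_tokens_with_overflow` without hitting
    // the network: padding at the edges is skipped, and overflow encodings
    // are counted recursively.
    #[test]
    fn counts_overflow_and_skips_padding() {
        use std::collections::HashMap;

        // Overflow encoding: one real token (7) followed by padding.
        let overflow = Encoding::new(
            vec![7, 0],
            vec![0; 2],
            vec!["c".into(), "[PAD]".into()],
            vec![None; 2],
            vec![(0, 0); 2],
            vec![0; 2],
            vec![1, 0],
            vec![],
            HashMap::new(),
        );
        // Base encoding: two real tokens (5, 6) padded on both sides.
        let encoding = Encoding::new(
            vec![0, 5, 6, 0],
            vec![0; 4],
            vec!["[PAD]".into(), "a".into(), "b".into(), "[PAD]".into()],
            vec![None; 4],
            vec![(0, 0); 4],
            vec![0; 4],
            vec![0, 1, 1, 0],
            vec![overflow],
            HashMap::new(),
        );

        // 2 base tokens + 1 overflow token, padding excluded.
        assert_eq!(num_tokens_with_overflow(&encoding, Some(0)), 3);
    }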
}