// bm25_vectorizer/bm25_token_indexer.rs

/// Trait for mapping tokens to unique indices for efficient BM25 processing.
///
/// This trait defines how string tokens are converted to numerical or other
/// indexable representations.
///
/// Some indexing strategies include:
/// - **Hash-based**: Use hash functions (e.g. Murmur3) to map tokens to integers
/// - **Dictionary-based**: Maintain a mapping from tokens to sequential indices
///
/// # Associated Types
///
/// * `Bm25TokenIndex` - The type used to represent token indices. This should
///   typically implement `Hash`, `Eq`, `Clone`, and other traits required for
///   use as map keys.
///
/// # Examples
///
/// ```rust
/// use bm25_vectorizer::Bm25TokenIndexer;
/// use std::collections::HashMap;
///
/// // Hash-based token indexer
/// struct HashTokenIndexer;
///
/// impl Bm25TokenIndexer for HashTokenIndexer {
///     type Bm25TokenIndex = u64;
///
///     fn index(&self, token: &str) -> Self::Bm25TokenIndex {
///         use std::hash::{Hash, Hasher};
///         // Note: Better hashing algorithms can be used (e.g. Murmur3)
///         use std::collections::hash_map::DefaultHasher;
///
///         let mut hasher = DefaultHasher::new();
///         token.hash(&mut hasher);
///         hasher.finish()
///     }
/// }
///
/// // Dictionary-based token indexer
/// struct DictionaryIndexer {
///     token_to_id: HashMap<String, usize>,
///     next_id: usize,
/// }
///
/// impl DictionaryIndexer {
///     fn new() -> Self {
///         Self {
///             token_to_id: HashMap::new(),
///             next_id: 0,
///         }
///     }
/// }
///
/// impl Bm25TokenIndexer for DictionaryIndexer {
///     type Bm25TokenIndex = usize;
///
///     fn index(&self, token: &str) -> Self::Bm25TokenIndex {
///         // Note: In a real implementation, you'd want interior mutability
///         // (e.g. `RefCell`) or a different API design to register unseen
///         // tokens. The `unwrap_or(0)` fallback here collides with the
///         // first assigned id and is for illustration only.
///         self.token_to_id.get(token).copied().unwrap_or(0)
///     }
/// }
/// ```
pub trait Bm25TokenIndexer {
    /// The type used to represent token indices.
    /// This associated type defines what kind of index representation is used
    /// for tokens.
    type Bm25TokenIndex;

    /// Maps a token string to its corresponding index representation.
    ///
    /// This method converts a string token into the index type defined by
    /// `Bm25TokenIndex`.
    ///
    /// Implementations should be deterministic: calling `index` twice with
    /// the same token must yield the same index.
    ///
    /// # Arguments
    ///
    /// * `token` - The string token to be indexed
    ///
    /// # Returns
    ///
    /// An index of type `Self::Bm25TokenIndex` that uniquely represents the token
    ///
    /// # Examples
    ///
    /// ```rust
    /// use bm25_vectorizer::Bm25TokenIndexer;
    /// use std::collections::hash_map::DefaultHasher;
    /// use std::hash::{Hash, Hasher};
    ///
    /// struct HashIndexer;
    ///
    /// impl Bm25TokenIndexer for HashIndexer {
    ///     type Bm25TokenIndex = u64;
    ///
    ///     fn index(&self, token: &str) -> Self::Bm25TokenIndex {
    ///         let mut hasher = DefaultHasher::new();
    ///         token.hash(&mut hasher);
    ///         hasher.finish()
    ///     }
    /// }
    ///
    /// let indexer = HashIndexer;
    /// let index1 = indexer.index("hello");
    /// let index2 = indexer.index("hello");
    /// assert_eq!(index1, index2); // Same token, same index
    /// ```
    fn index(&self, token: &str) -> Self::Bm25TokenIndex;
}
// Tests for Bm25TokenIndexer trait
#[cfg(test)]
mod tests {
    use super::*;
    use crate::mocking::{
        MockDictionaryTokenIndexer, MockHashTokenIndexer, MockStringTokenIndexer,
        MockWhitespaceTokenizer,
    };
    use crate::Bm25Tokenizer;

    #[test]
    fn test_hash_token_indexer_deterministic() {
        let indexer = MockHashTokenIndexer;
        assert_eq!(
            indexer.index("hello"),
            indexer.index("hello"),
            "Same token should produce same index"
        );
    }

    #[test]
    fn test_hash_token_indexer_different_tokens() {
        let indexer = MockHashTokenIndexer;
        assert_ne!(
            indexer.index("hello"),
            indexer.index("world"),
            "Different tokens should produce different indices"
        );
    }

    #[test]
    fn test_hash_token_indexer_case_sensitivity() {
        let indexer = MockHashTokenIndexer;
        assert_ne!(
            indexer.index("hello"),
            indexer.index("Hello"),
            "Case-different tokens should produce different indices"
        );
    }

    #[test]
    fn test_dictionary_token_indexer_sequential() {
        let indexer = MockDictionaryTokenIndexer::new();
        // First-seen tokens receive consecutive ids starting at 0.
        let ids: Vec<usize> = ["hello", "world", "rust"]
            .iter()
            .map(|token| indexer.index(token))
            .collect();
        assert_eq!(ids, vec![0, 1, 2]);
    }

    #[test]
    fn test_dictionary_token_indexer_deterministic() {
        let indexer = MockDictionaryTokenIndexer::new();
        let first = indexer.index("hello");
        let second = indexer.index("world");
        let repeat = indexer.index("hello"); // Repeat

        assert_eq!(first, repeat, "Same token should produce same index");
        assert_ne!(
            first, second,
            "Different tokens should produce different indices"
        );
    }

    #[test]
    fn test_dictionary_token_indexer_empty_string() {
        let indexer = MockDictionaryTokenIndexer::new();
        assert_eq!(
            indexer.index(""),
            indexer.index(""),
            "Empty string should be handled consistently"
        );
    }

    #[test]
    fn test_string_token_indexer() {
        let indexer = MockStringTokenIndexer;
        assert_eq!(indexer.index("hello"), "idx_hello");
        assert_eq!(indexer.index("world"), "idx_world");
    }

    #[test]
    fn test_string_token_indexer_deterministic() {
        let indexer = MockStringTokenIndexer;
        assert_eq!(
            indexer.index("test"),
            indexer.index("test"),
            "Same token should produce same index"
        );
    }

    // Integration tests combining tokenizer and indexer

    #[test]
    fn test_tokenizer_indexer_integration() {
        let tokenizer = MockWhitespaceTokenizer;
        let indexer = MockHashTokenIndexer;

        let ids: Vec<u64> = tokenizer
            .tokenize("hello world hello rust")
            .iter()
            .map(|token| indexer.index(token))
            .collect();

        // Should have 4 indices
        assert_eq!(ids.len(), 4);

        // "hello" appears twice and should have the same index
        assert_eq!(ids[0], ids[2], "Repeated token 'hello' should have same index");

        // All other tokens should be different
        assert_ne!(ids[0], ids[1], "'hello' and 'world' should have different indices");
        assert_ne!(ids[1], ids[3], "'world' and 'rust' should have different indices");
        assert_ne!(ids[0], ids[3], "'hello' and 'rust' should have different indices");
    }

    #[test]
    fn test_dictionary_indexer_with_tokenizer() {
        let tokenizer = MockWhitespaceTokenizer;
        let indexer = MockDictionaryTokenIndexer::new();

        let sentence = "the quick brown fox jumps over the lazy dog";
        let ids: Vec<usize> = tokenizer
            .tokenize(sentence)
            .iter()
            .map(|token| indexer.index(token))
            .collect();

        // Should have 9 indices (same length as tokens)
        assert_eq!(ids.len(), 9);

        // "the" appears twice at positions 0 and 6, should have same index
        let the_id = indexer.index("the");
        assert_eq!(ids[0], the_id);
        assert_eq!(ids[6], the_id);
        assert_eq!(ids[0], ids[6], "Repeated token 'the' should have same index");
    }

    #[test]
    fn test_edge_cases() {
        let tokenizer = MockWhitespaceTokenizer;
        let indexer = MockHashTokenIndexer;

        // Test with whitespace-only string
        assert!(
            tokenizer.tokenize("   \t  \n  ").is_empty(),
            "Whitespace-only string should produce no tokens"
        );

        // Test with single character
        let single = tokenizer.tokenize("a");
        assert_eq!(single, vec!["a"]);
        assert!(
            indexer.index(&single[0]) > 0,
            "Single character should produce valid index"
        );

        // Test with very long token
        let long_token = "a".repeat(1000);
        assert_eq!(
            indexer.index(&long_token),
            indexer.index(&long_token),
            "Long token should be handled consistently"
        );
    }

    #[test]
    fn test_indexer_properties() {
        let indexer = MockHashTokenIndexer;

        // Property: same input should always produce same output
        let token = "consistent";
        assert_eq!(
            indexer.index(token),
            indexer.index(token),
            "Indexer should be deterministic"
        );

        // Property: different inputs should generally produce different outputs
        // (Note: hash collisions are possible but rare)
        assert_ne!(
            indexer.index("a"),
            indexer.index("b"),
            "Different tokens should generally have different indices"
        );
    }
}