timecat/utils/cache_table.rs

use super::*;

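/// A single slot of the cache table: the full 64-bit hash plus the stored
/// payload.
///
/// The hash is kept as a `NonZeroU64` so that `Option<CacheTableEntry<T>>`
/// benefits from the niche optimization and costs no extra space for the
/// `None` discriminant.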
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct CacheTableEntry<T> {
    hash: NonZeroU64, // Non-zero so `Option<CacheTableEntry<T>>` can use the niche optimization
    entry: T,
}

impl<T> CacheTableEntry<T> {
    #[inline]
    pub const fn new(hash: NonZeroU64, entry: T) -> CacheTableEntry<T> {
        CacheTableEntry { hash, entry }
    }

    #[inline]
    pub fn get_hash(self) -> NonZeroU64 {
        self.hash
    }

    #[inline]
    pub fn get_entry(self) -> T {
        self.entry
    }

    #[inline]
    pub fn get_entry_mut(&mut self) -> &mut T {
        &mut self.entry
    }
}

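/// A requested cache-table size in MiB, together with the policy for
/// converting it into an entry count.
///
/// Every variant except `Exact` snaps the entry count to a power of two so
/// that slot indices can be computed with a bitwise AND: `Max` rounds down
/// (never exceeding the requested size), `Min` rounds up (never falling
/// below it), and `Round` picks the nearest power.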
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum CacheTableSize {
    Max(usize),
    Min(usize),
    Round(usize),
    Exact(usize),
}

impl CacheTableSize {
    pub const ZERO: Self = Self::Exact(0);

    pub const fn unwrap(self) -> usize {
        match self {
            Self::Max(size) | Self::Min(size) | Self::Round(size) | Self::Exact(size) => size,
        }
    }

    #[inline]
    pub const fn is_min(self) -> bool {
        matches!(self, Self::Min(_))
    }

    #[inline]
    pub const fn is_max(self) -> bool {
        matches!(self, Self::Max(_))
    }

    #[inline]
    pub const fn is_round(self) -> bool {
        matches!(self, Self::Round(_))
    }

    #[inline]
    pub const fn is_exact(self) -> bool {
        matches!(self, Self::Exact(_))
    }

    #[inline]
    pub const fn is_zero(self) -> bool {
        matches!(self, Self::ZERO)
    }

    #[inline]
    pub const fn get_entry_size<T>() -> usize {
        size_of::<Option<CacheTableEntry<T>>>()
    }

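    /// Converts the requested size in MiB into `(num_entries, entry_size)`.
    ///
    /// The raw count is `size * 2^20 / entry_size`, which every variant
    /// except `Exact` then snaps to a power of two. As a hypothetical
    /// example, if the entry size were 24 bytes, `Min(16)` would yield
    /// 16 * 2^20 / 24 = 699,050 raw slots, rounded up to 2^20 = 1,048,576
    /// entries (about 24 MiB), while `Max(16)` would round down to
    /// 2^19 = 524,288 entries (about 12 MiB).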
    pub fn to_num_entries_and_entry_size<T>(self) -> (usize, usize) {
        let mut size = self.unwrap();
        let entry_size = Self::get_entry_size::<T>();
        size *= 2_usize.pow(20); // MiB -> bytes
        size /= entry_size; // bytes -> entry count
        if self.is_exact() {
            return (size, entry_size);
        }
        let pow_f64 = (size as f64).log2();
        let pow = match self {
            Self::Max(_) => pow_f64.floor(),
            Self::Min(_) => pow_f64.ceil(),
            Self::Round(_) => pow_f64.round(),
            Self::Exact(_) => unreachable!(),
        } as u32;
        size = 2_usize.pow(pow);
        (size, entry_size)
    }

    #[inline]
    pub fn to_num_entries<T>(self) -> usize {
        self.to_num_entries_and_entry_size::<T>().0
    }

    #[inline]
    pub fn to_memory_size_in_mb<T>(self) -> usize {
        let (size, entry_size) = self.to_num_entries_and_entry_size::<T>();
        size * entry_size / 2_usize.pow(20)
    }
}

impl Default for CacheTableSize {
    fn default() -> Self {
        CACHE_TABLE_SIZE
    }
}

impl fmt::Display for CacheTableSize {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} MB", self.unwrap())
    }
}

#[cfg(feature = "extras")]
macro_rules! update_variables {
    ($self: ident, $e_copy: ident, $hash: ident, $entry: ident) => {
        if let Some(e) = $e_copy {
            if e.get_hash() == $hash {
                if e.get_entry() != $entry {
                    $self.num_overwrites.fetch_add(1, MEMORY_ORDERING);
                }
            } else {
                $self.num_collisions.fetch_add(1, MEMORY_ORDERING);
            }
        } else {
            $self.num_cells_filled.fetch_add(1, MEMORY_ORDERING);
        }
    };
}

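/// A fixed-capacity, hash-indexed cache table.
///
/// The backing slice sits behind an `RwLock`, while the index mask and the
/// statistics counters are atomics that can be read without taking the
/// lock. Distinct hashes may map to the same slot; `get` returns an entry
/// only when its full 64-bit hash matches, so index collisions simply
/// overwrite one another.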
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Default)]
pub struct CacheTable<T> {
    table: RwLock<Box<[Option<CacheTableEntry<T>>]>>,
    size: RwLock<CacheTableSize>,
    mask: AtomicUsize,
    is_safe_to_do_bitwise_and: AtomicBool,
    num_cells_filled: AtomicUsize,
    #[cfg(feature = "extras")]
    num_overwrites: AtomicUsize,
    #[cfg(feature = "extras")]
    num_collisions: AtomicUsize,
    #[cfg(feature = "extras")]
    zero_hit: AtomicUsize,
}

impl<T: Copy + PartialEq> CacheTable<T> {
    #[inline]
    fn generate_table(size: CacheTableSize) -> Box<[Option<CacheTableEntry<T>>]> {
        vec![None; size.to_num_entries::<T>()].into_boxed_slice()
    }

    // Bitwise-AND indexing is valid only for power-of-two table lengths.
    #[inline]
    const fn is_safe_to_do_bitwise_and(size: usize) -> bool {
        size.is_power_of_two() && size > 1
    }

    // Computes the value stored in `mask`: `len - 1` when indexing can use
    // a bitwise AND, otherwise `len` itself for the modulo fallback.
    #[inline]
    const fn into_inner(table: &[Option<CacheTableEntry<T>>]) -> usize {
        if Self::is_safe_to_do_bitwise_and(table.len()) {
            table.len() - 1
        } else {
            table.len()
        }
    }

    #[inline]
    fn reset_mask(&self, table: &[Option<CacheTableEntry<T>>]) {
        self.mask.store(Self::into_inner(table), MEMORY_ORDERING);
        self.is_safe_to_do_bitwise_and.store(
            Self::is_safe_to_do_bitwise_and(table.len()),
            MEMORY_ORDERING,
        );
    }

    pub fn new(size: CacheTableSize) -> CacheTable<T> {
        let cache_table = CacheTable {
            table: RwLock::new(Self::generate_table(size)),
            size: RwLock::new(size),
            mask: Default::default(),
            is_safe_to_do_bitwise_and: Default::default(),
            num_cells_filled: AtomicUsize::new(0),
            #[cfg(feature = "extras")]
            num_overwrites: AtomicUsize::new(0),
            #[cfg(feature = "extras")]
            num_collisions: AtomicUsize::new(0),
            #[cfg(feature = "extras")]
            zero_hit: AtomicUsize::new(0),
        };
        cache_table.reset_mask(&cache_table.table.read().unwrap());
        cache_table
    }

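    /// Maps a hash to a slot index: `hash & (len - 1)` when the table
    /// length is a power of two, `hash % len` otherwise. The `mask` field
    /// holds `len - 1` or `len` accordingly.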
    #[inline]
    fn get_index(&self, hash: u64) -> usize {
        if self.is_safe_to_do_bitwise_and.load(MEMORY_ORDERING) {
            hash as usize & self.mask.load(MEMORY_ORDERING)
        } else {
            hash as usize % self.mask.load(MEMORY_ORDERING)
        }
    }

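    /// Looks up `hash` and returns the stored entry only if the full hash
    /// matches. A zero hash is remapped to `DEFAULT_HASH`, since the key is
    /// stored as a `NonZeroU64`.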
    #[inline]
    pub fn get(&self, hash: u64) -> Option<T> {
        let hash = NonZeroU64::new(hash).unwrap_or(DEFAULT_HASH);
        let entry = {
            let table = self.table.read().unwrap();
            *get_item_unchecked!(table, self.get_index(hash.get()))
        }?;
        (entry.hash == hash).then_some(entry.entry)
    }

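    /// Inserts `entry` under `hash`, unconditionally overwriting whatever
    /// currently occupies the slot.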
    #[inline]
    pub fn add(&self, hash: u64, entry: T) {
        let hash = NonZeroU64::new(hash).unwrap_or(DEFAULT_HASH);
        let mut table = self.table.write().unwrap();
        let e = get_item_unchecked_mut!(table, self.get_index(hash.get()));
        #[cfg(not(feature = "extras"))]
        if e.is_none() {
            self.num_cells_filled.fetch_add(1, MEMORY_ORDERING);
        }
        #[cfg(feature = "extras")]
        let e_copy = *e;
        *e = Some(CacheTableEntry { hash, entry });
        drop(table); // Release the write lock before updating the counters.
        #[cfg(feature = "extras")]
        update_variables!(self, e_copy, hash, entry);
    }

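    /// Inserts `entry` under `hash` only if the slot is empty or the
    /// predicate `replace` returns `true` for the entry currently stored
    /// there.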
    #[inline]
    pub fn replace_if<F: Fn(T) -> bool>(&self, hash: u64, entry: T, replace: F) {
        let hash = NonZeroU64::new(hash).unwrap_or(DEFAULT_HASH);
        let mut table = self.table.write().unwrap();
        let e = get_item_unchecked_mut!(table, self.get_index(hash.get()));
        let to_replace = if let Some(entry) = e {
            replace(entry.entry)
        } else {
            true
        };
        if to_replace {
            #[cfg(not(feature = "extras"))]
            if e.is_none() {
                self.num_cells_filled.fetch_add(1, MEMORY_ORDERING);
            }
            #[cfg(feature = "extras")]
            let e_copy = *e;
            *e = Some(CacheTableEntry { hash, entry });
            drop(table); // Release the write lock before updating the counters.
            #[cfg(feature = "extras")]
            update_variables!(self, e_copy, hash, entry);
        }
    }

    #[inline]
    pub fn clear(&self) {
        self.table.write().unwrap().fill(None);
        self.num_cells_filled.store(0, MEMORY_ORDERING);
        self.reset_variables()
    }

    #[inline]
    pub const fn get_table(&self) -> &RwLock<Box<[Option<CacheTableEntry<T>>]>> {
        &self.table
    }

    #[inline]
    pub fn get_num_cells_filled(&self) -> usize {
        self.num_cells_filled.load(MEMORY_ORDERING)
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn get_num_overwrites(&self) -> usize {
        self.num_overwrites.load(MEMORY_ORDERING)
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn get_num_collisions(&self) -> usize {
        self.num_collisions.load(MEMORY_ORDERING)
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn get_zero_hit(&self) -> usize {
        self.zero_hit.load(MEMORY_ORDERING)
    }

    #[inline]
    pub fn reset_num_cells_filled(&self) {
        self.num_cells_filled.store(0, MEMORY_ORDERING);
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn reset_num_overwrites(&self) {
        self.num_overwrites.store(0, MEMORY_ORDERING);
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn reset_num_collisions(&self) {
        self.num_collisions.store(0, MEMORY_ORDERING);
    }

    #[inline]
    #[cfg(feature = "extras")]
    pub fn reset_zero_hit(&self) {
        self.zero_hit.store(0, MEMORY_ORDERING);
    }

    /// Resets the statistics counters that should start from zero on each
    /// new search.
    pub fn reset_variables(&self) {
        #[cfg(feature = "extras")]
        {
            self.reset_num_overwrites();
            self.reset_num_collisions();
            self.reset_zero_hit();
        }
    }

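    /// Returns the proportion of filled slots as a percentage in the range
    /// 0.0 to 100.0.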
    #[inline]
    pub fn get_hash_full(&self) -> f64 {
        (self.num_cells_filled.load(MEMORY_ORDERING) as f64 / self.len() as f64) * 100.0
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.table.read().unwrap().len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.get_num_cells_filled() == 0
    }

    #[inline]
    pub fn get_size(&self) -> CacheTableSize {
        *self.size.read().unwrap()
    }

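    /// Resizes the table to `size` and rehashes all existing entries into
    /// the new allocation. When shrinking, entries whose hashes collide in
    /// the smaller table overwrite one another.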
    pub fn set_size(&self, size: CacheTableSize) {
        *self.size.write().unwrap() = size;
        let current_table_copy = self.table.read().unwrap().clone();
        *self.table.write().unwrap() = Self::generate_table(size);
        // The mask must be derived from the newly allocated table, whose
        // length generally differs from the old one.
        self.reset_mask(&self.table.read().unwrap());
        // The new table starts empty; `add` below recounts the filled cells.
        self.reset_num_cells_filled();
        self.reset_variables();
        for entry in current_table_copy.iter().flatten() {
            self.add(entry.hash.get(), entry.entry);
        }
    }
}


impl<T: Copy + PartialEq> Clone for CacheTable<T> {
    fn clone(&self) -> Self {
        CacheTable {
            table: RwLock::new(self.table.read().unwrap().clone()),
            size: RwLock::new(self.get_size()),
            mask: AtomicUsize::new(self.mask.load(MEMORY_ORDERING)),
            is_safe_to_do_bitwise_and: AtomicBool::new(
                self.is_safe_to_do_bitwise_and.load(MEMORY_ORDERING),
            ),
            num_cells_filled: AtomicUsize::new(self.num_cells_filled.load(MEMORY_ORDERING)),
            #[cfg(feature = "extras")]
            num_overwrites: AtomicUsize::new(self.num_overwrites.load(MEMORY_ORDERING)),
            #[cfg(feature = "extras")]
            num_collisions: AtomicUsize::new(self.num_collisions.load(MEMORY_ORDERING)),
            #[cfg(feature = "extras")]
            zero_hit: AtomicUsize::new(self.zero_hit.load(MEMORY_ORDERING)),
        }
    }
}