fixed_cache/lib.rs

#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(clippy::new_without_default)]

#[cfg(feature = "alloc")]
extern crate alloc;

use core::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    mem::{self, MaybeUninit},
    ptr,
    sync::atomic::{AtomicUsize, Ordering},
};
use equivalent::Equivalent;

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
const NEEDED_BITS: usize = 2;

const EPOCH_BITS: usize = 10;
const EPOCH_SHIFT: usize = NEEDED_BITS;
const EPOCH_MASK: usize = ((1 << EPOCH_BITS) - 1) << EPOCH_SHIFT;
const EPOCH_NEEDED_BITS: usize = NEEDED_BITS + EPOCH_BITS;

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = core::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(all(not(feature = "rapidhash"), feature = "std"))]
type DefaultBuildHasher = std::hash::RandomState;
#[cfg(all(not(feature = "rapidhash"), not(feature = "std")))]
type DefaultBuildHasher = core::hash::BuildHasherDefault<NoDefaultHasher>;

#[cfg(all(not(feature = "rapidhash"), not(feature = "std")))]
#[doc(hidden)]
pub enum NoDefaultHasher {}

/// Configuration trait for [`Cache`].
///
/// This trait allows customizing cache behavior through compile-time configuration.
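///
/// # Example
///
/// A minimal sketch of a custom configuration; it assumes the `std` feature so that
/// [`std::hash::RandomState`] is available as the hasher.
///
/// ```
/// use fixed_cache::{Cache, CacheConfig};
///
/// // A config that disables statistics tracking.
/// struct NoStats;
/// impl CacheConfig for NoStats {
///     const STATS: bool = false;
/// }
///
/// let cache: Cache<u64, u64, std::hash::RandomState, NoStats> =
///     Cache::new(64, Default::default());
/// cache.insert(1, 100);
/// assert_eq!(cache.get(&1), Some(100));
/// ```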
pub trait CacheConfig {
    /// Whether to track statistics for cache performance.
    ///
    /// When enabled, the cache tracks hit and miss counts for each key, allowing
    /// monitoring of cache performance.
    ///
    /// Only takes effect when the `stats` feature is also enabled.
    ///
    /// Defaults to `true`.
    const STATS: bool = true;

    /// Whether to track epochs for cheap invalidation via [`Cache::clear`].
    ///
    /// When enabled, the cache tracks an epoch counter that is incremented on each call to
    /// [`Cache::clear`]. Entries are considered invalid if their epoch doesn't match the current
    /// epoch, allowing O(1) invalidation of all entries without touching each bucket.
    ///
    /// Defaults to `false`.
    const EPOCHS: bool = false;
}

/// Default cache configuration.
pub struct DefaultCacheConfig(());
impl CacheConfig for DefaultCacheConfig {}

/// A concurrent, fixed-size, set-associative cache.
///
/// This cache maps keys to values using a fixed number of buckets. Each key hashes to exactly
/// one bucket, and collisions are resolved by eviction (the new value replaces the old one).
///
/// # Thread Safety
///
/// The cache is safe to share across threads (`Send + Sync`). All operations use atomic
/// instructions and never block, making it suitable for high-contention scenarios.
///
/// # Limitations
///
/// - **Eviction on collision**: When two keys hash to the same bucket, the older entry is evicted.
/// - **No iteration**: Individual entries cannot be enumerated.
///
/// # Type Parameters
///
/// - `K`: The key type. Must implement [`Hash`] + [`Eq`].
/// - `V`: The value type. Must implement [`Clone`].
/// - `S`: The hash builder type. Must implement [`BuildHasher`]. Defaults to [`RandomState`],
///   or to [`rapidhash`]'s hasher when the `rapidhash` feature is enabled.
///
/// # Example
///
/// ```
/// use fixed_cache::Cache;
///
/// let cache: Cache<u64, u64> = Cache::new(256, Default::default());
///
/// // Insert a value
/// cache.insert(42, 100);
/// assert_eq!(cache.get(&42), Some(100));
///
/// // Get or compute a value
/// let value = cache.get_or_insert_with(123, |&k| k * 2);
/// assert_eq!(value, 246);
/// ```
///
/// [`Hash`]: std::hash::Hash
/// [`Eq`]: std::cmp::Eq
/// [`Clone`]: std::clone::Clone
/// [`Drop`]: std::ops::Drop
/// [`BuildHasher`]: std::hash::BuildHasher
/// [`RandomState`]: std::hash::RandomState
/// [`rapidhash`]: https://crates.io/crates/rapidhash
pub struct Cache<K, V, S = DefaultBuildHasher, C: CacheConfig = DefaultCacheConfig> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    #[cfg(feature = "alloc")]
    drop: bool,
    epoch: AtomicUsize,
    _config: PhantomData<C>,
}

impl<K, V, S, C: CacheConfig> fmt::Debug for Cache<K, V, S, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

// SAFETY: `Cache` is safe to share across threads because `Bucket` uses atomic operations.
unsafe impl<K: Send, V: Send, S: Send, C: CacheConfig + Send> Send for Cache<K, V, S, C> {}
unsafe impl<K: Send, V: Send, S: Sync, C: CacheConfig + Sync> Sync for Cache<K, V, S, C> {}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    S: BuildHasher,
    C: CacheConfig,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

    /// Creates a new cache with the specified number of entries and hasher.
    ///
    /// Dynamically allocates memory for the cache entries.
    ///
    /// # Panics
    ///
    /// Panics if `num`:
    /// - is not a power of two.
    /// - is less than 4 (or less than 4096 when [`CacheConfig::EPOCHS`] is enabled).
    // See len_assertion for why.
    #[cfg(feature = "alloc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let layout = alloc::alloc::Layout::array::<Bucket<(K, V)>>(num).unwrap();
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }
        let entries = ptr::slice_from_raw_parts(ptr.cast::<Bucket<(K, V)>>(), num);
        Self::new_inner(entries, build_hasher, true)
    }

    /// Creates a new cache with the specified entries and hasher.
    ///
    /// # Panics
    ///
    /// See [`new`](Self::new).
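    ///
    /// # Example
    ///
    /// A sketch that mirrors what the [`static_cache!`] macro expands to; it assumes the
    /// `rapidhash` feature for a hasher that can be constructed in `const` context.
    ///
    /// ```
    /// # #[cfg(feature = "rapidhash")] {
    /// use fixed_cache::{Bucket, Cache};
    ///
    /// type BuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    ///
    /// static ENTRIES: [Bucket<(u64, u64)>; 64] = [const { Bucket::new() }; 64];
    /// static CACHE: Cache<u64, u64, BuildHasher> = Cache::new_static(&ENTRIES, BuildHasher::new());
    ///
    /// CACHE.insert(1, 2);
    /// assert_eq!(CACHE.get(&1), Some(2));
    /// # }
    /// ```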
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

    /// Sets the cache's statistics.
    ///
    /// Can only be called when [`CacheConfig::STATS`] is `true`.
    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

    /// Sets the cache's statistics.
    ///
    /// Can only be called when [`CacheConfig::STATS`] is `true`.
    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        const { assert!(C::STATS, "can only set stats when C::STATS is true") }
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        let _ = drop;
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            #[cfg(feature = "alloc")]
            drop,
            epoch: AtomicUsize::new(0),
            _config: PhantomData,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        // We need `reserved` bits to store per-entry metadata in the tag.
        // Since we calculate the tag mask based on the index mask, and the index mask is (len - 1),
        // we assert that the length's bottom `reserved` bits are zero.
        let reserved = if C::EPOCHS { EPOCH_NEEDED_BITS } else { NEEDED_BITS };
        assert!(len.is_power_of_two(), "length must be a power of two");
        assert!((len & ((1 << reserved) - 1)) == 0, "len must have its bottom N bits set to zero");
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        // SAFETY: `len_assertion` guarantees the capacity is a power of two.
        unsafe { core::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    /// Returns the current epoch of the cache.
    ///
    /// Only meaningful when [`CacheConfig::EPOCHS`] is `true`.
    #[inline]
    fn epoch(&self) -> usize {
        self.epoch.load(Ordering::Relaxed)
    }

    /// Clears the cache by invalidating all entries.
    ///
    /// This is O(1): it simply increments an epoch counter. On epoch wraparound, falls back to
    /// [`clear_slow`](Self::clear_slow).
    ///
    /// Can only be called when [`CacheConfig::EPOCHS`] is `true`.
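    ///
    /// # Example
    ///
    /// A minimal sketch; it assumes the `std` feature so that [`std::hash::RandomState`] is
    /// available. With epochs enabled, 12 tag bits are reserved, so the capacity must be at
    /// least 4096.
    ///
    /// ```
    /// use fixed_cache::{Cache, CacheConfig};
    ///
    /// struct Epochs;
    /// impl CacheConfig for Epochs {
    ///     const EPOCHS: bool = true;
    /// }
    ///
    /// let cache: Cache<u64, u64, std::hash::RandomState, Epochs> =
    ///     Cache::new(4096, Default::default());
    /// cache.insert(1, 100);
    /// cache.clear(); // O(1): only bumps the epoch counter
    /// assert_eq!(cache.get(&1), None);
    /// ```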
    #[inline]
    pub fn clear(&self) {
        const EPOCH_MAX: usize = (1 << EPOCH_BITS) - 1;
        const { assert!(C::EPOCHS, "can only .clear() when C::EPOCHS is true") }
        let prev = self.epoch.fetch_add(1, Ordering::Release);
        // The counter increments monotonically, but only its bottom `EPOCH_BITS` are stored
        // in bucket tags, so fall back to a full clear on every wraparound of those bits.
        if prev & EPOCH_MAX == EPOCH_MAX {
            self.clear_slow();
        }
    }

    /// Clears the cache by zeroing all bucket memory.
    ///
    /// This is O(N) where N is the number of buckets. Prefer [`clear`](Self::clear) when
    /// [`CacheConfig::EPOCHS`] is `true`.
    ///
    /// # Concurrency
    ///
    /// This method is safe to call, but it may race with concurrent operations. Callers should
    /// ensure no other threads are accessing the cache during this operation.
    pub fn clear_slow(&self) {
        // SAFETY: We zero the entire bucket array. This is safe because:
        // - Bucket<T> is repr(C) and zeroed memory is a valid empty bucket state.
        // - We don't need to drop existing entries since we're zeroing the ALIVE_BIT.
        // - Concurrent readers will see either the old state or zeros (empty).
        unsafe {
            ptr::write_bytes(
                self.entries.cast_mut().cast::<Bucket<(K, V)>>(),
                0,
                self.entries.len(),
            )
        };
    }

    /// Returns the hash builder used by this cache.
    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

    /// Returns the total number of buckets in this cache.
    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

    /// Returns the statistics of this cache.
    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
    C: CacheConfig,
{
    /// Gets an entry from the cache, returning a clone of the cached value if present.
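    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, u64> = Cache::new(64, Default::default());
    /// cache.insert(7, 49);
    /// assert_eq!(cache.get(&7), Some(49));
    /// assert_eq!(cache.get(&8), None);
    /// ```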
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            // SAFETY: We hold the lock and bucket is alive, so we have exclusive access.
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if C::STATS
                    && let Some(stats) = &self.stats
                {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if C::STATS
            && let Some(stats) = &self.stats
        {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

    /// Inserts an entry into the cache.
    ///
    /// If another entry already occupies the key's bucket, it is evicted and replaced. If the
    /// bucket is currently locked by another thread, the insert is silently dropped.
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(bucket, tag, || (key, value));
    }

    /// Removes an entry from the cache.
    ///
    /// Returns the value if the key was present in the cache.
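    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, u64> = Cache::new(64, Default::default());
    /// cache.insert(1, 100);
    /// assert_eq!(cache.remove(&1), Some(100));
    /// assert_eq!(cache.get(&1), None);
    /// ```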
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            // SAFETY: We hold the lock and bucket is alive, so we have exclusive access.
            let data = unsafe { &mut *bucket.data.get() };
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                #[cfg(feature = "stats")]
                if C::STATS
                    && let Some(stats) = &self.stats
                {
                    stats.record_remove(ck, &v);
                }
                if Self::NEEDS_DROP {
                    // SAFETY: We hold the lock, so we have exclusive access.
                    unsafe { data.assume_init_drop() };
                }
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

    #[inline]
    fn insert_inner(
        &self,
        bucket: &Bucket<(K, V)>,
        tag: usize,
        make_entry: impl FnOnce() -> (K, V),
    ) {
        #[inline(always)]
        unsafe fn do_write<T>(ptr: *mut T, f: impl FnOnce() -> T) {
            // This function is translated as:
            // - allocate space for a T on the stack
            // - call f() with the return value being put onto this stack space
            // - memcpy from the stack to the heap
            //
            // Ideally we want LLVM to always realize that doing a stack
            // allocation is unnecessary and optimize the code so it writes
            // directly into the heap instead. It seems we get it to realize
            // this most consistently if we put this critical line into its
            // own function instead of inlining it into the surrounding code.
            unsafe { ptr::write(ptr, f()) };
        }

        let Some(prev_tag) = bucket.try_lock_ret(None) else {
            return;
        };

        // SAFETY: We hold the lock, so we have exclusive access.
        unsafe {
            let is_alive = (prev_tag & !LOCKED_BIT) != 0;
            let data = bucket.data.get().cast::<(K, V)>();

            if C::STATS && cfg!(feature = "stats") {
                #[cfg(feature = "stats")]
                if is_alive {
                    let (old_key, old_value) = ptr::replace(data, make_entry());
                    if let Some(stats) = &self.stats {
                        stats.record_insert(&(*data).0, &(*data).1, Some((&old_key, &old_value)));
                    }
                } else {
                    do_write(data, make_entry);
                    if let Some(stats) = &self.stats {
                        stats.record_insert(&(*data).0, &(*data).1, None);
                    }
                }
            } else {
                if Self::NEEDS_DROP && is_alive {
                    ptr::drop_in_place(data);
                }
                do_write(data, make_entry);
            }
        }
        bucket.unlock(tag);
    }

    /// Gets a value from the cache, or inserts one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns a clone of the cached value.
    /// Otherwise, calls `f` to compute the value, attempts to insert it, and returns it.
    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        let Ok(v) = self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key)));
        v
    }

    /// Gets a value from the cache, or inserts one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns a clone of the cached value.
    /// Otherwise, calls `f` to compute the value, attempts to insert it, and returns it.
    ///
    /// This is the same as [`get_or_insert_with`], but takes a reference to the key, plus a
    /// function `cvt` to convert that reference into an owned key.
    ///
    /// [`get_or_insert_with`]: Self::get_or_insert_with
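    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<String, usize> = Cache::new(64, Default::default());
    ///
    /// // Look up by `&str` without allocating; the key is converted into an
    /// // owned `String` only when it actually has to be inserted.
    /// let len = cache.get_or_insert_with_ref("hello", |s| s.len(), |s| s.to_string());
    /// assert_eq!(len, 5);
    /// ```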
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let Ok(v) = self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt);
        v
    }

    /// Gets a value from the cache, or attempts to insert one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns `Ok` with a clone of the cached value.
    /// Otherwise, calls `f` to compute the value. If `f` returns `Ok`, attempts to insert
    /// the value and returns it. If `f` returns `Err`, the error is propagated.
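    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, u64> = Cache::new(64, Default::default());
    ///
    /// let ok: Result<u64, &str> = cache.get_or_try_insert_with(2, |&k| Ok(k * 2));
    /// assert_eq!(ok, Ok(4));
    ///
    /// // Errors are propagated and nothing is inserted.
    /// let err: Result<u64, &str> = cache.get_or_try_insert_with(3, |_| Err("failed"));
    /// assert_eq!(err, Err("failed"));
    /// assert_eq!(cache.get(&3), None);
    /// ```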
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        let mut key = mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            // SAFETY: `key` is wrapped in `ManuallyDrop`, and `read` records that this
            // duplicating read happened, so the original is never dropped below.
            unsafe { ptr::read(k) }
        });
        if !read {
            // SAFETY: the closure above never ran, so the key was not moved out.
            unsafe { mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

    /// Gets a value from the cache, or attempts to insert one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns `Ok` with a clone of the cached value.
    /// Otherwise, calls `f` to compute the value. If `f` returns `Ok`, attempts to insert
    /// the value and returns it. If `f` returns `Err`, the error is propagated.
    ///
    /// This is the same as [`Self::get_or_try_insert_with`], but takes a reference to the key,
    /// plus a function `cvt` to convert that reference into an owned key.
    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(bucket, tag, || (cvt(key), value.clone()));
        Ok(value)
    }

    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        // SAFETY: index is masked to be within bounds.
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        if C::EPOCHS {
            tag = (tag & !EPOCH_MASK) | ((self.epoch() << EPOCH_SHIFT) & EPOCH_MASK);
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

        if cfg!(target_pointer_width = "32") {
            // Fold the upper 32 bits into the lower ones so 32-bit targets
            // still benefit from the full 64-bit hash.
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S, C: CacheConfig> Drop for Cache<K, V, S, C> {
    fn drop(&mut self) {
        #[cfg(feature = "alloc")]
        if self.drop {
            // SAFETY: `Drop` has exclusive access.
            drop(unsafe { alloc::boxed::Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

/// A single cache bucket that holds one key-value pair.
///
/// Buckets are aligned to 128 bytes to avoid false sharing between cache lines.
/// Each bucket contains an atomic tag for lock-free synchronization and uninitialized
/// storage for the data.
///
/// This type is public to allow use with the [`static_cache!`] macro for compile-time
/// cache initialization. You typically don't need to interact with it directly.
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = mem::needs_drop::<T>();

    /// Creates a new zeroed bucket.
    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

// SAFETY: `Bucket` is a specialized `Mutex<T>` that never blocks.
unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            // SAFETY: `Drop` has exclusive access.
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

/// Declares a static cache with the given key type, value type, size, and optional hasher.
///
/// The size must be a power of two.
///
/// # Example
///
/// ```
/// # #[cfg(feature = "rapidhash")] {
/// use fixed_cache::{Cache, static_cache};
///
/// type BuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
///
/// static MY_CACHE: Cache<u64, &'static str, BuildHasher> =
///     static_cache!(u64, &'static str, 1024, BuildHasher::new());
///
/// let value = MY_CACHE.get_or_insert_with(42, |_k| "hi");
/// assert_eq!(value, "hi");
///
/// let new_value = MY_CACHE.get_or_insert_with(42, |_k| "not hi");
/// assert_eq!(new_value, "hi");
/// # }
/// ```
#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    struct EpochConfig;
    impl CacheConfig for EpochConfig {
        const EPOCHS: bool = true;
    }
    type EpochCache<K, V> = super::Cache<K, V, BH, EpochConfig>;

    struct NoStatsConfig;
    impl CacheConfig for NoStatsConfig {
        const STATS: bool = false;
    }
    type NoStatsCache<K, V> = super::Cache<K, V, BH, NoStatsConfig>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        // 3 keys + 3 values = 6 drops
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            // Insert same key again - should evict old entry
            cache.insert(DropKey(1), DropValue(200));
            // Old key + old value dropped = 2
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        // Cache dropped: new key + new value = 2 more
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }

    #[test]
    fn epoch_clear() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        assert_eq!(cache.epoch(), 0);

        cache.insert(1, 100);
        cache.insert(2, 200);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&2), Some(200));

        cache.clear();
        assert_eq!(cache.epoch(), 1);

        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&2), None);

        cache.insert(1, 101);
        assert_eq!(cache.get(&1), Some(101));

        cache.clear();
        assert_eq!(cache.epoch(), 2);
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn epoch_wrap_around() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        for _ in 0..300 {
            cache.insert(42, 123);
            assert_eq!(cache.get(&42), Some(123));
            cache.clear();
            assert_eq!(cache.get(&42), None);
        }
    }

    #[test]
    fn no_stats_config() {
        let cache: NoStatsCache<u64, u64> = NoStatsCache::new(64, Default::default());

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&999), None);

        cache.insert(1, 200);
        assert_eq!(cache.get(&1), Some(200));

        cache.remove(&1);
        assert_eq!(cache.get(&1), None);

        let v = cache.get_or_insert_with(42, |&k| k * 2);
        assert_eq!(v, 84);
    }

    #[test]
    fn epoch_wraparound_stays_cleared() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(42, 123);
        assert_eq!(cache.get(&42), Some(123));

        for i in 0..2048 {
            cache.clear();
            assert_eq!(cache.get(&42), None, "failed at clear #{i}");
        }
    }

    #[test]
    fn remove_copy_type() {
        let cache = new_cache::<u64, u64>(64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some(100));
        assert_eq!(cache.get(&1), None);

        cache.insert(1, 200);
        assert_eq!(cache.get(&1), Some(200));
    }

    #[test]
    fn remove_then_reinsert_copy() {
        let cache = new_cache::<u64, u64>(64);

        for i in 0..100u64 {
            cache.insert(1, i);
            assert_eq!(cache.get(&1), Some(i));
            assert_eq!(cache.remove(&1), Some(i));
            assert_eq!(cache.get(&1), None);
        }
    }

    #[test]
    fn epoch_with_needs_drop() {
        let cache: EpochCache<String, String> = EpochCache::new(4096, Default::default());

        cache.insert("key".to_string(), "value".to_string());
        assert_eq!(cache.get("key"), Some("value".to_string()));

        cache.clear();
        assert_eq!(cache.get("key"), None);

        cache.insert("key".to_string(), "value2".to_string());
        assert_eq!(cache.get("key"), Some("value2".to_string()));
    }

    #[test]
    fn epoch_remove() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(1, 100);
        assert_eq!(cache.remove(&1), Some(100));
        assert_eq!(cache.get(&1), None);

        cache.insert(1, 200);
        assert_eq!(cache.get(&1), Some(200));

        cache.clear();
        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.remove(&1), None);
    }

    #[test]
    fn no_stats_needs_drop() {
        let cache: NoStatsCache<String, String> = NoStatsCache::new(64, Default::default());

        cache.insert("a".to_string(), "b".to_string());
        assert_eq!(cache.get("a"), Some("b".to_string()));

        cache.insert("a".to_string(), "c".to_string());
        assert_eq!(cache.get("a"), Some("c".to_string()));

        cache.remove(&"a".to_string());
        assert_eq!(cache.get("a"), None);
    }

    #[test]
    fn no_stats_get_or_insert() {
        let cache: NoStatsCache<String, usize> = NoStatsCache::new(64, Default::default());

        let v = cache.get_or_insert_with_ref("hello", |s| s.len(), |s| s.to_string());
        assert_eq!(v, 5);

        let v2 = cache.get_or_insert_with_ref("hello", |_| 999, |s| s.to_string());
        assert_eq!(v2, 5);
    }

    #[test]
    fn epoch_concurrent() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());
        let n = iters(10_000);

        run_concurrent(4, |t| {
            for i in 0..n as u64 {
                match t {
                    0 => {
                        cache.insert(i % 50, i);
                    }
                    1 => {
                        let _ = cache.get(&(i % 50));
                    }
                    2 => {
                        if i % 100 == 0 {
                            cache.clear();
                        }
                    }
                    _ => {
                        let _ = cache.remove(&(i % 50));
                    }
                }
            }
        });
    }
}