fixed_cache/
lib.rs

#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

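// Bucket tag layout, from low to high bits:
// - bit 0: `LOCKED_BIT`, set while a thread has exclusive access to the bucket.
// - bit 1: `ALIVE_BIT`, set while the bucket holds initialized data.
// - bits 2..=11: the epoch, only used when `CacheConfig::EPOCHS` is enabled.
// - remaining high bits: tag bits taken from the key's hash.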
const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
const NEEDED_BITS: usize = 2;

const EPOCH_BITS: usize = 10;
const EPOCH_SHIFT: usize = NEEDED_BITS;
const EPOCH_MASK: usize = ((1 << EPOCH_BITS) - 1) << EPOCH_SHIFT;
const EPOCH_NEEDED_BITS: usize = NEEDED_BITS + EPOCH_BITS;

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

/// Configuration trait for [`Cache`].
///
/// This trait allows customizing cache behavior through compile-time configuration.
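///
/// # Example
///
/// A minimal configuration that enables epoch-based clearing. With epochs enabled, the bottom
/// 12 tag bits are reserved, so the bucket count must be at least 4096:
///
/// ```
/// use fixed_cache::{Cache, CacheConfig};
///
/// struct EpochConfig;
/// impl CacheConfig for EpochConfig {
///     const EPOCHS: bool = true;
/// }
///
/// let cache: Cache<u64, u64, std::hash::RandomState, EpochConfig> =
///     Cache::new(4096, Default::default());
/// cache.insert(1, 100);
/// assert_eq!(cache.get(&1), Some(100));
///
/// // O(1) invalidation of every entry.
/// cache.clear();
/// assert_eq!(cache.get(&1), None);
/// ```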
pub trait CacheConfig {
    /// Whether to track epochs for cheap invalidation via [`Cache::clear`].
    ///
    /// When enabled, the cache tracks an epoch counter that is incremented on each call to
    /// [`Cache::clear`]. Entries are considered invalid if their epoch doesn't match the current
    /// epoch, allowing O(1) invalidation of all entries without touching each bucket.
    ///
    /// Defaults to `false`.
    const EPOCHS: bool = false;
}

/// Default cache configuration.
pub struct DefaultCacheConfig(());
impl CacheConfig for DefaultCacheConfig {}

/// A concurrent, fixed-size, direct-mapped cache.
///
/// This cache maps keys to values using a fixed number of buckets. Each key hashes to exactly
/// one bucket, and collisions are resolved by eviction (the new value replaces the old one).
///
/// # Thread Safety
///
/// The cache is safe to share across threads (`Send + Sync`). All operations use atomic
/// instructions and never block, making it suitable for high-contention scenarios.
///
/// # Limitations
///
/// - **Eviction on collision**: When two keys hash to the same bucket, the older entry is evicted.
/// - **No iteration**: Individual entries cannot be enumerated.
///
/// # Type Parameters
///
/// - `K`: The key type. Must implement [`Hash`] + [`Eq`].
/// - `V`: The value type. Must implement [`Clone`].
/// - `S`: The hash builder type. Must implement [`BuildHasher`]. Defaults to [`rapidhash`] if the
///   `rapidhash` feature is enabled, otherwise [`RandomState`].
///
/// # Example
///
/// ```
/// use fixed_cache::Cache;
///
/// let cache: Cache<u64, u64> = Cache::new(256, Default::default());
///
/// // Insert a value
/// cache.insert(42, 100);
/// assert_eq!(cache.get(&42), Some(100));
///
/// // Get or compute a value
/// let value = cache.get_or_insert_with(123, |&k| k * 2);
/// assert_eq!(value, 246);
/// ```
///
/// [`Hash`]: std::hash::Hash
/// [`Eq`]: std::cmp::Eq
/// [`Clone`]: std::clone::Clone
/// [`Drop`]: std::ops::Drop
/// [`BuildHasher`]: std::hash::BuildHasher
/// [`RandomState`]: std::hash::RandomState
/// [`rapidhash`]: https://crates.io/crates/rapidhash
pub struct Cache<K, V, S = DefaultBuildHasher, C: CacheConfig = DefaultCacheConfig> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    drop: bool,
    epoch: AtomicUsize,
    _config: PhantomData<C>,
}

impl<K, V, S, C: CacheConfig> fmt::Debug for Cache<K, V, S, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

// SAFETY: `Cache` is safe to share across threads because `Bucket` uses atomic operations.
unsafe impl<K: Send, V: Send, S: Send, C: CacheConfig + Send> Send for Cache<K, V, S, C> {}
unsafe impl<K: Send, V: Send, S: Sync, C: CacheConfig + Sync> Sync for Cache<K, V, S, C> {}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    S: BuildHasher,
    C: CacheConfig,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

    /// Creates a new cache with the specified number of entries and hasher.
    ///
    /// Dynamically allocates memory for the cache entries.
    ///
    /// # Panics
    ///
    /// Panics if `num`:
    /// - is not a power of two.
    /// - is too small to leave the reserved tag bits free: less than 4, or less than 4096 when
    ///   [`CacheConfig::EPOCHS`] is enabled.
    // See len_assertion for why.
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let entries =
            Box::into_raw((0..num).map(|_| Bucket::new()).collect::<Vec<_>>().into_boxed_slice());
        Self::new_inner(entries, build_hasher, true)
    }

    /// Creates a new cache backed by the given statically allocated buckets and hasher.
    ///
    /// Typically used through the [`static_cache!`] macro.
    ///
    /// # Panics
    ///
    /// See [`new`](Self::new).
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

    /// Sets the cache's statistics.
    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

    /// Sets the cache's statistics.
    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
            epoch: AtomicUsize::new(0),
            _config: PhantomData,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        // The bottom bits of each tag are reserved for metadata: the lock and alive bits, plus
        // the epoch bits when `C::EPOCHS` is enabled. Since the tag mask is derived from the
        // index mask, which is `len - 1`, the length's bottom `reserved` bits must be zero so
        // that the tag bits never overlap the metadata bits.
        let reserved = if C::EPOCHS { EPOCH_NEEDED_BITS } else { NEEDED_BITS };
        assert!(len.is_power_of_two(), "length must be a power of two");
        assert!((len & ((1 << reserved) - 1)) == 0, "len must have its bottom N bits set to zero");
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        // SAFETY: `len_assertion` guarantees the capacity is a power of two.
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    /// Returns the current epoch of the cache.
    ///
    /// Only meaningful when [`CacheConfig::EPOCHS`] is `true`.
    #[inline]
    fn epoch(&self) -> usize {
        self.epoch.load(Ordering::Relaxed)
    }

    /// Clears the cache by invalidating all entries.
    ///
    /// This is O(1): it simply increments an epoch counter. On epoch wraparound, falls back to
    /// [`clear_slow`](Self::clear_slow).
    ///
    /// Can only be called when [`CacheConfig::EPOCHS`] is `true`.
    #[inline]
    pub fn clear(&self) {
        const EPOCH_MAX: usize = (1 << EPOCH_BITS) - 1;
        const { assert!(C::EPOCHS, "can only .clear() when C::EPOCHS is true") }
        let prev = self.epoch.fetch_add(1, Ordering::Release);
        // Tags only store the epoch modulo `EPOCH_MAX + 1`, so every time the counter completes
        // a cycle, zero the buckets to prevent entries from a previous cycle from resurrecting.
        if (prev & EPOCH_MAX) == EPOCH_MAX {
            self.clear_slow();
        }
    }

    /// Clears the cache by zeroing all bucket memory.
    ///
    /// This is O(N) where N is the number of buckets. Prefer [`clear`](Self::clear) when
    /// [`CacheConfig::EPOCHS`] is `true`.
    ///
    /// This method is safe to call, but it may race with concurrent operations; callers should
    /// ensure no other threads are accessing the cache while it runs.
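    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, u64> = Cache::new(64, Default::default());
    /// cache.insert(1, 100);
    /// cache.clear_slow();
    /// assert_eq!(cache.get(&1), None);
    /// ```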
    pub fn clear_slow(&self) {
        // SAFETY: We zero the entire bucket array. This is safe because:
        // - `Bucket<T>` is `repr(C)` and zeroed memory is a valid, empty bucket state.
        // - Existing entries are not dropped, only forgotten: zeroing clears the `ALIVE_BIT`, so
        //   values owning heap allocations are leaked rather than dropped, which is safe.
        // - Concurrent readers will see either the old state or zeros (empty).
        unsafe {
            std::ptr::write_bytes(
                self.entries.cast_mut().cast::<Bucket<(K, V)>>(),
                0,
                self.entries.len(),
            )
        };
    }

    /// Returns the hash builder used by this cache.
    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

    /// Returns the total number of buckets in this cache.
    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

    /// Returns the statistics of this cache.
    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
    C: CacheConfig,
{
    /// Gets an entry from the cache, returning a clone of the value if the key is present.
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            // SAFETY: We hold the lock and bucket is alive, so we have exclusive access.
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            #[cfg(feature = "stats")]
            if let Some(stats) = &self.stats {
                stats.record_collision(AnyRef::new(&key), ck, v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

    /// Inserts an entry into the cache.
    ///
    /// Insertion is best-effort: if the destination bucket is currently locked by another
    /// thread, the entry is silently discarded.
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

    /// Removes an entry from the cache.
    ///
    /// Returns the value if the key was present in the cache.
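    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, &str> = Cache::new(64, Default::default());
    /// cache.insert(1, "one");
    /// assert_eq!(cache.remove(&1), Some("one"));
    /// assert_eq!(cache.get(&1), None);
    /// ```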
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            // SAFETY: We hold the lock and bucket is alive, so we have exclusive access.
            let data = unsafe { &mut *bucket.data.get() };
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                if Self::NEEDS_DROP {
                    // SAFETY: We hold the lock, so we have exclusive access.
                    unsafe { data.assume_init_drop() };
                }
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        // Best-effort: if another thread holds the lock, skip the insertion entirely.
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            // SAFETY: We hold the lock, so we have exclusive access.
            unsafe {
                let data = (&mut *bucket.data.get()).as_mut_ptr();
                // Drop old value if bucket was alive.
                if Self::NEEDS_DROP && (prev_tag & ALIVE_BIT) != 0 {
                    std::ptr::drop_in_place(data);
                }
                (&raw mut (*data).0).write(make_key());
                (&raw mut (*data).1).write(make_value());
            }
            bucket.unlock(tag);
        }
    }

    /// Gets a value from the cache, or inserts one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns a clone of the cached value.
    /// Otherwise, calls `f` to compute the value, attempts to insert it, and returns it.
    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

    /// Gets a value from the cache, or inserts one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns a clone of the cached value.
    /// Otherwise, calls `f` to compute the value, attempts to insert it, and returns it.
    ///
    /// This is the same as [`get_or_insert_with`](Self::get_or_insert_with), but takes a
    /// reference to the key, along with a function `cvt` that converts that reference into an
    /// owned key when one needs to be inserted.
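    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<String, usize> = Cache::new(64, Default::default());
    ///
    /// // Looks up by `&str`; the key is converted to an owned `String` only if the
    /// // value actually has to be inserted.
    /// let len = cache.get_or_insert_with_ref("hello", |s| s.len(), |s| s.to_string());
    /// assert_eq!(len, 5);
    /// ```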
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

    /// Gets a value from the cache, or attempts to insert one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns `Ok` with a clone of the cached value.
    /// Otherwise, calls `f` to compute the value. If `f` returns `Ok`, attempts to insert
    /// the value and returns it. If `f` returns `Err`, the error is propagated.
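    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<u64, u64> = Cache::new(64, Default::default());
    ///
    /// let ok: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| Ok(k * 2));
    /// assert_eq!(ok, Ok(84));
    ///
    /// // Errors are propagated and nothing is cached.
    /// let err: Result<u64, &str> = cache.get_or_try_insert_with(7, |_| Err("failed"));
    /// assert_eq!(err, Err("failed"));
    /// assert_eq!(cache.get(&7), None);
    /// ```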
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        // Avoid cloning the key: pass it by reference, and move it out with `ptr::read` only if
        // the insertion path actually consumes it.
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            // SAFETY: `k` borrows the `ManuallyDrop` above; this closure is `FnOnce`, so the key
            // is moved out at most once, and the `read` flag prevents a second drop below.
            unsafe { std::ptr::read(k) }
        });
        if !read {
            // SAFETY: The key was never moved out, so it is still initialized here.
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

    /// Gets a value from the cache, or attempts to insert one computed by `f` if not present.
    ///
    /// If the key is found in the cache, returns `Ok` with a clone of the cached value.
    /// Otherwise, calls `f` to compute the value. If `f` returns `Ok`, attempts to insert
    /// the value and returns it. If `f` returns `Err`, the error is propagated.
    ///
    /// This is the same as [`Self::get_or_try_insert_with`], but takes a reference to the key,
    /// along with a function `cvt` that converts that reference into an owned key when one
    /// needs to be inserted.
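    ///
    /// # Example
    ///
    /// ```
    /// use fixed_cache::Cache;
    ///
    /// let cache: Cache<String, usize> = Cache::new(64, Default::default());
    ///
    /// let res: Result<usize, &str> =
    ///     cache.get_or_try_insert_with_ref("hello", |s| Ok(s.len()), |s| s.to_string());
    /// assert_eq!(res, Ok(5));
    /// ```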
    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        // SAFETY: index is masked to be within bounds.
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        // Always set `ALIVE_BIT` in the expected tag so that an empty (zeroed) bucket can never
        // match a lookup, even when the hash's tag bits happen to be all zero.
        let mut tag = (hash & self.tag_mask()) | ALIVE_BIT;
        if C::EPOCHS {
            tag = (tag & !EPOCH_MASK) | ((self.epoch() << EPOCH_SHIFT) & EPOCH_MASK);
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

        if cfg!(target_pointer_width = "32") {
            // On 32-bit targets, fold the upper half of the 64-bit hash into the lower half so
            // the tag bits still carry entropy from the whole hash.
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S, C: CacheConfig> Drop for Cache<K, V, S, C> {
    fn drop(&mut self) {
        if self.drop {
            // SAFETY: `Drop` has exclusive access.
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

/// A single cache bucket that holds one key-value pair.
///
/// Buckets are aligned to 128 bytes to avoid false sharing between cache lines.
/// Each bucket contains an atomic tag for lock-free synchronization and uninitialized
/// storage for the data.
///
/// This type is public to allow use with the [`static_cache!`] macro for compile-time
/// cache initialization. You typically don't need to interact with it directly.
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

    /// Creates a new zeroed bucket.
    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    /// Attempts to acquire the bucket's lock, returning the previous tag on success.
    ///
    /// If `expected` is `Some`, the lock is only acquired when the current tag matches it
    /// exactly (used by lookups); otherwise, any unlocked state is accepted (used by inserts).
    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

// SAFETY: `Bucket` is a specialized `Mutex<T>` that never blocks.
unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            // SAFETY: `Drop` has exclusive access.
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

/// Declares a static cache with the given key type, value type, size, and optionally a hasher.
///
/// The size must be a power of two (see [`Cache::new`] for the exact requirements).
///
/// # Example
///
/// ```
/// # #[cfg(feature = "rapidhash")] {
/// use fixed_cache::{Cache, static_cache};
///
/// type BuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
///
/// static MY_CACHE: Cache<u64, &'static str, BuildHasher> =
///     static_cache!(u64, &'static str, 1024, BuildHasher::new());
///
/// let value = MY_CACHE.get_or_insert_with(42, |_k| "hi");
/// assert_eq!(value, "hi");
///
/// let new_value = MY_CACHE.get_or_insert_with(42, |_k| "not hi");
/// assert_eq!(new_value, "hi");
/// # }
/// ```
#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    struct EpochConfig;
    impl CacheConfig for EpochConfig {
        const EPOCHS: bool = true;
    }
    type EpochCache<K, V> = super::Cache<K, V, BH, EpochConfig>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        // 3 keys + 3 values = 6 drops
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            // Insert same key again - should evict old entry
            cache.insert(DropKey(1), DropValue(200));
            // Old key + old value dropped = 2
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        // Cache dropped: new key + new value = 2 more
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }

    #[test]
    fn epoch_clear() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        assert_eq!(cache.epoch(), 0);

        cache.insert(1, 100);
        cache.insert(2, 200);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&2), Some(200));

        cache.clear();
        assert_eq!(cache.epoch(), 1);

        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&2), None);

        cache.insert(1, 101);
        assert_eq!(cache.get(&1), Some(101));

        cache.clear();
        assert_eq!(cache.epoch(), 2);
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn epoch_wrap_around() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        for _ in 0..300 {
            cache.insert(42, 123);
            assert_eq!(cache.get(&42), Some(123));
            cache.clear();
            assert_eq!(cache.get(&42), None);
        }
    }

    #[test]
    fn epoch_wraparound_stays_cleared() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(42, 123);
        assert_eq!(cache.get(&42), Some(123));

        for i in 0..2048 {
            cache.clear();
            assert_eq!(cache.get(&42), None, "failed at clear #{i}");
        }
    }
}