#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
const NEEDED_BITS: usize = 2;

const EPOCH_BITS: usize = 10;
const EPOCH_SHIFT: usize = NEEDED_BITS;
const EPOCH_MASK: usize = ((1 << EPOCH_BITS) - 1) << EPOCH_SHIFT;
const EPOCH_NEEDED_BITS: usize = NEEDED_BITS + EPOCH_BITS;
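// Layout of a bucket's tag word, from least to most significant bit:
//   bit 0         LOCKED_BIT: the bucket is currently locked
//   bit 1         ALIVE_BIT: the slot holds initialized data (used when the
//                 entry type needs drop)
//   bits 2..=11   epoch counter, only when `CacheConfig::EPOCHS` is enabled
//   upper bits    the high bits of the key's hash, used to reject mismatched
//                 keys cheaply
// `len_assertion` guarantees the index portion of the hash covers all
// reserved low bits, so the hash-tag portion never overlaps them.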

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

pub trait CacheConfig {
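    /// When `true`, ten bits of every bucket tag hold an epoch counter,
    /// enabling O(1) [`Cache::clear`] at the cost of ten tag bits.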
    const EPOCHS: bool = false;
}

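/// The default [`CacheConfig`]: epochs disabled, so [`Cache::clear`] is
/// unavailable.
///
/// A config opting in to epochs might look like this (a sketch; the name
/// `EpochConfig` is illustrative, mirroring this crate's tests):
///
/// ```ignore
/// struct EpochConfig;
/// impl CacheConfig for EpochConfig {
///     const EPOCHS: bool = true;
/// }
/// ```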
pub struct DefaultCacheConfig(());
impl CacheConfig for DefaultCacheConfig {}

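/// A fixed-capacity, concurrent, direct-mapped cache: every key hashes to
/// exactly one bucket, and colliding inserts simply overwrite one another.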
pub struct Cache<K, V, S = DefaultBuildHasher, C: CacheConfig = DefaultCacheConfig> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    drop: bool,
    epoch: AtomicUsize,
    _config: PhantomData<C>,
}

impl<K, V, S, C: CacheConfig> fmt::Debug for Cache<K, V, S, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

unsafe impl<K: Send, V: Send, S: Send, C: CacheConfig + Send> Send for Cache<K, V, S, C> {}
unsafe impl<K: Send, V: Send, S: Sync, C: CacheConfig + Sync> Sync for Cache<K, V, S, C> {}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    S: BuildHasher,
    C: CacheConfig,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

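    /// Allocates a zeroed, heap-backed cache with `num` buckets.
    ///
    /// Panics unless `num` is a power of two large enough to keep the
    /// reserved low tag bits clear: at least 4 buckets, or 4096 when
    /// `C::EPOCHS` is enabled.
    ///
    /// A minimal usage sketch (not compiled as a doctest):
    ///
    /// ```ignore
    /// let cache: Cache<u64, u64> = Cache::new(1024, Default::default());
    /// cache.insert(1, 100);
    /// assert_eq!(cache.get(&1), Some(100));
    /// ```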
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let layout = std::alloc::Layout::array::<Bucket<(K, V)>>(num).unwrap();
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }
        let entries = std::ptr::slice_from_raw_parts(ptr.cast::<Bucket<(K, V)>>(), num);
        Self::new_inner(entries, build_hasher, true)
    }

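    /// Builds a cache over a caller-provided `'static` bucket array, which
    /// is never deallocated. The [`static_cache!`] macro is the usual way to
    /// produce one; a sketch of that usage:
    ///
    /// ```ignore
    /// let cache = static_cache!(u64, u64, 1024);
    /// assert_eq!(cache.capacity(), 1024);
    /// ```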
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
            epoch: AtomicUsize::new(0),
            _config: PhantomData,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        let reserved = if C::EPOCHS { EPOCH_NEEDED_BITS } else { NEEDED_BITS };
        assert!(len.is_power_of_two(), "length must be a power of two");
        assert!((len & ((1 << reserved) - 1)) == 0, "len must have its bottom N bits set to zero");
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    #[inline]
    fn epoch(&self) -> usize {
        self.epoch.load(Ordering::Relaxed)
    }

    #[inline]
    pub fn clear(&self) {
        const EPOCH_MAX: usize = (1 << EPOCH_BITS) - 1;
        const { assert!(C::EPOCHS, "can only .clear() when C::EPOCHS is true") }
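        // Bumping the epoch changes the tag every live entry was written
        // with, so all existing entries stop matching: an O(1), lazy clear.
        // Once the 10-bit epoch wraps around, stale tags could match again,
        // so fall back to physically zeroing the table.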
        let prev = self.epoch.fetch_add(1, Ordering::Release);
        if prev == EPOCH_MAX {
            self.clear_slow();
        }
    }

    pub fn clear_slow(&self) {
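        // Zero every bucket in place. Note this runs no destructors, so any
        // live `(K, V)` contents are leaked rather than dropped.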
        unsafe {
            std::ptr::write_bytes(
                self.entries.cast_mut().cast::<Bucket<(K, V)>>(),
                0,
                self.entries.len(),
            )
        };
    }

    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
    C: CacheConfig,
{
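    /// Looks up `key`, returning a clone of the cached value on a hit.
    ///
    /// A bucket currently locked by another thread is treated as a miss
    /// rather than waited on.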
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

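    /// Inserts `key -> value`, overwriting whatever currently occupies the
    /// target bucket; if the bucket is locked by another thread, the insert
    /// is silently dropped.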
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

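    /// Removes `key`, returning a clone of its value if it was present.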
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            let data = unsafe { &mut *bucket.data.get() };
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_remove(ck, &v);
                }
                if Self::NEEDS_DROP {
                    unsafe { data.assume_init_drop() };
                }
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            unsafe {
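                // Any tag bits other than the lock bit mean the slot has been
                // written before; an otherwise-zero tag marks it uninitialized.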
                let is_alive = (prev_tag & !LOCKED_BIT) != 0;
                let data = (&mut *bucket.data.get()).as_mut_ptr();

                #[cfg(feature = "stats")]
                {
                    if is_alive {
                        let old_key = std::ptr::replace(&raw mut (*data).0, make_key());
                        let old_value = std::ptr::replace(&raw mut (*data).1, make_value());
                        if let Some(stats) = &self.stats {
                            stats.record_insert(
                                &(*data).0,
                                &(*data).1,
                                Some((&old_key, &old_value)),
                            );
                        }
                    } else {
                        (&raw mut (*data).0).write(make_key());
                        (&raw mut (*data).1).write(make_value());
                        if let Some(stats) = &self.stats {
                            stats.record_insert(&(*data).0, &(*data).1, None);
                        }
                    }
                }

                #[cfg(not(feature = "stats"))]
                {
                    if Self::NEEDS_DROP && is_alive {
                        std::ptr::drop_in_place(data);
                    }
                    (&raw mut (*data).0).write(make_key());
                    (&raw mut (*data).1).write(make_value());
                }
            }
            bucket.unlock(tag);
        }
    }

    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
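        // `key` is taken by value but only moved into the cache if the
        // insert path calls `cvt`. Park it in `ManuallyDrop`, move it out
        // with `ptr::read` when `cvt` runs, and drop it manually otherwise,
        // so it is dropped exactly once on every path.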
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            unsafe { std::ptr::read(k) }
        });
        if !read {
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
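        // The low bits of the hash select the bucket; the remaining high
        // bits, plus the alive/epoch bookkeeping bits, form the tag used to
        // verify candidate hits.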
        let hash = self.hash_key(key);
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        if C::EPOCHS {
            tag = (tag & !EPOCH_MASK) | ((self.epoch() << EPOCH_SHIFT) & EPOCH_MASK);
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

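        // `hash_one` returns a `u64`; on 32-bit targets fold the high half
        // in rather than silently discarding it.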
        if cfg!(target_pointer_width = "32") {
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S, C: CacheConfig> Drop for Cache<K, V, S, C> {
    fn drop(&mut self) {
        if self.drop {
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

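// Aligned to 128 bytes so each bucket sits on its own cache line(s),
// avoiding false sharing between neighboring buckets under concurrency.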
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
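        // Optimistic pre-check: bail out on a tag mismatch (or an already
        // locked bucket) without paying for a compare-exchange.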
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

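/// Declares a [`Cache`] backed by a hidden `static` bucket array, avoiding
/// heap allocation entirely. `$size` must meet the same power-of-two
/// requirements as [`Cache::new`]; the second arm additionally accepts a
/// `BuildHasher` expression.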
#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    struct EpochConfig;
    impl CacheConfig for EpochConfig {
        const EPOCHS: bool = true;
    }
    type EpochCache<K, V> = super::Cache<K, V, BH, EpochConfig>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            cache.insert(DropKey(1), DropValue(200));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }

    #[test]
    fn epoch_clear() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        assert_eq!(cache.epoch(), 0);

        cache.insert(1, 100);
        cache.insert(2, 200);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&2), Some(200));

        cache.clear();
        assert_eq!(cache.epoch(), 1);

        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&2), None);

        cache.insert(1, 101);
        assert_eq!(cache.get(&1), Some(101));

        cache.clear();
        assert_eq!(cache.epoch(), 2);
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn epoch_wrap_around() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        for _ in 0..300 {
            cache.insert(42, 123);
            assert_eq!(cache.get(&42), Some(123));
            cache.clear();
            assert_eq!(cache.get(&42), None);
        }
    }

    #[test]
    fn epoch_wraparound_stays_cleared() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(42, 123);
        assert_eq!(cache.get(&42), Some(123));

        for i in 0..2048 {
            cache.clear();
            assert_eq!(cache.get(&42), None, "failed at clear #{i}");
        }
    }
}
1141}