#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

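// Layout of a bucket's tag word, from the low bits up:
// - bit 0: the bucket is locked
// - bit 1: the bucket holds a live (initialized) entry; only used when the
//   entry type needs `Drop`
// - bits 2..12: epoch counter, only used when `CacheConfig::EPOCHS` is set
// - upper bits: the non-index bits of the key's hash, used as a tag to
//   reject lookups of other keys without comparing them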
const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
const NEEDED_BITS: usize = 2;

const EPOCH_BITS: usize = 10;
const EPOCH_SHIFT: usize = NEEDED_BITS;
const EPOCH_MASK: usize = ((1 << EPOCH_BITS) - 1) << EPOCH_SHIFT;
const EPOCH_NEEDED_BITS: usize = NEEDED_BITS + EPOCH_BITS;

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

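/// Compile-time configuration for a [`Cache`], selected through the cache's
/// `C` type parameter.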
pub trait CacheConfig {
    /// Whether the cache reserves epoch bits in each bucket's tag so that
    /// [`Cache::clear`] can invalidate all entries in O(1). Defaults to `false`.
    const EPOCHS: bool = false;
}

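/// The default [`CacheConfig`]: epoch-based clearing is disabled.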
pub struct DefaultCacheConfig(());

impl CacheConfig for DefaultCacheConfig {}

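/// A fixed-capacity, concurrent, direct-mapped cache.
///
/// Every key hashes to exactly one bucket. Operations never wait: an insert
/// into a locked bucket is silently dropped, and a lookup of a locked or
/// mismatching bucket is a miss, so the cache is best-effort by design.
/// Values are returned by clone, so `V` should be cheap to clone (a small
/// type or an `Arc`).
///
/// A minimal sketch of typical use (illustrative only; `my_cache` stands in
/// for this crate's actual name):
///
/// ```ignore
/// let cache: my_cache::Cache<u64, u64> = my_cache::Cache::new(1024, Default::default());
/// cache.insert(1, 100);
/// assert_eq!(cache.get(&1), Some(100));
/// ```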
pub struct Cache<K, V, S = DefaultBuildHasher, C: CacheConfig = DefaultCacheConfig> {
    /// The bucket array, either heap-allocated (`drop == true`) or `'static`.
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    /// Whether `entries` was heap-allocated and must be freed on drop.
    drop: bool,
    /// The current epoch; only meaningful when `C::EPOCHS` is enabled.
    epoch: AtomicUsize,
    _config: PhantomData<C>,
}

impl<K, V, S, C: CacheConfig> fmt::Debug for Cache<K, V, S, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

unsafe impl<K: Send, V: Send, S: Send, C: CacheConfig + Send> Send for Cache<K, V, S, C> {}

unsafe impl<K: Send, V: Send, S: Sync, C: CacheConfig + Sync> Sync for Cache<K, V, S, C> {}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    S: BuildHasher,
    C: CacheConfig,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

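    /// Creates a cache with `num` heap-allocated, zero-initialized buckets.
    ///
    /// # Panics
    ///
    /// Panics if `num` is not a power of two, or if it is too small to keep
    /// the reserved low tag bits out of the hash tag: at least 4, or 4096
    /// when [`CacheConfig::EPOCHS`] is enabled.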
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let layout = std::alloc::Layout::array::<Bucket<(K, V)>>(num).unwrap();
        // Zeroed memory is a valid, vacant state for every bucket.
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }
        let entries = std::ptr::slice_from_raw_parts(ptr.cast::<Bucket<(K, V)>>(), num);
        Self::new_inner(entries, build_hasher, true)
    }

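    /// Creates a cache over a caller-provided `'static` slice of buckets,
    /// such as one declared by the [`static_cache!`] macro. Nothing is
    /// allocated, and the buckets are not freed when the cache is dropped.
    ///
    /// The slice length must satisfy the same requirements as in
    /// [`Cache::new`].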
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

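    /// Sets the stats handler, builder-style.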
    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

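    /// Replaces the stats handler.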
    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
            epoch: AtomicUsize::new(0),
            _config: PhantomData,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        let reserved = if C::EPOCHS { EPOCH_NEEDED_BITS } else { NEEDED_BITS };
        assert!(len.is_power_of_two(), "length must be a power of two");
        // For a power of two this is equivalent to `len >= 1 << reserved`,
        // which keeps the lock/alive/epoch bits inside the index portion of
        // the hash so they can never collide with the tag bits.
        assert!((len & ((1 << reserved) - 1)) == 0, "len must have its bottom N bits set to zero");
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    /// The current epoch counter (unmasked).
    #[inline]
    fn epoch(&self) -> usize {
        self.epoch.load(Ordering::Relaxed)
    }

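    /// Logically clears the cache in O(1) by bumping the epoch counter;
    /// entries tagged with an older epoch no longer match any lookup.
    ///
    /// Only available when `C::EPOCHS` is `true` (enforced at compile time).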
    #[inline]
    pub fn clear(&self) {
        const EPOCH_MAX: usize = (1 << EPOCH_BITS) - 1;
        const { assert!(C::EPOCHS, "can only .clear() when C::EPOCHS is true") }
        let prev = self.epoch.fetch_add(1, Ordering::Release);
        // Tags only store `EPOCH_BITS` bits of the epoch, so every time the
        // masked counter wraps back to zero, physically wipe the buckets to
        // keep entries from `1 << EPOCH_BITS` epochs ago from resurfacing.
        if (prev & EPOCH_MAX) == EPOCH_MAX {
            self.clear_slow();
        }
    }

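    /// Physically clears the cache by zeroing every bucket.
    ///
    /// The previous contents are overwritten without being dropped, so keys
    /// and values that own resources are leaked rather than destroyed.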
    pub fn clear_slow(&self) {
        // A zeroed bucket reads as vacant and unlocked.
        unsafe {
            std::ptr::write_bytes(
                self.entries.cast_mut().cast::<Bucket<(K, V)>>(),
                0,
                self.entries.len(),
            )
        };
    }

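    /// Returns a reference to the cache's [`BuildHasher`].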
    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

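    /// Returns the number of buckets, fixed at construction.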
    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

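    /// Returns the stats handler, if any.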
    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
    C: CacheConfig,
{
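    /// Looks up `key` and returns a clone of the cached value, if present.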
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        // Only locks if the stored tag matches exactly: same hash tag and
        // epoch, alive, and not currently locked.
        if bucket.try_lock(Some(tag)) {
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            // Same tag but a different key: a hash collision.
            #[cfg(feature = "stats")]
            if let Some(stats) = &self.stats {
                stats.record_collision(AnyRef::new(&key), ck, v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

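    /// Inserts `key` → `value`, overwriting whatever currently occupies the
    /// key's bucket. Best-effort: if the bucket is locked by another
    /// operation, the insert is silently dropped.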
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

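    /// Removes `key`'s entry, returning a clone of its value if it was
    /// present.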
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            let data = unsafe { &mut *bucket.data.get() };
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                if Self::NEEDS_DROP {
                    unsafe { data.assume_init_drop() };
                }
                // Unlocking with a zero tag marks the bucket as vacant.
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        // Take the bucket whatever its current tag is; give up if locked.
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            unsafe {
                // Holding the lock grants exclusive access to the slot.
                let data = (&mut *bucket.data.get()).as_mut_ptr();
                if Self::NEEDS_DROP && (prev_tag & ALIVE_BIT) != 0 {
                    // Drop the live entry we are about to overwrite.
                    std::ptr::drop_in_place(data);
                }
                (&raw mut (*data).0).write(make_key());
                (&raw mut (*data).1).write(make_value());
            }
            bucket.unlock(tag);
        }
    }

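    /// Returns the cached value for `key`, or computes it with `f`, inserts
    /// it (best-effort) and returns it.
    ///
    /// A minimal sketch of typical use (illustrative only; `my_cache` stands
    /// in for this crate's actual name):
    ///
    /// ```ignore
    /// let cache: my_cache::Cache<u64, u64> = my_cache::Cache::new(1024, Default::default());
    /// let v = cache.get_or_insert_with(21, |&k| k * 2);
    /// assert_eq!(v, 42);
    /// ```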
    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

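    /// Like [`get_or_insert_with`](Self::get_or_insert_with), but looks up
    /// by a borrowed key `Q`, and only materializes an owned `K` via `cvt`
    /// when an insert actually happens.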
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

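    /// Fallible version of [`get_or_insert_with`](Self::get_or_insert_with):
    /// if `f` returns `Err`, nothing is inserted and the error is passed
    /// through.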
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        // Funnel the owned key through the by-reference API: `cvt` moves it
        // out with `ptr::read`, and we drop it ourselves only if `cvt` was
        // never called.
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            unsafe { std::ptr::read(k) }
        });
        if !read {
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

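    /// Fallible version of
    /// [`get_or_insert_with_ref`](Self::get_or_insert_with_ref).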
    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        // SAFETY: masking with `index_mask` keeps the index in bounds.
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        if C::EPOCHS {
            tag = (tag & !EPOCH_MASK) | ((self.epoch() << EPOCH_SHIFT) & EPOCH_MASK);
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

        if cfg!(target_pointer_width = "32") {
            // Fold the 64-bit hash so its upper bits still contribute on
            // 32-bit targets.
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S, C: CacheConfig> Drop for Cache<K, V, S, C> {
    fn drop(&mut self) {
        if self.drop {
            // SAFETY: `entries` was allocated in `Cache::new` with the same
            // layout a boxed slice uses; dropping the box also runs each
            // bucket's destructor.
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

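/// A single cache slot: an atomic tag word plus (possibly uninitialized)
/// storage for one entry.
///
/// Aligned to 128 bytes so each bucket occupies its own cache line(s),
/// avoiding false sharing between neighboring buckets. Public only so that
/// [`static_cache!`] can declare static bucket arrays.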
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    /// Lock bit, alive bit, epoch bits, and hash tag; see the layout notes
    /// at the top of this file.
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

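    /// Creates an empty bucket with a zeroed tag and zeroed storage.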
    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    /// Tries to lock the bucket, returning the tag observed before locking.
    ///
    /// With `Some(expected)`, the lock is only taken if the current tag is
    /// exactly `expected` (which implies it is unlocked); with `None`, it is
    /// taken whenever the bucket is unlocked.
    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    /// Stores `tag` as the new state, releasing the lock; callers always
    /// pass a tag without `LOCKED_BIT` set.
    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

unsafe impl<T: Send> Send for Bucket<T> {}

unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

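/// Declares a `static` array of `$size` zeroed buckets and constructs a
/// [`Cache::new_static`] over it, so the cache needs no heap allocation.
///
/// Each macro invocation declares its own distinct static array.
///
/// A minimal sketch of typical use (illustrative only; `my_cache` stands in
/// for this crate's actual name):
///
/// ```ignore
/// let cache: my_cache::Cache<u64, u64> = my_cache::static_cache!(u64, u64, 1024);
/// cache.insert(1, 2);
/// assert_eq!(cache.get(&1), Some(2));
/// ```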
#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    // Use the crate's default hasher so the tests build with or without the
    // `rapidhash` feature.
    type BH = DefaultBuildHasher;
    type Cache<K, V> = super::Cache<K, V, BH>;

    struct EpochConfig;
    impl CacheConfig for EpochConfig {
        const EPOCHS: bool = true;
    }
    type EpochCache<K, V> = super::Cache<K, V, BH, EpochConfig>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        // Dropping the cache drops all three live keys and values.
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            // Overwriting the bucket drops the previous key and value.
            cache.insert(DropKey(1), DropValue(200));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        // Dropping the cache drops the remaining key and value.
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }

    #[test]
    fn epoch_clear() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        assert_eq!(cache.epoch(), 0);

        cache.insert(1, 100);
        cache.insert(2, 200);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&2), Some(200));

        cache.clear();
        assert_eq!(cache.epoch(), 1);

        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&2), None);

        cache.insert(1, 101);
        assert_eq!(cache.get(&1), Some(101));

        cache.clear();
        assert_eq!(cache.epoch(), 2);
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn epoch_wrap_around() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        for _ in 0..300 {
            cache.insert(42, 123);
            assert_eq!(cache.get(&42), Some(123));
            cache.clear();
            assert_eq!(cache.get(&42), None);
        }
    }

    #[test]
    fn epoch_wraparound_stays_cleared() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(42, 123);
        assert_eq!(cache.get(&42), Some(123));

        for i in 0..2048 {
            cache.clear();
            assert_eq!(cache.get(&42), None, "failed at clear #{i}");
        }
    }
}
1113}