#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
const NEEDED_BITS: usize = 2;

const EPOCH_BITS: usize = 10;
const EPOCH_SHIFT: usize = NEEDED_BITS;
const EPOCH_MASK: usize = ((1 << EPOCH_BITS) - 1) << EPOCH_SHIFT;
const EPOCH_NEEDED_BITS: usize = NEEDED_BITS + EPOCH_BITS;

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

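/// Compile-time configuration for a [`Cache`].
///
/// Implement this trait on a marker type and pass it as the `C` type
/// parameter to opt in to optional behavior.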
pub trait CacheConfig {
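    /// Whether the cache reserves tag bits for an epoch counter, enabling
    /// O(1) [`Cache::clear`]. Defaults to `false`.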
    const EPOCHS: bool = false;
}

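/// The default [`CacheConfig`]: no epochs.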
pub struct DefaultCacheConfig(());
impl CacheConfig for DefaultCacheConfig {}

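/// A fixed-size, concurrent, direct-mapped cache.
///
/// Every key hashes to exactly one bucket: a colliding insert overwrites the
/// bucket's previous occupant, and an operation that loses the race for a
/// bucket's lock simply gives up instead of blocking. Lookups therefore
/// return a clone of the cached value rather than a reference.
///
/// A minimal usage sketch (crate items assumed to be in scope):
///
/// ```ignore
/// let cache: Cache<u64, u64> = Cache::new(1024, Default::default());
/// cache.insert(1, 100);
/// assert_eq!(cache.get(&1), Some(100));
/// ```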
pub struct Cache<K, V, S = DefaultBuildHasher, C: CacheConfig = DefaultCacheConfig> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    drop: bool,
    epoch: AtomicUsize,
    _config: PhantomData<C>,
}

impl<K, V, S, C: CacheConfig> fmt::Debug for Cache<K, V, S, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

unsafe impl<K: Send, V: Send, S: Send, C: CacheConfig + Send> Send for Cache<K, V, S, C> {}
unsafe impl<K: Send, V: Send, S: Sync, C: CacheConfig + Sync> Sync for Cache<K, V, S, C> {}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    S: BuildHasher,
    C: CacheConfig,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

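    /// Creates a cache with `num` heap-allocated buckets.
    ///
    /// # Panics
    ///
    /// Panics if `num` is not a power of two, or is too small to leave the
    /// reserved low tag bits zero.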
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let layout = std::alloc::Layout::array::<Bucket<(K, V)>>(num).unwrap();
        // Zeroed memory is the canonical empty state for a `Bucket`.
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }
        let entries = std::ptr::slice_from_raw_parts(ptr.cast::<Bucket<(K, V)>>(), num);
        Self::new_inner(entries, build_hasher, true)
    }

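    /// Creates a cache backed by a `'static` slice of buckets, such as one
    /// declared by the [`static_cache!`] macro. The buckets are neither freed
    /// nor dropped when the cache is dropped.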
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

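    /// Sets the statistics handler and returns the cache (builder style).
    #[cfg_attr(docsrs, doc(cfg(feature = "stats")))]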
    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

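    /// Sets the statistics handler.
    #[cfg_attr(docsrs, doc(cfg(feature = "stats")))]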
    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
            epoch: AtomicUsize::new(0),
            _config: PhantomData,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        let reserved = if C::EPOCHS { EPOCH_NEEDED_BITS } else { NEEDED_BITS };
        assert!(len.is_power_of_two(), "length must be a power of two");
        assert!((len & ((1 << reserved) - 1)) == 0, "len must have its bottom N bits set to zero");
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        // SAFETY: `len_assertion` guarantees the capacity is a power of two.
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    #[inline]
    fn epoch(&self) -> usize {
        self.epoch.load(Ordering::Relaxed)
    }

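    /// Logically clears the cache in O(1) by bumping the epoch counter, which
    /// invalidates every entry tagged with an earlier epoch.
    ///
    /// Only available when `C::EPOCHS` is `true`; this is checked at compile
    /// time. Each time the `EPOCH_BITS`-bit epoch wraps around, the whole
    /// table is wiped with [`Self::clear_slow`].
    ///
    /// A minimal sketch of an epoch-enabled cache (crate items assumed to be
    /// in scope):
    ///
    /// ```ignore
    /// struct Epochs;
    /// impl CacheConfig for Epochs {
    ///     const EPOCHS: bool = true;
    /// }
    ///
    /// // Epochs reserve extra tag bits, so the minimum capacity is larger.
    /// let cache: Cache<u64, u64, std::hash::RandomState, Epochs> =
    ///     Cache::new(4096, Default::default());
    /// cache.insert(1, 100);
    /// cache.clear();
    /// assert_eq!(cache.get(&1), None);
    /// ```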
    #[inline]
    pub fn clear(&self) {
        const EPOCH_MAX: usize = (1 << EPOCH_BITS) - 1;
        const { assert!(C::EPOCHS, "can only .clear() when C::EPOCHS is true") }
        let prev = self.epoch.fetch_add(1, Ordering::Release);
        // Tags only store the low `EPOCH_BITS` bits of the epoch, so each time
        // those bits wrap around, entries written `EPOCH_MAX + 1` clears ago
        // would become visible again; wipe the table for real at every wrap.
        if prev & EPOCH_MAX == EPOCH_MAX {
            self.clear_slow();
        }
    }

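    /// Wipes every bucket back to the empty, unlocked state.
    ///
    /// Entry contents are not dropped, so keys or values that own resources
    /// are leaked. This is the wrap-around path used by [`Self::clear`].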
    pub fn clear_slow(&self) {
        // Zero every bucket, resetting tags and data to the empty state.
        unsafe {
            std::ptr::write_bytes(
                self.entries.cast_mut().cast::<Bucket<(K, V)>>(),
                0,
                self.entries.len(),
            )
        };
    }

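    /// Returns a reference to the cache's [`BuildHasher`].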
    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

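    /// Returns the number of buckets, which is also the maximum number of
    /// entries the cache can hold.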
    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

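    /// Returns a reference to the statistics handler, if one is set.
    #[cfg_attr(docsrs, doc(cfg(feature = "stats")))]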
    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S, C> Cache<K, V, S, C>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
    C: CacheConfig,
{
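    /// Returns a clone of the value cached for `key`, if present.
    ///
    /// Lookups never block: if the bucket is momentarily locked by another
    /// thread, this counts as a miss.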
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            // SAFETY: we hold the lock and the tag matched, so the slot
            // contains the entry published with this tag.
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

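    /// Inserts a key-value pair, overwriting whatever previously occupied the
    /// bucket that `key` hashes to.
    ///
    /// Insertion is best-effort: if another thread holds the bucket's lock,
    /// the new entry is silently discarded.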
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

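    /// Removes and returns the value cached for `key`, if present, leaving
    /// the bucket empty.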
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            let data = unsafe { &mut *bucket.data.get() };
            // SAFETY: the tag matched, so the slot holds an initialized entry.
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                if Self::NEEDS_DROP {
                    unsafe { data.assume_init_drop() };
                }
                // Publish the all-zero tag, marking the bucket empty.
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            unsafe {
                // A nonzero tag (ignoring the lock bit) means the slot already
                // holds an initialized entry.
                let is_alive = (prev_tag & !LOCKED_BIT) != 0;
                let data = (&mut *bucket.data.get()).as_mut_ptr();

                #[cfg(feature = "stats")]
                {
                    if is_alive {
                        let old_key = std::ptr::replace(&raw mut (*data).0, make_key());
                        let old_value = std::ptr::replace(&raw mut (*data).1, make_value());
                        if let Some(stats) = &self.stats
                            && !old_key.equivalent(&(*data).0)
                        {
                            stats.record_collision(AnyRef::new(&(*data).0), &old_key, &old_value);
                        }
                    } else {
                        (&raw mut (*data).0).write(make_key());
                        (&raw mut (*data).1).write(make_value());
                    }
                }

                #[cfg(not(feature = "stats"))]
                {
                    if Self::NEEDS_DROP && is_alive {
                        std::ptr::drop_in_place(data);
                    }
                    (&raw mut (*data).0).write(make_key());
                    (&raw mut (*data).1).write(make_value());
                }
            }
            bucket.unlock(tag);
        }
    }

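    /// Returns the cached value for `key`, or computes it with `f`, caches it
    /// (best-effort), and returns it.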
    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

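    /// Like [`Self::get_or_insert_with`], but looks up by a borrowed form of
    /// the key; `cvt` converts the borrowed key into an owned `K` only when
    /// an insert actually takes place.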
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

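    /// Fallible version of [`Self::get_or_insert_with`]: if `f` returns an
    /// error, nothing is cached and the error is returned.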
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            // Move `key` into the cache by reading it out of the `ManuallyDrop`.
            unsafe { std::ptr::read(k) }
        });
        if !read {
            // The conversion closure never ran, so we still own `key`.
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

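    /// Fallible version of [`Self::get_or_insert_with_ref`].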
    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        // SAFETY: `index_mask` keeps the index within `self.entries`.
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        if C::EPOCHS {
            tag = (tag & !EPOCH_MASK) | ((self.epoch() << EPOCH_SHIFT) & EPOCH_MASK);
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

        if cfg!(target_pointer_width = "32") {
            // Fold the high half in so 32-bit targets still see all 64 bits.
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S, C: CacheConfig> Drop for Cache<K, V, S, C> {
    fn drop(&mut self) {
        if self.drop {
            // SAFETY: `new` allocated this slice via the global allocator with
            // a matching layout; dropping the `Box` also drops each bucket.
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

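/// A single cache slot: a tag word plus (possibly uninitialized) storage for
/// one entry.
///
/// The tag packs the bucket's state into one `AtomicUsize`: bit 0 is the lock
/// bit, bit 1 the alive bit (only used when the entry type needs `Drop`), the
/// next `EPOCH_BITS` bits hold the epoch when enabled, and the remaining high
/// bits store a tag derived from the key's hash. The 128-byte alignment keeps
/// each bucket on its own cache line(s) to avoid false sharing.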
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

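    /// Creates an empty, unlocked bucket. The all-zero state is the canonical
    /// empty bucket, matching the zeroed allocation used by [`Cache::new`].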
    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    /// Tries to lock the bucket, returning the previous tag on success.
    ///
    /// With `Some(expected)`, the lock is taken only if the tag is exactly
    /// `expected`; with `None`, any unlocked tag is accepted.
    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    /// Unlocks the bucket, publishing `tag` as its new state.
    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            // SAFETY: the alive bit is only set once the slot is initialized.
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

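/// Declares a `static` array of buckets and constructs a [`Cache`] over it
/// with [`Cache::new_static`], avoiding any heap allocation.
///
/// Takes `key type, value type, size`, and optionally a hasher expression
/// (defaulting to `Default::default()`).
///
/// A minimal usage sketch (crate items assumed to be in scope):
///
/// ```ignore
/// let cache: Cache<u64, u64> = static_cache!(u64, u64, 1024);
/// cache.insert(1, 2);
/// assert_eq!(cache.get(&1), Some(2));
/// ```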
#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    struct EpochConfig;
    impl CacheConfig for EpochConfig {
        const EPOCHS: bool = true;
    }
    type EpochCache<K, V> = super::Cache<K, V, BH, EpochConfig>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            cache.insert(DropKey(1), DropValue(200));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }

    #[test]
    fn epoch_clear() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        assert_eq!(cache.epoch(), 0);

        cache.insert(1, 100);
        cache.insert(2, 200);
        assert_eq!(cache.get(&1), Some(100));
        assert_eq!(cache.get(&2), Some(200));

        cache.clear();
        assert_eq!(cache.epoch(), 1);

        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&2), None);

        cache.insert(1, 101);
        assert_eq!(cache.get(&1), Some(101));

        cache.clear();
        assert_eq!(cache.epoch(), 2);
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn epoch_wrap_around() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        for _ in 0..300 {
            cache.insert(42, 123);
            assert_eq!(cache.get(&42), Some(123));
            cache.clear();
            assert_eq!(cache.get(&42), None);
        }
    }

    #[test]
    fn epoch_wraparound_stays_cleared() {
        let cache: EpochCache<u64, u64> = EpochCache::new(4096, Default::default());

        cache.insert(42, 123);
        assert_eq!(cache.get(&42), Some(123));

        for i in 0..2048 {
            cache.clear();
            assert_eq!(cache.get(&42), None, "failed at clear #{i}");
        }
    }
}