#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

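// Layout of a bucket's tag word: bit 0 is the lock flag, bit 1 is the "alive"
// flag (set while the slot holds initialized data), and the remaining high
// bits carry a tag derived from the key's hash so lookups can reject
// mismatched buckets without comparing keys.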
const NEEDED_BITS: usize = 2;
const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

pub struct Cache<K, V, S = DefaultBuildHasher> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    drop: bool,
}

impl<K, V, S> fmt::Debug for Cache<K, V, S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

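// Each bucket acts as a tiny spinlock-guarded slot, so the bounds mirror
// `Mutex<T>`: sharing the cache across threads only requires the keys and
// values to be `Send`, plus `Sync` for the shared hasher.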
unsafe impl<K: Send, V: Send, S: Send> Send for Cache<K, V, S> {}
unsafe impl<K: Send, V: Send, S: Sync> Sync for Cache<K, V, S> {}

impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq,
    S: BuildHasher,
{
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let entries =
            Box::into_raw((0..num).map(|_| Bucket::new()).collect::<Vec<_>>().into_boxed_slice());
        Self::new_inner(entries, build_hasher, true)
    }

    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
        }
    }

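    // The hash tag shares its word with the lock and alive flags, so the index
    // mask must cover at least `NEEDED_BITS` low bits: a power-of-two length
    // of at least `1 << NEEDED_BITS` guarantees `hash & tag_mask()` never
    // touches the flag bits.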
    #[inline]
    const fn len_assertion(len: usize) {
        assert!(len.is_power_of_two(), "length must be a power of two");
        assert!(
            (len & ((1 << NEEDED_BITS) - 1)) == 0,
            "len must have its bottom N bits set to zero"
        );
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    #[inline]
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

    #[inline]
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

    #[cfg(feature = "stats")]
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

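    // Locks the bucket only when it is unlocked, alive, and carries the same
    // hash tag; a tag mismatch is treated as a miss without touching the data.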
    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            #[cfg(feature = "stats")]
            if let Some(stats) = &self.stats {
                stats.record_collision(AnyRef::new(&key), ck, v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

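    // On success the bucket is unlocked with a tag of zero, which clears both
    // the hash tag and the alive bit and leaves the slot readable as empty.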
    pub fn remove<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        if bucket.try_lock(Some(tag)) {
            let data = unsafe { &mut *bucket.data.get() };
            let (ck, v) = unsafe { data.assume_init_ref() };
            if key.equivalent(ck) {
                let v = v.clone();
                if Self::NEEDS_DROP {
                    unsafe { data.assume_init_drop() };
                }
                bucket.unlock(0);
                return Some(v);
            }
            bucket.unlock(tag);
        }
        None
    }

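    // Best-effort insertion: if another thread currently holds the bucket's
    // lock, the new entry is silently discarded rather than waited for, so
    // writes to this cache are lossy by design.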
    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            unsafe {
                let data = (&mut *bucket.data.get()).as_mut_ptr();
                if Self::NEEDS_DROP && (prev_tag & ALIVE_BIT) != 0 {
                    std::ptr::drop_in_place(data);
                }
                (&raw mut (*data).0).write(make_key());
                (&raw mut (*data).1).write(make_value());
            }
            bucket.unlock(tag);
        }
    }

    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

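    // Borrowed-key variant: `cvt` builds an owned key only when the lookup
    // misses and an insert actually happens, so a hit avoids the conversion
    // (e.g. a `&str -> String` allocation) entirely.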
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

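    // The owned key is wrapped in `ManuallyDrop` and moved into the cache via
    // `ptr::read` at most once (inside the `cvt` closure below); if that
    // closure never runs, ownership was never transferred and the key is
    // dropped here instead.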
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            unsafe { std::ptr::read(k) }
        });
        if !read {
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

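    // Splits a key's hash into the bucket index (low bits) and the tag (high
    // bits). When the entry type needs dropping, the alive bit is folded into
    // the tag so occupied buckets are distinguishable from zeroed ones.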
    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        (bucket, tag)
    }

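    // On 32-bit targets the 64-bit hash is folded in half so the truncation to
    // `usize` does not discard the entropy in the high bits.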
    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

        if cfg!(target_pointer_width = "32") {
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

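// The bucket slice is freed only when it was heap-allocated by `Cache::new`;
// caches built over a `'static` slice via `new_static` leave it untouched.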
impl<K, V, S> Drop for Cache<K, V, S> {
    fn drop(&mut self) {
        if self.drop {
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

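// The 128-byte alignment keeps adjacent buckets on separate cache lines (a
// pair of lines on common 64-byte-line targets), avoiding false sharing under
// concurrent access.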
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

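    // With `Some(expected)` the lock is taken only if the tag word matches
    // exactly (unlocked, alive, same hash tag); with `None` it is taken
    // whenever the bucket is unlocked, and the previous tag is returned so the
    // caller can tell whether the slot held initialized data.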
    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

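// As with `Mutex<T>`: the tag word serializes access to `data`, so `T: Send`
// suffices for both `Send` and `Sync`.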
unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

#[macro_export]
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}


#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn remove_existing() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        assert_eq!(cache.get(&1), Some("one".to_string()));

        let removed = cache.remove(&1);
        assert_eq!(removed, Some("one".to_string()));
        assert_eq!(cache.get(&1), None);
    }

    #[test]
    fn remove_nonexistent() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.remove(&999), None);
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            cache.insert(DropKey(1), DropValue(200));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }
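
    #[test]
    fn static_cache_macro() {
        // A hedged sketch exercising the `static_cache!` macro with an
        // explicit hasher expression: the bucket array lives in a hidden
        // `static`, so nothing is heap-allocated and `Cache`'s `Drop` impl
        // leaves it untouched.
        let cache: Cache<u64, u64> = static_cache!(u64, u64, 64, BH::default());
        assert_eq!(cache.capacity(), 64);

        cache.insert(5, 25);
        assert_eq!(cache.get(&5), Some(25));
    }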
}