#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::new_without_default)]

use equivalent::Equivalent;
use std::{
    cell::UnsafeCell,
    convert::Infallible,
    fmt,
    hash::{BuildHasher, Hash},
    mem::MaybeUninit,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "stats")]
mod stats;
#[cfg(feature = "stats")]
#[cfg_attr(docsrs, doc(cfg(feature = "stats")))]
pub use stats::{AnyRef, CountingStatsHandler, Stats, StatsHandler};

const NEEDED_BITS: usize = 2;
const LOCKED_BIT: usize = 1 << 0;
const ALIVE_BIT: usize = 1 << 1;
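
// Each bucket packs its state into one word: the high bits of a key's hash
// (everything above the index bits) are stored as a tag, while bits 0 and 1
// serve as `LOCKED_BIT` and `ALIVE_BIT`. `len_assertion` enforces a capacity
// of at least `1 << NEEDED_BITS`, which guarantees the tag's low bits are
// always zero and can never collide with the flag bits.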

#[cfg(feature = "rapidhash")]
type DefaultBuildHasher = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
#[cfg(not(feature = "rapidhash"))]
type DefaultBuildHasher = std::hash::RandomState;

pub struct Cache<K, V, S = DefaultBuildHasher> {
    entries: *const [Bucket<(K, V)>],
    build_hasher: S,
    #[cfg(feature = "stats")]
    stats: Option<Stats<K, V>>,
    drop: bool,
}

impl<K, V, S> fmt::Debug for Cache<K, V, S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

unsafe impl<K: Send, V: Send, S: Send> Send for Cache<K, V, S> {}
unsafe impl<K: Send, V: Send, S: Sync> Sync for Cache<K, V, S> {}

impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq,
    S: BuildHasher,
{
    pub fn new(num: usize, build_hasher: S) -> Self {
        Self::len_assertion(num);
        let entries =
            Box::into_raw((0..num).map(|_| Bucket::new()).collect::<Vec<_>>().into_boxed_slice());
        Self::new_inner(entries, build_hasher, true)
    }

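    /// Creates a cache backed by a `static` slice of buckets, e.g. one
    /// declared by the [`static_cache!`] macro. The buckets are not freed
    /// when the cache is dropped.
    ///
    /// # Panics
    ///
    /// Panics under the same length requirements as [`Cache::new`].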
    #[inline]
    pub const fn new_static(entries: &'static [Bucket<(K, V)>], build_hasher: S) -> Self {
        Self::len_assertion(entries.len());
        Self::new_inner(entries, build_hasher, false)
    }

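    /// Sets the stats handler, returning the cache for chaining.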
    #[cfg(feature = "stats")]
    #[inline]
    pub fn with_stats(mut self, stats: Option<Stats<K, V>>) -> Self {
        self.set_stats(stats);
        self
    }

    #[cfg(feature = "stats")]
    #[inline]
    pub fn set_stats(&mut self, stats: Option<Stats<K, V>>) {
        self.stats = stats;
    }

    #[inline]
    const fn new_inner(entries: *const [Bucket<(K, V)>], build_hasher: S, drop: bool) -> Self {
        Self {
            entries,
            build_hasher,
            #[cfg(feature = "stats")]
            stats: None,
            drop,
        }
    }

    #[inline]
    const fn len_assertion(len: usize) {
        assert!(len.is_power_of_two(), "length must be a power of two");
        // The bottom `NEEDED_BITS` bits of every tag must be free for the
        // lock and alive flags, so the index region must cover them.
        assert!(
            (len & ((1 << NEEDED_BITS) - 1)) == 0,
            "len must have its bottom N bits set to zero"
        );
    }

    #[inline]
    const fn index_mask(&self) -> usize {
        let n = self.capacity();
        unsafe { std::hint::assert_unchecked(n.is_power_of_two()) };
        n - 1
    }

    #[inline]
    const fn tag_mask(&self) -> usize {
        !self.index_mask()
    }

    #[inline]
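    /// Returns a reference to the cache's [`BuildHasher`].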
    pub const fn hasher(&self) -> &S {
        &self.build_hasher
    }

    #[inline]
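    /// Returns the number of buckets in the cache.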
    pub const fn capacity(&self) -> usize {
        self.entries.len()
    }

    #[cfg(feature = "stats")]
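    /// Returns the configured stats handler, if any.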
    pub const fn stats(&self) -> Option<&Stats<K, V>> {
        self.stats.as_ref()
    }
}

impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq,
    V: Clone,
    S: BuildHasher,
{
    const NEEDS_DROP: bool = Bucket::<(K, V)>::NEEDS_DROP;

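    /// Looks up `key`, returning a clone of the cached value on a hit.
    ///
    /// Returns `None` on a miss, a tag mismatch, or when the bucket is
    /// momentarily locked by another thread.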
    pub fn get<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> Option<V> {
        let (bucket, tag) = self.calc(key);
        self.get_inner(key, bucket, tag)
    }

    #[inline]
    fn get_inner<Q: ?Sized + Hash + Equivalent<K>>(
        &self,
        key: &Q,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) -> Option<V> {
        if bucket.try_lock(Some(tag)) {
            // SAFETY: the lock is held and the tag matched, so the slot is
            // initialized (or validly zero-initialized for `!needs_drop` types).
            let (ck, v) = unsafe { (*bucket.data.get()).assume_init_ref() };
            if key.equivalent(ck) {
                #[cfg(feature = "stats")]
                if let Some(stats) = &self.stats {
                    stats.record_hit(ck, v);
                }
                let v = v.clone();
                bucket.unlock(tag);
                return Some(v);
            }
            #[cfg(feature = "stats")]
            if let Some(stats) = &self.stats {
                stats.record_collision(AnyRef::new(&key), ck, v);
            }
            bucket.unlock(tag);
        }
        #[cfg(feature = "stats")]
        if let Some(stats) = &self.stats {
            stats.record_miss(AnyRef::new(&key));
        }
        None
    }

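    /// Inserts `key` and `value`, overwriting whatever previously occupied
    /// the target bucket. The insert is silently skipped if the bucket is
    /// momentarily locked by another thread.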
    pub fn insert(&self, key: K, value: V) {
        let (bucket, tag) = self.calc(&key);
        self.insert_inner(|| key, || value, bucket, tag);
    }

    #[inline]
    fn insert_inner(
        &self,
        make_key: impl FnOnce() -> K,
        make_value: impl FnOnce() -> V,
        bucket: &Bucket<(K, V)>,
        tag: usize,
    ) {
        if let Some(prev_tag) = bucket.try_lock_ret(None) {
            // SAFETY: the bucket lock is held, so we have exclusive access.
            unsafe {
                let data = (&mut *bucket.data.get()).as_mut_ptr();
                if Self::NEEDS_DROP && (prev_tag & ALIVE_BIT) != 0 {
                    // Drop the entry that previously occupied this slot.
                    std::ptr::drop_in_place(data);
                }
                (&raw mut (*data).0).write(make_key());
                (&raw mut (*data).1).write(make_value());
            }
            bucket.unlock(tag);
        }
    }

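    /// Returns the value for `key`, computing it with `f` and caching it on
    /// a miss.
    ///
    /// Under contention `f` may run even though another thread is inserting
    /// the same key concurrently; the computed value is returned either way.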
    #[inline]
    pub fn get_or_insert_with<F>(&self, key: K, f: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        self.get_or_try_insert_with(key, |key| Ok::<_, Infallible>(f(key))).unwrap()
    }

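    /// Like [`Cache::get_or_insert_with`], but looks up a borrowed key and
    /// only converts it into an owned `K` (via `cvt`) when a slot is
    /// actually written.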
    #[inline]
    pub fn get_or_insert_with_ref<'a, Q, F, Cvt>(&self, key: &'a Q, f: F, cvt: Cvt) -> V
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> V,
        Cvt: FnOnce(&'a Q) -> K,
    {
        self.get_or_try_insert_with_ref(key, |key| Ok::<_, Infallible>(f(key)), cvt).unwrap()
    }

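    /// Fallible version of [`Cache::get_or_insert_with`]: if `f` returns an
    /// error, nothing is cached and the error is propagated.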
    #[inline]
    pub fn get_or_try_insert_with<F, E>(&self, key: K, f: F) -> Result<V, E>
    where
        F: FnOnce(&K) -> Result<V, E>,
    {
        // Move the key into `ManuallyDrop` so the `cvt` closure can take it
        // by value with `ptr::read`; drop it here only if `cvt` never ran.
        let mut key = std::mem::ManuallyDrop::new(key);
        let mut read = false;
        let r = self.get_or_try_insert_with_ref(&*key, f, |k| {
            read = true;
            unsafe { std::ptr::read(k) }
        });
        if !read {
            unsafe { std::mem::ManuallyDrop::drop(&mut key) }
        }
        r
    }

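    /// Fallible version of [`Cache::get_or_insert_with_ref`]. On a miss the
    /// value is computed by `f`, cloned into the bucket if its lock can be
    /// acquired, and returned.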
    #[inline]
    pub fn get_or_try_insert_with_ref<'a, Q, F, Cvt, E>(
        &self,
        key: &'a Q,
        f: F,
        cvt: Cvt,
    ) -> Result<V, E>
    where
        Q: ?Sized + Hash + Equivalent<K>,
        F: FnOnce(&'a Q) -> Result<V, E>,
        Cvt: FnOnce(&'a Q) -> K,
    {
        let (bucket, tag) = self.calc(key);
        if let Some(v) = self.get_inner(key, bucket, tag) {
            return Ok(v);
        }
        let value = f(key)?;
        self.insert_inner(|| cvt(key), || value.clone(), bucket, tag);
        Ok(value)
    }

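    // Maps a key's hash to its bucket (selected by the low bits) and tag
    // (the remaining high bits), OR-ing in `ALIVE_BIT` when `(K, V)` needs
    // drop so that readers only match initialized slots.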
    #[inline]
    fn calc<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> (&Bucket<(K, V)>, usize) {
        let hash = self.hash_key(key);
        let bucket = unsafe { (&*self.entries).get_unchecked(hash & self.index_mask()) };
        let mut tag = hash & self.tag_mask();
        if Self::NEEDS_DROP {
            tag |= ALIVE_BIT;
        }
        (bucket, tag)
    }

    #[inline]
    fn hash_key<Q: ?Sized + Hash + Equivalent<K>>(&self, key: &Q) -> usize {
        let hash = self.build_hasher.hash_one(key);

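        // On 32-bit targets, fold the upper half of the 64-bit hash into the
        // lower half so the high bits still contribute to index and tag.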
        if cfg!(target_pointer_width = "32") {
            ((hash >> 32) as usize) ^ (hash as usize)
        } else {
            hash as usize
        }
    }
}

impl<K, V, S> Drop for Cache<K, V, S> {
    fn drop(&mut self) {
        if self.drop {
            // Reconstitute the boxed slice allocated in `new`; dropping it
            // drops each bucket, which in turn frees any live entries.
            drop(unsafe { Box::from_raw(self.entries.cast_mut()) });
        }
    }
}

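/// A single cache slot: an atomic state word plus (possibly uninitialized)
/// storage for one entry.
///
/// The 128-byte alignment gives each bucket its own cache line(s), avoiding
/// false sharing between adjacent slots.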
#[repr(C, align(128))]
#[doc(hidden)]
pub struct Bucket<T> {
    tag: AtomicUsize,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Bucket<T> {
    const NEEDS_DROP: bool = std::mem::needs_drop::<T>();

    /// Creates a new, empty, zero-initialized bucket.
    #[inline]
    pub const fn new() -> Self {
        Self { tag: AtomicUsize::new(0), data: UnsafeCell::new(MaybeUninit::zeroed()) }
    }

    #[inline]
    fn try_lock(&self, expected: Option<usize>) -> bool {
        self.try_lock_ret(expected).is_some()
    }

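    // Tries to set `LOCKED_BIT` with a compare-exchange. `expected == Some(tag)`
    // requires the state to match `tag` exactly (the reader path), while
    // `expected == None` only requires the bucket to be unlocked (the writer
    // path). Returns the previous state on success.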
    #[inline]
    fn try_lock_ret(&self, expected: Option<usize>) -> Option<usize> {
        let state = self.tag.load(Ordering::Relaxed);
        if let Some(expected) = expected {
            if state != expected {
                return None;
            }
        } else if state & LOCKED_BIT != 0 {
            return None;
        }
        self.tag
            .compare_exchange(state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .ok()
    }

    #[inline]
    fn is_alive(&self) -> bool {
        self.tag.load(Ordering::Relaxed) & ALIVE_BIT != 0
    }

    #[inline]
    fn unlock(&self, tag: usize) {
        self.tag.store(tag, Ordering::Release);
    }
}

unsafe impl<T: Send> Send for Bucket<T> {}
unsafe impl<T: Send> Sync for Bucket<T> {}

impl<T> Drop for Bucket<T> {
    fn drop(&mut self) {
        if Self::NEEDS_DROP && self.is_alive() {
            // SAFETY: `ALIVE_BIT` is only set once the slot has been fully
            // initialized.
            unsafe { self.data.get_mut().assume_init_drop() };
        }
    }
}

#[macro_export]
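/// Declares a `static` array of [`Bucket`]s and builds a [`Cache`] on top of
/// it, avoiding any heap allocation. `$size` must satisfy the same length
/// requirements as [`Cache::new`]; the hasher defaults to
/// `Default::default()` when omitted.
///
/// A sketch of the intended usage:
///
/// ```ignore
/// let cache: Cache<u64, u64> = static_cache!(u64, u64, 1024);
/// cache.insert(1, 2);
/// assert_eq!(cache.get(&1), Some(2));
/// ```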
macro_rules! static_cache {
    ($K:ty, $V:ty, $size:expr) => {
        $crate::static_cache!($K, $V, $size, Default::default())
    };
    ($K:ty, $V:ty, $size:expr, $hasher:expr) => {{
        static ENTRIES: [$crate::Bucket<($K, $V)>; $size] =
            [const { $crate::Bucket::new() }; $size];
        $crate::Cache::new_static(&ENTRIES, $hasher)
    }};
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    const fn iters(n: usize) -> usize {
        if cfg!(miri) { n / 10 } else { n }
    }

    type BH = std::hash::BuildHasherDefault<rapidhash::fast::RapidHasher<'static>>;
    type Cache<K, V> = super::Cache<K, V, BH>;

    fn new_cache<K: Hash + Eq, V: Clone>(size: usize) -> Cache<K, V> {
        Cache::new(size, Default::default())
    }

    #[test]
    fn basic_get_or_insert() {
        let cache = new_cache(1024);

        let mut computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(computed);
        assert_eq!(value, 84);

        computed = false;
        let value = cache.get_or_insert_with(42, |&k| {
            computed = true;
            k * 2
        });
        assert!(!computed);
        assert_eq!(value, 84);
    }

    #[test]
    fn different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with("hello".to_string(), |s| s.len());
        let v2 = cache.get_or_insert_with("world!".to_string(), |s| s.len());

        assert_eq!(v1, 5);
        assert_eq!(v2, 6);
    }

    #[test]
    fn new_dynamic_allocation() {
        let cache: Cache<u32, u32> = new_cache(64);
        assert_eq!(cache.capacity(), 64);

        cache.insert(1, 100);
        assert_eq!(cache.get(&1), Some(100));
    }

    #[test]
    fn get_miss() {
        let cache = new_cache::<u64, u64>(64);
        assert_eq!(cache.get(&999), None);
    }

    #[test]
    fn insert_and_get() {
        let cache: Cache<u64, String> = new_cache(64);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        assert_eq!(cache.get(&1), Some("one".to_string()));
        assert_eq!(cache.get(&2), Some("two".to_string()));
        assert_eq!(cache.get(&3), Some("three".to_string()));
        assert_eq!(cache.get(&4), None);
    }

    #[test]
    fn insert_twice() {
        let cache = new_cache(64);

        cache.insert(42, 1);
        assert_eq!(cache.get(&42), Some(1));

        cache.insert(42, 2);
        let v = cache.get(&42);
        assert!(v == Some(1) || v == Some(2));
    }

    #[test]
    fn get_or_insert_with_ref() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let value = cache.get_or_insert_with_ref(key, |s| s.len(), |s| s.to_string());
        assert_eq!(value, 5);

        let value2 = cache.get_or_insert_with_ref(key, |_| 999, |s| s.to_string());
        assert_eq!(value2, 5);
    }

    #[test]
    fn get_or_insert_with_ref_different_keys() {
        let cache: Cache<String, usize> = new_cache(1024);

        let v1 = cache.get_or_insert_with_ref("foo", |s| s.len(), |s| s.to_string());
        let v2 = cache.get_or_insert_with_ref("barbaz", |s| s.len(), |s| s.to_string());

        assert_eq!(v1, 3);
        assert_eq!(v2, 6);
    }

    #[test]
    fn capacity() {
        let cache = new_cache::<u64, u64>(256);
        assert_eq!(cache.capacity(), 256);

        let cache2 = new_cache::<u64, u64>(128);
        assert_eq!(cache2.capacity(), 128);
    }

    #[test]
    fn hasher() {
        let cache = new_cache::<u64, u64>(64);
        let _ = cache.hasher();
    }

    #[test]
    fn debug_impl() {
        let cache = new_cache::<u64, u64>(64);
        let debug_str = format!("{:?}", cache);
        assert!(debug_str.contains("Cache"));
    }

    #[test]
    fn bucket_new() {
        let bucket: Bucket<(u64, u64)> = Bucket::new();
        assert_eq!(bucket.tag.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn many_entries() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(500);

        for i in 0..n as u64 {
            cache.insert(i, i * 2);
        }

        let mut hits = 0;
        for i in 0..n as u64 {
            if cache.get(&i) == Some(i * 2) {
                hits += 1;
            }
        }
        assert!(hits > 0);
    }

    #[test]
    fn string_keys() {
        let cache: Cache<String, i32> = new_cache(1024);

        cache.insert("alpha".to_string(), 1);
        cache.insert("beta".to_string(), 2);
        cache.insert("gamma".to_string(), 3);

        assert_eq!(cache.get("alpha"), Some(1));
        assert_eq!(cache.get("beta"), Some(2));
        assert_eq!(cache.get("gamma"), Some(3));
    }

    #[test]
    fn zero_values() {
        let cache: Cache<u64, u64> = new_cache(64);

        cache.insert(0, 0);
        assert_eq!(cache.get(&0), Some(0));

        cache.insert(1, 0);
        assert_eq!(cache.get(&1), Some(0));
    }

    #[test]
    fn clone_value() {
        #[derive(Clone, PartialEq, Debug)]
        struct MyValue(u64);

        let cache: Cache<u64, MyValue> = new_cache(64);

        cache.insert(1, MyValue(123));
        let v = cache.get(&1);
        assert_eq!(v, Some(MyValue(123)));
    }

    fn run_concurrent<F>(num_threads: usize, f: F)
    where
        F: Fn(usize) + Send + Sync,
    {
        thread::scope(|s| {
            for t in 0..num_threads {
                let f = &f;
                s.spawn(move || f(t));
            }
        });
    }

    #[test]
    fn concurrent_reads() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        for i in 0..n as u64 {
            cache.insert(i, i * 10);
        }

        run_concurrent(4, |_| {
            for i in 0..n as u64 {
                let _ = cache.get(&i);
            }
        });
    }

    #[test]
    fn concurrent_writes() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(4, |t| {
            for i in 0..n {
                cache.insert((t * 1000 + i) as u64, i as u64);
            }
        });
    }

    #[test]
    fn concurrent_read_write() {
        let cache: Cache<u64, u64> = new_cache(256);
        let n = iters(1000);

        run_concurrent(2, |t| {
            for i in 0..n as u64 {
                if t == 0 {
                    cache.insert(i % 100, i);
                } else {
                    let _ = cache.get(&(i % 100));
                }
            }
        });
    }

    #[test]
    fn concurrent_get_or_insert() {
        let cache: Cache<u64, u64> = new_cache(1024);
        let n = iters(100);

        run_concurrent(8, |_| {
            for i in 0..n as u64 {
                let _ = cache.get_or_insert_with(i, |&k| k * 2);
            }
        });

        for i in 0..n as u64 {
            if let Some(v) = cache.get(&i) {
                assert_eq!(v, i * 2);
            }
        }
    }

    #[test]
    #[should_panic = "power of two"]
    fn non_power_of_two() {
        let _ = new_cache::<u64, u64>(100);
    }

    #[test]
    #[should_panic = "len must have its bottom N bits set to zero"]
    fn small_cache() {
        let _ = new_cache::<u64, u64>(2);
    }

    #[test]
    fn power_of_two_sizes() {
        for shift in 2..10 {
            let size = 1 << shift;
            let cache = new_cache::<u64, u64>(size);
            assert_eq!(cache.capacity(), size);
        }
    }

    #[test]
    fn equivalent_key_lookup() {
        let cache: Cache<String, i32> = new_cache(64);

        cache.insert("hello".to_string(), 42);

        assert_eq!(cache.get("hello"), Some(42));
    }

    #[test]
    fn large_values() {
        let cache: Cache<u64, [u8; 1000]> = new_cache(64);

        let large_value = [42u8; 1000];
        cache.insert(1, large_value);

        assert_eq!(cache.get(&1), Some(large_value));
    }

    #[test]
    fn send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache<u64, u64>>();
        assert_sync::<Cache<u64, u64>>();
        assert_send::<Bucket<(u64, u64)>>();
        assert_sync::<Bucket<(u64, u64)>>();
    }
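
    // A sketch exercising the `static_cache!` macro with an explicit hasher
    // (`BH` is the test module's hasher alias defined above).
    #[test]
    fn static_cache_macro() {
        let cache = crate::static_cache!(u64, u64, 64, BH::default());
        cache.insert(7, 49);
        assert_eq!(cache.get(&7), Some(49));
    }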

    #[test]
    fn get_or_try_insert_with_ok() {
        let cache = new_cache(1024);

        let mut computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(computed);
        assert_eq!(result, Ok(84));

        computed = false;
        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |&k| {
            computed = true;
            Ok(k * 2)
        });
        assert!(!computed);
        assert_eq!(result, Ok(84));
    }

    #[test]
    fn get_or_try_insert_with_err() {
        let cache: Cache<u64, u64> = new_cache(1024);

        let result: Result<u64, &str> = cache.get_or_try_insert_with(42, |_| Err("failed"));
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(&42), None);
    }

    #[test]
    fn get_or_try_insert_with_ref_ok() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |s| Ok(s.len()), |s| s.to_string());
        assert_eq!(result, Ok(5));

        let result2: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Ok(999), |s| s.to_string());
        assert_eq!(result2, Ok(5));
    }

    #[test]
    fn get_or_try_insert_with_ref_err() {
        let cache: Cache<String, usize> = new_cache(64);

        let key = "hello";
        let result: Result<usize, &str> =
            cache.get_or_try_insert_with_ref(key, |_| Err("failed"), |s| s.to_string());
        assert_eq!(result, Err("failed"));

        assert_eq!(cache.get(key), None);
    }

    #[test]
    fn drop_on_cache_drop() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            cache.insert(DropKey(2), DropValue(200));
            cache.insert(DropKey(3), DropValue(300));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
        }
        // All three live (key, value) pairs are dropped with the cache.
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 6);
    }

    #[test]
    fn drop_on_eviction() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        #[derive(Clone, Hash, Eq, PartialEq)]
        struct DropKey(u64);
        impl Drop for DropKey {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        #[derive(Clone)]
        struct DropValue(#[allow(dead_code)] u64);
        impl Drop for DropValue {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::SeqCst);
            }
        }

        DROP_COUNT.store(0, Ordering::SeqCst);
        {
            let cache: super::Cache<DropKey, DropValue, BH> =
                super::Cache::new(64, Default::default());
            cache.insert(DropKey(1), DropValue(100));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
            // Overwriting the same key drops the evicted (key, value) pair.
            cache.insert(DropKey(1), DropValue(200));
            assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 2);
        }
        // The remaining live pair is dropped with the cache.
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 4);
    }
}