1use std::sync::Arc;
2#[cfg(feature = "metrics")]
3use std::sync::atomic::AtomicU64;
4use std::sync::atomic::{AtomicUsize, Ordering};
5
6use parking_lot::RwLock;
7
8use crate::erased::{Entry, ErasedKey, ErasedKeyLookup, ErasedKeyRef};
9use crate::guard::Guard;
10use crate::lifecycle::{DefaultLifecycle, Lifecycle};
11#[cfg(feature = "metrics")]
12use crate::metrics::CacheMetrics;
13use crate::shard::Shard;
14use crate::traits::{CacheKey, CacheKeyLookup};
15
/// A sharded, size-bounded cache storing type-erased entries.
///
/// Entries are spread across independently locked shards; `shard_count` is
/// always a power of two so `get_shard` can pick a shard by masking the key
/// hash. Global counters are kept with relaxed atomics, so they are
/// approximate snapshots under concurrency.
pub struct Cache<L: Lifecycle = DefaultLifecycle> {
    /// Per-shard storage, each behind its own `RwLock`.
    shards: Vec<RwLock<Shard>>,
    /// Callbacks fired on eviction, removal, and clear (always after the
    /// relevant shard lock has been released).
    lifecycle: Arc<L>,
    /// Approximate total size in bytes of all live entries.
    current_size: AtomicUsize,
    /// Approximate number of live entries.
    entry_count: AtomicUsize,
    /// Number of shards; invariant: a power of two (see `get_shard`).
    shard_count: usize,
    /// Configured capacity in bytes; only read back by `metrics()`, hence
    /// dead code when the metrics feature is off.
    #[cfg_attr(not(feature = "metrics"), allow(dead_code))]
    max_size_bytes: usize,
    // Metrics counters (feature-gated); all updated with relaxed ordering.
    #[cfg(feature = "metrics")]
    hits: AtomicU64,
    #[cfg(feature = "metrics")]
    misses: AtomicU64,
    #[cfg(feature = "metrics")]
    inserts: AtomicU64,
    #[cfg(feature = "metrics")]
    updates: AtomicU64,
    #[cfg(feature = "metrics")]
    evictions: AtomicU64,
    #[cfg(feature = "metrics")]
    removals: AtomicU64,
}
107
/// Minimum byte budget a single shard should manage; capacities smaller than
/// `shards * MIN_SHARD_SIZE` collapse to fewer shards.
const MIN_SHARD_SIZE: usize = 4096;

/// Shard count used when the caller does not suggest one.
const DEFAULT_SHARD_COUNT: usize = 64;

/// Computes the effective shard count for a cache of `capacity` bytes.
///
/// The result is always a power of two (required by `get_shard`, which masks
/// the hash with `shard_count - 1`), is at least 1, and never exceeds either
/// the caller's `desired_shards` or the number of `MIN_SHARD_SIZE` shards
/// that fit in `capacity`.
fn compute_shard_count(capacity: usize, desired_shards: usize) -> usize {
    // Cap the shard count so each shard manages at least MIN_SHARD_SIZE bytes
    // (a zero/tiny capacity still gets one shard).
    let max_shards = (capacity / MIN_SHARD_SIZE).max(1);
    let capped = desired_shards.min(max_shards).max(1);

    // Round DOWN to a power of two. Rounding up (`next_power_of_two`) could
    // exceed `max_shards`: e.g. capacity 12288 -> max_shards 3 -> 4 shards of
    // only 3072 bytes each, violating the MIN_SHARD_SIZE invariant above.
    // `capped >= 1`, so `leading_zeros() <= usize::BITS - 1` and the shift is
    // in range.
    1 << (usize::BITS - 1 - capped.leading_zeros())
}
132
133impl Cache<DefaultLifecycle> {
134 pub fn new(max_size_bytes: usize) -> Self {
145 let shard_count = compute_shard_count(max_size_bytes, DEFAULT_SHARD_COUNT);
146 Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, DefaultLifecycle)
147 }
148
149 pub fn with_shards(max_size_bytes: usize, shard_count: usize) -> Self {
158 let shard_count = compute_shard_count(max_size_bytes, shard_count);
159 Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, DefaultLifecycle)
160 }
161}
162
impl<L: Lifecycle> Cache<L> {
    /// Creates a cache with a custom lifecycle and an automatically chosen
    /// shard count.
    pub fn with_lifecycle(max_size_bytes: usize, lifecycle: L) -> Self {
        let shard_count = compute_shard_count(max_size_bytes, DEFAULT_SHARD_COUNT);
        Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, lifecycle)
    }

    /// Creates a cache with a custom lifecycle and a caller-suggested shard
    /// count (clamped/rounded by `compute_shard_count`).
    pub fn with_shards_and_lifecycle(
        max_size_bytes: usize,
        shard_count: usize,
        lifecycle: L,
    ) -> Self {
        let shard_count = compute_shard_count(max_size_bytes, shard_count);
        Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, lifecycle)
    }

    /// Shared constructor. `shard_count` must already be the final value
    /// produced by `compute_shard_count` (in particular, a power of two and
    /// nonzero — it is used as a divisor below and as a mask in `get_shard`).
    fn with_shards_and_lifecycle_internal(
        max_size_bytes: usize,
        shard_count: usize,
        lifecycle: L,
    ) -> Self {
        // The total byte budget is split evenly; each shard enforces only its
        // own share.
        let size_per_shard = max_size_bytes / shard_count;

        let shards = (0..shard_count).map(|_| RwLock::new(Shard::new(size_per_shard))).collect();

        Self {
            shards,
            lifecycle: Arc::new(lifecycle),
            current_size: AtomicUsize::new(0),
            entry_count: AtomicUsize::new(0),
            shard_count,
            max_size_bytes,
            #[cfg(feature = "metrics")]
            hits: AtomicU64::new(0),
            #[cfg(feature = "metrics")]
            misses: AtomicU64::new(0),
            #[cfg(feature = "metrics")]
            inserts: AtomicU64::new(0),
            #[cfg(feature = "metrics")]
            updates: AtomicU64::new(0),
            #[cfg(feature = "metrics")]
            evictions: AtomicU64::new(0),
            #[cfg(feature = "metrics")]
            removals: AtomicU64::new(0),
        }
    }

    /// Inserts `value` under `key`, returning the previously stored value
    /// for that key, if any (and if it downcasts to `K::Value`).
    ///
    /// The shard may evict other entries to stay within its byte budget;
    /// each eviction triggers `Lifecycle::on_evict`. All counter updates and
    /// lifecycle callbacks happen after the shard write lock is released.
    pub fn insert<K: CacheKey>(&self, key: K, value: K::Value) -> Option<K::Value> {
        let erased_key = ErasedKey::new(&key);
        let policy = key.policy();
        let entry = Entry::new(value, policy);
        let entry_size = entry.size;

        let shard_lock = self.get_shard(erased_key.hash);

        let mut shard = shard_lock.write();

        let (old_entry, stats, evicted_entries) = shard.insert(erased_key, entry);

        // Release the shard lock before touching counters or invoking
        // lifecycle hooks, so user callbacks never run under the lock.
        drop(shard);

        if let Some(ref old) = old_entry {
            // Replacement: adjust the running total by the size delta only.
            let size_diff = entry_size as isize - old.size as isize;
            if size_diff > 0 {
                self.current_size.fetch_add(size_diff as usize, Ordering::Relaxed);
            } else {
                self.current_size.fetch_sub((-size_diff) as usize, Ordering::Relaxed);
            }
            #[cfg(feature = "metrics")]
            self.updates.fetch_add(1, Ordering::Relaxed);
        } else {
            // Fresh insert: account for the new entry in full.
            self.current_size.fetch_add(entry_size, Ordering::Relaxed);
            self.entry_count.fetch_add(1, Ordering::Relaxed);
            #[cfg(feature = "metrics")]
            self.inserts.fetch_add(1, Ordering::Relaxed);
        }

        if stats.count > 0 {
            // Subtract whatever the shard evicted to make room.
            self.entry_count.fetch_sub(stats.count, Ordering::Relaxed);
            self.current_size.fetch_sub(stats.size, Ordering::Relaxed);
            #[cfg(feature = "metrics")]
            self.evictions.fetch_add(stats.count as u64, Ordering::Relaxed);

            for evicted in evicted_entries {
                self.lifecycle.on_evict(evicted.key.data.as_ref());
            }
        }

        old_entry.and_then(|e| e.into_value::<K::Value>())
    }

    /// Looks up `key`, returning a [`Guard`] that borrows the stored value
    /// while keeping the shard's read lock held.
    ///
    /// NOTE(review): the hit counter is bumped before the `value_ref`
    /// downcast, so a type-mismatched entry counts as a hit yet returns
    /// `None` — confirm that is intended.
    pub fn get<K: CacheKey>(&self, key: &K) -> Option<Guard<'_, K::Value>> {
        let key_ref = ErasedKeyRef::new(key);
        let shard_lock = self.get_shard(key_ref.hash);

        let shard = shard_lock.read();

        let Some(entry) = shard.get_ref(&key_ref) else {
            #[cfg(feature = "metrics")]
            self.misses.fetch_add(1, Ordering::Relaxed);
            return None;
        };

        #[cfg(feature = "metrics")]
        self.hits.fetch_add(1, Ordering::Relaxed);

        let value_ref = entry.value_ref::<K::Value>()?;
        let value_ptr = value_ref as *const K::Value;

        // SAFETY: `value_ptr` points into the shard, and `Guard` takes
        // ownership of the read guard, so the entry cannot be removed or
        // mutated while the `Guard` is alive (assumes `Guard` holds the
        // lock for its entire lifetime — confirm in guard.rs).
        unsafe { Some(Guard::new(shard, value_ptr)) }
    }

    /// Like [`Cache::get`], but clones the value out instead of returning a
    /// lock-holding guard.
    pub fn get_clone<K: CacheKey>(&self, key: &K) -> Option<K::Value>
    where
        K::Value: Clone,
    {
        let key_ref = ErasedKeyRef::new(key);
        let shard_lock = self.get_shard(key_ref.hash);

        let shard = shard_lock.read();

        let Some(entry) = shard.get_ref(&key_ref) else {
            #[cfg(feature = "metrics")]
            self.misses.fetch_add(1, Ordering::Relaxed);
            return None;
        };

        #[cfg(feature = "metrics")]
        self.hits.fetch_add(1, Ordering::Relaxed);

        entry.value_ref::<K::Value>().cloned()
    }

    /// Looks up an entry through a borrowed key form `Q` (e.g. `&str` pieces
    /// for an owned `String` key) without constructing an owned `K`.
    /// Returns a lock-holding [`Guard`], like [`Cache::get`].
    pub fn get_by<K, Q>(&self, key: &Q) -> Option<Guard<'_, K::Value>>
    where
        K: CacheKey,
        Q: CacheKeyLookup<K> + ?Sized,
    {
        let key_ref = ErasedKeyLookup::new(key);
        let shard_lock = self.get_shard(key_ref.hash);

        let shard = shard_lock.read();

        let Some(entry) = shard.get_ref_by(&key_ref) else {
            #[cfg(feature = "metrics")]
            self.misses.fetch_add(1, Ordering::Relaxed);
            return None;
        };

        #[cfg(feature = "metrics")]
        self.hits.fetch_add(1, Ordering::Relaxed);

        let value_ref = entry.value_ref::<K::Value>()?;
        let value_ptr = value_ref as *const K::Value;

        // SAFETY: same argument as in `get` — the `Guard` owns the read
        // guard, keeping the pointed-to entry alive and immutable.
        unsafe { Some(Guard::new(shard, value_ptr)) }
    }

    /// Borrowed-key variant of [`Cache::get_clone`].
    pub fn get_clone_by<K, Q>(&self, key: &Q) -> Option<K::Value>
    where
        K: CacheKey,
        K::Value: Clone,
        Q: CacheKeyLookup<K> + ?Sized,
    {
        let key_ref = ErasedKeyLookup::new(key);
        let shard_lock = self.get_shard(key_ref.hash);

        let shard = shard_lock.read();

        let Some(entry) = shard.get_ref_by(&key_ref) else {
            #[cfg(feature = "metrics")]
            self.misses.fetch_add(1, Ordering::Relaxed);
            return None;
        };

        #[cfg(feature = "metrics")]
        self.hits.fetch_add(1, Ordering::Relaxed);

        entry.value_ref::<K::Value>().cloned()
    }

    /// Removes `key`, returning its value if present (and if it downcasts
    /// to `K::Value`). Fires `Lifecycle::on_remove` after the shard lock is
    /// released.
    pub fn remove<K: CacheKey>(&self, key: &K) -> Option<K::Value> {
        let erased_key = ErasedKey::new(key);
        let shard_lock = self.get_shard(erased_key.hash);

        let mut shard = shard_lock.write();
        let (stored_key, entry) = shard.remove(&erased_key)?;

        self.current_size.fetch_sub(entry.size, Ordering::Relaxed);
        self.entry_count.fetch_sub(1, Ordering::Relaxed);

        #[cfg(feature = "metrics")]
        self.removals.fetch_add(1, Ordering::Relaxed);

        // Callback runs without the shard lock held.
        drop(shard);

        self.lifecycle.on_remove(stored_key.data.as_ref());

        entry.into_value::<K::Value>()
    }

    /// Returns `true` if `key` is currently present. Does not touch the
    /// hit/miss metrics.
    pub fn contains<K: CacheKey>(&self, key: &K) -> bool {
        let key_ref = ErasedKeyRef::new(key);
        let shard_lock = self.get_shard(key_ref.hash);
        let shard = shard_lock.read();

        shard.get_ref(&key_ref).is_some()
    }

    /// Borrowed-key variant of [`Cache::contains`].
    pub fn contains_by<K, Q>(&self, key: &Q) -> bool
    where
        K: CacheKey,
        Q: CacheKeyLookup<K> + ?Sized,
    {
        let key_ref = ErasedKeyLookup::new(key);
        let shard_lock = self.get_shard(key_ref.hash);
        let shard = shard_lock.read();

        shard.get_ref_by(&key_ref).is_some()
    }

    /// Approximate total size in bytes of all live entries.
    pub fn size(&self) -> usize {
        self.current_size.load(Ordering::Relaxed)
    }

    /// Approximate number of live entries.
    pub fn len(&self) -> usize {
        self.entry_count.load(Ordering::Relaxed)
    }

    /// Returns `true` if the cache holds no entries (approximate under
    /// concurrent mutation).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Removes every entry and resets all counters (including metrics).
    ///
    /// Shards are drained one at a time, so entries inserted concurrently
    /// into an already-drained shard can survive a `clear`. `on_clear` fires
    /// once per drained entry, after every shard lock has been released.
    pub fn clear(&self) {
        let mut all_entries = Vec::new();

        for shard_lock in &self.shards {
            let mut shard = shard_lock.write();
            all_entries.extend(shard.drain());
        }

        self.current_size.store(0, Ordering::Relaxed);
        self.entry_count.store(0, Ordering::Relaxed);

        #[cfg(feature = "metrics")]
        {
            self.hits.store(0, Ordering::Relaxed);
            self.misses.store(0, Ordering::Relaxed);
            self.inserts.store(0, Ordering::Relaxed);
            self.updates.store(0, Ordering::Relaxed);
            self.evictions.store(0, Ordering::Relaxed);
            self.removals.store(0, Ordering::Relaxed);
        }

        for evicted in all_entries {
            self.lifecycle.on_clear(evicted.key.data.as_ref());
        }
    }

    /// Returns a point-in-time snapshot of the cache's counters. Individual
    /// fields are loaded independently with relaxed ordering, so the
    /// snapshot is not atomic as a whole.
    #[cfg(feature = "metrics")]
    pub fn metrics(&self) -> CacheMetrics {
        CacheMetrics {
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            inserts: self.inserts.load(Ordering::Relaxed),
            updates: self.updates.load(Ordering::Relaxed),
            evictions: self.evictions.load(Ordering::Relaxed),
            removals: self.removals.load(Ordering::Relaxed),
            current_size_bytes: self.current_size.load(Ordering::Relaxed),
            capacity_bytes: self.max_size_bytes,
            entry_count: self.entry_count.load(Ordering::Relaxed),
        }
    }

    /// Maps a key hash to its shard. Relies on `shard_count` being a power
    /// of two so the mask always produces a valid index.
    fn get_shard(&self, hash: u64) -> &RwLock<Shard> {
        let index = (hash as usize) & (self.shard_count - 1);
        &self.shards[index]
    }
}
640
// SAFETY: NOTE(review): these impls are unconditional — they assert Send/Sync
// for every `L: Lifecycle` and for whatever the shards store, without
// requiring `L: Send + Sync`. That is only sound if the `Lifecycle` trait
// itself has `Send + Sync` supertraits and the erased entries are likewise
// thread-safe — confirm; otherwise these impls should carry explicit bounds
// (or be dropped in favor of the auto traits).
unsafe impl<L: Lifecycle> Send for Cache<L> {}
unsafe impl<L: Lifecycle> Sync for Cache<L> {}
645
#[cfg(test)]
mod tests {
    use super::*;
    use crate::DeepSizeOf;

    /// Minimal key type used by the basic cache tests.
    #[derive(Hash, Eq, PartialEq, Clone, Debug)]
    struct TestKey(u64);

    impl CacheKey for TestKey {
        type Value = TestValue;
    }

    /// Minimal value type; DeepSizeOf drives the size accounting under test.
    #[derive(Clone, Debug, PartialEq, DeepSizeOf)]
    struct TestValue {
        data: String,
    }

    #[test]
    fn test_compute_shard_count_scales_with_capacity() {
        // Capacities at or below one MIN_SHARD_SIZE collapse to one shard.
        assert_eq!(compute_shard_count(1024, 64), 1);
        assert_eq!(compute_shard_count(4095, 64), 1);

        assert_eq!(compute_shard_count(4096, 64), 1);

        assert_eq!(compute_shard_count(8192, 64), 2);

        assert_eq!(compute_shard_count(65536, 64), 16);

        // Large capacities are capped by the desired shard count.
        assert_eq!(compute_shard_count(256 * 1024, 64), 64);
        assert_eq!(compute_shard_count(1024 * 1024, 64), 64);

        // A larger desired count is honored only when capacity allows.
        assert_eq!(compute_shard_count(8192, 128), 2);
        assert_eq!(compute_shard_count(1024 * 1024, 128), 128);
    }

    #[test]
    fn test_cache_insert_and_get() {
        let cache = Cache::new(1024);

        let key = TestKey(1);
        let value = TestValue {
            data: "hello".to_string(),
        };

        cache.insert(key.clone(), value.clone());

        let retrieved = cache.get_clone(&key).expect("key should exist");
        assert_eq!(retrieved, value);
    }

    #[test]
    fn test_cache_remove() {
        let cache = Cache::new(1024);

        let key = TestKey(1);
        let value = TestValue {
            data: "hello".to_string(),
        };

        cache.insert(key.clone(), value.clone());
        assert!(cache.contains(&key));

        // remove returns the stored value and the key disappears.
        let removed = cache.remove(&key).expect("key should exist");
        assert_eq!(removed, value);
        assert!(!cache.contains(&key));
    }

    #[test]
    fn test_cache_eviction() {
        // Small budget forces evictions once inserts exceed capacity.
        let cache = Cache::with_shards(1000, 4);

        for i in 0..15 {
            let key = TestKey(i);
            let value = TestValue {
                data: "x".repeat(50),
            };
            cache.insert(key, value);
        }

        assert!(cache.len() < 15, "Cache should have evicted some entries");
        assert!(cache.size() <= 1000, "Cache size should be <= 1000, got {}", cache.size());
    }

    #[test]
    fn test_cache_concurrent_access() {
        use std::sync::Arc;
        use std::thread;

        let cache = Arc::new(Cache::new(10240));
        let mut handles = vec![];

        // Four writers on disjoint key ranges; each read-back (when the key
        // survives eviction) must see the value that thread wrote.
        for t in 0..4 {
            let cache = cache.clone();
            handles.push(thread::spawn(move || {
                for i in 0..100 {
                    let key = TestKey(t * 100 + i);
                    let value = TestValue {
                        data: format!("value-{}", i),
                    };
                    cache.insert(key.clone(), value.clone());

                    if let Some(retrieved) = cache.get_clone(&key) {
                        assert_eq!(retrieved, value);
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().expect("thread should not panic");
        }

        assert!(!cache.is_empty());
    }

    #[test]
    fn test_cache_is_send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache>();
        assert_sync::<Cache>();
    }

    /// Owned two-part key used by the borrowed-lookup tests below.
    #[derive(Hash, Eq, PartialEq, Clone, Debug)]
    struct DbCacheKey(String, String);

    impl CacheKey for DbCacheKey {
        type Value = TestValue;
    }

    /// Borrowed form of `DbCacheKey`; must hash identically to the owned key.
    struct DbCacheKeyRef<'a>(&'a str, &'a str);

    impl std::hash::Hash for DbCacheKeyRef<'_> {
        fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
            // Same field order as DbCacheKey's derived Hash.
            self.0.hash(state);
            self.1.hash(state);
        }
    }

    impl CacheKeyLookup<DbCacheKey> for DbCacheKeyRef<'_> {
        fn eq_key(&self, key: &DbCacheKey) -> bool {
            self.0 == key.0 && self.1 == key.1
        }

        fn to_owned_key(self) -> DbCacheKey {
            DbCacheKey(self.0.to_owned(), self.1.to_owned())
        }
    }

    #[test]
    fn test_borrowed_key_lookup_get_by() {
        let cache = Cache::new(1024);

        let key = DbCacheKey("namespace".to_string(), "database".to_string());
        let value = TestValue {
            data: "test_data".to_string(),
        };

        cache.insert(key.clone(), value.clone());

        // Lookup with borrowed parts — no owned key allocation needed.
        let borrowed_key = DbCacheKeyRef("namespace", "database");
        let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key);
        assert!(retrieved.is_some());
        assert_eq!(*retrieved.unwrap(), value);

        let borrowed_key_missing = DbCacheKeyRef("namespace", "missing");
        let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key_missing);
        assert!(retrieved.is_none());
    }

    #[test]
    fn test_borrowed_key_lookup_get_clone_by() {
        let cache = Cache::new(1024);

        let key = DbCacheKey("ns".to_string(), "db".to_string());
        let value = TestValue {
            data: "cloned_data".to_string(),
        };

        cache.insert(key.clone(), value.clone());

        let borrowed_key = DbCacheKeyRef("ns", "db");
        let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
        assert_eq!(retrieved, Some(value));

        let borrowed_key_missing = DbCacheKeyRef("ns", "missing");
        let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key_missing);
        assert_eq!(retrieved, None);
    }

    #[test]
    fn test_borrowed_key_lookup_contains_by() {
        let cache = Cache::new(1024);

        let key = DbCacheKey("catalog".to_string(), "schema".to_string());
        let value = TestValue {
            data: "contains_test".to_string(),
        };

        cache.insert(key.clone(), value);

        let borrowed_key = DbCacheKeyRef("catalog", "schema");
        assert!(cache.contains_by::<DbCacheKey, _>(&borrowed_key));

        let borrowed_key_missing = DbCacheKeyRef("catalog", "missing");
        assert!(!cache.contains_by::<DbCacheKey, _>(&borrowed_key_missing));
    }

    #[test]
    fn test_borrowed_key_lookup_multiple_entries() {
        let cache = Cache::new(4096);

        for i in 0..10 {
            let key = DbCacheKey(format!("ns{}", i), format!("db{}", i));
            let value = TestValue {
                data: format!("data{}", i),
            };
            cache.insert(key, value);
        }

        // Every entry must be reachable through its borrowed form.
        for i in 0..10 {
            let ns = format!("ns{}", i);
            let db = format!("db{}", i);
            let borrowed_key = DbCacheKeyRef(&ns, &db);

            let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
            assert!(retrieved.is_some());
            assert_eq!(retrieved.unwrap().data, format!("data{}", i));
        }
    }

    #[test]
    fn test_borrowed_key_existing_api_still_works() {
        let cache = Cache::new(1024);

        let key = DbCacheKey("test".to_string(), "key".to_string());
        let value = TestValue {
            data: "existing_api".to_string(),
        };

        cache.insert(key.clone(), value.clone());

        // Owned-key API remains usable alongside the *_by variants.
        let retrieved = cache.get(&key);
        assert!(retrieved.is_some());
        assert_eq!(*retrieved.unwrap(), value);

        assert!(cache.contains(&key));

        let cloned = cache.get_clone(&key);
        assert_eq!(cloned, Some(value));
    }

    use std::any::Any;
    use std::sync::Arc;
    use std::sync::atomic::AtomicUsize;

    use crate::CacheBuilder;
    use crate::lifecycle::Lifecycle;

    /// Lifecycle that counts how many times each callback fires; used by the
    /// lifecycle tests below.
    struct CountingLifecycle {
        evict_count: Arc<AtomicUsize>,
        remove_count: Arc<AtomicUsize>,
        clear_count: Arc<AtomicUsize>,
    }

    impl CountingLifecycle {
        /// Returns the lifecycle plus shared handles to its three counters.
        fn new() -> (Self, Arc<AtomicUsize>, Arc<AtomicUsize>, Arc<AtomicUsize>) {
            let evict_count = Arc::new(AtomicUsize::new(0));
            let remove_count = Arc::new(AtomicUsize::new(0));
            let clear_count = Arc::new(AtomicUsize::new(0));
            (
                Self {
                    evict_count: evict_count.clone(),
                    remove_count: remove_count.clone(),
                    clear_count: clear_count.clone(),
                },
                evict_count,
                remove_count,
                clear_count,
            )
        }
    }

    impl Lifecycle for CountingLifecycle {
        fn on_evict(&self, _key: &dyn Any) {
            self.evict_count.fetch_add(1, Ordering::Relaxed);
        }

        fn on_remove(&self, _key: &dyn Any) {
            self.remove_count.fetch_add(1, Ordering::Relaxed);
        }

        fn on_clear(&self, _key: &dyn Any) {
            self.clear_count.fetch_add(1, Ordering::Relaxed);
        }
    }

    #[test]
    fn test_lifecycle_on_evict() {
        let (lifecycle, evict_count, _, _) = CountingLifecycle::new();

        // Tiny single-shard cache so inserts are guaranteed to evict.
        let cache = CacheBuilder::new(500).shards(1).lifecycle(lifecycle).build();

        for i in 0..20 {
            let key = TestKey(i);
            let value = TestValue {
                data: "x".repeat(50),
            };
            cache.insert(key, value);
        }

        assert!(evict_count.load(Ordering::Relaxed) > 0, "Expected evictions but got none");
    }

    #[test]
    fn test_lifecycle_on_clear() {
        let (lifecycle, _, _, clear_count) = CountingLifecycle::new();

        let cache = CacheBuilder::new(4096).lifecycle(lifecycle).build();

        for i in 0..5 {
            let key = TestKey(i);
            let value = TestValue {
                data: format!("value{}", i),
            };
            cache.insert(key, value);
        }

        assert_eq!(clear_count.load(Ordering::Relaxed), 0);

        cache.clear();

        // One on_clear callback per drained entry.
        assert_eq!(clear_count.load(Ordering::Relaxed), 5, "Expected 5 clear callbacks");
    }

    #[test]
    fn test_lifecycle_on_remove() {
        let (lifecycle, _, remove_count, _) = CountingLifecycle::new();

        let cache = CacheBuilder::new(4096).lifecycle(lifecycle).build();

        let key = TestKey(1);
        let value = TestValue {
            data: "test".to_string(),
        };

        cache.insert(key.clone(), value);

        assert_eq!(remove_count.load(Ordering::Relaxed), 0);

        let removed = cache.remove(&key);
        assert!(removed.is_some());

        assert_eq!(remove_count.load(Ordering::Relaxed), 1, "Expected 1 remove callback");
    }

    #[test]
    fn test_lifecycle_typed_downcast() {
        use crate::TypedLifecycle;

        // Captures the concrete TestKey values passed to the typed callback,
        // proving the type-erased key downcasts correctly on eviction.
        let evicted_keys = Arc::new(std::sync::Mutex::new(Vec::new()));
        let keys_clone = evicted_keys.clone();

        let lifecycle = TypedLifecycle::<TestKey, _>::new(move |key| {
            keys_clone.lock().unwrap().push(key.0);
        });

        let cache = CacheBuilder::new(500).shards(1).lifecycle(lifecycle).build();

        for i in 0..20 {
            let key = TestKey(i);
            let value = TestValue {
                data: "x".repeat(50),
            };
            cache.insert(key, value);
        }

        let keys = evicted_keys.lock().unwrap();
        assert!(!keys.is_empty(), "Expected some evicted keys to be captured");
    }

    #[test]
    fn test_cache_with_lifecycle_is_send_sync() {
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}

        assert_send::<Cache>();
        assert_sync::<Cache>();

        assert_send::<Cache<CountingLifecycle>>();
        assert_sync::<Cache<CountingLifecycle>>();
    }
}