// priority_lfu/cache.rs

1#[cfg(feature = "metrics")]
2use std::sync::atomic::AtomicU64;
3use std::sync::atomic::{AtomicUsize, Ordering};
4
5use parking_lot::RwLock;
6
7use crate::erased::{Entry, ErasedKey, ErasedKeyLookup, ErasedKeyRef};
8use crate::guard::Guard;
9#[cfg(feature = "metrics")]
10use crate::metrics::CacheMetrics;
11use crate::shard::Shard;
12use crate::traits::{CacheKey, CacheKeyLookup};
13
/// Thread-safe cache with weight-stratified clock eviction.
///
/// The cache can be shared across threads via `Arc<Cache>`. All methods are synchronous
/// but safe to call from async contexts.
///
/// # Weight-Stratified Clock Eviction
///
/// This cache uses a two-level eviction strategy that combines policy-based prioritization
/// with recency and frequency tracking:
///
/// 1. **Policy-based stratification**: Each entry has an eviction policy (Critical, Standard, or
///    Volatile). When space is needed, lower-priority entries are evicted first:
///    - **Volatile** (lowest priority) - evicted first
///    - **Standard** (normal priority) - evicted if Volatile is empty
///    - **Critical** (highest priority) - evicted only as a last resort
///
/// 2. **Clock algorithm**: Within each policy bucket, a "clock sweep" finds eviction candidates.
///    The algorithm uses two signals:
///    - **Clock bit**: Set when an entry is accessed. During eviction, if set, the bit is cleared
///      and the entry gets another chance. If clear, the entry is a candidate.
///    - **Frequency counter** (0-255): Tracks access frequency. Even with a clear clock bit,
///      high-frequency entries get additional chances via frequency decay.
///
/// This approach ensures that frequently-accessed entries resist eviction regardless of policy,
/// while still respecting policy-based priorities during memory pressure.
///
/// # Sharding for Concurrency
///
/// The cache divides its capacity across multiple shards (up to 64 by default). Each shard
/// has its own lock, reducing contention in concurrent workloads. Keys are distributed to
/// shards via their hash value.
///
/// The shard count is automatically scaled based on capacity to ensure each shard has at
/// least 4KB of space. This prevents premature eviction due to uneven hash distribution:
///
/// - Large caches (>= 256KB): 64 shards
/// - Smaller caches: Scaled down proportionally (e.g., 64KB → 16 shards, 4KB → 1 shard)
///
/// # Async Usage
///
/// ```ignore
/// let cache = Arc::new(Cache::new(1024 * 1024));
///
/// // Share across tasks
/// let cache_clone = cache.clone();
/// tokio::spawn(async move {
///     // Use get_clone() in async contexts to avoid holding guards across await points
///     if let Some(value) = cache_clone.get_clone(&key) {
///         do_async_work(&value).await;
///     }
/// });
/// ```
pub struct Cache {
	/// Sharded storage; each shard is independently locked.
	shards: Vec<RwLock<Shard>>,
	/// Current total size in bytes across all shards.
	/// Updated with Relaxed atomics outside shard-level consistency, so
	/// the value may be momentarily stale under concurrent mutation.
	current_size: AtomicUsize,
	/// Total entry count across all shards (same staleness caveat as `current_size`).
	entry_count: AtomicUsize,
	/// Number of shards. Always a power of two: `get_shard` selects a shard
	/// by masking the hash with `shard_count - 1`.
	shard_count: usize,
	/// Maximum capacity in bytes (only read back when the `metrics` feature is on).
	#[cfg_attr(not(feature = "metrics"), allow(dead_code))]
	max_size_bytes: usize,
	/// Metrics: cache hits
	#[cfg(feature = "metrics")]
	hits: AtomicU64,
	/// Metrics: cache misses
	#[cfg(feature = "metrics")]
	misses: AtomicU64,
	/// Metrics: new inserts
	#[cfg(feature = "metrics")]
	inserts: AtomicU64,
	/// Metrics: updates (replaced existing key)
	#[cfg(feature = "metrics")]
	updates: AtomicU64,
	/// Metrics: evictions
	#[cfg(feature = "metrics")]
	evictions: AtomicU64,
	/// Metrics: explicit removals
	#[cfg(feature = "metrics")]
	removals: AtomicU64,
}
97
/// Minimum size per shard in bytes.
///
/// This ensures each shard has enough capacity to hold a reasonable number of entries,
/// preventing premature eviction due to hash distribution variance across shards.
/// With 4KB minimum, a shard can comfortably hold dozens of typical cache entries.
const MIN_SHARD_SIZE: usize = 4096;

/// Default number of shards for large caches.
///
/// More shards reduce contention but increase memory overhead.
/// This value is used when capacity is large enough to support it.
const DEFAULT_SHARD_COUNT: usize = 64;

/// Compute the optimal shard count for a given capacity.
///
/// The result is always a power of two (required by the mask-based shard
/// selection in `get_shard`) and never exceeds `capacity / MIN_SHARD_SIZE`,
/// so every shard keeps at least `MIN_SHARD_SIZE` bytes. This prevents
/// premature eviction due to uneven hash distribution.
fn compute_shard_count(capacity: usize, desired_shards: usize) -> usize {
	// Calculate max shards that maintain MIN_SHARD_SIZE per shard
	let max_shards = (capacity / MIN_SHARD_SIZE).max(1);

	// Bound the request, clamping to at least one shard.
	let bounded = desired_shards.min(max_shards).max(1);

	// Round DOWN to a power of two. Rounding up (`next_power_of_two`) would
	// exceed `max_shards` for non-power-of-two bounds (e.g. 5 -> 8) and
	// silently violate the MIN_SHARD_SIZE-per-shard invariant.
	if bounded.is_power_of_two() {
		bounded
	} else {
		// Largest power of two <= bounded (bounded >= 1, so this is safe).
		1 << (usize::BITS - 1 - bounded.leading_zeros())
	}
}
122
123impl Cache {
124	/// Create a new cache with the given maximum size in bytes.
125	///
126	/// Uses default configuration with automatic shard scaling. The number of shards
127	/// is chosen to balance concurrency (more shards = less contention) with per-shard
128	/// capacity (each shard needs enough space to avoid premature eviction).
129	///
130	/// - Large caches (>= 256KB): 64 shards
131	/// - Smaller caches: Scaled down to ensure at least 4KB per shard
132	///
133	/// For explicit control over shard count, use [`CacheBuilder`] or [`with_shards`].
134	pub fn new(max_size_bytes: usize) -> Self {
135		let shard_count = compute_shard_count(max_size_bytes, DEFAULT_SHARD_COUNT);
136		Self::with_shards_internal(max_size_bytes, shard_count)
137	}
138
139	/// Create with custom shard count.
140	///
141	/// More shards reduce contention but increase memory overhead.
142	/// Recommended: `num_cpus * 8` to `num_cpus * 16`.
143	///
144	/// **Note**: The shard count may be reduced if the capacity is too small to
145	/// support the requested number of shards (minimum 4KB per shard). This prevents
146	/// premature eviction due to uneven hash distribution.
147	pub fn with_shards(max_size_bytes: usize, shard_count: usize) -> Self {
148		let shard_count = compute_shard_count(max_size_bytes, shard_count);
149		Self::with_shards_internal(max_size_bytes, shard_count)
150	}
151
	/// Internal constructor that uses the shard count directly (already validated).
	///
	/// `shard_count` must be the output of `compute_shard_count` (a power of two,
	/// sized so each shard gets at least `MIN_SHARD_SIZE` bytes).
	fn with_shards_internal(max_size_bytes: usize, shard_count: usize) -> Self {
		// Divide capacity per shard. Integer division may leave a remainder of
		// up to `shard_count - 1` bytes of total capacity unused.
		let size_per_shard = max_size_bytes / shard_count;

		// Create shards, each behind its own lock to reduce contention.
		let shards = (0..shard_count).map(|_| RwLock::new(Shard::new(size_per_shard))).collect();

		Self {
			shards,
			current_size: AtomicUsize::new(0),
			entry_count: AtomicUsize::new(0),
			shard_count,
			max_size_bytes,
			// Metric counters all start at zero; they only exist when the
			// `metrics` feature is enabled.
			#[cfg(feature = "metrics")]
			hits: AtomicU64::new(0),
			#[cfg(feature = "metrics")]
			misses: AtomicU64::new(0),
			#[cfg(feature = "metrics")]
			inserts: AtomicU64::new(0),
			#[cfg(feature = "metrics")]
			updates: AtomicU64::new(0),
			#[cfg(feature = "metrics")]
			evictions: AtomicU64::new(0),
			#[cfg(feature = "metrics")]
			removals: AtomicU64::new(0),
		}
	}
180
181	/// Insert a key-value pair. Evicts items if necessary.
182	///
183	/// Returns the previous value if the key existed.
184	///
185	/// # Runtime Complexity
186	///
187	/// Expected case: O(1) for successful insertion without eviction.
188	///
189	/// Worst case: O(n) where n is the number of entries per shard.
190	/// Eviction happens via clock hand advancement within the shard.
191	pub fn insert<K: CacheKey>(&self, key: K, value: K::Value) -> Option<K::Value> {
192		let erased_key = ErasedKey::new(&key);
193		let policy = key.policy();
194		let entry = Entry::new(value, policy);
195		let entry_size = entry.size;
196
197		// Get the shard
198		let shard_lock = self.get_shard(erased_key.hash);
199
200		// Acquire write lock
201		let mut shard = shard_lock.write();
202
203		// Insert (handles eviction internally via Clock-PRO)
204		let (old_entry, (num_evictions, evicted_size)) = shard.insert(erased_key, entry);
205
206		if let Some(ref old) = old_entry {
207			// Update size (might be different)
208			let size_diff = entry_size as isize - old.size as isize;
209			if size_diff > 0 {
210				self.current_size.fetch_add(size_diff as usize, Ordering::Relaxed);
211			} else {
212				self.current_size.fetch_sub((-size_diff) as usize, Ordering::Relaxed);
213			}
214			// Metrics: track update
215			#[cfg(feature = "metrics")]
216			self.updates.fetch_add(1, Ordering::Relaxed);
217		} else {
218			// New entry
219			self.current_size.fetch_add(entry_size, Ordering::Relaxed);
220			self.entry_count.fetch_add(1, Ordering::Relaxed);
221			// Metrics: track insert
222			#[cfg(feature = "metrics")]
223			self.inserts.fetch_add(1, Ordering::Relaxed);
224		}
225
226		// Account for evictions
227		if num_evictions > 0 {
228			self.entry_count.fetch_sub(num_evictions, Ordering::Relaxed);
229			self.current_size.fetch_sub(evicted_size, Ordering::Relaxed);
230			// Metrics: track evictions
231			#[cfg(feature = "metrics")]
232			self.evictions.fetch_add(num_evictions as u64, Ordering::Relaxed);
233		}
234
235		old_entry.and_then(|e| e.into_value::<K::Value>())
236	}
237
	/// Retrieve a value via guard. The guard holds a read lock on the shard.
	///
	/// # Warning
	///
	/// Do NOT hold this guard across `.await` points. Use `get_clone()` instead
	/// for async contexts.
	///
	/// # Runtime Complexity
	///
	/// Expected case: O(1)
	///
	/// This method performs a zero-allocation hash table lookup and atomically
	/// increments the reference counter for Clock-PRO.
	pub fn get<K: CacheKey>(&self, key: &K) -> Option<Guard<'_, K::Value>> {
		let key_ref = ErasedKeyRef::new(key);
		let shard_lock = self.get_shard(key_ref.hash);

		// Acquire read lock
		let shard = shard_lock.read();

		// Look up entry (bumps reference counter internally, zero allocation)
		let Some(entry) = shard.get_ref(&key_ref) else {
			// Metrics: track miss
			#[cfg(feature = "metrics")]
			self.misses.fetch_add(1, Ordering::Relaxed);
			return None;
		};

		// Metrics: track hit
		#[cfg(feature = "metrics")]
		self.hits.fetch_add(1, Ordering::Relaxed);

		// Get pointer to value (use value_ref for type-safe downcast).
		// NOTE(review): if the downcast fails, the `?` returns None even though a
		// hit was already counted above — confirm this is the intended accounting.
		let value_ref = entry.value_ref::<K::Value>()?;
		let value_ptr = value_ref as *const K::Value;

		// SAFETY: We hold a read lock on the shard, so the entry won't be modified
		// or dropped while the guard exists. The guard ties its lifetime to the lock.
		unsafe { Some(Guard::new(shard, value_ptr)) }
	}
278
279	/// Retrieve a cloned value. Safe to hold across `.await` points.
280	///
281	/// Requires `V: Clone`. This is the preferred method for async code.
282	///
283	/// # Runtime Complexity
284	///
285	/// Expected case: O(1) + O(m) where m is the cost of cloning the value.
286	///
287	/// This method performs a hash table lookup in O(1) expected time, then clones
288	/// the underlying value. If cloning is expensive, consider using `Arc<T>` as your
289	/// value type, which makes cloning O(1).
290	pub fn get_clone<K: CacheKey>(&self, key: &K) -> Option<K::Value>
291	where
292		K::Value: Clone,
293	{
294		let key_ref = ErasedKeyRef::new(key);
295		let shard_lock = self.get_shard(key_ref.hash);
296
297		// Short-lived read lock
298		let shard = shard_lock.read();
299
300		let Some(entry) = shard.get_ref(&key_ref) else {
301			// Metrics: track miss
302			#[cfg(feature = "metrics")]
303			self.misses.fetch_add(1, Ordering::Relaxed);
304			return None;
305		};
306
307		// Metrics: track hit
308		#[cfg(feature = "metrics")]
309		self.hits.fetch_add(1, Ordering::Relaxed);
310
311		// Clone the value
312		entry.value_ref::<K::Value>().cloned()
313	}
314
	/// Retrieve a value via guard using a borrowed lookup key (zero allocation).
	///
	/// This method allows looking up entries using a borrowed key type `Q` that
	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups. For example,
	/// you can use `(&str, &str)` to look up entries stored with `(String, String)` keys.
	///
	/// # Warning
	///
	/// Do NOT hold this guard across `.await` points. Use `get_clone_by()` instead
	/// for async contexts.
	///
	/// # Example
	///
	/// ```ignore
	/// // Define borrowed lookup type
	/// struct DbCacheKeyRef<'a>(&'a str, &'a str);
	///
	/// impl Hash for DbCacheKeyRef<'_> {
	///     fn hash<H: Hasher>(&self, state: &mut H) {
	///         self.0.hash(state);
	///         self.1.hash(state);
	///     }
	/// }
	///
	/// impl CacheKeyLookup<DbCacheKey> for DbCacheKeyRef<'_> {
	///     fn eq_key(&self, key: &DbCacheKey) -> bool {
	///         self.0 == key.0 && self.1 == key.1
	///     }
	/// }
	///
	/// // Zero-allocation lookup
	/// let value = cache.get_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
	/// ```
	pub fn get_by<K, Q>(&self, key: &Q) -> Option<Guard<'_, K::Value>>
	where
		K: CacheKey,
		Q: CacheKeyLookup<K> + ?Sized,
	{
		let key_ref = ErasedKeyLookup::new(key);
		let shard_lock = self.get_shard(key_ref.hash);

		// Acquire read lock
		let shard = shard_lock.read();

		// Look up entry (bumps reference counter internally, zero allocation)
		let Some(entry) = shard.get_ref_by(&key_ref) else {
			// Metrics: track miss
			#[cfg(feature = "metrics")]
			self.misses.fetch_add(1, Ordering::Relaxed);
			return None;
		};

		// Metrics: track hit
		#[cfg(feature = "metrics")]
		self.hits.fetch_add(1, Ordering::Relaxed);

		// Get pointer to value (use value_ref for type-safe downcast).
		// NOTE(review): a failed downcast here returns None after the hit was
		// already counted — same accounting caveat as `get`.
		let value_ref = entry.value_ref::<K::Value>()?;
		let value_ptr = value_ref as *const K::Value;

		// SAFETY: We hold a read lock on the shard, so the entry won't be modified
		// or dropped while the guard exists. The guard ties its lifetime to the lock.
		unsafe { Some(Guard::new(shard, value_ptr)) }
	}
379
380	/// Retrieve a cloned value using a borrowed lookup key. Safe to hold across `.await` points.
381	///
382	/// This method allows looking up entries using a borrowed key type `Q` that
383	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups. For example,
384	/// you can use `(&str, &str)` to look up entries stored with `(String, String)` keys.
385	///
386	/// Requires `V: Clone`. This is the preferred method for async code.
387	///
388	/// # Example
389	///
390	/// ```ignore
391	/// // Zero-allocation lookup with cloned result
392	/// let value = cache.get_clone_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
393	/// ```
394	pub fn get_clone_by<K, Q>(&self, key: &Q) -> Option<K::Value>
395	where
396		K: CacheKey,
397		K::Value: Clone,
398		Q: CacheKeyLookup<K> + ?Sized,
399	{
400		let key_ref = ErasedKeyLookup::new(key);
401		let shard_lock = self.get_shard(key_ref.hash);
402
403		// Short-lived read lock
404		let shard = shard_lock.read();
405
406		let Some(entry) = shard.get_ref_by(&key_ref) else {
407			// Metrics: track miss
408			#[cfg(feature = "metrics")]
409			self.misses.fetch_add(1, Ordering::Relaxed);
410			return None;
411		};
412
413		// Metrics: track hit
414		#[cfg(feature = "metrics")]
415		self.hits.fetch_add(1, Ordering::Relaxed);
416
417		// Clone the value
418		entry.value_ref::<K::Value>().cloned()
419	}
420
421	/// Remove a key from the cache.
422	///
423	/// # Runtime Complexity
424	///
425	/// Expected case: O(1)
426	///
427	/// Worst case: O(n) where n is the number of entries per shard.
428	///
429	/// The worst case occurs when the entry is removed from an IndexMap segment,
430	/// which preserves insertion order and may require shifting elements. In practice,
431	/// most removals are O(1) amortized.
432	pub fn remove<K: CacheKey>(&self, key: &K) -> Option<K::Value> {
433		let erased_key = ErasedKey::new(key);
434		let shard_lock = self.get_shard(erased_key.hash);
435
436		let mut shard = shard_lock.write();
437		let entry = shard.remove(&erased_key)?;
438
439		self.current_size.fetch_sub(entry.size, Ordering::Relaxed);
440		self.entry_count.fetch_sub(1, Ordering::Relaxed);
441
442		// Metrics: track removal
443		#[cfg(feature = "metrics")]
444		self.removals.fetch_add(1, Ordering::Relaxed);
445
446		entry.into_value::<K::Value>()
447	}
448
449	/// Check if a key exists (zero allocation).
450	pub fn contains<K: CacheKey>(&self, key: &K) -> bool {
451		let key_ref = ErasedKeyRef::new(key);
452		let shard_lock = self.get_shard(key_ref.hash);
453		let shard = shard_lock.read();
454
455		// Use get_ref for zero-allocation lookup
456		shard.get_ref(&key_ref).is_some()
457	}
458
459	/// Check if a borrowed lookup key exists (zero allocation).
460	///
461	/// This method allows checking existence using a borrowed key type `Q` that
462	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups.
463	///
464	/// # Example
465	///
466	/// ```ignore
467	/// // Zero-allocation existence check
468	/// let exists = cache.contains_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
469	/// ```
470	pub fn contains_by<K, Q>(&self, key: &Q) -> bool
471	where
472		K: CacheKey,
473		Q: CacheKeyLookup<K> + ?Sized,
474	{
475		let key_ref = ErasedKeyLookup::new(key);
476		let shard_lock = self.get_shard(key_ref.hash);
477		let shard = shard_lock.read();
478
479		// Use get_ref_by for zero-allocation lookup
480		shard.get_ref_by(&key_ref).is_some()
481	}
482
483	/// Current total size in bytes.
484	pub fn size(&self) -> usize {
485		self.current_size.load(Ordering::Relaxed)
486	}
487
488	/// Number of entries across all types.
489	pub fn len(&self) -> usize {
490		self.entry_count.load(Ordering::Relaxed)
491	}
492
493	/// Check if cache is empty.
494	pub fn is_empty(&self) -> bool {
495		self.len() == 0
496	}
497
498	/// Clear all entries.
499	///
500	/// # Runtime Complexity
501	///
502	/// O(n) where n is the total number of entries in the cache.
503	///
504	/// This method acquires a write lock on each shard sequentially and clears
505	/// all data structures (HashMap and IndexMaps).
506	pub fn clear(&self) {
507		for shard_lock in &self.shards {
508			let mut shard = shard_lock.write();
509			shard.clear();
510		}
511		self.current_size.store(0, Ordering::Relaxed);
512		self.entry_count.store(0, Ordering::Relaxed);
513
514		// Reset all metrics
515		#[cfg(feature = "metrics")]
516		{
517			self.hits.store(0, Ordering::Relaxed);
518			self.misses.store(0, Ordering::Relaxed);
519			self.inserts.store(0, Ordering::Relaxed);
520			self.updates.store(0, Ordering::Relaxed);
521			self.evictions.store(0, Ordering::Relaxed);
522			self.removals.store(0, Ordering::Relaxed);
523		}
524	}
525
526	/// Get performance metrics snapshot.
527	///
528	/// Returns a snapshot of cache performance metrics including hits, misses,
529	/// evictions, and memory utilization.
530	///
531	/// # Example
532	///
533	/// ```
534	/// use priority_lfu::Cache;
535	///
536	/// let cache = Cache::new(1024 * 1024);
537	/// // ... perform cache operations ...
538	///
539	/// let metrics = cache.metrics();
540	/// println!("Hit rate: {:.2}%", metrics.hit_rate() * 100.0);
541	/// println!("Cache utilization: {:.2}%", metrics.utilization() * 100.0);
542	/// println!("Total evictions: {}", metrics.evictions);
543	/// ```
544	#[cfg(feature = "metrics")]
545	pub fn metrics(&self) -> CacheMetrics {
546		CacheMetrics {
547			hits: self.hits.load(Ordering::Relaxed),
548			misses: self.misses.load(Ordering::Relaxed),
549			inserts: self.inserts.load(Ordering::Relaxed),
550			updates: self.updates.load(Ordering::Relaxed),
551			evictions: self.evictions.load(Ordering::Relaxed),
552			removals: self.removals.load(Ordering::Relaxed),
553			current_size_bytes: self.current_size.load(Ordering::Relaxed),
554			capacity_bytes: self.max_size_bytes,
555			entry_count: self.entry_count.load(Ordering::Relaxed),
556		}
557	}
558
559	/// Get the shard for a given hash.
560	fn get_shard(&self, hash: u64) -> &RwLock<Shard> {
561		let index = (hash as usize) & (self.shard_count - 1);
562		&self.shards[index]
563	}
564}
565
// Thread safety: Cache can be shared across threads.
// NOTE(review): these blanket `unsafe impl`s assert Send/Sync unconditionally,
// bypassing the compiler's auto-trait analysis. If every field
// (Vec<RwLock<Shard>>, AtomicUsize, AtomicU64) is already Send + Sync they are
// redundant; if `Shard` is NOT automatically Send + Sync (e.g. it holds
// type-erased values), a `// SAFETY:` argument is required here — confirm that
// Shard's contents are safe to share and move across threads.
unsafe impl Send for Cache {}
unsafe impl Sync for Cache {}
569
#[cfg(test)]
mod tests {
	use super::*;
	use crate::DeepSizeOf;

	// Simple integer key used by most tests.
	#[derive(Hash, Eq, PartialEq, Clone, Debug)]
	struct TestKey(u64);

	impl CacheKey for TestKey {
		type Value = TestValue;
	}

	// String-carrying value; DeepSizeOf drives the cache's size accounting.
	#[derive(Clone, Debug, PartialEq, DeepSizeOf)]
	struct TestValue {
		data: String,
	}

	#[test]
	fn test_compute_shard_count_scales_with_capacity() {
		// Very small capacity: should get 1 shard
		assert_eq!(compute_shard_count(1024, 64), 1);
		assert_eq!(compute_shard_count(4095, 64), 1);

		// Exactly MIN_SHARD_SIZE: 1 shard
		assert_eq!(compute_shard_count(4096, 64), 1);

		// 2x MIN_SHARD_SIZE: 2 shards (power of 2)
		assert_eq!(compute_shard_count(8192, 64), 2);

		// 16x MIN_SHARD_SIZE: 16 shards
		assert_eq!(compute_shard_count(65536, 64), 16);

		// Large capacity: full 64 shards
		assert_eq!(compute_shard_count(256 * 1024, 64), 64);
		assert_eq!(compute_shard_count(1024 * 1024, 64), 64);

		// Custom shard count is also bounded
		assert_eq!(compute_shard_count(8192, 128), 2); // Can't have 128 shards with 8KB
		assert_eq!(compute_shard_count(1024 * 1024, 128), 128); // Large cache can have 128
	}

	#[test]
	fn test_cache_insert_and_get() {
		let cache = Cache::new(1024);

		let key = TestKey(1);
		let value = TestValue {
			data: "hello".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		let retrieved = cache.get_clone(&key).expect("key should exist");
		assert_eq!(retrieved, value);
	}

	#[test]
	fn test_cache_remove() {
		let cache = Cache::new(1024);

		let key = TestKey(1);
		let value = TestValue {
			data: "hello".to_string(),
		};

		cache.insert(key.clone(), value.clone());
		assert!(cache.contains(&key));

		// remove returns the stored value and deletes the entry.
		let removed = cache.remove(&key).expect("key should exist");
		assert_eq!(removed, value);
		assert!(!cache.contains(&key));
	}

	#[test]
	fn test_cache_eviction() {
		// Small cache that will trigger eviction (use fewer shards for small capacity)
		let cache = Cache::with_shards(1000, 4);

		// Insert values that exceed capacity
		for i in 0..15 {
			let key = TestKey(i);
			let value = TestValue {
				data: "x".repeat(50),
			};
			cache.insert(key, value);
		}

		// Cache should have evicted some entries
		assert!(cache.len() < 15, "Cache should have evicted some entries");
		assert!(cache.size() <= 1000, "Cache size should be <= 1000, got {}", cache.size());
	}

	#[test]
	fn test_cache_concurrent_access() {
		use std::sync::Arc;
		use std::thread;

		let cache = Arc::new(Cache::new(10240));
		let mut handles = vec![];

		// Four writer threads, each with a disjoint key range.
		for t in 0..4 {
			let cache = cache.clone();
			handles.push(thread::spawn(move || {
				for i in 0..100 {
					let key = TestKey(t * 100 + i);
					let value = TestValue {
						data: format!("value-{}", i),
					};
					cache.insert(key.clone(), value.clone());

					// The entry may already have been evicted; only check when present.
					if let Some(retrieved) = cache.get_clone(&key) {
						assert_eq!(retrieved, value);
					}
				}
			}));
		}

		for handle in handles {
			handle.join().expect("thread should not panic");
		}

		assert!(!cache.is_empty());
	}

	#[test]
	fn test_cache_is_send_sync() {
		// Compile-time assertion that Cache implements Send + Sync.
		fn assert_send<T: Send>() {}
		fn assert_sync<T: Sync>() {}

		assert_send::<Cache>();
		assert_sync::<Cache>();
	}

	// Tests for borrowed key lookups

	#[derive(Hash, Eq, PartialEq, Clone, Debug)]
	struct DbCacheKey(String, String);

	impl CacheKey for DbCacheKey {
		type Value = TestValue;
	}

	// Borrowed counterpart of DbCacheKey for zero-allocation lookups.
	struct DbCacheKeyRef<'a>(&'a str, &'a str);

	impl std::hash::Hash for DbCacheKeyRef<'_> {
		fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
			// MUST match DbCacheKey's hash implementation
			self.0.hash(state);
			self.1.hash(state);
		}
	}

	impl CacheKeyLookup<DbCacheKey> for DbCacheKeyRef<'_> {
		fn eq_key(&self, key: &DbCacheKey) -> bool {
			self.0 == key.0 && self.1 == key.1
		}

		fn to_owned_key(self) -> DbCacheKey {
			DbCacheKey(self.0.to_owned(), self.1.to_owned())
		}
	}

	#[test]
	fn test_borrowed_key_lookup_get_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("namespace".to_string(), "database".to_string());
		let value = TestValue {
			data: "test_data".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Lookup using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("namespace", "database");
		let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key);
		assert!(retrieved.is_some());
		assert_eq!(*retrieved.unwrap(), value);

		// Lookup with non-existent key
		let borrowed_key_missing = DbCacheKeyRef("namespace", "missing");
		let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key_missing);
		assert!(retrieved.is_none());
	}

	#[test]
	fn test_borrowed_key_lookup_get_clone_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("ns".to_string(), "db".to_string());
		let value = TestValue {
			data: "cloned_data".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Lookup using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("ns", "db");
		let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
		assert_eq!(retrieved, Some(value));

		// Lookup with non-existent key
		let borrowed_key_missing = DbCacheKeyRef("ns", "missing");
		let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key_missing);
		assert_eq!(retrieved, None);
	}

	#[test]
	fn test_borrowed_key_lookup_contains_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("catalog".to_string(), "schema".to_string());
		let value = TestValue {
			data: "contains_test".to_string(),
		};

		cache.insert(key.clone(), value);

		// Check existence using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("catalog", "schema");
		assert!(cache.contains_by::<DbCacheKey, _>(&borrowed_key));

		// Check non-existent key
		let borrowed_key_missing = DbCacheKeyRef("catalog", "missing");
		assert!(!cache.contains_by::<DbCacheKey, _>(&borrowed_key_missing));
	}

	#[test]
	fn test_borrowed_key_lookup_multiple_entries() {
		let cache = Cache::new(4096);

		// Insert multiple entries
		for i in 0..10 {
			let key = DbCacheKey(format!("ns{}", i), format!("db{}", i));
			let value = TestValue {
				data: format!("data{}", i),
			};
			cache.insert(key, value);
		}

		// Lookup each using borrowed keys
		for i in 0..10 {
			let ns = format!("ns{}", i);
			let db = format!("db{}", i);
			let borrowed_key = DbCacheKeyRef(&ns, &db);

			let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
			assert!(retrieved.is_some());
			assert_eq!(retrieved.unwrap().data, format!("data{}", i));
		}
	}

	#[test]
	fn test_borrowed_key_existing_api_still_works() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("test".to_string(), "key".to_string());
		let value = TestValue {
			data: "existing_api".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Existing API should still work (blanket impl)
		let retrieved = cache.get(&key);
		assert!(retrieved.is_some());
		assert_eq!(*retrieved.unwrap(), value);

		assert!(cache.contains(&key));

		let cloned = cache.get_clone(&key);
		assert_eq!(cloned, Some(value));
	}
}