// priority_lfu/cache.rs
1use std::sync::Arc;
2#[cfg(feature = "metrics")]
3use std::sync::atomic::AtomicU64;
4use std::sync::atomic::{AtomicUsize, Ordering};
5
6use parking_lot::RwLock;
7
8use crate::erased::{Entry, ErasedKey, ErasedKeyLookup, ErasedKeyRef};
9use crate::guard::Guard;
10use crate::lifecycle::{DefaultLifecycle, Lifecycle};
11#[cfg(feature = "metrics")]
12use crate::metrics::CacheMetrics;
13use crate::shard::Shard;
14use crate::traits::{CacheKey, CacheKeyLookup};
15
16/// Thread-safe cache with weight-stratified clock eviction.
17///
18/// The cache can be shared across threads via `Arc<Cache>`. All methods are synchronous
19/// but safe to call from async contexts.
20///
21/// # Weight-Stratified Clock Eviction
22///
23/// This cache uses a two-level eviction strategy that combines policy-based prioritization
24/// with recency and frequency tracking:
25///
26/// 1. **Policy-based stratification**: Each entry has an eviction policy (Critical, Standard, or
27///    Volatile). When space is needed, lower-priority entries are evicted first:
28///    - **Volatile** (lowest priority) - evicted first
29///    - **Standard** (normal priority) - evicted if Volatile is empty
30///    - **Critical** (highest priority) - evicted only as a last resort
31///
32/// 2. **Clock algorithm**: Within each policy bucket, a "clock sweep" finds eviction candidates.
33///    The algorithm uses two signals:
34///    - **Clock bit**: Set when an entry is accessed. During eviction, if set, the bit is cleared
35///      and the entry gets another chance. If clear, the entry is a candidate.
36///    - **Frequency counter** (0-255): Tracks access frequency. Even with a clear clock bit,
37///      high-frequency entries get additional chances via frequency decay.
38///
39/// This approach ensures that frequently-accessed entries resist eviction regardless of policy,
40/// while still respecting policy-based priorities during memory pressure.
41///
42/// # Sharding for Concurrency
43///
44/// The cache divides its capacity across multiple shards (up to 64 by default). Each shard
45/// has its own lock, reducing contention in concurrent workloads. Keys are distributed to
46/// shards via their hash value.
47///
48/// The shard count is automatically scaled based on capacity to ensure each shard has at
49/// least 4KB of space. This prevents premature eviction due to uneven hash distribution:
50///
51/// - Large caches (>= 256KB): 64 shards
52/// - Smaller caches: Scaled down proportionally (e.g., 64KB → 16 shards, 4KB → 1 shard)
53///
54/// # Lifecycle Hooks
55///
56/// The cache supports lifecycle hooks via the [`Lifecycle`] trait. Hooks are called
57/// synchronously when entries are evicted, removed, or cleared. Use [`CacheBuilder`]
58/// to configure a custom lifecycle.
59///
60/// # Async Usage
61///
62/// ```ignore
63/// let cache = Arc::new(Cache::new(1024 * 1024));
64///
65/// // Share across tasks
66/// let cache_clone = cache.clone();
67/// tokio::spawn(async move {
68///     // Use get_clone() in async contexts to avoid holding guards across await points
69///     if let Some(value) = cache_clone.get_clone(&key) {
70///         do_async_work(&value).await;
71///     }
72/// });
73/// ```
pub struct Cache<L: Lifecycle = DefaultLifecycle> {
	/// Sharded storage; each shard carries its own lock to reduce contention.
	shards: Vec<RwLock<Shard>>,
	/// Lifecycle hooks, shared via `Arc` so they can be invoked after shard
	/// locks are released.
	lifecycle: Arc<L>,
	/// Current total size in bytes. Updated with relaxed atomics, so the value
	/// is approximate under concurrent mutation.
	current_size: AtomicUsize,
	/// Total entry count across all shards (relaxed atomics; approximate under
	/// concurrency).
	entry_count: AtomicUsize,
	/// Number of shards. Expected to be a power of two so `get_shard` can mask
	/// the hash with `shard_count - 1`.
	shard_count: usize,
	/// Maximum capacity in bytes. Only read back when reporting metrics, hence
	/// the dead-code allowance when the `metrics` feature is off.
	#[cfg_attr(not(feature = "metrics"), allow(dead_code))]
	max_size_bytes: usize,
	/// Metrics: cache hits
	#[cfg(feature = "metrics")]
	hits: AtomicU64,
	/// Metrics: cache misses
	#[cfg(feature = "metrics")]
	misses: AtomicU64,
	/// Metrics: new inserts
	#[cfg(feature = "metrics")]
	inserts: AtomicU64,
	/// Metrics: updates (replaced existing key)
	#[cfg(feature = "metrics")]
	updates: AtomicU64,
	/// Metrics: evictions
	#[cfg(feature = "metrics")]
	evictions: AtomicU64,
	/// Metrics: explicit removals
	#[cfg(feature = "metrics")]
	removals: AtomicU64,
}
107
/// Minimum size per shard in bytes.
///
/// This ensures each shard has enough capacity to hold a reasonable number of entries,
/// preventing premature eviction due to hash distribution variance across shards.
/// With 4KB minimum, a shard can comfortably hold dozens of typical cache entries.
const MIN_SHARD_SIZE: usize = 4096;

/// Default number of shards for large caches.
///
/// More shards reduce contention but increase memory overhead.
/// This value is used when capacity is large enough to support it.
const DEFAULT_SHARD_COUNT: usize = 64;

/// Compute the optimal shard count for a given capacity.
///
/// The result is always a power of two (required by `get_shard`, which masks
/// the hash with `shard_count - 1`) and never exceeds `desired_shards` or the
/// number of shards that keeps at least `MIN_SHARD_SIZE` bytes per shard.
///
/// Rounding is DOWNWARD to a power of two. Rounding up (as a bare
/// `next_power_of_two()` would do) could exceed the `MIN_SHARD_SIZE` bound:
/// e.g. a 12KB capacity allows at most 3 shards, and rounding 3 up to 4 would
/// leave only 3KB per shard, silently violating the invariant.
fn compute_shard_count(capacity: usize, desired_shards: usize) -> usize {
	// Upper bound that preserves MIN_SHARD_SIZE bytes per shard.
	let max_shards = (capacity / MIN_SHARD_SIZE).max(1);

	// Clamp to both limits, then round DOWN to the nearest power of two.
	let limit = desired_shards.min(max_shards).max(1);
	let rounded_up = limit.next_power_of_two();
	if rounded_up == limit {
		rounded_up
	} else {
		rounded_up / 2
	}
}
132
133impl Cache<DefaultLifecycle> {
134	/// Create a new cache with the given maximum size in bytes.
135	///
136	/// Uses default configuration with automatic shard scaling. The number of shards
137	/// is chosen to balance concurrency (more shards = less contention) with per-shard
138	/// capacity (each shard needs enough space to avoid premature eviction).
139	///
140	/// - Large caches (>= 256KB): 64 shards
141	/// - Smaller caches: Scaled down to ensure at least 4KB per shard
142	///
143	/// For explicit control over shard count or lifecycle hooks, use [`CacheBuilder`].
144	pub fn new(max_size_bytes: usize) -> Self {
145		let shard_count = compute_shard_count(max_size_bytes, DEFAULT_SHARD_COUNT);
146		Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, DefaultLifecycle)
147	}
148
149	/// Create with custom shard count.
150	///
151	/// More shards reduce contention but increase memory overhead.
152	/// Recommended: `num_cpus * 8` to `num_cpus * 16`.
153	///
154	/// **Note**: The shard count may be reduced if the capacity is too small to
155	/// support the requested number of shards (minimum 4KB per shard). This prevents
156	/// premature eviction due to uneven hash distribution.
157	pub fn with_shards(max_size_bytes: usize, shard_count: usize) -> Self {
158		let shard_count = compute_shard_count(max_size_bytes, shard_count);
159		Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, DefaultLifecycle)
160	}
161}
162
163impl<L: Lifecycle> Cache<L> {
164	/// Create a new cache with a custom lifecycle.
165	///
166	/// For easier configuration, use [`CacheBuilder`] instead.
167	pub fn with_lifecycle(max_size_bytes: usize, lifecycle: L) -> Self {
168		let shard_count = compute_shard_count(max_size_bytes, DEFAULT_SHARD_COUNT);
169		Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, lifecycle)
170	}
171
172	/// Create a new cache with custom shard count and lifecycle.
173	///
174	/// For easier configuration, use [`CacheBuilder`] instead.
175	pub fn with_shards_and_lifecycle(
176		max_size_bytes: usize,
177		shard_count: usize,
178		lifecycle: L,
179	) -> Self {
180		let shard_count = compute_shard_count(max_size_bytes, shard_count);
181		Self::with_shards_and_lifecycle_internal(max_size_bytes, shard_count, lifecycle)
182	}
183
184	/// Internal constructor that uses the shard count directly (already validated).
185	fn with_shards_and_lifecycle_internal(
186		max_size_bytes: usize,
187		shard_count: usize,
188		lifecycle: L,
189	) -> Self {
190		// Divide capacity per shard
191		let size_per_shard = max_size_bytes / shard_count;
192
193		// Create shards
194		let shards = (0..shard_count).map(|_| RwLock::new(Shard::new(size_per_shard))).collect();
195
196		Self {
197			shards,
198			lifecycle: Arc::new(lifecycle),
199			current_size: AtomicUsize::new(0),
200			entry_count: AtomicUsize::new(0),
201			shard_count,
202			max_size_bytes,
203			#[cfg(feature = "metrics")]
204			hits: AtomicU64::new(0),
205			#[cfg(feature = "metrics")]
206			misses: AtomicU64::new(0),
207			#[cfg(feature = "metrics")]
208			inserts: AtomicU64::new(0),
209			#[cfg(feature = "metrics")]
210			updates: AtomicU64::new(0),
211			#[cfg(feature = "metrics")]
212			evictions: AtomicU64::new(0),
213			#[cfg(feature = "metrics")]
214			removals: AtomicU64::new(0),
215		}
216	}
217
	/// Insert a key-value pair. Evicts items if necessary.
	///
	/// Returns the previous value if the key existed.
	///
	/// # Lifecycle Hooks
	///
	/// If entries are evicted to make room, [`Lifecycle::on_evict`] is called for each.
	/// Note: The replaced value (if any) does NOT trigger `on_evict` - only automatic
	/// evictions due to capacity pressure.
	///
	/// # Runtime Complexity
	///
	/// Expected case: O(1) for successful insertion without eviction.
	///
	/// Worst case: O(n) where n is the number of entries per shard.
	/// Eviction happens via clock hand advancement within the shard.
	pub fn insert<K: CacheKey>(&self, key: K, value: K::Value) -> Option<K::Value> {
		let erased_key = ErasedKey::new(&key);
		let policy = key.policy();
		let entry = Entry::new(value, policy);
		// Captured before the entry is moved into the shard.
		let entry_size = entry.size;

		// Get the shard
		let shard_lock = self.get_shard(erased_key.hash);

		// Acquire write lock
		let mut shard = shard_lock.write();

		// Insert (handles eviction internally via Clock-PRO)
		let (old_entry, stats, evicted_entries) = shard.insert(erased_key, entry);

		// Release lock before calling lifecycle hooks to avoid holding lock during user code.
		// Counter updates below therefore happen outside the lock and may
		// transiently lag the shard's true contents.
		drop(shard);

		if let Some(ref old) = old_entry {
			// Update size (might be different). Signed diff because the new
			// value can be smaller or larger than the one it replaced.
			let size_diff = entry_size as isize - old.size as isize;
			if size_diff > 0 {
				self.current_size.fetch_add(size_diff as usize, Ordering::Relaxed);
			} else {
				self.current_size.fetch_sub((-size_diff) as usize, Ordering::Relaxed);
			}
			// Metrics: track update (entry_count unchanged on replacement)
			#[cfg(feature = "metrics")]
			self.updates.fetch_add(1, Ordering::Relaxed);
		} else {
			// New entry
			self.current_size.fetch_add(entry_size, Ordering::Relaxed);
			self.entry_count.fetch_add(1, Ordering::Relaxed);
			// Metrics: track insert
			#[cfg(feature = "metrics")]
			self.inserts.fetch_add(1, Ordering::Relaxed);
		}

		// Account for evictions and call lifecycle hooks
		if stats.count > 0 {
			self.entry_count.fetch_sub(stats.count, Ordering::Relaxed);
			self.current_size.fetch_sub(stats.size, Ordering::Relaxed);
			// Metrics: track evictions
			#[cfg(feature = "metrics")]
			self.evictions.fetch_add(stats.count as u64, Ordering::Relaxed);

			// Call lifecycle hooks for each evicted entry (lock already released)
			for evicted in evicted_entries {
				self.lifecycle.on_evict(evicted.key.data.as_ref());
			}
		}

		// Downcast the replaced entry back to the caller's value type, if any.
		old_entry.and_then(|e| e.into_value::<K::Value>())
	}
288
	/// Retrieve a value via guard. The guard holds a read lock on the shard.
	///
	/// # Warning
	///
	/// Do NOT hold this guard across `.await` points. Use `get_clone()` instead
	/// for async contexts.
	///
	/// # Runtime Complexity
	///
	/// Expected case: O(1)
	///
	/// This method performs a zero-allocation hash table lookup and atomically
	/// increments the reference counter for Clock-PRO.
	pub fn get<K: CacheKey>(&self, key: &K) -> Option<Guard<'_, K::Value>> {
		// Borrowed, hash-carrying view of the key — no allocation.
		let key_ref = ErasedKeyRef::new(key);
		let shard_lock = self.get_shard(key_ref.hash);

		// Acquire read lock
		let shard = shard_lock.read();

		// Look up entry (bumps reference counter internally, zero allocation)
		let Some(entry) = shard.get_ref(&key_ref) else {
			// Metrics: track miss
			#[cfg(feature = "metrics")]
			self.misses.fetch_add(1, Ordering::Relaxed);
			return None;
		};

		// Metrics: track hit
		#[cfg(feature = "metrics")]
		self.hits.fetch_add(1, Ordering::Relaxed);

		// Get pointer to value (use value_ref for type-safe downcast);
		// `value_ref` yields None if the stored type does not match K::Value.
		let value_ref = entry.value_ref::<K::Value>()?;
		let value_ptr = value_ref as *const K::Value;

		// SAFETY: We hold a read lock on the shard, so the entry won't be modified
		// or dropped while the guard exists. The guard takes ownership of the read
		// guard (`shard`), tying the pointer's validity to the lock's lifetime.
		unsafe { Some(Guard::new(shard, value_ptr)) }
	}
329
330	/// Retrieve a cloned value. Safe to hold across `.await` points.
331	///
332	/// Requires `V: Clone`. This is the preferred method for async code.
333	///
334	/// # Runtime Complexity
335	///
336	/// Expected case: O(1) + O(m) where m is the cost of cloning the value.
337	///
338	/// This method performs a hash table lookup in O(1) expected time, then clones
339	/// the underlying value. If cloning is expensive, consider using `Arc<T>` as your
340	/// value type, which makes cloning O(1).
341	pub fn get_clone<K: CacheKey>(&self, key: &K) -> Option<K::Value>
342	where
343		K::Value: Clone,
344	{
345		let key_ref = ErasedKeyRef::new(key);
346		let shard_lock = self.get_shard(key_ref.hash);
347
348		// Short-lived read lock
349		let shard = shard_lock.read();
350
351		let Some(entry) = shard.get_ref(&key_ref) else {
352			// Metrics: track miss
353			#[cfg(feature = "metrics")]
354			self.misses.fetch_add(1, Ordering::Relaxed);
355			return None;
356		};
357
358		// Metrics: track hit
359		#[cfg(feature = "metrics")]
360		self.hits.fetch_add(1, Ordering::Relaxed);
361
362		// Clone the value
363		entry.value_ref::<K::Value>().cloned()
364	}
365
	/// Retrieve a value via guard using a borrowed lookup key (zero allocation).
	///
	/// This method allows looking up entries using a borrowed key type `Q` that
	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups. For example,
	/// you can use `(&str, &str)` to look up entries stored with `(String, String)` keys.
	///
	/// # Warning
	///
	/// Do NOT hold this guard across `.await` points. Use `get_clone_by()` instead
	/// for async contexts.
	///
	/// # Example
	///
	/// ```ignore
	/// // Define borrowed lookup type
	/// struct DbCacheKeyRef<'a>(&'a str, &'a str);
	///
	/// impl Hash for DbCacheKeyRef<'_> {
	///     fn hash<H: Hasher>(&self, state: &mut H) {
	///         self.0.hash(state);
	///         self.1.hash(state);
	///     }
	/// }
	///
	/// impl CacheKeyLookup<DbCacheKey> for DbCacheKeyRef<'_> {
	///     fn eq_key(&self, key: &DbCacheKey) -> bool {
	///         self.0 == key.0 && self.1 == key.1
	///     }
	/// }
	///
	/// // Zero-allocation lookup
	/// let value = cache.get_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
	/// ```
	pub fn get_by<K, Q>(&self, key: &Q) -> Option<Guard<'_, K::Value>>
	where
		K: CacheKey,
		Q: CacheKeyLookup<K> + ?Sized,
	{
		// Hash the borrowed key; must match the owned key's hash (see example).
		let key_ref = ErasedKeyLookup::new(key);
		let shard_lock = self.get_shard(key_ref.hash);

		// Acquire read lock
		let shard = shard_lock.read();

		// Look up entry (bumps reference counter internally, zero allocation)
		let Some(entry) = shard.get_ref_by(&key_ref) else {
			// Metrics: track miss
			#[cfg(feature = "metrics")]
			self.misses.fetch_add(1, Ordering::Relaxed);
			return None;
		};

		// Metrics: track hit
		#[cfg(feature = "metrics")]
		self.hits.fetch_add(1, Ordering::Relaxed);

		// Get pointer to value (use value_ref for type-safe downcast);
		// None here means the stored type does not match K::Value.
		let value_ref = entry.value_ref::<K::Value>()?;
		let value_ptr = value_ref as *const K::Value;

		// SAFETY: We hold a read lock on the shard, so the entry won't be modified
		// or dropped while the guard exists. The guard takes ownership of the read
		// guard (`shard`), tying the pointer's validity to the lock's lifetime.
		unsafe { Some(Guard::new(shard, value_ptr)) }
	}
430
431	/// Retrieve a cloned value using a borrowed lookup key. Safe to hold across `.await` points.
432	///
433	/// This method allows looking up entries using a borrowed key type `Q` that
434	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups. For example,
435	/// you can use `(&str, &str)` to look up entries stored with `(String, String)` keys.
436	///
437	/// Requires `V: Clone`. This is the preferred method for async code.
438	///
439	/// # Example
440	///
441	/// ```ignore
442	/// // Zero-allocation lookup with cloned result
443	/// let value = cache.get_clone_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
444	/// ```
445	pub fn get_clone_by<K, Q>(&self, key: &Q) -> Option<K::Value>
446	where
447		K: CacheKey,
448		K::Value: Clone,
449		Q: CacheKeyLookup<K> + ?Sized,
450	{
451		let key_ref = ErasedKeyLookup::new(key);
452		let shard_lock = self.get_shard(key_ref.hash);
453
454		// Short-lived read lock
455		let shard = shard_lock.read();
456
457		let Some(entry) = shard.get_ref_by(&key_ref) else {
458			// Metrics: track miss
459			#[cfg(feature = "metrics")]
460			self.misses.fetch_add(1, Ordering::Relaxed);
461			return None;
462		};
463
464		// Metrics: track hit
465		#[cfg(feature = "metrics")]
466		self.hits.fetch_add(1, Ordering::Relaxed);
467
468		// Clone the value
469		entry.value_ref::<K::Value>().cloned()
470	}
471
	/// Remove a key from the cache.
	///
	/// # Lifecycle Hooks
	///
	/// Calls [`Lifecycle::on_remove`] with the actual stored key (not the lookup key).
	///
	/// # Runtime Complexity
	///
	/// Expected case: O(1)
	///
	/// Worst case: O(n) where n is the number of entries per shard.
	///
	/// The worst case occurs when the entry is removed from an IndexMap segment,
	/// which preserves insertion order and may require shifting elements. In practice,
	/// most removals are O(1) amortized.
	pub fn remove<K: CacheKey>(&self, key: &K) -> Option<K::Value> {
		let erased_key = ErasedKey::new(key);
		let shard_lock = self.get_shard(erased_key.hash);

		let mut shard = shard_lock.write();
		// `?` returns None (releasing the lock) when the key is absent.
		let (stored_key, entry) = shard.remove(&erased_key)?;

		// Update counters while still holding the lock to prevent race with clear()
		self.current_size.fetch_sub(entry.size, Ordering::Relaxed);
		self.entry_count.fetch_sub(1, Ordering::Relaxed);

		// Metrics: track removal
		#[cfg(feature = "metrics")]
		self.removals.fetch_add(1, Ordering::Relaxed);

		// Release lock before calling lifecycle hooks so user code never runs
		// under the shard lock.
		drop(shard);

		// Call lifecycle hook with the actual stored key
		self.lifecycle.on_remove(stored_key.data.as_ref());

		// Downcast back to the caller's value type.
		entry.into_value::<K::Value>()
	}
510
511	/// Check if a key exists (zero allocation).
512	pub fn contains<K: CacheKey>(&self, key: &K) -> bool {
513		let key_ref = ErasedKeyRef::new(key);
514		let shard_lock = self.get_shard(key_ref.hash);
515		let shard = shard_lock.read();
516
517		// Use get_ref for zero-allocation lookup
518		shard.get_ref(&key_ref).is_some()
519	}
520
521	/// Check if a borrowed lookup key exists (zero allocation).
522	///
523	/// This method allows checking existence using a borrowed key type `Q` that
524	/// implements `CacheKeyLookup<K>`, enabling zero-allocation lookups.
525	///
526	/// # Example
527	///
528	/// ```ignore
529	/// // Zero-allocation existence check
530	/// let exists = cache.contains_by::<DbCacheKey, _>(&DbCacheKeyRef(ns, db));
531	/// ```
532	pub fn contains_by<K, Q>(&self, key: &Q) -> bool
533	where
534		K: CacheKey,
535		Q: CacheKeyLookup<K> + ?Sized,
536	{
537		let key_ref = ErasedKeyLookup::new(key);
538		let shard_lock = self.get_shard(key_ref.hash);
539		let shard = shard_lock.read();
540
541		// Use get_ref_by for zero-allocation lookup
542		shard.get_ref_by(&key_ref).is_some()
543	}
544
	/// Current total size in bytes.
	///
	/// Relaxed atomic read; under concurrent mutation the value is approximate.
	pub fn size(&self) -> usize {
		self.current_size.load(Ordering::Relaxed)
	}
549
	/// Number of entries across all types.
	///
	/// Relaxed atomic read; under concurrent mutation the value is approximate.
	pub fn len(&self) -> usize {
		self.entry_count.load(Ordering::Relaxed)
	}
554
555	/// Check if cache is empty.
556	pub fn is_empty(&self) -> bool {
557		self.len() == 0
558	}
559
	/// Clear all entries.
	///
	/// # Lifecycle Hooks
	///
	/// Calls [`Lifecycle::on_clear`] for each removed entry.
	///
	/// # Runtime Complexity
	///
	/// O(n) where n is the total number of entries in the cache.
	///
	/// This method acquires a write lock on each shard sequentially and clears
	/// all data structures (HashMap and IndexMaps).
	///
	/// Also resets all metrics counters to zero (when the `metrics` feature is
	/// enabled).
	pub fn clear(&self) {
		// Collect all entries from all shards, then call lifecycle hooks outside of locks
		let mut all_entries = Vec::new();

		// NOTE(review): shards are drained one at a time, and the counters are
		// unconditionally reset to 0 afterwards. An insert that lands in an
		// already-drained shard before the `store(0)` below will have its
		// size/count contribution clobbered, leaving the counters understated
		// relative to actual contents. Confirm whether this race is acceptable
		// or whether counters should be decremented per drained entry instead.
		for shard_lock in &self.shards {
			let mut shard = shard_lock.write();
			all_entries.extend(shard.drain());
		}

		self.current_size.store(0, Ordering::Relaxed);
		self.entry_count.store(0, Ordering::Relaxed);

		// Reset all metrics
		#[cfg(feature = "metrics")]
		{
			self.hits.store(0, Ordering::Relaxed);
			self.misses.store(0, Ordering::Relaxed);
			self.inserts.store(0, Ordering::Relaxed);
			self.updates.store(0, Ordering::Relaxed);
			self.evictions.store(0, Ordering::Relaxed);
			self.removals.store(0, Ordering::Relaxed);
		}

		// Call lifecycle hooks for each cleared entry (outside of locks)
		for evicted in all_entries {
			self.lifecycle.on_clear(evicted.key.data.as_ref());
		}
	}
600
	/// Get performance metrics snapshot.
	///
	/// Returns a snapshot of cache performance metrics including hits, misses,
	/// evictions, and memory utilization.
	///
	/// Each counter is loaded independently with relaxed ordering, so the
	/// snapshot is not atomic across fields under concurrent activity.
	///
	/// # Example
	///
	/// ```
	/// use priority_lfu::Cache;
	///
	/// let cache = Cache::new(1024 * 1024);
	/// // ... perform cache operations ...
	///
	/// let metrics = cache.metrics();
	/// println!("Hit rate: {:.2}%", metrics.hit_rate() * 100.0);
	/// println!("Cache utilization: {:.2}%", metrics.utilization() * 100.0);
	/// println!("Total evictions: {}", metrics.evictions);
	/// ```
	#[cfg(feature = "metrics")]
	pub fn metrics(&self) -> CacheMetrics {
		CacheMetrics {
			hits: self.hits.load(Ordering::Relaxed),
			misses: self.misses.load(Ordering::Relaxed),
			inserts: self.inserts.load(Ordering::Relaxed),
			updates: self.updates.load(Ordering::Relaxed),
			evictions: self.evictions.load(Ordering::Relaxed),
			removals: self.removals.load(Ordering::Relaxed),
			current_size_bytes: self.current_size.load(Ordering::Relaxed),
			capacity_bytes: self.max_size_bytes,
			entry_count: self.entry_count.load(Ordering::Relaxed),
		}
	}
633
	/// Get the shard for a given hash.
	///
	/// Relies on `shard_count` being a power of two (guaranteed by
	/// `compute_shard_count`), so masking with `shard_count - 1` is equivalent
	/// to `hash % shard_count` but avoids a division.
	fn get_shard(&self, hash: u64) -> &RwLock<Shard> {
		let index = (hash as usize) & (self.shard_count - 1);
		&self.shards[index]
	}
639}
640
// Thread safety: Cache can be shared across threads
// The L: Lifecycle bound already requires Send + Sync
//
// NOTE(review): these manual `unsafe impl`s override the compiler's automatic
// Send/Sync derivation. If every field (including `Shard` behind the RwLock)
// is itself Send + Sync, they are redundant and should be removed so the
// compiler proves safety; if some field is NOT Send/Sync, these impls assert
// an invariant that needs a written justification here. Confirm against
// `Shard`'s definition.
unsafe impl<L: Lifecycle> Send for Cache<L> {}
unsafe impl<L: Lifecycle> Sync for Cache<L> {}
645
#[cfg(test)]
mod tests {
	use super::*;
	use crate::DeepSizeOf;

	/// Simple integer key used by most tests.
	#[derive(Hash, Eq, PartialEq, Clone, Debug)]
	struct TestKey(u64);

	impl CacheKey for TestKey {
		type Value = TestValue;
	}

	/// Heap-allocating value so entry sizes are non-trivial.
	#[derive(Clone, Debug, PartialEq, DeepSizeOf)]
	struct TestValue {
		data: String,
	}

	#[test]
	fn test_compute_shard_count_scales_with_capacity() {
		// Very small capacity: should get 1 shard
		assert_eq!(compute_shard_count(1024, 64), 1);
		assert_eq!(compute_shard_count(4095, 64), 1);

		// Exactly MIN_SHARD_SIZE: 1 shard
		assert_eq!(compute_shard_count(4096, 64), 1);

		// 2x MIN_SHARD_SIZE: 2 shards (power of 2)
		assert_eq!(compute_shard_count(8192, 64), 2);

		// 16x MIN_SHARD_SIZE: 16 shards
		assert_eq!(compute_shard_count(65536, 64), 16);

		// Large capacity: full 64 shards
		assert_eq!(compute_shard_count(256 * 1024, 64), 64);
		assert_eq!(compute_shard_count(1024 * 1024, 64), 64);

		// Custom shard count is also bounded
		assert_eq!(compute_shard_count(8192, 128), 2); // Can't have 128 shards with 8KB
		assert_eq!(compute_shard_count(1024 * 1024, 128), 128); // Large cache can have 128
	}

	#[test]
	fn test_cache_insert_and_get() {
		let cache = Cache::new(1024);

		let key = TestKey(1);
		let value = TestValue {
			data: "hello".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		let retrieved = cache.get_clone(&key).expect("key should exist");
		assert_eq!(retrieved, value);
	}

	#[test]
	fn test_cache_remove() {
		let cache = Cache::new(1024);

		let key = TestKey(1);
		let value = TestValue {
			data: "hello".to_string(),
		};

		cache.insert(key.clone(), value.clone());
		assert!(cache.contains(&key));

		// remove returns the stored value and deletes the entry
		let removed = cache.remove(&key).expect("key should exist");
		assert_eq!(removed, value);
		assert!(!cache.contains(&key));
	}

	#[test]
	fn test_cache_eviction() {
		// Small cache that will trigger eviction (use fewer shards for small capacity)
		let cache = Cache::with_shards(1000, 4);

		// Insert values that exceed capacity
		for i in 0..15 {
			let key = TestKey(i);
			let value = TestValue {
				data: "x".repeat(50),
			};
			cache.insert(key, value);
		}

		// Cache should have evicted some entries
		assert!(cache.len() < 15, "Cache should have evicted some entries");
		assert!(cache.size() <= 1000, "Cache size should be <= 1000, got {}", cache.size());
	}

	#[test]
	fn test_cache_concurrent_access() {
		use std::sync::Arc;
		use std::thread;

		let cache = Arc::new(Cache::new(10240));
		let mut handles = vec![];

		// 4 writer/reader threads with disjoint key ranges
		for t in 0..4 {
			let cache = cache.clone();
			handles.push(thread::spawn(move || {
				for i in 0..100 {
					let key = TestKey(t * 100 + i);
					let value = TestValue {
						data: format!("value-{}", i),
					};
					cache.insert(key.clone(), value.clone());

					// Entry may have been evicted between insert and get,
					// so only assert on a successful lookup.
					if let Some(retrieved) = cache.get_clone(&key) {
						assert_eq!(retrieved, value);
					}
				}
			}));
		}

		for handle in handles {
			handle.join().expect("thread should not panic");
		}

		assert!(!cache.is_empty());
	}

	#[test]
	fn test_cache_is_send_sync() {
		fn assert_send<T: Send>() {}
		fn assert_sync<T: Sync>() {}

		assert_send::<Cache>();
		assert_sync::<Cache>();
	}

	// Tests for borrowed key lookups

	#[derive(Hash, Eq, PartialEq, Clone, Debug)]
	struct DbCacheKey(String, String);

	impl CacheKey for DbCacheKey {
		type Value = TestValue;
	}

	/// Borrowed counterpart of `DbCacheKey` for zero-allocation lookups.
	struct DbCacheKeyRef<'a>(&'a str, &'a str);

	impl std::hash::Hash for DbCacheKeyRef<'_> {
		fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
			// MUST match DbCacheKey's hash implementation
			self.0.hash(state);
			self.1.hash(state);
		}
	}

	impl CacheKeyLookup<DbCacheKey> for DbCacheKeyRef<'_> {
		fn eq_key(&self, key: &DbCacheKey) -> bool {
			self.0 == key.0 && self.1 == key.1
		}

		fn to_owned_key(self) -> DbCacheKey {
			DbCacheKey(self.0.to_owned(), self.1.to_owned())
		}
	}

	#[test]
	fn test_borrowed_key_lookup_get_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("namespace".to_string(), "database".to_string());
		let value = TestValue {
			data: "test_data".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Lookup using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("namespace", "database");
		let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key);
		assert!(retrieved.is_some());
		assert_eq!(*retrieved.unwrap(), value);

		// Lookup with non-existent key
		let borrowed_key_missing = DbCacheKeyRef("namespace", "missing");
		let retrieved = cache.get_by::<DbCacheKey, _>(&borrowed_key_missing);
		assert!(retrieved.is_none());
	}

	#[test]
	fn test_borrowed_key_lookup_get_clone_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("ns".to_string(), "db".to_string());
		let value = TestValue {
			data: "cloned_data".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Lookup using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("ns", "db");
		let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
		assert_eq!(retrieved, Some(value));

		// Lookup with non-existent key
		let borrowed_key_missing = DbCacheKeyRef("ns", "missing");
		let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key_missing);
		assert_eq!(retrieved, None);
	}

	#[test]
	fn test_borrowed_key_lookup_contains_by() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("catalog".to_string(), "schema".to_string());
		let value = TestValue {
			data: "contains_test".to_string(),
		};

		cache.insert(key.clone(), value);

		// Check existence using borrowed key (zero allocation)
		let borrowed_key = DbCacheKeyRef("catalog", "schema");
		assert!(cache.contains_by::<DbCacheKey, _>(&borrowed_key));

		// Check non-existent key
		let borrowed_key_missing = DbCacheKeyRef("catalog", "missing");
		assert!(!cache.contains_by::<DbCacheKey, _>(&borrowed_key_missing));
	}

	#[test]
	fn test_borrowed_key_lookup_multiple_entries() {
		let cache = Cache::new(4096);

		// Insert multiple entries
		for i in 0..10 {
			let key = DbCacheKey(format!("ns{}", i), format!("db{}", i));
			let value = TestValue {
				data: format!("data{}", i),
			};
			cache.insert(key, value);
		}

		// Lookup each using borrowed keys
		for i in 0..10 {
			let ns = format!("ns{}", i);
			let db = format!("db{}", i);
			let borrowed_key = DbCacheKeyRef(&ns, &db);

			let retrieved = cache.get_clone_by::<DbCacheKey, _>(&borrowed_key);
			assert!(retrieved.is_some());
			assert_eq!(retrieved.unwrap().data, format!("data{}", i));
		}
	}

	#[test]
	fn test_borrowed_key_existing_api_still_works() {
		let cache = Cache::new(1024);

		let key = DbCacheKey("test".to_string(), "key".to_string());
		let value = TestValue {
			data: "existing_api".to_string(),
		};

		cache.insert(key.clone(), value.clone());

		// Existing API should still work (blanket impl)
		let retrieved = cache.get(&key);
		assert!(retrieved.is_some());
		assert_eq!(*retrieved.unwrap(), value);

		assert!(cache.contains(&key));

		let cloned = cache.get_clone(&key);
		assert_eq!(cloned, Some(value));
	}

	// Lifecycle tests

	use std::any::Any;
	use std::sync::Arc;
	use std::sync::atomic::AtomicUsize;

	use crate::CacheBuilder;
	use crate::lifecycle::Lifecycle;

	/// Lifecycle that counts how many times each hook fires.
	struct CountingLifecycle {
		evict_count: Arc<AtomicUsize>,
		remove_count: Arc<AtomicUsize>,
		clear_count: Arc<AtomicUsize>,
	}

	impl CountingLifecycle {
		// Returns the lifecycle plus external handles to its three counters.
		fn new() -> (Self, Arc<AtomicUsize>, Arc<AtomicUsize>, Arc<AtomicUsize>) {
			let evict_count = Arc::new(AtomicUsize::new(0));
			let remove_count = Arc::new(AtomicUsize::new(0));
			let clear_count = Arc::new(AtomicUsize::new(0));
			(
				Self {
					evict_count: evict_count.clone(),
					remove_count: remove_count.clone(),
					clear_count: clear_count.clone(),
				},
				evict_count,
				remove_count,
				clear_count,
			)
		}
	}

	impl Lifecycle for CountingLifecycle {
		fn on_evict(&self, _key: &dyn Any) {
			self.evict_count.fetch_add(1, Ordering::Relaxed);
		}

		fn on_remove(&self, _key: &dyn Any) {
			self.remove_count.fetch_add(1, Ordering::Relaxed);
		}

		fn on_clear(&self, _key: &dyn Any) {
			self.clear_count.fetch_add(1, Ordering::Relaxed);
		}
	}

	#[test]
	fn test_lifecycle_on_evict() {
		let (lifecycle, evict_count, _, _) = CountingLifecycle::new();

		// Small cache that will trigger eviction
		let cache = CacheBuilder::new(500).shards(1).lifecycle(lifecycle).build();

		// Insert values that exceed capacity to trigger eviction
		for i in 0..20 {
			let key = TestKey(i);
			let value = TestValue {
				data: "x".repeat(50),
			};
			cache.insert(key, value);
		}

		// Should have evicted some entries
		assert!(evict_count.load(Ordering::Relaxed) > 0, "Expected evictions but got none");
	}

	#[test]
	fn test_lifecycle_on_clear() {
		let (lifecycle, _, _, clear_count) = CountingLifecycle::new();

		let cache = CacheBuilder::new(4096).lifecycle(lifecycle).build();

		// Insert some entries
		for i in 0..5 {
			let key = TestKey(i);
			let value = TestValue {
				data: format!("value{}", i),
			};
			cache.insert(key, value);
		}

		assert_eq!(clear_count.load(Ordering::Relaxed), 0);

		// Clear the cache
		cache.clear();

		// All entries should have triggered on_clear
		assert_eq!(clear_count.load(Ordering::Relaxed), 5, "Expected 5 clear callbacks");
	}

	#[test]
	fn test_lifecycle_on_remove() {
		let (lifecycle, _, remove_count, _) = CountingLifecycle::new();

		let cache = CacheBuilder::new(4096).lifecycle(lifecycle).build();

		let key = TestKey(1);
		let value = TestValue {
			data: "test".to_string(),
		};

		cache.insert(key.clone(), value);

		assert_eq!(remove_count.load(Ordering::Relaxed), 0);

		// remove should trigger on_remove
		let removed = cache.remove(&key);
		assert!(removed.is_some());

		assert_eq!(remove_count.load(Ordering::Relaxed), 1, "Expected 1 remove callback");
	}

	#[test]
	fn test_lifecycle_typed_downcast() {
		use crate::TypedLifecycle;

		let evicted_keys = Arc::new(std::sync::Mutex::new(Vec::new()));
		let keys_clone = evicted_keys.clone();

		// TypedLifecycle downcasts the erased key back to TestKey for us
		let lifecycle = TypedLifecycle::<TestKey, _>::new(move |key| {
			keys_clone.lock().unwrap().push(key.0);
		});

		// Small cache to trigger eviction
		let cache = CacheBuilder::new(500).shards(1).lifecycle(lifecycle).build();

		// Insert values that will be evicted
		for i in 0..20 {
			let key = TestKey(i);
			let value = TestValue {
				data: "x".repeat(50),
			};
			cache.insert(key, value);
		}

		// Check that we captured evicted keys
		let keys = evicted_keys.lock().unwrap();
		assert!(!keys.is_empty(), "Expected some evicted keys to be captured");
	}

	#[test]
	fn test_cache_with_lifecycle_is_send_sync() {
		fn assert_send<T: Send>() {}
		fn assert_sync<T: Sync>() {}

		// Cache with default lifecycle
		assert_send::<Cache>();
		assert_sync::<Cache>();

		// Cache with custom lifecycle
		assert_send::<Cache<CountingLifecycle>>();
		assert_sync::<Cache<CountingLifecycle>>();
	}
}
1074}