// cache_rs/slru.rs

//! Segmented Least Recently Used (SLRU) Cache Implementation
//!
//! SLRU is a scan-resistant cache algorithm that divides the cache into two segments:
//! a **probationary segment** for new entries and a **protected segment** for frequently
//! accessed entries. This design prevents one-time access patterns (scans) from evicting
//! valuable cached items.
//!
//! # How the Algorithm Works
//!
//! SLRU uses a two-tier approach to distinguish between items that are accessed once
//! (scans, sequential reads) versus items accessed repeatedly (working set).
//!
//! ## Segment Architecture
//!
//! ```text
//! ┌──────────────────────────────────────────────────────────────────────────┐
//! │                                SLRU Cache                                │
//! │                                                                          │
//! │  ┌────────────────────────────────────────────────────────────────────┐  │
//! │  │                      PROTECTED SEGMENT (20%)                       │  │
//! │  │             Frequently accessed items - harder to evict            │  │
//! │  │  ┌──────────────────────────────────────────────────────────────┐  │  │
//! │  │  │ MRU ◀──▶ [hot_1] ◀──▶ [hot_2] ◀──▶ ... ◀──▶ [demote] LRU    │  │  │
//! │  │  └──────────────────────────────────────────────────────────────┘  │  │
//! │  └────────────────────────────────────────────────────────────────────┘  │
//! │          │ demote                                  ▲ promote             │
//! │          ▼                                         │                     │
//! │  ┌────────────────────────────────────────────────────────────────────┐  │
//! │  │                     PROBATIONARY SEGMENT (80%)                     │  │
//! │  │            New items and demoted items - easier to evict           │  │
//! │  │  ┌──────────────────────────────────────────────────────────────┐  │  │
//! │  │  │ MRU ◀──▶ [new_1] ◀──▶ [new_2] ◀──▶ ... ◀──▶ [evict] LRU     │  │  │
//! │  │  └──────────────────────────────────────────────────────────────┘  │  │
//! │  └────────────────────────────────────────────────────────────────────┘  │
//! │                                    ▲                                     │
//! │                                    │ insert                              │
//! │                                new items                                 │
//! └──────────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Entry Lifecycle
//!
//! 1. **Insert**: New items enter the probationary segment
//! 2. **First access in probationary**: Item promoted to protected segment
//! 3. **Protected segment full**: LRU item demoted back to probationary
//! 4. **Eviction**: Always from the LRU end of the probationary segment
//!
//! ## Scan Resistance Example
//!
//! ```text
//! Initial state: Protected=[A, B, C], Probationary=[D, E, F]
//! (A, B, C are hot items; D, E, F are warm items)
//!
//! Sequential scan of X, Y, Z (one-time access):
//!   put(X) → Protected=[A, B, C], Probationary=[X, D, E]  (F evicted)
//!   put(Y) → Protected=[A, B, C], Probationary=[Y, X, D]  (E evicted)
//!   put(Z) → Protected=[A, B, C], Probationary=[Z, Y, X]  (D evicted)
//!
//! Hot items A, B, C remain in protected segment!
//! The scan only displaced probationary items.
//! ```
//!
//! ## Operations
//!
//! | Operation | Action | Time |
//! |-----------|--------|------|
//! | `get(key)` | Promote to protected if in probationary | O(1) |
//! | `put(key, value)` | Insert to probationary, may evict | O(1) |
//! | `remove(key)` | Remove from whichever segment contains it | O(1) |
//!
//! # Dual-Limit Capacity
//!
//! This implementation supports two independent limits:
//!
//! - **`max_entries`**: Maximum total items across both segments
//! - **`max_size`**: Maximum total size of content
//!
//! The protected segment size is configured separately (default: 20% of total).
//!
//! # Performance Characteristics
//!
//! | Metric | Value |
//! |--------|-------|
//! | Get | O(1) |
//! | Put | O(1) |
//! | Remove | O(1) |
//! | Memory per entry | ~90 bytes overhead + key×2 + value |
//!
//! Memory overhead includes: two list pointers, location tag, size tracking,
//! HashMap bucket, and allocator overhead.
//!
//! # When to Use SLRU
//!
//! **Good for:**
//! - Mixed workloads with both hot data and sequential scans
//! - Database buffer pools
//! - File system caches
//! - Any scenario where scans shouldn't evict the working set
//!
//! **Not ideal for:**
//! - Pure recency-based access patterns (LRU is simpler)
//! - Frequency-dominant patterns (LFU/LFUDA is better)
//! - Very small caches where the two-segment overhead isn't justified
//!
//! # Tuning the Protected Ratio
//!
//! The protected segment size controls the trade-off:
//! - **Larger protected**: More scan resistance, but new items evicted faster
//! - **Smaller protected**: Less scan resistance, but more room for new items
//!
//! Default is 20% protected, which works well for most workloads.
//!
//! # Thread Safety
//!
//! `SlruCache` is **not thread-safe**. For concurrent access, either:
//! - Wrap with `Mutex` or `RwLock`
//! - Use `ConcurrentSlruCache` (requires `concurrent` feature)
//!
//! # Examples
//!
//! ## Basic Usage
//!
//! ```
//! use cache_rs::SlruCache;
//! use cache_rs::config::SlruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! // Total capacity 100, protected segment 20
//! let config = SlruCacheConfig {
//!     capacity: NonZeroUsize::new(100).unwrap(),
//!     protected_capacity: NonZeroUsize::new(20).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = SlruCache::init(config, None);
//!
//! cache.put("a", 1, 1); // Enters probationary
//! cache.get(&"a");      // Promoted to protected!
//! cache.put("b", 2, 1); // Enters probationary
//!
//! assert_eq!(cache.get(&"a"), Some(&1)); // Still in protected
//! ```
//!
//! ## Scan Resistance Demo
//!
//! ```
//! use cache_rs::SlruCache;
//! use cache_rs::config::SlruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = SlruCacheConfig {
//!     capacity: NonZeroUsize::new(10).unwrap(),
//!     protected_capacity: NonZeroUsize::new(3).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache: SlruCache<i32, i32> = SlruCache::init(config, None);
//!
//! // Establish hot items in protected segment
//! for key in [1, 2, 3] {
//!     cache.put(key, 100, 1);
//!     cache.get(&key); // Promote to protected
//! }
//!
//! // Simulate a scan - these items only enter probationary
//! for i in 100..120 {
//!     cache.put(i, i, 1); // One-time insertions
//! }
//!
//! // Hot items survive the scan!
//! assert!(cache.get(&1).is_some());
//! assert!(cache.get(&2).is_some());
//! assert!(cache.get(&3).is_some());
//! ```
extern crate alloc;

use crate::config::SlruCacheConfig;
use crate::entry::CacheEntry;
use crate::list::{List, ListEntry};
use crate::metrics::{CacheMetrics, SlruCacheMetrics};

use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;

use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::num::NonZeroUsize;

#[cfg(feature = "hashbrown")]
use hashbrown::DefaultHashBuilder;
#[cfg(feature = "hashbrown")]
use hashbrown::HashMap;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;
#[cfg(not(feature = "hashbrown"))]
use std::collections::HashMap;
197
/// Entry location within the SLRU cache.
///
/// Tracks whether an entry is in the probationary or protected segment.
/// Stored per-entry (inside [`SlruMeta`]) so operations like `get` and
/// `remove` know which list a node belongs to without searching both.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Location {
    /// Entry is in the probationary segment (default for new entries)
    #[default]
    Probationary,
    /// Entry is in the protected segment (promoted on second access)
    Protected,
}
209
/// SLRU-specific metadata stored in each cache entry.
///
/// This uses the unified `CacheEntry<K, V, SlruMeta>` pattern, eliminating
/// the need for a complex tuple in the HashMap. Size and timestamps are
/// handled by `CacheMetadata`; this struct only holds SLRU-specific data.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct SlruMeta {
    /// Which segment this entry is in (defaults to `Probationary`)
    pub location: Location,
}
220
/// Internal SLRU segment containing the actual cache algorithm.
///
/// This is shared between `SlruCache` (single-threaded) and
/// `ConcurrentSlruCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// Uses `CacheEntry<K, V, SlruMeta>` for unified entry management with built-in
/// size tracking and timestamps. The `SlruMeta` stores segment location, and
/// all other metadata (size, last_accessed) is handled by `CacheMetadata`.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field. These pointers
/// are always valid as long as:
/// - The pointer was obtained from `probationary.add()` or `protected.add()`
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct SlruInner<K, V, S = DefaultHashBuilder> {
    /// Configuration for the SLRU cache
    config: SlruCacheConfig,

    /// The probationary list holding newer or less frequently accessed items
    probationary: List<CacheEntry<K, V, SlruMeta>>,

    /// The protected list holding frequently accessed items
    protected: List<CacheEntry<K, V, SlruMeta>>,

    /// Maps keys to their list nodes. All metadata (size, timestamp, location)
    /// is stored in the CacheEntry itself, not in the map.
    map: HashMap<K, *mut ListEntry<CacheEntry<K, V, SlruMeta>>, S>,

    /// Metrics for tracking cache performance and segment behavior
    metrics: SlruCacheMetrics,

    /// Current total size of cached content (sum of entry sizes)
    current_size: u64,

    /// Maximum content size the cache can hold
    max_size: u64,
}
261
262// SAFETY: SlruInner owns all data and raw pointers point only to nodes owned by
263// `probationary` or `protected` lists. Concurrent access is safe when wrapped in
264// proper synchronization primitives.
265unsafe impl<K: Send, V: Send, S: Send> Send for SlruInner<K, V, S> {}
266
267// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
268unsafe impl<K: Send, V: Send, S: Sync> Sync for SlruInner<K, V, S> {}
269
270impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruInner<K, V, S> {
271 /// Creates a new SLRU segment from a configuration.
272 ///
273 /// This is the **recommended** way to create an SLRU segment. All configuration
274 /// is specified through the [`SlruCacheConfig`] struct.
275 ///
276 /// # Arguments
277 ///
278 /// * `config` - Configuration specifying capacity, protected capacity, and optional size limit
279 /// * `hasher` - Hash builder for the internal HashMap
280 #[allow(dead_code)] // Used by concurrent module when feature is enabled
281 pub(crate) fn init(config: SlruCacheConfig, hasher: S) -> Self {
282 let capacity = config.capacity.get();
283 let protected = config.protected_capacity.get();
284
285 assert!(
286 protected < capacity,
287 "SlruCacheConfig invalid: protected_capacity ({}) must be strictly less than capacity ({})",
288 protected,
289 capacity
290 );
291
292 let probationary_max_size = NonZeroUsize::new(capacity - protected).unwrap();
293
294 SlruInner {
295 config,
296 probationary: List::new(probationary_max_size),
297 protected: List::new(config.protected_capacity),
298 map: HashMap::with_capacity_and_hasher(
299 config.capacity.get().next_power_of_two(),
300 hasher,
301 ),
302 metrics: SlruCacheMetrics::new(config.max_size, config.protected_capacity.get() as u64),
303 current_size: 0,
304 max_size: config.max_size,
305 }
306 }
307
308 /// Returns the maximum number of key-value pairs the segment can hold.
309 #[inline]
310 pub(crate) fn cap(&self) -> NonZeroUsize {
311 self.config.capacity
312 }
313
314 /// Returns the maximum size of the protected segment.
315 #[inline]
316 pub(crate) fn protected_max_size(&self) -> NonZeroUsize {
317 self.config.protected_capacity
318 }
319
320 /// Returns the current number of key-value pairs in the segment.
321 #[inline]
322 pub(crate) fn len(&self) -> usize {
323 self.map.len()
324 }
325
326 /// Returns `true` if the segment contains no key-value pairs.
327 #[inline]
328 pub(crate) fn is_empty(&self) -> bool {
329 self.map.is_empty()
330 }
331
332 /// Returns the current total size of cached content.
333 #[inline]
334 pub(crate) fn current_size(&self) -> u64 {
335 self.current_size
336 }
337
338 /// Returns the maximum content size the cache can hold.
339 #[inline]
340 pub(crate) fn max_size(&self) -> u64 {
341 self.max_size
342 }
343
344 /// Returns a reference to the metrics for this segment.
345 #[inline]
346 pub(crate) fn metrics(&self) -> &SlruCacheMetrics {
347 &self.metrics
348 }
349
350 /// Moves an entry from the probationary segment to the protected segment.
351 /// If the protected segment is full, the LRU item from protected is demoted to probationary.
352 ///
353 /// Returns a raw pointer to the entry in its new location.
354 unsafe fn promote_to_protected(
355 &mut self,
356 node: *mut ListEntry<CacheEntry<K, V, SlruMeta>>,
357 ) -> *mut ListEntry<CacheEntry<K, V, SlruMeta>> {
358 // Remove from probationary list
359 let boxed_entry = self
360 .probationary
361 .remove(node)
362 .expect("Node should exist in probationary");
363
364 // If protected segment is full, demote LRU protected item to probationary
365 if self.protected.len() >= self.protected_max_size().get() {
366 // We need to make room in probationary first if it's full
367 if self.probationary.len() >= self.probationary.cap().get() {
368 // Evict LRU from probationary
369 if let Some(old_entry) = self.probationary.remove_last() {
370 let old_ptr = Box::into_raw(old_entry);
371 let cache_entry = (*old_ptr).get_value();
372 let evicted_size = cache_entry.metadata.size;
373 self.map.remove(&cache_entry.key);
374 self.current_size = self.current_size.saturating_sub(evicted_size);
375 self.metrics.record_probationary_eviction(evicted_size);
376 let _ = Box::from_raw(old_ptr);
377 }
378 }
379 self.demote_lru_protected();
380 }
381
382 // Get the raw pointer from the box
383 let entry_ptr = Box::into_raw(boxed_entry);
384
385 // Update location and timestamp in the entry
386 let cache_entry = (*entry_ptr).get_value_mut();
387 cache_entry.metadata.algorithm.location = Location::Protected;
388 cache_entry.touch(); // Update last_accessed timestamp
389
390 // Update the map pointer
391 if let Some(node_ptr) = self.map.get_mut(&cache_entry.key) {
392 *node_ptr = entry_ptr;
393 }
394
395 // Add to protected list using the pointer from the Box
396 unsafe {
397 self.protected.attach_from_other_list(entry_ptr);
398 }
399
400 entry_ptr
401 }
402
403 /// Demotes the least recently used item from the protected segment to the probationary segment.
404 unsafe fn demote_lru_protected(&mut self) {
405 if let Some(lru_protected) = self.protected.remove_last() {
406 let lru_ptr = Box::into_raw(lru_protected);
407 let cache_entry = (*lru_ptr).get_value_mut();
408
409 // Update location in the entry
410 cache_entry.metadata.algorithm.location = Location::Probationary;
411
412 // Update the map pointer
413 if let Some(node_ptr) = self.map.get_mut(&cache_entry.key) {
414 *node_ptr = lru_ptr;
415 }
416
417 // Add to probationary list
418 self.probationary.attach_from_other_list(lru_ptr);
419
420 // Record demotion
421 self.metrics.record_demotion();
422 }
423 }
424
425 /// Returns a reference to the value corresponding to the key.
426 pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
427 where
428 K: Borrow<Q>,
429 Q: ?Sized + Hash + Eq,
430 {
431 let node = self.map.get(key).copied()?;
432
433 unsafe {
434 // SAFETY: node comes from our map, so it's a valid pointer
435 let cache_entry = (*node).get_value();
436 let location = cache_entry.metadata.algorithm.location;
437 let size = cache_entry.metadata.size;
438
439 match location {
440 Location::Probationary => {
441 self.metrics.record_probationary_hit(size);
442
443 // Promote from probationary to protected (updates timestamp and location)
444 let entry_ptr = self.promote_to_protected(node);
445
446 // Record promotion
447 self.metrics.record_promotion();
448
449 // Update segment sizes
450 self.metrics.update_segment_sizes(
451 self.probationary.len() as u64,
452 self.protected.len() as u64,
453 );
454
455 // SAFETY: entry_ptr is the return value from promote_to_protected
456 Some(&(*entry_ptr).get_value().value)
457 }
458 Location::Protected => {
459 self.metrics.record_protected_hit(size);
460
461 // Already protected, just move to MRU position and update timestamp
462 self.protected.move_to_front(node);
463 (*node).get_value_mut().touch();
464 Some(&(*node).get_value().value)
465 }
466 }
467 }
468 }
469
470 /// Returns a mutable reference to the value corresponding to the key.
471 pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
472 where
473 K: Borrow<Q>,
474 Q: ?Sized + Hash + Eq,
475 {
476 let node = self.map.get(key).copied()?;
477
478 unsafe {
479 // SAFETY: node comes from our map, so it's a valid pointer
480 let cache_entry = (*node).get_value();
481 let location = cache_entry.metadata.algorithm.location;
482 let size = cache_entry.metadata.size;
483
484 match location {
485 Location::Probationary => {
486 self.metrics.record_probationary_hit(size);
487
488 // Promote from probationary to protected (updates timestamp and location)
489 let entry_ptr = self.promote_to_protected(node);
490
491 // Record promotion
492 self.metrics.record_promotion();
493
494 // Update segment sizes
495 self.metrics.update_segment_sizes(
496 self.probationary.len() as u64,
497 self.protected.len() as u64,
498 );
499
500 // SAFETY: entry_ptr is the return value from promote_to_protected
501 Some(&mut (*entry_ptr).get_value_mut().value)
502 }
503 Location::Protected => {
504 self.metrics.record_protected_hit(size);
505
506 // Already protected, just move to MRU position and update timestamp
507 self.protected.move_to_front(node);
508 (*node).get_value_mut().touch();
509 Some(&mut (*node).get_value_mut().value)
510 }
511 }
512 }
513 }
514
515 /// Records a cache miss for metrics tracking
516 #[inline]
517 pub(crate) fn record_miss(&mut self, object_size: u64) {
518 self.metrics.core.record_miss(object_size);
519 }
520}
521
impl<K: Hash + Eq + Clone, V, S: BuildHasher> SlruInner<K, V, S> {
    /// Inserts a key-value pair into the segment.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to insert
    /// * `size` - Optional size in bytes. Use `SIZE_UNIT` (1) for count-based caching.
    ///
    /// Returns evicted entries, or `None` if no entries were evicted.
    /// Note: Replacing an existing key does not return the old value.
    /// New and replaced entries stay in their current segment: a replacement
    /// does not promote, and a fresh insert always lands in probationary.
    pub(crate) fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
    where
        V: Clone,
    {
        // If key is already in the cache, update it in place
        if let Some(&node) = self.map.get(&key) {
            unsafe {
                // SAFETY: node comes from our map
                let cache_entry = (*node).get_value();
                let location = cache_entry.metadata.algorithm.location;
                let old_size = cache_entry.metadata.size;

                match location {
                    Location::Probationary => {
                        self.probationary.move_to_front(node);
                        // Create new CacheEntry with updated value
                        let new_entry = CacheEntry::with_algorithm_metadata(
                            key.clone(),
                            value,
                            size,
                            SlruMeta {
                                location: Location::Probationary,
                            },
                        );
                        let old_entry = self.probationary.update(node, new_entry, true);
                        // Update size tracking (old size out, new size in)
                        self.current_size = self.current_size.saturating_sub(old_size);
                        self.current_size += size;
                        self.metrics.core.record_size_change(old_size, size);
                        self.metrics.core.bytes_written_to_cache += size;
                        // Replacement is not eviction - discard old entry
                        let _ = old_entry;
                        return None;
                    }
                    Location::Protected => {
                        self.protected.move_to_front(node);
                        // Create new CacheEntry with updated value
                        let new_entry = CacheEntry::with_algorithm_metadata(
                            key.clone(),
                            value,
                            size,
                            SlruMeta {
                                location: Location::Protected,
                            },
                        );
                        let old_entry = self.protected.update(node, new_entry, true);
                        // Update size tracking (old size out, new size in)
                        self.current_size = self.current_size.saturating_sub(old_size);
                        self.current_size += size;
                        self.metrics.core.record_size_change(old_size, size);
                        self.metrics.core.bytes_written_to_cache += size;
                        // Replacement is not eviction - discard old entry
                        let _ = old_entry;
                        return None;
                    }
                }
            }
        }

        let mut evicted = Vec::new();

        // Evict while entry count limit OR size limit would be exceeded
        // This mirrors LRU's eviction logic to properly respect max_size
        while self.len() >= self.cap().get()
            || (self.current_size + size > self.config.max_size && !self.map.is_empty())
        {
            if let Some(entry) = self.evict() {
                self.metrics.core.evictions += 1;
                evicted.push(entry);
            } else {
                // Nothing left to evict; avoid an infinite loop
                break;
            }
        }

        // Add the new key-value pair to the probationary segment
        let cache_entry = CacheEntry::with_algorithm_metadata(
            key.clone(),
            value,
            size,
            SlruMeta {
                location: Location::Probationary,
            },
        );
        let node = self.probationary.add_unchecked(cache_entry);
        self.map.insert(key, node);
        self.current_size += size;

        // Record insertion and update segment sizes
        self.metrics.core.record_insertion(size);
        self.metrics
            .update_segment_sizes(self.probationary.len() as u64, self.protected.len() as u64);

        if evicted.is_empty() {
            None
        } else {
            Some(evicted)
        }
    }

    /// Removes a key from the segment, returning the value if the key was present.
    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let node = self.map.remove(key)?;

        unsafe {
            // SAFETY: node comes from our map
            let cache_entry = (*node).get_value();
            let location = cache_entry.metadata.algorithm.location;
            let removed_size = cache_entry.metadata.size;

            match location {
                Location::Probationary => {
                    // SAFETY: take_value moves the value out and Box::from_raw frees memory
                    let boxed_entry = self.probationary.remove(node)?;
                    let entry_ptr = Box::into_raw(boxed_entry);
                    let cache_entry = (*entry_ptr).take_value();
                    self.current_size = self.current_size.saturating_sub(removed_size);
                    self.metrics.record_probationary_removal(removed_size);
                    let _ = Box::from_raw(entry_ptr);
                    Some(cache_entry.value)
                }
                Location::Protected => {
                    // SAFETY: take_value moves the value out and Box::from_raw frees memory
                    let boxed_entry = self.protected.remove(node)?;
                    let entry_ptr = Box::into_raw(boxed_entry);
                    let cache_entry = (*entry_ptr).take_value();
                    self.current_size = self.current_size.saturating_sub(removed_size);
                    self.metrics.record_protected_removal(removed_size);
                    let _ = Box::from_raw(entry_ptr);
                    Some(cache_entry.value)
                }
            }
        }
    }

    /// Clears the segment, removing all key-value pairs.
    /// Note: metrics counters are intentionally left untouched.
    pub(crate) fn clear(&mut self) {
        self.map.clear();
        self.probationary.clear();
        self.protected.clear();
        self.current_size = 0;
    }

    /// Check if key exists without promoting it between segments.
    ///
    /// Unlike `get()`, this method does NOT promote the entry from probationary
    /// to protected, and does not update any access metadata.
    #[inline]
    pub(crate) fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.map.contains_key(key)
    }

    /// Returns a reference to the value without promoting or updating access metadata.
    ///
    /// Unlike `get()`, this method does NOT promote the entry from probationary
    /// to protected, and does not update any ordering.
    pub(crate) fn peek<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let node = self.map.get(key)?;
        unsafe {
            // SAFETY: node comes from our map, so it's a valid pointer
            let cache_entry = (**node).get_value();
            Some(&cache_entry.value)
        }
    }

    /// Removes and returns the eviction candidate.
    ///
    /// For SLRU, the eviction candidate is the LRU entry from the probationary
    /// segment. If probationary is empty, falls back to the LRU entry from the
    /// protected segment.
    ///
    /// This method does **not** increment the eviction counter in metrics.
    /// Eviction metrics are only recorded when the cache internally evicts
    /// entries to make room during `put()` operations.
    fn evict(&mut self) -> Option<(K, V)> {
        // Try probationary first (normal eviction target)
        if let Some(old_entry) = self.probationary.remove_last() {
            unsafe {
                // SAFETY: take_value reads the CacheEntry out of MaybeUninit by value.
                // Box::from_raw frees memory (MaybeUninit won't double-drop).
                let entry_ptr = Box::into_raw(old_entry);
                let cache_entry = (*entry_ptr).take_value();
                let evicted_size = cache_entry.metadata.size;
                self.map.remove(&cache_entry.key);
                self.current_size = self.current_size.saturating_sub(evicted_size);
                self.metrics.record_probationary_removal(evicted_size);
                let _ = Box::from_raw(entry_ptr);
                return Some((cache_entry.key, cache_entry.value));
            }
        }

        // Fall back to protected if probationary is empty
        if let Some(old_entry) = self.protected.remove_last() {
            unsafe {
                // SAFETY: take_value reads the CacheEntry out of MaybeUninit by value.
                // Box::from_raw frees memory (MaybeUninit won't double-drop).
                let entry_ptr = Box::into_raw(old_entry);
                let cache_entry = (*entry_ptr).take_value();
                let evicted_size = cache_entry.metadata.size;
                self.map.remove(&cache_entry.key);
                self.current_size = self.current_size.saturating_sub(evicted_size);
                self.metrics.record_protected_removal(evicted_size);
                let _ = Box::from_raw(entry_ptr);
                return Some((cache_entry.key, cache_entry.value));
            }
        }

        None
    }
}
754
755// Implement Debug for SlruInner manually since it contains raw pointers
756impl<K, V, S> core::fmt::Debug for SlruInner<K, V, S> {
757 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
758 f.debug_struct("SlruInner")
759 .field("capacity", &self.config.capacity)
760 .field("protected_capacity", &self.config.protected_capacity)
761 .field("len", &self.map.len())
762 .finish()
763 }
764}
765
/// An implementation of a Segmented Least Recently Used (SLRU) cache.
///
/// The cache is divided into two segments:
/// - Probationary segment: Where new entries are initially placed
/// - Protected segment: Where frequently accessed entries are promoted to
///
/// When the cache reaches capacity, the least recently used entry from the
/// probationary segment is evicted. If the probationary segment is empty,
/// entries from the protected segment may be demoted back to probationary.
///
/// # Examples
///
/// ```
/// use cache_rs::slru::SlruCache;
/// use cache_rs::config::SlruCacheConfig;
/// use core::num::NonZeroUsize;
///
/// // Create an SLRU cache with a total capacity of 4,
/// // with a protected capacity of 2 (half protected, half probationary)
/// let config = SlruCacheConfig {
///     capacity: NonZeroUsize::new(4).unwrap(),
///     protected_capacity: NonZeroUsize::new(2).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = SlruCache::init(config, None);
///
/// // Add some items
/// cache.put("a", 1, 1);
/// cache.put("b", 2, 1);
/// cache.put("c", 3, 1);
/// cache.put("d", 4, 1);
///
/// // Access "a" to promote it to the protected segment
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add a new item, which will evict the least recently used item
/// // from the probationary segment (likely "b")
/// cache.put("e", 5, 1);
/// assert_eq!(cache.get(&"b"), None);
/// ```
#[derive(Debug)]
pub struct SlruCache<K, V, S = DefaultHashBuilder> {
    // Thin public wrapper: all algorithm logic lives in SlruInner.
    segment: SlruInner<K, V, S>,
}
810
impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruCache<K, V, S> {
    /// Returns the maximum number of key-value pairs the cache can hold.
    #[inline]
    pub fn cap(&self) -> NonZeroUsize {
        self.segment.cap()
    }

    /// Returns the maximum size of the protected segment.
    #[inline]
    pub fn protected_max_size(&self) -> NonZeroUsize {
        self.segment.protected_max_size()
    }

    /// Returns the current number of key-value pairs in the cache.
    #[inline]
    pub fn len(&self) -> usize {
        self.segment.len()
    }

    /// Returns `true` if the cache contains no key-value pairs.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.segment.is_empty()
    }

    /// Returns the current total size of cached content.
    #[inline]
    pub fn current_size(&self) -> u64 {
        self.segment.current_size()
    }

    /// Returns the maximum content size the cache can hold.
    #[inline]
    pub fn max_size(&self) -> u64 {
        self.segment.max_size()
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If a value is returned from the probationary segment, it is promoted
    /// to the protected segment.
    #[inline]
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get(key)
    }

    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If a value is returned from the probationary segment, it is promoted
    /// to the protected segment.
    #[inline]
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get_mut(key)
    }

    /// Records a cache miss for metrics tracking (to be called by simulation system)
    #[inline]
    pub fn record_miss(&mut self, object_size: u64) {
        self.segment.record_miss(object_size);
    }
}
888
889impl<K: Hash + Eq + Clone, V, S: BuildHasher> SlruCache<K, V, S> {
    /// Inserts a key-value pair into the cache.
    ///
    /// If the key already exists, it is replaced. If at capacity, the least recently
    /// used item from the probationary segment is evicted. The inserted entry always
    /// starts in the probationary segment.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to insert
    /// * `size` - Optional size in bytes for size-aware caching. Use `SIZE_UNIT` (1) for count-based caching.
    ///
    /// # Returns
    ///
    /// - `Some(vec)` containing evicted entries (not replaced entries)
    /// - `None` if no entries were evicted (zero allocation)
    #[inline]
    pub fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
    where
        V: Clone,
    {
        self.segment.put(key, value, size)
    }
913
    /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    #[inline]
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        self.segment.remove(key)
    }
928
    /// Clears the cache, removing all key-value pairs from both segments.
    #[inline]
    pub fn clear(&mut self) {
        self.segment.clear()
    }
934
935 /// Check if key exists without promoting it between segments.
936 ///
937 /// Unlike `get()`, this method does NOT promote the entry from probationary
938 /// to protected, and does not update any access metadata.
939 ///
940 /// # Example
941 ///
942 /// ```
943 /// use cache_rs::SlruCache;
944 /// use cache_rs::config::SlruCacheConfig;
945 /// use core::num::NonZeroUsize;
946 ///
947 /// let config = SlruCacheConfig {
948 /// capacity: NonZeroUsize::new(10).unwrap(),
949 /// protected_capacity: NonZeroUsize::new(3).unwrap(),
950 /// max_size: u64::MAX,
951 /// };
952 /// let mut cache = SlruCache::init(config, None);
953 /// cache.put("a", 1, 1);
954 /// assert!(cache.contains(&"a"));
955 /// assert!(!cache.contains(&"b"));
956 /// ```
957 #[inline]
958 pub fn contains<Q>(&self, key: &Q) -> bool
959 where
960 K: Borrow<Q>,
961 Q: ?Sized + Hash + Eq,
962 {
963 self.segment.contains(key)
964 }
965
966 /// Returns a reference to the value without promoting or updating access metadata.
967 ///
968 /// Unlike [`get()`](Self::get), this does NOT promote the entry from
969 /// probationary to protected, and does not update any ordering.
970 ///
971 /// # Example
972 ///
973 /// ```
974 /// use cache_rs::SlruCache;
975 /// use cache_rs::config::SlruCacheConfig;
976 /// use core::num::NonZeroUsize;
977 ///
978 /// let config = SlruCacheConfig {
979 /// capacity: NonZeroUsize::new(3).unwrap(),
980 /// protected_capacity: NonZeroUsize::new(1).unwrap(),
981 /// max_size: u64::MAX,
982 /// };
983 /// let mut cache = SlruCache::init(config, None);
984 /// cache.put("a", 1, 1);
985 ///
986 /// // peek does not promote between segments
987 /// assert_eq!(cache.peek(&"a"), Some(&1));
988 /// assert_eq!(cache.peek(&"missing"), None);
989 /// ```
990 #[inline]
991 pub fn peek<Q>(&self, key: &Q) -> Option<&V>
992 where
993 K: Borrow<Q>,
994 Q: ?Sized + Hash + Eq,
995 {
996 self.segment.peek(key)
997 }
998}
999
1000impl<K: Hash + Eq, V> SlruCache<K, V>
1001where
1002 V: Clone,
1003{
1004 /// Creates a new SLRU cache from a configuration.
1005 ///
1006 /// This is the **only** way to create an SLRU cache. All configuration
1007 /// is specified through the [`SlruCacheConfig`] struct.
1008 ///
1009 /// # Arguments
1010 ///
1011 /// * `config` - Configuration specifying capacity, protected capacity, and optional size limit
1012 /// * `hasher` - Optional custom hash builder. If `None`, uses the default.
1013 ///
1014 /// # Example
1015 ///
1016 /// ```
1017 /// use cache_rs::SlruCache;
1018 /// use cache_rs::config::SlruCacheConfig;
1019 /// use core::num::NonZeroUsize;
1020 ///
1021 /// // Simple capacity-only cache with 20% protected segment
1022 /// let config = SlruCacheConfig {
1023 /// capacity: NonZeroUsize::new(100).unwrap(),
1024 /// protected_capacity: NonZeroUsize::new(20).unwrap(),
1025 /// max_size: u64::MAX,
1026 /// };
1027 /// let mut cache: SlruCache<&str, i32> = SlruCache::init(config, None);
1028 /// cache.put("key", 42, 1);
1029 ///
1030 /// // Cache with size limit
1031 /// let config = SlruCacheConfig {
1032 /// capacity: NonZeroUsize::new(1000).unwrap(),
1033 /// protected_capacity: NonZeroUsize::new(200).unwrap(),
1034 /// max_size: 10 * 1024 * 1024, // 10MB
1035 /// };
1036 /// let cache: SlruCache<String, Vec<u8>> = SlruCache::init(config, None);
1037 /// ```
1038 pub fn init(
1039 config: SlruCacheConfig,
1040 hasher: Option<DefaultHashBuilder>,
1041 ) -> SlruCache<K, V, DefaultHashBuilder> {
1042 SlruCache {
1043 segment: SlruInner::init(config, hasher.unwrap_or_default()),
1044 }
1045 }
1046}
1047
1048impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for SlruCache<K, V, S> {
1049 fn metrics(&self) -> BTreeMap<String, f64> {
1050 self.segment.metrics().metrics()
1051 }
1052
1053 fn algorithm_name(&self) -> &'static str {
1054 self.segment.metrics().algorithm_name()
1055 }
1056}
1057
#[cfg(test)]
mod tests {
    // Unit tests covering insertion, promotion, eviction order, size-aware
    // tracking, and metrics reporting for the SLRU cache.
    extern crate std;
    use std::string::ToString;

    use super::*;
    use crate::config::SlruCacheConfig;
    use alloc::format;
    use alloc::string::String;

    /// Helper to create an SlruCache with the given capacity and protected capacity
    fn make_cache<K: Hash + Eq + Clone, V: Clone>(
        cap: usize,
        protected_cap: usize,
    ) -> SlruCache<K, V> {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            protected_capacity: NonZeroUsize::new(protected_cap).unwrap(),
            max_size: u64::MAX,
        };
        SlruCache::init(config, None)
    }

    #[test]
    fn test_slru_basic() {
        // Create a cache with capacity 4, with protected capacity 2
        // (2 probationary, 2 protected)
        let mut cache = make_cache(4, 2);

        // Add items to fill probationary segment
        assert_eq!(cache.put("a", 1, 1), None);
        assert_eq!(cache.put("b", 2, 1), None);
        assert_eq!(cache.put("c", 3, 1), None);
        assert_eq!(cache.put("d", 4, 1), None);

        // Cache should be at capacity
        assert_eq!(cache.len(), 4);

        // Access "a" and "b" to promote them to protected segment
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"b"), Some(&2));

        // Add a new item "e", should evict "c" from probationary
        let evicted = cache.put("e", 5, 1);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap()[0];
        assert_eq!(evicted_key, "c");
        assert_eq!(evicted_val, 3);

        // Add another item "f", should evict "d" from probationary
        let evicted = cache.put("f", 6, 1);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap()[0];
        assert_eq!(evicted_key, "d");
        assert_eq!(evicted_val, 4);

        // Check presence
        assert_eq!(cache.get(&"a"), Some(&1)); // Protected
        assert_eq!(cache.get(&"b"), Some(&2)); // Protected
        assert_eq!(cache.get(&"c"), None); // Evicted
        assert_eq!(cache.get(&"d"), None); // Evicted
        assert_eq!(cache.get(&"e"), Some(&5)); // Probationary
        assert_eq!(cache.get(&"f"), Some(&6)); // Probationary
    }

    #[test]
    fn test_slru_update() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Update values - replacement returns None
        assert!(cache.put("a", 10, 1).is_none());
        assert!(cache.put("b", 20, 1).is_none());

        // Check updated values
        assert_eq!(cache.get(&"a"), Some(&10));
        assert_eq!(cache.get(&"b"), Some(&20));
    }

    #[test]
    fn test_slru_remove() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Remove items
        assert_eq!(cache.remove(&"a"), Some(1)); // From protected
        assert_eq!(cache.remove(&"b"), Some(2)); // From probationary

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);

        // Check that removing non-existent item returns None
        assert_eq!(cache.remove(&"c"), None);
    }

    #[test]
    fn test_slru_clear() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        cache.put("c", 3, 1);
        cache.put("d", 4, 1);

        // Clear the cache
        cache.clear();

        // Check that cache is empty
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), None);
    }

    #[test]
    fn test_slru_complex_values() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        #[derive(Debug, Clone, PartialEq)]
        struct ComplexValue {
            id: usize,
            data: String,
        }

        // Add complex values
        cache.put(
            "a",
            ComplexValue {
                id: 1,
                data: "a-data".to_string(),
            },
            1,
        );
        cache.put(
            "b",
            ComplexValue {
                id: 2,
                data: "b-data".to_string(),
            },
            1,
        );

        // Modify a value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            value.id = 100;
            value.data = "a-modified".to_string();
        }

        // Check the modified value
        let a = cache.get(&"a").unwrap();
        assert_eq!(a.id, 100);
        assert_eq!(a.data, "a-modified");
    }

    #[test]
    fn test_slru_with_ratio() {
        // Verify the capacity / protected-capacity split configured via make_cache
        // (this test predates the config-based constructor; built with make_cache).
        let mut cache = make_cache(4, 2);

        assert_eq!(cache.cap().get(), 4);
        assert_eq!(cache.protected_max_size().get(), 2);

        // Test basic functionality
        assert_eq!(cache.put("a", 1, 1), None);
        assert_eq!(cache.put("b", 2, 1), None);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Fill the cache
        assert_eq!(cache.put("c", 3, 1), None);
        assert_eq!(cache.put("d", 4, 1), None);

        // Add another item, should evict "b" from probationary
        let result = cache.put("e", 5, 1);
        assert_eq!(result.unwrap()[0].0, "b");

        // Check that protected items remain
        assert_eq!(cache.get(&"a"), Some(&1));
    }

    // Test that SlruInner works correctly (internal tests)
    #[test]
    fn test_slru_segment_directly() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(4).unwrap(),
            protected_capacity: NonZeroUsize::new(2).unwrap(),
            max_size: u64::MAX,
        };
        let mut segment: SlruInner<&str, i32, DefaultHashBuilder> =
            SlruInner::init(config, DefaultHashBuilder::default());

        assert_eq!(segment.len(), 0);
        assert!(segment.is_empty());
        assert_eq!(segment.cap().get(), 4);
        assert_eq!(segment.protected_max_size().get(), 2);

        segment.put("a", 1, 1);
        segment.put("b", 2, 1);
        assert_eq!(segment.len(), 2);

        // Access to promote
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"b"), Some(&2));
    }

    #[test]
    fn test_slru_concurrent_access() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100, 50)));
        let num_threads = 4;
        let ops_per_thread = 100;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    guard.put(key.clone(), i, 1);
                    let _ = guard.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_slru_size_aware_tracking() {
        let mut cache = make_cache(10, 3);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), u64::MAX);

        cache.put("a", 1, 100);
        cache.put("b", 2, 200);
        cache.put("c", 3, 150);

        assert_eq!(cache.current_size(), 450);
        assert_eq!(cache.len(), 3);

        // Clear should reset size
        cache.clear();
        assert_eq!(cache.current_size(), 0);
    }

    #[test]
    fn test_slru_init_constructor() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(1000).unwrap(),
            protected_capacity: NonZeroUsize::new(300).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: SlruCache<String, i32> = SlruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
    }

    #[test]
    fn test_slru_with_limits_constructor() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(100).unwrap(),
            protected_capacity: NonZeroUsize::new(30).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: SlruCache<String, String> = SlruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
        assert_eq!(cache.cap().get(), 100);
        assert_eq!(cache.protected_max_size().get(), 30);
    }

    #[test]
    fn test_slru_record_miss() {
        use crate::metrics::CacheMetrics;

        let mut cache: SlruCache<String, i32> = make_cache(100, 30);

        cache.record_miss(100);
        cache.record_miss(200);

        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_misses").unwrap(), &2.0);
    }

    #[test]
    fn test_slru_get_mut() {
        let mut cache: SlruCache<String, i32> = make_cache(100, 30);

        cache.put("key".to_string(), 10, 1);
        assert_eq!(cache.get(&"key".to_string()), Some(&10));

        // Modify via get_mut
        if let Some(val) = cache.get_mut(&"key".to_string()) {
            *val = 42;
        }
        assert_eq!(cache.get(&"key".to_string()), Some(&42));

        // get_mut on missing key returns None
        assert!(cache.get_mut(&"missing".to_string()).is_none());
    }

    /// Helper to create an SlruCache with max_size limit
    fn make_cache_with_max_size(
        cap: usize,
        protected: usize,
        max_size: u64,
    ) -> SlruCache<String, i32> {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            protected_capacity: NonZeroUsize::new(protected).unwrap(),
            max_size,
        };
        SlruCache::init(config, None)
    }

    /// Test that SLRU evicts items when max_size would be exceeded.
    /// This test verifies the cache respects the max_size limit, not just entry count.
    #[test]
    fn test_slru_max_size_triggers_eviction() {
        // Create cache with large entry capacity (100) but small max_size (100 bytes)
        // This means size limit should be the constraint, not entry count
        let mut cache = make_cache_with_max_size(100, 30, 100);

        // Insert items that fit within max_size
        cache.put("a".to_string(), 1, 30); // total: 30
        cache.put("b".to_string(), 2, 30); // total: 60
        cache.put("c".to_string(), 3, 30); // total: 90

        assert_eq!(cache.len(), 3, "Should have 3 items");
        assert_eq!(cache.current_size(), 90, "Size should be 90");

        // Insert item that would exceed max_size (90 + 20 = 110 > 100)
        // This SHOULD trigger eviction to stay within max_size
        cache.put("d".to_string(), 4, 20);

        // Cache should evict to stay within max_size
        // The LRU item ("a") should be evicted, leaving b, c, d
        // Total size should be: 30 + 30 + 20 = 80 (after evicting "a")
        assert!(
            cache.current_size() <= 100,
            "current_size {} exceeds max_size 100",
            cache.current_size()
        );

        // Verify that "a" was evicted
        assert!(
            cache.get(&"a".to_string()).is_none() || cache.current_size() <= 100,
            "Either 'a' should be evicted OR size should be within limits"
        );
    }

    /// Test that max_size eviction works even with larger objects
    #[test]
    fn test_slru_max_size_eviction_large_objects() {
        // Create cache with max_size = 500 bytes, large entry capacity
        let mut cache = make_cache_with_max_size(1000, 200, 500);

        // Insert objects that each take 100 bytes
        for i in 0..5 {
            cache.put(format!("key{}", i), i, 100);
        }

        assert_eq!(cache.len(), 5);
        assert_eq!(cache.current_size(), 500, "Should have exactly 500 bytes");

        // Insert one more - should trigger eviction to stay within 500
        cache.put("overflow".to_string(), 99, 100);

        // Expected: oldest item evicted, size still <= 500
        assert!(
            cache.current_size() <= 500,
            "SLRU BUG: current_size {} exceeds max_size 500 after insert. \
             SLRU must evict items to respect max_size limit.",
            cache.current_size()
        );
    }

    #[test]
    fn test_slru_contains_non_promoting() {
        // Create cache: 4 total (2 probationary, 2 protected)
        let mut cache = make_cache(4, 2);
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // contains() should return true for existing keys
        assert!(cache.contains(&"a"));
        assert!(cache.contains(&"b"));
        assert!(!cache.contains(&"c"));

        // contains() should NOT promote entries
        // Both should still be in probationary
        // Adding "c" and "d" should fill probationary
        cache.put("c", 3, 1);
        cache.put("d", 4, 1);

        // At this point all 4 items are in probationary (none accessed twice)
        assert_eq!(cache.len(), 4);
        assert!(cache.contains(&"a"));
        assert!(cache.contains(&"d"));
    }

    #[test]
    fn test_put_returns_none_when_no_eviction() {
        let mut cache = make_cache(10, 3);
        assert!(cache.put("a", 1, 1).is_none());
        assert!(cache.put("b", 2, 1).is_none());
    }

    #[test]
    fn test_put_returns_single_eviction() {
        let mut cache = make_cache(2, 1);
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        let result = cache.put("c", 3, 1);
        assert!(result.is_some());
        let evicted = result.unwrap();
        assert_eq!(evicted.len(), 1);
    }

    #[test]
    fn test_put_replacement_returns_none() {
        let mut cache = make_cache(10, 3);
        cache.put("a", 1, 1);
        // Replacement is not eviction - returns None
        let result = cache.put("a", 2, 1);
        assert!(result.is_none());
        // Value should be updated
        assert_eq!(cache.get(&"a"), Some(&2));
    }

    #[test]
    fn test_put_returns_multiple_evictions_size_based() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(10).unwrap(),
            protected_capacity: NonZeroUsize::new(3).unwrap(),
            max_size: 100,
        };
        let mut cache = SlruCache::init(config, None);
        // 10 entries x 10 bytes = 100 bytes, exactly at max_size.
        for i in 0..10 {
            cache.put(i, i, 10);
        }
        // Inserting a 50-byte entry requires freeing 50 bytes -> 5 evictions.
        let result = cache.put(99, 99, 50);
        let evicted = result.unwrap();
        assert_eq!(evicted.len(), 5);
    }

    #[test]
    fn test_slru_peek_non_promoting() {
        // Create cache: 4 total (2 probationary, 2 protected)
        let mut cache = make_cache(4, 2);
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // peek() should return value without promoting
        assert_eq!(cache.peek(&"a"), Some(&1));
        assert_eq!(cache.peek(&"b"), Some(&2));
        assert_eq!(cache.peek(&"c"), None);

        // "a" and "b" should still be in probationary (not promoted)
        // Now access "a" with get() to promote it
        cache.get(&"a");

        // Add "c" and "d" - if peek promoted, this would evict "a"
        cache.put("c", 3, 1);
        cache.put("d", 4, 1);

        // "a" was promoted by get(), so it should still exist
        assert!(cache.contains(&"a"));
    }
}