// cache_rs/src/slru.rs
1//! Segmented Least Recently Used (SLRU) Cache Implementation
2//!
3//! SLRU is a scan-resistant cache algorithm that divides the cache into two segments:
4//! a **probationary segment** for new entries and a **protected segment** for frequently
5//! accessed entries. This design prevents one-time access patterns (scans) from evicting
6//! valuable cached items.
7//!
8//! # How the Algorithm Works
9//!
10//! SLRU uses a two-tier approach to distinguish between items that are accessed once
11//! (scans, sequential reads) versus items accessed repeatedly (working set).
12//!
13//! ## Segment Architecture
14//!
15//! ```text
16//! ┌─────────────────────────────────────────────────────────────────────────────┐
17//! │                              SLRU Cache                                      │
18//! │                                                                              │
19//! │  ┌─────────────────────────────────────────────────────────────────────┐    │
20//! │  │                    PROTECTED SEGMENT (20%)                          │    │
21//! │  │          Frequently accessed items - harder to evict                │    │
22//! │  │  ┌─────────────────────────────────────────────────────────────┐   │    │
23//! │  │  │ MRU ◀──▶ [hot_1] ◀──▶ [hot_2] ◀──▶ ... ◀──▶ [demote] LRU  │   │    │
24//! │  │  └─────────────────────────────────────────────────────────────┘   │    │
25//! │  └─────────────────────────────────────────────────────────────────────┘    │
26//! │                              │ demote                   ▲ promote           │
27//! │                              ▼                          │                   │
28//! │  ┌─────────────────────────────────────────────────────────────────────┐    │
29//! │  │                   PROBATIONARY SEGMENT (80%)                        │    │
30//! │  │          New items and demoted items - easier to evict              │    │
31//! │  │  ┌─────────────────────────────────────────────────────────────┐   │    │
32//! │  │  │ MRU ◀──▶ [new_1] ◀──▶ [new_2] ◀──▶ ... ◀──▶ [evict] LRU   │   │    │
33//! │  │  └─────────────────────────────────────────────────────────────┘   │    │
34//! │  └─────────────────────────────────────────────────────────────────────┘    │
35//! │                              ▲                                              │
36//! │                              │ insert                                       │
37//! │                         new items                                           │
38//! └─────────────────────────────────────────────────────────────────────────────┘
39//! ```
40//!
41//! ## Entry Lifecycle
42//!
43//! 1. **Insert**: New items enter the probationary segment
44//! 2. **First access in probationary**: Item promoted to protected segment
45//! 3. **Protected segment full**: LRU item demoted back to probationary
46//! 4. **Eviction**: Always from the LRU end of the probationary segment
47//!
48//! ## Scan Resistance Example
49//!
50//! ```text
51//! Initial state: Protected=[A, B, C], Probationary=[D, E, F]
52//! (A, B, C are hot items; D, E, F are warm items)
53//!
54//! Sequential scan of X, Y, Z (one-time access):
55//!   put(X) → Protected=[A, B, C], Probationary=[X, D, E]  (F evicted)
56//!   put(Y) → Protected=[A, B, C], Probationary=[Y, X, D]  (E evicted)
57//!   put(Z) → Protected=[A, B, C], Probationary=[Z, Y, X]  (D evicted)
58//!
59//! Hot items A, B, C remain in protected segment!
60//! The scan only displaced probationary items.
61//! ```
62//!
63//! ## Operations
64//!
65//! | Operation | Action | Time |
66//! |-----------|--------|------|
67//! | `get(key)` | Promote to protected if in probationary | O(1) |
68//! | `put(key, value)` | Insert to probationary, may evict | O(1) |
69//! | `remove(key)` | Remove from whichever segment contains it | O(1) |
70//!
71//! # Dual-Limit Capacity
72//!
73//! This implementation supports two independent limits:
74//!
75//! - **`max_entries`**: Maximum total items across both segments
76//! - **`max_size`**: Maximum total size of content
77//!
78//! The protected segment size is configured separately (default: 20% of total).
79//!
80//! # Performance Characteristics
81//!
82//! | Metric | Value |
83//! |--------|-------|
84//! | Get | O(1) |
85//! | Put | O(1) |
86//! | Remove | O(1) |
87//! | Memory per entry | ~90 bytes overhead + key×2 + value |
88//!
89//! Memory overhead includes: two list pointers, location tag, size tracking,
90//! HashMap bucket, and allocator overhead.
91//!
92//! # When to Use SLRU
93//!
94//! **Good for:**
95//! - Mixed workloads with both hot data and sequential scans
96//! - Database buffer pools
97//! - File system caches
98//! - Any scenario where scans shouldn't evict the working set
99//!
100//! **Not ideal for:**
101//! - Pure recency-based access patterns (LRU is simpler)
102//! - Frequency-dominant patterns (LFU/LFUDA is better)
103//! - Very small caches where the two-segment overhead isn't justified
104//!
105//! # Tuning the Protected Ratio
106//!
107//! The protected segment size controls the trade-off:
108//! - **Larger protected**: More scan resistance, but new items evicted faster
109//! - **Smaller protected**: Less scan resistance, but more room for new items
110//!
111//! Default is 20% protected, which works well for most workloads.
112//!
113//! # Thread Safety
114//!
115//! `SlruCache` is **not thread-safe**. For concurrent access, either:
116//! - Wrap with `Mutex` or `RwLock`
117//! - Use `ConcurrentSlruCache` (requires `concurrent` feature)
118//!
119//! # Examples
120//!
121//! ## Basic Usage
122//!
123//! ```
124//! use cache_rs::SlruCache;
125//! use cache_rs::config::SlruCacheConfig;
126//! use core::num::NonZeroUsize;
127//!
128//! // Total capacity 100, protected segment 20
129//! let config = SlruCacheConfig {
130//!     capacity: NonZeroUsize::new(100).unwrap(),
131//!     protected_capacity: NonZeroUsize::new(20).unwrap(),
132//!     max_size: u64::MAX,
133//! };
134//! let mut cache = SlruCache::init(config, None);
135//!
136//! cache.put("a", 1);  // Enters probationary
137//! cache.get(&"a");    // Promoted to protected!
138//! cache.put("b", 2);  // Enters probationary
139//!
140//! assert_eq!(cache.get(&"a"), Some(&1));  // Still in protected
141//! ```
142//!
143//! ## Scan Resistance Demo
144//!
145//! ```
146//! use cache_rs::SlruCache;
147//! use cache_rs::config::SlruCacheConfig;
148//! use core::num::NonZeroUsize;
149//!
150//! let config = SlruCacheConfig {
151//!     capacity: NonZeroUsize::new(10).unwrap(),
152//!     protected_capacity: NonZeroUsize::new(3).unwrap(),
153//!     max_size: u64::MAX,
154//! };
155//! let mut cache: SlruCache<i32, i32> = SlruCache::init(config, None);
156//!
157//! // Establish hot items in protected segment
158//! for key in [1, 2, 3] {
159//!     cache.put(key, 100);
160//!     cache.get(&key);  // Promote to protected
161//! }
162//!
163//! // Simulate a scan - these items only enter probationary
164//! for i in 100..120 {
165//!     cache.put(i, i);  // One-time insertions
166//! }
167//!
168//! // Hot items survive the scan!
169//! assert!(cache.get(&1).is_some());
170//! assert!(cache.get(&2).is_some());
171//! assert!(cache.get(&3).is_some());
172//! ```
173
174extern crate alloc;
175
176use crate::config::SlruCacheConfig;
177use crate::list::{List, ListEntry};
178use crate::metrics::{CacheMetrics, SlruCacheMetrics};
179use alloc::boxed::Box;
180use alloc::collections::BTreeMap;
181use alloc::string::String;
182use core::borrow::Borrow;
183use core::hash::{BuildHasher, Hash};
184use core::mem;
185use core::num::NonZeroUsize;
186
187#[cfg(feature = "hashbrown")]
188use hashbrown::DefaultHashBuilder;
189#[cfg(feature = "hashbrown")]
190use hashbrown::HashMap;
191
192#[cfg(not(feature = "hashbrown"))]
193use std::collections::hash_map::RandomState as DefaultHashBuilder;
194#[cfg(not(feature = "hashbrown"))]
195use std::collections::HashMap;
196
/// Entry location within the SLRU cache.
///
/// Stored alongside each node pointer in `SlruSegment::map` so a lookup
/// knows which list (`probationary` or `protected`) currently owns the node.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Location {
    /// Entry is in the probationary segment
    Probationary,
    /// Entry is in the protected segment
    Protected,
}
205
/// Internal SLRU segment containing the actual cache algorithm.
///
/// This is shared between `SlruCache` (single-threaded) and
/// `ConcurrentSlruCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field. These pointers
/// are always valid as long as:
/// - The pointer was obtained from `probationary.add()` or `protected.add()`
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct SlruSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the SLRU cache
    config: SlruCacheConfig,

    /// The probationary list holding newer or less frequently accessed items.
    /// Sized to `capacity - protected_capacity` at construction (see `init`).
    probationary: List<(K, V)>,

    /// The protected list holding frequently accessed items.
    /// Sized to `protected_capacity` at construction.
    protected: List<(K, V)>,

    /// A hash map mapping keys to entries in either the probationary or protected list.
    /// The tuple stores (node pointer, segment location, entry size in bytes).
    /// The `Location` tag must always agree with the list the node is linked into.
    #[allow(clippy::type_complexity)]
    map: HashMap<K, (*mut ListEntry<(K, V)>, Location, u64), S>,

    /// Metrics for tracking cache performance and segment behavior
    metrics: SlruCacheMetrics,

    /// Current total size of cached content (sum of entry sizes)
    current_size: u64,

    /// Maximum content size the cache can hold (copied from `config.max_size`)
    max_size: u64,
}
243
// SAFETY: SlruSegment owns all data and raw pointers point only to nodes owned by
// `probationary` or `protected` lists. Concurrent access is safe when wrapped in
// proper synchronization primitives.
unsafe impl<K: Send, V: Send, S: Send> Send for SlruSegment<K, V, S> {}

// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
// NOTE(review): `Sync` is claimed with only `K: Send, V: Send` (not `Sync`). That is
// sound only while no `&self` method exposes key/value data — in the impls below,
// `get`/`get_mut`/`put`/`remove` all take `&mut self`, and the `&self` accessors
// (`len`, `is_empty`, `current_size`, `metrics`, ...) return no `K`/`V` references.
// Confirm this invariant is preserved if new `&self` APIs are added.
unsafe impl<K: Send, V: Send, S: Sync> Sync for SlruSegment<K, V, S> {}
251
impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruSegment<K, V, S> {
    /// Creates a new SLRU segment from a configuration.
    ///
    /// This is the **recommended** way to create an SLRU segment. All configuration
    /// is specified through the [`SlruCacheConfig`] struct.
    ///
    /// The probationary list capacity is derived as `capacity - protected_capacity`,
    /// so the two lists together can never exceed the configured total.
    ///
    /// # Arguments
    ///
    /// * `config` - Configuration specifying capacity, protected capacity, and optional size limit
    /// * `hasher` - Hash builder for the internal HashMap
    ///
    /// # Panics
    ///
    /// Panics if `protected_capacity >= capacity`; the subtraction below needs a
    /// strictly positive probationary capacity.
    #[allow(dead_code)] // Used by concurrent module when feature is enabled
    pub(crate) fn init(config: SlruCacheConfig, hasher: S) -> Self {
        let capacity = config.capacity.get();
        let protected = config.protected_capacity.get();

        assert!(
            protected < capacity,
            "SlruCacheConfig invalid: protected_capacity ({}) must be strictly less than capacity ({})",
            protected,
            capacity
        );

        // The assert above guarantees capacity - protected >= 1, so unwrap is safe.
        let probationary_max_size = NonZeroUsize::new(capacity - protected).unwrap();

        SlruSegment {
            config,
            probationary: List::new(probationary_max_size),
            protected: List::new(config.protected_capacity),
            // Round the map capacity up to a power of two to reduce rehashing
            // as the cache fills toward `capacity`.
            map: HashMap::with_capacity_and_hasher(
                config.capacity.get().next_power_of_two(),
                hasher,
            ),
            metrics: SlruCacheMetrics::new(config.max_size, config.protected_capacity.get() as u64),
            current_size: 0,
            max_size: config.max_size,
        }
    }

    /// Returns the maximum number of key-value pairs the segment can hold.
    #[inline]
    pub(crate) fn cap(&self) -> NonZeroUsize {
        self.config.capacity
    }

    /// Returns the maximum size (entry count) of the protected segment.
    #[inline]
    pub(crate) fn protected_max_size(&self) -> NonZeroUsize {
        self.config.protected_capacity
    }

    /// Returns the current number of key-value pairs in the segment
    /// (the map indexes entries of both lists, so its length is the total).
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the segment contains no key-value pairs.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Returns the current total size of cached content.
    #[inline]
    pub(crate) fn current_size(&self) -> u64 {
        self.current_size
    }

    /// Returns the maximum content size the cache can hold.
    #[inline]
    pub(crate) fn max_size(&self) -> u64 {
        self.max_size
    }

    /// Estimates the size of a key-value pair in bytes for metrics tracking.
    ///
    /// Flat heuristic: shallow `size_of` of `K` and `V` plus a fixed 64-byte
    /// allowance for node/map overhead. Heap payloads behind pointers (e.g.
    /// `String` contents) are NOT counted — callers needing accurate byte
    /// accounting should use `put_with_size` with an explicit size.
    fn estimate_object_size(&self, _key: &K, _value: &V) -> u64 {
        mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64
    }

    /// Returns a reference to the metrics for this segment.
    #[inline]
    pub(crate) fn metrics(&self) -> &SlruCacheMetrics {
        &self.metrics
    }

    /// Moves an entry from the probationary segment to the protected segment.
    /// If the protected segment is full, the LRU item from protected is demoted to probationary.
    ///
    /// Returns a raw pointer to the entry in its new location.
    ///
    /// # Safety
    ///
    /// `node` must be a valid pointer to an entry currently linked in the
    /// probationary list (the `expect` below enforces this at runtime).
    unsafe fn promote_to_protected(
        &mut self,
        node: *mut ListEntry<(K, V)>,
    ) -> *mut ListEntry<(K, V)> {
        // Remove from probationary list first so the entry is never linked
        // into both lists at once.
        let boxed_entry = self
            .probationary
            .remove(node)
            .expect("Node should exist in probationary");

        // If protected segment is full, demote LRU protected item to probationary
        if self.protected.len() >= self.protected_max_size().get() {
            // We need to make room in probationary first if it's full,
            // otherwise the demoted item would overflow it.
            if self.probationary.len() >= self.probationary.cap().get() {
                // Evict LRU from probationary
                if let Some(old_entry) = self.probationary.remove_last() {
                    let old_ptr = Box::into_raw(old_entry);
                    let (old_key, _) = (*old_ptr).get_value();
                    // Get stored size and update current_size
                    if let Some((_, _, evicted_size)) = self.map.remove(old_key) {
                        self.current_size = self.current_size.saturating_sub(evicted_size);
                        self.metrics.record_probationary_eviction(evicted_size);
                    }
                    // Reclaim the allocation so the evicted entry is dropped.
                    let _ = Box::from_raw(old_ptr);
                }
            }
            self.demote_lru_protected();
        }

        // Get the raw pointer from the box; ownership transfers to the list below.
        let entry_ptr = Box::into_raw(boxed_entry);

        // Get the key from the entry for updating the map
        let (key_ref, _) = (*entry_ptr).get_value();

        // Update the map with new location and pointer (preserve size)
        if let Some(map_entry) = self.map.get_mut(key_ref) {
            map_entry.0 = entry_ptr;
            map_entry.1 = Location::Protected;
        }

        // Add to protected list using the pointer from the Box
        unsafe {
            self.protected.attach_from_other_list(entry_ptr);
        }

        entry_ptr
    }

    /// Demotes the least recently used item from the protected segment to the probationary segment.
    ///
    /// # Safety
    ///
    /// The raw pointer produced here is immediately re-linked into
    /// `probationary`, so it never outlives its owning list.
    unsafe fn demote_lru_protected(&mut self) {
        if let Some(lru_protected) = self.protected.remove_last() {
            let lru_ptr = Box::into_raw(lru_protected);
            let (lru_key, _) = (*lru_ptr).get_value();

            // Update the location and pointer in the map (preserve size)
            if let Some(entry) = self.map.get_mut(lru_key) {
                entry.0 = lru_ptr;
                entry.1 = Location::Probationary;
            }

            // Add to probationary list
            self.probationary.attach_from_other_list(lru_ptr);

            // Record demotion
            self.metrics.record_demotion();
        }
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// A hit in the probationary segment promotes the entry to the protected
    /// segment (the SLRU "second access" rule); a hit in the protected segment
    /// only refreshes recency. Both mutate internal state, hence `&mut self`.
    pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location, size) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                self.metrics.record_probationary_hit(size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected
                let (_, v) = (*entry_ptr).get_value();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                self.metrics.record_protected_hit(size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                let (_, v) = (*node).get_value();
                Some(v)
            },
        }
    }

    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// Same promotion/recency side effects as [`Self::get`]; only the returned
    /// reference differs (`&mut V` instead of `&V`).
    pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location, size) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                self.metrics.record_probationary_hit(size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected
                let (_, v) = (*entry_ptr).get_value_mut();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                self.metrics.record_protected_hit(size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                // SAFETY: node is still valid after move_to_front operation
                let (_, v) = (*node).get_value_mut();
                Some(v)
            },
        }
    }

    /// Records a cache miss for metrics tracking.
    ///
    /// Kept separate from `get` so callers (e.g. a simulation driver) decide
    /// when a miss counts and what object size to attribute to it.
    #[inline]
    pub(crate) fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }
}
499
impl<K: Hash + Eq + Clone, V, S: BuildHasher> SlruSegment<K, V, S> {
    /// Inserts a key-value pair into the segment.
    ///
    /// The entry's size is estimated with `estimate_object_size`; use
    /// `put_with_size` to supply an exact size instead.
    pub(crate) fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        V: Clone,
    {
        let object_size = self.estimate_object_size(&key, &value);
        self.put_with_size(key, value, object_size)
    }

    /// Insert a key-value pair with explicit size tracking.
    ///
    /// If the key is already cached, its value and size are updated in place
    /// (in whichever segment holds it) and the displaced pair is returned.
    /// Otherwise entries are evicted — probationary first, then protected —
    /// until both the entry-count limit (`cap`) and the byte limit
    /// (`max_size`) are satisfied; the new pair then lands in probationary.
    /// Returns the last pair evicted, if any.
    pub(crate) fn put_with_size(&mut self, key: K, value: V, size: u64) -> Option<(K, V)>
    where
        V: Clone,
    {
        // If key is already in the cache, update it in place
        if let Some(&(node, location, old_size)) = self.map.get(&key) {
            match location {
                Location::Probationary => unsafe {
                    // SAFETY: node comes from our map
                    self.probationary.move_to_front(node);
                    let old_entry = self.probationary.update(node, (key.clone(), value), true);
                    // Update size tracking: subtract old size, add new size
                    self.current_size = self.current_size.saturating_sub(old_size);
                    self.current_size += size;
                    // Update the size in the map
                    if let Some(map_entry) = self.map.get_mut(&key) {
                        map_entry.2 = size;
                    }
                    self.metrics.core.record_insertion(size);
                    return old_entry.0;
                },
                Location::Protected => unsafe {
                    // SAFETY: node comes from our map
                    self.protected.move_to_front(node);
                    let old_entry = self.protected.update(node, (key.clone(), value), true);
                    // Update size tracking: subtract old size, add new size
                    self.current_size = self.current_size.saturating_sub(old_size);
                    self.current_size += size;
                    // Update the size in the map
                    if let Some(map_entry) = self.map.get_mut(&key) {
                        map_entry.2 = size;
                    }
                    self.metrics.core.record_insertion(size);
                    return old_entry.0;
                },
            }
        }

        let mut evicted = None;

        // Evict while entry count limit OR size limit would be exceeded.
        // This mirrors LRU's eviction logic to properly respect max_size.
        while self.len() >= self.cap().get()
            || (self.current_size + size > self.config.max_size && !self.map.is_empty())
        {
            // If probationary segment has items, evict from there first
            if !self.probationary.is_empty() {
                if let Some(old_entry) = self.probationary.remove_last() {
                    unsafe {
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();
                        // Get the stored size from the map before removing;
                        // fall back to the estimate if the map is out of sync.
                        let evicted_size = self
                            .map
                            .get(&old_key)
                            .map(|(_, _, sz)| *sz)
                            .unwrap_or_else(|| self.estimate_object_size(&old_key, &old_value));
                        self.current_size = self.current_size.saturating_sub(evicted_size);
                        self.metrics.record_probationary_eviction(evicted_size);
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));
                        let _ = Box::from_raw(entry_ptr);
                    }
                } else {
                    break; // No more items to evict
                }
            } else if !self.protected.is_empty() {
                // If probationary is empty, evict from protected
                if let Some(old_entry) = self.protected.remove_last() {
                    unsafe {
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();
                        // Get the stored size from the map before removing
                        let evicted_size = self
                            .map
                            .get(&old_key)
                            .map(|(_, _, sz)| *sz)
                            .unwrap_or_else(|| self.estimate_object_size(&old_key, &old_value));
                        self.current_size = self.current_size.saturating_sub(evicted_size);
                        self.metrics.record_protected_eviction(evicted_size);
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));
                        let _ = Box::from_raw(entry_ptr);
                    }
                } else {
                    break; // No more items to evict
                }
            } else {
                break; // Both segments empty, nothing to evict
            }
        }

        // Add the new key-value pair to the probationary segment.
        // NOTE(review): `add_unchecked` bypasses the probationary list's own
        // capacity check; the loop above only enforces the *total* cap, so the
        // probationary list can exceed its nominal share while the protected
        // segment is under-filled — confirm this is the intended behavior.
        let node = self.probationary.add_unchecked((key.clone(), value));
        self.map.insert(key, (node, Location::Probationary, size));
        self.current_size += size;

        // Record insertion and update segment sizes
        self.metrics.core.record_insertion(size);
        self.metrics
            .update_segment_sizes(self.probationary.len() as u64, self.protected.len() as u64);

        evicted
    }

    /// Removes a key from the segment, returning the value if the key was present.
    ///
    /// Works for entries in either segment; the stored size is subtracted
    /// from `current_size`.
    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        // The map entry is removed first. The list removal below is expected
        // to succeed for every mapped node; a `None` there would mean the
        // map/list invariant was broken (and would leave `current_size` stale).
        let (node, location, removed_size) = self.map.remove(key)?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map and was just removed
                let boxed_entry = self.probationary.remove(node)?;
                let entry_ptr = Box::into_raw(boxed_entry);
                let (_, v_ref) = (*entry_ptr).get_value();
                let value = v_ref.clone();
                self.current_size = self.current_size.saturating_sub(removed_size);
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map and was just removed
                let boxed_entry = self.protected.remove(node)?;
                let entry_ptr = Box::into_raw(boxed_entry);
                let (_, v_ref) = (*entry_ptr).get_value();
                let value = v_ref.clone();
                self.current_size = self.current_size.saturating_sub(removed_size);
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
        }
    }

    /// Clears the segment, removing all key-value pairs.
    ///
    /// NOTE(review): accumulated metrics are not reset here — confirm that
    /// retaining hit/miss history across a `clear` is intended.
    pub(crate) fn clear(&mut self) {
        self.map.clear();
        self.probationary.clear();
        self.protected.clear();
        self.current_size = 0;
    }
}
657
658// Implement Debug for SlruSegment manually since it contains raw pointers
659impl<K, V, S> core::fmt::Debug for SlruSegment<K, V, S> {
660    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
661        f.debug_struct("SlruSegment")
662            .field("capacity", &self.config.capacity)
663            .field("protected_capacity", &self.config.protected_capacity)
664            .field("len", &self.map.len())
665            .finish()
666    }
667}
668
/// An implementation of a Segmented Least Recently Used (SLRU) cache.
///
/// The cache is divided into two segments:
/// - Probationary segment: Where new entries are initially placed
/// - Protected segment: Where frequently accessed entries are promoted to
///
/// When the cache reaches capacity, the least recently used entry from the
/// probationary segment is evicted. If the probationary segment is empty,
/// entries from the protected segment may be demoted back to probationary.
///
/// # Examples
///
/// ```
/// use cache_rs::slru::SlruCache;
/// use cache_rs::config::SlruCacheConfig;
/// use core::num::NonZeroUsize;
///
/// // Create an SLRU cache with a total capacity of 4,
/// // with a protected capacity of 2 (half protected, half probationary)
/// let config = SlruCacheConfig {
///     capacity: NonZeroUsize::new(4).unwrap(),
///     protected_capacity: NonZeroUsize::new(2).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = SlruCache::init(config, None);
///
/// // Add some items
/// cache.put("a", 1);
/// cache.put("b", 2);
/// cache.put("c", 3);
/// cache.put("d", 4);
///
/// // Access "a" to promote it to the protected segment
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add a new item, which will evict the least recently used item
/// // from the probationary segment (likely "b")
/// cache.put("e", 5);
/// assert_eq!(cache.get(&"b"), None);
/// ```
#[derive(Debug)]
pub struct SlruCache<K, V, S = DefaultHashBuilder> {
    // All behavior is delegated to the shared segment implementation,
    // which is reused by the concurrent cache variant as well.
    segment: SlruSegment<K, V, S>,
}
713
714impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruCache<K, V, S> {
715    /// Returns the maximum number of key-value pairs the cache can hold.
716    #[inline]
717    pub fn cap(&self) -> NonZeroUsize {
718        self.segment.cap()
719    }
720
721    /// Returns the maximum size of the protected segment.
722    #[inline]
723    pub fn protected_max_size(&self) -> NonZeroUsize {
724        self.segment.protected_max_size()
725    }
726
727    /// Returns the current number of key-value pairs in the cache.
728    #[inline]
729    pub fn len(&self) -> usize {
730        self.segment.len()
731    }
732
733    /// Returns `true` if the cache contains no key-value pairs.
734    #[inline]
735    pub fn is_empty(&self) -> bool {
736        self.segment.is_empty()
737    }
738
739    /// Returns the current total size of cached content.
740    #[inline]
741    pub fn current_size(&self) -> u64 {
742        self.segment.current_size()
743    }
744
745    /// Returns the maximum content size the cache can hold.
746    #[inline]
747    pub fn max_size(&self) -> u64 {
748        self.segment.max_size()
749    }
750
751    /// Returns a reference to the value corresponding to the key.
752    ///
753    /// The key may be any borrowed form of the cache's key type, but
754    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
755    /// the key type.
756    ///
757    /// If a value is returned from the probationary segment, it is promoted
758    /// to the protected segment.
759    #[inline]
760    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
761    where
762        K: Borrow<Q>,
763        Q: ?Sized + Hash + Eq,
764    {
765        self.segment.get(key)
766    }
767
    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If a value is returned from the probationary segment, it is promoted
    /// to the protected segment.
    ///
    /// Like [`SlruCache::get`], this counts as an access and updates
    /// recency/segment state.
    #[inline]
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get_mut(key)
    }
784
    /// Records a cache miss for metrics tracking (to be called by simulation system)
    ///
    /// `object_size` is the size of the object that missed; the call increments
    /// the `cache_misses` metric by one. This does not modify cache contents.
    #[inline]
    pub fn record_miss(&mut self, object_size: u64) {
        self.segment.record_miss(object_size);
    }
790}
791
impl<K: Hash + Eq + Clone, V, S: BuildHasher> SlruCache<K, V, S> {
    /// Inserts a key-value pair into the cache.
    ///
    /// If the cache already contained this key, the old value is replaced and returned.
    /// Otherwise, if the cache is at capacity, the least recently used item from the
    /// probationary segment will be evicted. If the probationary segment is empty,
    /// the least recently used item from the protected segment will be demoted to
    /// the probationary segment.
    ///
    /// The inserted key-value pair is always placed in the probationary segment.
    ///
    /// Returns `Some((key, value))` either for the replaced entry (when the key
    /// was already present) or for the entry evicted to make room; `None` when
    /// nothing was displaced.
    // NOTE(review): the `V: Clone` bound mirrors the underlying
    // `SlruSegment::put` requirement — confirm it is still needed there.
    #[inline]
    pub fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        V: Clone,
    {
        self.segment.put(key, value)
    }

    /// Insert a key-value pair with explicit size tracking.
    ///
    /// The `size` parameter specifies how much of `max_size` this entry consumes.
    /// Use `size=1` for count-based caches.
    ///
    /// Returns the replaced or evicted entry, as with [`SlruCache::put`].
    #[inline]
    pub fn put_with_size(&mut self, key: K, value: V, size: u64) -> Option<(K, V)>
    where
        V: Clone,
    {
        self.segment.put_with_size(key, value, size)
    }

    /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// Works on entries in either segment.
    #[inline]
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        self.segment.remove(key)
    }

    /// Clears the cache, removing all key-value pairs.
    ///
    /// Also resets the tracked content size (`current_size`) back to zero.
    #[inline]
    pub fn clear(&mut self) {
        self.segment.clear()
    }
}
843
844impl<K: Hash + Eq, V> SlruCache<K, V>
845where
846    V: Clone,
847{
848    /// Creates a new SLRU cache from a configuration.
849    ///
850    /// This is the **only** way to create an SLRU cache. All configuration
851    /// is specified through the [`SlruCacheConfig`] struct.
852    ///
853    /// # Arguments
854    ///
855    /// * `config` - Configuration specifying capacity, protected capacity, and optional size limit
856    /// * `hasher` - Optional custom hash builder. If `None`, uses the default.
857    ///
858    /// # Example
859    ///
860    /// ```
861    /// use cache_rs::SlruCache;
862    /// use cache_rs::config::SlruCacheConfig;
863    /// use core::num::NonZeroUsize;
864    ///
865    /// // Simple capacity-only cache with 20% protected segment
866    /// let config = SlruCacheConfig {
867    ///     capacity: NonZeroUsize::new(100).unwrap(),
868    ///     protected_capacity: NonZeroUsize::new(20).unwrap(),
869    ///     max_size: u64::MAX,
870    /// };
871    /// let mut cache: SlruCache<&str, i32> = SlruCache::init(config, None);
872    /// cache.put("key", 42);
873    ///
874    /// // Cache with size limit
875    /// let config = SlruCacheConfig {
876    ///     capacity: NonZeroUsize::new(1000).unwrap(),
877    ///     protected_capacity: NonZeroUsize::new(200).unwrap(),
878    ///     max_size: 10 * 1024 * 1024,  // 10MB
879    /// };
880    /// let cache: SlruCache<String, Vec<u8>> = SlruCache::init(config, None);
881    /// ```
882    pub fn init(
883        config: SlruCacheConfig,
884        hasher: Option<DefaultHashBuilder>,
885    ) -> SlruCache<K, V, DefaultHashBuilder> {
886        SlruCache {
887            segment: SlruSegment::init(config, hasher.unwrap_or_default()),
888        }
889    }
890}
891
// Metric reporting is delegated to the inner segment's metrics recorder.
impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for SlruCache<K, V, S> {
    /// Returns the collected cache metrics as a name → value map.
    fn metrics(&self) -> BTreeMap<String, f64> {
        // `segment.metrics()` yields the segment's metrics recorder; its own
        // `metrics()` renders the map (e.g. the `cache_misses` counter).
        self.segment.metrics().metrics()
    }

    /// Returns the algorithm name reported by the underlying segment.
    fn algorithm_name(&self) -> &'static str {
        self.segment.metrics().algorithm_name()
    }
}
901
#[cfg(test)]
mod tests {
    //! Unit tests for the `SlruCache` public API: insertion/eviction order,
    //! promotion on access, removal, clearing, size-aware tracking, metrics,
    //! and coarse-grained concurrent use behind a `Mutex`.
    extern crate std;
    use std::string::ToString;

    use super::*;
    use crate::config::SlruCacheConfig;
    use alloc::format;
    use alloc::string::String;

    /// Helper to create an SlruCache with the given capacity and protected capacity
    fn make_cache<K: Hash + Eq + Clone, V: Clone>(
        cap: usize,
        protected_cap: usize,
    ) -> SlruCache<K, V> {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            protected_capacity: NonZeroUsize::new(protected_cap).unwrap(),
            max_size: u64::MAX,
        };
        SlruCache::init(config, None)
    }

    #[test]
    fn test_slru_basic() {
        // Create a cache with capacity 4, with protected capacity 2
        // (2 probationary, 2 protected)
        let mut cache = make_cache(4, 2);

        // Add items to fill probationary segment
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Cache should be at capacity
        assert_eq!(cache.len(), 4);

        // Access "a" and "b" to promote them to protected segment
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"b"), Some(&2));

        // Add a new item "e", should evict "c" from probationary
        let evicted = cache.put("e", 5);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "c");
        assert_eq!(evicted_val, 3);

        // Add another item "f", should evict "d" from probationary
        let evicted = cache.put("f", 6);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "d");
        assert_eq!(evicted_val, 4);

        // Check presence
        assert_eq!(cache.get(&"a"), Some(&1)); // Protected
        assert_eq!(cache.get(&"b"), Some(&2)); // Protected
        assert_eq!(cache.get(&"c"), None); // Evicted
        assert_eq!(cache.get(&"d"), None); // Evicted
        assert_eq!(cache.get(&"e"), Some(&5)); // Probationary
        assert_eq!(cache.get(&"f"), Some(&6)); // Probationary
    }

    #[test]
    fn test_slru_update() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Update values: `put` on an existing key returns the old (key, value)
        assert_eq!(cache.put("a", 10).unwrap().1, 1);
        assert_eq!(cache.put("b", 20).unwrap().1, 2);

        // Check updated values
        assert_eq!(cache.get(&"a"), Some(&10));
        assert_eq!(cache.get(&"b"), Some(&20));
    }

    #[test]
    fn test_slru_remove() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Remove items — removal works from either segment
        assert_eq!(cache.remove(&"a"), Some(1)); // From protected
        assert_eq!(cache.remove(&"b"), Some(2)); // From probationary

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);

        // Check that removing non-existent item returns None
        assert_eq!(cache.remove(&"c"), None);
    }

    #[test]
    fn test_slru_clear() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.put("d", 4);

        // Clear the cache
        cache.clear();

        // Check that cache is empty
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), None);
    }

    #[test]
    fn test_slru_complex_values() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache = make_cache(4, 2);

        #[derive(Debug, Clone, PartialEq)]
        struct ComplexValue {
            id: usize,
            data: String,
        }

        // Add complex values
        cache.put(
            "a",
            ComplexValue {
                id: 1,
                data: "a-data".to_string(),
            },
        );
        cache.put(
            "b",
            ComplexValue {
                id: 2,
                data: "b-data".to_string(),
            },
        );

        // Modify a value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            value.id = 100;
            value.data = "a-modified".to_string();
        }

        // Check the modified value
        let a = cache.get(&"a").unwrap();
        assert_eq!(a.id, 100);
        assert_eq!(a.data, "a-modified");
    }

    #[test]
    fn test_slru_with_ratio() {
        // Verify cap()/protected_max_size() reflect the configured capacities.
        // NOTE(review): this comment previously referenced a `with_ratio`
        // constructor; construction now goes through `make_cache`/`init`.
        let mut cache = make_cache(4, 2);

        assert_eq!(cache.cap().get(), 4);
        assert_eq!(cache.protected_max_size().get(), 2);

        // Test basic functionality
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Fill the cache
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Add another item, should evict "b" from probationary
        let result = cache.put("e", 5);
        assert_eq!(result.unwrap().0, "b");

        // Check that protected items remain
        assert_eq!(cache.get(&"a"), Some(&1));
    }

    // Test that SlruSegment works correctly (internal tests)
    #[test]
    fn test_slru_segment_directly() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(4).unwrap(),
            protected_capacity: NonZeroUsize::new(2).unwrap(),
            max_size: u64::MAX,
        };
        let mut segment: SlruSegment<&str, i32, DefaultHashBuilder> =
            SlruSegment::init(config, DefaultHashBuilder::default());

        assert_eq!(segment.len(), 0);
        assert!(segment.is_empty());
        assert_eq!(segment.cap().get(), 4);
        assert_eq!(segment.protected_max_size().get(), 2);

        segment.put("a", 1);
        segment.put("b", 2);
        assert_eq!(segment.len(), 2);

        // Access to promote
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"b"), Some(&2));
    }

    #[test]
    fn test_slru_concurrent_access() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        // The cache itself is not thread-safe; a Mutex serializes all access.
        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100, 50)));
        let num_threads = 4;
        let ops_per_thread = 100;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    guard.put(key.clone(), i);
                    let _ = guard.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // 400 distinct keys were inserted into a capacity-100 cache,
        // so len() must never exceed the cap.
        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_slru_size_aware_tracking() {
        let mut cache = make_cache(10, 3);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), u64::MAX);

        cache.put_with_size("a", 1, 100);
        cache.put_with_size("b", 2, 200);
        cache.put_with_size("c", 3, 150);

        assert_eq!(cache.current_size(), 450);
        assert_eq!(cache.len(), 3);

        // Clear should reset size
        cache.clear();
        assert_eq!(cache.current_size(), 0);
    }

    #[test]
    fn test_slru_init_constructor() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(1000).unwrap(),
            protected_capacity: NonZeroUsize::new(300).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: SlruCache<String, i32> = SlruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
    }

    #[test]
    fn test_slru_with_limits_constructor() {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(100).unwrap(),
            protected_capacity: NonZeroUsize::new(30).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: SlruCache<String, String> = SlruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
        assert_eq!(cache.cap().get(), 100);
        assert_eq!(cache.protected_max_size().get(), 30);
    }

    #[test]
    fn test_slru_record_miss() {
        use crate::metrics::CacheMetrics;

        let mut cache: SlruCache<String, i32> = make_cache(100, 30);

        // Each call should bump the `cache_misses` counter by one.
        cache.record_miss(100);
        cache.record_miss(200);

        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_misses").unwrap(), &2.0);
    }

    #[test]
    fn test_slru_get_mut() {
        let mut cache: SlruCache<String, i32> = make_cache(100, 30);

        cache.put("key".to_string(), 10);
        assert_eq!(cache.get(&"key".to_string()), Some(&10));

        // Modify via get_mut
        if let Some(val) = cache.get_mut(&"key".to_string()) {
            *val = 42;
        }
        assert_eq!(cache.get(&"key".to_string()), Some(&42));

        // get_mut on missing key returns None
        assert!(cache.get_mut(&"missing".to_string()).is_none());
    }

    /// Helper to create an SlruCache with max_size limit
    fn make_cache_with_max_size(
        cap: usize,
        protected: usize,
        max_size: u64,
    ) -> SlruCache<String, i32> {
        let config = SlruCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            protected_capacity: NonZeroUsize::new(protected).unwrap(),
            max_size,
        };
        SlruCache::init(config, None)
    }

    /// Test that SLRU evicts items when max_size would be exceeded.
    /// This test verifies the cache respects the max_size limit, not just entry count.
    #[test]
    fn test_slru_max_size_triggers_eviction() {
        // Create cache with large entry capacity (100) but small max_size (100 bytes)
        // This means size limit should be the constraint, not entry count
        let mut cache = make_cache_with_max_size(100, 30, 100);

        // Insert items that fit within max_size
        cache.put_with_size("a".to_string(), 1, 30); // total: 30
        cache.put_with_size("b".to_string(), 2, 30); // total: 60
        cache.put_with_size("c".to_string(), 3, 30); // total: 90

        assert_eq!(cache.len(), 3, "Should have 3 items");
        assert_eq!(cache.current_size(), 90, "Size should be 90");

        // Insert item that would exceed max_size (90 + 20 = 110 > 100)
        // This SHOULD trigger eviction to stay within max_size
        cache.put_with_size("d".to_string(), 4, 20);

        // Cache should evict to stay within max_size
        // The LRU item ("a") should be evicted, leaving b, c, d
        // Total size should be: 30 + 30 + 20 = 80 (after evicting "a")
        assert!(
            cache.current_size() <= 100,
            "current_size {} exceeds max_size 100",
            cache.current_size()
        );

        // Verify that "a" was evicted
        // NOTE(review): the second disjunct was already asserted just above, so
        // this assertion can never fail. Tighten it to
        // `cache.get(&"a".to_string()).is_none()` once the eviction choice of
        // SlruSegment under max_size pressure is pinned down.
        assert!(
            cache.get(&"a".to_string()).is_none() || cache.current_size() <= 100,
            "Either 'a' should be evicted OR size should be within limits"
        );
    }

    /// Test that max_size eviction works even with larger objects
    #[test]
    fn test_slru_max_size_eviction_large_objects() {
        // Create cache with max_size = 500 bytes, large entry capacity
        let mut cache = make_cache_with_max_size(1000, 200, 500);

        // Insert objects that each take 100 bytes
        for i in 0..5 {
            cache.put_with_size(format!("key{}", i), i, 100);
        }

        assert_eq!(cache.len(), 5);
        assert_eq!(cache.current_size(), 500, "Should have exactly 500 bytes");

        // Insert one more - should trigger eviction to stay within 500
        cache.put_with_size("overflow".to_string(), 99, 100);

        // Expected: oldest item evicted, size still <= 500
        assert!(
            cache.current_size() <= 500,
            "SLRU BUG: current_size {} exceeds max_size 500 after insert. \
             SLRU must evict items to respect max_size limit.",
            cache.current_size()
        );
    }
}