cache_rs/slru.rs

//! Segmented Least Recently Used (SLRU) cache implementation.
//!
//! The SLRU cache divides the cache into two segments:
//! - Probationary segment: where new entries are initially placed
//! - Protected segment: where frequently accessed entries are promoted
//!
//! This implementation is more scan-resistant than a standard LRU cache: it
//! protects frequently accessed items from being evicted by one-time scans
//! through the data.
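//!
//! A minimal sketch of the scan-resistant behavior (it mirrors the struct-level
//! example on [`SlruCache`] below; the exact eviction order is an implementation
//! detail):
//!
//! ```
//! use cache_rs::slru::SlruCache;
//! use core::num::NonZeroUsize;
//!
//! // Total capacity 4, protected capacity 2.
//! let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
//!
//! cache.put(0, "hot");
//! // Accessing key 0 promotes it into the protected segment.
//! assert_eq!(cache.get(&0), Some(&"hot"));
//!
//! // A one-time scan only churns through the probationary segment...
//! for i in 1..10 {
//!     cache.put(i, "scan");
//! }
//! // ...so the frequently accessed entry survives.
//! assert_eq!(cache.get(&0), Some(&"hot"));
//! ```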

extern crate alloc;

use crate::config::SlruCacheConfig;
use crate::list::{Entry, List};
use crate::metrics::{CacheMetrics, SlruCacheMetrics};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::mem;
use core::num::NonZeroUsize;

#[cfg(feature = "hashbrown")]
use hashbrown::hash_map::DefaultHashBuilder;
#[cfg(feature = "hashbrown")]
use hashbrown::HashMap;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;
#[cfg(not(feature = "hashbrown"))]
use std::collections::HashMap;

/// Entry location within the SLRU cache
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Location {
    /// Entry is in the probationary segment
    Probationary,
    /// Entry is in the protected segment
    Protected,
}

/// An implementation of a Segmented Least Recently Used (SLRU) cache.
///
/// The cache is divided into two segments:
/// - Probationary segment: where new entries are initially placed
/// - Protected segment: where frequently accessed entries are promoted
///
/// When the cache reaches capacity, the least recently used entry from the
/// probationary segment is evicted. If the probationary segment is empty, the
/// least recently used entry from the protected segment is evicted instead.
/// When the protected segment is full, its least recently used entry is
/// demoted back to the probationary segment.
///
/// # Examples
///
/// ```
/// use cache_rs::slru::SlruCache;
/// use core::num::NonZeroUsize;
///
/// // Create an SLRU cache with a total capacity of 4,
/// // with a protected capacity of 2 (half protected, half probationary)
/// let mut cache = SlruCache::new(
///     NonZeroUsize::new(4).unwrap(),
///     NonZeroUsize::new(2).unwrap()
/// );
///
/// // Add some items
/// cache.put("a", 1);
/// cache.put("b", 2);
/// cache.put("c", 3);
/// cache.put("d", 4);
///
/// // Access "a" to promote it to the protected segment
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add a new item, which evicts the least recently used item
/// // from the probationary segment ("b" in this case)
/// cache.put("e", 5);
/// assert_eq!(cache.get(&"b"), None);
/// ```
#[derive(Debug)]
pub struct SlruCache<K, V, S = DefaultHashBuilder> {
    /// Configuration for the SLRU cache
    config: SlruCacheConfig,

    /// The probationary list holding newer or less frequently accessed items
    probationary: List<(K, V)>,

    /// The protected list holding frequently accessed items
    protected: List<(K, V)>,

    /// A hash map mapping keys to entries in either the probationary or protected list
    #[allow(clippy::type_complexity)]
    map: HashMap<K, (*mut Entry<(K, V)>, Location), S>,

    /// Metrics for tracking cache performance and segment behavior
    metrics: SlruCacheMetrics,
}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruCache<K, V, S> {
    /// Creates a new SLRU cache with the specified capacity and hash builder.
    ///
    /// # Parameters
    ///
    /// - `total_cap`: The total capacity of the cache. Must be a non-zero value.
    /// - `protected_cap`: The maximum capacity of the protected segment.
    ///   Must be a non-zero value strictly less than `total_cap`, so that the
    ///   probationary segment has non-zero capacity.
    /// - `hash_builder`: The hash builder to use for the underlying hash map.
    ///
    /// # Panics
    ///
    /// Panics if `protected_cap` is greater than or equal to `total_cap`.
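    ///
    /// # Examples
    ///
    /// A minimal sketch using the standard-library `RandomState`; any
    /// `BuildHasher` implementation should work here.
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let mut cache: SlruCache<&str, i32, RandomState> = SlruCache::with_hasher(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    ///     RandomState::new(),
    /// );
    /// cache.put("a", 1);
    /// assert_eq!(cache.get(&"a"), Some(&1));
    /// ```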
    pub fn with_hasher(
        total_cap: NonZeroUsize,
        protected_cap: NonZeroUsize,
        hash_builder: S,
    ) -> Self {
        let config = SlruCacheConfig::new(total_cap, protected_cap);

        let probationary_max_size =
            NonZeroUsize::new(config.capacity().get() - config.protected_capacity().get()).unwrap();

        let max_cache_size_bytes = config.capacity().get() as u64 * 128; // Estimate based on capacity

        SlruCache {
            config,
            probationary: List::new(probationary_max_size),
            protected: List::new(config.protected_capacity()),
            map: HashMap::with_capacity_and_hasher(
                config.capacity().get().next_power_of_two(),
                hash_builder,
            ),
            metrics: SlruCacheMetrics::new(
                max_cache_size_bytes,
                config.protected_capacity().get() as u64,
            ),
        }
    }

    /// Returns the maximum number of key-value pairs the cache can hold.
    pub fn cap(&self) -> NonZeroUsize {
        self.config.capacity()
    }

    /// Returns the maximum size of the protected segment.
    pub fn protected_max_size(&self) -> NonZeroUsize {
        self.config.protected_capacity()
    }

    /// Returns the current number of key-value pairs in the cache.
    pub fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the cache contains no key-value pairs.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Estimates the size of a key-value pair in bytes for metrics tracking
    fn estimate_object_size(&self, _key: &K, _value: &V) -> u64 {
        // Simple estimation: key size + value size + overhead for pointers and metadata
        mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64
    }

    /// Moves an entry from the probationary segment to the protected segment.
    /// If the protected segment is full, the LRU item from protected is demoted to probationary.
    ///
    /// Returns a raw pointer to the entry in its new location.
    unsafe fn promote_to_protected(&mut self, node: *mut Entry<(K, V)>) -> *mut Entry<(K, V)> {
        // Remove from probationary list - this removes the node and returns it as a Box
        let boxed_entry = self
            .probationary
            .remove(node)
            .expect("Node should exist in probationary");

        // If protected segment is full, demote LRU protected item to probationary
        if self.protected.len() >= self.protected_max_size().get() {
            // We need to make room in probationary first if it's full
            if self.probationary.len() >= self.probationary.cap().get() {
                // Evict LRU from probationary
                if let Some(old_entry) = self.probationary.remove_last() {
                    let old_ptr = Box::into_raw(old_entry);
                    let (old_key, _) = (*old_ptr).get_value();
                    self.map.remove(old_key);
                    let _ = Box::from_raw(old_ptr);
                }
            }
            self.demote_lru_protected();
        }

        // Get the raw pointer from the box - this should be the same as the original node pointer
        let entry_ptr = Box::into_raw(boxed_entry);

        // Get the key from the entry for updating the map
        let (key_ref, _) = (*entry_ptr).get_value();

        // Update the map with new location and pointer
        if let Some(map_entry) = self.map.get_mut(key_ref) {
            map_entry.0 = entry_ptr;
            map_entry.1 = Location::Protected;
        }

        // Add to protected list using the pointer from the Box
        unsafe {
            self.protected.attach_from_other_list(entry_ptr);
        }

        entry_ptr
    }

    /// Demotes the least recently used item from the protected segment to the probationary segment.
    unsafe fn demote_lru_protected(&mut self) {
        if let Some(lru_protected) = self.protected.remove_last() {
            let lru_ptr = Box::into_raw(lru_protected);
            let (lru_key, _) = (*lru_ptr).get_value();

            // Update the location and pointer in the map
            if let Some(entry) = self.map.get_mut(lru_key) {
                entry.0 = lru_ptr;
                entry.1 = Location::Probationary;
            }

            // Add to probationary list
            self.probationary.attach_from_other_list(lru_ptr);

            // Record demotion
            self.metrics.record_demotion();
        }
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If the entry is found in the probationary segment, it is promoted
    /// to the protected segment.
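    ///
    /// # Examples
    ///
    /// A small sketch of promotion on access (same constructor as the
    /// struct-level example):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    /// );
    /// cache.put("a", 1);
    ///
    /// // Hit in the probationary segment; "a" is promoted to protected.
    /// assert_eq!(cache.get(&"a"), Some(&1));
    /// // Missing keys simply return `None`.
    /// assert_eq!(cache.get(&"b"), None);
    /// ```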
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_probationary_hit(object_size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected, which ensures
                // it points to a valid entry in the protected list
                let (_, v) = (*entry_ptr).get_value();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_protected_hit(object_size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                let (_, v) = (*node).get_value();
                Some(v)
            },
        }
    }

    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If the entry is found in the probationary segment, it is promoted
    /// to the protected segment.
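    ///
    /// # Examples
    ///
    /// A brief sketch of in-place mutation (same constructor as the
    /// struct-level example):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    /// );
    /// cache.put("a", 1);
    ///
    /// // Mutate the value through the returned reference; this also promotes "a".
    /// if let Some(v) = cache.get_mut(&"a") {
    ///     *v += 10;
    /// }
    /// assert_eq!(cache.get(&"a"), Some(&11));
    /// ```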
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_probationary_hit(object_size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected, which ensures
                // it points to a valid entry in the protected list
                let (_, v) = (*entry_ptr).get_value_mut();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_protected_hit(object_size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                // SAFETY: node is still valid after move_to_front operation
                let (_, v) = (*node).get_value_mut();
                Some(v)
            },
        }
    }

    /// Inserts a key-value pair into the cache.
    ///
    /// If the cache already contained this key, the old value is replaced and returned.
    /// Otherwise, if the cache is at capacity, the least recently used item from the
    /// probationary segment is evicted. If the probationary segment is empty, the
    /// least recently used item from the protected segment is evicted instead.
    ///
    /// The inserted key-value pair is always placed in the probationary segment.
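    ///
    /// # Examples
    ///
    /// A minimal sketch of the return values (total capacity 2, protected
    /// capacity 1, so the probationary segment holds a single entry):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(
    ///     NonZeroUsize::new(2).unwrap(),
    ///     NonZeroUsize::new(1).unwrap(),
    /// );
    /// assert_eq!(cache.put("a", 1), None);
    /// assert_eq!(cache.put("b", 2), None);
    /// // The cache is full; the LRU probationary entry ("a") is evicted and returned.
    /// assert_eq!(cache.put("c", 3), Some(("a", 1)));
    /// // Re-inserting an existing key returns the replaced pair.
    /// assert_eq!(cache.put("c", 30).unwrap().1, 3);
    /// ```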
    pub fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        K: Clone,
    {
        let object_size = self.estimate_object_size(&key, &value);

        // If key is already in the cache, update it in place
        if let Some(&(node, location)) = self.map.get(&key) {
            match location {
                Location::Probationary => unsafe {
                    // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                    self.probationary.move_to_front(node);
                    let old_entry = self.probationary.update(node, (key.clone(), value), true);

                    // Record insertion (update)
                    self.metrics.core.record_insertion(object_size);

                    return old_entry.0;
                },
                Location::Protected => unsafe {
                    // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                    self.protected.move_to_front(node);
                    let old_entry = self.protected.update(node, (key.clone(), value), true);

                    // Record insertion (update)
                    self.metrics.core.record_insertion(object_size);

                    return old_entry.0;
                },
            }
        }

        let mut evicted = None;

        // Check if total cache is at capacity
        if self.len() >= self.cap().get() {
            // If probationary segment has items, evict from there first
            if !self.probationary.is_empty() {
                if let Some(old_entry) = self.probationary.remove_last() {
                    unsafe {
                        // SAFETY: old_entry comes from probationary.remove_last(), so it's a valid Box
                        // that we own. Converting to raw pointer is safe for temporary access.
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();

                        // Record probationary eviction
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_probationary_eviction(evicted_size);

                        // Remove from map
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));

                        // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                        let _ = Box::from_raw(entry_ptr);
                    }
                }
            } else if !self.protected.is_empty() {
                // If probationary is empty, evict from protected
                if let Some(old_entry) = self.protected.remove_last() {
                    unsafe {
                        // SAFETY: old_entry comes from protected.remove_last(), so it's a valid Box
                        // that we own. Converting to raw pointer is safe for temporary access.
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();

                        // Record protected eviction
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_protected_eviction(evicted_size);

                        // Remove from map
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));

                        // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                        let _ = Box::from_raw(entry_ptr);
                    }
                }
            }
        }

        // Add the new key-value pair to the probationary segment
        if self.len() < self.cap().get() {
            // Total cache has space, allow probationary to exceed its capacity
            let node = self.probationary.add_unchecked((key.clone(), value));
            self.map.insert(key, (node, Location::Probationary));
        } else {
            // Total cache is full, try to add normally (may fail due to probationary capacity)
            if let Some(node) = self.probationary.add((key.clone(), value.clone())) {
                self.map.insert(key, (node, Location::Probationary));
            } else {
                // Probationary is at capacity, need to make space
                if let Some(old_entry) = self.probationary.remove_last() {
                    unsafe {
                        // SAFETY: old_entry comes from probationary.remove_last(), so it's a valid Box
                        // that we own. Converting to raw pointer is safe for temporary access.
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();

                        // Record probationary eviction
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_probationary_eviction(evicted_size);

                        // Remove from map
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));

                        // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                        let _ = Box::from_raw(entry_ptr);
                    }
                }

                // Try again after making space
                if let Some(node) = self.probationary.add((key.clone(), value)) {
                    self.map.insert(key, (node, Location::Probationary));
                }
            }
        }

        // Record insertion and update segment sizes
        self.metrics.core.record_insertion(object_size);
        self.metrics
            .update_segment_sizes(self.probationary.len() as u64, self.protected.len() as u64);

        evicted
    }

    /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
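    ///
    /// # Examples
    ///
    /// A short sketch (same constructor as the struct-level example):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    /// );
    /// cache.put("a", 1);
    ///
    /// assert_eq!(cache.remove(&"a"), Some(1));
    /// assert_eq!(cache.remove(&"a"), None);
    /// ```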
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        let (node, location) = self.map.remove(key)?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map and was just removed, so probationary.remove is safe
                let boxed_entry = self.probationary.remove(node)?;
                // SAFETY: boxed_entry is a valid Box we own, converting to raw pointer for temporary access
                let entry_ptr = Box::into_raw(boxed_entry);
                let value = (*entry_ptr).get_value().1.clone();
                // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map and was just removed, so protected.remove is safe
                let boxed_entry = self.protected.remove(node)?;
                // SAFETY: boxed_entry is a valid Box we own, converting to raw pointer for temporary access
                let entry_ptr = Box::into_raw(boxed_entry);
                let value = (*entry_ptr).get_value().1.clone();
                // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
        }
    }

    /// Clears the cache, removing all key-value pairs.
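    ///
    /// # Examples
    ///
    /// A quick sketch (same constructor as the struct-level example):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    /// );
    /// cache.put("a", 1);
    /// cache.clear();
    /// assert!(cache.is_empty());
    /// ```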
    pub fn clear(&mut self) {
        self.map.clear();
        self.probationary.clear();
        self.protected.clear();
    }

    /// Records a cache miss for metrics tracking (to be called by simulation system)
    pub fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }
}

impl<K: Hash + Eq, V> SlruCache<K, V>
where
    V: Clone,
{
    /// Creates a new SLRU cache with the specified total capacity and protected capacity,
    /// using the default hash builder.
    ///
    /// The total capacity must be strictly greater than the protected capacity, so that
    /// the probationary segment has non-zero capacity.
    ///
    /// # Panics
    ///
    /// Panics if `protected_capacity` is greater than or equal to `capacity`.
    pub fn new(
        capacity: NonZeroUsize,
        protected_capacity: NonZeroUsize,
    ) -> SlruCache<K, V, DefaultHashBuilder> {
        let config = SlruCacheConfig::new(capacity, protected_capacity);
        SlruCache::with_hasher(
            config.capacity(),
            config.protected_capacity(),
            DefaultHashBuilder::default(),
        )
    }
}

impl<K: Hash + Eq, V, S: BuildHasher> CacheMetrics for SlruCache<K, V, S> {
    fn metrics(&self) -> BTreeMap<String, f64> {
        self.metrics.metrics()
    }

    fn algorithm_name(&self) -> &'static str {
        self.metrics.algorithm_name()
    }
}

#[cfg(test)]
mod tests {
    extern crate std;
    use std::string::ToString;

    use super::*;
    use alloc::string::String;

    #[test]
    fn test_slru_basic() {
        // Create a cache with capacity 4, with protected capacity 2
        // (2 probationary, 2 protected)
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items to fill probationary segment
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Cache should be at capacity
        assert_eq!(cache.len(), 4);

        // Access "a" and "b" to promote them to protected segment
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"b"), Some(&2));

        // Add a new item "e", should evict "c" from probationary
        let evicted = cache.put("e", 5);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "c");
        assert_eq!(evicted_val, 3);

        // Add another item "f", should evict "d" from probationary
        let evicted = cache.put("f", 6);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "d");
        assert_eq!(evicted_val, 4);

        // Check presence
        assert_eq!(cache.get(&"a"), Some(&1)); // Protected
        assert_eq!(cache.get(&"b"), Some(&2)); // Protected
        assert_eq!(cache.get(&"c"), None); // Evicted
        assert_eq!(cache.get(&"d"), None); // Evicted
        assert_eq!(cache.get(&"e"), Some(&5)); // Probationary
        assert_eq!(cache.get(&"f"), Some(&6)); // Probationary
    }

    #[test]
    fn test_slru_update() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Update values
        assert_eq!(cache.put("a", 10).unwrap().1, 1);
        assert_eq!(cache.put("b", 20).unwrap().1, 2);

        // Check updated values
        assert_eq!(cache.get(&"a"), Some(&10));
        assert_eq!(cache.get(&"b"), Some(&20));
    }

    #[test]
    fn test_slru_remove() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Remove items
        assert_eq!(cache.remove(&"a"), Some(1)); // From protected
        assert_eq!(cache.remove(&"b"), Some(2)); // From probationary

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);

        // Check that removing non-existent item returns None
        assert_eq!(cache.remove(&"c"), None);
    }

    #[test]
    fn test_slru_clear() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.put("d", 4);

        // Clear the cache
        cache.clear();

        // Check that cache is empty
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), None);
    }

    #[test]
    fn test_slru_complex_values() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        #[derive(Debug, Clone, PartialEq)]
        struct ComplexValue {
            id: usize,
            data: String,
        }

        // Add complex values
        cache.put(
            "a",
            ComplexValue {
                id: 1,
                data: "a-data".to_string(),
            },
        );
        cache.put(
            "b",
            ComplexValue {
                id: 2,
                data: "b-data".to_string(),
            },
        );

        // Modify a value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            value.id = 100;
            value.data = "a-modified".to_string();
        }

        // Check the modified value
        let a = cache.get(&"a").unwrap();
        assert_eq!(a.id, 100);
        assert_eq!(a.data, "a-modified");
    }

    #[test]
    fn test_slru_with_ratio() {
        // Test constructing with explicit total and protected capacities
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        assert_eq!(cache.cap().get(), 4);
        assert_eq!(cache.protected_max_size().get(), 2);

        // Test basic functionality
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Fill the cache
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Add another item, should evict "b" from probationary
        let result = cache.put("e", 5);
        assert_eq!(result.unwrap().0, "b");

        // Check that protected items remain
        assert_eq!(cache.get(&"a"), Some(&1));
    }
}