cache_rs/slru.rs

//! Segmented Least Recently Used (SLRU) cache implementation.
//!
//! The SLRU cache divides its capacity into two segments:
//! - Probationary segment: where new entries are initially placed
//! - Protected segment: where frequently accessed entries are promoted to
//!
//! This makes the cache scan-resistant: unlike a plain LRU, frequently
//! accessed items are protected from being evicted by one-time scans
//! through the data.
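//!
//! # Example
//!
//! A minimal usage sketch:
//!
//! ```
//! use cache_rs::slru::SlruCache;
//! use core::num::NonZeroUsize;
//!
//! // Total capacity 4, of which up to 2 entries may be protected.
//! let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
//! cache.put("a", 1);                     // inserted into the probationary segment
//! assert_eq!(cache.get(&"a"), Some(&1)); // hit promotes "a" to the protected segment
//! ```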

extern crate alloc;

use crate::config::SlruCacheConfig;
use crate::list::{Entry, List};
use crate::metrics::{CacheMetrics, SlruCacheMetrics};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::mem;
use core::num::NonZeroUsize;

#[cfg(feature = "hashbrown")]
use hashbrown::hash_map::DefaultHashBuilder;
#[cfg(feature = "hashbrown")]
use hashbrown::HashMap;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;
#[cfg(not(feature = "hashbrown"))]
use std::collections::HashMap;

/// Entry location within the SLRU cache
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Location {
    /// Entry is in the probationary segment
    Probationary,
    /// Entry is in the protected segment
    Protected,
}

/// Internal SLRU segment containing the actual cache algorithm.
///
/// This is shared between `SlruCache` (single-threaded) and
/// `ConcurrentSlruCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field. These pointers
/// are always valid as long as:
/// - The pointer was obtained from `probationary.add()` or `protected.add()`
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct SlruSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the SLRU cache
    config: SlruCacheConfig,

    /// The probationary list holding newer or less frequently accessed items
    probationary: List<(K, V)>,

    /// The protected list holding frequently accessed items
    protected: List<(K, V)>,

    /// A hash map mapping keys to entries in either the probationary or protected list
    #[allow(clippy::type_complexity)]
    map: HashMap<K, (*mut Entry<(K, V)>, Location), S>,

    /// Metrics for tracking cache performance and segment behavior
    metrics: SlruCacheMetrics,
}

// SAFETY: SlruSegment owns all data and raw pointers point only to nodes owned by
// `probationary` or `protected` lists. Concurrent access is safe when wrapped in
// proper synchronization primitives.
unsafe impl<K: Send, V: Send, S: Send> Send for SlruSegment<K, V, S> {}

// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
unsafe impl<K: Send, V: Send, S: Sync> Sync for SlruSegment<K, V, S> {}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruSegment<K, V, S> {
    /// Creates a new SLRU segment with the specified capacity and hash builder.
    pub(crate) fn with_hasher(
        total_cap: NonZeroUsize,
        protected_cap: NonZeroUsize,
        hash_builder: S,
    ) -> Self {
        let config = SlruCacheConfig::new(total_cap, protected_cap);

        let probationary_max_size =
            NonZeroUsize::new(config.capacity().get() - config.protected_capacity().get()).unwrap();

        // Rough byte budget for metrics: assume about 128 bytes per cached entry.
        let max_cache_size_bytes = config.capacity().get() as u64 * 128;

        SlruSegment {
            config,
            probationary: List::new(probationary_max_size),
            protected: List::new(config.protected_capacity()),
            map: HashMap::with_capacity_and_hasher(
                config.capacity().get().next_power_of_two(),
                hash_builder,
            ),
            metrics: SlruCacheMetrics::new(
                max_cache_size_bytes,
                config.protected_capacity().get() as u64,
            ),
        }
    }

    /// Returns the maximum number of key-value pairs the segment can hold.
    #[inline]
    pub(crate) fn cap(&self) -> NonZeroUsize {
        self.config.capacity()
    }

    /// Returns the maximum size of the protected segment.
    #[inline]
    pub(crate) fn protected_max_size(&self) -> NonZeroUsize {
        self.config.protected_capacity()
    }

    /// Returns the current number of key-value pairs in the segment.
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the segment contains no key-value pairs.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Estimates the size of a key-value pair in bytes for metrics tracking.
    ///
    /// This is a shallow estimate: the stack sizes of `K` and `V` plus a fixed
    /// 64-byte allowance for per-entry bookkeeping. Heap-allocated contents are
    /// not counted.
    fn estimate_object_size(&self, _key: &K, _value: &V) -> u64 {
        mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64
    }

    /// Returns a reference to the metrics for this segment.
    #[inline]
    pub(crate) fn metrics(&self) -> &SlruCacheMetrics {
        &self.metrics
    }

    /// Moves an entry from the probationary segment to the protected segment.
    /// If the protected segment is full, the LRU item from protected is demoted to probationary.
    ///
    /// Returns a raw pointer to the entry in its new location.
    unsafe fn promote_to_protected(&mut self, node: *mut Entry<(K, V)>) -> *mut Entry<(K, V)> {
        // Remove from probationary list
        let boxed_entry = self
            .probationary
            .remove(node)
            .expect("Node should exist in probationary");

        // If protected segment is full, demote LRU protected item to probationary
        if self.protected.len() >= self.protected_max_size().get() {
            // We need to make room in probationary first if it's full
            if self.probationary.len() >= self.probationary.cap().get() {
                // Evict LRU from probationary
                if let Some(old_entry) = self.probationary.remove_last() {
                    let old_ptr = Box::into_raw(old_entry);
                    let (old_key, _) = (*old_ptr).get_value();
                    self.map.remove(old_key);
                    let _ = Box::from_raw(old_ptr);
                }
            }
            self.demote_lru_protected();
        }

        // Get the raw pointer from the box
        let entry_ptr = Box::into_raw(boxed_entry);

        // Get the key from the entry for updating the map
        let (key_ref, _) = (*entry_ptr).get_value();

        // Update the map with new location and pointer
        if let Some(map_entry) = self.map.get_mut(key_ref) {
            map_entry.0 = entry_ptr;
            map_entry.1 = Location::Protected;
        }

        // Add to protected list using the pointer from the Box
        unsafe {
            self.protected.attach_from_other_list(entry_ptr);
        }

        entry_ptr
    }

    /// Demotes the least recently used item from the protected segment to the probationary segment.
    unsafe fn demote_lru_protected(&mut self) {
        if let Some(lru_protected) = self.protected.remove_last() {
            let lru_ptr = Box::into_raw(lru_protected);
            let (lru_key, _) = (*lru_ptr).get_value();

            // Update the location and pointer in the map
            if let Some(entry) = self.map.get_mut(lru_key) {
                entry.0 = lru_ptr;
                entry.1 = Location::Probationary;
            }

            // Add to probationary list
            self.probationary.attach_from_other_list(lru_ptr);

            // Record demotion
            self.metrics.record_demotion();
        }
    }

    /// Returns a reference to the value corresponding to the key.
    pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_probationary_hit(object_size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected
                let (_, v) = (*entry_ptr).get_value();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_protected_hit(object_size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                let (_, v) = (*node).get_value();
                Some(v)
            },
        }
    }

    /// Returns a mutable reference to the value corresponding to the key.
    pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let (node, location) = self.map.get(key).copied()?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our probationary list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_probationary_hit(object_size);

                // Promote from probationary to protected
                let entry_ptr = self.promote_to_protected(node);

                // Record promotion
                self.metrics.record_promotion();

                // Update segment sizes
                self.metrics.update_segment_sizes(
                    self.probationary.len() as u64,
                    self.protected.len() as u64,
                );

                // SAFETY: entry_ptr is the return value from promote_to_protected
                let (_, v) = (*entry_ptr).get_value_mut();
                Some(v)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our protected list
                let (key_ref, value) = (*node).get_value();
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.record_protected_hit(object_size);

                // Already protected, just move to MRU position
                self.protected.move_to_front(node);
                // SAFETY: node is still valid after move_to_front operation
                let (_, v) = (*node).get_value_mut();
                Some(v)
            },
        }
    }

    /// Records a cache miss for metrics tracking
    #[inline]
    pub(crate) fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }
}

impl<K: Hash + Eq + Clone, V, S: BuildHasher> SlruSegment<K, V, S> {
    /// Inserts a key-value pair into the segment.
    pub(crate) fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        V: Clone,
    {
        let object_size = self.estimate_object_size(&key, &value);

        // If key is already in the cache, update it in place
        if let Some(&(node, location)) = self.map.get(&key) {
            match location {
                Location::Probationary => unsafe {
                    // SAFETY: node comes from our map
                    self.probationary.move_to_front(node);
                    let old_entry = self.probationary.update(node, (key.clone(), value), true);
                    self.metrics.core.record_insertion(object_size);
                    return old_entry.0;
                },
                Location::Protected => unsafe {
                    // SAFETY: node comes from our map
                    self.protected.move_to_front(node);
                    let old_entry = self.protected.update(node, (key.clone(), value), true);
                    self.metrics.core.record_insertion(object_size);
                    return old_entry.0;
                },
            }
        }

        let mut evicted = None;

        // Check if total cache is at capacity
        if self.len() >= self.cap().get() {
            // If probationary segment has items, evict from there first
            if !self.probationary.is_empty() {
                if let Some(old_entry) = self.probationary.remove_last() {
                    unsafe {
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_probationary_eviction(evicted_size);
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));
                        let _ = Box::from_raw(entry_ptr);
                    }
                }
            } else if !self.protected.is_empty() {
                // If probationary is empty, evict from protected
                if let Some(old_entry) = self.protected.remove_last() {
                    unsafe {
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_protected_eviction(evicted_size);
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));
                        let _ = Box::from_raw(entry_ptr);
                    }
                }
            }
        }

        // Add the new key-value pair to the probationary segment
        if self.len() < self.cap().get() {
            // Total cache has space, allow probationary to exceed its capacity
            let node = self.probationary.add_unchecked((key.clone(), value));
            self.map.insert(key, (node, Location::Probationary));
        } else {
            // Total cache is full, try to add normally (may fail due to probationary capacity)
            if let Some(node) = self.probationary.add((key.clone(), value.clone())) {
                self.map.insert(key, (node, Location::Probationary));
            } else {
                // Probationary is at capacity, need to make space
                if let Some(old_entry) = self.probationary.remove_last() {
                    unsafe {
                        let entry_ptr = Box::into_raw(old_entry);
                        let (old_key, old_value) = (*entry_ptr).get_value().clone();
                        let evicted_size = self.estimate_object_size(&old_key, &old_value);
                        self.metrics.record_probationary_eviction(evicted_size);
                        self.map.remove(&old_key);
                        evicted = Some((old_key, old_value));
                        let _ = Box::from_raw(entry_ptr);
                    }
                }

                // Try again after making space
                if let Some(node) = self.probationary.add((key.clone(), value)) {
                    self.map.insert(key, (node, Location::Probationary));
                }
            }
        }

        // Record insertion and update segment sizes
        self.metrics.core.record_insertion(object_size);
        self.metrics
            .update_segment_sizes(self.probationary.len() as u64, self.protected.len() as u64);

        evicted
    }

    /// Removes a key from the segment, returning the value if the key was present.
    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        let (node, location) = self.map.remove(key)?;

        match location {
            Location::Probationary => unsafe {
                // SAFETY: node comes from our map and was just removed
                let boxed_entry = self.probationary.remove(node)?;
                let entry_ptr = Box::into_raw(boxed_entry);
                let value = (*entry_ptr).get_value().1.clone();
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
            Location::Protected => unsafe {
                // SAFETY: node comes from our map and was just removed
                let boxed_entry = self.protected.remove(node)?;
                let entry_ptr = Box::into_raw(boxed_entry);
                let value = (*entry_ptr).get_value().1.clone();
                let _ = Box::from_raw(entry_ptr);
                Some(value)
            },
        }
    }

    /// Clears the segment, removing all key-value pairs.
    pub(crate) fn clear(&mut self) {
        self.map.clear();
        self.probationary.clear();
        self.protected.clear();
    }
}

// Implement Debug for SlruSegment manually since it contains raw pointers
impl<K, V, S> core::fmt::Debug for SlruSegment<K, V, S> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("SlruSegment")
            .field("capacity", &self.config.capacity())
            .field("protected_capacity", &self.config.protected_capacity())
            .field("len", &self.map.len())
            .finish()
    }
}

/// An implementation of a Segmented Least Recently Used (SLRU) cache.
///
/// The cache is divided into two segments:
/// - Probationary segment: Where new entries are initially placed
/// - Protected segment: Where frequently accessed entries are promoted to
///
/// When the cache reaches capacity, the least recently used entry in the
/// probationary segment is evicted; if the probationary segment is empty, the
/// least recently used entry in the protected segment is evicted instead.
/// Entries are demoted from protected back to probationary when a promotion
/// overflows the protected segment.
///
/// # Examples
///
/// ```
/// use cache_rs::slru::SlruCache;
/// use core::num::NonZeroUsize;
///
/// // Create an SLRU cache with a total capacity of 4,
/// // with a protected capacity of 2 (half protected, half probationary)
/// let mut cache = SlruCache::new(
///     NonZeroUsize::new(4).unwrap(),
///     NonZeroUsize::new(2).unwrap()
/// );
///
/// // Add some items
/// cache.put("a", 1);
/// cache.put("b", 2);
/// cache.put("c", 3);
/// cache.put("d", 4);
///
/// // Access "a" to promote it to the protected segment
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add a new item, which evicts the least recently used item
/// // from the probationary segment ("b" in this example)
/// cache.put("e", 5);
/// assert_eq!(cache.get(&"b"), None);
/// ```
#[derive(Debug)]
pub struct SlruCache<K, V, S = DefaultHashBuilder> {
    segment: SlruSegment<K, V, S>,
}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> SlruCache<K, V, S> {
    /// Creates a new SLRU cache with the specified capacities and hash builder.
    ///
    /// # Parameters
    ///
    /// - `total_cap`: The total capacity of the cache. Must be a non-zero value.
    /// - `protected_cap`: The maximum capacity of the protected segment.
    ///   Must be non-zero and strictly less than `total_cap`, so the
    ///   probationary segment can hold at least one entry.
    /// - `hash_builder`: The hash builder to use for the underlying hash map.
    ///
    /// # Panics
    ///
    /// Panics if `protected_cap` is greater than or equal to `total_cap`.
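    ///
    /// # Examples
    ///
    /// A minimal sketch using the standard library's `RandomState` as the
    /// hasher (any `BuildHasher` works):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let mut cache: SlruCache<&str, i32, RandomState> = SlruCache::with_hasher(
    ///     NonZeroUsize::new(4).unwrap(),
    ///     NonZeroUsize::new(2).unwrap(),
    ///     RandomState::new(),
    /// );
    /// cache.put("a", 1);
    /// assert_eq!(cache.get(&"a"), Some(&1));
    /// ```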
    pub fn with_hasher(
        total_cap: NonZeroUsize,
        protected_cap: NonZeroUsize,
        hash_builder: S,
    ) -> Self {
        Self {
            segment: SlruSegment::with_hasher(total_cap, protected_cap, hash_builder),
        }
    }

    /// Returns the maximum number of key-value pairs the cache can hold.
    #[inline]
    pub fn cap(&self) -> NonZeroUsize {
        self.segment.cap()
    }

    /// Returns the maximum size of the protected segment.
    #[inline]
    pub fn protected_max_size(&self) -> NonZeroUsize {
        self.segment.protected_max_size()
    }

    /// Returns the current number of key-value pairs in the cache.
    #[inline]
    pub fn len(&self) -> usize {
        self.segment.len()
    }

    /// Returns `true` if the cache contains no key-value pairs.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.segment.is_empty()
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If a value is returned from the probationary segment, it is promoted
    /// to the protected segment.
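    ///
    /// # Examples
    ///
    /// A small sketch of the promotion-on-hit behavior:
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
    /// cache.put("a", 1);
    /// assert_eq!(cache.get(&"a"), Some(&1)); // hit in probationary: "a" moves to protected
    /// assert_eq!(cache.get(&"b"), None);     // miss
    /// ```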
    #[inline]
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get(key)
    }

    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// If a value is returned from the probationary segment, it is promoted
    /// to the protected segment.
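    ///
    /// # Examples
    ///
    /// A small sketch of in-place mutation (the hit also promotes the entry):
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
    /// cache.put("a", 1);
    /// if let Some(v) = cache.get_mut(&"a") {
    ///     *v += 10;
    /// }
    /// assert_eq!(cache.get(&"a"), Some(&11));
    /// ```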
    #[inline]
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get_mut(key)
    }

    /// Records a cache miss for metrics tracking (intended to be called by an external simulation system).
    #[inline]
    pub fn record_miss(&mut self, object_size: u64) {
        self.segment.record_miss(object_size);
    }
}

impl<K: Hash + Eq + Clone, V: Clone, S: BuildHasher> SlruCache<K, V, S> {
    /// Inserts a key-value pair into the cache.
    ///
    /// If the cache already contained this key, the old value is replaced and returned.
    /// Otherwise, if the cache is at capacity, the least recently used item in the
    /// probationary segment is evicted; if the probationary segment is empty, the
    /// least recently used item in the protected segment is evicted instead.
    ///
    /// The inserted key-value pair is always placed in the probationary segment.
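    ///
    /// # Examples
    ///
    /// A small sketch of insert and replace semantics:
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
    /// assert_eq!(cache.put("a", 1), None);         // fresh insert, nothing evicted
    /// assert_eq!(cache.put("a", 2).unwrap().1, 1); // replaces and returns the old value
    /// ```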
    #[inline]
    pub fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        V: Clone,
    {
        self.segment.put(key, value)
    }

    /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
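    ///
    /// # Examples
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
    /// cache.put("a", 1);
    /// assert_eq!(cache.remove(&"a"), Some(1));
    /// assert_eq!(cache.remove(&"a"), None);
    /// ```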
    #[inline]
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        self.segment.remove(key)
    }

    /// Clears the cache, removing all key-value pairs.
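    ///
    /// # Examples
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());
    /// cache.put("a", 1);
    /// cache.put("b", 2);
    /// cache.clear();
    /// assert!(cache.is_empty());
    /// assert_eq!(cache.get(&"a"), None);
    /// ```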
    #[inline]
    pub fn clear(&mut self) {
        self.segment.clear()
    }
}

impl<K: Hash + Eq, V> SlruCache<K, V>
where
    V: Clone,
{
    /// Creates a new SLRU cache with the specified total and protected capacities.
    ///
    /// The total capacity must be strictly greater than the protected capacity,
    /// so the probationary segment can hold at least one entry.
    ///
    /// # Panics
    ///
    /// Panics if `protected_capacity` is greater than or equal to `capacity`.
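    ///
    /// # Examples
    ///
    /// A minimal construction sketch:
    ///
    /// ```
    /// use cache_rs::slru::SlruCache;
    /// use core::num::NonZeroUsize;
    ///
    /// // Total capacity 8: up to 3 protected entries, the rest probationary.
    /// let cache: SlruCache<u32, u32> = SlruCache::new(
    ///     NonZeroUsize::new(8).unwrap(),
    ///     NonZeroUsize::new(3).unwrap(),
    /// );
    /// assert_eq!(cache.cap().get(), 8);
    /// assert_eq!(cache.protected_max_size().get(), 3);
    /// ```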
    pub fn new(
        capacity: NonZeroUsize,
        protected_capacity: NonZeroUsize,
    ) -> SlruCache<K, V, DefaultHashBuilder> {
        let config = SlruCacheConfig::new(capacity, protected_capacity);
        SlruCache::with_hasher(
            config.capacity(),
            config.protected_capacity(),
            DefaultHashBuilder::default(),
        )
    }
}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for SlruCache<K, V, S> {
    fn metrics(&self) -> BTreeMap<String, f64> {
        self.segment.metrics().metrics()
    }

    fn algorithm_name(&self) -> &'static str {
        self.segment.metrics().algorithm_name()
    }
}

#[cfg(test)]
mod tests {
    extern crate std;
    use std::string::ToString;

    use super::*;
    use alloc::string::String;

    #[test]
    fn test_slru_basic() {
        // Create a cache with capacity 4, with protected capacity 2
        // (2 probationary, 2 protected)
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items to fill probationary segment
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Cache should be at capacity
        assert_eq!(cache.len(), 4);

        // Access "a" and "b" to promote them to protected segment
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"b"), Some(&2));

        // Add a new item "e", should evict "c" from probationary
        let evicted = cache.put("e", 5);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "c");
        assert_eq!(evicted_val, 3);

        // Add another item "f", should evict "d" from probationary
        let evicted = cache.put("f", 6);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap();
        assert_eq!(evicted_key, "d");
        assert_eq!(evicted_val, 4);

        // Check presence
        assert_eq!(cache.get(&"a"), Some(&1)); // Protected
        assert_eq!(cache.get(&"b"), Some(&2)); // Protected
        assert_eq!(cache.get(&"c"), None); // Evicted
        assert_eq!(cache.get(&"d"), None); // Evicted
        assert_eq!(cache.get(&"e"), Some(&5)); // Probationary
        assert_eq!(cache.get(&"f"), Some(&6)); // Probationary
    }

    #[test]
    fn test_slru_update() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Update values
        assert_eq!(cache.put("a", 10).unwrap().1, 1);
        assert_eq!(cache.put("b", 20).unwrap().1, 2);

        // Check updated values
        assert_eq!(cache.get(&"a"), Some(&10));
        assert_eq!(cache.get(&"b"), Some(&20));
    }

    #[test]
    fn test_slru_remove() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Remove items
        assert_eq!(cache.remove(&"a"), Some(1)); // From protected
        assert_eq!(cache.remove(&"b"), Some(2)); // From probationary

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);

        // Check that removing non-existent item returns None
        assert_eq!(cache.remove(&"c"), None);
    }

    #[test]
    fn test_slru_clear() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        // Add items
        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.put("d", 4);

        // Clear the cache
        cache.clear();

        // Check that cache is empty
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Check that items are gone
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), None);
    }

    #[test]
    fn test_slru_complex_values() {
        // Create a cache with capacity 4, with protected capacity 2
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        #[derive(Debug, Clone, PartialEq)]
        struct ComplexValue {
            id: usize,
            data: String,
        }

        // Add complex values
        cache.put(
            "a",
            ComplexValue {
                id: 1,
                data: "a-data".to_string(),
            },
        );
        cache.put(
            "b",
            ComplexValue {
                id: 2,
                data: "b-data".to_string(),
            },
        );

        // Modify a value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            value.id = 100;
            value.data = "a-modified".to_string();
        }

        // Check the modified value
        let a = cache.get(&"a").unwrap();
        assert_eq!(a.id, 100);
        assert_eq!(a.data, "a-modified");
    }

    #[test]
    fn test_slru_with_ratio() {
        // Test capacity accessors and basic behavior with an explicit protected capacity
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        assert_eq!(cache.cap().get(), 4);
        assert_eq!(cache.protected_max_size().get(), 2);

        // Test basic functionality
        assert_eq!(cache.put("a", 1), None);
        assert_eq!(cache.put("b", 2), None);

        // Access "a" to promote it to protected
        assert_eq!(cache.get(&"a"), Some(&1));

        // Fill the cache
        assert_eq!(cache.put("c", 3), None);
        assert_eq!(cache.put("d", 4), None);

        // Add another item, should evict "b" from probationary
        let result = cache.put("e", 5);
        assert_eq!(result.unwrap().0, "b");

        // Check that protected items remain
        assert_eq!(cache.get(&"a"), Some(&1));
    }

    // Test that SlruSegment works correctly (internal tests)
    #[test]
    fn test_slru_segment_directly() {
        let mut segment: SlruSegment<&str, i32, DefaultHashBuilder> = SlruSegment::with_hasher(
            NonZeroUsize::new(4).unwrap(),
            NonZeroUsize::new(2).unwrap(),
            DefaultHashBuilder::default(),
        );

        assert_eq!(segment.len(), 0);
        assert!(segment.is_empty());
        assert_eq!(segment.cap().get(), 4);
        assert_eq!(segment.protected_max_size().get(), 2);

        segment.put("a", 1);
        segment.put("b", 2);
        assert_eq!(segment.len(), 2);

        // Access to promote
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"b"), Some(&2));
    }

    #[test]
    fn test_slru_concurrent_access() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(SlruCache::new(
            NonZeroUsize::new(100).unwrap(),
            NonZeroUsize::new(50).unwrap(),
        )));
        let num_threads = 4;
        let ops_per_thread = 100;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    guard.put(key.clone(), i);
                    let _ = guard.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        guard.clear(); // Clean up for MIRI
    }
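
    // Added sketch: exercises demotion from the protected segment back to
    // probationary when the protected segment is full. It only asserts that
    // no entries are lost while hits shuffle items between the two segments.
    #[test]
    fn test_slru_protected_demotion_keeps_entries() {
        let mut cache =
            SlruCache::new(NonZeroUsize::new(4).unwrap(), NonZeroUsize::new(2).unwrap());

        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.put("d", 4);

        // Promote more items than the protected segment can hold; the LRU
        // protected entry is demoted back to probationary each time.
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"b"), Some(&2));
        assert_eq!(cache.get(&"c"), Some(&3));
        assert_eq!(cache.get(&"d"), Some(&4));

        // No inserts happened after filling, so nothing was evicted.
        assert_eq!(cache.len(), 4);
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"d"), Some(&4));
    }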
}