// cache_rs/src/lfu.rs
//! Least Frequently Used (LFU) Cache Implementation
//!
//! An LFU cache evicts the least frequently accessed item when capacity is reached.
//! This implementation tracks access frequency for each item and maintains items
//! organized by their frequency count using a combination of hash map and frequency-indexed lists.
//!
//! # How the Algorithm Works
//!
//! LFU is based on the principle that items accessed more frequently in the past
//! are more likely to be accessed again in the future. Unlike LRU which only considers
//! recency, LFU considers the total number of accesses.
//!
//! ## Data Structure
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────────────────────┐
//! │                              LFU Cache                                       │
//! │                                                                              │
//! │  HashMap<K, *Node>              BTreeMap<Frequency, List>                    │
//! │  ┌──────────────┐              ┌─────────────────────────────────────────┐   │
//! │  │ "hot" ──────────────────────│ freq=10: [hot] ◀──▶ [warm]              │   │
//! │  │ "warm" ─────────────────────│ freq=5:  [item_a] ◀──▶ [item_b]         │   │
//! │  │ "cold" ─────────────────────│ freq=1:  [cold] ◀──▶ [new_item]  ← LFU  │   │
//! │  └──────────────┘              └─────────────────────────────────────────┘   │
//! │                                        ▲                                     │
//! │                                        │                                     │
//! │                                   min_frequency=1                            │
//! └─────────────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! - **HashMap**: Provides O(1) key lookup, storing pointers to list nodes
//! - **BTreeMap**: Maps frequency counts to lists of items with that frequency
//! - **min_frequency**: Tracks the lowest frequency for O(1) eviction
//!
//! ## Operations
//!
//! | Operation | Action | Time |
//! |-----------|--------|------|
//! | `get(key)` | Increment frequency, move to new frequency list | O(log F)* |
//! | `put(key, value)` | Insert at frequency 1, evict lowest freq if full | O(log F)* |
//! | `remove(key)` | Remove from frequency list, update min_frequency | O(log F)* |
//!
//! *Effectively O(1) since F (distinct frequencies) is bounded to a small constant.
//!
//! ## Access Pattern Example
//!
//! ```text
//! Cache capacity: 3
//!
//! put("a", 1)  →  freq_1: [a]
//! put("b", 2)  →  freq_1: [b, a]
//! put("c", 3)  →  freq_1: [c, b, a]
//! get("a")     →  freq_1: [c, b], freq_2: [a]
//! get("a")     →  freq_1: [c, b], freq_3: [a]
//! put("d", 4)  →  freq_1: [d, c], freq_3: [a]   // "b" evicted (LFU at freq_1)
//! ```
//!
//! # Dual-Limit Capacity
//!
//! This implementation supports two independent limits:
//!
//! - **`max_entries`**: Maximum number of items (bounds entry count)
//! - **`max_size`**: Maximum total size of content (sum of item sizes)
//!
//! Eviction occurs when **either** limit would be exceeded.
//!
//! # Performance Characteristics
//!
//! | Metric | Value |
//! |--------|-------|
//! | Get | O(log F) amortized, effectively O(1) |
//! | Put | O(log F) amortized, effectively O(1) |
//! | Remove | O(log F) amortized, effectively O(1) |
//! | Memory per entry | ~100 bytes overhead + key×2 + value |
//!
//! Where F = number of distinct frequency values. Since frequencies are small integers
//! (1, 2, 3, ...), F is typically bounded to a small constant (< 100 in practice),
//! making operations effectively O(1).
//!
//! Memory overhead includes: list node pointers (16B), `CacheEntry` metadata (32B),
//! frequency metadata (8B), HashMap bucket (~24B), BTreeMap overhead (~16B).
//!
//! # When to Use LFU
//!
//! **Good for:**
//! - Workloads with stable popularity patterns (some items are consistently popular)
//! - Database query caches where certain queries are repeated frequently
//! - CDN edge caches with predictable content popularity
//! - Scenarios requiring excellent scan resistance
//!
//! **Not ideal for:**
//! - Recency-based access patterns (use LRU instead)
//! - Workloads where popularity changes over time (use LFUDA instead)
//! - Short-lived caches where frequency counts don't accumulate meaningfully
//! - "Cache pollution" scenarios where old popular items block new ones
//!
//! # The Cache Pollution Problem
//!
//! A limitation of pure LFU: items that were popular in the past but are no longer
//! accessed can persist in the cache indefinitely due to high frequency counts.
//! For long-running caches with changing popularity, consider [`LfudaCache`](crate::LfudaCache)
//! which addresses this with dynamic aging.
//!
//! # Thread Safety
//!
//! `LfuCache` is **not thread-safe**. For concurrent access, either:
//! - Wrap with `Mutex` or `RwLock`
//! - Use `ConcurrentLfuCache` (requires `concurrent` feature)
//!
//! # Examples
//!
//! ## Basic Usage
//!
//! ```
//! use cache_rs::LfuCache;
//! use cache_rs::config::LfuCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LfuCacheConfig {
//!     capacity: NonZeroUsize::new(3).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LfuCache::init(config, None);
//!
//! cache.put("a", 1, 1);
//! cache.put("b", 2, 1);
//! cache.put("c", 3, 1);
//!
//! // Access "a" multiple times - increases its frequency
//! assert_eq!(cache.get(&"a"), Some(&1));
//! assert_eq!(cache.get(&"a"), Some(&1));
//!
//! // Add new item - "b" or "c" evicted (both at frequency 1)
//! cache.put("d", 4, 1);
//!
//! // "a" survives due to higher frequency
//! assert_eq!(cache.get(&"a"), Some(&1));
//! ```
//!
//! ## Size-Aware Caching
//!
//! ```
//! use cache_rs::LfuCache;
//! use cache_rs::config::LfuCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! // Cache with max 1000 entries and 10MB total size
//! let config = LfuCacheConfig {
//!     capacity: NonZeroUsize::new(1000).unwrap(),
//!     max_size: 10 * 1024 * 1024,
//! };
//! let mut cache: LfuCache<String, Vec<u8>> = LfuCache::init(config, None);
//!
//! let data = vec![0u8; 1024];  // 1KB
//! cache.put("file.bin".to_string(), data.clone(), 1024);
//! ```

158extern crate alloc;
159
160use crate::config::LfuCacheConfig;
161use crate::entry::CacheEntry;
162use crate::list::{List, ListEntry};
163use crate::metrics::{CacheMetrics, LfuCacheMetrics};
164
/// Metadata for LFU (Least Frequently Used) cache entries.
///
/// LFU tracks access frequency to evict the least frequently accessed items.
/// The frequency counter is incremented on each access.
///
/// # Examples
///
/// ```
/// use cache_rs::lfu::LfuMeta;
///
/// let mut meta = LfuMeta::default();
/// assert_eq!(meta.frequency, 0);
///
/// // Simulate access
/// meta.frequency += 1;
/// assert_eq!(meta.frequency, 1);
/// ```
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct LfuMeta {
    /// Access frequency count.
    /// Incremented each time the entry is accessed.
    /// Stored as `u64`; the segment converts to/from `usize` list keys.
    pub frequency: u64,
}
188
189impl LfuMeta {
190    /// Creates a new LFU metadata with the specified initial frequency.
191    ///
192    /// # Arguments
193    ///
194    /// * `frequency` - Initial frequency value (usually 0 or 1)
195    #[inline]
196    pub fn new(frequency: u64) -> Self {
197        Self { frequency }
198    }
199
200    /// Increments the frequency counter and returns the new value.
201    #[inline]
202    pub fn increment(&mut self) -> u64 {
203        self.frequency += 1;
204        self.frequency
205    }
206}
207use alloc::boxed::Box;
208use alloc::collections::BTreeMap;
209use alloc::string::String;
210use alloc::vec::Vec;
211use core::borrow::Borrow;
212use core::hash::{BuildHasher, Hash};
213use core::num::NonZeroUsize;
214
215#[cfg(feature = "hashbrown")]
216use hashbrown::DefaultHashBuilder;
217#[cfg(feature = "hashbrown")]
218use hashbrown::HashMap;
219
220#[cfg(not(feature = "hashbrown"))]
221use std::collections::hash_map::RandomState as DefaultHashBuilder;
222#[cfg(not(feature = "hashbrown"))]
223use std::collections::HashMap;
224
/// Internal LFU segment containing the actual cache algorithm.
///
/// This is shared between `LfuCache` (single-threaded) and
/// `ConcurrentLfuCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// Uses `CacheEntry<K, V, LfuMeta>` for unified entry management with built-in
/// size tracking, timestamps, and frequency metadata. The frequency is stored
/// only in `LfuMeta` (inside CacheEntry), eliminating duplication.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field. These pointers
/// are always valid as long as:
/// - The pointer was obtained from a `frequency_lists` entry's `add()` call
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct LfuSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the LFU cache (includes capacity and max_size)
    config: LfuCacheConfig,

    /// Current minimum frequency in the cache.
    /// Kept as `usize` because it doubles as a `frequency_lists` key;
    /// `LfuMeta.frequency` is `u64`, and the code casts between the two.
    min_frequency: usize,

    /// Map from keys to their list node pointer.
    /// Frequency is stored in CacheEntry.metadata (LfuMeta), not duplicated here.
    map: HashMap<K, *mut ListEntry<CacheEntry<K, V, LfuMeta>>, S>,

    /// Map from frequency to list of items with that frequency.
    /// Items within each frequency list are ordered by recency (LRU within frequency),
    /// which provides the tie-break order used at eviction time.
    frequency_lists: BTreeMap<usize, List<CacheEntry<K, V, LfuMeta>>>,

    /// Metrics for tracking cache performance and frequency distribution
    metrics: LfuCacheMetrics,

    /// Current total size of cached content (sum of entry sizes)
    current_size: u64,
}
263
// SAFETY: LfuSegment owns all data and raw pointers point only to nodes owned by
// `frequency_lists`. Concurrent access is safe when wrapped in proper synchronization primitives.
unsafe impl<K: Send, V: Send, S: Send> Send for LfuSegment<K, V, S> {}

// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
// NOTE(review): this `Sync` bound looks too weak — `peek(&self)` and `contains(&self)`
// hand out references derived from the raw node pointers, which would ordinarily
// require `K: Sync, V: Sync` if a `&LfuSegment` is ever shared across threads
// directly (rather than behind a `Mutex`, which only needs `Send`). Confirm the
// concurrent wrapper never exposes `&LfuSegment` across threads.
unsafe impl<K: Send, V: Send, S: Sync> Sync for LfuSegment<K, V, S> {}
270
271impl<K: Hash + Eq, V: Clone, S: BuildHasher> LfuSegment<K, V, S> {
272    /// Creates a new LFU segment from a configuration.
273    ///
274    /// This is the **recommended** way to create an LFU segment. All configuration
275    /// is specified through the [`LfuCacheConfig`] struct.
276    ///
277    /// # Arguments
278    ///
279    /// * `config` - Configuration specifying capacity and optional size limit
280    /// * `hasher` - Hash builder for the internal HashMap
281    #[allow(dead_code)] // Used by concurrent module when feature is enabled
282    pub(crate) fn init(config: LfuCacheConfig, hasher: S) -> Self {
283        let map_capacity = config.capacity.get().next_power_of_two();
284        LfuSegment {
285            config,
286            min_frequency: 1,
287            map: HashMap::with_capacity_and_hasher(map_capacity, hasher),
288            frequency_lists: BTreeMap::new(),
289            metrics: LfuCacheMetrics::new(config.max_size),
290            current_size: 0,
291        }
292    }
293
294    /// Returns the maximum number of key-value pairs the segment can hold.
295    #[inline]
296    pub(crate) fn cap(&self) -> NonZeroUsize {
297        self.config.capacity
298    }
299
300    /// Returns the current number of key-value pairs in the segment.
301    #[inline]
302    pub(crate) fn len(&self) -> usize {
303        self.map.len()
304    }
305
306    /// Returns `true` if the segment contains no key-value pairs.
307    #[inline]
308    pub(crate) fn is_empty(&self) -> bool {
309        self.map.is_empty()
310    }
311
312    /// Returns the current total size of cached content.
313    #[inline]
314    pub(crate) fn current_size(&self) -> u64 {
315        self.current_size
316    }
317
318    /// Returns the maximum content size the cache can hold.
319    #[inline]
320    pub(crate) fn max_size(&self) -> u64 {
321        self.config.max_size
322    }
323
324    /// Returns a reference to the metrics for this segment.
325    #[inline]
326    pub(crate) fn metrics(&self) -> &LfuCacheMetrics {
327        &self.metrics
328    }
329
330    /// Updates the frequency of an item and moves it to the appropriate frequency list.
331    /// Takes the node pointer directly to avoid aliasing issues.
332    ///
333    /// # Safety
334    ///
335    /// The caller must ensure that `node` is a valid pointer to a ListEntry that exists
336    /// in this cache's frequency lists and has not been freed.
337    unsafe fn update_frequency_by_node(
338        &mut self,
339        node: *mut ListEntry<CacheEntry<K, V, LfuMeta>>,
340        old_frequency: usize,
341    ) -> *mut ListEntry<CacheEntry<K, V, LfuMeta>>
342    where
343        K: Clone + Hash + Eq,
344    {
345        let new_frequency = old_frequency + 1;
346
347        // Record frequency increment
348        self.metrics
349            .record_frequency_increment(old_frequency, new_frequency);
350
351        // SAFETY: node is guaranteed to be valid by the caller's contract
352        let entry = (*node).get_value();
353        let key_cloned = entry.key.clone();
354
355        // Get the current node from the map
356        let node = *self.map.get(&key_cloned).unwrap();
357
358        // Remove from old frequency list
359        let boxed_entry = self
360            .frequency_lists
361            .get_mut(&old_frequency)
362            .unwrap()
363            .remove(node)
364            .unwrap();
365
366        // If the old frequency list is now empty, remove it and update min_frequency
367        if self.frequency_lists.get(&old_frequency).unwrap().is_empty() {
368            self.frequency_lists.remove(&old_frequency);
369            if old_frequency == self.min_frequency {
370                self.min_frequency = new_frequency;
371            }
372        }
373
374        // Update frequency in the entry's metadata
375        let entry_ptr = Box::into_raw(boxed_entry);
376        (*entry_ptr).get_value_mut().metadata.algorithm.frequency = new_frequency as u64;
377
378        // Ensure the new frequency list exists
379        let capacity = self.config.capacity;
380        self.frequency_lists
381            .entry(new_frequency)
382            .or_insert_with(|| List::new(capacity));
383
384        // Add to the front of the new frequency list (most recently used within that frequency)
385        self.frequency_lists
386            .get_mut(&new_frequency)
387            .unwrap()
388            .attach_from_other_list(entry_ptr);
389
390        // Update the map with the new node pointer
391        *self.map.get_mut(&key_cloned).unwrap() = entry_ptr;
392
393        // Update metrics with new frequency levels
394        self.metrics.update_frequency_levels(&self.frequency_lists);
395
396        entry_ptr
397    }
398
399    /// Returns a reference to the value corresponding to the key.
400    pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
401    where
402        K: Borrow<Q> + Clone,
403        Q: ?Sized + Hash + Eq,
404    {
405        if let Some(&node) = self.map.get(key) {
406            unsafe {
407                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our frequency list
408                let entry = (*node).get_value();
409                let frequency = entry.metadata.algorithm.frequency as usize;
410                let object_size = entry.metadata.size;
411                self.metrics.record_frequency_hit(object_size, frequency);
412
413                let new_node = self.update_frequency_by_node(node, frequency);
414                let new_entry = (*new_node).get_value();
415                Some(&new_entry.value)
416            }
417        } else {
418            None
419        }
420    }
421
422    /// Returns a mutable reference to the value corresponding to the key.
423    pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
424    where
425        K: Borrow<Q> + Clone,
426        Q: ?Sized + Hash + Eq,
427    {
428        if let Some(&node) = self.map.get(key) {
429            unsafe {
430                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our frequency list
431                let entry = (*node).get_value();
432                let frequency = entry.metadata.algorithm.frequency as usize;
433                let object_size = entry.metadata.size;
434                self.metrics.record_frequency_hit(object_size, frequency);
435
436                let new_node = self.update_frequency_by_node(node, frequency);
437                let new_entry = (*new_node).get_value_mut();
438                Some(&mut new_entry.value)
439            }
440        } else {
441            None
442        }
443    }
444
445    /// Insert a key-value pair with optional size tracking.
446    ///
447    /// The `size` parameter specifies how much of `max_size` this entry consumes.
448    /// Use `SIZE_UNIT` (1) for count-based caching.
449    ///
450    /// Returns evicted entries, or `None` if no entries were evicted.
451    /// Note: Replacing an existing key does not return the old value.
452    pub(crate) fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
453    where
454        K: Clone,
455    {
456        // If key already exists, update it
457        if let Some(&node) = self.map.get(&key) {
458            unsafe {
459                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our frequency list
460                let entry = (*node).get_value();
461                let frequency = entry.metadata.algorithm.frequency as usize;
462                let old_size = entry.metadata.size;
463
464                // Create new CacheEntry with same frequency
465                let new_entry = CacheEntry::with_algorithm_metadata(
466                    key.clone(),
467                    value,
468                    size,
469                    LfuMeta::new(frequency as u64),
470                );
471
472                let _old_entry = self
473                    .frequency_lists
474                    .get_mut(&frequency)
475                    .unwrap()
476                    .update(node, new_entry, true);
477
478                // Update size tracking
479                self.current_size = self.current_size.saturating_sub(old_size);
480                self.current_size += size;
481                self.metrics.core.record_size_change(old_size, size);
482                self.metrics.core.bytes_written_to_cache += size;
483
484                // Replacement is not eviction - don't return the old value
485                return None;
486            }
487        }
488
489        let mut evicted = Vec::new();
490
491        // Evict while entry count limit OR size limit would be exceeded
492        while self.len() >= self.config.capacity.get()
493            || (self.current_size + size > self.config.max_size && !self.map.is_empty())
494        {
495            if let Some(entry) = self.evict() {
496                self.metrics.core.evictions += 1;
497                evicted.push(entry);
498            } else {
499                break;
500            }
501        }
502
503        // Add new item with frequency 1
504        let frequency = 1;
505        self.min_frequency = 1;
506
507        // Ensure frequency list exists
508        let capacity = self.config.capacity;
509        self.frequency_lists
510            .entry(frequency)
511            .or_insert_with(|| List::new(capacity));
512
513        // Create CacheEntry with LfuMeta
514        let cache_entry = CacheEntry::with_algorithm_metadata(
515            key.clone(),
516            value,
517            size,
518            LfuMeta::new(frequency as u64),
519        );
520
521        if let Some(node) = self
522            .frequency_lists
523            .get_mut(&frequency)
524            .unwrap()
525            .add(cache_entry)
526        {
527            self.map.insert(key, node);
528            self.current_size += size;
529        }
530
531        self.metrics.core.record_insertion(size);
532        self.metrics.update_frequency_levels(&self.frequency_lists);
533
534        if evicted.is_empty() {
535            None
536        } else {
537            Some(evicted)
538        }
539    }
540
541    /// Removes a key from the segment, returning the value if the key was present.
542    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
543    where
544        K: Borrow<Q>,
545        Q: ?Sized + Hash + Eq,
546    {
547        let node = self.map.remove(key)?;
548
549        unsafe {
550            // SAFETY: node comes from our map; take_value moves the value out
551            // and Box::from_raw frees memory (MaybeUninit won't double-drop).
552            // Read frequency before removal — needed to find the correct frequency list
553            let frequency = (*node).get_value().metadata.algorithm.frequency as usize;
554
555            let boxed_entry = self.frequency_lists.get_mut(&frequency)?.remove(node)?;
556            let entry_ptr = Box::into_raw(boxed_entry);
557            let cache_entry = (*entry_ptr).take_value();
558            let removed_size = cache_entry.metadata.size;
559            let _ = Box::from_raw(entry_ptr);
560
561            self.current_size = self.current_size.saturating_sub(removed_size);
562            self.metrics.core.record_removal(removed_size);
563
564            // Remove empty frequency list and update min_frequency if necessary
565            if self.frequency_lists.get(&frequency).unwrap().is_empty() {
566                self.frequency_lists.remove(&frequency);
567                if frequency == self.min_frequency {
568                    self.min_frequency = self.frequency_lists.keys().copied().next().unwrap_or(1);
569                }
570            }
571
572            Some(cache_entry.value)
573        }
574    }
575
576    /// Clears the segment, removing all key-value pairs.
577    pub(crate) fn clear(&mut self) {
578        self.map.clear();
579        self.frequency_lists.clear();
580        self.min_frequency = 1;
581        self.current_size = 0;
582    }
583
584    /// Records a cache miss for metrics tracking
585    #[inline]
586    pub(crate) fn record_miss(&mut self, object_size: u64) {
587        self.metrics.record_miss(object_size);
588    }
589
590    /// Check if key exists without updating its frequency.
591    ///
592    /// Unlike `get()`, this method does NOT update the entry's frequency
593    /// or access metadata.
594    #[inline]
595    pub(crate) fn contains<Q>(&self, key: &Q) -> bool
596    where
597        K: Borrow<Q>,
598        Q: ?Sized + Hash + Eq,
599    {
600        self.map.contains_key(key)
601    }
602
603    /// Returns a reference to the value without updating frequency or access metadata.
604    ///
605    /// Unlike `get()`, this method does NOT increment the entry's frequency
606    /// or change its position in any frequency list.
607    pub(crate) fn peek<Q>(&self, key: &Q) -> Option<&V>
608    where
609        K: Borrow<Q>,
610        Q: ?Sized + Hash + Eq,
611    {
612        let &node = self.map.get(key)?;
613        unsafe {
614            // SAFETY: node comes from our map, so it's a valid pointer
615            let entry = (*node).get_value();
616            Some(&entry.value)
617        }
618    }
619
620    /// Removes and returns the eviction candidate (lowest frequency entry).
621    ///
622    /// Returns the entry with the lowest frequency. In case of a tie,
623    /// returns the least recently used entry among those with the same frequency.
624    ///
625    /// This method does **not** increment the eviction counter in metrics.
626    /// Eviction metrics are only recorded when the cache internally evicts
627    /// entries to make room during `put()` operations.
628    ///
629    /// Returns `None` if the cache is empty.
630    fn evict(&mut self) -> Option<(K, V)> {
631        if self.is_empty() {
632            return None;
633        }
634
635        let min_freq_list = self.frequency_lists.get_mut(&self.min_frequency)?;
636        let old_entry = min_freq_list.remove_last()?;
637        let is_list_empty = min_freq_list.is_empty();
638
639        unsafe {
640            // SAFETY: take_value moves the CacheEntry out by value.
641            // Box::from_raw frees memory (MaybeUninit won't double-drop).
642            let entry_ptr = Box::into_raw(old_entry);
643            let cache_entry = (*entry_ptr).take_value();
644            let evicted_size = cache_entry.metadata.size;
645            self.map.remove(&cache_entry.key);
646            self.current_size = self.current_size.saturating_sub(evicted_size);
647            self.metrics.core.record_removal(evicted_size);
648
649            // Update min_frequency if the list is now empty
650            if is_list_empty {
651                self.frequency_lists.remove(&self.min_frequency);
652                self.min_frequency = self.frequency_lists.keys().copied().next().unwrap_or(1);
653            }
654
655            let _ = Box::from_raw(entry_ptr);
656            Some((cache_entry.key, cache_entry.value))
657        }
658    }
659}
660
661// Implement Debug for LfuSegment manually since it contains raw pointers
662impl<K, V, S> core::fmt::Debug for LfuSegment<K, V, S> {
663    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
664        f.debug_struct("LfuSegment")
665            .field("capacity", &self.config.capacity)
666            .field("len", &self.map.len())
667            .field("min_frequency", &self.min_frequency)
668            .finish()
669    }
670}
671
/// An implementation of a Least Frequently Used (LFU) cache.
///
/// The cache tracks the frequency of access for each item and evicts the least
/// frequently used items when the cache reaches capacity. In case of a tie in
/// frequency, the least recently used item among those with the same frequency
/// is evicted.
///
/// # Examples
///
/// ```
/// use cache_rs::lfu::LfuCache;
/// use cache_rs::config::LfuCacheConfig;
/// use core::num::NonZeroUsize;
///
/// // Create an LFU cache with capacity 3
/// let config = LfuCacheConfig {
///     capacity: NonZeroUsize::new(3).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = LfuCache::init(config, None);
///
/// // Add some items
/// cache.put("a", 1, 1);
/// cache.put("b", 2, 1);
/// cache.put("c", 3, 1);
///
/// // Access "a" multiple times to increase its frequency
/// assert_eq!(cache.get(&"a"), Some(&1));
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add a new item, which will evict the least frequently used item
/// cache.put("d", 4, 1);
/// // "b" was evicted: "b" and "c" were tied at frequency 1, and "b" was the
/// // least recently used of the two (new items start at frequency 1, not 0)
/// assert_eq!(cache.get(&"b"), None);
/// ```
#[derive(Debug)]
pub struct LfuCache<K, V, S = DefaultHashBuilder> {
    segment: LfuSegment<K, V, S>,
}
710
711impl<K: Hash + Eq, V: Clone, S: BuildHasher> LfuCache<K, V, S> {
712    /// Returns the maximum number of key-value pairs the cache can hold.
713    #[inline]
714    pub fn cap(&self) -> NonZeroUsize {
715        self.segment.cap()
716    }
717
718    /// Returns the current number of key-value pairs in the cache.
719    #[inline]
720    pub fn len(&self) -> usize {
721        self.segment.len()
722    }
723
724    /// Returns `true` if the cache contains no key-value pairs.
725    #[inline]
726    pub fn is_empty(&self) -> bool {
727        self.segment.is_empty()
728    }
729
730    /// Returns the current total size of cached content.
731    #[inline]
732    pub fn current_size(&self) -> u64 {
733        self.segment.current_size()
734    }
735
736    /// Returns the maximum content size the cache can hold.
737    #[inline]
738    pub fn max_size(&self) -> u64 {
739        self.segment.max_size()
740    }
741
742    /// Returns a reference to the value corresponding to the key.
743    ///
744    /// The key may be any borrowed form of the cache's key type, but
745    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
746    /// the key type.
747    ///
748    /// Accessing an item increases its frequency count.
749    #[inline]
750    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
751    where
752        K: Borrow<Q> + Clone,
753        Q: ?Sized + Hash + Eq,
754    {
755        self.segment.get(key)
756    }
757
758    /// Returns a mutable reference to the value corresponding to the key.
759    ///
760    /// The key may be any borrowed form of the cache's key type, but
761    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
762    /// the key type.
763    ///
764    /// Accessing an item increases its frequency count.
765    #[inline]
766    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
767    where
768        K: Borrow<Q> + Clone,
769        Q: ?Sized + Hash + Eq,
770    {
771        self.segment.get_mut(key)
772    }
773
774    /// Inserts a key-value pair into the cache.
775    ///
776    /// If the key already exists, it is replaced. If at capacity, the least frequently
777    /// used item is evicted. Ties are broken by evicting the least recently used item
778    /// among those with the same frequency.
779    ///
780    /// New items are inserted with a frequency of 1.
781    ///
782    /// # Arguments
783    ///
784    /// * `key` - The key to insert
785    /// * `value` - The value to cache
786    /// * `size` - Size of this entry for capacity tracking. Use `SIZE_UNIT` (1) for count-based caching.
787    ///
788    /// # Returns
789    ///
790    /// - `Some(vec)` containing evicted entries (not replaced entries)
791    /// - `None` if no entries were evicted (zero allocation)
792    #[inline]
793    pub fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
794    where
795        K: Clone,
796    {
797        self.segment.put(key, value, size)
798    }
799
800    /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
801    ///
802    /// The key may be any borrowed form of the cache's key type, but
803    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
804    /// the key type.
805    #[inline]
806    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
807    where
808        K: Borrow<Q>,
809        Q: ?Sized + Hash + Eq,
810        V: Clone,
811    {
812        self.segment.remove(key)
813    }
814
815    /// Clears the cache, removing all key-value pairs.
816    #[inline]
817    pub fn clear(&mut self) {
818        self.segment.clear()
819    }
820
821    /// Records a cache miss for metrics tracking (to be called by simulation system)
822    #[inline]
823    pub fn record_miss(&mut self, object_size: u64) {
824        self.segment.record_miss(object_size);
825    }
826
    /// Check if key exists without updating its frequency.
    ///
    /// Unlike `get()`, this method does NOT update the entry's frequency
    /// or access metadata. Useful for existence checks without affecting
    /// cache eviction order.
    ///
    /// The key may be any borrowed form of the cache's key type (same
    /// `Borrow`/`Hash`/`Eq` contract as [`remove()`](Self::remove)).
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LfuCache;
    /// use cache_rs::config::LfuCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LfuCacheConfig {
    ///     capacity: NonZeroUsize::new(2).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LfuCache::init(config, None);
    /// cache.put("a", 1, 1);
    /// cache.put("b", 2, 1);
    ///
    /// // contains() does NOT update frequency
    /// assert!(cache.contains(&"a"));
    /// ```
    #[inline]
    pub fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        // Read-only delegation; takes `&self`, so no frequency bookkeeping
        // can occur here.
        self.segment.contains(key)
    }
859
    /// Returns a reference to the value without updating frequency or access metadata.
    ///
    /// Unlike [`get()`](Self::get), this does NOT increment the entry's frequency
    /// or change its position in any frequency list.
    ///
    /// # Returns
    ///
    /// `Some(&value)` if the key is present, `None` otherwise.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LfuCache;
    /// use cache_rs::config::LfuCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LfuCacheConfig {
    ///     capacity: NonZeroUsize::new(3).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LfuCache::init(config, None);
    /// cache.put("a", 1, 1);
    ///
    /// // peek does not change frequency
    /// assert_eq!(cache.peek(&"a"), Some(&1));
    /// assert_eq!(cache.peek(&"missing"), None);
    /// ```
    #[inline]
    pub fn peek<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        // Read-only delegation; `&self` receiver guarantees no metadata update.
        self.segment.peek(key)
    }
891}
892
893impl<K: Hash + Eq, V> LfuCache<K, V>
894where
895    V: Clone,
896{
897    /// Creates a new LFU cache from a configuration.
898    ///
899    /// This is the **recommended** way to create an LFU cache. All configuration
900    /// is specified through the [`LfuCacheConfig`] struct.
901    ///
902    /// # Arguments
903    ///
904    /// * `config` - Configuration specifying capacity and optional size limit
905    /// * `hasher` - Optional custom hash builder (uses default if `None`)
906    ///
907    /// # Example
908    ///
909    /// ```
910    /// use cache_rs::LfuCache;
911    /// use cache_rs::config::LfuCacheConfig;
912    /// use core::num::NonZeroUsize;
913    ///
914    /// // Simple capacity-only cache
915    /// let config = LfuCacheConfig {
916    ///     capacity: NonZeroUsize::new(100).unwrap(),
917    ///     max_size: u64::MAX,
918    /// };
919    /// let mut cache: LfuCache<&str, i32> = LfuCache::init(config, None);
920    /// cache.put("key", 42, 1);
921    ///
922    /// // Cache with size limit
923    /// let config = LfuCacheConfig {
924    ///     capacity: NonZeroUsize::new(1000).unwrap(),
925    ///     max_size: 10 * 1024 * 1024,  // 10MB
926    /// };
927    /// let cache: LfuCache<String, Vec<u8>> = LfuCache::init(config, None);
928    /// ```
929    pub fn init(
930        config: LfuCacheConfig,
931        hasher: Option<DefaultHashBuilder>,
932    ) -> LfuCache<K, V, DefaultHashBuilder> {
933        LfuCache {
934            segment: LfuSegment::init(config, hasher.unwrap_or_default()),
935        }
936    }
937}
938
939impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for LfuCache<K, V, S> {
940    fn metrics(&self) -> BTreeMap<String, f64> {
941        self.segment.metrics().metrics()
942    }
943
944    fn algorithm_name(&self) -> &'static str {
945        self.segment.metrics().algorithm_name()
946    }
947}
948
/// Unit tests for the LFU cache front-end and its segment.
#[cfg(test)]
mod tests {
    extern crate std;
    use alloc::format;
    use alloc::vec;
    use std::println;
    use std::string::ToString;

    use super::*;
    use crate::config::LfuCacheConfig;
    use alloc::string::String;

    /// Helper to create an LfuCache with the given capacity
    fn make_cache<K: Hash + Eq + Clone, V: Clone>(cap: usize) -> LfuCache<K, V> {
        let config = LfuCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            max_size: u64::MAX,
        };
        LfuCache::init(config, None)
    }

    #[test]
    fn test_lfu_basic() {
        let mut cache = make_cache(3);

        // Add items
        assert_eq!(cache.put("a", 1, 1), None);
        assert_eq!(cache.put("b", 2, 1), None);
        assert_eq!(cache.put("c", 3, 1), None);

        // Access "a" multiple times to increase its frequency
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"a"), Some(&1));

        // Access "b" once
        assert_eq!(cache.get(&"b"), Some(&2));

        // Add a new item, should evict "c": it was inserted at frequency 1
        // and never accessed again, making it the least frequently used
        let evicted = cache.put("d", 4, 1);
        assert!(evicted.is_some());
        let (evicted_key, evicted_val) = evicted.unwrap()[0];
        assert_eq!(evicted_key, "c");
        assert_eq!(evicted_val, 3);

        // Check remaining items (inserts count as frequency 1, each get adds 1)
        assert_eq!(cache.get(&"a"), Some(&1)); // frequency 4 after this access
        assert_eq!(cache.get(&"b"), Some(&2)); // frequency 3 after this access
        assert_eq!(cache.get(&"d"), Some(&4)); // frequency 2 after this access
        assert_eq!(cache.get(&"c"), None); // evicted
    }

    #[test]
    fn test_lfu_frequency_ordering() {
        let mut cache = make_cache(2);

        // Add items
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // Access "a" multiple times
        cache.get(&"a");
        cache.get(&"a");
        cache.get(&"a");

        // Access "b" once
        cache.get(&"b");

        // Add new item, should evict "b" (lower frequency)
        let evicted = cache.put("c", 3, 1);
        assert_eq!(evicted.unwrap()[0].0, "b");

        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"c"), Some(&3));
        assert_eq!(cache.get(&"b"), None);
    }

    #[test]
    fn test_lfu_update_existing() {
        let mut cache = make_cache(2);

        cache.put("a", 1, 1);
        cache.get(&"a"); // frequency becomes 2

        // Update existing key - replacement returns None
        let old_value = cache.put("a", 10, 1);
        assert!(old_value.is_none());

        // The frequency should be preserved
        cache.put("b", 2, 1);
        cache.put("c", 3, 1); // Should evict "b" because "a" has higher frequency

        assert_eq!(cache.get(&"a"), Some(&10));
        assert_eq!(cache.get(&"c"), Some(&3));
        assert_eq!(cache.get(&"b"), None);
    }

    #[test]
    fn test_lfu_remove() {
        let mut cache = make_cache(3);

        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        cache.put("c", 3, 1);

        // Remove item; second remove of the same key must return None
        assert_eq!(cache.remove(&"b"), Some(2));
        assert_eq!(cache.remove(&"b"), None);

        // Check remaining items
        assert_eq!(cache.get(&"a"), Some(&1));
        assert_eq!(cache.get(&"c"), Some(&3));
        assert_eq!(cache.len(), 2);
    }

    #[test]
    fn test_lfu_clear() {
        let mut cache = make_cache(3);

        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        cache.put("c", 3, 1);

        assert_eq!(cache.len(), 3);
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Should be able to add items after clear
        cache.put("d", 4, 1);
        assert_eq!(cache.get(&"d"), Some(&4));
    }

    #[test]
    fn test_lfu_get_mut() {
        let mut cache = make_cache(2);

        cache.put("a", 1, 1);

        // Modify value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            *value = 10;
        }

        assert_eq!(cache.get(&"a"), Some(&10));
    }

    #[test]
    fn test_lfu_complex_values() {
        let mut cache = make_cache(2);

        #[derive(Debug, Clone, PartialEq)]
        struct ComplexValue {
            id: usize,
            data: String,
        }

        // Add complex values
        cache.put(
            "a",
            ComplexValue {
                id: 1,
                data: "a-data".to_string(),
            },
            1,
        );

        cache.put(
            "b",
            ComplexValue {
                id: 2,
                data: "b-data".to_string(),
            },
            1,
        );

        // Modify a value using get_mut
        if let Some(value) = cache.get_mut(&"a") {
            value.id = 100;
            value.data = "a-modified".to_string();
        }

        // Check the modified value
        let a = cache.get(&"a").unwrap();
        assert_eq!(a.id, 100);
        assert_eq!(a.data, "a-modified");
    }

    /// Test to validate the fix for Stacked Borrows violations detected by Miri.
    #[test]
    fn test_miri_stacked_borrows_fix() {
        let mut cache = make_cache(10);

        // Insert some items
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        cache.put("c", 3, 1);

        // Access items multiple times to trigger frequency updates
        for _ in 0..3 {
            assert_eq!(cache.get(&"a"), Some(&1));
            assert_eq!(cache.get(&"b"), Some(&2));
            assert_eq!(cache.get(&"c"), Some(&3));
        }

        assert_eq!(cache.len(), 3);

        // Test with get_mut as well
        if let Some(val) = cache.get_mut(&"a") {
            *val += 10;
        }
        assert_eq!(cache.get(&"a"), Some(&11));
    }

    // Test that LfuSegment works correctly (internal tests)
    #[test]
    fn test_lfu_segment_directly() {
        let config = LfuCacheConfig {
            capacity: NonZeroUsize::new(3).unwrap(),
            max_size: u64::MAX,
        };
        let mut segment: LfuSegment<&str, i32, DefaultHashBuilder> =
            LfuSegment::init(config, DefaultHashBuilder::default());

        assert_eq!(segment.len(), 0);
        assert!(segment.is_empty());
        assert_eq!(segment.cap().get(), 3);

        segment.put("a", 1, 1);
        segment.put("b", 2, 1);
        assert_eq!(segment.len(), 2);

        // Access to increase frequency
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"b"), Some(&2));
    }

    #[test]
    fn test_lfu_concurrent_access() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));
        let num_threads = 4;
        let ops_per_thread = 100;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    guard.put(key.clone(), i, 1);
                    // Access some keys multiple times to test frequency tracking
                    if i % 3 == 0 {
                        let _ = guard.get(&key);
                        let _ = guard.get(&key);
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_lfu_size_aware_tracking() {
        let mut cache = make_cache(10);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), u64::MAX);

        cache.put("a", 1, 100);
        cache.put("b", 2, 200);
        cache.put("c", 3, 150);

        assert_eq!(cache.current_size(), 450);
        assert_eq!(cache.len(), 3);

        // Clear should reset size
        cache.clear();
        assert_eq!(cache.current_size(), 0);
    }

    #[test]
    fn test_lfu_init_constructor() {
        let config = LfuCacheConfig {
            capacity: NonZeroUsize::new(1000).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: LfuCache<String, i32> = LfuCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
    }

    #[test]
    fn test_lfu_with_limits_constructor() {
        let config = LfuCacheConfig {
            capacity: NonZeroUsize::new(100).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: LfuCache<String, String> = LfuCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
        assert_eq!(cache.cap().get(), 100);
    }

    #[test]
    fn test_lfu_frequency_list_accumulation() {
        // This test verifies that empty frequency lists don't accumulate in LFU cache.
        // Empty list accumulation was causing 300x slowdown in simulations (896s vs 3s).
        //
        // The test simulates a constrained scenario: small cache with high-cardinality traffic
        // (many unique keys), which historically triggered the empty list accumulation bug.
        use std::time::Instant;

        // Reproduce the constrained scenario: small cache, many unique keys
        let mut cache = make_cache(500);

        // Simulate high-cardinality traffic pattern
        // Use fewer operations under Miri to keep test time reasonable
        let num_ops = if cfg!(miri) { 5_000 } else { 50_000 };
        let start = Instant::now();
        for i in 0..num_ops {
            // Simulate 80-20: 80% of accesses go to 20% of keys
            let key = if i % 5 == 0 {
                format!("popular_{}", i % 100) // 100 popular keys
            } else {
                format!("long_tail_{}", i) // Many unique keys
            };
            cache.put(key.clone(), i, 1);

            // Also do some gets to build frequency
            if i % 10 == 0 {
                for j in 0..10 {
                    cache.get(&format!("popular_{}", j));
                }
            }
        }
        let elapsed = start.elapsed();

        let num_freq_lists = cache.segment.frequency_lists.len();
        let empty_lists = cache
            .segment
            .frequency_lists
            .values()
            .filter(|l| l.is_empty())
            .count();

        println!(
            "{} ops in {:?} ({:.0} ops/sec)",
            num_ops,
            elapsed,
            num_ops as f64 / elapsed.as_secs_f64()
        );
        println!(
            "Frequency lists: {} total, {} empty",
            num_freq_lists, empty_lists
        );

        // Print frequency distribution
        let mut freq_counts: std::collections::BTreeMap<usize, usize> =
            std::collections::BTreeMap::new();
        for (freq, list) in &cache.segment.frequency_lists {
            if !list.is_empty() {
                *freq_counts.entry(*freq).or_insert(0) += list.len();
            }
        }
        println!("Frequency distribution (non-empty): {:?}", freq_counts);

        // The key correctness check: verify empty frequency lists don't accumulate
        // This was the bug that caused the 300x slowdown (896s vs 3s) in simulations
        assert!(
            empty_lists == 0,
            "LFU should not accumulate empty frequency lists. Found {} empty lists out of {} total",
            empty_lists,
            num_freq_lists
        );
    }

    #[test]
    fn test_lfu_contains_non_promoting() {
        let mut cache = make_cache(2);
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // contains() should return true for existing keys
        assert!(cache.contains(&"a"));
        assert!(cache.contains(&"b"));
        assert!(!cache.contains(&"c"));

        // Access "b" to increase its frequency
        cache.get(&"b");

        // contains() should NOT increase frequency of "a"
        // So "a" should still be the eviction candidate
        assert!(cache.contains(&"a"));

        // Adding "c" should evict "a" (lowest frequency), not "b"
        cache.put("c", 3, 1);
        assert!(!cache.contains(&"a")); // "a" was evicted
        assert!(cache.contains(&"b")); // "b" still exists
        assert!(cache.contains(&"c")); // "c" was just added
    }

    #[test]
    fn test_put_returns_none_when_no_eviction() {
        let mut cache = make_cache(10);
        assert!(cache.put("a", 1, 1).is_none());
        assert!(cache.put("b", 2, 1).is_none());
    }

    #[test]
    fn test_put_returns_single_eviction() {
        let mut cache = make_cache(2);
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        // "a" has lower frequency, should be evicted
        let result = cache.put("c", 3, 1);
        assert_eq!(result, Some(vec![("a", 1)]));
    }

    #[test]
    fn test_put_replacement_returns_none() {
        let mut cache = make_cache(10);
        cache.put("a", 1, 1);
        // Replacement is not eviction - returns None
        let result = cache.put("a", 2, 1);
        assert!(result.is_none());
        // Value should be updated
        assert_eq!(cache.get(&"a"), Some(&2));
    }

    #[test]
    fn test_put_returns_multiple_evictions_size_based() {
        let config = LfuCacheConfig {
            capacity: NonZeroUsize::new(10).unwrap(),
            max_size: 100,
        };
        let mut cache = LfuCache::init(config, None);
        for i in 0..10 {
            cache.put(i, i, 10);
        }
        // Cache is full at size 100; inserting a 50-unit entry must evict
        // enough 10-unit entries (5 of them) to make room.
        let result = cache.put(99, 99, 50);
        let evicted = result.unwrap();
        assert_eq!(evicted.len(), 5);
    }
}