cache_rs/lfuda.rs
1//! Least Frequently Used with Dynamic Aging (LFUDA) Cache Implementation
2//!
3//! LFUDA is an enhanced LFU algorithm that addresses the "cache pollution" problem
4//! by incorporating a global age factor. This allows the cache to gradually forget
5//! old access patterns, enabling new popular items to compete fairly against
6//! historically popular but now cold items.
7//!
8//! # How the Algorithm Works
9//!
10//! LFUDA maintains a **global age** that increases whenever an item is evicted.
11//! Each item's priority is the sum of its access frequency and the age at which
12//! it was last accessed. This elegant mechanism ensures that:
13//!
14//! 1. Recently accessed items get a "boost" from the current global age
15//! 2. Old items with high frequency but no recent access gradually become eviction candidates
16//! 3. New items can compete fairly against established items
17//!
18//! ## Mathematical Formulation
19//!
20//! ```text
21//! For each cache entry i:
22//! - F_i = access frequency of item i
23//! - A_i = global age when item i was last accessed
24//! - Priority_i = F_i + A_i
25//!
26//! On eviction:
27//! - Select item j where Priority_j = min{Priority_i for all i}
28//! - Set global_age = Priority_j
29//!
30//! On access:
31//! - Increment F_i
32//! - Update A_i = global_age (item gets current age)
33//! ```
34//!
35//! ## Data Structure
36//!
37//! ```text
38//! ┌─────────────────────────────────────────────────────────────────────────────┐
39//! │ LFUDA Cache (global_age=100) │
40//! │ │
41//! │ HashMap<K, *Node> BTreeMap<Priority, List> │
42//! │ ┌──────────────┐ ┌─────────────────────────────────────────┐ │
43//! │ │ "hot" ──────────────────────│ pri=115: [hot] (freq=5, age=110) │ │
44//! │ │ "warm" ─────────────────────│ pri=103: [warm] (freq=3, age=100) │ │
45//! │ │ "stale" ────────────────────│ pri=60: [stale] (freq=50, age=10) ←LFU│ │
46//! │ └──────────────┘ └─────────────────────────────────────────┘ │
47//! │ ▲ │
48//! │ │ │
49//! │ min_priority=60 │
50//! │ │
51//! │ Note: "stale" has high frequency (50) but low age (10), making it the │
52//! │ eviction candidate despite being historically popular. │
53//! └─────────────────────────────────────────────────────────────────────────────┘
54//! ```
55//!
56//! ## Operations
57//!
58//! | Operation | Action | Time |
59//! |-----------|--------|------|
60//! | `get(key)` | Increment frequency, update age to global_age | O(log P) |
61//! | `put(key, value)` | Insert with priority=global_age+1, evict min priority | O(log P) |
62//! | `remove(key)` | Remove from priority list | O(log P) |
63//!
64//! ## Aging Example
65//!
66//! ```text
67//! global_age = 0
68//!
69//! put("a", 1) → a: freq=1, age=0, priority=1
70//! get("a") x10 → a: freq=11, age=0, priority=11
71//! put("b", 2) → b: freq=1, age=0, priority=1
72//! put("c", 3) → c: freq=1, age=0, priority=1
73//!
74//! [Cache full, add "d"]
75//! - Evict min priority (b or c with priority=1)
76//! - Set global_age = 1
77//!
78//! put("d", 4) → d: freq=1, age=1, priority=2
79//!
80//! [Many evictions later, global_age = 100]
81//! - "a" still has priority=11 (no recent access)
82//! - New item "e" gets priority=101
83//! - "a" becomes eviction candidate despite high frequency!
84//! ```
85//!
86//! # Dual-Limit Capacity
87//!
88//! This implementation supports two independent limits:
89//!
90//! - **`max_entries`**: Maximum number of items
91//! - **`max_size`**: Maximum total size of content
92//!
93//! Eviction occurs when **either** limit would be exceeded.
94//!
95//! # Performance Characteristics
96//!
97//! | Metric | Value |
98//! |--------|-------|
99//! | Get | O(log P) |
100//! | Put | O(log P) amortized |
101//! | Remove | O(log P) |
102//! | Memory per entry | ~110 bytes overhead + key×2 + value |
103//!
104//! Where P = number of distinct priority values. Since priority = frequency + age,
105//! P can grow with the number of entries. BTreeMap provides O(log P) lookups.
106//!
107//! Slightly higher overhead than LFU due to age tracking per entry.
108//!
109//! # When to Use LFUDA
110//!
111//! **Good for:**
112//! - Long-running caches where item popularity changes over time
113//! - Web caches, CDN caches with evolving content popularity
114//! - Systems that need frequency-based eviction but must adapt to trends
115//! - Preventing "cache pollution" from historically popular but now stale items
116//!
117//! **Not ideal for:**
118//! - Short-lived caches (aging doesn't have time to take effect)
119//! - Static popularity patterns (pure LFU is simpler and equally effective)
120//! - Strictly recency-based workloads (use LRU instead)
121//!
122//! # LFUDA vs LFU
123//!
124//! | Aspect | LFU | LFUDA |
125//! |--------|-----|-------|
126//! | Adapts to popularity changes | No | Yes |
127//! | Memory overhead | Lower | Slightly higher |
128//! | Complexity | Simpler | More complex |
129//! | Best for | Stable patterns | Evolving patterns |
130//!
131//! # Thread Safety
132//!
133//! `LfudaCache` is **not thread-safe**. For concurrent access, either:
134//! - Wrap with `Mutex` or `RwLock`
135//! - Use `ConcurrentLfudaCache` (requires `concurrent` feature)
136//!
137//! # Examples
138//!
139//! ## Basic Usage
140//!
141//! ```
142//! use cache_rs::LfudaCache;
143//! use cache_rs::config::LfudaCacheConfig;
144//! use core::num::NonZeroUsize;
145//!
146//! let config = LfudaCacheConfig {
147//! capacity: NonZeroUsize::new(3).unwrap(),
148//! initial_age: 0,
149//! max_size: u64::MAX,
150//! };
151//! let mut cache = LfudaCache::init(config, None);
152//!
153//! cache.put("a", 1, 1);
154//! cache.put("b", 2, 1);
155//! cache.put("c", 3, 1);
156//!
157//! // Access "a" to increase its priority
158//! assert_eq!(cache.get(&"a"), Some(&1));
159//! assert_eq!(cache.get(&"a"), Some(&1));
160//!
161//! // Add new item - lowest priority item evicted
162//! cache.put("d", 4, 1);
163//!
164//! // "a" survives due to higher priority (frequency + age)
165//! assert_eq!(cache.get(&"a"), Some(&1));
166//! ```
167//!
168//! ## Long-Running Cache with Aging
169//!
170//! ```
171//! use cache_rs::LfudaCache;
172//! use cache_rs::config::LfudaCacheConfig;
173//! use core::num::NonZeroUsize;
174//!
175//! let config = LfudaCacheConfig {
176//! capacity: NonZeroUsize::new(100).unwrap(),
177//! initial_age: 0,
178//! max_size: u64::MAX,
179//! };
180//! let mut cache = LfudaCache::init(config, None);
181//!
182//! // Populate cache with initial data
183//! for i in 0..100 {
184//! cache.put(i, i * 10, 1);
185//! }
186//!
187//! // Access some items frequently
188//! for _ in 0..50 {
189//! cache.get(&0); // Item 0 becomes very popular
190//! }
191//!
192//! // Later: new content arrives, old content ages out
193//! for i in 100..200 {
194//! cache.put(i, i * 10, 1); // Each insert may evict old items
195//! }
196//!
197//! // Item 0 may eventually be evicted if not accessed recently,
198//! // despite its historical popularity - this is the power of LFUDA!
199//! ```
200
201extern crate alloc;
202
203use crate::config::LfudaCacheConfig;
204use crate::entry::CacheEntry;
205use crate::list::{List, ListEntry};
206use crate::metrics::{CacheMetrics, LfudaCacheMetrics};
207
/// Metadata for LFUDA (LFU with Dynamic Aging) cache entries.
///
/// LFUDA is similar to LFU but addresses the "aging problem" where old
/// frequently-used items can prevent new items from being cached.
/// The age factor is maintained at the cache level, not per-entry; each
/// entry only remembers the global age it observed at insertion time.
///
/// # Algorithm
///
/// Entry priority = frequency + age_at_insertion
/// - When an item is evicted, global_age = evicted_item.priority
/// - New items start with current global_age as their insertion age
///
/// # Examples
///
/// ```
/// use cache_rs::lfuda::LfudaMeta;
///
/// let meta = LfudaMeta::new(1, 10); // frequency=1, age_at_insertion=10
/// assert_eq!(meta.frequency, 1);
/// assert_eq!(meta.age_at_insertion, 10);
/// assert_eq!(meta.priority(), 11);
/// ```
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct LfudaMeta {
    /// Access frequency count.
    pub frequency: u64,
    /// Age value when this item was inserted (snapshot of global_age).
    pub age_at_insertion: u64,
}

impl LfudaMeta {
    /// Builds a metadata record from an initial frequency and the global age
    /// observed when the entry was inserted.
    ///
    /// # Arguments
    ///
    /// * `frequency` - Initial frequency value (usually 1 for new items)
    /// * `age_at_insertion` - The global_age at the time of insertion
    #[inline]
    pub fn new(frequency: u64, age_at_insertion: u64) -> Self {
        LfudaMeta {
            frequency,
            age_at_insertion,
        }
    }

    /// Bumps the access count by one and hands back the updated count.
    #[inline]
    pub fn increment(&mut self) -> u64 {
        self.frequency = self.frequency + 1;
        self.frequency
    }

    /// Effective eviction priority: access count plus insertion-time age.
    #[inline]
    pub fn priority(&self) -> u64 {
        self.age_at_insertion + self.frequency
    }
}
266use alloc::boxed::Box;
267use alloc::collections::BTreeMap;
268use alloc::string::String;
269use alloc::vec::Vec;
270use core::borrow::Borrow;
271use core::hash::{BuildHasher, Hash};
272use core::num::NonZeroUsize;
273
274#[cfg(feature = "hashbrown")]
275use hashbrown::DefaultHashBuilder;
276#[cfg(feature = "hashbrown")]
277use hashbrown::HashMap;
278
279#[cfg(not(feature = "hashbrown"))]
280use std::collections::hash_map::RandomState as DefaultHashBuilder;
281#[cfg(not(feature = "hashbrown"))]
282use std::collections::HashMap;
283
/// Internal LFUDA segment containing the actual cache algorithm.
///
/// This is shared between `LfudaCache` (single-threaded) and
/// `ConcurrentLfudaCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// Uses `CacheEntry<K, V, LfudaMeta>` for unified entry management with built-in
/// size tracking, timestamps, and LFUDA-specific metadata (frequency + age_at_insertion).
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field.
/// These pointers are always valid as long as:
/// - The pointer was obtained from a `priority_lists` entry's `add()` call
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct LfudaSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the LFUDA cache (includes capacity and max_size)
    config: LfudaCacheConfig,

    /// Global age value that increases when items are evicted
    global_age: u64,

    /// Current minimum effective priority in the cache.
    /// This is a lower bound that may go stale (e.g. after `remove()` drains a
    /// list); `evict()` re-derives the true minimum before relying on it.
    min_priority: u64,

    /// Map from keys to their node pointer.
    /// All metadata (frequency, age, size) is stored in CacheEntry.
    map: HashMap<K, *mut ListEntry<CacheEntry<K, V, LfudaMeta>>, S>,

    /// Map from effective priority to list of items with that priority.
    /// Items within each priority list are ordered by recency (LRU within priority).
    /// BTreeMap keeps keys sorted so the minimum priority is always the first key.
    priority_lists: BTreeMap<u64, List<CacheEntry<K, V, LfudaMeta>>>,

    /// Metrics tracking for this cache instance
    metrics: LfudaCacheMetrics,

    /// Current total size of cached content (sum of entry sizes)
    current_size: u64,
}
324
325// SAFETY: LfudaSegment owns all data and raw pointers point only to nodes owned by
326// `priority_lists`. Concurrent access is safe when wrapped in proper synchronization primitives.
327unsafe impl<K: Send, V: Send, S: Send> Send for LfudaSegment<K, V, S> {}
328
329// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
330unsafe impl<K: Send, V: Send, S: Sync> Sync for LfudaSegment<K, V, S> {}
331
332impl<K: Hash + Eq, V: Clone, S: BuildHasher> LfudaSegment<K, V, S> {
333 /// Creates a new LFUDA segment from a configuration.
334 ///
335 /// This is the **recommended** way to create an LFUDA segment. All configuration
336 /// is specified through the [`LfudaCacheConfig`] struct.
337 ///
338 /// # Arguments
339 ///
340 /// * `config` - Configuration specifying capacity, initial age, and optional size limit
341 /// * `hasher` - Hash builder for the internal HashMap
342 #[allow(dead_code)] // Used by concurrent module when feature is enabled
343 pub(crate) fn init(config: LfudaCacheConfig, hasher: S) -> Self {
344 let map_capacity = config.capacity.get().next_power_of_two();
345 LfudaSegment {
346 config,
347 global_age: config.initial_age as u64,
348 min_priority: 0,
349 map: HashMap::with_capacity_and_hasher(map_capacity, hasher),
350 priority_lists: BTreeMap::new(),
351 metrics: LfudaCacheMetrics::new(config.max_size),
352 current_size: 0,
353 }
354 }
355
    /// Returns the maximum number of key-value pairs the segment can hold.
    #[inline]
    pub(crate) fn cap(&self) -> NonZeroUsize {
        self.config.capacity
    }

    /// Returns the current number of key-value pairs in the segment.
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the segment contains no key-value pairs.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Returns the current global age value.
    #[inline]
    pub(crate) fn global_age(&self) -> u64 {
        self.global_age
    }

    /// Returns the current total size of cached content.
    #[inline]
    pub(crate) fn current_size(&self) -> u64 {
        self.current_size
    }

    /// Returns the maximum content size the cache can hold.
    #[inline]
    pub(crate) fn max_size(&self) -> u64 {
        self.config.max_size
    }

    /// Returns a reference to the metrics for this segment.
    #[inline]
    pub(crate) fn metrics(&self) -> &LfudaCacheMetrics {
        &self.metrics
    }

    /// Records a cache miss for metrics tracking.
    ///
    /// Misses are reported by callers: `get()` returns `None` without touching
    /// metrics, so external code is responsible for invoking this on a miss.
    #[inline]
    pub(crate) fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }
403
404 /// Updates the priority of an item and moves it to the appropriate priority list.
405 ///
406 /// # Safety
407 ///
408 /// The caller must ensure that `node` is a valid pointer to a ListEntry that exists
409 /// in this cache's priority_lists and has not been freed.
410 unsafe fn update_priority_by_node(
411 &mut self,
412 node: *mut ListEntry<CacheEntry<K, V, LfudaMeta>>,
413 old_priority: u64,
414 ) -> *mut ListEntry<CacheEntry<K, V, LfudaMeta>>
415 where
416 K: Clone + Hash + Eq,
417 {
418 // SAFETY: node is guaranteed to be valid by the caller's contract
419 let entry = (*node).get_value();
420 let key_cloned = entry.key.clone();
421
422 // Get current node from map
423 let node = *self.map.get(&key_cloned).unwrap();
424
425 // Calculate new priority after incrementing frequency
426 let meta = &(*node).get_value().metadata.algorithm;
427 let new_priority = (meta.frequency + 1) + meta.age_at_insertion;
428
429 // If priority hasn't changed, just move to front of the same list
430 if old_priority == new_priority {
431 self.priority_lists
432 .get_mut(&old_priority)
433 .unwrap()
434 .move_to_front(node);
435 return node;
436 }
437
438 // Remove from old priority list
439 let boxed_entry = self
440 .priority_lists
441 .get_mut(&old_priority)
442 .unwrap()
443 .remove(node)
444 .unwrap();
445
446 // Clean up empty old priority list and update min_priority if necessary
447 if self.priority_lists.get(&old_priority).unwrap().is_empty() {
448 self.priority_lists.remove(&old_priority);
449 if old_priority == self.min_priority {
450 self.min_priority = new_priority;
451 }
452 }
453
454 // Update frequency in the entry's metadata
455 let entry_ptr = Box::into_raw(boxed_entry);
456 (*entry_ptr).get_value_mut().metadata.algorithm.frequency += 1;
457
458 // Ensure the new priority list exists
459 let capacity = self.config.capacity;
460 self.priority_lists
461 .entry(new_priority)
462 .or_insert_with(|| List::new(capacity));
463
464 // Add to the front of the new priority list (most recently used within that priority)
465 self.priority_lists
466 .get_mut(&new_priority)
467 .unwrap()
468 .attach_from_other_list(entry_ptr);
469
470 // Update the map with the new node pointer
471 *self.map.get_mut(&key_cloned).unwrap() = entry_ptr;
472
473 entry_ptr
474 }
475
476 /// Returns a reference to the value corresponding to the key.
477 pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
478 where
479 K: Borrow<Q> + Clone,
480 Q: ?Sized + Hash + Eq,
481 {
482 if let Some(&node) = self.map.get(key) {
483 unsafe {
484 // SAFETY: node comes from our map
485 let entry = (*node).get_value();
486 let meta = &entry.metadata.algorithm;
487 let old_priority = meta.priority();
488 self.metrics.core.record_hit(entry.metadata.size);
489
490 let new_node = self.update_priority_by_node(node, old_priority);
491 let new_entry = (*new_node).get_value();
492 Some(&new_entry.value)
493 }
494 } else {
495 None
496 }
497 }
498
499 /// Returns a mutable reference to the value corresponding to the key.
500 pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
501 where
502 K: Borrow<Q> + Clone,
503 Q: ?Sized + Hash + Eq,
504 {
505 if let Some(&node) = self.map.get(key) {
506 unsafe {
507 // SAFETY: node comes from our map
508 let entry = (*node).get_value();
509 let meta = &entry.metadata.algorithm;
510 let old_priority = meta.priority();
511 self.metrics.core.record_hit(entry.metadata.size);
512
513 let new_priority = (meta.frequency + 1) + meta.age_at_insertion;
514 self.metrics.record_frequency_increment(new_priority);
515
516 let new_node = self.update_priority_by_node(node, old_priority);
517 let new_entry = (*new_node).get_value_mut();
518 Some(&mut new_entry.value)
519 }
520 } else {
521 None
522 }
523 }
524
    /// Inserts a key-value pair into the segment.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to insert
    /// * `size` - Optional size in bytes. Use `SIZE_UNIT` (1) for count-based caching.
    ///
    /// Returns evicted entries, or `None` if no entries were evicted.
    /// Note: Replacing an existing key does not return the old value.
    ///
    /// New entries start with frequency 1 and `age_at_insertion = global_age`,
    /// i.e. an effective priority of `global_age + 1`.
    pub(crate) fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
    where
        K: Clone,
    {
        // If key already exists, update it in place, keeping its frequency and age.
        // NOTE(review): this path adjusts size accounting but never evicts, so
        // replacing a value with a larger one can leave current_size > max_size —
        // confirm whether that is intended.
        if let Some(&node) = self.map.get(&key) {
            unsafe {
                // SAFETY: node comes from our map
                let entry = (*node).get_value();
                let meta = &entry.metadata.algorithm;
                let priority = meta.priority();
                let old_size = entry.metadata.size;

                // Create new CacheEntry with same frequency and age
                let new_entry = CacheEntry::with_algorithm_metadata(
                    key.clone(),
                    value,
                    size,
                    LfudaMeta::new(meta.frequency, meta.age_at_insertion),
                );

                let _old_entry = self
                    .priority_lists
                    .get_mut(&priority)
                    .unwrap()
                    .update(node, new_entry, true);

                // Update size tracking
                self.current_size = self.current_size.saturating_sub(old_size);
                self.current_size += size;
                self.metrics.core.record_size_change(old_size, size);
                self.metrics.core.bytes_written_to_cache += size;

                // Replacement is not eviction - don't return the old value
                return None;
            }
        }

        let mut evicted = Vec::new();

        // Add new item with frequency 1 and current global age
        let frequency: u64 = 1;
        let age_at_insertion = self.global_age;
        let priority = frequency + age_at_insertion;

        // Evict while entry count limit OR size limit would be exceeded.
        // NOTE(review): if `size` alone exceeds max_size, this loop drains the
        // whole cache and the item is still inserted below, leaving
        // current_size > max_size — confirm whether oversized items should be
        // rejected instead.
        while self.len() >= self.config.capacity.get()
            || (self.current_size + size > self.config.max_size && !self.map.is_empty())
        {
            if let Some(entry) = self.evict() {
                self.metrics.core.evictions += 1;
                evicted.push(entry);
            } else {
                break;
            }
        }

        // The new entry's priority is a valid lower bound: either the cache is
        // now empty, or we keep the smaller of the existing bound and `priority`.
        self.min_priority = if self.is_empty() {
            priority
        } else {
            core::cmp::min(self.min_priority, priority)
        };

        // Ensure priority list exists
        let capacity = self.config.capacity;
        self.priority_lists
            .entry(priority)
            .or_insert_with(|| List::new(capacity));

        // Create CacheEntry with LfudaMeta
        let cache_entry = CacheEntry::with_algorithm_metadata(
            key.clone(),
            value,
            size,
            LfudaMeta::new(frequency, age_at_insertion),
        );

        // `add` returns the node pointer on success; only then is the entry
        // tracked in the map and counted in size/metrics.
        if let Some(node) = self
            .priority_lists
            .get_mut(&priority)
            .unwrap()
            .add(cache_entry)
        {
            self.map.insert(key, node);
            self.current_size += size;

            self.metrics.core.record_insertion(size);
            self.metrics.record_frequency_increment(priority);
            if age_at_insertion > 0 {
                self.metrics.record_aging_benefit(age_at_insertion);
            }
        }

        if evicted.is_empty() {
            None
        } else {
            Some(evicted)
        }
    }
634
    /// Removes a key from the segment, returning the value if the key was present.
    ///
    /// Also frees the entry's priority-list node, updates size accounting and
    /// metrics, and repairs `min_priority` when the entry's list became empty.
    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let node = self.map.remove(key)?;

        unsafe {
            // SAFETY: node comes from our map; take_value moves the value out
            // and Box::from_raw frees memory (MaybeUninit won't double-drop).
            // Read priority before removal — needed to find the correct priority list
            let priority = (*node).get_value().metadata.algorithm.priority();

            let boxed_entry = self.priority_lists.get_mut(&priority)?.remove(node)?;
            let entry_ptr = Box::into_raw(boxed_entry);
            let cache_entry = (*entry_ptr).take_value();
            let removed_size = cache_entry.metadata.size;
            let _ = Box::from_raw(entry_ptr);

            self.current_size = self.current_size.saturating_sub(removed_size);
            self.metrics.core.record_removal(removed_size);

            // Clean up empty priority list and update min_priority if necessary.
            // BTreeMap keys iterate in ascending order, so the first remaining
            // key is the new minimum; fall back to global_age when none remain.
            if self.priority_lists.get(&priority).unwrap().is_empty() {
                self.priority_lists.remove(&priority);
                if priority == self.min_priority {
                    self.min_priority = self
                        .priority_lists
                        .keys()
                        .copied()
                        .next()
                        .unwrap_or(self.global_age);
                }
            }

            Some(cache_entry.value)
        }
    }
674
675 /// Clears the segment, removing all key-value pairs.
676 pub(crate) fn clear(&mut self) {
677 self.map.clear();
678 self.priority_lists.clear();
679 self.global_age = 0;
680 self.min_priority = 0;
681 self.current_size = 0;
682 }
683
    /// Check if key exists without updating its priority or access metadata.
    ///
    /// Unlike `get()`, this method does NOT update the entry's frequency
    /// or access metadata, so it never reorders the priority lists and
    /// records neither a hit nor a miss.
    #[inline]
    pub(crate) fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.map.contains_key(key)
    }
696
697 /// Returns a reference to the value without updating priority or access metadata.
698 ///
699 /// Unlike `get()`, this method does NOT increment the entry's frequency,
700 /// change its priority, or move it between priority lists.
701 pub(crate) fn peek<Q>(&self, key: &Q) -> Option<&V>
702 where
703 K: Borrow<Q>,
704 Q: ?Sized + Hash + Eq,
705 {
706 let &node = self.map.get(key)?;
707 unsafe {
708 // SAFETY: node comes from our map, so it's a valid pointer
709 let entry = (*node).get_value();
710 Some(&entry.value)
711 }
712 }
713
    /// Removes and returns the eviction candidate (lowest priority entry).
    ///
    /// Also updates the global age to the evicted item's priority (LFUDA aging).
    ///
    /// This method does **not** increment the eviction counter in metrics.
    /// Eviction metrics are only recorded when the cache internally evicts
    /// entries to make room during `put()` operations.
    ///
    /// Returns `None` if the cache is empty.
    fn evict(&mut self) -> Option<(K, V)> {
        if self.is_empty() {
            return None;
        }

        // Find the actual minimum non-empty priority list.
        // self.min_priority may be stale if remove() left empty lists behind.
        // The loop makes progress each iteration: it either breaks, removes a
        // drained list, or re-derives min_priority from the sorted key set.
        let min_key = loop {
            if let Some(min_priority_list) = self.priority_lists.get(&self.min_priority) {
                if !min_priority_list.is_empty() {
                    break self.min_priority;
                }
                // Clean up empty list and advance to the smallest remaining
                // key (BTreeMap keys iterate in ascending order).
                let stale = self.min_priority;
                self.priority_lists.remove(&stale);
                self.min_priority = self
                    .priority_lists
                    .keys()
                    .copied()
                    .next()
                    .unwrap_or(self.global_age);
            } else {
                // min_priority doesn't exist in the map, recalculate
                self.min_priority = self
                    .priority_lists
                    .keys()
                    .copied()
                    .next()
                    .unwrap_or(self.global_age);
                // If there are still no lists, the cache is effectively empty
                if self.priority_lists.is_empty() {
                    return None;
                }
            }
        };

        let min_priority_list = self.priority_lists.get_mut(&min_key)?;
        // Evict the least-recently-used entry within the lowest-priority list
        // (lists are ordered by recency, so the tail is the LRU entry).
        let old_entry = min_priority_list.remove_last()?;
        let is_list_empty = min_priority_list.is_empty();

        unsafe {
            // SAFETY: take_value moves the CacheEntry out by value.
            // Box::from_raw frees memory (MaybeUninit won't double-drop).
            let entry_ptr = Box::into_raw(old_entry);
            let cache_entry = (*entry_ptr).take_value();
            let evicted_size = cache_entry.metadata.size;
            let evicted_priority = cache_entry.metadata.algorithm.priority();

            // Update global age to the evicted item's priority (LFUDA aging)
            self.global_age = evicted_priority;
            self.metrics.record_aging_event(self.global_age);

            self.map.remove(&cache_entry.key);
            self.current_size = self.current_size.saturating_sub(evicted_size);
            self.metrics.core.record_removal(evicted_size);

            // Update min_priority if the list is now empty.
            // Note: min_key == self.min_priority here (the loop above only
            // breaks with that value), so this removes the drained list.
            if is_list_empty {
                self.priority_lists.remove(&self.min_priority);
                self.min_priority = self
                    .priority_lists
                    .keys()
                    .copied()
                    .next()
                    .unwrap_or(self.global_age);
            }

            let _ = Box::from_raw(entry_ptr);
            Some((cache_entry.key, cache_entry.value))
        }
    }
794}
795
796// Implement Debug for LfudaSegment manually since it contains raw pointers
797impl<K, V, S> core::fmt::Debug for LfudaSegment<K, V, S> {
798 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
799 f.debug_struct("LfudaSegment")
800 .field("capacity", &self.config.capacity)
801 .field("len", &self.map.len())
802 .field("global_age", &self.global_age)
803 .field("min_priority", &self.min_priority)
804 .finish()
805 }
806}
807
/// An implementation of a Least Frequently Used with Dynamic Aging (LFUDA) cache.
///
/// LFUDA improves upon LFU by adding an aging mechanism that prevents old frequently-used
/// items from blocking new items indefinitely. Each item's effective priority is calculated
/// as (access_frequency + age_at_insertion), where age_at_insertion is the global age
/// value when the item was first inserted.
///
/// When an item is evicted, the global age is set to the evicted item's effective priority,
/// ensuring that new items start with a competitive priority.
///
/// # Examples
///
/// ```
/// use cache_rs::lfuda::LfudaCache;
/// use cache_rs::config::LfudaCacheConfig;
/// use core::num::NonZeroUsize;
///
/// // Create an LFUDA cache with capacity 3
/// let config = LfudaCacheConfig {
///     capacity: NonZeroUsize::new(3).unwrap(),
///     initial_age: 0,
///     max_size: u64::MAX,
/// };
/// let mut cache = LfudaCache::init(config, None);
///
/// // Add some items
/// cache.put("a", 1, 1);
/// cache.put("b", 2, 1);
/// cache.put("c", 3, 1);
///
/// // Access "a" multiple times to increase its frequency
/// assert_eq!(cache.get(&"a"), Some(&1));
/// assert_eq!(cache.get(&"a"), Some(&1));
///
/// // Add more items to trigger aging
/// cache.put("d", 4, 1); // This will evict an item and increase global age
/// cache.put("e", 5, 1); // New items benefit from the increased age
/// ```
#[derive(Debug)]
pub struct LfudaCache<K, V, S = DefaultHashBuilder> {
    /// Single segment holding the whole cache; every public method delegates here.
    segment: LfudaSegment<K, V, S>,
}
850
851impl<K: Hash + Eq, V: Clone, S: BuildHasher> LfudaCache<K, V, S> {
852 /// Returns the maximum number of key-value pairs the cache can hold.
853 #[inline]
854 pub fn cap(&self) -> NonZeroUsize {
855 self.segment.cap()
856 }
857
858 /// Returns the current number of key-value pairs in the cache.
859 #[inline]
860 pub fn len(&self) -> usize {
861 self.segment.len()
862 }
863
864 /// Returns `true` if the cache contains no key-value pairs.
865 #[inline]
866 pub fn is_empty(&self) -> bool {
867 self.segment.is_empty()
868 }
869
870 /// Returns the current total size of cached content.
871 #[inline]
872 pub fn current_size(&self) -> u64 {
873 self.segment.current_size()
874 }
875
876 /// Returns the maximum content size the cache can hold.
877 #[inline]
878 pub fn max_size(&self) -> u64 {
879 self.segment.max_size()
880 }
881
882 /// Returns the current global age value.
883 #[inline]
884 pub fn global_age(&self) -> u64 {
885 self.segment.global_age()
886 }
887
888 /// Records a cache miss for metrics tracking (to be called by simulation system)
889 #[inline]
890 pub fn record_miss(&mut self, object_size: u64) {
891 self.segment.record_miss(object_size);
892 }
893
894 /// Returns a reference to the value corresponding to the key.
895 ///
896 /// The key may be any borrowed form of the cache's key type, but
897 /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
898 /// the key type.
899 ///
900 /// Accessing an item increases its frequency count.
901 #[inline]
902 pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
903 where
904 K: Borrow<Q> + Clone,
905 Q: ?Sized + Hash + Eq,
906 {
907 self.segment.get(key)
908 }
909
910 /// Returns a mutable reference to the value corresponding to the key.
911 ///
912 /// The key may be any borrowed form of the cache's key type, but
913 /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
914 /// the key type.
915 ///
916 /// Accessing an item increases its frequency count.
917 #[inline]
918 pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
919 where
920 K: Borrow<Q> + Clone,
921 Q: ?Sized + Hash + Eq,
922 {
923 self.segment.get_mut(key)
924 }
925
926 /// Inserts a key-value pair into the cache.
927 ///
928 /// If the key already exists, it is replaced. If at capacity, the item with the
929 /// lowest effective priority is evicted and the global age is updated to the
930 /// evicted item's effective priority.
931 ///
932 /// New items are inserted with a frequency of 1 and `age_at_insertion` set to the
933 /// current `global_age`.
934 ///
935 /// # Arguments
936 ///
937 /// * `key` - The key to insert
938 /// * `value` - The value to insert
939 /// * `size` - Optional size in bytes for size-aware caching. Use `SIZE_UNIT` (1) for count-based caching.
940 ///
941 /// # Returns
942 ///
943 /// - `Some(vec)` containing evicted entries (not replaced entries)
944 /// - `None` if no entries were evicted (zero allocation)
945 #[inline]
946 pub fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
947 where
948 K: Clone,
949 {
950 self.segment.put(key, value, size)
951 }
952
953 /// Removes a key from the cache, returning the value at the key if the key was previously in the cache.
954 ///
955 /// The key may be any borrowed form of the cache's key type, but
956 /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
957 /// the key type.
958 #[inline]
959 pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
960 where
961 K: Borrow<Q>,
962 Q: ?Sized + Hash + Eq,
963 V: Clone,
964 {
965 self.segment.remove(key)
966 }
967
968 /// Clears the cache, removing all key-value pairs.
969 /// Resets the global age to 0.
970 #[inline]
971 pub fn clear(&mut self) {
972 self.segment.clear()
973 }
974
    /// Check if key exists without updating its priority or access metadata.
    ///
    /// Unlike `get()`, this method does NOT update the entry's frequency
    /// or access metadata. Useful for existence checks without affecting
    /// cache eviction order.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::lfuda::LfudaCache;
    /// use cache_rs::config::LfudaCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LfudaCacheConfig {
    ///     capacity: NonZeroUsize::new(2).unwrap(),
    ///     initial_age: 0,
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LfudaCache::init(config, None);
    /// cache.put("a", 1, 1);
    /// cache.put("b", 2, 1);
    ///
    /// // contains() does NOT update priority
    /// assert!(cache.contains(&"a"));
    /// ```
    #[inline]
    pub fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        // Read-only lookup; never touches frequency or aging state.
        self.segment.contains(key)
    }
1008
    /// Returns a reference to the value without updating priority or access metadata.
    ///
    /// Unlike [`get()`](Self::get), this does NOT increment the entry's frequency,
    /// change its priority, or move it between priority lists.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::lfuda::LfudaCache;
    /// use cache_rs::config::LfudaCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LfudaCacheConfig {
    ///     capacity: NonZeroUsize::new(3).unwrap(),
    ///     initial_age: 0,
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LfudaCache::init(config, None);
    /// cache.put("a", 1, 1);
    ///
    /// // peek does not change priority
    /// assert_eq!(cache.peek(&"a"), Some(&1));
    /// assert_eq!(cache.peek(&"missing"), None);
    /// ```
    #[inline]
    pub fn peek<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        // Read-only lookup; the `&self` receiver guarantees no metadata update.
        self.segment.peek(key)
    }
1041}
1042
1043impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for LfudaCache<K, V, S> {
1044 fn metrics(&self) -> BTreeMap<String, f64> {
1045 self.segment.metrics().metrics()
1046 }
1047
1048 fn algorithm_name(&self) -> &'static str {
1049 self.segment.metrics().algorithm_name()
1050 }
1051}
1052
1053impl<K: Hash + Eq, V> LfudaCache<K, V>
1054where
1055 V: Clone,
1056{
1057 /// Creates a new LFUDA cache from a configuration.
1058 ///
1059 /// This is the **recommended** way to create an LFUDA cache. All configuration
1060 /// is specified through the [`LfudaCacheConfig`] struct, which uses a builder
1061 /// pattern for optional parameters.
1062 ///
1063 /// # Arguments
1064 ///
1065 /// * `config` - Configuration specifying capacity and optional size limit/initial age
1066 ///
1067 /// # Example
1068 ///
1069 /// ```
1070 /// use cache_rs::LfudaCache;
1071 /// use cache_rs::config::LfudaCacheConfig;
1072 /// use core::num::NonZeroUsize;
1073 ///
1074 /// // Simple capacity-only cache
1075 /// let config = LfudaCacheConfig {
1076 /// capacity: NonZeroUsize::new(100).unwrap(),
1077 /// initial_age: 0,
1078 /// max_size: u64::MAX,
1079 /// };
1080 /// let mut cache: LfudaCache<&str, i32> = LfudaCache::init(config, None);
1081 /// cache.put("key", 42, 1);
1082 ///
1083 /// // Cache with size limit
1084 /// let config = LfudaCacheConfig {
1085 /// capacity: NonZeroUsize::new(1000).unwrap(),
1086 /// initial_age: 100,
1087 /// max_size: 10 * 1024 * 1024, // 10MB
1088 /// };
1089 /// let cache: LfudaCache<String, Vec<u8>> = LfudaCache::init(config, None);
1090 /// ```
1091 pub fn init(
1092 config: LfudaCacheConfig,
1093 hasher: Option<DefaultHashBuilder>,
1094 ) -> LfudaCache<K, V, DefaultHashBuilder> {
1095 LfudaCache {
1096 segment: LfudaSegment::init(config, hasher.unwrap_or_default()),
1097 }
1098 }
1099}
1100
1101#[cfg(test)]
1102mod tests {
1103 extern crate std;
1104 use alloc::string::ToString;
1105
1106 use super::*;
1107 use crate::config::LfudaCacheConfig;
1108 use alloc::string::String;
1109
1110 /// Helper to create an LfudaCache with the given capacity
1111 fn make_cache<K: Hash + Eq + Clone, V: Clone>(cap: usize) -> LfudaCache<K, V> {
1112 let config = LfudaCacheConfig {
1113 capacity: NonZeroUsize::new(cap).unwrap(),
1114 initial_age: 0,
1115 max_size: u64::MAX,
1116 };
1117 LfudaCache::init(config, None)
1118 }
1119
1120 #[test]
1121 fn test_lfuda_basic() {
1122 let mut cache = make_cache(3);
1123
1124 // Add items
1125 assert_eq!(cache.put("a", 1, 1), None);
1126 assert_eq!(cache.put("b", 2, 1), None);
1127 assert_eq!(cache.put("c", 3, 1), None);
1128
1129 // Access "a" multiple times to increase its frequency
1130 assert_eq!(cache.get(&"a"), Some(&1));
1131 assert_eq!(cache.get(&"a"), Some(&1));
1132
1133 // Access "b" once
1134 assert_eq!(cache.get(&"b"), Some(&2));
1135
1136 // Add a new item, should evict "c" (lowest effective priority)
1137 let evicted = cache.put("d", 4, 1);
1138 assert!(evicted.is_some());
1139 let (evicted_key, evicted_val) = evicted.unwrap()[0];
1140 assert_eq!(evicted_key, "c");
1141 assert_eq!(evicted_val, 3);
1142
1143 // Check remaining items
1144 assert_eq!(cache.get(&"a"), Some(&1));
1145 assert_eq!(cache.get(&"b"), Some(&2));
1146 assert_eq!(cache.get(&"d"), Some(&4));
1147 assert_eq!(cache.get(&"c"), None); // evicted
1148 }
1149
1150 #[test]
1151 fn test_lfuda_aging() {
1152 let mut cache = make_cache(2);
1153
1154 // Add items and access them
1155 cache.put("a", 1, 1);
1156 cache.put("b", 2, 1);
1157
1158 // Access "a" many times
1159 for _ in 0..10 {
1160 cache.get(&"a");
1161 }
1162
1163 // Initially, global age should be 0
1164 assert_eq!(cache.global_age(), 0);
1165
1166 // Fill cache and cause eviction
1167 let evicted = cache.put("c", 3, 1);
1168 assert!(evicted.is_some());
1169
1170 // Global age should have increased after eviction
1171 assert!(cache.global_age() > 0);
1172
1173 // New items should benefit from the increased global age
1174 cache.put("d", 4, 1);
1175
1176 // The new item should start with competitive priority due to aging
1177 assert!(cache.len() <= cache.cap().get());
1178 }
1179
1180 #[test]
1181 fn test_lfuda_priority_calculation() {
1182 let mut cache = make_cache(3);
1183
1184 cache.put("a", 1, 1);
1185 assert_eq!(cache.global_age(), 0);
1186
1187 // Access "a" to increase its frequency
1188 cache.get(&"a");
1189
1190 // Add more items
1191 cache.put("b", 2, 1);
1192 cache.put("c", 3, 1);
1193
1194 // Force eviction to increase global age
1195 let evicted = cache.put("d", 4, 1);
1196 assert!(evicted.is_some());
1197
1198 // Global age should now be greater than 0
1199 assert!(cache.global_age() > 0);
1200 }
1201
1202 #[test]
1203 fn test_lfuda_update_existing() {
1204 let mut cache = make_cache(2);
1205
1206 cache.put("a", 1, 1);
1207 cache.get(&"a"); // Increase frequency
1208
1209 // Update existing key - replacement returns None
1210 let old_value = cache.put("a", 10, 1);
1211 assert!(old_value.is_none());
1212
1213 // Add another item
1214 cache.put("b", 2, 1);
1215 cache.put("c", 3, 1); // Should evict "b" because "a" has higher effective priority
1216
1217 assert_eq!(cache.get(&"a"), Some(&10));
1218 assert_eq!(cache.get(&"c"), Some(&3));
1219 assert_eq!(cache.get(&"b"), None);
1220 }
1221
1222 #[test]
1223 fn test_lfuda_remove() {
1224 let mut cache = make_cache(3);
1225
1226 cache.put("a", 1, 1);
1227 cache.put("b", 2, 1);
1228 cache.put("c", 3, 1);
1229
1230 // Remove item
1231 assert_eq!(cache.remove(&"b"), Some(2));
1232 assert_eq!(cache.remove(&"b"), None);
1233
1234 // Check remaining items
1235 assert_eq!(cache.get(&"a"), Some(&1));
1236 assert_eq!(cache.get(&"c"), Some(&3));
1237 assert_eq!(cache.len(), 2);
1238 }
1239
1240 #[test]
1241 fn test_lfuda_clear() {
1242 let mut cache = make_cache(3);
1243
1244 cache.put("a", 1, 1);
1245 cache.put("b", 2, 1);
1246 cache.put("c", 3, 1);
1247
1248 // Force some aging
1249 cache.get(&"a");
1250 cache.put("d", 4, 1); // This should increase global_age
1251
1252 let age_before_clear = cache.global_age();
1253 assert!(age_before_clear > 0);
1254
1255 cache.clear();
1256 assert_eq!(cache.len(), 0);
1257 assert!(cache.is_empty());
1258 assert_eq!(cache.global_age(), 0); // Should reset to 0
1259
1260 // Should be able to add items after clear
1261 cache.put("e", 5, 1);
1262 assert_eq!(cache.get(&"e"), Some(&5));
1263 }
1264
1265 #[test]
1266 fn test_lfuda_get_mut() {
1267 let mut cache = make_cache(2);
1268
1269 cache.put("a", 1, 1);
1270
1271 // Modify value using get_mut
1272 if let Some(value) = cache.get_mut(&"a") {
1273 *value = 10;
1274 }
1275
1276 assert_eq!(cache.get(&"a"), Some(&10));
1277 }
1278
1279 #[test]
1280 fn test_lfuda_complex_values() {
1281 let mut cache = make_cache(2);
1282
1283 #[derive(Debug, Clone, PartialEq)]
1284 struct ComplexValue {
1285 id: usize,
1286 data: String,
1287 }
1288
1289 // Add complex values
1290 cache.put(
1291 "a",
1292 ComplexValue {
1293 id: 1,
1294 data: "a-data".to_string(),
1295 },
1296 1,
1297 );
1298
1299 cache.put(
1300 "b",
1301 ComplexValue {
1302 id: 2,
1303 data: "b-data".to_string(),
1304 },
1305 1,
1306 );
1307
1308 // Modify a value using get_mut
1309 if let Some(value) = cache.get_mut(&"a") {
1310 value.id = 100;
1311 value.data = "a-modified".to_string();
1312 }
1313
1314 // Check the modified value
1315 let a = cache.get(&"a").unwrap();
1316 assert_eq!(a.id, 100);
1317 assert_eq!(a.data, "a-modified");
1318 }
1319
1320 #[test]
1321 fn test_lfuda_aging_advantage() {
1322 let mut cache = make_cache(2);
1323
1324 // Add and heavily access an old item
1325 cache.put("old", 1, 1);
1326 for _ in 0..100 {
1327 cache.get(&"old");
1328 }
1329
1330 // Fill cache
1331 cache.put("temp", 2, 1);
1332
1333 // Force eviction to age the cache
1334 let _evicted = cache.put("new1", 3, 1);
1335 let age_after_first_eviction = cache.global_age();
1336
1337 // Add more items to further age the cache
1338 let _evicted = cache.put("new2", 4, 1);
1339 let age_after_second_eviction = cache.global_age();
1340
1341 // The global age should have increased
1342 assert!(age_after_second_eviction >= age_after_first_eviction);
1343
1344 // Now add a brand new item - it should benefit from the aging
1345 cache.put("newer", 5, 1);
1346
1347 // The newer item should be able to compete despite the old item's high frequency
1348 assert!(cache.len() <= cache.cap().get());
1349 }
1350
1351 /// Test to validate the fix for Stacked Borrows violations detected by Miri.
1352 #[test]
1353 fn test_miri_stacked_borrows_fix() {
1354 let mut cache = make_cache(10);
1355
1356 // Insert some items
1357 cache.put("a", 1, 1);
1358 cache.put("b", 2, 1);
1359 cache.put("c", 3, 1);
1360
1361 // Access items multiple times to trigger priority updates
1362 for _ in 0..3 {
1363 assert_eq!(cache.get(&"a"), Some(&1));
1364 assert_eq!(cache.get(&"b"), Some(&2));
1365 assert_eq!(cache.get(&"c"), Some(&3));
1366 }
1367
1368 assert_eq!(cache.len(), 3);
1369
1370 // Test with get_mut as well
1371 if let Some(val) = cache.get_mut(&"a") {
1372 *val += 10;
1373 }
1374 assert_eq!(cache.get(&"a"), Some(&11));
1375 }
1376
1377 // Test that LfudaSegment works correctly (internal tests)
1378 #[test]
1379 fn test_lfuda_segment_directly() {
1380 let config = LfudaCacheConfig {
1381 capacity: NonZeroUsize::new(3).unwrap(),
1382 initial_age: 0,
1383 max_size: u64::MAX,
1384 };
1385 let mut segment: LfudaSegment<&str, i32, DefaultHashBuilder> =
1386 LfudaSegment::init(config, DefaultHashBuilder::default());
1387
1388 assert_eq!(segment.len(), 0);
1389 assert!(segment.is_empty());
1390 assert_eq!(segment.cap().get(), 3);
1391 assert_eq!(segment.global_age(), 0);
1392
1393 segment.put("a", 1, 1);
1394 segment.put("b", 2, 1);
1395 assert_eq!(segment.len(), 2);
1396
1397 // Access to increase frequency
1398 assert_eq!(segment.get(&"a"), Some(&1));
1399 assert_eq!(segment.get(&"b"), Some(&2));
1400 }
1401
1402 #[test]
1403 fn test_lfuda_concurrent_access() {
1404 extern crate std;
1405 use std::sync::{Arc, Mutex};
1406 use std::thread;
1407 use std::vec::Vec;
1408
1409 let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));
1410 let num_threads = 4;
1411 let ops_per_thread = 100;
1412
1413 let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();
1414
1415 for t in 0..num_threads {
1416 let cache = Arc::clone(&cache);
1417 handles.push(thread::spawn(move || {
1418 for i in 0..ops_per_thread {
1419 let key = std::format!("key_{}_{}", t, i);
1420 let mut guard = cache.lock().unwrap();
1421 guard.put(key.clone(), i, 1);
1422 let _ = guard.get(&key);
1423 }
1424 }));
1425 }
1426
1427 for handle in handles {
1428 handle.join().unwrap();
1429 }
1430
1431 let mut guard = cache.lock().unwrap();
1432 assert!(guard.len() <= 100);
1433 guard.clear(); // Clean up for MIRI
1434 }
1435
1436 #[test]
1437 fn test_lfuda_size_aware_tracking() {
1438 let mut cache = make_cache(10);
1439
1440 assert_eq!(cache.current_size(), 0);
1441 assert_eq!(cache.max_size(), u64::MAX);
1442
1443 cache.put("a", 1, 100);
1444 cache.put("b", 2, 200);
1445 cache.put("c", 3, 150);
1446
1447 assert_eq!(cache.current_size(), 450);
1448 assert_eq!(cache.len(), 3);
1449
1450 // Clear should reset size
1451 cache.clear();
1452 assert_eq!(cache.current_size(), 0);
1453 }
1454
1455 #[test]
1456 fn test_lfuda_init_constructor() {
1457 let config = LfudaCacheConfig {
1458 capacity: NonZeroUsize::new(1000).unwrap(),
1459 initial_age: 0,
1460 max_size: 1024 * 1024,
1461 };
1462 let cache: LfudaCache<String, i32> = LfudaCache::init(config, None);
1463
1464 assert_eq!(cache.current_size(), 0);
1465 assert_eq!(cache.max_size(), 1024 * 1024);
1466 }
1467
1468 #[test]
1469 fn test_lfuda_with_limits_constructor() {
1470 let config = LfudaCacheConfig {
1471 capacity: NonZeroUsize::new(100).unwrap(),
1472 initial_age: 0,
1473 max_size: 1024 * 1024,
1474 };
1475 let cache: LfudaCache<String, String> = LfudaCache::init(config, None);
1476
1477 assert_eq!(cache.current_size(), 0);
1478 assert_eq!(cache.max_size(), 1024 * 1024);
1479 assert_eq!(cache.cap().get(), 100);
1480 }
1481
1482 #[test]
1483 fn test_lfuda_contains_non_promoting() {
1484 let mut cache = make_cache(2);
1485 cache.put("a", 1, 1);
1486 cache.put("b", 2, 1);
1487
1488 // contains() should return true for existing keys
1489 assert!(cache.contains(&"a"));
1490 assert!(cache.contains(&"b"));
1491 assert!(!cache.contains(&"c"));
1492
1493 // Access "b" to increase its priority
1494 cache.get(&"b");
1495
1496 // contains() should NOT increase priority of "a"
1497 assert!(cache.contains(&"a"));
1498
1499 // Adding "c" should evict "a" (lowest priority), not "b"
1500 cache.put("c", 3, 1);
1501 assert!(!cache.contains(&"a")); // "a" was evicted
1502 assert!(cache.contains(&"b")); // "b" still exists
1503 assert!(cache.contains(&"c")); // "c" was just added
1504 }
1505
1506 #[test]
1507 fn test_put_returns_none_when_no_eviction() {
1508 let mut cache = make_cache(10);
1509 assert!(cache.put("a", 1, 1).is_none());
1510 assert!(cache.put("b", 2, 1).is_none());
1511 }
1512
1513 #[test]
1514 fn test_put_returns_single_eviction() {
1515 let mut cache = make_cache(2);
1516 cache.put("a", 1, 1);
1517 cache.put("b", 2, 1);
1518 let result = cache.put("c", 3, 1);
1519 assert!(result.is_some());
1520 let evicted = result.unwrap();
1521 assert_eq!(evicted.len(), 1);
1522 }
1523
1524 #[test]
1525 fn test_put_replacement_returns_none() {
1526 let mut cache = make_cache(10);
1527 cache.put("a", 1, 1);
1528 // Replacement is not eviction - returns None
1529 let result = cache.put("a", 2, 1);
1530 assert!(result.is_none());
1531 // Value should be updated
1532 assert_eq!(cache.get(&"a"), Some(&2));
1533 }
1534
1535 #[test]
1536 fn test_put_returns_multiple_evictions_size_based() {
1537 let config = LfudaCacheConfig {
1538 capacity: NonZeroUsize::new(10).unwrap(),
1539 initial_age: 0,
1540 max_size: 100,
1541 };
1542 let mut cache = LfudaCache::init(config, None);
1543 for i in 0..10 {
1544 cache.put(i, i, 10);
1545 }
1546 let result = cache.put(99, 99, 50);
1547 let evicted = result.unwrap();
1548 assert_eq!(evicted.len(), 5);
1549 }
1550
1551 #[test]
1552 fn test_lfuda_remove_cleans_up_empty_priority_lists() {
1553 // Regression test: remove() should clean up empty priority lists
1554 // from BTreeMap to avoid memory leaks and stale entries.
1555 let mut cache = make_cache(5);
1556
1557 // Insert items that will have different priorities
1558 cache.put("a", 1, 1);
1559 cache.put("b", 2, 1);
1560 cache.put("c", 3, 1);
1561
1562 // Access "a" to bump its frequency (changes priority)
1563 cache.get(&"a");
1564 cache.get(&"a");
1565
1566 // Remove items and verify cache stays consistent
1567 cache.remove(&"b");
1568 cache.remove(&"c");
1569
1570 // Cache should still work correctly after removals
1571 assert_eq!(cache.len(), 1);
1572 assert_eq!(cache.get(&"a"), Some(&1));
1573
1574 // Insert new items - should not hit stale empty lists
1575 cache.put("d", 4, 1);
1576 cache.put("e", 5, 1);
1577 assert_eq!(cache.len(), 3);
1578 }
1579
1580 #[test]
1581 fn test_lfuda_remove_non_min_priority_cleans_up() {
1582 // Verify that removing items at non-minimum priorities also cleans up empty lists.
1583 let mut cache = make_cache(5);
1584
1585 cache.put("a", 1, 1);
1586 cache.put("b", 2, 1);
1587 cache.put("c", 3, 1);
1588
1589 // Access "a" many times - it will have a higher priority
1590 for _ in 0..5 {
1591 cache.get(&"a");
1592 }
1593
1594 // Remove "a" (high priority) - the high-priority list should be cleaned up
1595 cache.remove(&"a");
1596 assert_eq!(cache.len(), 2);
1597
1598 // Remaining items should still be accessible
1599 assert_eq!(cache.get(&"b"), Some(&2));
1600 assert_eq!(cache.get(&"c"), Some(&3));
1601 }
1602
1603 #[test]
1604 fn test_lfuda_update_priority_cleans_up_empty_lists() {
1605 // Regression test: update_priority_by_node() should clean up empty
1606 // priority lists when moving a node to a new priority.
1607 let mut cache = make_cache(5);
1608
1609 // Insert items at the same initial priority
1610 cache.put("a", 1, 1);
1611 cache.put("b", 2, 1);
1612
1613 // Access "a" to move it to a new priority list.
1614 // If "a" was the only item at that priority, the old list should be removed.
1615 cache.get(&"a");
1616
1617 // Now access "b" to also move it
1618 cache.get(&"b");
1619
1620 // Both should still be accessible
1621 assert_eq!(cache.get(&"a"), Some(&1));
1622 assert_eq!(cache.get(&"b"), Some(&2));
1623
1624 // Insert more items and verify eviction works correctly
1625 cache.put("c", 3, 1);
1626 cache.put("d", 4, 1);
1627 cache.put("e", 5, 1);
1628 assert_eq!(cache.len(), 5);
1629
1630 // Force eviction - should work without issues from stale empty lists
1631 cache.put("f", 6, 1);
1632 assert_eq!(cache.len(), 5);
1633 }
1634}