cache_rs/lru.rs
//! Least Recently Used (LRU) Cache Implementation
//!
//! An LRU cache evicts the least recently accessed item when capacity is reached.
//! This implementation provides O(1) time complexity for all operations using a
//! hash map combined with a doubly-linked list.
//!
//! # How the Algorithm Works
//!
//! The LRU algorithm is based on the principle of **temporal locality**: items accessed
//! recently are likely to be accessed again soon. The cache maintains items ordered by
//! their last access time.
//!
//! ## Data Structure
//!
//! ```text
//! ┌────────────────────────────────────────────────────────┐
//! │                       LRU Cache                        │
//! │                                                        │
//! │    HashMap<K, *Node>         Doubly-Linked List        │
//! │    ┌──────────────┐       ┌───────────────────────┐    │
//! │    │ "apple"  ────┼─────▶ │ MRU ◀──▶ ... ◀──▶ LRU │    │
//! │    │ "banana" ────┼─────▶ │  ▲                 ▲  │    │
//! │    │ "cherry" ────┼─────▶ │  │                 │  │    │
//! │    └──────────────┘       │ head              tail│    │
//! │                           └───────────────────────┘    │
//! └────────────────────────────────────────────────────────┘
//! ```
//!
//! - **HashMap**: Provides O(1) key lookup, storing pointers to list nodes
//! - **Doubly-Linked List**: Maintains access order (most recent at head, least recent at tail); both are sketched below
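//!
//! In this crate the two structures live side by side in the internal segment type
//! defined later in this file; a simplified excerpt (not compiled here):
//!
//! ```ignore
//! struct LruSegment<K, V, S> {
//!     // Recency-ordered list of entries: head = MRU, tail = LRU.
//!     list: List<CacheEntry<K, V>>,
//!     // O(1) lookup from a key to its node in the list.
//!     map: HashMap<K, *mut ListEntry<CacheEntry<K, V>>, S>,
//!     // ... configuration, metrics, and size tracking elided
//! }
//! ```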
//!
//! ## Operations
//!
//! | Operation | Action | Time |
//! |-----------|--------|------|
//! | `get(key)` | Move accessed node to head (MRU position) | O(1) |
//! | `put(key, value)` | Insert at head, evict from tail if full | O(1) |
//! | `remove(key)` | Unlink node from list, remove from map | O(1) |
//!
//! ## Eviction Example
//!
//! ```text
//! Cache capacity: 3
//!
//! put("a", 1) → [a]
//! put("b", 2) → [b, a]
//! put("c", 3) → [c, b, a]
//! get("a")    → [a, c, b]   // "a" moved to front (MRU)
//! put("d", 4) → [d, a, c]   // "b" evicted (was LRU)
//! ```
//!
//! # Dual-Limit Capacity
//!
//! This implementation supports two independent limits:
//!
//! - **`capacity`**: Maximum number of entries (bounds cache memory overhead)
//! - **`max_size`**: Maximum total size of content (sum of item sizes)
//!
//! Eviction occurs when **either** limit would be exceeded.
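//!
//! The sketch below shows eviction driven by `max_size` rather than by the entry
//! count, using the same API as the examples further down:
//!
//! ```
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(10).unwrap(), // entry limit, never reached here
//!     max_size: 300,                            // total content size limit
//! };
//! let mut cache = LruCache::init(config, None);
//!
//! cache.put_with_size("a", 1, 100);
//! cache.put_with_size("b", 2, 100);
//! cache.put_with_size("c", 3, 100);
//!
//! // Adding 100 more would exceed max_size, so the LRU entry ("a") is evicted.
//! assert_eq!(cache.put_with_size("d", 4, 100), Some(("a", 1)));
//! assert_eq!(cache.get(&"a"), None);
//! assert_eq!(cache.current_size(), 300);
//! ```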
//!
//! # Performance Characteristics
//!
//! | Metric | Value |
//! |--------|-------|
//! | Get | O(1) |
//! | Put | O(1) |
//! | Remove | O(1) |
//! | Memory per entry | ~80 bytes overhead + key×2 + value |
//!
//! Memory overhead breakdown (64-bit): list node pointers (16 B), `CacheEntry` metadata (24 B),
//! HashMap bucket (~24 B), allocator overhead (~16 B). The key is stored twice (once in the
//! entry, once in the HashMap), so a `u64` key with a `u64` value costs roughly
//! 80 + 2×8 + 8 ≈ 104 bytes per entry.
//!
//! # When to Use LRU
//!
//! **Good for:**
//! - General-purpose caching with temporal locality
//! - Web page caches, database query caches
//! - Any workload where recent items are accessed again soon
//!
//! **Not ideal for:**
//! - Frequency-based access patterns (use LFU instead)
//! - Scan-heavy workloads that need scan resistance (use SLRU instead)
//! - Size-aware caching of variable-sized objects (use GDSF instead)
//!
//! # Thread Safety
//!
//! `LruCache` is **not** thread-safe. For concurrent access, either of the following works (see the sketch below):
//! - Wrap it in a `Mutex` or `RwLock`
//! - Use `ConcurrentLruCache` (requires the `concurrent` feature)
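//!
//! A minimal sketch of the `Mutex` approach (this doctest assumes `std` is available):
//!
//! ```
//! use std::sync::Mutex;
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(16).unwrap(),
//!     max_size: u64::MAX,
//! };
//! // Every operation (including `get`) takes `&mut self`, so all access goes through the lock.
//! let shared = Mutex::new(LruCache::init(config, None));
//!
//! let mut guard = shared.lock().unwrap();
//! guard.put("a", 1);
//! assert_eq!(guard.get(&"a"), Some(&1));
//! ```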
//!
//! # Examples
//!
//! ## Basic Usage
//!
//! ```
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(3).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LruCache::init(config, None);
//!
//! cache.put("a", 1);
//! cache.put("b", 2);
//! cache.put("c", 3);
//!
//! assert_eq!(cache.get(&"a"), Some(&1)); // "a" is now MRU
//!
//! cache.put("d", 4); // Evicts "b" (LRU)
//! assert_eq!(cache.get(&"b"), None);
//! ```
//!
//! ## Size-Aware Caching
//!
//! ```
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! // Cache with at most 1000 entries and 10 MB of total content
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(1000).unwrap(),
//!     max_size: 10 * 1024 * 1024,
//! };
//! let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
//!
//! let data = vec![0u8; 1024]; // 1KB
//! cache.put_with_size("file.bin".to_string(), data.clone(), 1024);
//! ```

extern crate alloc;

use crate::config::LruCacheConfig;
use crate::entry::CacheEntry;
use crate::list::{List, ListEntry};
use crate::metrics::{CacheMetrics, LruCacheMetrics};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::mem;
use core::num::NonZeroUsize;

#[cfg(feature = "hashbrown")]
use hashbrown::DefaultHashBuilder;
#[cfg(feature = "hashbrown")]
use hashbrown::HashMap;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;
#[cfg(not(feature = "hashbrown"))]
use std::collections::HashMap;

/// Internal LRU segment containing the actual cache algorithm.
///
/// This is shared between `LruCache` (single-threaded) and
/// `ConcurrentLruCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// Uses `CacheEntry<K, V>` for unified entry management with built-in
/// size tracking and timestamps. LRU doesn't need per-entry metadata
/// since position in the list implicitly tracks recency.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field.
/// These pointers are always valid as long as:
/// - The pointer was obtained from `list.add()`
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct LruSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the LRU cache (includes capacity and max_size)
    config: LruCacheConfig,
    /// Entries in recency order (head = MRU, tail = LRU)
    list: List<CacheEntry<K, V>>,
    /// Key -> list node lookup table
    map: HashMap<K, *mut ListEntry<CacheEntry<K, V>>, S>,
    metrics: LruCacheMetrics,
    /// Current total size of cached content (sum of entry.size values)
    current_size: u64,
}

// SAFETY: LruSegment owns all data and raw pointers point only to nodes owned by `list`.
// Concurrent access is safe when wrapped in proper synchronization primitives.
unsafe impl<K: Send, V: Send, S: Send> Send for LruSegment<K, V, S> {}

// SAFETY: All mutation requires `&mut self`; shared references cannot cause data races.
unsafe impl<K: Send, V: Send, S: Sync> Sync for LruSegment<K, V, S> {}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> LruSegment<K, V, S> {
    /// Creates a new LRU segment from a configuration.
    ///
    /// This is the **recommended** way to create an LRU segment. All configuration
    /// is specified through the [`LruCacheConfig`] struct.
    ///
    /// # Arguments
    ///
    /// * `config` - Configuration specifying capacity and optional size limit
    /// * `hasher` - Hash builder for the internal HashMap
    #[allow(dead_code)] // Used by the concurrent module when the feature is enabled
    pub(crate) fn init(config: LruCacheConfig, hasher: S) -> Self {
        let map_capacity = config.capacity.get().next_power_of_two();
        LruSegment {
            list: List::new(config.capacity),
            map: HashMap::with_capacity_and_hasher(map_capacity, hasher),
            metrics: LruCacheMetrics::new(config.max_size),
            current_size: 0,
            config,
        }
    }

    #[inline]
    pub(crate) fn cap(&self) -> NonZeroUsize {
        self.config.capacity
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.map.len()
    }

    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Returns the current total size of cached content.
    #[inline]
    pub(crate) fn current_size(&self) -> u64 {
        self.current_size
    }

    /// Returns the maximum content size the cache can hold.
    #[inline]
    pub(crate) fn max_size(&self) -> u64 {
        self.config.max_size
    }

    #[inline]
    pub(crate) fn metrics(&self) -> &LruCacheMetrics {
        &self.metrics
    }

    /// Rough size estimate used by `put()` when no explicit size is given:
    /// the shallow sizes of key and value plus a fixed allowance for bookkeeping.
    fn estimate_object_size(&self, _key: &K, _value: &V) -> u64 {
        mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64
    }

    pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        if let Some(node) = self.map.get(key).copied() {
            unsafe {
                // SAFETY: node comes from our map
                self.list.move_to_front(node);
                let entry = (*node).get_value_mut();
                entry.touch(); // Update last_accessed timestamp
                self.metrics.core.record_hit(entry.size);
                Some(&entry.value)
            }
        } else {
            None
        }
    }

    #[inline]
    pub(crate) fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }

    pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let node = self.map.get(key).copied()?;
        unsafe {
            // SAFETY: node comes from our map
            self.list.move_to_front(node);
            let entry = (*node).get_value_mut();
            entry.touch(); // Update last_accessed timestamp
            self.metrics.core.record_hit(entry.size);
            Some(&mut entry.value)
        }
    }

    pub(crate) fn put(&mut self, key: K, value: V) -> Option<(K, V)>
    where
        K: Clone + Hash + Eq,
    {
        // Use an estimated size for backward compatibility
        let object_size = self.estimate_object_size(&key, &value);
        self.put_with_size(key, value, object_size)
    }

    /// Insert a key-value pair with explicit size tracking.
    ///
    /// The `size` parameter specifies how much of `max_size` this entry consumes.
    /// Use `size = 1` for count-based caches.
    pub(crate) fn put_with_size(&mut self, key: K, value: V, size: u64) -> Option<(K, V)>
    where
        K: Clone + Hash + Eq,
    {
        let mut evicted = None;

        if let Some(&node) = self.map.get(&key) {
            unsafe {
                // SAFETY: node comes from our map
                self.list.move_to_front(node);
                let entry = (*node).get_value_mut();

                // Update size tracking: remove old size, add new size
                let old_size = entry.size;
                self.current_size = self.current_size.saturating_sub(old_size);
                self.metrics.core.cache_size_bytes =
                    self.metrics.core.cache_size_bytes.saturating_sub(old_size);

                // Update entry fields
                let old_key = core::mem::replace(&mut entry.key, key);
                let old_value = core::mem::replace(&mut entry.value, value);
                entry.size = size;
                entry.touch();

                self.current_size += size;
                self.metrics.core.cache_size_bytes += size;

                return Some((old_key, old_value));
            }
        }

        // Evict while the entry-count limit or the size limit would be exceeded
        while self.map.len() >= self.cap().get()
            || (self.current_size + size > self.config.max_size && !self.map.is_empty())
        {
            if let Some(old_entry) = self.list.remove_last() {
                unsafe {
                    let entry_ptr = Box::into_raw(old_entry);
                    let cache_entry = (*entry_ptr).get_value();
                    self.map.remove(&cache_entry.key);
                    let evicted_key = cache_entry.key.clone();
                    let evicted_value = cache_entry.value.clone();
                    let evicted_size = cache_entry.size;
                    self.current_size = self.current_size.saturating_sub(evicted_size);
                    self.metrics.core.record_eviction(evicted_size);
                    evicted = Some((evicted_key, evicted_value));
                    // Reconstruct the Box so the node is deallocated
                    let _ = Box::from_raw(entry_ptr);
                }
            } else {
                break; // No more items to evict
            }
        }

        // Create the new CacheEntry and add it to the list
        let cache_entry = CacheEntry::new(key.clone(), value, size);
        if let Some(node) = self.list.add(cache_entry) {
            self.map.insert(key, node);
            self.current_size += size;
            self.metrics.core.record_insertion(size);
        }

        evicted
    }

    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q> + Clone,
        Q: ?Sized + Hash + Eq,
        V: Clone,
    {
        let node = self.map.remove(key)?;
        unsafe {
            // SAFETY: node comes from our map
            let entry = (*node).get_value();
            let object_size = entry.size;
            let value = entry.value.clone();
            self.list.remove(node);
            self.current_size = self.current_size.saturating_sub(object_size);
            self.metrics.core.record_eviction(object_size);
            Some(value)
        }
    }

    pub(crate) fn clear(&mut self) {
        self.current_size = 0;
        self.metrics.core.cache_size_bytes = 0;
        self.map.clear();
        self.list.clear();
    }
}

impl<K, V, S> core::fmt::Debug for LruSegment<K, V, S> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("LruSegment")
            .field("capacity", &self.config.capacity)
            .field("len", &self.map.len())
            .finish()
    }
}

/// A Least Recently Used (LRU) cache with O(1) operations.
///
/// Maintains items in order of access recency. When capacity is reached,
/// the least recently accessed item is evicted to make room for new entries.
///
/// # Type Parameters
///
/// - `K`: Key type. Must implement `Hash + Eq`. Mutation operations also require `Clone`.
/// - `V`: Value type. Must implement `Clone` for retrieval operations.
/// - `S`: Hash builder type. Defaults to `DefaultHashBuilder`.
///
/// # Capacity Modes
///
/// All caches are created via [`LruCache::init`] with an [`LruCacheConfig`]:
///
/// - **Count-based**: set `capacity` and leave `max_size` at `u64::MAX`
/// - **Size-based**: also set `max_size` to bound the total content size
/// - **Dual-limit**: both limits apply; eviction triggers when either would be exceeded
///
/// # Example
///
/// ```
/// use cache_rs::LruCache;
/// use cache_rs::config::LruCacheConfig;
/// use core::num::NonZeroUsize;
///
/// let config = LruCacheConfig {
///     capacity: NonZeroUsize::new(2).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = LruCache::init(config, None);
///
/// cache.put("apple", 1);
/// cache.put("banana", 2);
/// assert_eq!(cache.get(&"apple"), Some(&1));
///
/// // "banana" is now LRU, so it gets evicted
/// cache.put("cherry", 3);
/// assert_eq!(cache.get(&"banana"), None);
/// ```
#[derive(Debug)]
pub struct LruCache<K, V, S = DefaultHashBuilder> {
    segment: LruSegment<K, V, S>,
}

impl<K: Hash + Eq, V: Clone, S: BuildHasher> LruCache<K, V, S> {
    /// Returns the maximum number of entries the cache can hold.
    #[inline]
    pub fn cap(&self) -> NonZeroUsize {
        self.segment.cap()
    }

    /// Returns the current number of entries in the cache.
    #[inline]
    pub fn len(&self) -> usize {
        self.segment.len()
    }

    /// Returns `true` if the cache contains no entries.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.segment.is_empty()
    }

    /// Returns the current total size of all cached content.
    ///
    /// This is the sum of all `size` values passed to `put_with_size()`,
    /// or estimated sizes for entries added via `put()`.
    #[inline]
    pub fn current_size(&self) -> u64 {
        self.segment.current_size()
    }

    /// Returns the maximum total content size the cache can hold.
    ///
    /// Returns `u64::MAX` if no size limit was configured.
    #[inline]
    pub fn max_size(&self) -> u64 {
        self.segment.max_size()
    }

    /// Retrieves a reference to the value for the given key.
    ///
    /// If the key exists, it is moved to the most-recently-used (MRU) position.
    /// Returns `None` if the key is not present.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("key", 42);
    ///
    /// assert_eq!(cache.get(&"key"), Some(&42));
    /// assert_eq!(cache.get(&"missing"), None);
    /// ```
    #[inline]
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get(key)
    }

    /// Records a cache miss for metrics tracking.
    ///
    /// Call this when you look up a key, find it missing, and fetch from
    /// the underlying data source. This updates the miss counter in the metrics.
    ///
    /// # Arguments
    ///
    /// * `object_size` - Size of the object that was fetched (for byte tracking)
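    ///
    /// # Example
    ///
    /// A sketch of the intended look-up / record / insert flow; the "fetch" below is a
    /// hypothetical stand-in for a real backing data source:
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache: LruCache<&str, i32> = LruCache::init(config, None);
    ///
    /// if cache.get(&"answer").is_none() {
    ///     let fetched = 42; // pretend this came from the backing store
    ///     cache.record_miss(64); // record the miss and the fetched object's size
    ///     cache.put("answer", fetched);
    /// }
    /// assert_eq!(cache.get(&"answer"), Some(&42));
    /// ```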
    #[inline]
    pub fn record_miss(&mut self, object_size: u64) {
        self.segment.record_miss(object_size);
    }

    /// Retrieves a mutable reference to the value for the given key.
    ///
    /// If the key exists, it is moved to the MRU position.
    /// Returns `None` if the key is not present.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("counter", 0);
    ///
    /// if let Some(val) = cache.get_mut(&"counter") {
    ///     *val += 1;
    /// }
    /// assert_eq!(cache.get(&"counter"), Some(&1));
    /// ```
    #[inline]
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get_mut(key)
    }
}

impl<K: Hash + Eq + Clone, V: Clone, S: BuildHasher> LruCache<K, V, S> {
    /// Inserts a key-value pair into the cache.
    ///
    /// If the key already exists, the value is updated and the entry moves
    /// to the MRU position. The old key-value pair is returned.
    ///
    /// If the cache is at capacity, the least recently used entry is evicted
    /// and returned.
    ///
    /// # Returns
    ///
    /// - `Some((old_key, old_value))` if the key existed or an entry was evicted
    /// - `None` if this was a new insertion with available capacity
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(2).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    ///
    /// assert_eq!(cache.put("a", 1), None);             // New entry
    /// assert_eq!(cache.put("b", 2), None);             // New entry
    /// assert_eq!(cache.put("a", 10), Some(("a", 1)));  // Update existing
    /// assert_eq!(cache.put("c", 3), Some(("b", 2)));   // Evicts "b"
    /// ```
    #[inline]
    pub fn put(&mut self, key: K, value: V) -> Option<(K, V)> {
        self.segment.put(key, value)
    }

    /// Inserts a key-value pair with an explicit size.
    ///
    /// Use this for size-aware caching where you want to track the actual
    /// memory or storage footprint of cached items.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to cache
    /// * `size` - The size this entry consumes (in your chosen unit, e.g., bytes)
    ///
    /// # Returns
    ///
    /// Same as `put()` - returns the evicted or replaced entry, if any.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(100).unwrap(),
    ///     max_size: 1024 * 1024, // 1MB max
    /// };
    /// let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
    ///
    /// let data = vec![0u8; 1000];
    /// cache.put_with_size("file".to_string(), data, 1000);
    ///
    /// assert_eq!(cache.current_size(), 1000);
    /// ```
    #[inline]
    pub fn put_with_size(&mut self, key: K, value: V, size: u64) -> Option<(K, V)> {
        self.segment.put_with_size(key, value, size)
    }

    /// Removes a key from the cache.
    ///
    /// Returns the value if the key was present, `None` otherwise.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("key", 42);
    ///
    /// assert_eq!(cache.remove(&"key"), Some(42));
    /// assert_eq!(cache.remove(&"key"), None); // Already removed
    /// ```
    #[inline]
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.remove(key)
    }

    /// Removes all entries from the cache.
    ///
    /// Resets `current_size` (and the size metric) to 0. Hit/miss counters are not reset.
    #[inline]
    pub fn clear(&mut self) {
        self.segment.clear()
    }

    /// Returns an iterator over the cache entries.
    ///
    /// # Panics
    ///
    /// Always panics: iteration is not yet implemented.
    pub fn iter(&self) -> Iter<'_, K, V> {
        unimplemented!("Iteration not yet implemented")
    }

    /// Returns a mutable iterator over the cache entries.
    ///
    /// # Panics
    ///
    /// Always panics: mutable iteration is not yet implemented.
    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
        unimplemented!("Mutable iteration not yet implemented")
    }
}

impl<K: Hash + Eq, V> LruCache<K, V>
where
    V: Clone,
{
    /// Creates a new LRU cache from a configuration with an optional hasher.
    ///
    /// This is the **only** way to create an LRU cache.
    ///
    /// # Arguments
    ///
    /// * `config` - Configuration specifying capacity and optional size limit
    /// * `hasher` - Optional custom hash builder. If `None`, uses `DefaultHashBuilder`
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// // Simple capacity-only cache
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(100).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache: LruCache<&str, i32> = LruCache::init(config, None);
    /// cache.put("key", 42);
    ///
    /// // Cache with a size limit
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(1000).unwrap(),
    ///     max_size: 10 * 1024 * 1024, // 10MB
    /// };
    /// let cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
    /// ```
    pub fn init(
        config: LruCacheConfig,
        hasher: Option<DefaultHashBuilder>,
    ) -> LruCache<K, V, DefaultHashBuilder> {
        LruCache {
            segment: LruSegment::init(config, hasher.unwrap_or_default()),
        }
    }
}

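/// Metrics are exposed through the crate's [`CacheMetrics`] trait.
///
/// A minimal sketch of reading the counters; the key names match the assertions in
/// this file's tests, but the `cache_rs::metrics` import path is an assumption, so
/// the block is not compiled as a doctest:
///
/// ```ignore
/// use cache_rs::LruCache;
/// use cache_rs::config::LruCacheConfig;
/// use cache_rs::metrics::CacheMetrics; // assumed public path to the trait
/// use core::num::NonZeroUsize;
///
/// let config = LruCacheConfig {
///     capacity: NonZeroUsize::new(4).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = LruCache::init(config, None);
/// cache.put("a", 1);
/// cache.get(&"a");
///
/// let snapshot = cache.metrics();                     // BTreeMap<String, f64>
/// assert_eq!(snapshot.get("cache_hits"), Some(&1.0)); // one hit recorded by `get`
/// assert_eq!(cache.algorithm_name(), "LRU");
/// ```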
impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for LruCache<K, V, S> {
    fn metrics(&self) -> BTreeMap<String, f64> {
        self.segment.metrics().metrics()
    }

    fn algorithm_name(&self) -> &'static str {
        self.segment.metrics().algorithm_name()
    }
}

/// Placeholder iterator over cache entries (iteration is not yet implemented).
pub struct Iter<'a, K, V> {
    _marker: core::marker::PhantomData<&'a (K, V)>,
}

/// Placeholder mutable iterator over cache entries (mutable iteration is not yet implemented).
pub struct IterMut<'a, K, V> {
    _marker: core::marker::PhantomData<&'a mut (K, V)>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::LruCacheConfig;
    use alloc::string::String;

    /// Helper to create an LruCache with the given capacity
    fn make_cache<K: Hash + Eq + Clone, V: Clone>(cap: usize) -> LruCache<K, V> {
        let config = LruCacheConfig {
            capacity: NonZeroUsize::new(cap).unwrap(),
            max_size: u64::MAX,
        };
        LruCache::init(config, None)
    }

    #[test]
    fn test_lru_get_put() {
        let mut cache = make_cache(2);
        assert_eq!(cache.put("apple", 1), None);
        assert_eq!(cache.put("banana", 2), None);
        assert_eq!(cache.get(&"apple"), Some(&1));
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), None);
        assert_eq!(cache.put("apple", 3).unwrap().1, 1);
        assert_eq!(cache.get(&"apple"), Some(&3));
        assert_eq!(cache.put("cherry", 4).unwrap().1, 2);
        assert_eq!(cache.get(&"banana"), None);
        assert_eq!(cache.get(&"apple"), Some(&3));
        assert_eq!(cache.get(&"cherry"), Some(&4));
    }

    #[test]
    fn test_lru_get_mut() {
        let mut cache = make_cache(2);
        cache.put("apple", 1);
        cache.put("banana", 2);
        if let Some(v) = cache.get_mut(&"apple") {
            *v = 3;
        }
        assert_eq!(cache.get(&"apple"), Some(&3));
        cache.put("cherry", 4);
        assert_eq!(cache.get(&"banana"), None);
        assert_eq!(cache.get(&"apple"), Some(&3));
        assert_eq!(cache.get(&"cherry"), Some(&4));
    }

    #[test]
    fn test_lru_remove() {
        let mut cache = make_cache(2);
        cache.put("apple", 1);
        cache.put("banana", 2);
        assert_eq!(cache.get(&"apple"), Some(&1));
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), None);
        assert_eq!(cache.remove(&"apple"), Some(1));
        assert_eq!(cache.get(&"apple"), None);
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.remove(&"cherry"), None);
        let evicted = cache.put("cherry", 3);
        assert_eq!(evicted, None);
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }

    #[test]
    fn test_lru_clear() {
        let mut cache = make_cache(2);
        cache.put("apple", 1);
        cache.put("banana", 2);
        assert_eq!(cache.len(), 2);
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        cache.put("cherry", 3);
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }

    #[test]
    fn test_lru_capacity_limits() {
        let mut cache = make_cache(2);
        cache.put("apple", 1);
        cache.put("banana", 2);
        cache.put("cherry", 3);
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.get(&"apple"), None);
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }

    #[test]
    fn test_lru_string_keys() {
        let mut cache = make_cache(2);
        let key1 = String::from("apple");
        let key2 = String::from("banana");
        cache.put(key1.clone(), 1);
        cache.put(key2.clone(), 2);
        assert_eq!(cache.get(&key1), Some(&1));
        assert_eq!(cache.get(&key2), Some(&2));
        assert_eq!(cache.get("apple"), Some(&1));
        assert_eq!(cache.get("banana"), Some(&2));
        drop(cache);
    }

    #[derive(Debug, Clone, Eq, PartialEq)]
    struct ComplexValue {
        val: i32,
        description: String,
    }

    #[test]
    fn test_lru_complex_values() {
        let mut cache = make_cache(2);
        let key1 = String::from("apple");
        let key2 = String::from("banana");
        let fruit1 = ComplexValue {
            val: 1,
            description: String::from("First fruit"),
        };
        let fruit2 = ComplexValue {
            val: 2,
            description: String::from("Second fruit"),
        };
        let fruit3 = ComplexValue {
            val: 3,
            description: String::from("Third fruit"),
        };
        cache.put(key1.clone(), fruit1.clone());
        cache.put(key2.clone(), fruit2.clone());
        assert_eq!(cache.get(&key1).unwrap().val, fruit1.val);
        assert_eq!(cache.get(&key2).unwrap().val, fruit2.val);
        let evicted = cache.put(String::from("cherry"), fruit3.clone());
        let evicted_fruit = evicted.unwrap();
        assert_eq!(evicted_fruit.1, fruit1);
        let removed = cache.remove(&key1);
        assert_eq!(removed, None);
    }

    #[test]
    fn test_lru_metrics() {
        use crate::metrics::CacheMetrics;
        let mut cache = make_cache(2);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("requests").unwrap(), &0.0);
        assert_eq!(metrics.get("cache_hits").unwrap(), &0.0);
        assert_eq!(metrics.get("cache_misses").unwrap(), &0.0);
        cache.put("apple", 1);
        cache.put("banana", 2);
        cache.get(&"apple");
        cache.get(&"banana");
        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_hits").unwrap(), &2.0);
        cache.record_miss(64);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_misses").unwrap(), &1.0);
        assert_eq!(metrics.get("requests").unwrap(), &3.0);
        cache.put("cherry", 3);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("evictions").unwrap(), &1.0);
        assert!(metrics.get("bytes_written_to_cache").unwrap() > &0.0);
        assert_eq!(cache.algorithm_name(), "LRU");
    }

    #[test]
    fn test_lru_segment_directly() {
        let config = LruCacheConfig {
            capacity: NonZeroUsize::new(2).unwrap(),
            max_size: u64::MAX,
        };
        let mut segment: LruSegment<&str, i32, DefaultHashBuilder> =
            LruSegment::init(config, DefaultHashBuilder::default());
        assert_eq!(segment.len(), 0);
        assert!(segment.is_empty());
        assert_eq!(segment.cap().get(), 2);
        segment.put("a", 1);
        segment.put("b", 2);
        assert_eq!(segment.len(), 2);
        assert_eq!(segment.get(&"a"), Some(&1));
        assert_eq!(segment.get(&"b"), Some(&2));
    }

    #[test]
    fn test_lru_concurrent_access() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));
        let num_threads = 4;
        let ops_per_thread = 100;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        // Spawn writer threads
        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    guard.put(key, t * 1000 + i);
                }
            }));
        }

        // Spawn reader threads
        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    let mut guard = cache.lock().unwrap();
                    let _ = guard.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        assert!(!guard.is_empty());
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_lru_high_contention() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(50)));
        let num_threads = 8;
        let ops_per_thread = 500;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 100); // Overlapping keys
                    let mut guard = cache.lock().unwrap();
                    if i % 2 == 0 {
                        guard.put(key, t * 1000 + i);
                    } else {
                        let _ = guard.get(&key);
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 50);
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_lru_concurrent_mixed_operations() {
        extern crate std;
        use std::sync::{Arc, Mutex};
        use std::thread;
        use std::vec::Vec;

        let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));
        let num_threads = 8;
        let ops_per_thread = 1000;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 200);
                    let mut guard = cache.lock().unwrap();

                    match i % 4 {
                        0 => {
                            guard.put(key, i);
                        }
                        1 => {
                            let _ = guard.get(&key);
                        }
                        2 => {
                            let _ = guard.get_mut(&key);
                        }
                        3 => {
                            let _ = guard.remove(&key);
                        }
                        _ => unreachable!(),
                    }

                    if i == 500 && t == 0 {
                        guard.clear();
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut guard = cache.lock().unwrap();
        assert!(guard.len() <= 100);
        guard.clear(); // Clean up for MIRI
    }

    #[test]
    fn test_lru_size_aware_tracking() {
        // Test that current_size and max_size are tracked correctly
        let mut cache = make_cache(10);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), u64::MAX);

        // Put items with explicit sizes
        cache.put_with_size("a", 1, 100);
        cache.put_with_size("b", 2, 200);
        cache.put_with_size("c", 3, 150);

        assert_eq!(cache.current_size(), 450);
        assert_eq!(cache.len(), 3);

        // remove() subtracts the removed entry's size from current_size;
        // here we just check that clear() resets the size tracking entirely.

        // Clear should reset size
        cache.clear();
        assert_eq!(cache.current_size(), 0);
    }

    #[test]
    fn test_lru_init_constructor() {
        // Test the init constructor with size limit
        let config = LruCacheConfig {
            capacity: NonZeroUsize::new(1000).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: LruCache<String, i32> = LruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
        assert_eq!(cache.len(), 0);
    }

    #[test]
    fn test_lru_dual_limit_config() {
        // Test init with both an entry limit and a size limit
        let config = LruCacheConfig {
            capacity: NonZeroUsize::new(100).unwrap(),
            max_size: 1024 * 1024,
        };
        let cache: LruCache<String, String> = LruCache::init(config, None);

        assert_eq!(cache.current_size(), 0);
        assert_eq!(cache.max_size(), 1024 * 1024);
        assert_eq!(cache.cap().get(), 100);
    }
}