// cache_rs/lru.rs
1//! Least Recently Used (LRU) Cache Implementation
2//!
3//! An LRU cache evicts the least recently accessed item when capacity is reached.
4//! This implementation provides O(1) time complexity for all operations using a
5//! hash map combined with a doubly-linked list.
6//!
7//! # How the Algorithm Works
8//!
9//! The LRU algorithm is based on the principle of **temporal locality**: items accessed
10//! recently are likely to be accessed again soon. The cache maintains items ordered by
11//! their last access time.
12//!
13//! ## Data Structure
14//!
15//! ```text
16//! ┌─────────────────────────────────────────────────────────────────┐
17//! │ LRU Cache │
18//! │ │
19//! │ HashMap<K, *Node> Doubly-Linked List │
20//! │ ┌──────────────┐ ┌──────────────────────────────┐ │
21//! │ │ "apple" ──────────────> │ MRU ◀──▶ ... ◀──▶ LRU │ │
22//! │ │ "banana" ─────────────> │ ▲ │ │ │
23//! │ │ "cherry" ─────────────> │ │ ▼ │ │
24//! │ └──────────────┘ │ head tail │ │
25//! │ └──────────────────────────────┘ │
26//! └─────────────────────────────────────────────────────────────────┘
27//! ```
28//!
29//! - **HashMap**: Provides O(1) key lookup, storing pointers to list nodes
30//! - **Doubly-Linked List**: Maintains access order (most recent at head, least recent at tail)
31//!
32//! ## Operations
33//!
34//! | Operation | Action | Time |
35//! |-----------|--------|------|
36//! | `get(key)` | Move accessed node to head (MRU position) | O(1) |
37//! | `put(key, value)` | Insert at head, evict from tail if full | O(1) |
38//! | `remove(key)` | Unlink node from list, remove from map | O(1) |
39//!
40//! ## Eviction Example
41//!
42//! ```text
43//! Cache capacity: 3
44//!
45//! put("a", 1) → [a]
46//! put("b", 2) → [b, a]
47//! put("c", 3) → [c, b, a]
48//! get("a") → [a, c, b] // "a" moved to front (MRU)
49//! put("d", 4) → [d, a, c] // "b" evicted (was LRU)
50//! ```
51//!
52//! # Dual-Limit Capacity
53//!
54//! This implementation supports two independent limits:
55//!
56//! - **`max_entries`**: Maximum number of items (bounds cache memory overhead)
57//! - **`max_size`**: Maximum total size of content (sum of item sizes)
58//!
59//! Eviction occurs when **either** limit would be exceeded.
60//!
61//! # Performance Characteristics
62//!
63//! | Metric | Value |
64//! |--------|-------|
65//! | Get | O(1) |
66//! | Put | O(1) |
67//! | Remove | O(1) |
68//! | Memory per entry | ~80 bytes overhead + key×2 + value |
69//!
70//! Memory overhead breakdown (64-bit): list node pointers (16B), `CacheEntry` metadata (24B),
71//! HashMap bucket (~24B), allocator overhead (~16B). Key is stored twice (in entry and HashMap).
72//!
73//! # When to Use LRU
74//!
75//! **Good for:**
76//! - General-purpose caching with temporal locality
77//! - Web page caches, database query caches
78//! - Any workload where recent items are accessed again soon
79//!
80//! **Not ideal for:**
81//! - Frequency-based access patterns (use LFU instead)
82//! - Scan-resistant workloads (use SLRU instead)
83//! - Size-aware caching of variable-sized objects (use GDSF instead)
84//!
85//! # Thread Safety
86//!
87//! `LruCache` is **not thread-safe**. For concurrent access, either:
88//! - Wrap with `Mutex` or `RwLock`
89//! - Use `ConcurrentLruCache` (requires `concurrent` feature)
90//!
91//! # Examples
92//!
93//! ## Basic Usage
94//!
95//! ```
96//! use cache_rs::LruCache;
97//! use cache_rs::config::LruCacheConfig;
98//! use core::num::NonZeroUsize;
99//!
100//! let config = LruCacheConfig {
101//! capacity: NonZeroUsize::new(3).unwrap(),
102//! max_size: u64::MAX,
103//! };
104//! let mut cache = LruCache::init(config, None);
105//!
106//! cache.put("a", 1, 1);
107//! cache.put("b", 2, 1);
108//! cache.put("c", 3, 1);
109//!
110//! assert_eq!(cache.get(&"a"), Some(&1)); // "a" is now MRU
111//!
112//! cache.put("d", 4, 1); // Evicts "b" (LRU)
113//! assert_eq!(cache.get(&"b"), None);
114//! ```
115//!
116//! ## Size-Aware Caching
117//!
118//! ```
119//! use cache_rs::LruCache;
120//! use cache_rs::config::LruCacheConfig;
121//! use core::num::NonZeroUsize;
122//!
123//! // Cache with max 1000 entries and 10MB total size
124//! let config = LruCacheConfig {
125//! capacity: NonZeroUsize::new(1000).unwrap(),
126//! max_size: 10 * 1024 * 1024,
127//! };
128//! let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
129//!
130//! let data = vec![0u8; 1024]; // 1KB
131//! cache.put("file.bin".to_string(), data.clone(), 1024);
132//! ```
133
134extern crate alloc;
135
136use crate::config::LruCacheConfig;
137use crate::entry::CacheEntry;
138use crate::list::{List, ListEntry};
139use crate::metrics::{CacheMetrics, LruCacheMetrics};
140use alloc::boxed::Box;
141use alloc::collections::BTreeMap;
142use alloc::string::String;
143use alloc::vec::Vec;
144use core::borrow::Borrow;
145use core::hash::{BuildHasher, Hash};
146use core::num::NonZeroUsize;
147
148#[cfg(feature = "hashbrown")]
149use hashbrown::DefaultHashBuilder;
150#[cfg(feature = "hashbrown")]
151use hashbrown::HashMap;
152
153#[cfg(not(feature = "hashbrown"))]
154use std::collections::hash_map::RandomState as DefaultHashBuilder;
155#[cfg(not(feature = "hashbrown"))]
156use std::collections::HashMap;
157
/// Internal LRU segment containing the actual cache algorithm.
///
/// This is shared between `LruCache` (single-threaded) and
/// `ConcurrentLruCache` (multi-threaded). All algorithm logic is
/// implemented here to avoid code duplication.
///
/// Uses `CacheEntry<K, V>` for unified entry management with built-in
/// size tracking and timestamps. LRU doesn't need per-entry metadata
/// since position in the list implicitly tracks recency.
///
/// # Safety
///
/// This struct contains raw pointers in the `map` field.
/// These pointers are always valid as long as:
/// - The pointer was obtained from a `list` entry's `add()` call
/// - The node has not been removed from the list
/// - The segment has not been dropped
pub(crate) struct LruSegment<K, V, S = DefaultHashBuilder> {
    /// Configuration for the LRU cache (includes capacity and max_size)
    config: LruCacheConfig,
    /// Doubly-linked list ordered by recency: front = MRU, back = LRU.
    list: List<CacheEntry<K, V>>,
    /// O(1) key lookup; values are raw pointers into nodes owned by `list`.
    map: HashMap<K, *mut ListEntry<CacheEntry<K, V>>, S>,
    /// Hit/miss/eviction counters exposed through `CacheMetrics`.
    metrics: LruCacheMetrics,
    /// Current total size of cached content (sum of entry.metadata.size values)
    current_size: u64,
}
184
185// SAFETY: LruSegment owns all data and raw pointers point only to nodes owned by `list`.
186// Concurrent access is safe when wrapped in proper synchronization primitives.
187unsafe impl<K: Send, V: Send, S: Send> Send for LruSegment<K, V, S> {}
188
189// SAFETY: All mutation requires &mut self; shared references cannot cause data races.
190unsafe impl<K: Send, V: Send, S: Sync> Sync for LruSegment<K, V, S> {}
191
192impl<K: Hash + Eq, V: Clone, S: BuildHasher> LruSegment<K, V, S> {
193 /// Creates a new LRU segment from a configuration.
194 ///
195 /// This is the **recommended** way to create an LRU segment. All configuration
196 /// is specified through the [`LruCacheConfig`] struct.
197 ///
198 /// # Arguments
199 ///
200 /// * `config` - Configuration specifying capacity and optional size limit
201 /// * `hasher` - Hash builder for the internal HashMap
202 #[allow(dead_code)] // Used by concurrent module when feature is enabled
203 pub(crate) fn init(config: LruCacheConfig, hasher: S) -> Self {
204 let map_capacity = config.capacity.get().next_power_of_two();
205 LruSegment {
206 config,
207 list: List::new(config.capacity),
208 map: HashMap::with_capacity_and_hasher(map_capacity, hasher),
209 metrics: LruCacheMetrics::new(config.max_size),
210 current_size: 0,
211 }
212 }
213
214 #[inline]
215 pub(crate) fn cap(&self) -> NonZeroUsize {
216 self.config.capacity
217 }
218
219 #[inline]
220 pub(crate) fn len(&self) -> usize {
221 self.map.len()
222 }
223
224 #[inline]
225 pub(crate) fn is_empty(&self) -> bool {
226 self.map.is_empty()
227 }
228
229 /// Returns the current total size of cached content.
230 #[inline]
231 pub(crate) fn current_size(&self) -> u64 {
232 self.current_size
233 }
234
235 /// Returns the maximum content size the cache can hold.
236 #[inline]
237 pub(crate) fn max_size(&self) -> u64 {
238 self.config.max_size
239 }
240
241 #[inline]
242 pub(crate) fn metrics(&self) -> &LruCacheMetrics {
243 &self.metrics
244 }
245
246 pub(crate) fn get<Q>(&mut self, key: &Q) -> Option<&V>
247 where
248 K: Borrow<Q>,
249 Q: ?Sized + Hash + Eq,
250 {
251 if let Some(node) = self.map.get(key).copied() {
252 unsafe {
253 // SAFETY: node comes from our map
254 self.list.move_to_front(node);
255 let entry = (*node).get_value_mut();
256 entry.touch(); // Update last_accessed timestamp
257 self.metrics.core.record_hit(entry.metadata.size);
258 Some(&entry.value)
259 }
260 } else {
261 None
262 }
263 }
264
265 #[inline]
266 pub(crate) fn record_miss(&mut self, object_size: u64) {
267 self.metrics.core.record_miss(object_size);
268 }
269
270 pub(crate) fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
271 where
272 K: Borrow<Q>,
273 Q: ?Sized + Hash + Eq,
274 {
275 let node = self.map.get(key).copied()?;
276 unsafe {
277 // SAFETY: node comes from our map
278 self.list.move_to_front(node);
279 let entry = (*node).get_value_mut();
280 entry.touch(); // Update last_accessed timestamp
281 self.metrics.core.record_hit(entry.metadata.size);
282 Some(&mut entry.value)
283 }
284 }
285
286 /// Insert a key-value pair with size tracking.
287 ///
288 /// The `size` parameter specifies how much of `max_size` this entry consumes.
289 /// Use `SIZE_UNIT` (1) for count-based caching.
290 ///
291 /// Returns evicted entries, or `None` if no entries were evicted.
292 /// Note: Replacing an existing key does not return the old value.
293 pub(crate) fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
294 where
295 K: Clone + Hash + Eq,
296 {
297 if let Some(&node) = self.map.get(&key) {
298 unsafe {
299 // SAFETY: node comes from our map
300 self.list.move_to_front(node);
301 let entry = (*node).get_value_mut();
302
303 // Update size tracking: remove old size, add new size
304 let old_size = entry.metadata.size;
305 self.current_size = self.current_size.saturating_sub(old_size);
306 self.metrics.core.cache_size_bytes =
307 self.metrics.core.cache_size_bytes.saturating_sub(old_size);
308
309 // Update entry fields
310 // TODO: seems wasteful to replace key since it should be the same?
311 let _old_key = core::mem::replace(&mut entry.key, key);
312 let _old_value = core::mem::replace(&mut entry.value, value);
313 entry.metadata.size = size;
314 entry.touch();
315
316 self.current_size += size;
317 self.metrics.core.cache_size_bytes += size;
318 self.metrics.core.bytes_written_to_cache += size;
319
320 // Replacement is not eviction - don't return the old value
321 return None;
322 }
323 }
324
325 let mut evicted = Vec::new();
326
327 // Evict while entry count limit OR size limit would be exceeded
328 while self.map.len() >= self.cap().get()
329 || (self.current_size + size > self.config.max_size && !self.map.is_empty())
330 {
331 if let Some(entry) = self.evict() {
332 self.metrics.core.evictions += 1;
333 evicted.push(entry);
334 } else {
335 break;
336 }
337 }
338
339 // Create new CacheEntry and add to list
340 let cache_entry = CacheEntry::new(key.clone(), value, size);
341 if let Some(node) = self.list.add(cache_entry) {
342 self.map.insert(key, node);
343 self.current_size += size;
344 self.metrics.core.record_insertion(size);
345 }
346
347 if evicted.is_empty() {
348 None
349 } else {
350 Some(evicted)
351 }
352 }
353
354 pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<V>
355 where
356 K: Borrow<Q>,
357 Q: ?Sized + Hash + Eq,
358 {
359 let node = self.map.remove(key)?;
360 unsafe {
361 // SAFETY: node comes from our map; take_value moves the value out
362 // and Box::from_raw frees memory (MaybeUninit won't double-drop).
363 if let Some(boxed) = self.list.remove(node) {
364 let entry_ptr = Box::into_raw(boxed);
365 let cache_entry = (*entry_ptr).take_value();
366 let removed_size = cache_entry.metadata.size;
367 let _ = Box::from_raw(entry_ptr);
368 self.current_size = self.current_size.saturating_sub(removed_size);
369 self.metrics.core.record_removal(removed_size);
370 Some(cache_entry.value)
371 } else {
372 None
373 }
374 }
375 }
376
377 pub(crate) fn clear(&mut self) {
378 self.current_size = 0;
379 self.metrics.core.cache_size_bytes = 0;
380 self.map.clear();
381 self.list.clear();
382 }
383
384 /// Check if key exists without promoting it in the LRU order.
385 ///
386 /// Unlike `get()`, this method does NOT update access metadata or
387 /// move the entry to the front of the list.
388 #[inline]
389 pub(crate) fn contains<Q>(&self, key: &Q) -> bool
390 where
391 K: Borrow<Q>,
392 Q: ?Sized + Hash + Eq,
393 {
394 self.map.contains_key(key)
395 }
396
397 /// Returns a reference to the value without updating the LRU order.
398 ///
399 /// Unlike `get()`, this method does NOT move the entry to the front
400 /// of the list or update the last_accessed timestamp.
401 pub(crate) fn peek<Q>(&self, key: &Q) -> Option<&V>
402 where
403 K: Borrow<Q>,
404 Q: ?Sized + Hash + Eq,
405 {
406 let node = self.map.get(key).copied()?;
407 unsafe {
408 // SAFETY: node comes from our map, so it's a valid pointer
409 let entry = (*node).get_value();
410 Some(&entry.value)
411 }
412 }
413
414 /// Removes and returns the eviction candidate (least recently used entry).
415 ///
416 /// This method does **not** increment the eviction counter in metrics.
417 /// Eviction metrics are only recorded when the cache internally evicts
418 /// entries to make room during `put()` operations.
419 ///
420 /// Returns `None` if the cache is empty.
421 fn evict(&mut self) -> Option<(K, V)> {
422 let old_entry = self.list.remove_last()?;
423 unsafe {
424 // SAFETY: entry comes from list.remove_last(); take_value moves the
425 // CacheEntry out by value. Box::from_raw frees memory without
426 // double-drop since MaybeUninit does not run Drop on its contents.
427 let entry_ptr = Box::into_raw(old_entry);
428 let cache_entry = (*entry_ptr).take_value();
429 let evicted_size = cache_entry.metadata.size;
430 self.map.remove(&cache_entry.key);
431 self.current_size = self.current_size.saturating_sub(evicted_size);
432 self.metrics.core.record_removal(evicted_size);
433 let _ = Box::from_raw(entry_ptr);
434 Some((cache_entry.key, cache_entry.value))
435 }
436 }
437}
438
439impl<K, V, S> core::fmt::Debug for LruSegment<K, V, S> {
440 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
441 f.debug_struct("LruSegment")
442 .field("capacity", &self.config.capacity)
443 .field("len", &self.map.len())
444 .finish()
445 }
446}
447
/// A Least Recently Used (LRU) cache with O(1) operations.
///
/// Maintains items in order of access recency. When capacity is reached,
/// the least recently accessed item is evicted to make room for new entries.
///
/// # Type Parameters
///
/// - `K`: Key type. Must implement `Hash + Eq`. For mutation operations, also needs `Clone`.
/// - `V`: Value type. Must implement `Clone` for retrieval operations.
/// - `S`: Hash builder type. Defaults to `DefaultHashBuilder`.
///
/// # Capacity Modes
///
/// All caches are created through `LruCache::init` with an [`LruCacheConfig`]:
///
/// - **Count-based**: set `capacity`, leave `max_size` at `u64::MAX`
/// - **Size-based**: set `max_size` to bound the total content size
/// - **Dual-limit**: set both; eviction occurs when either limit would be exceeded
///
/// # Example
///
/// ```
/// use cache_rs::LruCache;
/// use cache_rs::config::LruCacheConfig;
/// use core::num::NonZeroUsize;
///
/// let config = LruCacheConfig {
///     capacity: NonZeroUsize::new(2).unwrap(),
///     max_size: u64::MAX,
/// };
/// let mut cache = LruCache::init(config, None);
///
/// cache.put("apple", 1, 1);
/// cache.put("banana", 2, 1);
/// assert_eq!(cache.get(&"apple"), Some(&1));
///
/// // "banana" is now LRU, so it gets evicted
/// cache.put("cherry", 3, 1);
/// assert_eq!(cache.get(&"banana"), None);
/// ```
#[derive(Debug)]
pub struct LruCache<K, V, S = DefaultHashBuilder> {
    // Single-threaded wrapper: all behavior delegates to this segment.
    segment: LruSegment<K, V, S>,
}
490
impl<K: Hash + Eq, V: Clone, S: BuildHasher> LruCache<K, V, S> {
    /// Returns the maximum number of entries the cache can hold.
    #[inline]
    pub fn cap(&self) -> NonZeroUsize {
        self.segment.cap()
    }

    /// Returns the current number of entries in the cache.
    #[inline]
    pub fn len(&self) -> usize {
        self.segment.len()
    }

    /// Returns `true` if the cache contains no entries.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.segment.is_empty()
    }

    /// Returns the current total size of all cached content.
    ///
    /// This is the running sum of the `size` values passed to `put()`
    /// for the entries currently resident in the cache.
    #[inline]
    pub fn current_size(&self) -> u64 {
        self.segment.current_size()
    }

    /// Returns the maximum total content size the cache can hold.
    ///
    /// Returns `u64::MAX` if no size limit was configured.
    #[inline]
    pub fn max_size(&self) -> u64 {
        self.segment.max_size()
    }

    /// Retrieves a reference to the value for the given key.
    ///
    /// If the key exists, it is moved to the most-recently-used (MRU) position
    /// and a cache hit is recorded in the metrics.
    /// Returns `None` if the key is not present.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("key", 42, 1);
    ///
    /// assert_eq!(cache.get(&"key"), Some(&42));
    /// assert_eq!(cache.get(&"missing"), None);
    /// ```
    #[inline]
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get(key)
    }

    /// Records a cache miss for metrics tracking.
    ///
    /// Call this when you look up a key, find it missing, and fetch from
    /// the underlying data source. This updates the miss counter in metrics.
    /// Note that `get()` returning `None` does NOT record a miss by itself.
    ///
    /// # Arguments
    ///
    /// * `object_size` - Size of the object that was fetched (for byte tracking)
    #[inline]
    pub fn record_miss(&mut self, object_size: u64) {
        self.segment.record_miss(object_size);
    }

    /// Retrieves a mutable reference to the value for the given key.
    ///
    /// If the key exists, it is moved to the MRU position.
    /// Returns `None` if the key is not present.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("counter", 0, 1);
    ///
    /// if let Some(val) = cache.get_mut(&"counter") {
    ///     *val += 1;
    /// }
    /// assert_eq!(cache.get(&"counter"), Some(&1));
    /// ```
    #[inline]
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.get_mut(key)
    }
}
604
impl<K: Hash + Eq + Clone, V: Clone, S: BuildHasher> LruCache<K, V, S> {
    /// Inserts a key-value pair into the cache.
    ///
    /// If the key already exists, the value is updated and the entry moves
    /// to the MRU position.
    ///
    /// If the cache is at capacity, evicted entries are returned.
    ///
    /// # Returns
    ///
    /// - `Some(vec)` containing evicted entries (not replaced entries)
    /// - `None` if no entries were evicted (zero allocation)
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to cache
    /// * `size` - Size of this entry for capacity tracking. Use `SIZE_UNIT` (1) for count-based caching.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(2).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    ///
    /// // Count-based caching (use 1 for size)
    /// assert_eq!(cache.put("a", 1, 1), None); // New entry
    /// assert_eq!(cache.put("b", 2, 1), None); // New entry
    /// assert_eq!(cache.put("a", 10, 1), None); // Update existing (not eviction)
    /// assert_eq!(cache.put("c", 3, 1), Some(vec![("b", 2)])); // Evicts "b"
    /// ```
    ///
    /// Size-aware caching:
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(100).unwrap(),
    ///     max_size: 1024 * 1024, // 1MB max
    /// };
    /// let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
    ///
    /// let data = vec![0u8; 1000];
    /// cache.put("file".to_string(), data, 1000);
    ///
    /// assert_eq!(cache.current_size(), 1000);
    /// ```
    #[inline]
    pub fn put(&mut self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>> {
        self.segment.put(key, value, size)
    }

    /// Removes a key from the cache.
    ///
    /// Returns the value if the key was present, `None` otherwise.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(10).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("key", 42, 1);
    ///
    /// assert_eq!(cache.remove(&"key"), Some(42));
    /// assert_eq!(cache.remove(&"key"), None); // Already removed
    /// ```
    #[inline]
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.remove(key)
    }

    /// Removes all entries from the cache.
    ///
    /// Resets `current_size` (and the tracked cache byte size) to 0.
    /// Hit, miss and eviction counters in the metrics are preserved.
    #[inline]
    pub fn clear(&mut self) {
        self.segment.clear()
    }

    /// Check if key exists without promoting it in the LRU order.
    ///
    /// Unlike `get()`, this method does NOT update the entry's access time
    /// or move it to the front of the list. Useful for existence checks
    /// without affecting cache eviction order.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(2).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("a", 1, 1);
    /// cache.put("b", 2, 1);
    ///
    /// // contains() does NOT promote "a"
    /// assert!(cache.contains(&"a"));
    ///
    /// // "a" is still LRU, so adding "c" evicts "a"
    /// cache.put("c", 3, 1);
    /// assert!(!cache.contains(&"a"));
    /// ```
    #[inline]
    pub fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.contains(key)
    }

    /// Returns a reference to the value without updating the LRU order.
    ///
    /// Unlike [`get()`](Self::get), this does NOT move the entry to the front
    /// of the list or update any access metadata.
    ///
    /// # Example
    ///
    /// ```
    /// use cache_rs::LruCache;
    /// use cache_rs::config::LruCacheConfig;
    /// use core::num::NonZeroUsize;
    ///
    /// let config = LruCacheConfig {
    ///     capacity: NonZeroUsize::new(3).unwrap(),
    ///     max_size: u64::MAX,
    /// };
    /// let mut cache = LruCache::init(config, None);
    /// cache.put("a", 1, 1);
    /// cache.put("b", 2, 1);
    ///
    /// // peek does not change LRU ordering
    /// assert_eq!(cache.peek(&"a"), Some(&1));
    /// assert_eq!(cache.peek(&"missing"), None);
    /// ```
    #[inline]
    pub fn peek<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.segment.peek(key)
    }

    /// Returns an iterator over the cache entries.
    ///
    /// # Panics
    ///
    /// Not yet implemented — always panics with `unimplemented!`.
    pub fn iter(&self) -> Iter<'_, K, V> {
        unimplemented!("Iteration not yet implemented")
    }

    /// Returns a mutable iterator over the cache entries.
    ///
    /// # Panics
    ///
    /// Not yet implemented — always panics with `unimplemented!`.
    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
        unimplemented!("Mutable iteration not yet implemented")
    }
}
793
794impl<K: Hash + Eq, V> LruCache<K, V>
795where
796 V: Clone,
797{
798 /// Creates a new LRU cache from a configuration with an optional hasher.
799 ///
800 /// This is the **only** way to create an LRU cache.
801 ///
802 /// # Arguments
803 ///
804 /// * `config` - Configuration specifying capacity and optional size limit
805 /// * `hasher` - Optional custom hash builder. If `None`, uses `DefaultHashBuilder`
806 ///
807 /// # Example
808 ///
809 /// ```
810 /// use cache_rs::LruCache;
811 /// use cache_rs::config::LruCacheConfig;
812 /// use core::num::NonZeroUsize;
813 ///
814 /// // Simple capacity-only cache
815 /// let config = LruCacheConfig {
816 /// capacity: NonZeroUsize::new(100).unwrap(),
817 /// max_size: u64::MAX,
818 /// };
819 /// let mut cache: LruCache<&str, i32> = LruCache::init(config, None);
820 /// cache.put("key", 42, 1);
821 ///
822 /// // Cache with size limit
823 /// let config = LruCacheConfig {
824 /// capacity: NonZeroUsize::new(1000).unwrap(),
825 /// max_size: 10 * 1024 * 1024, // 10MB
826 /// };
827 /// let cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
828 /// ```
829 pub fn init(
830 config: LruCacheConfig,
831 hasher: Option<DefaultHashBuilder>,
832 ) -> LruCache<K, V, DefaultHashBuilder> {
833 LruCache {
834 segment: LruSegment::init(config, hasher.unwrap_or_default()),
835 }
836 }
837}
838
839impl<K: Hash + Eq, V: Clone, S: BuildHasher> CacheMetrics for LruCache<K, V, S> {
840 fn metrics(&self) -> BTreeMap<String, f64> {
841 self.segment.metrics().metrics()
842 }
843
844 fn algorithm_name(&self) -> &'static str {
845 self.segment.metrics().algorithm_name()
846 }
847}
848
/// Placeholder iterator over cache entries.
///
/// Currently unconstructible from outside this module; [`LruCache::iter`]
/// panics with `unimplemented!` rather than producing one.
pub struct Iter<'a, K, V> {
    // Carries the borrow lifetime and item types until iteration is implemented.
    _marker: core::marker::PhantomData<&'a (K, V)>,
}

/// Placeholder mutable iterator over cache entries.
///
/// Currently unconstructible from outside this module; [`LruCache::iter_mut`]
/// panics with `unimplemented!` rather than producing one.
pub struct IterMut<'a, K, V> {
    // Mutable-borrow marker mirroring `Iter`.
    _marker: core::marker::PhantomData<&'a mut (K, V)>,
}
856
857#[cfg(test)]
858mod tests {
859 use super::*;
860 use crate::config::LruCacheConfig;
861 use alloc::string::String;
862 use alloc::vec;
863
864 /// Helper to create an LruCache with the given capacity
865 fn make_cache<K: Hash + Eq + Clone, V: Clone>(cap: usize) -> LruCache<K, V> {
866 let config = LruCacheConfig {
867 capacity: NonZeroUsize::new(cap).unwrap(),
868 max_size: u64::MAX,
869 };
870 LruCache::init(config, None)
871 }
872
    #[test]
    fn test_lru_get_put() {
        // End-to-end check of insert, lookup, in-place update, and eviction
        // order. Note: `get` mutates recency, so assert order matters.
        let mut cache = make_cache(2);
        assert_eq!(cache.put("apple", 1, 1), None);
        assert_eq!(cache.put("banana", 2, 1), None);
        assert_eq!(cache.get(&"apple"), Some(&1));
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), None);
        // Updating existing key returns None (not eviction)
        assert!(cache.put("apple", 3, 1).is_none());
        assert_eq!(cache.get(&"apple"), Some(&3));
        // Adding new key evicts LRU entry (banana, since apple was accessed)
        assert_eq!(cache.put("cherry", 4, 1).unwrap()[0].1, 2);
        assert_eq!(cache.get(&"banana"), None);
        assert_eq!(cache.get(&"apple"), Some(&3));
        assert_eq!(cache.get(&"cherry"), Some(&4));
    }
890
    #[test]
    fn test_lru_get_mut() {
        // get_mut both mutates the value and promotes the entry to MRU,
        // so "banana" becomes LRU and is the next eviction victim.
        let mut cache = make_cache(2);
        cache.put("apple", 1, 1);
        cache.put("banana", 2, 1);
        if let Some(v) = cache.get_mut(&"apple") {
            *v = 3;
        }
        assert_eq!(cache.get(&"apple"), Some(&3));
        cache.put("cherry", 4, 1);
        assert_eq!(cache.get(&"banana"), None);
        assert_eq!(cache.get(&"apple"), Some(&3));
        assert_eq!(cache.get(&"cherry"), Some(&4));
    }
905
    #[test]
    fn test_lru_remove() {
        // Removal frees a slot, so the subsequent put must not evict.
        let mut cache = make_cache(2);
        cache.put("apple", 1, 1);
        cache.put("banana", 2, 1);
        assert_eq!(cache.get(&"apple"), Some(&1));
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), None);
        assert_eq!(cache.remove(&"apple"), Some(1));
        assert_eq!(cache.get(&"apple"), None);
        assert_eq!(cache.len(), 1);
        // Removing an absent key is a no-op returning None
        assert_eq!(cache.remove(&"cherry"), None);
        let evicted = cache.put("cherry", 3, 1);
        assert_eq!(evicted, None);
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }
923
    #[test]
    fn test_lru_clear() {
        // clear() empties the cache and it remains usable afterwards.
        let mut cache = make_cache(2);
        cache.put("apple", 1, 1);
        cache.put("banana", 2, 1);
        assert_eq!(cache.len(), 2);
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        cache.put("cherry", 3, 1);
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }
936
    #[test]
    fn test_lru_capacity_limits() {
        // With capacity 2, the third insert evicts the oldest entry ("apple").
        let mut cache = make_cache(2);
        cache.put("apple", 1, 1);
        cache.put("banana", 2, 1);
        cache.put("cherry", 3, 1);
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.get(&"apple"), None);
        assert_eq!(cache.get(&"banana"), Some(&2));
        assert_eq!(cache.get(&"cherry"), Some(&3));
    }
948
    #[test]
    fn test_lru_string_keys() {
        // Owned String keys work, and the Borrow<str> bound allows lookup
        // by &str without allocating an owned key.
        let mut cache = make_cache(2);
        let key1 = String::from("apple");
        let key2 = String::from("banana");
        cache.put(key1.clone(), 1, 1);
        cache.put(key2.clone(), 2, 1);
        assert_eq!(cache.get(&key1), Some(&1));
        assert_eq!(cache.get(&key2), Some(&2));
        // &str lookup against String keys (Borrow<str>)
        assert_eq!(cache.get("apple"), Some(&1));
        assert_eq!(cache.get("banana"), Some(&2));
        // NOTE(review): explicit drop is redundant — the cache would be
        // dropped at end of scope anyway.
        drop(cache);
    }
962
    // Value type used to exercise caching of non-Copy, heap-owning values.
    #[derive(Debug, Clone, Eq, PartialEq)]
    struct ComplexValue {
        val: i32,
        description: String,
    }
968
    #[test]
    fn test_lru_complex_values() {
        // Eviction returns the evicted (key, value) pair for struct values.
        let mut cache = make_cache(2);
        let key1 = String::from("apple");
        let key2 = String::from("banana");
        let fruit1 = ComplexValue {
            val: 1,
            description: String::from("First fruit"),
        };
        let fruit2 = ComplexValue {
            val: 2,
            description: String::from("Second fruit"),
        };
        let fruit3 = ComplexValue {
            val: 3,
            description: String::from("Third fruit"),
        };
        cache.put(key1.clone(), fruit1.clone(), 1);
        cache.put(key2.clone(), fruit2.clone(), 1);
        // After these gets, key1 ("apple") is LRU and key2 ("banana") is MRU.
        assert_eq!(cache.get(&key1).unwrap().val, fruit1.val);
        assert_eq!(cache.get(&key2).unwrap().val, fruit2.val);
        let evicted = cache.put(String::from("cherry"), fruit3.clone(), 1);
        let evicted_fruit = evicted.unwrap();
        assert_eq!(evicted_fruit[0].1, fruit1);
        // key1 was evicted above, so remove() finds nothing.
        let removed = cache.remove(&key1);
        assert_eq!(removed, None);
    }
996
    #[test]
    fn test_lru_metrics() {
        // Verifies hit/miss/eviction counters and byte accounting exposed
        // through the CacheMetrics trait.
        use crate::metrics::CacheMetrics;
        let mut cache = make_cache(2);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("requests").unwrap(), &0.0);
        assert_eq!(metrics.get("cache_hits").unwrap(), &0.0);
        assert_eq!(metrics.get("cache_misses").unwrap(), &0.0);
        cache.put("apple", 1, 1);
        cache.put("banana", 2, 1);
        cache.get(&"apple");
        cache.get(&"banana");
        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_hits").unwrap(), &2.0);
        // Misses are only counted via explicit record_miss(), not by get().
        cache.record_miss(64);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("cache_misses").unwrap(), &1.0);
        assert_eq!(metrics.get("requests").unwrap(), &3.0);
        cache.put("cherry", 3, 1);
        let metrics = cache.metrics();
        assert_eq!(metrics.get("evictions").unwrap(), &1.0);
        assert!(metrics.get("bytes_written_to_cache").unwrap() > &0.0);
        assert_eq!(cache.algorithm_name(), "LRU");
    }
1021
#[test]
fn test_lru_segment_directly() {
    // Drive the inner segment directly (bypassing the LruCache wrapper)
    // with capacity 2 and an effectively unlimited byte budget.
    let config = LruCacheConfig {
        capacity: NonZeroUsize::new(2).unwrap(),
        max_size: u64::MAX,
    };
    let mut segment: LruSegment<&str, i32, DefaultHashBuilder> =
        LruSegment::init(config, DefaultHashBuilder::default());

    // Fresh segment: empty, but configured capacity is visible.
    assert!(segment.is_empty());
    assert_eq!(segment.len(), 0);
    assert_eq!(segment.cap().get(), 2);

    segment.put("a", 1, 1);
    segment.put("b", 2, 1);

    assert_eq!(segment.len(), 2);
    assert_eq!(segment.get(&"a"), Some(&1));
    assert_eq!(segment.get(&"b"), Some(&2));
}
1039
#[test]
fn test_lru_concurrent_access() {
    extern crate std;
    use std::sync::{Arc, Mutex};
    use std::thread;
    use std::vec::Vec;

    let num_threads = 4;
    let ops_per_thread = 100;
    let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));

    // Writer threads: each owns a disjoint key namespace.
    let writers: Vec<_> = (0..num_threads)
        .map(|t| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    cache.lock().unwrap().put(key, t * 1000 + i, 1);
                }
            })
        })
        .collect();

    // Reader threads: probe the same key space concurrently.
    let readers: Vec<_> = (0..num_threads)
        .map(|t| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    let _ = cache.lock().unwrap().get(&key);
                }
            })
        })
        .collect();

    for handle in writers.into_iter().chain(readers) {
        handle.join().unwrap();
    }

    // Capacity is 100, so at most 100 of the 400 inserted keys survive.
    let mut guard = cache.lock().unwrap();
    assert!(guard.len() <= 100);
    assert!(!guard.is_empty());
    guard.clear(); // Clean up for MIRI
}
1086
#[test]
fn test_lru_high_contention() {
    extern crate std;
    use std::sync::{Arc, Mutex};
    use std::thread;
    use std::vec::Vec;

    let num_threads = 8;
    let ops_per_thread = 500;
    let cache = Arc::new(Mutex::new(make_cache::<String, i32>(50)));

    // All threads hammer the same 100 keys (cache holds only 50),
    // alternating put/get to maximize lock and eviction contention.
    let handles: Vec<_> = (0..num_threads)
        .map(|t| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 100); // Overlapping keys
                    let mut guard = cache.lock().unwrap();
                    if i % 2 == 0 {
                        guard.put(key, t * 1000 + i, 1);
                    } else {
                        let _ = guard.get(&key);
                    }
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    let mut guard = cache.lock().unwrap();
    assert!(guard.len() <= 50);
    guard.clear(); // Clean up for MIRI
}
1123
#[test]
fn test_lru_concurrent_mixed_operations() {
    extern crate std;
    use std::sync::{Arc, Mutex};
    use std::thread;
    use std::vec::Vec;

    let num_threads = 8;
    let ops_per_thread = 1000;
    let cache = Arc::new(Mutex::new(make_cache::<String, i32>(100)));

    // Every thread cycles put / get / get_mut / remove over a shared
    // 200-key space; thread 0 additionally clears mid-run so later
    // operations hit a just-emptied cache.
    let handles: Vec<_> = (0..num_threads)
        .map(|t| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 200);
                    let mut guard = cache.lock().unwrap();

                    match i % 4 {
                        0 => {
                            guard.put(key, i, 1);
                        }
                        1 => {
                            let _ = guard.get(&key);
                        }
                        2 => {
                            let _ = guard.get_mut(&key);
                        }
                        3 => {
                            let _ = guard.remove(&key);
                        }
                        _ => unreachable!(),
                    }

                    if t == 0 && i == 500 {
                        guard.clear();
                    }
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    let mut guard = cache.lock().unwrap();
    assert!(guard.len() <= 100);
    guard.clear(); // Clean up for MIRI
}
1175
#[test]
fn test_lru_size_aware_tracking() {
    // current_size() accumulates the caller-supplied per-entry sizes.
    let mut cache = make_cache(10);

    assert_eq!(cache.current_size(), 0);
    assert_eq!(cache.max_size(), u64::MAX);

    // Insert entries with explicit, distinct sizes.
    for (key, value, size) in [("a", 1, 100), ("b", 2, 200), ("c", 3, 150)] {
        cache.put(key, value, size);
    }

    assert_eq!(cache.current_size(), 100 + 200 + 150);
    assert_eq!(cache.len(), 3);

    // Note: Current implementation doesn't track per-entry size on remove
    // The size metric tracks total insertions minus evictions

    // Clearing the cache resets the size accounting to zero.
    cache.clear();
    assert_eq!(cache.current_size(), 0);
}
1199
#[test]
fn test_lru_init_constructor() {
    // init() should accept a byte-size limit alongside the entry capacity
    // and report it back unchanged.
    let one_mib = 1024 * 1024;
    let cache: LruCache<String, i32> = LruCache::init(
        LruCacheConfig {
            capacity: NonZeroUsize::new(1000).unwrap(),
            max_size: one_mib,
        },
        None,
    );

    assert_eq!(cache.len(), 0);
    assert_eq!(cache.current_size(), 0);
    assert_eq!(cache.max_size(), one_mib);
}
1213
#[test]
fn test_lru_with_limits_constructor() {
    // Verify that a cache built with both limits reports them back:
    // the entry capacity via cap() and the byte budget via max_size().
    // NOTE(review): the original comment said "with_limits constructor",
    // but the test actually calls `init` — if a separate `with_limits`
    // constructor exists elsewhere, it is not covered here; confirm.
    let config = LruCacheConfig {
        capacity: NonZeroUsize::new(100).unwrap(),
        max_size: 1024 * 1024,
    };
    let cache: LruCache<String, String> = LruCache::init(config, None);

    assert_eq!(cache.current_size(), 0);
    assert_eq!(cache.max_size(), 1024 * 1024);
    assert_eq!(cache.cap().get(), 100);
}
1227
#[test]
fn test_lru_contains_non_promoting() {
    let mut cache = make_cache(2);
    cache.put("a", 1, 1);
    cache.put("b", 2, 1);

    // Presence checks: both resident keys found, absent key not.
    assert!(cache.contains(&"a"));
    assert!(cache.contains(&"b"));
    assert!(!cache.contains(&"c"));

    // Because contains() must not touch recency, "a" is still the LRU
    // entry, so inserting "c" evicts "a" rather than "b".
    cache.put("c", 3, 1);
    assert!(!cache.contains(&"a")); // evicted
    assert!(cache.contains(&"b")); // survived
    assert!(cache.contains(&"c")); // newly inserted
}
1246
#[test]
fn test_put_eviction_increments_eviction_count() {
    let mut cache = make_cache(2);

    // Filling exactly to capacity performs no evictions.
    cache.put("a", 1, 1);
    cache.put("b", 2, 1);
    assert_eq!(cache.segment.metrics().core.evictions, 0);

    // First insert beyond capacity evicts one entry...
    cache.put("c", 3, 1);
    assert_eq!(cache.segment.metrics().core.evictions, 1);

    // ...and each further insert evicts exactly one more.
    cache.put("d", 4, 1);
    assert_eq!(cache.segment.metrics().core.evictions, 2);
}
1262
#[test]
fn test_put_returns_none_when_no_eviction() {
    // With spare capacity, put() reports no evicted entries.
    let mut cache = make_cache(10);
    for (key, value) in [("a", 1), ("b", 2)] {
        assert!(cache.put(key, value, 1).is_none());
    }
}
1269
#[test]
fn test_put_returns_single_eviction() {
    let mut cache = make_cache(2);
    cache.put("a", 1, 1);
    cache.put("b", 2, 1);

    // "a" is the LRU entry, so it is the one handed back to the caller.
    let evicted = cache.put("c", 3, 1);
    assert_eq!(evicted, Some(vec![("a", 1)]));
}
1278
#[test]
fn test_put_replacement_returns_none() {
    let mut cache = make_cache(10);
    cache.put("a", 1, 1);

    // Overwriting an existing key is a replacement, not an eviction,
    // so no evicted entries are reported...
    assert!(cache.put("a", 2, 1).is_none());
    // ...but the stored value is updated in place.
    assert_eq!(cache.get(&"a"), Some(&2));
}
1289
#[test]
fn test_put_returns_multiple_evictions_size_based() {
    // Byte budget of 100 with ample entry capacity, so evictions here
    // are driven purely by size, not by entry count.
    let config = LruCacheConfig {
        capacity: NonZeroUsize::new(10).unwrap(),
        max_size: 100,
    };
    let mut cache = LruCache::init(config, None);

    // Saturate the byte budget: 10 entries x 10 bytes = 100 bytes.
    for i in 0..10 {
        cache.put(i, i, 10);
    }

    // A 50-byte insert must free 50 bytes, i.e. evict 5 LRU entries,
    // and all of them are returned in the eviction list.
    let evicted = cache.put(99, 99, 50).unwrap();
    assert_eq!(evicted.len(), 5);
}
1306}