// evictor/lib.rs
1#![doc = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md"))]
2#![deny(missing_docs)]
3#![cfg_attr(all(doc, ENABLE_DOC_AUTO_CFG), feature(doc_cfg))]
4#![forbid(unsafe_code)]
5
6mod lfu;
7mod lru;
8mod mru;
9mod queue;
10#[cfg(feature = "rand")]
11mod random;
12mod r#ref;
13mod sieve;
14
15use std::hash::Hash;
16use std::num::NonZeroUsize;
17
18pub use lfu::LfuPolicy;
19pub use lru::LruPolicy;
20pub use mru::MruPolicy;
21pub use queue::FifoPolicy;
22pub use queue::LifoPolicy;
23#[cfg(feature = "rand")]
24pub use random::RandomPolicy;
25pub use r#ref::Entry;
26pub use sieve::SievePolicy;
27use tether_map::Ptr;
28use tether_map::linked_hash_map::LinkedHashMap;
29
30#[cfg(not(feature = "ahash"))]
31type RandomState = std::hash::RandomState;
32#[cfg(feature = "ahash")]
33type RandomState = ahash::RandomState;
34
/// Implements the sealed-trait pattern: the crate's public-but-internal
/// traits (`EntryValue`, `Metadata`, `Policy`) require `private::Sealed` as
/// a supertrait, so downstream crates cannot implement them.
mod private {
    /// Marker supertrait; only types inside this crate implement it.
    pub trait Sealed {}
}
38
/// Outcome of [`Policy::insert_or_update_entry`], reported back to the
/// cache front-end.
#[doc(hidden)]
#[must_use]
pub enum InsertionResult<K, T> {
    /// A new entry was inserted at the given pointer.
    // NOTE(review): the `Option<T>` appears to carry a value displaced by the
    // insertion (e.g. an evicted value) — confirm against the policy impls.
    Inserted(Ptr, Option<T>),
    /// An existing entry at the given pointer had its value replaced.
    Updated(Ptr),
    /// An existing entry at the given pointer was touched (its eviction
    /// priority refreshed) without changing its value.
    FoundTouchedNoUpdate(Ptr),
    /// The key was absent and no insertion was performed; the key (and any
    /// value the caller produced) are handed back to the caller.
    NotFoundNoInsert(K, Option<T>),
}
47
48impl<K, T> std::fmt::Debug for InsertionResult<K, T> {
49 fn fmt(
50 &self,
51 f: &mut std::fmt::Formatter<'_>,
52 ) -> std::fmt::Result {
53 match self {
54 InsertionResult::Inserted(ptr, _) => f.debug_tuple("Inserted").field(ptr).finish(),
55 InsertionResult::Updated(ptr) => f.debug_tuple("Updated").field(ptr).finish(),
56 InsertionResult::FoundTouchedNoUpdate(ptr) => {
57 f.debug_tuple("FoundTouchedNoUpdate").field(ptr).finish()
58 }
59 InsertionResult::NotFoundNoInsert(_, _) => f.debug_tuple("NotFoundNoInsert").finish(),
60 }
61 }
62}
63
/// Decision returned by the `get_value` callback handed to
/// [`Policy::insert_or_update_entry`]; selects which
/// [`InsertionResult`] the operation produces.
#[doc(hidden)]
pub enum InsertOrUpdateAction<T> {
    /// Insert this value for the key (or overwrite the existing value).
    InsertOrUpdate(T),
    /// Keep the existing value unchanged; the entry is still touched
    /// (see [`InsertionResult::FoundTouchedNoUpdate`]).
    TouchNoUpdate,
    /// Do not insert; optionally hand a produced value back to the caller
    /// (see [`InsertionResult::NotFoundNoInsert`]).
    NoInsert(Option<T>),
}
70
/// Trait for wrapping values in cache entries.
///
/// This trait allows policies to associate additional metadata with cached
/// values while providing uniform access to the underlying data. Different
/// eviction policies may store additional information alongside the cached
/// value (such as access frequency for Lfu or access timestamps).
///
/// This trait is sealed and cannot be implemented outside this crate.
///
/// # Type Parameters
///
/// * `T` - The type of the cached value being wrapped
///
/// # Design Notes
///
/// This trait abstracts over the storage requirements of different eviction
/// policies. Some policies like Lru only need to store the value itself,
/// while others like Lfu need to track additional metadata such as access
/// frequency counts. By using this trait, the cache implementation can
/// remain generic over different storage strategies.
#[doc(hidden)]
pub trait EntryValue<T>: private::Sealed {
    /// Creates a new entry containing the given value.
    ///
    /// This method should initialize any policy-specific metadata to
    /// appropriate default values. For example, an Lfu policy might
    /// initialize access counts to zero, while an Lru policy might not need
    /// any additional initialization.
    ///
    /// # Arguments
    ///
    /// * `value` - The value to be stored in the cache entry
    fn new(value: T) -> Self;

    /// Converts the entry into its contained value.
    ///
    /// This method consumes the entry and extracts the wrapped value,
    /// discarding any policy-specific metadata. This is typically used
    /// when an entry is being removed from the cache.
    fn into_value(self) -> T;

    /// Returns a reference to the contained value.
    ///
    /// This method provides read-only access to the cached value without
    /// affecting any policy-specific metadata or consuming the entry.
    fn value(&self) -> &T;

    /// Returns a mutable reference to the contained value.
    ///
    /// This method provides write access to the cached value. The policy
    /// may or may not update its metadata when this method is called,
    /// depending on the specific implementation requirements.
    fn value_mut(&mut self) -> &mut T;
}
123
/// Trait for cache metadata that tracks eviction state.
///
/// This trait provides policy-specific metadata storage and the ability
/// to identify which entry should be evicted next. Different eviction
/// policies maintain different types of metadata to efficiently determine
/// eviction order.
///
/// This trait is sealed and cannot be implemented outside this crate.
///
/// # Design Philosophy
///
/// The metadata abstraction allows each eviction policy to maintain its
/// own internal state without exposing implementation details to the
/// generic cache structure. This enables efficient O(1) eviction decisions
/// across all supported policies.
///
/// # Metadata Examples by Policy
///
/// - **Lru/Mru**: Typically maintains a doubly-linked list order via indices
/// - **Lfu**: May track frequency buckets and least-used indices
/// - **Fifo/Lifo**: Often just tracks head/tail pointers for queue ordering
/// - **Random**: May maintain no additional state or seed information
///
/// # Default Implementation
///
/// The `Default` trait bound ensures that metadata can be easily initialized
/// when creating a new cache. The default state should represent an empty
/// cache with no entries.
#[doc(hidden)]
pub trait Metadata<T>: Default + private::Sealed + Clone {
    /// The entry type used by this policy to wrap cached values.
    type EntryType: EntryValue<T>;

    /// Returns a pointer to the entry that should be evicted next.
    ///
    /// This method provides the core eviction logic by identifying which
    /// entry in the cache should be removed when space is needed. The
    /// returned pointer refers to an entry in the cache's internal
    /// storage (a `LinkedHashMap`).
    ///
    /// # Returns
    ///
    /// A pointer to the entry that would be evicted next according to
    /// this policy's eviction strategy, or `None` when there is nothing to
    /// evict. An empty cache must return `None` — the default
    /// [`Policy::evict_entry`] implementation relies on this.
    ///
    /// # Performance
    ///
    /// This method must have O(1) time complexity to maintain the cache's
    /// performance guarantees. Implementations should pre-compute or
    /// efficiently track the eviction candidate rather than scanning
    /// entries.
    ///
    /// # Policy-Specific Behavior
    ///
    /// - **Lru**: Returns the least recently used entry
    /// - **Mru**: Returns the most recently used entry
    /// - **Lfu**: Returns the least frequently used entry
    /// - **Fifo**: Returns the first inserted entry
    /// - **Lifo**: Returns the last inserted entry
    /// - **Random**: Returns a randomly selected entry
    ///
    /// # Usage
    ///
    /// This method is typically called by the cache implementation when:
    /// - The cache is at capacity and a new entry needs to be inserted
    /// - The `pop()` method is called to explicitly remove the tail entry
    /// - The `tail()` method is called to inspect the next eviction candidate
    fn candidate_removal_index<K>(
        &self,
        queue: &LinkedHashMap<K, Self::EntryType, RandomState>,
    ) -> Option<Ptr>;

    /// Re-initializes `self` as a copy of `other` after the backing queue
    /// has been cloned into `new_queue`.
    ///
    /// The default implementation ignores `new_queue` and delegates to
    /// [`Clone::clone_from`]. Policies whose metadata stores `Ptr`s into
    /// the queue are expected to override this so their pointers can be
    /// remapped to the freshly cloned storage.
    fn clone_from<K>(
        &mut self,
        other: &Self,
        new_queue: &LinkedHashMap<K, Self::EntryType, RandomState>,
    ) {
        let _ = new_queue;
        <Self as Clone>::clone_from(self, other);
    }
}
203
/// Trait defining cache eviction policies.
///
/// This trait encapsulates the behavior of different cache eviction strategies
/// such as Lru, Mru, Lfu, Fifo, Lifo, and Random. Each policy defines how
/// entries are prioritized and which entry should be evicted when the cache is
/// full.
///
/// This trait is sealed and cannot be implemented outside this crate.
///
/// # Type Parameters
///
/// * `T` - The type of values stored in the cache
///
/// # Associated Types
///
/// This trait defines the associated types that work together:
/// - `MetadataType`: Maintains global policy state for eviction decisions
///   (and, via [`Metadata::EntryType`], the wrapper around cached values)
/// - `IntoIter`: Provides owned iteration over cache contents in eviction order
///
/// # Policy Implementations
///
/// The crate provides several built-in policy implementations:
///
/// ## Recency-Based Policies
/// - **Lru (Least Recently Used)**: Evicts entries that haven't been accessed
///   recently
/// - **Mru (Most Recently Used)**: Evicts entries that were accessed most
///   recently
///
/// ## Frequency-Based Policies
/// - **Lfu (Least Frequently Used)**: Evicts entries with the lowest access
///   count
///
/// ## Insertion-Order Policies
/// - **Fifo (First In, First Out)**: Evicts entries in insertion order
/// - **Lifo (Last In, First Out)**: Evicts entries in reverse insertion order
///
/// ## Randomized Policies
/// - **Random**: Evicts entries randomly
///
/// ## Sieve Policy
/// - **Sieve**: Uses a visited bit and hand pointer for efficient eviction with
///   second-chance semantics
///
/// # Error Handling
///
/// Policy implementations should handle edge cases gracefully:
/// - Empty caches (no entries to evict)
/// - Single-entry caches
/// - Invalid pointers (though these should not occur in normal operation)
///
/// # Iteration Order Consistency
///
/// The `iter()` and `into_iter()` methods must return entries in eviction
/// order, meaning the first item returned should be the same as what `tail()`
/// would return (except for policies where the order is specified as
/// arbitrary e.g. [`Random`]).
#[doc(hidden)]
pub trait Policy<T>: private::Sealed {
    /// The metadata type used by this policy to track eviction state.
    type MetadataType: Metadata<T>;

    /// The iterator type returned by `into_iter()`.
    type IntoIter<K: Hash + Eq>: Iterator<Item = (K, T)>;

    /// Inserts a new entry, updates an existing one, or merely touches it,
    /// depending on what the `get_value` callback decides.
    ///
    /// `get_value` is invoked with the key and a flag indicating whether the
    /// operation would be an insert (key absent) as opposed to an update;
    /// the [`InsertOrUpdateAction`] it returns selects the outcome. When the
    /// key is absent, `make_room_on_insert` indicates whether the
    /// implementation should make room (evict) before inserting into a full
    /// cache.
    ///
    /// # Returns
    /// An [`InsertionResult`] describing what happened, including the
    /// pointer to the affected entry where applicable.
    #[track_caller]
    fn insert_or_update_entry<K: Hash + Eq>(
        key: K,
        make_room_on_insert: bool,
        get_value: impl FnOnce(&K, /* is_insert */ bool) -> InsertOrUpdateAction<T>,
        metadata: &mut Self::MetadataType,
        queue: &mut LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> InsertionResult<K, T>;

    /// Updates the entry's position in the eviction order.
    ///
    /// This method is called when an entry is accessed (via get, insert, etc.)
    /// and should update the policy's internal state to reflect the access.
    /// Any reordering happens in place; nothing is returned.
    ///
    /// # Parameters
    /// - `ptr`: Pointer to the accessed entry
    /// - `metadata`: Mutable reference to policy metadata
    /// - `queue`: Mutable reference to the cache's storage
    #[track_caller]
    fn touch_entry<K: Hash + Eq>(
        ptr: Ptr,
        metadata: &mut Self::MetadataType,
        queue: &mut LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    );

    /// Evict the next entry from the cache. Returns the removed key-entry
    /// pair, or `None` when the cache is empty (i.e. when
    /// [`Metadata::candidate_removal_index`] yields no candidate).
    #[inline]
    #[track_caller]
    fn evict_entry<K: Hash + Eq>(
        metadata: &mut Self::MetadataType,
        queue: &mut LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> Option<(K, <Self::MetadataType as Metadata<T>>::EntryType)> {
        // Ask the metadata which entry goes next; bail out on an empty cache.
        let removed = metadata.candidate_removal_index(queue)?;
        Self::remove_entry(removed, metadata, queue).1
    }

    /// Removes the entry at `ptr` and returns a pointer to the entry that
    /// takes its place, plus the removed key-entry pair.
    ///
    /// This method handles the removal of an entry and updates the policy's
    /// internal state to maintain consistency.
    ///
    /// **Note**: Removal must use swap-remove semantics: after removing the
    /// item at `ptr`, its slot must be filled by what was previously the
    /// *last* element of the queue. That is, if you have a queue like
    /// `[0, 1, 2, 3]` and you remove index 1, the queue must become
    /// `[0, 3, 2]`. This is relied on for insertion and retain operations.
    ///
    /// # Parameters
    /// - `ptr`: Pointer to the entry to remove
    /// - `metadata`: Mutable reference to policy metadata
    /// - `queue`: Mutable reference to the cache's storage
    ///
    /// # Returns
    /// - A pointer to the next entry in the queue after removal
    /// - The removed key-value pair, or None if the pointer was invalid
    #[allow(clippy::type_complexity)]
    #[track_caller]
    fn remove_entry<K: Hash + Eq>(
        ptr: Ptr,
        metadata: &mut Self::MetadataType,
        queue: &mut LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> (
        Option<Ptr>,
        Option<(K, <Self::MetadataType as Metadata<T>>::EntryType)>,
    );

    /// Removes the entry stored under `key`, if any, returning its entry
    /// wrapper. Implementations must leave the policy metadata consistent,
    /// with the same queue-ordering guarantees as [`Policy::remove_entry`].
    #[track_caller]
    fn remove_key<K: Hash + Eq>(
        key: &K,
        metadata: &mut Self::MetadataType,
        queue: &mut LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> Option<<Self::MetadataType as Metadata<T>>::EntryType>;

    /// Returns an iterator over the entries in the cache in eviction order.
    ///
    /// The iterator yields key-value pairs in the order they would be evicted,
    /// starting with the entry that would be evicted first (the "tail" of the
    /// eviction order). The specific ordering depends on the policy:
    ///
    /// - **Lru**: Iterates from least recently used to most recently used
    /// - **Mru**: Iterates from most recently used to least recently used
    /// - **Lfu**: Iterates from least frequently used to most frequently used,
    ///   with ties broken by insertion/access order
    /// - **Fifo**: Iterates from first inserted to last inserted
    /// - **Lifo**: Iterates from last inserted to first inserted
    /// - **Random**: Iterates in random order (debug) or arbitrary order
    ///   (release)
    ///
    /// This is an internal trait method used by policy implementations.
    ///
    /// # Parameters
    /// - `metadata`: Reference to policy-specific metadata for traversal
    /// - `queue`: Reference to the cache's underlying storage
    ///
    /// # Returns
    /// An iterator yielding `(&Key, &Value)` pairs in eviction order
    fn iter<'q, K>(
        metadata: &'q Self::MetadataType,
        queue: &'q LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> impl Iterator<Item = (&'q K, &'q T)>
    where
        T: 'q;

    /// Converts the cache into an iterator over key-value pairs in eviction
    /// order.
    ///
    /// This method consumes the cache and returns an iterator that yields `(K,
    /// T)` pairs. The iteration order depends on the specific eviction
    /// policy:
    ///
    /// - **Lru**: Items are yielded from least recently used to most recently
    ///   used
    /// - **Mru**: Items are yielded from most recently used to least recently
    ///   used
    /// - **Lfu**: Items are yielded from least frequently used to most
    ///   frequently used
    /// - **Fifo**: Items are yielded from first inserted to last inserted
    /// - **Lifo**: Items are yielded from last inserted to first inserted
    /// - **Random**: Items are yielded in an arbitrary order.
    ///
    /// # Parameters
    /// - `metadata`: The policy's metadata containing eviction state
    ///   information
    /// - `queue`: The cache's internal storage map containing all key-value
    ///   pairs
    ///
    /// # Returns
    /// An iterator that yields `(K, T)` pairs in the policy-specific eviction
    /// order. The iterator takes ownership of the cache contents, making
    /// this a consuming operation.
    fn into_iter<K: Hash + Eq>(
        metadata: Self::MetadataType,
        queue: LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> Self::IntoIter<K>;

    /// Converts the cache entries into an iterator of key-entry pairs in
    /// eviction order.
    ///
    /// This method is similar to `into_iter()` but returns the full entry
    /// wrappers instead of just the values. This allows access to any
    /// policy-specific metadata stored within the entries, which can be
    /// useful for debugging, analysis, or advanced cache operations.
    ///
    /// # Parameters
    ///
    /// * `metadata` - The policy's metadata containing eviction state
    ///   information
    /// * `queue` - The cache's internal storage map containing all key-entry
    ///   pairs
    ///
    /// # Returns
    ///
    /// An iterator that yields `(K, <Self::MetadataType as
    /// Metadata<T>>::EntryType)` pairs in the policy-specific eviction
    /// order. The iterator consumes the cache contents.
    ///
    /// # Use Cases
    ///
    /// This method is primarily used for internal implementation details where
    /// entry metadata is needed
    ///
    /// For typical usage, prefer `into_iter()` which extracts just the values.
    fn into_entries<K: Hash + Eq>(
        metadata: Self::MetadataType,
        queue: LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) -> impl Iterator<Item = (K, <Self::MetadataType as Metadata<T>>::EntryType)>;

    /// Validates the cache's internal state against the full set of kv pairs
    /// known. This is **expensive** and should only be used for debugging
    /// purposes.
    #[cfg(all(debug_assertions, feature = "internal-debugging"))]
    #[doc(hidden)]
    fn debug_validate<K: Hash + Eq + std::fmt::Debug>(
        metadata: &Self::MetadataType,
        queue: &LinkedHashMap<K, <Self::MetadataType as Metadata<T>>::EntryType, RandomState>,
    ) where
        T: std::fmt::Debug;
}
452
/// A least-recently-used (Lru) cache.
///
/// Alias for [`Cache`] instantiated with [`LruPolicy`].
///
/// Evicts the entry that was accessed longest ago when the cache is full.
/// This policy is ideal for workloads where recently accessed items are
/// more likely to be accessed again.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Lru;
///
/// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
///
/// cache.get(&1); // Mark as recently used
/// cache.insert(4, "four".to_string()); // Evicts key 2
///
/// // `into_iter` returns the items in eviction order, which is Lru first.
/// assert_eq!(
///     cache.into_iter().collect::<Vec<_>>(),
///     [
///         (3, "three".to_string()),
///         (1, "one".to_string()),
///         (4, "four".to_string()),
///     ]
/// );
/// ```
#[doc(alias = "LRU")]
pub type Lru<Key, Value> = Cache<Key, Value, LruPolicy>;

/// See [`Lru`].
#[doc(hidden)]
pub type LRU<Key, Value> = Lru<Key, Value>;
495
/// A most-recently-used (Mru) cache.
///
/// Alias for [`Cache`] instantiated with [`MruPolicy`].
///
/// Evicts the entry that was accessed most recently when the cache is full.
/// This policy can be useful for workloads with sequential access patterns
/// where the most recently accessed item is unlikely to be needed again.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Mru;
///
/// let mut cache = Mru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
///
/// cache.get(&1); // Mark as recently used
/// cache.insert(4, "four".to_string()); // Evicts key 1
///
/// // `into_iter` returns the items in eviction order, which is Mru first.
/// assert_eq!(
///     cache.into_iter().collect::<Vec<_>>(),
///     [
///         (4, "four".to_string()),
///         (3, "three".to_string()),
///         (2, "two".to_string()),
///     ]
/// );
/// ```
#[doc(alias = "MRU")]
pub type Mru<Key, Value> = Cache<Key, Value, MruPolicy>;

/// See [`Mru`].
#[doc(hidden)]
pub type MRU<Key, Value> = Mru<Key, Value>;
538
/// A least-frequently-used (Lfu) cache.
///
/// Alias for [`Cache`] instantiated with [`LfuPolicy`].
///
/// This implementation uses frequency buckets with doubly-linked lists to
/// achieve O(1) time complexity for all operations.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Lfu;
///
/// let mut cache = Lfu::<i32, String>::new(NonZeroUsize::new(4).unwrap());
///
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
/// cache.insert(4, "four".to_string());
///
/// // Access key 1 multiple times to increase its frequency
/// cache.get(&1);
/// cache.get(&1);
/// cache.get(&1);
///
/// // Access key 2, 3 once
/// cache.get(&2);
/// cache.get(&3);
///
/// // Key 4 has frequency 0 (never accessed after insertion)
/// // Insert new item - this will evict the least frequently used (key 4)
/// cache.insert(5, "five".to_string());
///
/// // `into_iter` returns the items in eviction order, which is Lfu first, with Lru tiebreaking.
/// assert_eq!(
///     cache.into_iter().collect::<Vec<_>>(),
///     [
///         (5, "five".to_string()),
///         (2, "two".to_string()),
///         (3, "three".to_string()),
///         (1, "one".to_string()),
///     ]
/// );
/// ```
#[doc(alias = "LFU")]
pub type Lfu<Key, Value> = Cache<Key, Value, LfuPolicy>;

/// See [`Lfu`].
#[doc(hidden)]
pub type LFU<Key, Value> = Lfu<Key, Value>;
593
/// A first-in-first-out (Fifo) cache.
///
/// Alias for [`Cache`] instantiated with [`FifoPolicy`].
///
/// Evicts the entry that was inserted earliest when the cache is full.
/// This policy treats the cache as a queue where the oldest inserted
/// item is removed first, regardless of access patterns.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Fifo;
///
/// let mut cache = Fifo::<i32, String>::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
///
/// cache.get(&1); // Access key 1 - this doesn't affect eviction order in Fifo
/// cache.insert(4, "four".to_string()); // Evicts key 1 (first inserted)
///
/// // `into_iter` returns the items in eviction order, which is Fifo.
/// assert_eq!(
///     cache.into_iter().collect::<Vec<_>>(),
///     [
///         (2, "two".to_string()),
///         (3, "three".to_string()),
///         (4, "four".to_string()),
///     ]
/// );
/// ```
#[doc(alias = "FIFO")]
pub type Fifo<Key, Value> = Cache<Key, Value, FifoPolicy>;

/// See [`Fifo`].
#[doc(hidden)]
pub type FIFO<Key, Value> = Fifo<Key, Value>;
636
/// A last-in-first-out (Lifo) cache.
///
/// Alias for [`Cache`] instantiated with [`LifoPolicy`].
///
/// Evicts the entry that was inserted most recently when the cache is full.
/// This policy treats the cache as a stack where the most recently inserted
/// item is removed first, regardless of access patterns.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Lifo;
///
/// let mut cache = Lifo::<i32, String>::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
///
/// cache.get(&1); // Access key 1 - this doesn't affect eviction order in Lifo
/// cache.insert(4, "four".to_string()); // Evicts key 3 (most recently inserted)
///
/// // `into_iter` returns the items in eviction order, which is Lifo.
/// assert_eq!(
///     cache.into_iter().collect::<Vec<_>>(),
///     [
///         (4, "four".to_string()),
///         (2, "two".to_string()),
///         (1, "one".to_string()),
///     ]
/// );
/// ```
#[doc(alias = "LIFO")]
pub type Lifo<Key, Value> = Cache<Key, Value, LifoPolicy>;

/// See [`Lifo`].
#[doc(hidden)]
pub type LIFO<Key, Value> = Lifo<Key, Value>;
679
/// A random eviction cache.
///
/// Alias for [`Cache`] instantiated with [`RandomPolicy`]. Only available
/// when the `rand` feature is enabled.
///
/// Evicts entries randomly when the cache is full. This policy can be useful
/// for workloads where no particular access pattern can be predicted, or as
/// a simple baseline for comparison with other eviction policies.
///
/// The random selection is performed using the `rand` crate. You **must
/// not** rely on the order returned by iteration. This is enforced by
/// randomizing the order of iteration during debug builds. Release builds do
/// not perform this randomization for performance reasons, but the order is
/// still arbitrary and should not be relied upon.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1)
///
/// # Examples
///
/// ```
/// use std::num::NonZeroUsize;
///
/// use evictor::Random;
///
/// let mut cache = Random::<i32, String>::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one".to_string());
/// cache.insert(2, "two".to_string());
/// cache.insert(3, "three".to_string());
///
/// // Access doesn't affect eviction order in Random policy
/// cache.get(&1);
/// cache.insert(4, "four".to_string()); // Evicts a random entry
///
/// // The order returned by `into_iter` is not predictable
/// let pairs: Vec<_> = cache.into_iter().collect();
/// assert_eq!(pairs.len(), 3);
/// ```
#[cfg(feature = "rand")]
pub type Random<Key, Value> = Cache<Key, Value, RandomPolicy>;
719
/// A Sieve cache implementing the algorithm outlined in the paper
/// [Sieve is Simpler than Lru](https://junchengyang.com/publication/nsdi24-SIEVE.pdf).
///
/// Alias for [`Cache`] instantiated with [`SievePolicy`].
///
/// Sieve uses a "visited" bit per entry and a hand pointer that scans
/// for eviction candidates, giving recently accessed items a "second chance"
/// before eviction.
///
/// # Algorithm Overview
///
/// Sieve maintains:
/// - A **visited bit** for each cache entry (initially false)
/// - A **hand pointer** that moves through entries looking for eviction
///   candidates
/// - A **doubly-linked list** structure for efficient traversal
///
/// ## Key Operations
///
/// ### Access (get/get_mut)
/// When an entry is accessed:
/// 1. The visited bit is set to `true`
/// 2. The entry position in eviction order may be updated
///
/// ### Eviction
/// When space is needed:
/// 1. The hand pointer scans entries starting from its current position
/// 2. For each entry with `visited = true`: reset to `false` and continue
///    scanning
/// 3. The first entry with `visited = false` is evicted
/// 4. The hand pointer advances to the next position
///
/// This gives recently accessed entries a "second chance" - they're skipped
/// once during eviction scanning, but will be evicted on a later pass unless
/// they are accessed again in the meantime.
///
/// # Time Complexity
/// - Insert/Get/Remove: O(1) average, O(n) worst case
/// - Peek/Contains: O(1) average, O(n) worst case
/// - Pop/Clear: O(1) average, O(n) worst case
///
/// # Examples
///
/// ## Basic Usage
/// ```rust
/// use std::num::NonZeroUsize;
///
/// use evictor::Sieve;
///
/// let mut cache = Sieve::new(NonZeroUsize::new(3).unwrap());
/// cache.insert(1, "one");
/// cache.insert(2, "two");
/// cache.insert(3, "three");
///
/// // Access entry 1 to set its visited bit
/// cache.get(&1);
///
/// // This will evict entry 2 (unvisited), not entry 1
/// cache.insert(4, "four");
/// assert!(cache.contains_key(&1)); // Still present (second chance)
/// assert!(!cache.contains_key(&2)); // Evicted
/// ```
///
/// ## Second Chance Behavior
/// ```rust
/// use std::num::NonZeroUsize;
///
/// use evictor::Sieve;
///
/// let mut cache = Sieve::new(NonZeroUsize::new(2).unwrap());
/// cache.insert("A", 1);
/// cache.insert("B", 2);
///
/// // Access A to mark it as visited
/// cache.get(&"A");
///
/// // Insert C - will scan and find A visited, reset its bit, continue to B
/// cache.insert("C", 3);
/// assert!(cache.contains_key(&"A")); // A got second chance
/// assert!(!cache.contains_key(&"B")); // B was evicted
///
/// // Now A's visited bit is false, so next eviction will remove A
/// cache.insert("D", 4);
/// assert!(!cache.contains_key(&"A")); // A evicted (used its second chance)
/// assert!(cache.contains_key(&"C"));
/// assert!(cache.contains_key(&"D"));
/// ```
///
/// ## Comparison with peek() vs get()
/// ```rust
/// use std::num::NonZeroUsize;
///
/// use evictor::Sieve;
///
/// let mut cache = Sieve::new(NonZeroUsize::new(2).unwrap());
/// cache.insert(1, "one");
/// cache.insert(2, "two");
///
/// // peek() does NOT set visited bit
/// cache.peek(&1);
/// cache.insert(3, "three"); // Evicts 1 (not visited)
/// assert!(!cache.contains_key(&1));
///
/// // get() DOES set visited bit
/// cache.get(&2);
/// cache.insert(4, "four"); // Evicts 3 (2 gets second chance)
/// assert!(cache.contains_key(&2));
/// assert!(!cache.contains_key(&3));
/// ```
///
/// ## Iteration Order
/// ```rust
/// use std::num::NonZeroUsize;
///
/// use evictor::Sieve;
///
/// let mut cache = Sieve::new(NonZeroUsize::new(3).unwrap());
/// cache.insert("A", 1);
/// cache.insert("B", 2);
/// cache.insert("C", 3);
///
/// // Iteration follows eviction order (starting from hand position)
/// let items: Vec<_> = cache.iter().collect();
/// // Order depends on hand position and visited bits
/// assert_eq!(items.len(), 3);
///
/// // First item should match tail() (next eviction candidate)
/// assert_eq!(cache.tail().unwrap(), cache.iter().next().unwrap());
/// ```
#[doc(alias = "SIEVE")]
pub type Sieve<Key, Value> = Cache<Key, Value, SievePolicy>;

/// See [`Sieve`].
#[doc(hidden)]
pub type SIEVE<Key, Value> = Sieve<Key, Value>;
853
/// Cache performance and usage statistics.
///
/// Only available when the `statistics` feature is enabled.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "statistics")] {
/// use evictor::Lru;
///
/// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
/// cache.insert("a".to_string(), 1);
/// cache.insert("b".to_string(), 2);
///
/// let stats = cache.statistics();
/// assert_eq!(stats.len(), 2);
/// assert_eq!(stats.insertions(), 2);
/// assert_eq!(stats.evictions(), 0);
/// # }
/// ```
#[derive(Clone, Debug)]
#[cfg(feature = "statistics")]
pub struct Statistics {
    /// Number of entries currently stored in the cache.
    len: usize,
    /// Maximum number of entries the cache can hold.
    capacity: NonZeroUsize,
    /// Total number of evictions recorded.
    evictions: u64,
    /// Total number of insertions recorded.
    insertions: u64,
    /// Total number of lookup misses recorded.
    misses: u64,
    /// Total number of lookup hits recorded.
    hits: u64,
}
882
883#[cfg(feature = "statistics")]
884impl Statistics {
    /// Creates a statistics block for a cache with the given `capacity`;
    /// the length and all counters start at zero.
    fn with_capacity(capacity: NonZeroUsize) -> Self {
        Self {
            len: 0,
            capacity,
            evictions: 0,
            insertions: 0,
            misses: 0,
            hits: 0,
        }
    }
895
896 /// Returns `true` if the cache contains no entries.
897 ///
898 /// # Examples
899 ///
900 /// ```
901 /// # #[cfg(feature = "statistics")] {
902 /// use evictor::Lru;
903 ///
904 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
905 /// assert!(cache.statistics().is_empty());
906 ///
907 /// cache.insert("key".to_string(), "value".to_string());
908 /// assert!(!cache.statistics().is_empty());
909 /// # }
910 /// ```
911 pub fn is_empty(&self) -> bool {
912 self.len == 0
913 }
914
915 /// Returns the current number of entries stored in the cache.
916 ///
917 /// This count represents the actual number of key-value pairs currently
918 /// residing in the cache, which may be less than the cache's capacity.
919 ///
920 /// # Examples
921 ///
922 /// ```
923 /// # #[cfg(feature = "statistics")] {
924 /// use evictor::Lru;
925 ///
926 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(3).unwrap());
927 /// assert_eq!(cache.statistics().len(), 0);
928 ///
929 /// cache.insert("a".to_string(), 1);
930 /// cache.insert("b".to_string(), 2);
931 /// assert_eq!(cache.statistics().len(), 2);
932 /// # }
933 /// ```
934 pub fn len(&self) -> usize {
935 self.len
936 }
937
938 /// Returns the current cache residency as a percentage (0.0 to 100.0).
939 ///
940 /// Residency represents how full the cache is, calculated as the current
941 /// number of entries divided by the maximum capacity, expressed as a
942 /// percentage. A residency of 100.0% indicates the cache is at full
943 /// capacity.
944 ///
945 /// # Examples
946 ///
947 /// ```
948 /// # #[cfg(feature = "statistics")] {
949 /// use evictor::Lru;
950 ///
951 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(4).unwrap());
952 /// assert_eq!(cache.statistics().residency(), 0.0);
953 ///
954 /// cache.insert("a".to_string(), 1);
955 /// cache.insert("b".to_string(), 2);
956 /// assert_eq!(cache.statistics().residency(), 50.0);
957 ///
958 /// cache.insert("c".to_string(), 3);
959 /// cache.insert("d".to_string(), 4);
960 /// assert_eq!(cache.statistics().residency(), 100.0);
961 /// # }
962 /// ```
963 pub fn residency(&self) -> f64 {
964 self.len as f64 / self.capacity.get() as f64 * 100.0
965 }
966
967 /// Returns the maximum capacity of the cache.
968 ///
969 /// This is the maximum number of key-value pairs that the cache can store
970 /// before eviction policies take effect to make room for new entries.
971 ///
972 /// # Examples
973 ///
974 /// ```
975 /// # #[cfg(feature = "statistics")] {
976 /// use evictor::Lru;
977 ///
978 /// let cache: Lru<String, i32> = Lru::new(std::num::NonZeroUsize::new(100).unwrap());
979 /// assert_eq!(cache.statistics().capacity(), 100);
980 /// # }
981 /// ```
982 pub fn capacity(&self) -> usize {
983 self.capacity.get()
984 }
985
986 /// Returns the total number of evictions that have occurred.
987 ///
988 /// An eviction happens when an entry is removed from the cache to make room
989 /// for a new entry, according to the cache's eviction policy (LRU, LFU,
990 /// etc.). This counter tracks the lifetime total of such removals.
991 ///
992 /// # Examples
993 ///
994 /// ```
995 /// # #[cfg(feature = "statistics")] {
996 /// use evictor::Lru;
997 ///
998 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
999 /// assert_eq!(cache.statistics().evictions(), 0);
1000 ///
1001 /// cache.insert("a".to_string(), 1);
1002 /// cache.insert("b".to_string(), 2);
1003 /// assert_eq!(cache.statistics().evictions(), 0);
1004 ///
1005 /// // This insertion will evict the least recently used entry
1006 /// cache.insert("c".to_string(), 3);
1007 /// assert_eq!(cache.statistics().evictions(), 1);
1008 /// # }
1009 /// ```
1010 pub fn evictions(&self) -> u64 {
1011 self.evictions
1012 }
1013
1014 /// Returns the total number of insertion operations that have occurred.
1015 ///
1016 /// This counter increments every time a new key-value pair is inserted into
1017 /// the cache, regardless of whether it causes an eviction. Updates to
1018 /// existing keys do not count as insertions.
1019 ///
1020 /// # Examples
1021 ///
1022 /// ```
1023 /// # #[cfg(feature = "statistics")] {
1024 /// use evictor::Lru;
1025 ///
1026 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
1027 /// assert_eq!(cache.statistics().insertions(), 0);
1028 ///
1029 /// cache.insert("a".to_string(), 1);
1030 /// assert_eq!(cache.statistics().insertions(), 1);
1031 ///
1032 /// cache.insert("a".to_string(), 2); // Update existing key
1033 /// assert_eq!(cache.statistics().insertions(), 1);
1034 /// # }
1035 /// ```
1036 pub fn insertions(&self) -> u64 {
1037 self.insertions
1038 }
1039
1040 /// Returns the total number of cache hits.
1041 ///
1042 /// A cache hit occurs when a requested key is found in the cache.
1043 /// This metric is useful for measuring cache effectiveness.
1044 ///
1045 /// # Examples
1046 ///
1047 /// ```
1048 /// # #[cfg(feature = "statistics")] {
1049 /// use evictor::Lru;
1050 ///
1051 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
1052 /// cache.insert("a".to_string(), 1);
1053 ///
1054 /// assert_eq!(cache.statistics().hits(), 0);
1055 ///
1056 /// cache.get(&"a".to_string()); // Hit
1057 /// assert_eq!(cache.statistics().hits(), 1);
1058 ///
1059 /// cache.get(&"b".to_string()); // Miss
1060 /// assert_eq!(cache.statistics().hits(), 1);
1061 /// # }
1062 /// ```
1063 pub fn hits(&self) -> u64 {
1064 self.hits
1065 }
1066
1067 /// Returns the total number of cache misses.
1068 ///
1069 /// A cache miss occurs when a requested key is not found in the cache.
1070 /// High miss rates may indicate the cache size is too small or the access
1071 /// pattern is not well-suited for caching.
1072 ///
1073 /// # Examples
1074 ///
1075 /// ```
1076 /// # #[cfg(feature = "statistics")] {
1077 /// use evictor::Lru;
1078 ///
1079 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
1080 /// cache.insert("a".to_string(), 1);
1081 ///
1082 /// assert_eq!(cache.statistics().misses(), 0);
1083 ///
1084 /// cache.get(&"a".to_string()); // Hit
1085 /// assert_eq!(cache.statistics().misses(), 0);
1086 ///
1087 /// cache.get(&"b".to_string()); // Miss
1088 /// assert_eq!(cache.statistics().misses(), 1);
1089 /// # }
1090 /// ```
1091 pub fn misses(&self) -> u64 {
1092 self.misses
1093 }
1094
1095 /// Returns the cache hit rate as a percentage (0.0 to 100.0).
1096 ///
1097 /// The hit rate is calculated as `hits / (hits + misses) * 100`.
1098 /// A higher hit rate indicates better cache performance. Returns 0.0
1099 /// if no cache access operations have been performed yet.
1100 ///
1101 /// # Examples
1102 ///
1103 /// ```
1104 /// # #[cfg(feature = "statistics")] {
1105 /// use evictor::Lru;
1106 ///
1107 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
1108 /// cache.insert("a".to_string(), 1);
1109 /// cache.insert("b".to_string(), 2);
1110 ///
1111 /// // Initially no accesses, so hit rate is 0
1112 /// assert_eq!(cache.statistics().hit_rate(), 0.0);
1113 ///
1114 /// cache.get(&"a".to_string()); // Hit
1115 /// cache.get(&"b".to_string()); // Hit
1116 /// cache.get(&"c".to_string()); // Miss
1117 ///
1118 /// // 2 hits out of 3 total accesses = 66.67%
1119 /// assert!((cache.statistics().hit_rate() - 66.66666666666666).abs() < f64::EPSILON);
1120 /// # }
1121 /// ```
1122 pub fn hit_rate(&self) -> f64 {
1123 if self.hits + self.misses == 0 {
1124 0.0
1125 } else {
1126 self.hits as f64 / (self.hits + self.misses) as f64 * 100.0
1127 }
1128 }
1129
1130 /// Returns the cache miss rate as a percentage (0.0 to 100.0).
1131 ///
1132 /// The miss rate is calculated as `misses / (hits + misses) * 100`.
1133 /// A lower miss rate indicates better cache performance. Returns 0.0
1134 /// if no cache access operations have been performed yet.
1135 ///
1136 /// Note: `hit_rate() + miss_rate()` always equals 100.0 (when there have
1137 /// been accesses).
1138 ///
1139 /// # Examples
1140 ///
1141 /// ```
1142 /// # #[cfg(feature = "statistics")] {
1143 /// use evictor::Lru;
1144 ///
1145 /// let mut cache = Lru::new(std::num::NonZeroUsize::new(2).unwrap());
1146 /// cache.insert("a".to_string(), 1);
1147 ///
1148 /// // Initially no accesses, so miss rate is 0
1149 /// assert_eq!(cache.statistics().miss_rate(), 0.0);
1150 ///
1151 /// cache.get(&"a".to_string()); // Hit
1152 /// cache.get(&"b".to_string()); // Miss
1153 /// cache.get(&"c".to_string()); // Miss
1154 ///
1155 /// // 2 misses out of 3 total accesses = 66.67%
1156 /// assert!((cache.statistics().miss_rate() - 66.66666666666666).abs() < f64::EPSILON);
1157 /// # }
1158 /// ```
1159 pub fn miss_rate(&self) -> f64 {
1160 if self.hits + self.misses == 0 {
1161 0.0
1162 } else {
1163 self.misses as f64 / (self.hits + self.misses) as f64 * 100.0
1164 }
1165 }
1166}
1167
/// A generic cache implementation with configurable eviction policies.
///
/// `Cache` is the underlying generic structure that powers Lru, Mru, Lfu,
/// Fifo, Lifo, Random, and Sieve caches. The eviction behavior is determined by
/// the `PolicyType` parameter, which must implement the `Policy` trait.
///
/// For most use cases, you should use the type aliases [`Lru`], [`Mru`],
/// [`Lfu`], [`Fifo`], [`Lifo`], [`Random`], or [`Sieve`] instead of using
/// `Cache` directly.
///
/// # Type Parameters
///
/// * `Key` - The type of keys stored in the cache. Must implement [`Hash`] +
///   [`Eq`].
/// * `Value` - The type of values stored in the cache.
/// * `PolicyType` - The eviction policy implementation. Must implement
///   `Policy`.
///
/// # Memory Management
///
/// - Pre-allocates space for `capacity` entries to minimize reallocations
/// - Automatically evicts entries when capacity is exceeded
pub struct Cache<Key, Value, PolicyType: Policy<Value>> {
    // Linked hash map holding each key alongside the policy's per-entry
    // wrapper type (the value plus any policy-specific bookkeeping).
    queue:
        LinkedHashMap<Key, <PolicyType::MetadataType as Metadata<Value>>::EntryType, RandomState>,
    // Maximum number of entries; inserting beyond this triggers an eviction.
    capacity: NonZeroUsize,
    // Policy-wide state (e.g. which entry is the next eviction candidate —
    // see `Metadata::candidate_removal_index`).
    metadata: PolicyType::MetadataType,

    #[cfg(feature = "statistics")]
    statistics: Statistics,
}
1199
1200impl<Key, Value, PolicyType: Policy<Value>> std::fmt::Debug for Cache<Key, Value, PolicyType>
1201where
1202 Key: std::fmt::Debug,
1203 Value: std::fmt::Debug,
1204 PolicyType::MetadataType: std::fmt::Debug,
1205 <PolicyType::MetadataType as Metadata<Value>>::EntryType: std::fmt::Debug,
1206{
1207 fn fmt(
1208 &self,
1209 f: &mut std::fmt::Formatter<'_>,
1210 ) -> std::fmt::Result {
1211 f.debug_struct("Cache")
1212 .field("queue", &self.queue)
1213 .field("capacity", &self.capacity)
1214 .field("metadata", &self.metadata)
1215 .finish()
1216 }
1217}
1218
1219impl<Key, Value, PolicyType: Policy<Value>> Clone for Cache<Key, Value, PolicyType>
1220where
1221 Key: Clone + Hash + Eq,
1222 Value: Clone,
1223 <PolicyType::MetadataType as Metadata<Value>>::EntryType: Clone,
1224{
1225 fn clone(&self) -> Self {
1226 let mut metadata = <PolicyType::MetadataType as Default>::default();
1227 <PolicyType::MetadataType as Metadata<Value>>::clone_from(
1228 &mut metadata,
1229 &self.metadata,
1230 &self.queue,
1231 );
1232 Self {
1233 queue: self.queue.clone(),
1234 metadata,
1235 capacity: self.capacity,
1236 #[cfg(feature = "statistics")]
1237 statistics: self.statistics.clone(),
1238 }
1239 }
1240}
1241
1242impl<Key: Hash + Eq, Value, PolicyType: Policy<Value>> Cache<Key, Value, PolicyType> {
1243 /// Creates a new, empty cache with the specified capacity.
1244 ///
1245 /// The cache will be able to hold at most `capacity` entries. When the
1246 /// cache is full and a new entry is inserted, the policy determines
1247 /// which existing entry will be evicted.
1248 ///
1249 /// # Arguments
1250 ///
1251 /// * `capacity` - The maximum number of entries the cache can hold. Must be
1252 /// greater than zero.
1253 ///
1254 /// # Examples
1255 ///
1256 /// ```rust
1257 /// use std::num::NonZeroUsize;
1258 ///
1259 /// use evictor::Lru;
1260 ///
1261 /// let cache: Lru<i32, String> = Lru::new(NonZeroUsize::new(100).unwrap());
1262 /// assert_eq!(cache.capacity(), 100);
1263 /// assert!(cache.is_empty());
1264 /// ```
1265 pub fn new(capacity: NonZeroUsize) -> Self {
1266 Self {
1267 queue: LinkedHashMap::with_capacity_and_hasher(capacity.get(), RandomState::default()),
1268 capacity,
1269 metadata: PolicyType::MetadataType::default(),
1270 #[cfg(feature = "statistics")]
1271 statistics: Statistics::with_capacity(capacity),
1272 }
1273 }
1274
1275 /// Returns the current statistics for the cache.
1276 #[cfg(feature = "statistics")]
1277 pub fn statistics(&self) -> Statistics {
1278 Statistics {
1279 len: self.queue.len(),
1280 capacity: self.capacity,
1281 ..self.statistics
1282 }
1283 }
1284
    /// Resets the cache statistics to their initial state.
    ///
    /// All hit/miss/insertion/eviction counters return to zero. The cache's
    /// contents and capacity are unaffected.
    #[cfg(feature = "statistics")]
    pub fn reset_statistics(&mut self) {
        self.statistics = Statistics::with_capacity(self.capacity);
    }
1290
    /// Removes all entries from the cache.
    ///
    /// After calling this method, the cache will be empty and
    /// [`len()`](Self::len) will return 0. The capacity remains unchanged.
    ///
    /// Note: when the `statistics` feature is enabled, the lifetime counters
    /// (hits, misses, insertions, evictions) are not reset by this method.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::num::NonZeroUsize;
    ///
    /// use evictor::Lru;
    ///
    /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
    /// cache.insert(1, "one".to_string());
    /// cache.insert(2, "two".to_string());
    ///
    /// assert_eq!(cache.len(), 2);
    /// cache.clear();
    /// assert_eq!(cache.len(), 0);
    /// assert!(cache.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.queue.clear();
        // Reset the policy state wholesale: any bookkeeping it kept about the
        // now-removed entries is no longer valid.
        self.metadata = PolicyType::MetadataType::default();
    }
1316
1317 /// Returns a reference to the value without updating its position in the
1318 /// cache.
1319 ///
1320 /// This method provides read-only access to a value without affecting the
1321 /// eviction order. Unlike [`get()`](Self::get), this will not mark the
1322 /// entry as touched.
1323 ///
1324 /// # Arguments
1325 ///
1326 /// * `key` - The key to look up in the cache
1327 ///
1328 /// # Returns
1329 ///
1330 /// * `Some(&Value)` if the key exists in the cache
1331 /// * `None` if the key is not found
1332 ///
1333 /// # Examples
1334 ///
1335 /// ```rust
1336 /// use std::num::NonZeroUsize;
1337 ///
1338 /// use evictor::Lru;
1339 ///
1340 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
1341 /// cache.insert(1, "one".to_string());
1342 /// cache.insert(2, "two".to_string());
1343 ///
1344 /// let previous_order = cache
1345 /// .iter()
1346 /// .map(|(k, v)| (*k, v.clone()))
1347 /// .collect::<Vec<_>>();
1348 ///
1349 /// // Peek doesn't affect eviction order
1350 /// assert_eq!(cache.peek(&1), Some(&"one".to_string()));
1351 /// assert_eq!(cache.peek(&3), None);
1352 ///
1353 /// assert_eq!(cache.into_iter().collect::<Vec<_>>(), previous_order);
1354 /// ```
1355 pub fn peek(
1356 &self,
1357 key: &Key,
1358 ) -> Option<&Value> {
1359 self.queue.get(key).map(|entry| entry.value())
1360 }
1361
1362 /// Returns a mutable reference to the value, updating the cache if the
1363 /// value is modified while borrowed.
1364 ///
1365 /// This method provides mutable access to a cached value through a smart
1366 /// `Entry` wrapper that automatically updates the cache's eviction
1367 /// order **only if** the value is actually modified during the borrow.
1368 /// Unlike [`get_mut()`](Self::get_mut), which always marks an entry as
1369 /// touched, `peek_mut` preserves the current eviction order unless changes
1370 /// are made to the value.
1371 ///
1372 /// The returned `Entry` acts as a smart pointer that:
1373 /// - Provides transparent access to the underlying value via
1374 /// `Deref`/`DerefMut`
1375 /// - Tracks whether the value was modified during the borrow
1376 /// - Automatically updates the cache's eviction order on drop if
1377 /// modifications occurred
1378 /// - Leaves the eviction order unchanged if no modifications were made
1379 ///
1380 /// # Arguments
1381 ///
1382 /// * `key` - The key to look up in the cache
1383 ///
1384 /// # Returns
1385 ///
1386 /// * `Some(`[`Entry<'_, Key, Value, PolicyType>`](crate::Entry)`)` - If the
1387 /// key exists
1388 /// * `None` - If the key is not found in the cache
1389 ///
1390 /// # Behavior
1391 ///
1392 /// ## No Modification
1393 /// If you obtain an `Entry` but don't modify the value, the cache's
1394 /// eviction order remains unchanged:
1395 ///
1396 /// ```rust
1397 /// use std::num::NonZeroUsize;
1398 ///
1399 /// use evictor::Lru;
1400 ///
1401 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
1402 /// cache.insert("A", vec![1, 2, 3]);
1403 /// cache.insert("B", vec![4, 5, 6]);
1404 ///
1405 /// let original_order: Vec<_> = cache.iter().map(|(k, _)| *k).collect();
1406 ///
1407 /// // Peek at the value without modifying it
1408 /// if let Some(entry) = cache.peek_mut(&"A") {
1409 /// let _len = entry.len(); // Read-only access
1410 /// // No modifications made
1411 /// }
1412 ///
1413 /// // Eviction order is preserved
1414 /// let new_order: Vec<_> = cache.iter().map(|(k, _)| *k).collect();
1415 /// assert_eq!(original_order, new_order);
1416 /// ```
1417 ///
1418 /// ## With Modification
1419 /// If you modify the value through the `Entry`, the cache automatically
1420 /// updates the eviction order when the `Entry` is dropped:
1421 ///
1422 /// ```rust
1423 /// use std::num::NonZeroUsize;
1424 ///
1425 /// use evictor::Lru;
1426 ///
1427 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
1428 /// cache.insert("A", vec![1, 2, 3]);
1429 /// cache.insert("B", vec![4, 5, 6]);
1430 /// cache.insert("C", vec![7, 8, 9]);
1431 ///
1432 /// // Before: eviction order is A -> B -> C (A would be evicted first)
1433 /// assert_eq!(cache.tail().unwrap().0, &"A");
1434 ///
1435 /// // Modify "A" through peek_mut
1436 /// if let Some(mut entry) = cache.peek_mut(&"A") {
1437 /// entry.push(4); // This modifies the value
1438 /// } // Entry is dropped here, triggering cache update
1439 ///
1440 /// // After: "A" is now most recently used, "B" would be evicted first
1441 /// assert_eq!(cache.tail().unwrap().0, &"B");
1442 /// ```
1443 ///
1444 /// # Performance
1445 ///
1446 /// - **Lookup**: O(1) average, O(n) worst case (same as hash map lookup)
1447 /// - **No modification**: No additional overhead beyond the lookup
1448 /// - **With modification**: O(1) cache reordering when the `Entry` is
1449 /// dropped
1450 pub fn peek_mut<'q>(
1451 &'q mut self,
1452 key: &'q Key,
1453 ) -> Option<Entry<'q, Key, Value, PolicyType>> {
1454 self.queue
1455 .get_ptr(key)
1456 .map(|ptr| Entry::new(ptr, key, self))
1457 }
1458
1459 /// Returns a reference to the entry that would be evicted next.
1460 ///
1461 /// This method provides access to the entry at the "tail" of the eviction
1462 /// order without affecting the cache state. The specific entry returned
1463 /// depends on the policy:
1464 /// - Lru: Returns the least recently used entry
1465 /// - Mru: Returns the most recently used entry
1466 /// - Lfu: Returns the least frequently used entry
1467 /// - Fifo: Returns the first inserted entry
1468 /// - Lifo: Returns the last inserted entry
1469 /// - Random: Returns a randomly selected entry
1470 /// - Sieve: Returns the next entry that would be evicted after any second
1471 /// chances. This may involve an O(n) scan across the cache to find the
1472 /// next unvisited entry. **This does not update the visited bit, so an
1473 /// eviction will need to rescan even if this method is called**.
1474 ///
1475 /// # Returns
1476 ///
1477 /// * `Some((&Key, &Value))` if the cache is not empty
1478 /// * `None` if the cache is empty
1479 ///
1480 /// # Examples
1481 ///
1482 /// ```rust
1483 /// use std::num::NonZeroUsize;
1484 ///
1485 /// use evictor::Lru;
1486 ///
1487 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
1488 /// assert_eq!(cache.tail(), None);
1489 ///
1490 /// cache.insert(1, "one".to_string());
1491 /// cache.insert(2, "two".to_string());
1492 ///
1493 /// // In Lru, tail returns the least recently used
1494 /// let (key, value) = cache.tail().unwrap();
1495 /// assert_eq!(key, &1);
1496 /// assert_eq!(value, &"one".to_string());
1497 /// ```
1498 pub fn tail(&self) -> Option<(&Key, &Value)> {
1499 let ptr = self.metadata.candidate_removal_index(&self.queue)?;
1500 self.queue
1501 .ptr_get_entry(ptr)
1502 .map(|(key, value)| (key, value.value()))
1503 }
1504
1505 /// Returns true if the cache contains the given key.
1506 ///
1507 /// This method provides a quick way to check for key existence without
1508 /// affecting the eviction order or retrieving the value.
1509 ///
1510 /// # Arguments
1511 ///
1512 /// * `key` - The key to check for existence
1513 ///
1514 /// # Examples
1515 ///
1516 /// ```rust
1517 /// use std::num::NonZeroUsize;
1518 ///
1519 /// use evictor::Lru;
1520 ///
1521 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
1522 /// cache.insert(1, "one".to_string());
1523 ///
1524 /// assert!(cache.contains_key(&1));
1525 /// assert!(!cache.contains_key(&2));
1526 /// ```
1527 pub fn contains_key(
1528 &self,
1529 key: &Key,
1530 ) -> bool {
1531 self.queue.contains_key(key)
1532 }
1533
1534 /// Gets the value for a key, or inserts it using the provided function.
1535 ///
1536 /// If the key exists, returns a reference to the existing value and marks
1537 /// it as touched. If the key doesn't exist, calls the provided function to
1538 /// create a new value, inserts it, and returns a reference to it.
1539 ///
1540 /// When inserting into a full cache, the policy determines which entry is
1541 /// evicted. The evicted entry's value is returned as the second tuple
1542 /// element when an eviction occurs, otherwise `None`.
1543 ///
1544 /// # Arguments
1545 ///
1546 /// * `key` - The key to look up or insert
1547 /// * `or_insert` - Function called to create the value if the key doesn't
1548 /// exist
1549 ///
1550 /// # Returns
1551 ///
1552 /// A tuple `(&Value, Option<Value>)` where the first element is a reference
1553 /// to the value (existing or newly inserted), and the second element is the
1554 /// evicted value if an eviction occurred.
1555 ///
1556 /// # Examples
1557 ///
1558 /// ```rust
1559 /// use std::num::NonZeroUsize;
1560 ///
1561 /// use evictor::Lru;
1562 ///
1563 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
1564 ///
1565 /// // Insert new value
1566 /// let (value, evicted) = cache.get_or_insert_with(1, |&key| format!("value_{}", key));
1567 /// assert_eq!(value, "value_1");
1568 /// assert!(evicted.is_none());
1569 ///
1570 /// // Get existing value (function not called)
1571 /// let (value, evicted) = cache.get_or_insert_with(1, |&key| format!("different_{}", key));
1572 /// assert_eq!(value, "value_1");
1573 /// assert!(evicted.is_none());
1574 /// ```
1575 pub fn get_or_insert_with(
1576 &mut self,
1577 key: Key,
1578 or_insert: impl FnOnce(&Key) -> Value,
1579 ) -> (&Value, Option<Value>) {
1580 let (value, evicted) = self.get_or_insert_with_mut(key, or_insert);
1581 (&*value, evicted)
1582 }
1583
    /// Gets the value for a key, or inserts it using the provided function.
    ///
    /// This is the mutable version of
    /// [`get_or_insert_with()`](Self::get_or_insert_with). If the key
    /// exists, returns a mutable reference to the existing value and marks
    /// it as touched. If the key doesn't exist, calls the provided function to
    /// create a new value, inserts it, and returns a mutable reference to
    /// it.
    ///
    /// When inserting into a full cache, the policy determines which entry is
    /// evicted. The evicted entry's value is returned as the second tuple
    /// element when an eviction occurs, otherwise `None`.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to look up or insert
    /// * `or_insert` - Function called to create the value if the key doesn't
    ///   exist
    ///
    /// # Returns
    ///
    /// A tuple `(&mut Value, Option<Value>)` where the first element is a
    /// mutable reference to the value (existing or newly inserted), and the
    /// second element is the evicted value if an eviction occurred.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::num::NonZeroUsize;
    ///
    /// use evictor::Lru;
    ///
    /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
    ///
    /// // Insert new value and modify it
    /// let (value, _) = cache.get_or_insert_with_mut(1, |&key| format!("value_{}", key));
    /// value.push_str("_modified");
    /// assert_eq!(cache.peek(&1), Some(&"value_1_modified".to_string()));
    ///
    /// // Get existing value and modify it further (function not called)
    /// let (value, _) = cache.get_or_insert_with_mut(1, |&key| format!("different_{}", key));
    /// value.push_str("_again");
    /// assert_eq!(cache.peek(&1), Some(&"value_1_modified_again".to_string()));
    /// ```
    pub fn get_or_insert_with_mut(
        &mut self,
        key: Key,
        or_insert: impl FnOnce(&Key) -> Value,
    ) -> (&mut Value, Option<Value>) {
        // Must be captured before the policy mutates the queue: an insert at
        // exactly full capacity is the eviction case.
        let will_evict = self.queue.len() == self.capacity.get();
        let result = PolicyType::insert_or_update_entry(
            key,
            will_evict,
            // NOTE: despite its name, the closure's first argument is the
            // *key* (it is what `or_insert` receives); `is_insert` is true
            // when the key was absent.
            |value, is_insert| {
                if is_insert {
                    InsertOrUpdateAction::InsertOrUpdate(or_insert(value))
                } else {
                    // Existing key: just mark it touched, keep the old value.
                    InsertOrUpdateAction::TouchNoUpdate
                }
            },
            &mut self.metadata,
            &mut self.queue,
        );

        match result {
            InsertionResult::Inserted(ptr, evicted) => {
                #[cfg(feature = "statistics")]
                {
                    // A fresh insert means the key was not present: count a
                    // miss alongside the insertion.
                    self.statistics.insertions += 1;
                    self.statistics.misses += 1;

                    self.statistics.evictions += u64::from(will_evict);
                }

                (self.queue[ptr].value_mut(), evicted)
            }
            InsertionResult::FoundTouchedNoUpdate(ptr) => {
                #[cfg(feature = "statistics")]
                {
                    self.statistics.hits += 1;
                }

                (self.queue[ptr].value_mut(), None)
            }
            // The closure above never returns `NoInsert`, and existing keys
            // take the `TouchNoUpdate` path, so `Updated` and
            // `NotFoundNoInsert` cannot occur here.
            _ => unreachable!("Unexpected insertion result: {:?}", result),
        }
    }
1671
1672 /// Gets a value from the cache, marking it as touched. If the key is not
1673 /// present, inserts the default value and returns a reference to it.
1674 ///
1675 /// This method will trigger eviction if the cache is at capacity and the
1676 /// key is not already present.
1677 ///
1678 /// # Type Requirements
1679 ///
1680 /// The value type must implement [`Default`].
1681 ///
1682 /// # Returns
1683 ///
1684 /// A tuple `(&Value, Option<Value>)` where the first element is a reference
1685 /// to the value (existing or newly inserted), and the second element is the
1686 /// evicted value if an eviction occurred.
1687 ///
1688 /// # Examples
1689 ///
1690 /// ```rust
1691 /// use std::num::NonZeroUsize;
1692 ///
1693 /// use evictor::Lru;
1694 ///
1695 /// let mut cache = Lru::<i32, Vec<String>>::new(NonZeroUsize::new(2).unwrap());
1696 ///
1697 /// // Get or insert default (empty Vec<String>)
1698 /// let (vec_ref, _) = cache.get_or_default(1);
1699 /// assert!(vec_ref.is_empty());
1700 ///
1701 /// // Modify the value through another method
1702 /// cache.get_mut(&1).unwrap().push("hello".to_string());
1703 ///
1704 /// // Subsequent calls return the existing value
1705 /// let (vec_ref, _) = cache.get_or_default(1);
1706 /// assert_eq!(vec_ref.len(), 1);
1707 /// assert_eq!(vec_ref[0], "hello");
1708 /// ```
1709 pub fn get_or_default(
1710 &mut self,
1711 key: Key,
1712 ) -> (&Value, Option<Value>)
1713 where
1714 Value: Default,
1715 {
1716 self.get_or_insert_with(key, |_| Value::default())
1717 }
1718
1719 /// Gets a mutable value from the cache, marking it as touched. If the key
1720 /// is not present, inserts the default value and returns a mutable
1721 /// reference to it.
1722 ///
1723 /// This method will trigger eviction if the cache is at capacity and the
1724 /// key is not already present.
1725 ///
1726 /// # Type Requirements
1727 ///
1728 /// The value type must implement [`Default`].
1729 ///
1730 /// # Returns
1731 ///
1732 /// A tuple `(&mut Value, Option<Value>)` where the first element is a
1733 /// mutable reference to the value (existing or newly inserted), and the
1734 /// second element is the evicted value if an eviction occurred.
1735 ///
1736 /// # Examples
1737 ///
1738 /// ```rust
1739 /// use std::num::NonZeroUsize;
1740 ///
1741 /// use evictor::Lru;
1742 ///
1743 /// let mut cache = Lru::<i32, Vec<String>>::new(NonZeroUsize::new(2).unwrap());
1744 ///
1745 /// // Get or insert default and modify it immediately
1746 /// let (vec_ref, _) = cache.get_or_default_mut(1);
1747 /// vec_ref.push("hello".to_string());
1748 /// vec_ref.push("world".to_string());
1749 ///
1750 /// // Verify the changes were made
1751 /// assert_eq!(cache.get(&1).unwrap().len(), 2);
1752 /// assert_eq!(cache.get(&1).unwrap()[0], "hello");
1753 ///
1754 /// // Subsequent calls return the existing mutable value
1755 /// let (vec_ref, _) = cache.get_or_default_mut(1);
1756 /// vec_ref.clear();
1757 /// assert!(cache.get(&1).unwrap().is_empty());
1758 /// ```
1759 pub fn get_or_default_mut(
1760 &mut self,
1761 key: Key,
1762 ) -> (&mut Value, Option<Value>)
1763 where
1764 Value: Default,
1765 {
1766 self.get_or_insert_with_mut(key, |_| Value::default())
1767 }
1768
    /// Inserts a key-value pair into the cache.
    ///
    /// If the key already exists, its value is updated and the entry is marked
    /// as touched. If the key is new and the cache is at capacity, the policy
    /// determines which entry is evicted to make room.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert or update
    /// * `value` - The value to associate with the key
    ///
    /// # Returns
    ///
    /// `Some(Value)` containing the evicted entry if the cache was at capacity
    /// and a new key was inserted. `None` otherwise (including the case where
    /// the key already existed and was updated in place).
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::num::NonZeroUsize;
    ///
    /// use evictor::Lru;
    ///
    /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(2).unwrap());
    ///
    /// // Insert into empty cache — nothing evicted
    /// assert_eq!(cache.insert(1, "one".to_string()), None);
    ///
    /// // Update existing key — nothing evicted
    /// assert_eq!(cache.insert(1, "new_one".to_string()), None);
    ///
    /// // Fill the cache
    /// assert_eq!(cache.insert(2, "two".to_string()), None);
    ///
    /// // Insert when at capacity — evicts based on policy
    /// assert_eq!(
    ///     cache.insert(3, "three".to_string()),
    ///     Some("new_one".to_string())
    /// );
    /// ```
    pub fn insert(
        &mut self,
        key: Key,
        value: Value,
    ) -> Option<Value> {
        // Captured before the policy mutates the queue: inserting a new key
        // while exactly full is the eviction case.
        let will_evict = self.queue.len() == self.capacity.get();
        let result = PolicyType::insert_or_update_entry(
            key,
            will_evict,
            // Unconditionally write `value`, whether the key is new or old.
            |_, _| InsertOrUpdateAction::InsertOrUpdate(value),
            &mut self.metadata,
            &mut self.queue,
        );

        match result {
            InsertionResult::Inserted(_, evicted) => {
                #[cfg(feature = "statistics")]
                {
                    self.statistics.insertions += 1;
                    self.statistics.evictions += u64::from(will_evict);
                }

                evicted
            }
            InsertionResult::Updated(_) => {
                #[cfg(feature = "statistics")]
                {
                    // NOTE(review): an update of an existing key counts as a
                    // hit, but inserting a new key does not count as a miss
                    // (unlike `get_or_insert_with_mut`) — confirm this
                    // asymmetry is intended.
                    self.statistics.hits += 1;
                }

                None
            }
            // The closure always returns `InsertOrUpdate`, so the touch/no-op
            // variants cannot be produced here.
            _ => unreachable!("Unexpected insertion result: {:?}", result),
        }
    }
1845
1846 /// Inserts a key-value pair into the cache and returns a mutable reference
1847 /// to the inserted value along with any evicted entry.
1848 ///
1849 /// This is the mutable version of [`insert()`](Self::insert). If the key
1850 /// already exists, its value is updated and the entry is marked as touched.
1851 /// If the key is new and the cache is at capacity, the policy determines
1852 /// which entry is evicted to make room.
1853 ///
1854 /// # Arguments
1855 ///
1856 /// * `key` - The key to insert or update
1857 /// * `value` - The value to associate with the key
1858 ///
1859 /// # Returns
1860 ///
1861 /// A tuple `(&mut Value, Option<Value>)` where the first element is a
1862 /// mutable reference to the inserted (or updated) value, and the second
1863 /// element is the evicted value if an eviction occurred.
1864 ///
1865 /// # Examples
1866 ///
1867 /// ```rust
1868 /// use std::num::NonZeroUsize;
1869 ///
1870 /// use evictor::Lru;
1871 ///
1872 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(2).unwrap());
1873 ///
1874 /// // Insert new value and modify it immediately
1875 /// let (value, evicted) = cache.insert_mut(1, "one".to_string());
1876 /// value.push_str("_modified");
1877 /// assert!(evicted.is_none());
1878 /// assert_eq!(cache.peek(&1), Some(&"one_modified".to_string()));
1879 ///
1880 /// // Update existing value
1881 /// let (value, evicted) = cache.insert_mut(1, "new_one".to_string());
1882 /// value.push_str("_updated");
1883 /// assert!(evicted.is_none());
1884 /// assert_eq!(cache.peek(&1), Some(&"new_one_updated".to_string()));
1885 ///
1886 /// // Insert when at capacity (evicts based on policy)
1887 /// cache.insert_mut(2, "two".to_string());
1888 /// let (value, evicted) = cache.insert_mut(3, "three".to_string());
1889 /// value.push_str("_latest");
1890 /// assert_eq!(evicted, Some("new_one_updated".to_string()));
1891 /// assert_eq!(
1892 /// cache.into_iter().collect::<Vec<_>>(),
1893 /// [(2, "two".to_string()), (3, "three_latest".to_string())]
1894 /// );
1895 /// ```
1896 pub fn insert_mut(
1897 &mut self,
1898 key: Key,
1899 value: Value,
1900 ) -> (&mut Value, Option<Value>) {
1901 let will_evict = self.queue.len() == self.capacity.get();
1902 let result = PolicyType::insert_or_update_entry(
1903 key,
1904 will_evict,
1905 |_, _| InsertOrUpdateAction::InsertOrUpdate(value),
1906 &mut self.metadata,
1907 &mut self.queue,
1908 );
1909
1910 match result {
1911 InsertionResult::Inserted(ptr, evicted) => {
1912 #[cfg(feature = "statistics")]
1913 {
1914 self.statistics.insertions += 1;
1915 self.statistics.evictions += u64::from(will_evict);
1916 }
1917
1918 (self.queue[ptr].value_mut(), evicted)
1919 }
1920 InsertionResult::Updated(ptr) => {
1921 #[cfg(feature = "statistics")]
1922 {
1923 self.statistics.hits += 1;
1924 }
1925
1926 (self.queue[ptr].value_mut(), None)
1927 }
1928 _ => unreachable!("Unexpected insertion result: {:?}", result),
1929 }
1930 }
1931
1932 /// The immutable version of `try_insert_or_update_mut`. See
1933 /// [`Self::try_insert_or_update_mut`] for details.
1934 pub fn try_insert_or_update(
1935 &mut self,
1936 key: Key,
1937 value: Value,
1938 ) -> Result<&Value, (Key, Value)> {
1939 self.try_insert_or_update_mut(key, value).map(|v| &*v)
1940 }
1941
1942 /// Attempts to insert a key-value pair into the cache without triggering
1943 /// eviction, returning a mutable reference to the inserted value.
1944 ///
1945 /// This method only inserts the entry if the cache has available capacity.
1946 /// If the cache is at capacity, the insertion fails and the key-value pair
1947 /// is returned unchanged. Unlike [`insert_mut`](Self::insert_mut), this
1948 /// method will never evict existing entries.
1949 ///
1950 /// # Arguments
1951 ///
1952 /// * `key` - The key to insert
1953 /// * `value` - The value to associate with the key
1954 ///
1955 /// # Returns
1956 ///
1957 /// * `Ok(&mut Value)` - A mutable reference to the inserted value if
1958 /// successful
1959 /// * `Err((Key, Value))` - The original key-value pair if insertion failed
1960 ///
1961 /// # Examples
1962 ///
1963 /// ```rust
1964 /// use std::num::NonZeroUsize;
1965 ///
1966 /// use evictor::Lru;
1967 ///
1968 /// let mut cache = Lru::<i32, Vec<String>>::new(NonZeroUsize::new(2).unwrap());
1969 ///
1970 /// // Successful insertion with immediate mutation
1971 /// let vec_ref = cache
1972 /// .try_insert_or_update_mut(1, vec!["hello".to_string()])
1973 /// .unwrap();
1974 /// vec_ref.push("world".to_string());
1975 /// assert_eq!(cache.get(&1).unwrap().len(), 2);
1976 ///
1977 /// // Fill remaining capacity
1978 /// cache
1979 /// .try_insert_or_update_mut(2, vec!["foo".to_string()])
1980 /// .unwrap();
1981 ///
1982 /// // Failed insertion when cache is at capacity
1983 /// let result = cache.try_insert_or_update_mut(3, vec!["bar".to_string()]);
1984 /// assert!(result.is_err());
1985 /// if let Err((key, value)) = result {
1986 /// assert_eq!(key, 3);
1987 /// assert_eq!(value, vec!["bar".to_string()]);
1988 /// }
1989 /// assert_eq!(cache.len(), 2); // Cache unchanged
1990 /// ```
1991 ///
1992 /// # Updating Existing Keys
1993 ///
1994 /// ```rust
1995 /// use std::num::NonZeroUsize;
1996 ///
1997 /// use evictor::Lru;
1998 ///
1999 /// let mut cache = Lru::<i32, Vec<i32>>::new(NonZeroUsize::new(1).unwrap());
2000 /// cache.insert(1, vec![1, 2, 3]);
2001 ///
2002 /// // Updating an existing key always succeeds and allows mutation
2003 /// let vec_ref = cache.try_insert_or_update_mut(1, vec![4, 5]).unwrap();
2004 /// vec_ref.push(6);
2005 /// assert_eq!(cache.get(&1).unwrap(), &vec![4, 5, 6]);
2006 /// ```
2007 pub fn try_insert_or_update_mut(
2008 &mut self,
2009 key: Key,
2010 value: Value,
2011 ) -> Result<&mut Value, (Key, Value)> {
2012 let will_evict = self.queue.len() == self.capacity.get();
2013 let result = PolicyType::insert_or_update_entry(
2014 key,
2015 false,
2016 |_, is_insert| {
2017 if is_insert && will_evict {
2018 InsertOrUpdateAction::NoInsert(Some(value))
2019 } else {
2020 InsertOrUpdateAction::InsertOrUpdate(value)
2021 }
2022 },
2023 &mut self.metadata,
2024 &mut self.queue,
2025 );
2026
2027 match result {
2028 InsertionResult::Inserted(ptr, _) => {
2029 #[cfg(feature = "statistics")]
2030 {
2031 self.statistics.insertions += 1;
2032 }
2033
2034 Ok(self.queue[ptr].value_mut())
2035 }
2036 InsertionResult::Updated(ptr) => {
2037 #[cfg(feature = "statistics")]
2038 {
2039 self.statistics.hits += 1;
2040 }
2041
2042 Ok(self.queue[ptr].value_mut())
2043 }
2044 InsertionResult::NotFoundNoInsert(k, v) => Err((k, v.unwrap())),
2045 _ => unreachable!("Unexpected insertion result: {:?}", result),
2046 }
2047 }
2048
2049 /// The immutable version of `try_get_or_insert_with_mut`. See
2050 /// [`Self::try_get_or_insert_with_mut`] for details.
2051 pub fn try_get_or_insert_with(
2052 &mut self,
2053 key: Key,
2054 or_insert: impl FnOnce(&Key) -> Value,
2055 ) -> Result<&Value, Key> {
2056 self.try_get_or_insert_with_mut(key, or_insert).map(|v| &*v)
2057 }
2058
2059 /// Attempts to get a mutable value from the cache or insert it using a
2060 /// closure, without triggering eviction of other entries.
2061 ///
2062 /// If the key exists, returns a mutable reference to the existing value and
2063 /// marks it as touched. If the key doesn't exist and the cache has
2064 /// available capacity, the closure is called to generate a value which
2065 /// is then inserted. If the cache is at capacity and the key doesn't
2066 /// exist, the operation fails and returns the key.
2067 ///
2068 /// # Arguments
2069 ///
2070 /// * `key` - The key to look up or insert
2071 /// * `or_insert` - A closure that generates the value if the key is not
2072 /// found
2073 ///
2074 /// # Returns
2075 ///
2076 /// * `Ok(&mut Value)` - A mutable reference to the value if successful
2077 /// * `Err(Key)` - The original key if insertion failed due to capacity
2078 ///
2079 /// # Examples
2080 ///
2081 /// ```rust
2082 /// use std::num::NonZeroUsize;
2083 ///
2084 /// use evictor::Lru;
2085 ///
2086 /// let mut cache = Lru::<i32, Vec<String>>::new(NonZeroUsize::new(2).unwrap());
2087 ///
2088 /// // Insert new value and modify it immediately
2089 /// let vec_ref = cache
2090 /// .try_get_or_insert_with_mut(1, |_| vec!["initial".to_string()])
2091 /// .unwrap();
2092 /// vec_ref.push("added".to_string());
2093 /// assert_eq!(cache.get(&1).unwrap().len(), 2);
2094 ///
2095 /// // Get existing value and modify it further
2096 /// let vec_ref = cache
2097 /// .try_get_or_insert_with_mut(1, |_| vec!["not-called".to_string()])
2098 /// .unwrap();
2099 /// vec_ref.push("more".to_string());
2100 /// assert_eq!(cache.get(&1).unwrap().len(), 3);
2101 ///
2102 /// // Fill remaining capacity
2103 /// cache
2104 /// .try_get_or_insert_with_mut(2, |_| vec!["second".to_string()])
2105 /// .unwrap();
2106 ///
2107 /// // Fail when cache is at capacity and key doesn't exist
2108 /// let result = cache.try_get_or_insert_with_mut(3, |_| vec!["third".to_string()]);
2109 /// assert!(result.is_err());
2110 /// assert_eq!(result.unwrap_err(), 3);
2111 /// assert_eq!(cache.len(), 2); // Cache unchanged
2112 /// ```
2113 ///
2114 /// # Use Cases
2115 ///
2116 /// This method is particularly useful for scenarios where you need to:
2117 /// - Initialize complex data structures in-place
2118 /// - Accumulate data in existing cache entries
2119 /// - Avoid unnecessary allocations when the entry already exists
2120 ///
2121 /// ```rust
2122 /// use std::collections::HashMap;
2123 /// use std::num::NonZeroUsize;
2124 ///
2125 /// use evictor::Lru;
2126 ///
2127 /// let mut cache = Lru::<String, HashMap<String, i32>>::new(NonZeroUsize::new(3).unwrap());
2128 ///
2129 /// // Build up a map incrementally
2130 /// let map_ref = cache
2131 /// .try_get_or_insert_with_mut("counters".to_string(), |_| HashMap::new())
2132 /// .unwrap();
2133 /// map_ref.insert("clicks".to_string(), 1);
2134 /// map_ref.insert("views".to_string(), 5);
2135 ///
2136 /// // Later, update the existing map
2137 /// let map_ref = cache
2138 /// .try_get_or_insert_with_mut(
2139 /// "counters".to_string(),
2140 /// |_| HashMap::new(), // Not called since key exists
2141 /// )
2142 /// .unwrap();
2143 /// *map_ref.get_mut("clicks").unwrap() += 1;
2144 /// ```
2145 pub fn try_get_or_insert_with_mut(
2146 &mut self,
2147 key: Key,
2148 or_insert: impl FnOnce(&Key) -> Value,
2149 ) -> Result<&mut Value, Key> {
2150 let will_evict = self.queue.len() == self.capacity.get();
2151
2152 let result = PolicyType::insert_or_update_entry(
2153 key,
2154 false,
2155 |key, is_insert| {
2156 if is_insert && !will_evict {
2157 InsertOrUpdateAction::InsertOrUpdate(or_insert(key))
2158 } else if !is_insert {
2159 InsertOrUpdateAction::TouchNoUpdate
2160 } else {
2161 InsertOrUpdateAction::NoInsert(None)
2162 }
2163 },
2164 &mut self.metadata,
2165 &mut self.queue,
2166 );
2167
2168 match result {
2169 InsertionResult::Inserted(ptr, _) => {
2170 #[cfg(feature = "statistics")]
2171 {
2172 self.statistics.insertions += 1;
2173 }
2174
2175 Ok(self.queue[ptr].value_mut())
2176 }
2177 InsertionResult::Updated(ptr) => {
2178 #[cfg(feature = "statistics")]
2179 {
2180 self.statistics.hits += 1;
2181 }
2182
2183 Ok(self.queue[ptr].value_mut())
2184 }
2185 InsertionResult::NotFoundNoInsert(k, _) => Err(k),
2186 InsertionResult::FoundTouchedNoUpdate(ptr) => {
2187 #[cfg(feature = "statistics")]
2188 {
2189 self.statistics.hits += 1;
2190 }
2191
2192 Ok(self.queue[ptr].value_mut())
2193 }
2194 }
2195 }
2196
2197 /// The immutable version of `get_mut`. See [`Self::get_mut`] for details.
2198 pub fn get(
2199 &mut self,
2200 key: &Key,
2201 ) -> Option<&Value> {
2202 self.get_mut(key).map(|v| &*v)
2203 }
2204
2205 /// Gets a value from the cache, marking it as touched.
2206 ///
2207 /// This is the mutable version of [`get()`](Self::get). If the key exists,
2208 /// returns a mutable reference to the value and updates the entry's
2209 /// position in the eviction order according to the policy. If the key
2210 /// doesn't exist, returns `None` and the cache is unchanged.
2211 ///
2212 /// # Arguments
2213 ///
2214 /// * `key` - The key to look up
2215 ///
2216 /// # Returns
2217 ///
2218 /// * `Some(&mut Value)` if the key exists
2219 /// * `None` if the key doesn't exist
2220 ///
2221 /// # Examples
2222 ///
2223 /// ```rust
2224 /// use std::num::NonZeroUsize;
2225 ///
2226 /// use evictor::Lru;
2227 ///
2228 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2229 /// cache.insert(1, "one".to_string());
2230 /// cache.insert(2, "two".to_string());
2231 ///
2232 /// // Get and modify the value, marking as recently used
2233 /// if let Some(value) = cache.get_mut(&1) {
2234 /// value.push_str("_modified");
2235 /// }
2236 /// assert_eq!(cache.peek(&1), Some(&"one_modified".to_string()));
2237 ///
2238 /// // Non-existent key returns None
2239 /// assert_eq!(cache.get_mut(&3), None);
2240 ///
2241 /// assert_eq!(
2242 /// cache.into_iter().collect::<Vec<_>>(),
2243 /// vec![(2, "two".to_string()), (1, "one_modified".to_string())]
2244 /// );
2245 /// ```
2246 pub fn get_mut(
2247 &mut self,
2248 key: &Key,
2249 ) -> Option<&mut Value> {
2250 if let Some(index) = self.queue.get_ptr(key) {
2251 PolicyType::touch_entry(index, &mut self.metadata, &mut self.queue);
2252
2253 #[cfg(feature = "statistics")]
2254 {
2255 self.statistics.hits += 1;
2256 }
2257
2258 Some(self.queue[index].value_mut())
2259 } else {
2260 #[cfg(feature = "statistics")]
2261 {
2262 self.statistics.misses += 1;
2263 }
2264 None
2265 }
2266 }
2267
2268 /// Removes and returns the entry that would be evicted next.
2269 ///
2270 /// This method removes and returns the entry at the "tail" of the eviction
2271 /// order. The specific entry removed depends on the policy:
2272 /// - Lru: Removes the least recently used entry
2273 /// - Mru: Removes the most recently used entry
2274 /// - Lfu: Removes the least frequently used entry
2275 /// - Fifo: Removes the first inserted entry
2276 /// - Lifo: Removes the last inserted entry
2277 /// - Random: Removes a randomly selected entry
2278 ///
2279 /// # Returns
2280 ///
2281 /// * `Some((Key, Value))` if the cache is not empty
2282 /// * `None` if the cache is empty
2283 ///
2284 /// # Examples
2285 ///
2286 /// ```rust
2287 /// use std::num::NonZeroUsize;
2288 ///
2289 /// use evictor::Lru;
2290 ///
2291 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2292 /// cache.insert(1, "one".to_string());
2293 /// cache.insert(2, "two".to_string());
2294 ///
2295 /// // Pop the least recently used entry
2296 /// let (key, value) = cache.pop().unwrap();
2297 /// assert_eq!(key, 1);
2298 /// assert_eq!(value, "one".to_string());
2299 /// assert_eq!(
2300 /// cache.into_iter().collect::<Vec<_>>(),
2301 /// vec![(2, "two".to_string())]
2302 /// );
2303 /// ```
2304 pub fn pop(&mut self) -> Option<(Key, Value)> {
2305 PolicyType::evict_entry(&mut self.metadata, &mut self.queue).map(|(key, entry)| {
2306 #[cfg(feature = "statistics")]
2307 {
2308 self.statistics.evictions += 1;
2309 }
2310
2311 (key, entry.into_value())
2312 })
2313 }
2314
2315 /// Removes a specific entry from the cache.
2316 ///
2317 /// If the key exists, removes it from the cache and returns the associated
2318 /// value. If the key doesn't exist, returns `None` and the cache is
2319 /// unchanged.
2320 ///
2321 /// # Arguments
2322 ///
2323 /// * `key` - The key to remove from the cache
2324 ///
2325 /// # Returns
2326 ///
2327 /// * `Some(Value)` if the key existed and was removed
2328 /// * `None` if the key was not found
2329 ///
2330 /// # Examples
2331 ///
2332 /// ```rust
2333 /// use std::num::NonZeroUsize;
2334 ///
2335 /// use evictor::Lru;
2336 ///
2337 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2338 /// cache.insert(1, "one".to_string());
2339 /// cache.insert(2, "two".to_string());
2340 ///
2341 /// assert_eq!(cache.remove(&1), Some("one".to_string()));
2342 /// assert_eq!(cache.remove(&1), None); // Already removed
2343 /// assert_eq!(
2344 /// cache.into_iter().collect::<Vec<_>>(),
2345 /// vec![(2, "two".to_string())]
2346 /// );
2347 /// ```
2348 pub fn remove(
2349 &mut self,
2350 key: &Key,
2351 ) -> Option<Value> {
2352 PolicyType::remove_key(key, &mut self.metadata, &mut self.queue)
2353 .map(|entry| entry.into_value())
2354 }
2355
2356 /// Returns true if the cache contains no entries.
2357 ///
2358 /// # Examples
2359 ///
2360 /// ```rust
2361 /// use std::num::NonZeroUsize;
2362 ///
2363 /// use evictor::Lru;
2364 ///
2365 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2366 /// assert!(cache.is_empty());
2367 ///
2368 /// cache.insert(1, "one".to_string());
2369 /// assert!(!cache.is_empty());
2370 /// ```
2371 pub fn is_empty(&self) -> bool {
2372 self.queue.is_empty()
2373 }
2374
2375 /// Returns the number of entries currently in the cache.
2376 ///
2377 /// # Examples
2378 ///
2379 /// ```rust
2380 /// use std::num::NonZeroUsize;
2381 ///
2382 /// use evictor::Lru;
2383 ///
2384 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2385 /// assert_eq!(cache.len(), 0);
2386 ///
2387 /// cache.insert(1, "one".to_string());
2388 /// cache.insert(2, "two".to_string());
2389 /// assert_eq!(cache.len(), 2);
2390 /// ```
    pub fn len(&self) -> usize {
        // The backing linked hash map tracks the entry count directly.
        self.queue.len()
    }
2394
2395 /// Returns the maximum number of entries the cache can hold.
2396 ///
2397 /// # Examples
2398 ///
2399 /// ```rust
2400 /// use std::num::NonZeroUsize;
2401 ///
2402 /// use evictor::Lru;
2403 ///
2404 /// let cache = Lru::<i32, String>::new(NonZeroUsize::new(100).unwrap());
2405 /// assert_eq!(cache.capacity(), 100);
2406 /// assert_eq!(cache.len(), 0); // Empty but has capacity for 100
2407 /// ```
    pub fn capacity(&self) -> usize {
        // `capacity` is stored as a NonZeroUsize; expose it as a plain usize.
        self.capacity.get()
    }
2411
2412 /// Sets a new capacity for the cache.
2413 ///
2414 /// If the new capacity is smaller than the current number of entries,
2415 /// entries will be evicted according to the cache's eviction policy
2416 /// until the cache size fits within the new capacity.
2417 ///
2418 /// # Arguments
2419 ///
2420 /// * `capacity` - The new maximum number of entries the cache can hold.
2421 /// Must be greater than zero.
2422 ///
2423 /// # Returns
2424 ///
2425 /// A vector of evicted entries.
2426 ///
2427 /// # Examples
2428 ///
2429 /// ```rust
2430 /// use std::num::NonZeroUsize;
2431 ///
2432 /// use evictor::Lru;
2433 ///
2434 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2435 /// cache.insert(1, "one".to_string());
2436 /// cache.insert(2, "two".to_string());
2437 /// cache.insert(3, "three".to_string());
2438 /// assert_eq!(cache.len(), 3);
2439 /// assert_eq!(cache.capacity(), 3);
2440 ///
2441 /// // Increase capacity
2442 /// cache.set_capacity(NonZeroUsize::new(5).unwrap());
2443 /// assert_eq!(cache.capacity(), 5);
2444 /// assert_eq!(cache.len(), 3); // Existing entries remain
2445 ///
2446 /// // Decrease capacity - will evict entries
2447 /// cache.set_capacity(NonZeroUsize::new(2).unwrap());
2448 /// assert_eq!(cache.capacity(), 2);
2449 /// assert_eq!(cache.len(), 2); // One entry was evicted
2450 /// ```
2451 pub fn set_capacity(
2452 &mut self,
2453 capacity: NonZeroUsize,
2454 ) -> Vec<(Key, Value)> {
2455 assert!((capacity.get() as u32) < u32::MAX - 1, "Capacity too large");
2456 let mut evicted = Vec::with_capacity(self.queue.len().saturating_sub(capacity.get()));
2457 if capacity.get() < self.queue.len() {
2458 // If new capacity is smaller than current size, we need to evict
2459 // entries until we fit
2460 while self.queue.len() > capacity.get()
2461 && let Some(kv) = self.pop()
2462 {
2463 evicted.push(kv);
2464 }
2465 }
2466
2467 self.capacity = capacity;
2468 evicted
2469 }
2470
2471 /// Evicts entries from the cache until the size is reduced to the specified
2472 /// number.
2473 ///
2474 /// This method removes entries according to the cache's eviction policy
2475 /// until the cache contains at most `n` entries. If the cache already
2476 /// has `n` or fewer entries, no eviction occurs.
2477 ///
2478 /// # Arguments
2479 ///
2480 /// * `n` - The target number of entries to keep in the cache
2481 ///
2482 /// # Returns
2483 ///
2484 /// A vector containing all evicted key-value pairs in the order they were
2485 /// removed from the cache.
2486 ///
2487 /// # Examples
2488 ///
2489 /// ```rust
2490 /// use std::num::NonZeroUsize;
2491 ///
2492 /// use evictor::Lru;
2493 ///
2494 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(5).unwrap());
2495 ///
2496 /// // Fill the cache
2497 /// for i in 1..=5 {
2498 /// cache.insert(i, format!("value{}", i));
2499 /// }
2500 /// assert_eq!(cache.len(), 5);
2501 ///
2502 /// // Evict until only 2 entries remain
2503 /// let evicted = cache.evict_to_size(2);
2504 /// assert_eq!(cache.len(), 2);
2505 /// assert_eq!(evicted.len(), 3);
2506 ///
2507 /// // The evicted entries follow the cache's eviction policy
2508 /// // For LRU, oldest entries are evicted first
2509 /// assert_eq!(evicted[0], (1, "value1".to_string()));
2510 /// assert_eq!(evicted[1], (2, "value2".to_string()));
2511 /// assert_eq!(evicted[2], (3, "value3".to_string()));
2512 ///
2513 /// // Cache now contains the most recently used entries
2514 /// assert!(cache.contains_key(&4));
2515 /// assert!(cache.contains_key(&5));
2516 /// ```
2517 ///
2518 /// # Behavior with Different Cache Sizes
2519 ///
2520 /// ```rust
2521 /// use std::num::NonZeroUsize;
2522 ///
2523 /// use evictor::Lru;
2524 ///
2525 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2526 /// cache.insert(1, "one".to_string());
2527 /// cache.insert(2, "two".to_string());
2528 ///
2529 /// // No eviction when target size >= current size
2530 /// let evicted = cache.evict_to_size(5);
2531 /// assert_eq!(evicted.len(), 0);
2532 /// assert_eq!(cache.len(), 2);
2533 ///
2534 /// // Evict to exact current size
2535 /// let evicted = cache.evict_to_size(2);
2536 /// assert_eq!(evicted.len(), 0);
2537 /// assert_eq!(cache.len(), 2);
2538 ///
2539 /// // Evict to zero (clear cache)
2540 /// let evicted = cache.evict_to_size(0);
2541 /// assert_eq!(evicted.len(), 2);
2542 /// assert_eq!(cache.len(), 0);
2543 /// ```
2544 pub fn evict_to_size(
2545 &mut self,
2546 n: usize,
2547 ) -> Vec<(Key, Value)> {
2548 let mut evicted = Vec::with_capacity(n);
2549 while self.queue.len() > n
2550 && let Some(kv) = self.pop()
2551 {
2552 evicted.push(kv);
2553 }
2554 evicted
2555 }
2556
2557 /// Evicts up to `n` entries from the cache according to the eviction
2558 /// policy.
2559 ///
2560 /// This method removes at most `n` entries from the cache, following the
2561 /// cache's eviction policy. If the cache contains fewer than `n` entries,
2562 /// all remaining entries are evicted.
2563 ///
2564 /// # Arguments
2565 ///
2566 /// * `n` - The maximum number of entries to evict
2567 ///
2568 /// # Returns
2569 ///
2570 /// A vector containing all evicted key-value pairs in the order they were
2571 /// removed from the cache. The vector will contain at most `n` entries.
2572 ///
2573 /// # Examples
2574 ///
2575 /// ```rust
2576 /// use std::num::NonZeroUsize;
2577 ///
2578 /// use evictor::Lru;
2579 ///
2580 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(5).unwrap());
2581 ///
2582 /// // Fill the cache
2583 /// for i in 1..=5 {
2584 /// cache.insert(i, format!("value{}", i));
2585 /// }
2586 /// assert_eq!(cache.len(), 5);
2587 ///
2588 /// // Evict exactly 3 entries
2589 /// let evicted = cache.evict_n_entries(3);
2590 /// assert_eq!(cache.len(), 2);
2591 /// assert_eq!(evicted.len(), 3);
2592 ///
2593 /// // For LRU, oldest entries are evicted first
2594 /// assert_eq!(evicted[0], (1, "value1".to_string()));
2595 /// assert_eq!(evicted[1], (2, "value2".to_string()));
2596 /// assert_eq!(evicted[2], (3, "value3".to_string()));
2597 ///
2598 /// // Cache retains the most recently used entries
2599 /// assert!(cache.contains_key(&4));
2600 /// assert!(cache.contains_key(&5));
2601 /// ```
2602 ///
2603 /// # Behavior with Edge Cases
2604 ///
2605 /// ```rust
2606 /// use std::num::NonZeroUsize;
2607 ///
2608 /// use evictor::Lru;
2609 ///
2610 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
2611 /// cache.insert(1, "one".to_string());
2612 /// cache.insert(2, "two".to_string());
2613 ///
2614 /// // Requesting more entries than available
2615 /// let evicted = cache.evict_n_entries(5);
2616 /// assert_eq!(evicted.len(), 2); // Only 2 entries were available
2617 /// assert_eq!(cache.len(), 0); // Cache is now empty
2618 ///
2619 /// // Evicting from empty cache
2620 /// let evicted = cache.evict_n_entries(3);
2621 /// assert_eq!(evicted.len(), 0); // No entries to evict
2622 /// assert_eq!(cache.len(), 0);
2623 ///
2624 /// // Evicting zero entries
2625 /// cache.insert(1, "one".to_string());
2626 /// let evicted = cache.evict_n_entries(0);
2627 /// assert_eq!(evicted.len(), 0); // No eviction requested
2628 /// assert_eq!(cache.len(), 1); // Cache unchanged
2629 /// ```
2630 pub fn evict_n_entries(
2631 &mut self,
2632 n: usize,
2633 ) -> Vec<(Key, Value)> {
2634 let mut evicted = Vec::with_capacity(n);
2635 for _ in 0..n {
2636 if let Some(kv) = self.pop() {
2637 evicted.push(kv);
2638 } else {
2639 break; // No more entries to evict
2640 }
2641 }
2642 evicted
2643 }
2644
2645 /// Returns an iterator over cache entries in eviction order.
2646 ///
2647 /// The iterator yields key-value pairs `(&Key, &Value)` in the order they
2648 /// would be evicted from the cache, starting with the entry that would be
2649 /// evicted first. This allows you to inspect the cache's internal ordering
2650 /// and understand which entries are most at risk of eviction. This does not
2651 /// permute the order of the cache.
2652 ///
2653 /// The specific iteration order depends on the cache's eviction policy:
2654 ///
2655 /// ## Lru (Least Recently Used)
2656 /// Iterates from **least recently used** to **most recently used**:
2657 /// - First yielded: The entry accessed longest ago (would be evicted first)
2658 /// - Last yielded: The entry accessed most recently (would be evicted last)
2659 ///
2660 /// ## Mru (Most Recently Used)
2661 /// Iterates from **most recently used** to **least recently used**:
2662 /// - First yielded: The entry accessed most recently (would be evicted
2663 /// first)
2664 /// - Last yielded: The entry accessed longest ago (would be evicted last)
2665 ///
2666 /// ## Lfu (Least Frequently Used)
2667 /// Iterates from **least frequently used** to **most frequently used**:
2668 /// - Entries are ordered by access frequency (lower frequency first)
2669 /// - Ties are broken by insertion/access order within frequency buckets
2670 /// - First yielded: Entry with lowest access count (would be evicted first)
2671 /// - Last yielded: Entry with highest access count (would be evicted last)
2672 ///
2673 /// ## Fifo (First In, First Out)
2674 /// Iterates from **first inserted** to **last inserted**:
2675 /// - Entries are ordered by insertion time
2676 /// - First yielded: Entry inserted earliest (would be evicted first)
2677 /// - Last yielded: Entry inserted most recently (would be evicted last)
2678 /// - Access patterns do not affect iteration order
2679 ///
2680 /// ## Lifo (Last In, First Out)
2681 /// Iterates from **last inserted** to **first inserted**:
2682 /// - Entries are ordered by insertion time in reverse
2683 /// - First yielded: Entry inserted most recently (would be evicted first)
2684 /// - Last yielded: Entry inserted earliest (would be evicted last)
2685 /// - Access patterns do not affect iteration order
2686 ///
2687 /// ## Random
2688 /// Iterates in **random order** in debug builds, **arbitrary order** in
2689 /// release builds:
2690 /// - Debug builds: Order is randomized for testing purposes
2691 /// - Release builds: Order is arbitrary and depends on the pattern of
2692 /// eviction and insertion.
2693 /// - The eviction order itself is always random regardless of iteration
2694 /// order
2695 ///
2696 /// ## Sieve
2697 /// Iterates in **eviction order**:
2698 /// - Starts from the current hand position (next eviction candidate)
2699 /// - Simulates the eviction scanning process: skips visited entries once,
2700 /// then includes them in order
2701 /// - Uses additional state tracking to maintain this logical ordering
2702 /// - **Higher iteration overhead** than other policies due to eviction
2703 /// simulation
2704 ///
2705 /// # Time Complexity
2706 /// - Creating the iterator: **O(1)**
2707 /// - Iterating through all entries: **O(n)** where n is the cache size
2708 /// - Each `next()` call: **O(1)** for Lru/Mru/Fifo/Lifo/Random, **O(1)
2709 /// amortized** for Lfu, **O(1) with higher constant overhead** for Sieve
2710 ///
2711 /// # Examples
2712 ///
2713 /// ## Basic Usage
2714 /// ```rust
2715 /// use std::num::NonZeroUsize;
2716 ///
2717 /// use evictor::Lru;
2718 ///
2719 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
2720 /// cache.insert("A", 1);
2721 /// cache.insert("B", 2);
2722 /// cache.insert("C", 3);
2723 ///
2724 /// // Lru: Iterates from least recently used to most recently used
2725 /// let items: Vec<_> = cache.iter().collect();
2726 /// assert_eq!(items, [(&"A", &1), (&"B", &2), (&"C", &3)]);
2727 /// ```
2728 ///
2729 /// ## After Access Pattern
2730 /// ```rust
2731 /// use std::num::NonZeroUsize;
2732 ///
2733 /// use evictor::Lru;
2734 ///
2735 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
2736 /// cache.insert("A", 1);
2737 /// cache.insert("B", 2);
2738 /// cache.insert("C", 3);
2739 ///
2740 /// // Access "A" to make it recently used
2741 /// cache.get(&"A");
2742 ///
2743 /// // Now "B" is least recently used, "A" is most recently used
2744 /// let items: Vec<_> = cache.iter().collect();
2745 /// assert_eq!(items, [(&"B", &2), (&"C", &3), (&"A", &1)]);
2746 /// ```
2747 ///
2748 /// ## Mru Policy Example
2749 /// ```rust
2750 /// use std::num::NonZeroUsize;
2751 ///
2752 /// use evictor::Mru;
2753 ///
2754 /// let mut cache = Mru::new(NonZeroUsize::new(3).unwrap());
2755 /// cache.insert("A", 1);
2756 /// cache.insert("B", 2);
2757 /// cache.insert("C", 3);
2758 ///
2759 /// // Mru: Iterates from most recently used to least recently used
2760 /// let items: Vec<_> = cache.iter().collect();
2761 /// assert_eq!(items, [(&"C", &3), (&"B", &2), (&"A", &1)]);
2762 /// ```
2763 ///
2764 /// ## Lfu Policy Example
2765 /// ```rust
2766 /// use std::num::NonZeroUsize;
2767 ///
2768 /// use evictor::Lfu;
2769 ///
2770 /// let mut cache = Lfu::new(NonZeroUsize::new(3).unwrap());
2771 /// cache.insert("A", 1);
2772 /// cache.insert("B", 2);
2773 /// cache.insert("C", 3);
2774 ///
2775 /// // Access "A" multiple times to increase its frequency
2776 /// cache.get(&"A");
2777 /// cache.get(&"A");
2778 /// cache.get(&"B"); // Access "B" once
2779 ///
2780 /// // Lfu: "C" (freq=0), "B" (freq=1), "A" (freq=2)
2781 /// let items: Vec<_> = cache.iter().collect();
2782 /// assert_eq!(items, [(&"C", &3), (&"B", &2), (&"A", &1)]);
2783 /// ```
2784 ///
2785 /// ## Consistency with `tail()`
2786 /// The first item from the iterator always matches `tail()` with the
2787 /// exception of the `Random` policy:
2788 /// ```rust
2789 /// use std::num::NonZeroUsize;
2790 ///
2791 /// use evictor::Lru;
2792 ///
2793 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
2794 /// cache.insert("A", 1);
2795 /// cache.insert("B", 2);
2796 ///
2797 /// let tail_item = cache.tail();
2798 /// let first_iter_item = cache.iter().next();
2799 /// assert_eq!(tail_item, first_iter_item);
2800 /// ```
2801 pub fn iter(&self) -> impl Iterator<Item = (&Key, &Value)> {
2802 PolicyType::iter(&self.metadata, &self.queue)
2803 }
2804
2805 /// Returns an iterator over the keys in the cache in eviction order.
2806 ///
2807 /// This method returns an iterator that yields references to all keys
2808 /// currently stored in the cache. The keys are returned in the same order
2809 /// as [`iter()`](Self::iter), which follows the cache's eviction policy.
2810 ///
2811 /// # Eviction Order
2812 ///
2813 /// The iteration order depends on the cache's eviction policy:
2814 /// - **Lru**: Keys from least recently used to most recently used
2815 /// - **Mru**: Keys from most recently used to least recently used
2816 /// - **Lfu**: Keys from least frequently used to most frequently used
2817 /// - **Fifo**: Keys from first inserted to last inserted
2818 /// - **Lifo**: Keys from last inserted to first inserted
2819 /// - **Random**: Keys in random order (debug) or arbitrary order (release)
2820 /// - **Sieve**: Keys in eviction order.
2821 ///
2822 /// # Examples
2823 ///
2824 /// ```rust
2825 /// use std::num::NonZeroUsize;
2826 ///
2827 /// use evictor::Lru;
2828 ///
2829 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
2830 /// cache.insert("A", 1);
2831 /// cache.insert("B", 2);
2832 /// cache.insert("C", 3);
2833 ///
2834 /// // Get keys in Lru order (least recently used first)
2835 /// let keys: Vec<_> = cache.keys().collect();
2836 /// assert_eq!(keys, [&"A", &"B", &"C"]);
2837 /// ```
2838 pub fn keys(&self) -> impl Iterator<Item = &Key> {
2839 PolicyType::iter(&self.metadata, &self.queue).map(|(k, _)| k)
2840 }
2841
2842 /// Returns an iterator over the values in the cache in eviction order.
2843 ///
2844 /// This method returns an iterator that yields references to all values
2845 /// currently stored in the cache. The values are returned in the same order
2846 /// as [`iter()`](Self::iter), which follows the cache's eviction policy.
2847 ///
2848 /// # Eviction Order
2849 ///
2850 /// The iteration order depends on the cache's eviction policy:
2851 /// - **Lru**: Values from least recently used to most recently used
2852 /// - **Mru**: Values from most recently used to least recently used
2853 /// - **Lfu**: Values from least frequently used to most frequently used
2854 /// - **Fifo**: Values from first inserted to last inserted
2855 /// - **Lifo**: Values from last inserted to first inserted
2856 /// - **Random**: Values in random order (debug) or arbitrary order
2857 /// (release)
2858 /// - **Sieve**: Values following eviction order.
2859 ///
2860 /// # Examples
2861 ///
2862 /// ```rust
2863 /// use std::num::NonZeroUsize;
2864 ///
2865 /// use evictor::Lru;
2866 ///
2867 /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
2868 /// cache.insert("A", 1);
2869 /// cache.insert("B", 2);
2870 /// cache.insert("C", 3);
2871 ///
2872 /// // Get values in Lru order (least recently used first)
2873 /// let values: Vec<_> = cache.values().collect();
2874 /// assert_eq!(values, [&1, &2, &3]);
2875 /// ```
2876 pub fn values(&self) -> impl Iterator<Item = &Value> {
2877 PolicyType::iter(&self.metadata, &self.queue).map(|(_, v)| v)
2878 }
2879
    /// Retains only the entries for which the predicate returns `true`.
    ///
    /// This iterates in **arbitrary order**. I.e. unlike `iter()`, the order of
    /// items you see is not guaranteed to match the eviction order.
    ///
    /// This method removes all entries from the cache for which the provided
    /// closure returns `false`. The predicate is called with a reference to
    /// each key and a mutable reference to each value, allowing for both
    /// filtering based on key/value content and in-place modification of
    /// retained values.
    ///
    /// The operation preserves the relative order of retained entries according
    /// to their original insertion order. This operation does not perturb the
    /// eviction order, with the exception of entries that are removed.
    ///
    /// # Arguments
    ///
    /// * `f` - A closure that takes `(&Key, &mut Value)` and returns `true` for
    ///   entries that should be kept, `false` for entries that should be
    ///   removed
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::num::NonZeroUsize;
    ///
    /// use evictor::Lru;
    ///
    /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(5).unwrap());
    /// cache.insert(1, "apple".to_string());
    /// cache.insert(2, "banana".to_string());
    /// cache.insert(3, "cherry".to_string());
    /// cache.insert(4, "date".to_string());
    ///
    /// // Keep only entries where the key is even
    /// cache.retain(|&key, _value| key % 2 == 0);
    /// assert_eq!(
    ///     cache.into_iter().collect::<Vec<_>>(),
    ///     [(2, "banana".to_string()), (4, "date".to_string())]
    /// );
    /// ```
    pub fn retain<F>(
        &mut self,
        mut f: F,
    ) where
        F: FnMut(&Key, &mut Value) -> bool,
    {
        // Cursor-based walk over the backing linked hash map, starting at the
        // list head.
        let mut next = self.queue.head_cursor_mut();
        while let Some((k, v)) = next.current_mut() {
            if !f(k, v.value_mut()) {
                // Remove through the policy so policy metadata stays
                // consistent with the map contents.
                let (ptr, _) = PolicyType::remove_entry(
                    next.ptr().unwrap(),
                    &mut self.metadata,
                    &mut self.queue,
                );
                if let Some(ptr) = ptr {
                    // Re-seat the cursor at the pointer handed back by the
                    // policy (presumably the removed entry's successor —
                    // TODO confirm against `Policy::remove_entry`).
                    next = self.queue.ptr_cursor_mut(ptr);
                } else {
                    // No pointer returned: nothing left to visit.
                    break;
                }
            } else {
                next.move_next();
                // Stop once the cursor wraps back to the head; the cursor API
                // appears to be circular, so reaching the head again means
                // every entry has been visited.
                if next.at_head() {
                    break;
                }
            }
        }
    }
2948
    /// Shrinks the internal storage to fit the current number of entries.
    ///
    /// This only releases excess backing storage held by the underlying map;
    /// it does not change the cache's configured capacity and does not evict
    /// or reorder any entries.
    pub fn shrink_to_fit(&mut self) {
        self.queue.shrink_to_fit();
    }
2953}
2954
impl<K: Hash + Eq + std::fmt::Debug, Value: std::fmt::Debug, PolicyType: Policy<Value>>
    Cache<K, Value, PolicyType>
{
    /// Delegates to the policy's internal consistency validation over the
    /// cache's metadata and backing map.
    ///
    /// Crate-internal debugging aid: only compiled in debug builds with the
    /// `internal-debugging` feature enabled, and hidden from rustdoc.
    #[doc(hidden)]
    #[cfg(all(debug_assertions, feature = "internal-debugging"))]
    pub fn debug_validate(&self) {
        PolicyType::debug_validate(&self.metadata, &self.queue);
    }
}
2964
impl<K: Hash + Eq, V, PolicyType: Policy<V>> IntoIterator for Cache<K, V, PolicyType> {
    type IntoIter = PolicyType::IntoIter<K>;
    type Item = (K, V);

    /// Converts the cache into an iterator over key-value pairs.
    ///
    /// This method implements the standard library's `IntoIterator` trait,
    /// allowing caches to be consumed and converted into iterators. The
    /// iteration order follows the eviction policy's ordering:
    ///
    /// - **Lru**: From least recently used to most recently used
    /// - **Mru**: From most recently used to least recently used
    /// - **Lfu**: From least frequently used to most frequently used
    /// - **Fifo**: From first inserted to last inserted
    /// - **Lifo**: From last inserted to first inserted
    /// - **Random**: In random order (debug) or arbitrary order (release)
    /// - **Sieve**: Following eviction order.
    ///
    /// # Returns
    /// An iterator that yields `(K, V)` pairs in eviction order, consuming the
    /// cache.
    ///
    /// # Examples
    /// ```rust
    /// use std::num::NonZeroUsize;
    ///
    /// use evictor::Lru;
    ///
    /// let mut cache = Lru::new(NonZeroUsize::new(3).unwrap());
    /// cache.insert(1, "first");
    /// cache.insert(2, "second");
    /// cache.insert(3, "third");
    ///
    /// // Convert to iterator and collect all pairs
    /// let pairs: Vec<_> = cache.into_iter().collect();
    /// assert_eq!(pairs, [(1, "first"), (2, "second"), (3, "third")]);
    /// ```
    ///
    /// # Performance
    /// - Time complexity: O(n) where n is the number of cached items
    /// - Space complexity: O(n) for the iterator's internal state
    ///
    /// # Note
    /// This is a consuming operation - the cache cannot be used after calling
    /// `into_iter()`. Use `iter()` if you need to iterate without consuming the
    /// cache.
    fn into_iter(self) -> Self::IntoIter {
        // The policy owns the iteration strategy; hand it the metadata and
        // backing map by value so the iterator can yield owned pairs.
        PolicyType::into_iter(self.metadata, self.queue)
    }
}
3015
impl<Key: Hash + Eq, Value, PolicyType: Policy<Value>> std::iter::FromIterator<(Key, Value)>
    for Cache<Key, Value, PolicyType>
{
    /// Creates a new cache from an iterator of key-value pairs.
    ///
    /// This method consumes the iterator and constructs a new cache, with a
    /// **capacity of at least 1** and at most the number of items in the
    /// iterator.
    ///
    /// Duplicate keys act as updates (the later value wins), so they count
    /// toward the final capacity only once.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (Key, Value)>,
    {
        // Build the backing map without any capacity bound; the capacity is
        // derived from the final length below, so nothing is evicted while
        // collecting.
        let mut queue: LinkedHashMap<
            Key,
            <PolicyType::MetadataType as Metadata<Value>>::EntryType,
            RandomState,
        > = LinkedHashMap::default();
        let mut metadata = PolicyType::MetadataType::default();

        // Counters seeding the initial `Statistics` snapshot.
        #[cfg(feature = "statistics")]
        let mut hits = 0;
        #[cfg(feature = "statistics")]
        let mut insertions = 0;

        for (key, value) in iter {
            // NOTE(review): the `false` flag presumably disables touch
            // semantics during bulk construction — confirm against
            // `Policy::insert_or_update_entry`.
            let result = PolicyType::insert_or_update_entry(
                key,
                false,
                |_, _| InsertOrUpdateAction::InsertOrUpdate(value),
                &mut metadata,
                &mut queue,
            );
            match result {
                // New key: counted as an insertion.
                InsertionResult::Inserted(_, _) => {
                    #[cfg(feature = "statistics")]
                    {
                        insertions += 1;
                    }
                }
                // Duplicate key: the closure replaced the value; counted as a
                // hit for statistics purposes.
                InsertionResult::Updated(_) => {
                    #[cfg(feature = "statistics")]
                    {
                        hits += 1;
                    }
                }
                // The closure always returns `InsertOrUpdate`, so the
                // no-insert / no-update variants cannot occur here.
                _ => unreachable!("Unexpected insertion result: {:?}", result),
            }
        }

        // Guarantee a non-zero capacity even when the iterator was empty.
        let capacity = NonZeroUsize::new(queue.len().max(1)).unwrap();
        Self {
            #[cfg(feature = "statistics")]
            statistics: Statistics {
                hits,
                insertions,
                evictions: 0,
                len: queue.len(),
                capacity,
                misses: 0,
            },
            queue,
            capacity,
            metadata,
        }
    }
}
3082
3083impl<K: Hash + Eq, V, PolicyType: Policy<V>> Extend<(K, V)> for Cache<K, V, PolicyType> {
3084 /// Extends the cache with key-value pairs from an iterator.
3085 ///
3086 /// This method consumes the iterator and inserts each `(Key, Value)` pair
3087 /// into the cache. If the cache is at capacity, the policy determines which
3088 /// entry is evicted to make room for new entries.
3089 ///
3090 /// # Arguments
3091 ///
3092 /// * `iter` - An iterator of `(Key, Value)` pairs to insert into the cache
3093 ///
3094 /// # Examples
3095 ///
3096 /// ```rust
3097 /// use std::num::NonZeroUsize;
3098 ///
3099 /// use evictor::Lru;
3100 ///
3101 /// let mut cache = Lru::<i32, String>::new(NonZeroUsize::new(3).unwrap());
3102 /// cache.extend(vec![(1, "one".to_string()), (2, "two".to_string())]);
3103 /// assert_eq!(cache.len(), 2);
3104 /// ```
3105 fn extend<I>(
3106 &mut self,
3107 iter: I,
3108 ) where
3109 I: IntoIterator<Item = (K, V)>,
3110 {
3111 for (key, value) in iter {
3112 self.insert(key, value);
3113 }
3114 }
3115}
3116
#[cfg(test)]
mod tests {
    //! Smoke tests checking that trait implementations on the stored
    //! key/value types (`Clone`, `Debug`, ...) propagate to the `Cache`
    //! wrapper for every eviction policy.

    use std::num::NonZeroUsize;

    use ntest::timeout;

    use crate::Cache;
    use crate::FifoPolicy;
    use crate::LfuPolicy;
    use crate::LifoPolicy;
    use crate::LruPolicy;
    use crate::MruPolicy;
    #[cfg(feature = "rand")]
    use crate::RandomPolicy;
    use crate::SievePolicy;

    #[test]
    #[timeout(1000)]
    fn test_lru_cache_clone() {
        let mut original = Cache::<i32, String, LruPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_lfu_cache_clone() {
        let mut original = Cache::<i32, String, LfuPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_mru_cache_clone() {
        let mut original = Cache::<i32, String, MruPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_fifo_cache_clone() {
        let mut original = Cache::<i32, String, FifoPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_lifo_cache_clone() {
        let mut original = Cache::<i32, String, LifoPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    #[cfg(feature = "rand")]
    fn test_random_cache_clone() {
        let mut original = Cache::<i32, String, RandomPolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_sieve_cache_clone() {
        let mut original = Cache::<i32, String, SievePolicy>::new(NonZeroUsize::new(3).unwrap());
        original.insert(1, "one".to_string());
        original.insert(2, "two".to_string());

        let copy = original.clone();
        assert_eq!(copy.len(), 2);
        assert_eq!(copy.peek(&1), Some(&"one".to_string()));
        assert_eq!(copy.peek(&2), Some(&"two".to_string()));
    }

    #[test]
    #[timeout(1000)]
    fn test_lfu_cache_debug() {
        let mut cache = Cache::<i32, String, LfuPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    fn test_lru_cache_debug() {
        let mut cache = Cache::<i32, String, LruPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    fn test_mru_cache_debug() {
        let mut cache = Cache::<i32, String, MruPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    fn test_fifo_cache_debug() {
        let mut cache = Cache::<i32, String, FifoPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    fn test_lifo_cache_debug() {
        let mut cache = Cache::<i32, String, LifoPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    #[cfg(feature = "rand")]
    fn test_random_cache_debug() {
        let mut cache = Cache::<i32, String, RandomPolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }

    #[test]
    #[timeout(1000)]
    fn test_sieve_cache_debug() {
        let mut cache = Cache::<i32, String, SievePolicy>::new(NonZeroUsize::new(3).unwrap());
        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let rendered = format!("{:?}", cache);
        assert!(rendered.contains("Cache"));
        assert!(rendered.contains("\"one\""));
        assert!(rendered.contains("\"two\""));
    }
}
3376
#[cfg(all(test, feature = "statistics"))]
mod statistics_tests {
    //! Exercises the `statistics` feature: hit/miss/insertion/eviction
    //! counters must track every cache operation, survive cloning, and be
    //! resettable on demand.

    use ntest::timeout;

    use super::*;

    /// Convenience constructor: an LRU cache of `i32 -> String` with the
    /// given capacity.
    fn lru(capacity: usize) -> Cache<i32, String, LruPolicy> {
        Cache::new(NonZeroUsize::new(capacity).unwrap())
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_initialization() {
        // A freshly constructed cache reports all counters at zero.
        let cache = lru(3);
        let snapshot = cache.statistics();

        assert_eq!(snapshot.len, 0);
        assert_eq!(snapshot.capacity.get(), 3);
        assert_eq!(snapshot.hits, 0);
        assert_eq!(snapshot.misses, 0);
        assert_eq!(snapshot.insertions, 0);
        assert_eq!(snapshot.evictions, 0);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_insertions_and_misses() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        // Plain inserts count as insertions only.
        let snapshot = cache.statistics();
        assert_eq!(snapshot.len, 3);
        assert_eq!(snapshot.insertions, 3);
        assert_eq!(snapshot.misses, 0);
        assert_eq!(snapshot.hits, 0);
        assert_eq!(snapshot.evictions, 0);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_hits() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        // Each successful `get` is one hit.
        cache.get(&1);
        cache.get(&2);
        cache.get(&1);

        let snapshot = cache.statistics();
        assert_eq!(snapshot.hits, 3);
        assert_eq!(snapshot.misses, 0);
        assert_eq!(snapshot.insertions, 2);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_misses_on_failed_get() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());

        // Lookups for absent keys count as misses.
        cache.get(&999);
        cache.get(&888);

        let snapshot = cache.statistics();
        assert_eq!(snapshot.hits, 0);
        assert_eq!(snapshot.misses, 2);
        assert_eq!(snapshot.insertions, 1);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_evictions() {
        let mut cache = lru(2);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());

        let before = cache.statistics();
        assert_eq!(before.evictions, 0);

        // Inserting into a full cache evicts exactly one entry.
        cache.insert(3, "three".to_string());

        let after = cache.statistics();
        assert_eq!(after.evictions, 1);
        assert_eq!(after.len, 2);
        assert_eq!(after.insertions, 3);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_get_or_insert_with() {
        let mut cache = lru(3);

        // A miss that inserts counts as both a miss and an insertion.
        cache.get_or_insert_with(1, |_| "one".to_string());
        cache.get_or_insert_with(2, |_| "two".to_string());

        let snapshot = cache.statistics();
        assert_eq!(snapshot.insertions, 2);
        assert_eq!(snapshot.misses, 2);
        assert_eq!(snapshot.hits, 0);

        // Hitting an existing key counts as a hit only.
        cache.get_or_insert_with(1, |_| "one_new".to_string());

        let snapshot = cache.statistics();
        assert_eq!(snapshot.insertions, 2);
        assert_eq!(snapshot.misses, 2);
        assert_eq!(snapshot.hits, 1);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_try_insert() {
        let mut cache = lru(2);

        // Fresh keys insert successfully.
        assert!(cache.try_insert_or_update(1, "one".to_string()).is_ok());
        assert!(cache.try_insert_or_update(2, "two".to_string()).is_ok());

        let snapshot = cache.statistics();
        assert_eq!(snapshot.insertions, 2);

        // Updating an existing key succeeds but is counted as a hit rather
        // than a new insertion.
        assert_eq!(
            cache.try_insert_or_update(1, "one_new".to_string()),
            Ok(&"one_new".to_string())
        );

        let snapshot = cache.statistics();
        assert_eq!(snapshot.insertions, 2);
        assert_eq!(snapshot.hits, 1);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_reset() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.get(&1);
        cache.get(&999); // miss

        let before = cache.statistics();
        assert!(before.hits > 0);
        assert!(before.misses > 0);
        assert!(before.insertions > 0);

        cache.reset_statistics();

        // Counters return to zero; len/capacity reflect live cache state.
        let after = cache.statistics();
        assert_eq!(after.hits, 0);
        assert_eq!(after.misses, 0);
        assert_eq!(after.insertions, 0);
        assert_eq!(after.evictions, 0);
        assert_eq!(after.len, 2);
        assert_eq!(after.capacity.get(), 3);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_remove() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        let before = cache.statistics();
        assert_eq!(before.len, 3);

        cache.remove(&2);

        // Removal only changes `len`; historical counters are untouched.
        let after = cache.statistics();
        assert_eq!(after.len, 2);
        assert_eq!(after.insertions, before.insertions);
        assert_eq!(after.hits, before.hits);
        assert_eq!(after.misses, before.misses);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_clear() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.get(&1);

        let before = cache.statistics();
        assert!(before.len > 0);

        cache.clear();

        // Clearing empties the cache but keeps historical counters.
        let after = cache.statistics();
        assert_eq!(after.len, 0);
        assert_eq!(after.insertions, before.insertions);
        assert_eq!(after.hits, before.hits);
        assert_eq!(after.misses, before.misses);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_set_capacity() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.insert(3, "three".to_string());

        let before = cache.statistics();
        assert_eq!(before.capacity.get(), 3);
        assert_eq!(before.evictions, 0);

        // Shrinking capacity from 3 to 1 must evict two entries.
        let evicted = cache.set_capacity(NonZeroUsize::new(1).unwrap());
        assert_eq!(evicted.len(), 2);

        let after = cache.statistics();
        assert_eq!(after.capacity.get(), 1);
        assert_eq!(after.len, 1);
        assert_eq!(after.evictions, 2);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_with_different_policies() {
        // LFU: the untouched key 2 is least frequently used, so it is the one
        // evicted when a third key arrives.
        let mut lfu = Cache::<i32, String, LfuPolicy>::new(NonZeroUsize::new(2).unwrap());
        lfu.insert(1, "one".to_string());
        lfu.insert(2, "two".to_string());
        lfu.get(&1);
        lfu.insert(3, "three".to_string());

        let lfu_stats = lfu.statistics();
        assert_eq!(lfu_stats.evictions, 1);
        assert_eq!(lfu_stats.hits, 1);
        assert_eq!(lfu_stats.insertions, 3);

        // MRU: touching key 1 makes it most recently used, so it is the one
        // evicted by the next insertion.
        let mut mru = Cache::<i32, String, MruPolicy>::new(NonZeroUsize::new(2).unwrap());
        mru.insert(1, "one".to_string());
        mru.insert(2, "two".to_string());
        mru.get(&1);
        mru.insert(3, "three".to_string());

        let mru_stats = mru.statistics();
        assert_eq!(mru_stats.evictions, 1);
        assert_eq!(mru_stats.hits, 1);
        assert_eq!(mru_stats.insertions, 3);
    }

    #[test]
    #[timeout(1000)]
    fn test_statistics_clone() {
        let mut cache = lru(3);

        cache.insert(1, "one".to_string());
        cache.insert(2, "two".to_string());
        cache.get(&1);
        cache.get(&999); // miss

        // Cloning copies the statistics along with the entries.
        let cloned = cache.clone();
        let original = cache.statistics();
        let copied = cloned.statistics();

        assert_eq!(original.len, copied.len);
        assert_eq!(original.capacity, copied.capacity);
        assert_eq!(original.hits, copied.hits);
        assert_eq!(original.misses, copied.misses);
        assert_eq!(original.insertions, copied.insertions);
        assert_eq!(original.evictions, copied.evictions);
    }
}