// sieve_cache/sharded.rs

1use crate::SieveCache;
2use std::borrow::Borrow;
3use std::collections::hash_map::DefaultHasher;
4use std::fmt;
5use std::hash::{Hash, Hasher};
6use std::sync::{Arc, Mutex, MutexGuard, PoisonError};
7
/// Default number of shards used when one is not specified explicitly.
///
/// This value balances per-shard memory overhead against lock contention
/// for most practical workloads.
const DEFAULT_SHARDS: usize = 16;
12
13/// A thread-safe implementation of `SieveCache` that uses multiple shards to reduce contention.
14///
15/// This provides better concurrency than `SyncSieveCache` by splitting the cache into multiple
16/// independent shards, each protected by its own mutex. Operations on different shards can
17/// proceed in parallel, which can significantly improve throughput in multi-threaded environments.
18///
19/// # How Sharding Works
20///
21/// The cache is partitioned into multiple independent segments (shards) based on the hash of the key.
22/// Each shard has its own mutex, allowing operations on different shards to proceed concurrently.
23/// This reduces lock contention compared to a single-mutex approach, especially under high
24/// concurrency with access patterns distributed across different keys.
25///
26/// # Thread Safety Characteristics
27///
28/// ## Key-Based Locking
29///
30/// - Operations on the same key will always map to the same shard and are serialized
31/// - Operations on different keys that hash to different shards can execute concurrently
32/// - Hash-based sharding ensures good distribution of keys across shards by default
33///
34/// ## Concurrent Operations
35///
36/// - Single-key operations only lock one shard, allowing high concurrency
37/// - Multi-key operations (like `clear()`, `keys()`, `values()`) access shards sequentially
38/// - No operation holds locks on multiple shards simultaneously, preventing deadlocks
39///
40/// ## Consistency Model
41///
42/// - **Per-Key Consistency**: Operations on individual keys are atomic and isolated
43/// - **Cross-Shard Consistency**: There are no guarantees of a globally consistent view across shards
44/// - **Iteration Methods**: Methods like `keys()`, `values()`, and `entries()` create point-in-time snapshots that may not reflect concurrent modifications
45/// - **Bulk Operations**: Methods like `retain()`, `for_each_value()`, and `for_each_entry()` operate on each shard independently
46///
47/// ## Callback Handling
48///
49/// - `get_mut`: Executes callbacks while holding only the lock for the relevant shard
50/// - `with_key_lock`: Provides exclusive access to a specific shard for atomic multi-step operations
51/// - `for_each_value`, `for_each_entry`: Process one shard at a time, with lock released between shards
52///
53/// # Performance Considerations
54///
55/// - For workloads with high concurrency across different keys, `ShardedSieveCache` typically offers
56///   better performance than `SyncSieveCache`
57/// - The benefits increase with the number of concurrent threads and the distribution of keys
58/// - More shards reduce contention but increase memory overhead
59/// - If most operations target the same few keys (which map to the same shards), the benefits of
60///   sharding may be limited
/// - The default of 16 shards provides a good balance for most workloads, but can be customized
62///
63/// # Examples
64///
65/// ```
66/// # use sieve_cache::ShardedSieveCache;
67/// // Create a cache with default number of shards (16)
68/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
69///
70/// // Or specify a custom number of shards
71/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
72///
73/// cache.insert("key1".to_string(), "value1".to_string());
74/// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
75/// ```
76///
77/// ## Multi-Threaded Example
78///
79/// ```
80/// # use sieve_cache::ShardedSieveCache;
81/// # use std::thread;
82/// # use std::sync::Arc;
83///
84/// // Create a sharded cache with 8 shards
85/// let cache = Arc::new(ShardedSieveCache::with_shards(1000, 8).unwrap());
86///
87/// // Spawn 4 threads that each insert 100 items
88/// let mut handles = vec![];
89/// for t in 0..4 {
90///     let cache_clone = Arc::clone(&cache);
91///     let handle = thread::spawn(move || {
92///         for i in 0..100 {
93///             let key = format!("thread{}key{}", t, i);
94///             let value = format!("value{}_{}", t, i);
95///             // Different threads can insert concurrently
96///             cache_clone.insert(key, value);
97///         }
98///     });
99///     handles.push(handle);
100/// }
101///
102/// // Wait for all threads to complete
103/// for handle in handles {
104///     handle.join().unwrap();
105/// }
106///
107/// assert_eq!(cache.len(), 400); // All 400 items were inserted
108/// ```
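///
/// ## Key-to-Shard Mapping
///
/// Every key is owned by exactly one shard. The sketch below (using the public
/// `num_shards()` and `get_shard_by_index()` helpers and locking each shard directly)
/// only illustrates that property; typical code never needs to inspect shards:
///
/// ```
/// # use sieve_cache::ShardedSieveCache;
/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(100, 4).unwrap();
/// cache.insert("key".to_string(), "value".to_string());
///
/// // Only the shard that owns the key holds an entry, so exactly one shard is non-empty.
/// let non_empty_shards = (0..cache.num_shards())
///     .filter(|&i| {
///         let shard = cache.get_shard_by_index(i).unwrap();
///         let guard = shard.lock().unwrap();
///         !guard.is_empty()
///     })
///     .count();
/// assert_eq!(non_empty_shards, 1);
/// ```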
109#[derive(Clone)]
110pub struct ShardedSieveCache<K, V>
111where
112    K: Eq + Hash + Clone + Send + Sync,
113    V: Send + Sync,
114{
115    /// Array of shard mutexes, each containing a separate SieveCache instance
116    shards: Vec<Arc<Mutex<SieveCache<K, V>>>>,
117    /// Number of shards in the cache - kept as a separate field for quick access
118    num_shards: usize,
119}
120
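// SAFETY: all shared state lives behind `Arc<Mutex<SieveCache<K, V>>>`, so every access
// to a shard is serialized by its mutex, and the `K: Send + Sync` / `V: Send + Sync`
// bounds ensure the cached keys and values may themselves be shared across threads.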
121unsafe impl<K, V> Sync for ShardedSieveCache<K, V>
122where
123    K: Eq + Hash + Clone + Send + Sync,
124    V: Send + Sync,
125{
126}
127
128impl<K, V> Default for ShardedSieveCache<K, V>
129where
130    K: Eq + Hash + Clone + Send + Sync,
131    V: Send + Sync,
132{
133    /// Creates a new sharded cache with a default capacity of 100 entries and default number of shards.
134    ///
135    /// # Panics
136    ///
137    /// Panics if the underlying `ShardedSieveCache::new()` returns an error, which should never
138    /// happen for a non-zero capacity.
139    ///
140    /// # Examples
141    ///
142    /// ```
143    /// # use sieve_cache::ShardedSieveCache;
144    /// # use std::default::Default;
145    /// let cache: ShardedSieveCache<String, u32> = Default::default();
146    /// assert!(cache.capacity() >= 100); // Due to shard distribution, might be slightly larger
147    /// assert_eq!(cache.num_shards(), 16); // Default shard count
148    /// ```
149    fn default() -> Self {
150        Self::new(100).expect("Failed to create cache with default capacity")
151    }
152}
153
154impl<K, V> fmt::Debug for ShardedSieveCache<K, V>
155where
156    K: Eq + Hash + Clone + Send + Sync + fmt::Debug,
157    V: Send + Sync + fmt::Debug,
158{
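    /// Formats the cache using its aggregate statistics (total capacity, current
    /// length, and shard count) rather than listing its entries.
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// let output = format!("{:?}", cache);
    /// assert!(output.contains("ShardedSieveCache"));
    /// assert!(output.contains("num_shards"));
    /// ```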
159    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
160        f.debug_struct("ShardedSieveCache")
161            .field("capacity", &self.capacity())
162            .field("len", &self.len())
163            .field("num_shards", &self.num_shards)
164            .finish()
165    }
166}
167
168impl<K, V> IntoIterator for ShardedSieveCache<K, V>
169where
170    K: Eq + Hash + Clone + Send + Sync,
171    V: Clone + Send + Sync,
172{
173    type Item = (K, V);
174    type IntoIter = std::vec::IntoIter<(K, V)>;
175
176    /// Converts the cache into an iterator over its key-value pairs.
177    ///
178    /// This collects all entries into a Vec and returns an iterator over that Vec.
179    ///
180    /// # Examples
181    ///
182    /// ```
183    /// # use sieve_cache::ShardedSieveCache;
184    /// # use std::collections::HashMap;
185    /// let cache = ShardedSieveCache::new(100).unwrap();
186    /// cache.insert("key1".to_string(), "value1".to_string());
187    /// cache.insert("key2".to_string(), "value2".to_string());
188    ///
189    /// // Collect into a HashMap
190    /// let map: HashMap<_, _> = cache.into_iter().collect();
191    /// assert_eq!(map.len(), 2);
192    /// assert_eq!(map.get("key1"), Some(&"value1".to_string()));
193    /// ```
194    fn into_iter(self) -> Self::IntoIter {
195        self.entries().into_iter()
196    }
197}
198
199#[cfg(feature = "sync")]
200impl<K, V> From<crate::SyncSieveCache<K, V>> for ShardedSieveCache<K, V>
201where
202    K: Eq + Hash + Clone + Send + Sync,
203    V: Clone + Send + Sync,
204{
205    /// Creates a new sharded cache from an existing `SyncSieveCache`.
206    ///
207    /// This allows for upgrading a standard thread-safe cache to a more scalable sharded version.
208    ///
209    /// # Examples
210    ///
211    /// ```
212    /// # use sieve_cache::{SyncSieveCache, ShardedSieveCache};
213    /// let sync_cache = SyncSieveCache::new(100).unwrap();
214    /// sync_cache.insert("key".to_string(), "value".to_string());
215    ///
216    /// // Convert to sharded version with default sharding
217    /// let sharded_cache = ShardedSieveCache::from(sync_cache);
218    /// assert_eq!(sharded_cache.get(&"key".to_string()), Some("value".to_string()));
219    /// ```
220    fn from(sync_cache: crate::SyncSieveCache<K, V>) -> Self {
221        // Create a new sharded cache with the same capacity
222        let capacity = sync_cache.capacity();
223        let sharded = Self::new(capacity).expect("Failed to create sharded cache");
224
225        // Transfer all entries
226        for (key, value) in sync_cache.entries() {
227            sharded.insert(key, value);
228        }
229
230        sharded
231    }
232}
233
234impl<K, V> ShardedSieveCache<K, V>
235where
236    K: Eq + Hash + Clone + Send + Sync,
237    V: Send + Sync,
238{
239    /// Creates a new sharded cache with the specified capacity, using the default number of shards.
240    ///
241    /// The capacity will be divided evenly among the shards. The default shard count (16)
242    /// provides a good balance between concurrency and memory overhead for most workloads.
243    ///
244    /// # Errors
245    ///
246    /// Returns an error if the capacity is zero.
247    ///
248    /// # Examples
249    ///
250    /// ```
251    /// # use sieve_cache::ShardedSieveCache;
252    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
253    /// assert_eq!(cache.num_shards(), 16); // Default shard count
254    /// ```
255    pub fn new(capacity: usize) -> Result<Self, &'static str> {
256        Self::with_shards(capacity, DEFAULT_SHARDS)
257    }
258
259    /// Creates a new sharded cache with the specified capacity and number of shards.
260    ///
261    /// The capacity will be divided among the shards, distributing any remainder to ensure
262    /// the total capacity is at least the requested amount.
263    ///
264    /// # Arguments
265    ///
266    /// * `capacity` - The total capacity of the cache
267    /// * `num_shards` - The number of shards to divide the cache into
268    ///
269    /// # Errors
270    ///
271    /// Returns an error if either the capacity or number of shards is zero.
272    ///
273    /// # Performance Impact
274    ///
275    /// - More shards can reduce contention in highly concurrent environments
276    /// - However, each shard has memory overhead, so very high shard counts may
277    ///   increase memory usage without providing additional performance benefits
278    /// - For most workloads, a value between 8 and 32 shards is optimal
279    ///
280    /// # Examples
281    ///
282    /// ```
283    /// # use sieve_cache::ShardedSieveCache;
284    /// // Create a cache with 8 shards
285    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::with_shards(1000, 8).unwrap();
286    /// assert_eq!(cache.num_shards(), 8);
287    /// assert!(cache.capacity() >= 1000);
288    /// ```
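    ///
    /// A capacity that does not divide evenly is still fully allocated: the remainder
    /// goes to the first shards, as described above. A small sketch:
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// // 10 / 3 = 3 per shard with a remainder of 1, so the shard capacities are 4, 3 and 3.
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::with_shards(10, 3).unwrap();
    /// assert_eq!(cache.num_shards(), 3);
    /// assert!(cache.capacity() >= 10);
    /// ```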
289    pub fn with_shards(capacity: usize, num_shards: usize) -> Result<Self, &'static str> {
290        if capacity == 0 {
291            return Err("capacity must be greater than 0");
292        }
293        if num_shards == 0 {
294            return Err("number of shards must be greater than 0");
295        }
296
297        // Calculate per-shard capacity
298        let base_capacity_per_shard = capacity / num_shards;
299        let remaining = capacity % num_shards;
300
301        let mut shards = Vec::with_capacity(num_shards);
302        for i in 0..num_shards {
303            // Distribute the remaining capacity to the first 'remaining' shards
304            let shard_capacity = if i < remaining {
305                base_capacity_per_shard + 1
306            } else {
307                base_capacity_per_shard
308            };
309
310            // Ensure at least capacity 1 per shard
311            let shard_capacity = std::cmp::max(1, shard_capacity);
312            shards.push(Arc::new(Mutex::new(SieveCache::new(shard_capacity)?)));
313        }
314
315        Ok(Self { shards, num_shards })
316    }
317
318    /// Returns the shard index for a given key.
319    ///
320    /// This function computes a hash of the key and uses it to determine which shard
321    /// the key belongs to.
322    #[inline]
323    fn get_shard_index<Q>(&self, key: &Q) -> usize
324    where
325        Q: Hash + ?Sized,
326    {
327        let mut hasher = DefaultHasher::new();
328        key.hash(&mut hasher);
329        let hash = hasher.finish() as usize;
330        hash % self.num_shards
331    }
332
333    /// Returns a reference to the shard for a given key.
334    ///
335    /// This is an internal helper method that maps a key to its corresponding shard.
336    #[inline]
337    fn get_shard<Q>(&self, key: &Q) -> &Arc<Mutex<SieveCache<K, V>>>
338    where
339        Q: Hash + ?Sized,
340    {
341        let index = self.get_shard_index(key);
342        &self.shards[index]
343    }
344
345    /// Returns a locked reference to the shard for a given key.
346    ///
347    /// This is an internal helper method to abstract away the lock handling and error recovery.
348    /// If the mutex is poisoned due to a panic in another thread, the poison error is
349    /// recovered from by calling `into_inner()` to access the underlying data.
350    #[inline]
351    fn locked_shard<Q>(&self, key: &Q) -> MutexGuard<'_, SieveCache<K, V>>
352    where
353        Q: Hash + ?Sized,
354    {
355        self.get_shard(key)
356            .lock()
357            .unwrap_or_else(PoisonError::into_inner)
358    }
359
360    /// Returns the total capacity of the cache (sum of all shard capacities).
361    ///
362    /// # Examples
363    ///
364    /// ```
365    /// # use sieve_cache::ShardedSieveCache;
366    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::new(1000).unwrap();
367    /// assert!(cache.capacity() >= 1000);
368    /// ```
369    pub fn capacity(&self) -> usize {
370        self.shards
371            .iter()
372            .map(|shard| {
373                shard
374                    .lock()
375                    .unwrap_or_else(PoisonError::into_inner)
376                    .capacity()
377            })
378            .sum()
379    }
380
381    /// Returns the total number of entries in the cache (sum of all shard lengths).
382    ///
383    /// Note that this operation requires acquiring a lock on each shard, so it may
384    /// cause temporary contention if called frequently in a high-concurrency environment.
385    ///
386    /// # Examples
387    ///
388    /// ```
389    /// # use sieve_cache::ShardedSieveCache;
390    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
391    ///
392    /// cache.insert("key1".to_string(), "value1".to_string());
393    /// cache.insert("key2".to_string(), "value2".to_string());
394    ///
395    /// assert_eq!(cache.len(), 2);
396    /// ```
397    pub fn len(&self) -> usize {
398        self.shards
399            .iter()
400            .map(|shard| shard.lock().unwrap_or_else(PoisonError::into_inner).len())
401            .sum()
402    }
403
404    /// Returns `true` when no values are currently cached in any shard.
405    ///
406    /// Note that this operation requires acquiring a lock on each shard, so it may
407    /// cause temporary contention if called frequently in a high-concurrency environment.
408    ///
409    /// # Examples
410    ///
411    /// ```
412    /// # use sieve_cache::ShardedSieveCache;
413    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
414    /// assert!(cache.is_empty());
415    ///
416    /// cache.insert("key".to_string(), "value".to_string());
417    /// assert!(!cache.is_empty());
418    /// ```
419    pub fn is_empty(&self) -> bool {
420        self.shards.iter().all(|shard| {
421            shard
422                .lock()
423                .unwrap_or_else(PoisonError::into_inner)
424                .is_empty()
425        })
426    }
427
428    /// Returns `true` if there is a value in the cache mapped to by `key`.
429    ///
430    /// This operation only locks the specific shard containing the key.
431    ///
432    /// # Examples
433    ///
434    /// ```
435    /// # use sieve_cache::ShardedSieveCache;
436    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
437    /// cache.insert("key".to_string(), "value".to_string());
438    ///
439    /// assert!(cache.contains_key(&"key".to_string()));
440    /// assert!(!cache.contains_key(&"missing".to_string()));
441    /// ```
442    pub fn contains_key<Q>(&self, key: &Q) -> bool
443    where
444        Q: Hash + Eq + ?Sized,
445        K: Borrow<Q>,
446    {
447        let mut guard = self.locked_shard(key);
448        guard.contains_key(key)
449    }
450
451    /// Gets a clone of the value in the cache mapped to by `key`.
452    ///
453    /// If no value exists for `key`, this returns `None`. This operation only locks
454    /// the specific shard containing the key.
455    ///
456    /// # Note
457    ///
458    /// This method returns a clone of the value rather than a reference, since the
459    /// mutex guard would be dropped after this method returns. This means that
460    /// `V` must implement `Clone`.
461    ///
462    /// # Examples
463    ///
464    /// ```
465    /// # use sieve_cache::ShardedSieveCache;
466    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
467    /// cache.insert("key".to_string(), "value".to_string());
468    ///
469    /// assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
470    /// assert_eq!(cache.get(&"missing".to_string()), None);
471    /// ```
472    pub fn get<Q>(&self, key: &Q) -> Option<V>
473    where
474        Q: Hash + Eq + ?Sized,
475        K: Borrow<Q>,
476        V: Clone,
477    {
478        let mut guard = self.locked_shard(key);
479        guard.get(key).cloned()
480    }
481
482    /// Gets a mutable reference to the value in the cache mapped to by `key` via a callback function.
483    ///
484    /// If no value exists for `key`, the callback will not be invoked and this returns `false`.
485    /// Otherwise, the callback is invoked with a mutable reference to the value and this returns `true`.
486    /// This operation only locks the specific shard containing the key.
487    ///
488    /// This operation marks the entry as "visited" in the SIEVE algorithm,
489    /// which affects eviction decisions.
490    ///
491    /// # Thread Safety
492    ///
    /// This method stays safe even if the callback re-enters this cache. It works by:
494    ///
495    /// 1. Cloning the value with a short-lived lock on only the relevant shard
496    /// 2. Releasing the lock during callback execution
497    /// 3. Re-acquiring the lock to update the original value
498    ///
499    /// This approach means:
500    ///
501    /// - The callback can safely make other calls to the same cache instance
502    /// - The value can be modified by other threads during the callback execution
503    /// - Changes are not visible to other threads until the callback completes
504    /// - Last writer wins if multiple threads modify the same key concurrently
505    ///
    /// Compared to `SyncSieveCache::get_mut()`:
507    /// - Only locks a single shard rather than the entire cache
508    /// - Reduces contention when operating on different keys in different shards
509    ///
510    /// # Examples
511    ///
512    /// ```
513    /// # use sieve_cache::ShardedSieveCache;
514    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
515    /// cache.insert("key".to_string(), "value".to_string());
516    ///
517    /// // Modify the value in-place
518    /// cache.get_mut(&"key".to_string(), |value| {
519    ///     *value = "new_value".to_string();
520    /// });
521    ///
522    /// assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
523    /// ```
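    ///
    /// Because the shard lock is released while the callback runs, the callback may
    /// itself call back into the cache. A minimal sketch of that reentrancy
    /// (reading a second key from inside the callback):
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("a".to_string(), "1".to_string());
    /// cache.insert("b".to_string(), "2".to_string());
    ///
    /// cache.get_mut(&"a".to_string(), |value| {
    ///     let other = cache.get(&"b".to_string()).unwrap();
    ///     value.push_str(&other);
    /// });
    ///
    /// assert_eq!(cache.get(&"a".to_string()), Some("12".to_string()));
    /// ```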
524    pub fn get_mut<Q, F>(&self, key: &Q, f: F) -> bool
525    where
526        Q: Hash + Eq + ?Sized,
527        K: Borrow<Q>,
528        F: FnOnce(&mut V),
529        V: Clone,
530    {
531        // Get a clone of the value if it exists, to avoid deadlocks
532        // if the callback tries to use other methods on this cache
533        let value_opt = {
534            let mut guard = self.locked_shard(key);
535            if let Some(v) = guard.get_mut(key) {
536                // Clone the value before releasing the lock
537                Some(v.clone())
538            } else {
539                None
540            }
541        };
542
543        if let Some(mut value) = value_opt {
544            // Execute the callback on the cloned value without holding the lock
545            f(&mut value);
546
547            // Update the value back to the cache
548            let mut guard = self.locked_shard(key);
549            if let Some(original) = guard.get_mut(key) {
550                *original = value;
551                true
552            } else {
553                // Key was removed while callback was executing
554                false
555            }
556        } else {
557            false
558        }
559    }
560
561    /// Maps `key` to `value` in the cache, possibly evicting old entries from the appropriate shard.
562    ///
563    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
564    /// updated. This operation only locks the specific shard containing the key.
565    ///
566    /// # Examples
567    ///
568    /// ```
569    /// # use sieve_cache::ShardedSieveCache;
570    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
571    ///
572    /// // Insert a new key
573    /// assert!(cache.insert("key1".to_string(), "value1".to_string()));
574    ///
575    /// // Update an existing key
576    /// assert!(!cache.insert("key1".to_string(), "updated_value".to_string()));
577    /// ```
578    pub fn insert(&self, key: K, value: V) -> bool {
579        let mut guard = self.locked_shard(&key);
580        guard.insert(key, value)
581    }
582
583    /// Removes the cache entry mapped to by `key`.
584    ///
585    /// This method returns the value removed from the cache. If `key` did not map to any value,
586    /// then this returns `None`. This operation only locks the specific shard containing the key.
587    ///
588    /// # Examples
589    ///
590    /// ```
591    /// # use sieve_cache::ShardedSieveCache;
592    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
593    /// cache.insert("key".to_string(), "value".to_string());
594    ///
595    /// // Remove an existing key
596    /// assert_eq!(cache.remove(&"key".to_string()), Some("value".to_string()));
597    ///
598    /// // Attempt to remove a missing key
599    /// assert_eq!(cache.remove(&"key".to_string()), None);
600    /// ```
601    pub fn remove<Q>(&self, key: &Q) -> Option<V>
602    where
603        K: Borrow<Q>,
604        Q: Eq + Hash + ?Sized,
605    {
606        let mut guard = self.locked_shard(key);
607        guard.remove(key)
608    }
609
610    /// Removes and returns a value from the cache that was not recently accessed.
611    ///
612    /// This method tries to evict from each shard in turn until it finds a value to evict.
613    /// If no suitable value exists in any shard, this returns `None`.
614    ///
615    /// # Note
616    ///
617    /// This implementation differs from the non-sharded version in that it checks each shard
618    /// in sequence until it finds a suitable entry to evict. This may not provide the globally
619    /// optimal eviction decision across all shards, but it avoids the need to lock all shards
620    /// simultaneously.
621    ///
622    /// # Examples
623    ///
624    /// ```
625    /// # use sieve_cache::ShardedSieveCache;
626    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(10, 2).unwrap();
627    ///
628    /// // Fill the cache
629    /// for i in 0..15 {
630    ///     cache.insert(format!("key{}", i), format!("value{}", i));
631    /// }
632    ///
633    /// // Should be able to evict something
634    /// assert!(cache.evict().is_some());
635    /// ```
636    pub fn evict(&self) -> Option<V> {
637        // Try each shard in turn
638        for shard in &self.shards {
639            let result = shard.lock().unwrap_or_else(PoisonError::into_inner).evict();
640            if result.is_some() {
641                return result;
642            }
643        }
644        None
645    }
646
647    /// Removes all entries from the cache.
648    ///
649    /// This operation clears all stored values across all shards and resets the cache to an empty state,
650    /// while maintaining the original capacity.
651    ///
652    /// # Examples
653    ///
654    /// ```
655    /// # use sieve_cache::ShardedSieveCache;
656    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
657    /// cache.insert("key1".to_string(), "value1".to_string());
658    /// cache.insert("key2".to_string(), "value2".to_string());
659    ///
660    /// assert_eq!(cache.len(), 2);
661    ///
662    /// cache.clear();
663    /// assert_eq!(cache.len(), 0);
664    /// assert!(cache.is_empty());
665    /// ```
666    pub fn clear(&self) {
667        for shard in &self.shards {
668            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
669            guard.clear();
670        }
671    }
672
673    /// Returns an iterator over all keys in the cache.
674    ///
675    /// The order of keys is not specified and should not be relied upon.
676    ///
677    /// # Examples
678    ///
679    /// ```
680    /// # use sieve_cache::ShardedSieveCache;
681    /// # use std::collections::HashSet;
682    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
683    /// cache.insert("key1".to_string(), "value1".to_string());
684    /// cache.insert("key2".to_string(), "value2".to_string());
685    ///
686    /// let keys: HashSet<_> = cache.keys().into_iter().collect();
687    /// assert_eq!(keys.len(), 2);
688    /// assert!(keys.contains(&"key1".to_string()));
689    /// assert!(keys.contains(&"key2".to_string()));
690    /// ```
691    pub fn keys(&self) -> Vec<K> {
692        let mut all_keys = Vec::new();
693        for shard in &self.shards {
694            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
695            all_keys.extend(guard.keys().cloned());
696        }
697        all_keys
698    }
699
700    /// Returns all values in the cache.
701    ///
702    /// The order of values is not specified and should not be relied upon.
703    ///
704    /// # Examples
705    ///
706    /// ```
707    /// # use sieve_cache::ShardedSieveCache;
708    /// # use std::collections::HashSet;
709    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
710    /// cache.insert("key1".to_string(), "value1".to_string());
711    /// cache.insert("key2".to_string(), "value2".to_string());
712    ///
713    /// let values: HashSet<_> = cache.values().into_iter().collect();
714    /// assert_eq!(values.len(), 2);
715    /// assert!(values.contains(&"value1".to_string()));
716    /// assert!(values.contains(&"value2".to_string()));
717    /// ```
718    pub fn values(&self) -> Vec<V>
719    where
720        V: Clone,
721    {
722        let mut all_values = Vec::new();
723        for shard in &self.shards {
724            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
725            all_values.extend(guard.values().cloned());
726        }
727        all_values
728    }
729
730    /// Returns all key-value pairs in the cache.
731    ///
732    /// The order of pairs is not specified and should not be relied upon.
733    ///
734    /// # Examples
735    ///
736    /// ```
737    /// # use sieve_cache::ShardedSieveCache;
738    /// # use std::collections::HashMap;
739    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
740    /// cache.insert("key1".to_string(), "value1".to_string());
741    /// cache.insert("key2".to_string(), "value2".to_string());
742    ///
743    /// let entries: HashMap<_, _> = cache.entries().into_iter().collect();
744    /// assert_eq!(entries.len(), 2);
745    /// assert_eq!(entries.get(&"key1".to_string()), Some(&"value1".to_string()));
746    /// assert_eq!(entries.get(&"key2".to_string()), Some(&"value2".to_string()));
747    /// ```
748    pub fn entries(&self) -> Vec<(K, V)>
749    where
750        V: Clone,
751    {
752        let mut all_entries = Vec::new();
753        for shard in &self.shards {
754            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
755            all_entries.extend(guard.iter().map(|(k, v)| (k.clone(), v.clone())));
756        }
757        all_entries
758    }
759
760    /// Applies a function to all values in the cache across all shards.
761    ///
762    /// This method marks all entries as visited.
763    ///
764    /// # Examples
765    ///
766    /// ```
767    /// # use sieve_cache::ShardedSieveCache;
768    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
769    /// cache.insert("key1".to_string(), "value1".to_string());
770    /// cache.insert("key2".to_string(), "value2".to_string());
771    ///
772    /// // Update all values by appending text
773    /// cache.for_each_value(|value| {
774    ///     *value = format!("{}_updated", value);
775    /// });
776    ///
777    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_updated".to_string()));
778    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2_updated".to_string()));
779    /// ```
780    pub fn for_each_value<F>(&self, mut f: F)
781    where
782        F: FnMut(&mut V),
783    {
784        for shard in &self.shards {
785            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
786            guard.values_mut().for_each(&mut f);
787        }
788    }
789
790    /// Applies a function to all key-value pairs in the cache across all shards.
791    ///
792    /// This method marks all entries as visited.
793    ///
794    /// # Examples
795    ///
796    /// ```
797    /// # use sieve_cache::ShardedSieveCache;
798    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
799    /// cache.insert("key1".to_string(), "value1".to_string());
800    /// cache.insert("key2".to_string(), "value2".to_string());
801    ///
802    /// // Update all values associated with keys containing '1'
803    /// cache.for_each_entry(|(key, value)| {
804    ///     if key.contains('1') {
805    ///         *value = format!("{}_special", value);
806    ///     }
807    /// });
808    ///
809    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_special".to_string()));
810    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2".to_string()));
811    /// ```
812    pub fn for_each_entry<F>(&self, mut f: F)
813    where
814        F: FnMut((&K, &mut V)),
815    {
816        for shard in &self.shards {
817            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
818            guard.iter_mut().for_each(&mut f);
819        }
820    }
821
822    /// Gets exclusive access to a specific shard based on the key.
823    ///
824    /// This can be useful for performing multiple operations atomically on entries
825    /// that share the same shard. Note that only keys that hash to the same shard
826    /// can be manipulated within a single transaction.
827    ///
828    /// # Thread Safety
829    ///
830    /// This method provides a way to perform atomic operations on a subset of the cache:
831    ///
832    /// - Acquires a lock on a single shard determined by the key's hash
833    /// - Provides exclusive access to that shard for the duration of the callback
834    /// - Allows multiple operations to be performed atomically within the shard
    /// - Operations on different shards remain concurrent (unlike `SyncSieveCache::with_lock()`)
836    ///
837    /// Important thread safety considerations:
838    ///
839    /// - Only keys that hash to the same shard can be accessed atomically in a single call
840    /// - Operations affect only one shard, providing partial atomicity (limited to that shard)
841    /// - The callback should not attempt to acquire other locks to avoid deadlocks
842    /// - Long-running callbacks will block other threads from accessing the same shard
843    ///
844    /// This method provides a good balance between atomicity and concurrency:
845    /// it allows atomic multi-step operations while still permitting operations
846    /// on other shards to proceed concurrently.
847    ///
848    /// # Examples
849    ///
850    /// ```
851    /// # use sieve_cache::ShardedSieveCache;
852    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
853    ///
854    /// // Perform multiple operations atomically
855    /// cache.with_key_lock(&"foo", |shard| {
856    ///     // All operations within this closure have exclusive access to the shard
857    ///     shard.insert("key1".to_string(), "value1".to_string());
858    ///     shard.insert("key2".to_string(), "value2".to_string());
859    ///     
860    ///     // We can check state mid-transaction
861    ///     assert!(shard.contains_key(&"key1".to_string()));
862    /// });
863    /// ```
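    ///
    /// A common use is an atomic "check then update" on a single key. The sketch below
    /// keeps a counter without any window for another thread to interleave between the
    /// lookup and the insert:
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, u64> = ShardedSieveCache::new(100).unwrap();
    ///
    /// let hits = cache.with_key_lock(&"hits".to_string(), |shard| {
    ///     if let Some(count) = shard.get_mut(&"hits".to_string()) {
    ///         *count += 1;
    ///         *count
    ///     } else {
    ///         shard.insert("hits".to_string(), 1);
    ///         1
    ///     }
    /// });
    ///
    /// assert_eq!(hits, 1);
    /// assert_eq!(cache.get(&"hits".to_string()), Some(1));
    /// ```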
864    pub fn with_key_lock<Q, F, T>(&self, key: &Q, f: F) -> T
865    where
866        Q: Hash + ?Sized,
867        F: FnOnce(&mut SieveCache<K, V>) -> T,
868    {
869        let mut guard = self.locked_shard(key);
870        f(&mut guard)
871    }
872
873    /// Returns the number of shards in this cache.
874    ///
875    /// # Examples
876    ///
877    /// ```
878    /// # use sieve_cache::ShardedSieveCache;
879    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
880    /// assert_eq!(cache.num_shards(), 32);
881    /// ```
882    pub fn num_shards(&self) -> usize {
883        self.num_shards
884    }
885
886    /// Gets a specific shard by index.
887    ///
888    /// This is mainly useful for advanced use cases and maintenance operations.
889    /// Returns `None` if the index is out of bounds.
890    ///
891    /// # Examples
892    ///
893    /// ```
894    /// # use sieve_cache::ShardedSieveCache;
895    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 8).unwrap();
896    ///
897    /// // Access a valid shard
898    /// assert!(cache.get_shard_by_index(0).is_some());
899    ///
900    /// // Out of bounds index
901    /// assert!(cache.get_shard_by_index(100).is_none());
902    /// ```
903    pub fn get_shard_by_index(&self, index: usize) -> Option<&Arc<Mutex<SieveCache<K, V>>>> {
904        self.shards.get(index)
905    }
906
907    /// Retains only the elements specified by the predicate.
908    ///
909    /// Removes all entries for which the provided function returns `false`.
910    /// The elements are visited in arbitrary, unspecified order, across all shards.
911    /// This operation processes each shard individually, acquiring and releasing locks as it goes.
912    ///
913    /// # Thread Safety
914    ///
915    /// This method has the following thread safety characteristics:
916    ///
917    /// - It first collects all entries across all shards into a snapshot
918    /// - The lock for each shard is acquired and released independently
919    /// - The predicate is evaluated outside any lock
920    /// - Individual removals lock only the specific shard containing the key
921    ///
922    /// This design ensures:
923    /// - Minimal lock contention during predicate evaluation
924    /// - No deadlocks due to holding multiple shard locks simultaneously
925    /// - Operations on different shards can proceed concurrently
926    ///
927    /// However, this also means:
928    /// - The snapshot might not reflect concurrent modifications
929    /// - There's no guarantee of cross-shard atomicity or consistency
930    /// - Race conditions can occur if entries are modified between collection and removal
931    ///
932    /// # Examples
933    ///
934    /// ```
935    /// # use sieve_cache::ShardedSieveCache;
936    /// let cache: ShardedSieveCache<String, i32> = ShardedSieveCache::new(100).unwrap();
937    /// cache.insert("key1".to_string(), 100);
938    /// cache.insert("key2".to_string(), 200);
939    /// cache.insert("key3".to_string(), 300);
940    ///
941    /// // Keep only entries with values greater than 150
942    /// cache.retain(|_, value| *value > 150);
943    ///
944    /// assert_eq!(cache.len(), 2);
945    /// assert!(!cache.contains_key(&"key1".to_string()));
946    /// assert!(cache.contains_key(&"key2".to_string()));
947    /// assert!(cache.contains_key(&"key3".to_string()));
948    /// ```
949    pub fn retain<F>(&self, mut f: F)
950    where
951        F: FnMut(&K, &V) -> bool,
952        V: Clone,
953    {
954        // First, collect all entries so we can check them without holding locks
955        let entries = self.entries();
956
957        // Now go through all entries and determine which ones to remove
958        for (key, value) in entries {
959            // Check the predicate outside the lock - using cloned data
960            if !f(&key, &value) {
961                // The predicate returned false, so remove this entry
962                self.remove(&key);
963            }
964        }
965    }
966}
967
968#[cfg(test)]
969mod tests {
970    use super::*;
971    use std::sync::Arc;
972    use std::thread;
973    use std::time::Duration;
974
975    #[test]
976    fn test_sharded_cache_basics() {
977        let cache = ShardedSieveCache::new(100).unwrap();
978
979        // Insert a value
980        assert!(cache.insert("key1".to_string(), "value1".to_string()));
981
982        // Read back the value
983        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
984
985        // Check contains_key
986        assert!(cache.contains_key(&"key1".to_string()));
987
988        // Check capacity and length
989        assert!(cache.capacity() >= 100); // May be slightly higher due to rounding up per shard
990        assert_eq!(cache.len(), 1);
991
992        // Remove a value
993        assert_eq!(
994            cache.remove(&"key1".to_string()),
995            Some("value1".to_string())
996        );
997        assert_eq!(cache.len(), 0);
998        assert!(cache.is_empty());
999    }
1000
1001    #[test]
1002    fn test_custom_shard_count() {
1003        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
1004        assert_eq!(cache.num_shards(), 4);
1005
1006        for i in 0..10 {
1007            let key = format!("key{}", i);
1008            let value = format!("value{}", i);
1009            cache.insert(key, value);
1010        }
1011
1012        assert_eq!(cache.len(), 10);
1013    }
1014
1015    #[test]
1016    fn test_parallel_access() {
1017        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
1018        let mut handles = vec![];
1019
1020        // Spawn 8 threads that each insert 100 items
1021        for t in 0..8 {
1022            let cache_clone = Arc::clone(&cache);
1023            let handle = thread::spawn(move || {
1024                for i in 0..100 {
1025                    let key = format!("thread{}key{}", t, i);
1026                    let value = format!("value{}_{}", t, i);
1027                    cache_clone.insert(key, value);
1028                }
1029            });
1030            handles.push(handle);
1031        }
1032
1033        // Wait for all threads to complete
1034        for handle in handles {
1035            handle.join().unwrap();
1036        }
1037
1038        // Verify total item count
1039        assert_eq!(cache.len(), 800);
1040
1041        // Check a few random keys
1042        assert_eq!(
1043            cache.get(&"thread0key50".to_string()),
1044            Some("value0_50".to_string())
1045        );
1046        assert_eq!(
1047            cache.get(&"thread7key99".to_string()),
1048            Some("value7_99".to_string())
1049        );
1050    }
1051
1052    #[test]
1053    fn test_with_key_lock() {
1054        let cache = ShardedSieveCache::new(100).unwrap();
1055
1056        // Perform multiple operations atomically on keys that map to the same shard
1057        cache.with_key_lock(&"test_key", |shard| {
1058            shard.insert("key1".to_string(), "value1".to_string());
1059            shard.insert("key2".to_string(), "value2".to_string());
1060            shard.insert("key3".to_string(), "value3".to_string());
1061        });
1062
1063        assert_eq!(cache.len(), 3);
1064    }
1065
1066    #[test]
1067    fn test_eviction() {
1068        let cache = ShardedSieveCache::with_shards(10, 2).unwrap();
1069
1070        // Fill the cache
1071        for i in 0..15 {
1072            let key = format!("key{}", i);
1073            let value = format!("value{}", i);
1074            cache.insert(key, value);
1075        }
1076
1077        // The cache should not exceed its capacity
1078        assert!(cache.len() <= 10);
1079
1080        // We should be able to evict items
1081        let evicted = cache.evict();
1082        assert!(evicted.is_some());
1083    }
1084
1085    #[test]
1086    fn test_contention() {
1087        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
1088        let mut handles = vec![];
1089
        // Create 16 distinct keys; with 16 shards they will typically spread across different shards
1091        let keys: Vec<String> = (0..16).map(|i| format!("shard_key_{}", i)).collect();
1092
1093        // Spawn 16 threads, each hammering a different key
1094        for i in 0..16 {
1095            let cache_clone = Arc::clone(&cache);
1096            let key = keys[i].clone();
1097
1098            let handle = thread::spawn(move || {
1099                for j in 0..1000 {
1100                    cache_clone.insert(key.clone(), format!("value_{}", j));
1101                    let _ = cache_clone.get(&key);
1102
1103                    // Small sleep to make contention more likely
1104                    if j % 100 == 0 {
1105                        thread::sleep(Duration::from_micros(1));
1106                    }
1107                }
1108            });
1109
1110            handles.push(handle);
1111        }
1112
1113        // Wait for all threads to complete
1114        for handle in handles {
1115            handle.join().unwrap();
1116        }
1117
1118        // All keys should still be present
1119        for key in keys {
1120            assert!(cache.contains_key(&key));
1121        }
1122    }
1123
1124    #[test]
1125    fn test_get_mut() {
1126        let cache = ShardedSieveCache::new(100).unwrap();
1127        cache.insert("key".to_string(), "value".to_string());
1128
1129        // Modify the value in-place
1130        let modified = cache.get_mut(&"key".to_string(), |value| {
1131            *value = "new_value".to_string();
1132        });
1133        assert!(modified);
1134
1135        // Verify the value was updated
1136        assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
1137
1138        // Try to modify a non-existent key
1139        let modified = cache.get_mut(&"missing".to_string(), |_| {
1140            panic!("This should not be called");
1141        });
1142        assert!(!modified);
1143    }
1144
1145    #[test]
1146    fn test_get_mut_concurrent() {
1147        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());
1148
1149        // Insert initial values
1150        for i in 0..10 {
1151            cache.insert(format!("key{}", i), 0);
1152        }
1153
1154        let mut handles = vec![];
1155
1156        // Spawn 5 threads that modify values concurrently
1157        for _ in 0..5 {
1158            let cache_clone = Arc::clone(&cache);
1159
1160            let handle = thread::spawn(move || {
1161                for i in 0..10 {
1162                    for _ in 0..100 {
1163                        cache_clone.get_mut(&format!("key{}", i), |value| {
1164                            *value += 1;
1165                        });
1166                    }
1167                }
1168            });
1169
1170            handles.push(handle);
1171        }
1172
1173        // Wait for all threads to complete
1174        for handle in handles {
1175            handle.join().unwrap();
1176        }
1177
        // Because `get_mut` clones the value, runs the callback without holding the
        // shard lock, and then writes the result back, concurrent increments can
        // overwrite each other, so we can't guarantee exactly 500 increments per key.
        // We simply verify that modifications happened and the cache remains functional.
1182        for i in 0..10 {
1183            let value = cache.get(&format!("key{}", i));
1184            assert!(value.is_some());
1185            let num = value.unwrap();
1186            // The value should be positive but might be less than 500 due to race conditions
1187            assert!(
1188                num > 0,
1189                "Value for key{} should be positive but was {}",
1190                i,
1191                num
1192            );
1193        }
1194    }
1195
1196    #[test]
1197    fn test_clear() {
1198        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
1199
1200        // Add entries to various shards
1201        for i in 0..20 {
1202            cache.insert(format!("key{}", i), format!("value{}", i));
1203        }
1204
1205        assert_eq!(cache.len(), 20);
1206        assert!(!cache.is_empty());
1207
1208        // Clear all shards
1209        cache.clear();
1210
1211        assert_eq!(cache.len(), 0);
1212        assert!(cache.is_empty());
1213
1214        // Verify entries are gone
1215        for i in 0..20 {
1216            assert_eq!(cache.get(&format!("key{}", i)), None);
1217        }
1218    }
1219
1220    #[test]
1221    fn test_keys_values_entries() {
1222        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
1223
1224        // Add entries to various shards
1225        for i in 0..10 {
1226            cache.insert(format!("key{}", i), format!("value{}", i));
1227        }
1228
1229        // Test keys
1230        let keys = cache.keys();
1231        assert_eq!(keys.len(), 10);
1232        for i in 0..10 {
1233            assert!(keys.contains(&format!("key{}", i)));
1234        }
1235
1236        // Test values
1237        let values = cache.values();
1238        assert_eq!(values.len(), 10);
1239        for i in 0..10 {
1240            assert!(values.contains(&format!("value{}", i)));
1241        }
1242
1243        // Test entries
1244        let entries = cache.entries();
1245        assert_eq!(entries.len(), 10);
1246        for i in 0..10 {
1247            assert!(entries.contains(&(format!("key{}", i), format!("value{}", i))));
1248        }
1249    }
1250
1251    #[test]
1252    fn test_for_each_operations() {
1253        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
1254
1255        // Add entries to various shards
1256        for i in 0..10 {
1257            cache.insert(format!("key{}", i), format!("value{}", i));
1258        }
1259
1260        // Test for_each_value
1261        cache.for_each_value(|value| {
1262            *value = format!("{}_updated", value);
1263        });
1264
1265        for i in 0..10 {
1266            assert_eq!(
1267                cache.get(&format!("key{}", i)),
1268                Some(format!("value{}_updated", i))
1269            );
1270        }
1271
1272        // Test for_each_entry
1273        cache.for_each_entry(|(key, value)| {
1274            if key.ends_with("5") {
1275                *value = format!("{}_special", value);
1276            }
1277        });
1278
1279        assert_eq!(
1280            cache.get(&"key5".to_string()),
1281            Some("value5_updated_special".to_string())
1282        );
1283        assert_eq!(
1284            cache.get(&"key1".to_string()),
1285            Some("value1_updated".to_string())
1286        );
1287    }
1288
1289    #[test]
1290    fn test_multithreaded_operations() {
1291        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());
1292
1293        // Fill the cache
1294        for i in 0..20 {
1295            cache.insert(format!("key{}", i), format!("value{}", i));
1296        }
1297
1298        // Clear while concurrent accesses happen
1299        let cache_clone = Arc::clone(&cache);
1300        let handle = thread::spawn(move || {
1301            // This thread tries to access the cache while main thread clears it
1302            thread::sleep(Duration::from_millis(10));
1303
1304            for i in 0..20 {
1305                let _ = cache_clone.get(&format!("key{}", i));
1306                thread::sleep(Duration::from_micros(100));
1307            }
1308        });
1309
1310        // Main thread clears the cache
1311        thread::sleep(Duration::from_millis(5));
1312        cache.clear();
1313
1314        // Add new entries
1315        for i in 30..40 {
1316            cache.insert(format!("newkey{}", i), format!("newvalue{}", i));
1317        }
1318
1319        // Wait for the thread to complete
1320        handle.join().unwrap();
1321
1322        // Verify final state
1323        assert_eq!(cache.len(), 10);
1324        for i in 30..40 {
1325            assert_eq!(
1326                cache.get(&format!("newkey{}", i)),
1327                Some(format!("newvalue{}", i))
1328            );
1329        }
1330    }
1331
1332    #[test]
1333    fn test_retain() {
1334        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
1335
1336        // Add entries to various shards
1337        cache.insert("even1".to_string(), 2);
1338        cache.insert("even2".to_string(), 4);
1339        cache.insert("odd1".to_string(), 1);
1340        cache.insert("odd2".to_string(), 3);
1341
1342        assert_eq!(cache.len(), 4);
1343
1344        // Keep only entries with even values
1345        cache.retain(|_, v| v % 2 == 0);
1346
1347        assert_eq!(cache.len(), 2);
1348        assert!(cache.contains_key(&"even1".to_string()));
1349        assert!(cache.contains_key(&"even2".to_string()));
1350        assert!(!cache.contains_key(&"odd1".to_string()));
1351        assert!(!cache.contains_key(&"odd2".to_string()));
1352
1353        // Keep only entries with keys containing '1'
1354        cache.retain(|k, _| k.contains('1'));
1355
1356        assert_eq!(cache.len(), 1);
1357        assert!(cache.contains_key(&"even1".to_string()));
1358        assert!(!cache.contains_key(&"even2".to_string()));
1359    }
1360
1361    #[test]
1362    fn test_retain_concurrent() {
        // Use a deterministic, single-threaded setup so the result does not depend on thread timing
1364        let cache = ShardedSieveCache::with_shards(100, 8).unwrap();
1365
1366        // Add a known set of entries
1367        for i in 0..10 {
1368            cache.insert(format!("even{}", i * 2), i * 2);
1369            cache.insert(format!("odd{}", i * 2 + 1), i * 2 + 1);
1370        }
1371
1372        // Retain only odd values
1373        cache.retain(|_, value| value % 2 == 1);
1374
1375        // Check that we have the right number of entries
1376        assert_eq!(cache.len(), 10, "Should have 10 odd-valued entries");
1377
1378        // Verify that all remaining entries have odd values
1379        for (_, value) in cache.entries() {
1380            assert_eq!(
1381                value % 2,
1382                1,
1383                "Found an even value {value} which should have been removed"
1384            );
1385        }
1386
1387        // Verify the specific entries we expect
1388        for i in 0..10 {
1389            let odd_key = format!("odd{}", i * 2 + 1);
1390            assert!(cache.contains_key(&odd_key), "Missing odd entry: {odd_key}");
1391            assert_eq!(cache.get(&odd_key), Some(i * 2 + 1));
1392        }
1393    }
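
    // The tests below are additional sketches (names are illustrative) covering the
    // `Debug`, `IntoIterator`, poison-recovery, and `From<SyncSieveCache>` behaviour
    // documented above.

    #[test]
    fn test_debug_and_into_iter() {
        let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
        cache.insert("key1".to_string(), "value1".to_string());

        // `Debug` reports aggregate statistics rather than the entries themselves.
        let output = format!("{:?}", cache);
        assert!(output.contains("ShardedSieveCache"));
        assert!(output.contains("num_shards"));

        // `IntoIterator` yields every (key, value) pair.
        let pairs: Vec<(String, String)> = cache.into_iter().collect();
        assert_eq!(pairs, vec![("key1".to_string(), "value1".to_string())]);
    }

    #[test]
    fn test_poisoned_shard_recovery() {
        let cache = Arc::new(ShardedSieveCache::<String, String>::new(100).unwrap());
        cache.insert("key".to_string(), "value".to_string());

        // Panic inside `with_key_lock` so that the shard's mutex becomes poisoned.
        let cache_clone = Arc::clone(&cache);
        let result = thread::spawn(move || {
            cache_clone.with_key_lock(&"key".to_string(), |_shard| {
                panic!("poison the shard");
            })
        })
        .join();
        assert!(result.is_err());

        // `locked_shard` recovers from the poison, so the cache remains usable.
        assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
    }

    #[cfg(feature = "sync")]
    #[test]
    fn test_from_sync_cache() {
        let sync_cache = crate::SyncSieveCache::new(100).unwrap();
        sync_cache.insert("key".to_string(), "value".to_string());

        let sharded: ShardedSieveCache<String, String> = ShardedSieveCache::from(sync_cache);
        assert_eq!(sharded.get(&"key".to_string()), Some("value".to_string()));
        assert_eq!(sharded.len(), 1);
    }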
1394}