sieve_cache/sharded.rs

use crate::SieveCache;
use std::borrow::Borrow;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::{Arc, Mutex, MutexGuard, PoisonError};

/// Default number of shards to use if not specified explicitly.
/// This value was chosen to balance memory overhead with concurrency
/// in most practical scenarios.
const DEFAULT_SHARDS: usize = 16;

/// A thread-safe implementation of `SieveCache` that uses multiple shards to reduce contention.
///
/// This provides better concurrency than `SyncSieveCache` by splitting the cache into multiple
/// independent shards, each protected by its own mutex. Operations on different shards can
/// proceed in parallel, which can significantly improve throughput in multi-threaded environments.
///
/// # How Sharding Works
///
/// The cache is partitioned into multiple independent segments (shards) based on the hash of the key.
/// Each shard has its own mutex, allowing operations on different shards to proceed concurrently.
/// This reduces lock contention compared to a single-mutex approach, especially under high
/// concurrency with access patterns distributed across different keys.
///
/// # Performance Considerations
///
/// - For workloads with high concurrency across different keys, `ShardedSieveCache` typically offers
///   better performance than `SyncSieveCache`
/// - The benefits increase with the number of concurrent threads and the distribution of keys
/// - More shards reduce contention but increase memory overhead
/// - If most operations target the same few keys (which map to the same shards), the benefits of
///   sharding may be limited
///
/// # Examples
///
/// ```
/// # use sieve_cache::ShardedSieveCache;
/// // Create a cache with the default number of shards (16)
/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
///
/// // Or specify a custom number of shards
/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
///
/// cache.insert("key1".to_string(), "value1".to_string());
/// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
/// ```
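///
/// # Thread-Safe Usage
///
/// A minimal sketch of sharing one cache across threads (via `Arc`, as the tests in this
/// module do; cloning the cache handle itself also shares the same underlying shards):
///
/// ```
/// # use sieve_cache::ShardedSieveCache;
/// # use std::sync::Arc;
/// # use std::thread;
/// let cache: Arc<ShardedSieveCache<String, String>> =
///     Arc::new(ShardedSieveCache::new(1000).unwrap());
///
/// let handles: Vec<_> = (0..4)
///     .map(|t| {
///         let cache = Arc::clone(&cache);
///         // Each thread inserts its own key; different keys may hash to different shards.
///         thread::spawn(move || {
///             cache.insert(format!("key{}", t), format!("value{}", t));
///         })
///     })
///     .collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(cache.len(), 4);
/// ```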
#[derive(Clone)]
pub struct ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Array of shard mutexes, each containing a separate SieveCache instance
    shards: Vec<Arc<Mutex<SieveCache<K, V>>>>,
    /// Number of shards in the cache - kept as a separate field for quick access
    num_shards: usize,
}

impl<K, V> ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Creates a new sharded cache with the specified capacity, using the default number of shards.
    ///
    /// The capacity will be divided evenly among the shards. The default shard count (16)
    /// provides a good balance between concurrency and memory overhead for most workloads.
    ///
    /// # Errors
    ///
    /// Returns an error if the capacity is zero.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
    /// assert_eq!(cache.num_shards(), 16); // Default shard count
    /// ```
    pub fn new(capacity: usize) -> Result<Self, &'static str> {
        Self::with_shards(capacity, DEFAULT_SHARDS)
    }

    /// Creates a new sharded cache with the specified capacity and number of shards.
    ///
    /// The capacity will be divided among the shards, distributing any remainder to ensure
    /// the total capacity is at least the requested amount.
    ///
    /// # Arguments
    ///
    /// * `capacity` - The total capacity of the cache
    /// * `num_shards` - The number of shards to divide the cache into
    ///
    /// # Errors
    ///
    /// Returns an error if either the capacity or number of shards is zero.
    ///
    /// # Performance Impact
    ///
    /// - More shards can reduce contention in highly concurrent environments
    /// - However, each shard has memory overhead, so very high shard counts may
    ///   increase memory usage without providing additional performance benefits
    /// - For most workloads, a value between 8 and 32 shards is optimal
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// // Create a cache with 8 shards
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::with_shards(1000, 8).unwrap();
    /// assert_eq!(cache.num_shards(), 8);
    /// assert!(cache.capacity() >= 1000);
    /// ```
    pub fn with_shards(capacity: usize, num_shards: usize) -> Result<Self, &'static str> {
        if capacity == 0 {
            return Err("capacity must be greater than 0");
        }
        if num_shards == 0 {
            return Err("number of shards must be greater than 0");
        }

        // Calculate per-shard capacity
        let base_capacity_per_shard = capacity / num_shards;
        let remaining = capacity % num_shards;

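        // For example, a total capacity of 10 split across 3 shards yields per-shard
        // capacities of 4, 3, and 3: the remainder of 1 goes to the first shard.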
        let mut shards = Vec::with_capacity(num_shards);
        for i in 0..num_shards {
            // Distribute the remaining capacity to the first 'remaining' shards
            let shard_capacity = if i < remaining {
                base_capacity_per_shard + 1
            } else {
                base_capacity_per_shard
            };

            // Ensure a capacity of at least 1 per shard
            let shard_capacity = std::cmp::max(1, shard_capacity);
            shards.push(Arc::new(Mutex::new(SieveCache::new(shard_capacity)?)));
        }

        Ok(Self { shards, num_shards })
    }

    /// Returns the shard index for a given key.
    ///
    /// This function computes a hash of the key and uses it to determine which shard
    /// the key belongs to.
    #[inline]
    fn get_shard_index<Q>(&self, key: &Q) -> usize
    where
        Q: Hash + ?Sized,
    {
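        // Hash the key with the standard library's `DefaultHasher` and map the result
        // onto the shard range; equal keys always map to the same shard.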
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let hash = hasher.finish() as usize;
        hash % self.num_shards
    }

    /// Returns a reference to the shard for a given key.
    ///
    /// This is an internal helper method that maps a key to its corresponding shard.
    #[inline]
    fn get_shard<Q>(&self, key: &Q) -> &Arc<Mutex<SieveCache<K, V>>>
    where
        Q: Hash + ?Sized,
    {
        let index = self.get_shard_index(key);
        &self.shards[index]
    }

    /// Returns a locked reference to the shard for a given key.
    ///
    /// This is an internal helper method to abstract away the lock handling and error recovery.
    /// If the mutex is poisoned due to a panic in another thread, the poison error is
    /// recovered from by calling `into_inner()` to access the underlying data.
    #[inline]
    fn locked_shard<Q>(&self, key: &Q) -> MutexGuard<'_, SieveCache<K, V>>
    where
        Q: Hash + ?Sized,
    {
        self.get_shard(key)
            .lock()
            .unwrap_or_else(PoisonError::into_inner)
    }

    /// Returns the total capacity of the cache (sum of all shard capacities).
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::new(1000).unwrap();
    /// assert!(cache.capacity() >= 1000);
    /// ```
    pub fn capacity(&self) -> usize {
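        // Lock each shard in turn and sum the capacities they report.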
        self.shards
            .iter()
            .map(|shard| {
                shard
                    .lock()
                    .unwrap_or_else(PoisonError::into_inner)
                    .capacity()
            })
            .sum()
    }

    /// Returns the total number of entries in the cache (sum of all shard lengths).
    ///
    /// Note that this operation requires acquiring a lock on each shard, so it may
    /// cause temporary contention if called frequently in a high-concurrency environment.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// assert_eq!(cache.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        self.shards
            .iter()
            .map(|shard| shard.lock().unwrap_or_else(PoisonError::into_inner).len())
            .sum()
    }

    /// Returns `true` when no values are currently cached in any shard.
    ///
    /// Note that this operation requires acquiring a lock on each shard, so it may
    /// cause temporary contention if called frequently in a high-concurrency environment.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// assert!(cache.is_empty());
    ///
    /// cache.insert("key".to_string(), "value".to_string());
    /// assert!(!cache.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.shards.iter().all(|shard| {
            shard
                .lock()
                .unwrap_or_else(PoisonError::into_inner)
                .is_empty()
        })
    }

    /// Returns `true` if there is a value in the cache mapped to by `key`.
    ///
    /// This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert!(cache.contains_key(&"key".to_string()));
    /// assert!(!cache.contains_key(&"missing".to_string()));
    /// ```
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
    {
        let mut guard = self.locked_shard(key);
        guard.contains_key(key)
    }

    /// Gets a clone of the value in the cache mapped to by `key`.
    ///
    /// If no value exists for `key`, this returns `None`. This operation only locks
    /// the specific shard containing the key.
    ///
    /// # Note
    ///
    /// This method returns a clone of the value rather than a reference, because the
    /// shard's mutex guard is released when this method returns, so a reference into
    /// the cache could not safely be handed out. This means that `V` must implement `Clone`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
    /// assert_eq!(cache.get(&"missing".to_string()), None);
    /// ```
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
        V: Clone,
    {
        let mut guard = self.locked_shard(key);
        guard.get(key).cloned()
    }

    /// Gets a mutable reference to the value in the cache mapped to by `key` via a callback function.
    ///
    /// If no value exists for `key`, the callback will not be invoked and this returns `false`.
    /// Otherwise, the callback is invoked with a mutable reference to the value and this returns `true`.
    /// This operation only locks the specific shard containing the key.
    ///
    /// This operation marks the entry as "visited" in the SIEVE algorithm,
    /// which affects eviction decisions.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Modify the value in-place
    /// cache.get_mut(&"key".to_string(), |value| {
    ///     *value = "new_value".to_string();
    /// });
    ///
    /// assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
    /// ```
    pub fn get_mut<Q, F>(&self, key: &Q, f: F) -> bool
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
        F: FnOnce(&mut V),
    {
        let mut guard = self.locked_shard(key);
        if let Some(value) = guard.get_mut(key) {
            f(value);
            true
        } else {
            false
        }
    }

    /// Maps `key` to `value` in the cache, possibly evicting old entries from the appropriate shard.
    ///
    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
    /// updated. This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// // Insert a new key
    /// assert!(cache.insert("key1".to_string(), "value1".to_string()));
    ///
    /// // Update an existing key
    /// assert!(!cache.insert("key1".to_string(), "updated_value".to_string()));
    /// ```
    pub fn insert(&self, key: K, value: V) -> bool {
        let mut guard = self.locked_shard(&key);
        guard.insert(key, value)
    }

    /// Removes the cache entry mapped to by `key`.
    ///
    /// This method returns the value removed from the cache. If `key` did not map to any value,
    /// then this returns `None`. This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Remove an existing key
    /// assert_eq!(cache.remove(&"key".to_string()), Some("value".to_string()));
    ///
    /// // Attempt to remove a missing key
    /// assert_eq!(cache.remove(&"key".to_string()), None);
    /// ```
    pub fn remove<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        let mut guard = self.locked_shard(key);
        guard.remove(key)
    }

    /// Removes and returns a value from the cache that was not recently accessed.
    ///
    /// This method tries to evict from each shard in turn until it finds a value to evict.
    /// If no suitable value exists in any shard, this returns `None`.
    ///
    /// # Note
    ///
    /// This implementation differs from the non-sharded version in that it checks each shard
    /// in sequence until it finds a suitable entry to evict. This may not provide the globally
    /// optimal eviction decision across all shards, but it avoids the need to lock all shards
    /// simultaneously.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(10, 2).unwrap();
    ///
    /// // Fill the cache
    /// for i in 0..15 {
    ///     cache.insert(format!("key{}", i), format!("value{}", i));
    /// }
    ///
    /// // Should be able to evict something
    /// assert!(cache.evict().is_some());
    /// ```
    pub fn evict(&self) -> Option<V> {
        // Try each shard in turn
        for shard in &self.shards {
            let result = shard.lock().unwrap_or_else(PoisonError::into_inner).evict();
            if result.is_some() {
                return result;
            }
        }
        None
    }

    /// Removes all entries from the cache.
    ///
    /// This operation clears all stored values across all shards and resets the cache to an empty state,
    /// while maintaining the original capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// assert_eq!(cache.len(), 2);
    ///
    /// cache.clear();
    /// assert_eq!(cache.len(), 0);
    /// assert!(cache.is_empty());
    /// ```
    pub fn clear(&self) {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.clear();
        }
    }

    /// Returns all keys in the cache.
    ///
    /// The order of keys is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashSet;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let keys: HashSet<_> = cache.keys().into_iter().collect();
    /// assert_eq!(keys.len(), 2);
    /// assert!(keys.contains(&"key1".to_string()));
    /// assert!(keys.contains(&"key2".to_string()));
    /// ```
    pub fn keys(&self) -> Vec<K> {
        let mut all_keys = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_keys.extend(guard.keys().cloned());
        }
        all_keys
    }

    /// Returns all values in the cache.
    ///
    /// The order of values is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashSet;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let values: HashSet<_> = cache.values().into_iter().collect();
    /// assert_eq!(values.len(), 2);
    /// assert!(values.contains(&"value1".to_string()));
    /// assert!(values.contains(&"value2".to_string()));
    /// ```
    pub fn values(&self) -> Vec<V>
    where
        V: Clone,
    {
        let mut all_values = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_values.extend(guard.values().cloned());
        }
        all_values
    }

    /// Returns all key-value pairs in the cache.
    ///
    /// The order of pairs is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashMap;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let entries: HashMap<_, _> = cache.entries().into_iter().collect();
    /// assert_eq!(entries.len(), 2);
    /// assert_eq!(entries.get(&"key1".to_string()), Some(&"value1".to_string()));
    /// assert_eq!(entries.get(&"key2".to_string()), Some(&"value2".to_string()));
    /// ```
    pub fn entries(&self) -> Vec<(K, V)>
    where
        V: Clone,
    {
        let mut all_entries = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_entries.extend(guard.iter().map(|(k, v)| (k.clone(), v.clone())));
        }
        all_entries
    }

    /// Applies a function to all values in the cache across all shards.
    ///
    /// This method marks all entries as visited.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Update all values by appending text
    /// cache.for_each_value(|value| {
    ///     *value = format!("{}_updated", value);
    /// });
    ///
    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_updated".to_string()));
    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2_updated".to_string()));
    /// ```
    pub fn for_each_value<F>(&self, mut f: F)
    where
        F: FnMut(&mut V),
    {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.values_mut().for_each(&mut f);
        }
    }

    /// Applies a function to all key-value pairs in the cache across all shards.
    ///
    /// This method marks all entries as visited.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Update all values associated with keys containing '1'
    /// cache.for_each_entry(|(key, value)| {
    ///     if key.contains('1') {
    ///         *value = format!("{}_special", value);
    ///     }
    /// });
    ///
    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_special".to_string()));
    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2".to_string()));
    /// ```
    pub fn for_each_entry<F>(&self, mut f: F)
    where
        F: FnMut((&K, &mut V)),
    {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.iter_mut().for_each(|entry| f(entry));
        }
    }

    /// Gets exclusive access to a specific shard based on the key.
    ///
    /// This can be useful for performing multiple operations atomically on entries
    /// that share the same shard. Note that only keys that hash to the same shard
    /// can be manipulated within a single transaction.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// // Perform multiple operations atomically
    /// cache.with_key_lock(&"foo", |shard| {
    ///     // All operations within this closure have exclusive access to the shard
    ///     shard.insert("key1".to_string(), "value1".to_string());
    ///     shard.insert("key2".to_string(), "value2".to_string());
    ///
    ///     // We can check state mid-transaction
    ///     assert!(shard.contains_key(&"key1".to_string()));
    /// });
    /// ```
    pub fn with_key_lock<Q, F, T>(&self, key: &Q, f: F) -> T
    where
        Q: Hash + ?Sized,
        F: FnOnce(&mut SieveCache<K, V>) -> T,
    {
        let mut guard = self.locked_shard(key);
        f(&mut guard)
    }

    /// Returns the number of shards in this cache.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
    /// assert_eq!(cache.num_shards(), 32);
    /// ```
    pub fn num_shards(&self) -> usize {
        self.num_shards
    }

    /// Gets a specific shard by index.
    ///
    /// This is mainly useful for advanced use cases and maintenance operations.
    /// Returns `None` if the index is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 8).unwrap();
    ///
    /// // Access a valid shard
    /// assert!(cache.get_shard_by_index(0).is_some());
    ///
    /// // Out of bounds index
    /// assert!(cache.get_shard_by_index(100).is_none());
    /// ```
    pub fn get_shard_by_index(&self, index: usize) -> Option<&Arc<Mutex<SieveCache<K, V>>>> {
        self.shards.get(index)
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// Removes all entries for which the provided function returns `false`.
    /// The elements are visited in arbitrary, unspecified order, across all shards.
    /// The predicate is evaluated against a snapshot of the entries: each shard is locked
    /// briefly while its entries are cloned, and non-matching entries are then removed one
    /// by one, so no lock is held for the duration of the whole operation.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, i32> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), 100);
    /// cache.insert("key2".to_string(), 200);
    /// cache.insert("key3".to_string(), 300);
    ///
    /// // Keep only entries with values greater than 150
    /// cache.retain(|_, value| *value > 150);
    ///
    /// assert_eq!(cache.len(), 2);
    /// assert!(!cache.contains_key(&"key1".to_string()));
    /// assert!(cache.contains_key(&"key2".to_string()));
    /// assert!(cache.contains_key(&"key3".to_string()));
    /// ```
    pub fn retain<F>(&self, mut f: F)
    where
        F: FnMut(&K, &V) -> bool,
        V: Clone,
    {
        // First, collect all entries so we can check them without holding locks
        let entries = self.entries();

        // Now go through all entries and determine which ones to remove
        for (key, value) in entries {
            // Check the predicate outside the lock - using cloned data
            if !f(&key, &value) {
                // The predicate returned false, so remove this entry
                self.remove(&key);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[test]
    fn test_sharded_cache_basics() {
        let cache = ShardedSieveCache::new(100).unwrap();

        // Insert a value
        assert!(cache.insert("key1".to_string(), "value1".to_string()));

        // Read back the value
        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));

        // Check contains_key
        assert!(cache.contains_key(&"key1".to_string()));

        // Check capacity and length
        assert!(cache.capacity() >= 100); // May be slightly higher due to rounding up per shard
        assert_eq!(cache.len(), 1);

        // Remove a value
        assert_eq!(
            cache.remove(&"key1".to_string()),
            Some("value1".to_string())
        );
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_custom_shard_count() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
        assert_eq!(cache.num_shards(), 4);

        for i in 0..10 {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            cache.insert(key, value);
        }

        assert_eq!(cache.len(), 10);
    }

    #[test]
    fn test_parallel_access() {
        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
        let mut handles = vec![];

        // Spawn 8 threads that each insert 100 items
        for t in 0..8 {
            let cache_clone = Arc::clone(&cache);
            let handle = thread::spawn(move || {
                for i in 0..100 {
                    let key = format!("thread{}key{}", t, i);
                    let value = format!("value{}_{}", t, i);
                    cache_clone.insert(key, value);
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify total item count
        assert_eq!(cache.len(), 800);

        // Check a few random keys
        assert_eq!(
            cache.get(&"thread0key50".to_string()),
            Some("value0_50".to_string())
        );
        assert_eq!(
            cache.get(&"thread7key99".to_string()),
            Some("value7_99".to_string())
        );
    }

    #[test]
    fn test_with_key_lock() {
        let cache = ShardedSieveCache::new(100).unwrap();

        // Perform multiple operations atomically on keys that map to the same shard
        cache.with_key_lock(&"test_key", |shard| {
            shard.insert("key1".to_string(), "value1".to_string());
            shard.insert("key2".to_string(), "value2".to_string());
            shard.insert("key3".to_string(), "value3".to_string());
        });

        assert_eq!(cache.len(), 3);
    }

    #[test]
    fn test_eviction() {
        let cache = ShardedSieveCache::with_shards(10, 2).unwrap();

        // Fill the cache
        for i in 0..15 {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            cache.insert(key, value);
        }

        // The cache should not exceed its capacity
        assert!(cache.len() <= 10);

        // We should be able to evict items
        let evicted = cache.evict();
        assert!(evicted.is_some());
    }

    #[test]
    fn test_contention() {
        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
        let mut handles = vec![];

        // Create distinct keys that should spread across different shards
        let keys: Vec<String> = (0..16).map(|i| format!("shard_key_{}", i)).collect();

        // Spawn 16 threads, each hammering a different key
        for i in 0..16 {
            let cache_clone = Arc::clone(&cache);
            let key = keys[i].clone();

            let handle = thread::spawn(move || {
                for j in 0..1000 {
                    cache_clone.insert(key.clone(), format!("value_{}", j));
                    let _ = cache_clone.get(&key);

                    // Small sleep to make contention more likely
                    if j % 100 == 0 {
                        thread::sleep(Duration::from_micros(1));
                    }
                }
            });

            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // All keys should still be present
        for key in keys {
            assert!(cache.contains_key(&key));
        }
    }

    #[test]
    fn test_get_mut() {
        let cache = ShardedSieveCache::new(100).unwrap();
        cache.insert("key".to_string(), "value".to_string());

        // Modify the value in-place
        let modified = cache.get_mut(&"key".to_string(), |value| {
            *value = "new_value".to_string();
        });
        assert!(modified);

        // Verify the value was updated
        assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));

        // Try to modify a non-existent key
        let modified = cache.get_mut(&"missing".to_string(), |_| {
            panic!("This should not be called");
        });
        assert!(!modified);
    }

    #[test]
    fn test_get_mut_concurrent() {
        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());

        // Insert initial values
        for i in 0..10 {
            cache.insert(format!("key{}", i), 0);
        }

        let mut handles = vec![];

        // Spawn 5 threads that modify values concurrently
        for _ in 0..5 {
            let cache_clone = Arc::clone(&cache);

            let handle = thread::spawn(move || {
                for i in 0..10 {
                    for _ in 0..100 {
                        cache_clone.get_mut(&format!("key{}", i), |value| {
                            *value += 1;
                        });
                    }
                }
            });

            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Each key should have been incremented 500 times (5 threads * 100 increments each)
        for i in 0..10 {
            assert_eq!(cache.get(&format!("key{}", i)), Some(500));
        }
    }

    #[test]
    fn test_clear() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..20 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        assert_eq!(cache.len(), 20);
        assert!(!cache.is_empty());

        // Clear all shards
        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Verify entries are gone
        for i in 0..20 {
            assert_eq!(cache.get(&format!("key{}", i)), None);
        }
    }

    #[test]
    fn test_keys_values_entries() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..10 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Test keys
        let keys = cache.keys();
        assert_eq!(keys.len(), 10);
        for i in 0..10 {
            assert!(keys.contains(&format!("key{}", i)));
        }

        // Test values
        let values = cache.values();
        assert_eq!(values.len(), 10);
        for i in 0..10 {
            assert!(values.contains(&format!("value{}", i)));
        }

        // Test entries
        let entries = cache.entries();
        assert_eq!(entries.len(), 10);
        for i in 0..10 {
            assert!(entries.contains(&(format!("key{}", i), format!("value{}", i))));
        }
    }

    #[test]
    fn test_for_each_operations() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..10 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Test for_each_value
        cache.for_each_value(|value| {
            *value = format!("{}_updated", value);
        });

        for i in 0..10 {
            assert_eq!(
                cache.get(&format!("key{}", i)),
                Some(format!("value{}_updated", i))
            );
        }

        // Test for_each_entry
        cache.for_each_entry(|(key, value)| {
            if key.ends_with("5") {
                *value = format!("{}_special", value);
            }
        });

        assert_eq!(
            cache.get(&"key5".to_string()),
            Some("value5_updated_special".to_string())
        );
        assert_eq!(
            cache.get(&"key1".to_string()),
            Some("value1_updated".to_string())
        );
    }

    #[test]
    fn test_multithreaded_operations() {
        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());

        // Fill the cache
        for i in 0..20 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Clear while concurrent accesses happen
        let cache_clone = Arc::clone(&cache);
        let handle = thread::spawn(move || {
            // This thread tries to access the cache while main thread clears it
            thread::sleep(Duration::from_millis(10));

            for i in 0..20 {
                let _ = cache_clone.get(&format!("key{}", i));
                thread::sleep(Duration::from_micros(100));
            }
        });

        // Main thread clears the cache
        thread::sleep(Duration::from_millis(5));
        cache.clear();

        // Add new entries
        for i in 30..40 {
            cache.insert(format!("newkey{}", i), format!("newvalue{}", i));
        }

        // Wait for the thread to complete
        handle.join().unwrap();

        // Verify final state
        assert_eq!(cache.len(), 10);
        for i in 30..40 {
            assert_eq!(
                cache.get(&format!("newkey{}", i)),
                Some(format!("newvalue{}", i))
            );
        }
    }

    #[test]
    fn test_retain() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        cache.insert("even1".to_string(), 2);
        cache.insert("even2".to_string(), 4);
        cache.insert("odd1".to_string(), 1);
        cache.insert("odd2".to_string(), 3);

        assert_eq!(cache.len(), 4);

        // Keep only entries with even values
        cache.retain(|_, v| v % 2 == 0);

        assert_eq!(cache.len(), 2);
        assert!(cache.contains_key(&"even1".to_string()));
        assert!(cache.contains_key(&"even2".to_string()));
        assert!(!cache.contains_key(&"odd1".to_string()));
        assert!(!cache.contains_key(&"odd2".to_string()));

        // Keep only entries with keys containing '1'
        cache.retain(|k, _| k.contains('1'));

        assert_eq!(cache.len(), 1);
        assert!(cache.contains_key(&"even1".to_string()));
        assert!(!cache.contains_key(&"even2".to_string()));
    }

    #[test]
    fn test_retain_concurrent() {
        // Create a well-controlled test case that avoids race conditions
        let cache = ShardedSieveCache::with_shards(100, 8).unwrap();

        // Add a known set of entries
        for i in 0..10 {
            cache.insert(format!("even{}", i * 2), i * 2);
            cache.insert(format!("odd{}", i * 2 + 1), i * 2 + 1);
        }

        // Retain only odd values
        cache.retain(|_, value| value % 2 == 1);

        // Check that we have the right number of entries
        assert_eq!(cache.len(), 10, "Should have 10 odd-valued entries");

        // Verify that all remaining entries have odd values
        for (_, value) in cache.entries() {
            assert_eq!(
                value % 2,
                1,
                "Found an even value {value} which should have been removed"
            );
        }

        // Verify the specific entries we expect
        for i in 0..10 {
            let odd_key = format!("odd{}", i * 2 + 1);
            assert!(cache.contains_key(&odd_key), "Missing odd entry: {odd_key}");
            assert_eq!(cache.get(&odd_key), Some(i * 2 + 1));
        }
    }
}