sieve_cache/sharded.rs

use crate::SieveCache;
use std::borrow::Borrow;
use std::collections::hash_map::DefaultHasher;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::sync::{Arc, Mutex, MutexGuard, PoisonError};

/// Default number of shards to use if not specified explicitly.
/// This value was chosen as a good default that balances memory overhead
/// with concurrency in most practical scenarios.
const DEFAULT_SHARDS: usize = 16;

/// A thread-safe implementation of `SieveCache` that uses multiple shards to reduce contention.
///
/// This provides better concurrency than `SyncSieveCache` by splitting the cache into multiple
/// independent shards, each protected by its own mutex. Operations on different shards can
/// proceed in parallel, which can significantly improve throughput in multi-threaded environments.
///
/// # How Sharding Works
///
/// The cache is partitioned into multiple independent segments (shards) based on the hash of the key.
/// Each shard has its own mutex, allowing operations on different shards to proceed concurrently.
/// This reduces lock contention compared to a single-mutex approach, especially under high
/// concurrency with access patterns distributed across different keys.
///
/// # Performance Considerations
///
/// - For workloads with high concurrency across different keys, `ShardedSieveCache` typically offers
///   better performance than `SyncSieveCache`
/// - The benefits increase with the number of concurrent threads and the distribution of keys
/// - More shards reduce contention but increase memory overhead
/// - If most operations target the same few keys (which map to the same shards), the benefits of
///   sharding may be limited
///
/// # Examples
///
/// ```
/// # use sieve_cache::ShardedSieveCache;
/// // Create a cache with the default number of shards (16)
/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
///
/// // Or specify a custom number of shards
/// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
///
/// cache.insert("key1".to_string(), "value1".to_string());
/// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
/// ```
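///
/// A minimal sketch of sharing the cache across threads (mirroring the unit tests at the
/// end of this file): wrap it in an `Arc` and call `insert`/`get` from several threads.
///
/// ```
/// # use sieve_cache::ShardedSieveCache;
/// # use std::sync::Arc;
/// # use std::thread;
/// let cache: Arc<ShardedSieveCache<String, String>> =
///     Arc::new(ShardedSieveCache::new(1000).unwrap());
///
/// let handles: Vec<_> = (0..4)
///     .map(|t| {
///         let cache = Arc::clone(&cache);
///         thread::spawn(move || {
///             // Each thread writes its own key; distinct keys usually land in
///             // different shards, so these inserts rarely contend on the same mutex.
///             cache.insert(format!("thread{}", t), format!("value{}", t));
///         })
///     })
///     .collect();
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(cache.len(), 4);
/// ```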
#[derive(Clone)]
pub struct ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Array of shard mutexes, each containing a separate SieveCache instance
    shards: Vec<Arc<Mutex<SieveCache<K, V>>>>,
    /// Number of shards in the cache - kept as a separate field for quick access
    num_shards: usize,
}

// SAFETY: all shared state lives behind `Arc<Mutex<_>>`, so concurrent access to the
// shards is synchronized by the per-shard mutexes.
unsafe impl<K, V> Sync for ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
}

impl<K, V> Default for ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Creates a new sharded cache with a default capacity of 100 entries and the default number of shards.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedSieveCache::new()` returns an error, which should never
    /// happen for a non-zero capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::default::Default;
    /// let cache: ShardedSieveCache<String, u32> = Default::default();
    /// assert!(cache.capacity() >= 100); // Total capacity is at least the requested amount
    /// assert_eq!(cache.num_shards(), 16); // Default shard count
    /// ```
    fn default() -> Self {
        Self::new(100).expect("Failed to create cache with default capacity")
    }
}

impl<K, V> fmt::Debug for ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync + fmt::Debug,
    V: Send + Sync + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ShardedSieveCache")
            .field("capacity", &self.capacity())
            .field("len", &self.len())
            .field("num_shards", &self.num_shards)
            .finish()
    }
}

impl<K, V> IntoIterator for ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Clone + Send + Sync,
{
    type Item = (K, V);
    type IntoIter = std::vec::IntoIter<(K, V)>;

    /// Converts the cache into an iterator over its key-value pairs.
    ///
    /// This collects all entries into a `Vec` and returns an iterator over that `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashMap;
    /// let cache = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Collect into a HashMap
    /// let map: HashMap<_, _> = cache.into_iter().collect();
    /// assert_eq!(map.len(), 2);
    /// assert_eq!(map.get("key1"), Some(&"value1".to_string()));
    /// ```
    fn into_iter(self) -> Self::IntoIter {
        self.entries().into_iter()
    }
}

#[cfg(feature = "sync")]
impl<K, V> From<crate::SyncSieveCache<K, V>> for ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Clone + Send + Sync,
{
    /// Creates a new sharded cache from an existing `SyncSieveCache`.
    ///
    /// This allows for upgrading a standard thread-safe cache to a more scalable sharded version.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::{SyncSieveCache, ShardedSieveCache};
    /// let sync_cache = SyncSieveCache::new(100).unwrap();
    /// sync_cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Convert to sharded version with default sharding
    /// let sharded_cache = ShardedSieveCache::from(sync_cache);
    /// assert_eq!(sharded_cache.get(&"key".to_string()), Some("value".to_string()));
    /// ```
    fn from(sync_cache: crate::SyncSieveCache<K, V>) -> Self {
        // Create a new sharded cache with the same capacity
        let capacity = sync_cache.capacity();
        let sharded = Self::new(capacity).expect("Failed to create sharded cache");

        // Transfer all entries
        for (key, value) in sync_cache.entries() {
            sharded.insert(key, value);
        }

        sharded
    }
}

impl<K, V> ShardedSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Creates a new sharded cache with the specified capacity, using the default number of shards.
    ///
    /// The capacity will be divided evenly among the shards. The default shard count (16)
    /// provides a good balance between concurrency and memory overhead for most workloads.
    ///
    /// # Errors
    ///
    /// Returns an error if the capacity is zero.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(1000).unwrap();
    /// assert_eq!(cache.num_shards(), 16); // Default shard count
    /// ```
    pub fn new(capacity: usize) -> Result<Self, &'static str> {
        Self::with_shards(capacity, DEFAULT_SHARDS)
    }

    /// Creates a new sharded cache with the specified capacity and number of shards.
    ///
    /// The capacity will be divided among the shards, distributing any remainder to ensure
    /// the total capacity is at least the requested amount.
    ///
    /// # Arguments
    ///
    /// * `capacity` - The total capacity of the cache
    /// * `num_shards` - The number of shards to divide the cache into
    ///
    /// # Errors
    ///
    /// Returns an error if either the capacity or number of shards is zero.
    ///
    /// # Performance Impact
    ///
    /// - More shards can reduce contention in highly concurrent environments
    /// - However, each shard has memory overhead, so very high shard counts may
    ///   increase memory usage without providing additional performance benefits
    /// - For most workloads, a value between 8 and 32 shards is optimal
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// // Create a cache with 8 shards
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::with_shards(1000, 8).unwrap();
    /// assert_eq!(cache.num_shards(), 8);
    /// assert!(cache.capacity() >= 1000);
    /// ```
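    ///
    /// As a small worked example of the capacity split (per the division logic below),
    /// a capacity of 10 across 4 shards is divided 3, 3, 2 and 2:
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::with_shards(10, 4).unwrap();
    /// assert_eq!(cache.num_shards(), 4);
    /// assert!(cache.capacity() >= 10); // per-shard capacities of 3, 3, 2 and 2
    /// ```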
    pub fn with_shards(capacity: usize, num_shards: usize) -> Result<Self, &'static str> {
        if capacity == 0 {
            return Err("capacity must be greater than 0");
        }
        if num_shards == 0 {
            return Err("number of shards must be greater than 0");
        }

        // Calculate per-shard capacity
        let base_capacity_per_shard = capacity / num_shards;
        let remaining = capacity % num_shards;

        let mut shards = Vec::with_capacity(num_shards);
        for i in 0..num_shards {
            // Distribute the remaining capacity to the first 'remaining' shards
            let shard_capacity = if i < remaining {
                base_capacity_per_shard + 1
            } else {
                base_capacity_per_shard
            };

            // Ensure at least capacity 1 per shard
            let shard_capacity = std::cmp::max(1, shard_capacity);
            shards.push(Arc::new(Mutex::new(SieveCache::new(shard_capacity)?)));
        }

        Ok(Self { shards, num_shards })
    }

    /// Returns the shard index for a given key.
    ///
    /// This function computes a hash of the key and uses it to determine which shard
    /// the key belongs to.
    #[inline]
    fn get_shard_index<Q>(&self, key: &Q) -> usize
    where
        Q: Hash + ?Sized,
    {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let hash = hasher.finish() as usize;
        hash % self.num_shards
    }

    /// Returns a reference to the shard for a given key.
    ///
    /// This is an internal helper method that maps a key to its corresponding shard.
    #[inline]
    fn get_shard<Q>(&self, key: &Q) -> &Arc<Mutex<SieveCache<K, V>>>
    where
        Q: Hash + ?Sized,
    {
        let index = self.get_shard_index(key);
        &self.shards[index]
    }

    /// Returns a locked reference to the shard for a given key.
    ///
    /// This is an internal helper method to abstract away the lock handling and error recovery.
    /// If the mutex is poisoned due to a panic in another thread, the poison error is
    /// recovered from by calling `into_inner()` to access the underlying data.
    #[inline]
    fn locked_shard<Q>(&self, key: &Q) -> MutexGuard<'_, SieveCache<K, V>>
    where
        Q: Hash + ?Sized,
    {
        self.get_shard(key)
            .lock()
            .unwrap_or_else(PoisonError::into_inner)
    }

    /// Returns the total capacity of the cache (sum of all shard capacities).
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, u32> = ShardedSieveCache::new(1000).unwrap();
    /// assert!(cache.capacity() >= 1000);
    /// ```
    pub fn capacity(&self) -> usize {
        self.shards
            .iter()
            .map(|shard| {
                shard
                    .lock()
                    .unwrap_or_else(PoisonError::into_inner)
                    .capacity()
            })
            .sum()
    }

    /// Returns the total number of entries in the cache (sum of all shard lengths).
    ///
    /// Note that this operation requires acquiring a lock on each shard, so it may
    /// cause temporary contention if called frequently in a high-concurrency environment.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// assert_eq!(cache.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        self.shards
            .iter()
            .map(|shard| shard.lock().unwrap_or_else(PoisonError::into_inner).len())
            .sum()
    }

    /// Returns `true` when no values are currently cached in any shard.
    ///
    /// Note that this operation requires acquiring a lock on each shard, so it may
    /// cause temporary contention if called frequently in a high-concurrency environment.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// assert!(cache.is_empty());
    ///
    /// cache.insert("key".to_string(), "value".to_string());
    /// assert!(!cache.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.shards.iter().all(|shard| {
            shard
                .lock()
                .unwrap_or_else(PoisonError::into_inner)
                .is_empty()
        })
    }

    /// Returns `true` if there is a value in the cache mapped to by `key`.
    ///
    /// This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert!(cache.contains_key(&"key".to_string()));
    /// assert!(!cache.contains_key(&"missing".to_string()));
    /// ```
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
    {
        let mut guard = self.locked_shard(key);
        guard.contains_key(key)
    }

    /// Gets a clone of the value in the cache mapped to by `key`.
    ///
    /// If no value exists for `key`, this returns `None`. This operation only locks
    /// the specific shard containing the key.
    ///
    /// # Note
    ///
    /// This method returns a clone of the value rather than a reference, since the
    /// mutex guard would be dropped after this method returns. This means that
    /// `V` must implement `Clone`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
    /// assert_eq!(cache.get(&"missing".to_string()), None);
    /// ```
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
        V: Clone,
    {
        let mut guard = self.locked_shard(key);
        guard.get(key).cloned()
    }

    /// Gets a mutable reference to the value in the cache mapped to by `key` via a callback function.
    ///
    /// If no value exists for `key`, the callback will not be invoked and this returns `false`.
    /// Otherwise, the callback is invoked with a mutable reference to the value and this returns `true`.
    /// This operation only locks the specific shard containing the key.
    ///
    /// This operation marks the entry as "visited" in the SIEVE algorithm,
    /// which affects eviction decisions.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Modify the value in-place
    /// cache.get_mut(&"key".to_string(), |value| {
    ///     *value = "new_value".to_string();
    /// });
    ///
    /// assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
    /// ```
    pub fn get_mut<Q, F>(&self, key: &Q, f: F) -> bool
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
        F: FnOnce(&mut V),
    {
        let mut guard = self.locked_shard(key);
        if let Some(value) = guard.get_mut(key) {
            f(value);
            true
        } else {
            false
        }
    }

    /// Maps `key` to `value` in the cache, possibly evicting old entries from the appropriate shard.
    ///
    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
    /// updated. This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// // Insert a new key
    /// assert!(cache.insert("key1".to_string(), "value1".to_string()));
    ///
    /// // Update an existing key
    /// assert!(!cache.insert("key1".to_string(), "updated_value".to_string()));
    /// ```
    pub fn insert(&self, key: K, value: V) -> bool {
        let mut guard = self.locked_shard(&key);
        guard.insert(key, value)
    }

    /// Removes the cache entry mapped to by `key`.
    ///
    /// This method returns the value removed from the cache. If `key` did not map to any value,
    /// then this returns `None`. This operation only locks the specific shard containing the key.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Remove an existing key
    /// assert_eq!(cache.remove(&"key".to_string()), Some("value".to_string()));
    ///
    /// // Attempt to remove a missing key
    /// assert_eq!(cache.remove(&"key".to_string()), None);
    /// ```
    pub fn remove<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        let mut guard = self.locked_shard(key);
        guard.remove(key)
    }

    /// Removes and returns a value from the cache that was not recently accessed.
    ///
    /// This method tries to evict from each shard in turn until it finds a value to evict.
    /// If no suitable value exists in any shard, this returns `None`.
    ///
    /// # Note
    ///
    /// This implementation differs from the non-sharded version in that it checks each shard
    /// in sequence until it finds a suitable entry to evict. This may not provide the globally
    /// optimal eviction decision across all shards, but it avoids the need to lock all shards
    /// simultaneously.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(10, 2).unwrap();
    ///
    /// // Fill the cache
    /// for i in 0..15 {
    ///     cache.insert(format!("key{}", i), format!("value{}", i));
    /// }
    ///
    /// // Should be able to evict something
    /// assert!(cache.evict().is_some());
    /// ```
    pub fn evict(&self) -> Option<V> {
        // Try each shard in turn
        for shard in &self.shards {
            let result = shard.lock().unwrap_or_else(PoisonError::into_inner).evict();
            if result.is_some() {
                return result;
            }
        }
        None
    }

    /// Removes all entries from the cache.
    ///
    /// This operation clears all stored values across all shards and resets the cache to an empty state,
    /// while maintaining the original capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// assert_eq!(cache.len(), 2);
    ///
    /// cache.clear();
    /// assert_eq!(cache.len(), 0);
    /// assert!(cache.is_empty());
    /// ```
    pub fn clear(&self) {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.clear();
        }
    }

    /// Returns all keys in the cache, collected into a `Vec`.
    ///
    /// The order of keys is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashSet;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let keys: HashSet<_> = cache.keys().into_iter().collect();
    /// assert_eq!(keys.len(), 2);
    /// assert!(keys.contains(&"key1".to_string()));
    /// assert!(keys.contains(&"key2".to_string()));
    /// ```
    pub fn keys(&self) -> Vec<K> {
        let mut all_keys = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_keys.extend(guard.keys().cloned());
        }
        all_keys
    }

    /// Returns all values in the cache.
    ///
    /// The order of values is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashSet;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let values: HashSet<_> = cache.values().into_iter().collect();
    /// assert_eq!(values.len(), 2);
    /// assert!(values.contains(&"value1".to_string()));
    /// assert!(values.contains(&"value2".to_string()));
    /// ```
    pub fn values(&self) -> Vec<V>
    where
        V: Clone,
    {
        let mut all_values = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_values.extend(guard.values().cloned());
        }
        all_values
    }

    /// Returns all key-value pairs in the cache.
    ///
    /// The order of pairs is not specified and should not be relied upon.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// # use std::collections::HashMap;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// let entries: HashMap<_, _> = cache.entries().into_iter().collect();
    /// assert_eq!(entries.len(), 2);
    /// assert_eq!(entries.get(&"key1".to_string()), Some(&"value1".to_string()));
    /// assert_eq!(entries.get(&"key2".to_string()), Some(&"value2".to_string()));
    /// ```
    pub fn entries(&self) -> Vec<(K, V)>
    where
        V: Clone,
    {
        let mut all_entries = Vec::new();
        for shard in &self.shards {
            let guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            all_entries.extend(guard.iter().map(|(k, v)| (k.clone(), v.clone())));
        }
        all_entries
    }

    /// Applies a function to all values in the cache across all shards.
    ///
    /// This method marks all entries as visited.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Update all values by appending text
    /// cache.for_each_value(|value| {
    ///     *value = format!("{}_updated", value);
    /// });
    ///
    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_updated".to_string()));
    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2_updated".to_string()));
    /// ```
    pub fn for_each_value<F>(&self, mut f: F)
    where
        F: FnMut(&mut V),
    {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.values_mut().for_each(&mut f);
        }
    }

    /// Applies a function to all key-value pairs in the cache across all shards.
    ///
    /// This method marks all entries as visited.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Update all values associated with keys containing '1'
    /// cache.for_each_entry(|(key, value)| {
    ///     if key.contains('1') {
    ///         *value = format!("{}_special", value);
    ///     }
    /// });
    ///
    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_special".to_string()));
    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2".to_string()));
    /// ```
    pub fn for_each_entry<F>(&self, mut f: F)
    where
        F: FnMut((&K, &mut V)),
    {
        for shard in &self.shards {
            let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
            guard.iter_mut().for_each(&mut f);
        }
    }

    /// Gets exclusive access to a specific shard based on the key.
    ///
    /// This can be useful for performing multiple operations atomically on entries
    /// that share the same shard. Note that the shard is selected by hashing the key passed
    /// to this method, so only entries whose keys hash to that same shard can safely be
    /// manipulated within a single call; entries inserted through the shard handle are stored
    /// in that shard regardless of which shard their own keys would normally map to.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::new(100).unwrap();
    ///
    /// // Perform multiple operations atomically
    /// cache.with_key_lock(&"foo", |shard| {
    ///     // All operations within this closure have exclusive access to the shard
    ///     shard.insert("key1".to_string(), "value1".to_string());
    ///     shard.insert("key2".to_string(), "value2".to_string());
    ///
    ///     // We can check state mid-transaction
    ///     assert!(shard.contains_key(&"key1".to_string()));
    /// });
    /// ```
    pub fn with_key_lock<Q, F, T>(&self, key: &Q, f: F) -> T
    where
        Q: Hash + ?Sized,
        F: FnOnce(&mut SieveCache<K, V>) -> T,
    {
        let mut guard = self.locked_shard(key);
        f(&mut guard)
    }

    /// Returns the number of shards in this cache.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 32).unwrap();
    /// assert_eq!(cache.num_shards(), 32);
    /// ```
    pub fn num_shards(&self) -> usize {
        self.num_shards
    }

    /// Gets a specific shard by index.
    ///
    /// This is mainly useful for advanced use cases and maintenance operations.
    /// Returns `None` if the index is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 8).unwrap();
    ///
    /// // Access a valid shard
    /// assert!(cache.get_shard_by_index(0).is_some());
    ///
    /// // Out of bounds index
    /// assert!(cache.get_shard_by_index(100).is_none());
    /// ```
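    ///
    /// As a sketch of a maintenance-style use, a shard handle can be locked directly to
    /// inspect that shard in isolation (here just reading its current length):
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, String> = ShardedSieveCache::with_shards(1000, 8).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// let shard = cache.get_shard_by_index(0).unwrap();
    /// let len = shard.lock().unwrap().len();
    /// assert!(len <= cache.len());
    /// ```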
    pub fn get_shard_by_index(&self, index: usize) -> Option<&Arc<Mutex<SieveCache<K, V>>>> {
        self.shards.get(index)
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// Removes all entries for which the provided function returns `false`.
    /// The elements are visited in arbitrary, unspecified order, across all shards.
    /// This operation first snapshots all entries (cloning keys and values), evaluates the
    /// predicate outside the locks, and then removes rejected entries one at a time, so it
    /// is not atomic with respect to concurrent modifications.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::ShardedSieveCache;
    /// let cache: ShardedSieveCache<String, i32> = ShardedSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), 100);
    /// cache.insert("key2".to_string(), 200);
    /// cache.insert("key3".to_string(), 300);
    ///
    /// // Keep only entries with values greater than 150
    /// cache.retain(|_, value| *value > 150);
    ///
    /// assert_eq!(cache.len(), 2);
    /// assert!(!cache.contains_key(&"key1".to_string()));
    /// assert!(cache.contains_key(&"key2".to_string()));
    /// assert!(cache.contains_key(&"key3".to_string()));
    /// ```
    pub fn retain<F>(&self, mut f: F)
    where
        F: FnMut(&K, &V) -> bool,
        V: Clone,
    {
        // First, collect all entries so we can check them without holding locks
        let entries = self.entries();

        // Now go through all entries and determine which ones to remove
        for (key, value) in entries {
            // Check the predicate outside the lock - using cloned data
            if !f(&key, &value) {
                // The predicate returned false, so remove this entry
                self.remove(&key);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[test]
    fn test_sharded_cache_basics() {
        let cache = ShardedSieveCache::new(100).unwrap();

        // Insert a value
        assert!(cache.insert("key1".to_string(), "value1".to_string()));

        // Read back the value
        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));

        // Check contains_key
        assert!(cache.contains_key(&"key1".to_string()));

        // Check capacity and length
        assert!(cache.capacity() >= 100); // Total capacity is at least the requested amount
        assert_eq!(cache.len(), 1);

        // Remove a value
        assert_eq!(
            cache.remove(&"key1".to_string()),
            Some("value1".to_string())
        );
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_custom_shard_count() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();
        assert_eq!(cache.num_shards(), 4);

        for i in 0..10 {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            cache.insert(key, value);
        }

        assert_eq!(cache.len(), 10);
    }

    #[test]
    fn test_parallel_access() {
        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
        let mut handles = vec![];

        // Spawn 8 threads that each insert 100 items
        for t in 0..8 {
            let cache_clone = Arc::clone(&cache);
            let handle = thread::spawn(move || {
                for i in 0..100 {
                    let key = format!("thread{}key{}", t, i);
                    let value = format!("value{}_{}", t, i);
                    cache_clone.insert(key, value);
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify total item count
        assert_eq!(cache.len(), 800);

        // Check a few random keys
        assert_eq!(
            cache.get(&"thread0key50".to_string()),
            Some("value0_50".to_string())
        );
        assert_eq!(
            cache.get(&"thread7key99".to_string()),
            Some("value7_99".to_string())
        );
    }

    #[test]
    fn test_with_key_lock() {
        let cache = ShardedSieveCache::new(100).unwrap();

        // Perform multiple operations atomically within the shard selected by "test_key"
        cache.with_key_lock(&"test_key", |shard| {
            shard.insert("key1".to_string(), "value1".to_string());
            shard.insert("key2".to_string(), "value2".to_string());
            shard.insert("key3".to_string(), "value3".to_string());
        });

        assert_eq!(cache.len(), 3);
    }

    #[test]
    fn test_eviction() {
        let cache = ShardedSieveCache::with_shards(10, 2).unwrap();

        // Fill the cache
        for i in 0..15 {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            cache.insert(key, value);
        }

        // The cache should not exceed its capacity
        assert!(cache.len() <= 10);

        // We should be able to evict items
        let evicted = cache.evict();
        assert!(evicted.is_some());
    }

    #[test]
    fn test_contention() {
        let cache = Arc::new(ShardedSieveCache::with_shards(1000, 16).unwrap());
        let mut handles = vec![];

        // Create distinct keys; with 16 shards they are likely to spread across several shards
        let keys: Vec<String> = (0..16).map(|i| format!("shard_key_{}", i)).collect();

        // Spawn 16 threads, each hammering a different key
        for i in 0..16 {
            let cache_clone = Arc::clone(&cache);
            let key = keys[i].clone();

            let handle = thread::spawn(move || {
                for j in 0..1000 {
                    cache_clone.insert(key.clone(), format!("value_{}", j));
                    let _ = cache_clone.get(&key);

                    // Small sleep to make contention more likely
                    if j % 100 == 0 {
                        thread::sleep(Duration::from_micros(1));
                    }
                }
            });

            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // All keys should still be present
        for key in keys {
            assert!(cache.contains_key(&key));
        }
    }

    #[test]
    fn test_get_mut() {
        let cache = ShardedSieveCache::new(100).unwrap();
        cache.insert("key".to_string(), "value".to_string());

        // Modify the value in-place
        let modified = cache.get_mut(&"key".to_string(), |value| {
            *value = "new_value".to_string();
        });
        assert!(modified);

        // Verify the value was updated
        assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));

        // Try to modify a non-existent key
        let modified = cache.get_mut(&"missing".to_string(), |_| {
            panic!("This should not be called");
        });
        assert!(!modified);
    }

    #[test]
    fn test_get_mut_concurrent() {
        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());

        // Insert initial values
        for i in 0..10 {
            cache.insert(format!("key{}", i), 0);
        }

        let mut handles = vec![];

        // Spawn 5 threads that modify values concurrently
        for _ in 0..5 {
            let cache_clone = Arc::clone(&cache);

            let handle = thread::spawn(move || {
                for i in 0..10 {
                    for _ in 0..100 {
                        cache_clone.get_mut(&format!("key{}", i), |value| {
                            *value += 1;
                        });
                    }
                }
            });

            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Each key should have been incremented 500 times (5 threads * 100 increments each)
        for i in 0..10 {
            assert_eq!(cache.get(&format!("key{}", i)), Some(500));
        }
    }

    #[test]
    fn test_clear() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..20 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        assert_eq!(cache.len(), 20);
        assert!(!cache.is_empty());

        // Clear all shards
        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());

        // Verify entries are gone
        for i in 0..20 {
            assert_eq!(cache.get(&format!("key{}", i)), None);
        }
    }

    #[test]
    fn test_keys_values_entries() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..10 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Test keys
        let keys = cache.keys();
        assert_eq!(keys.len(), 10);
        for i in 0..10 {
            assert!(keys.contains(&format!("key{}", i)));
        }

        // Test values
        let values = cache.values();
        assert_eq!(values.len(), 10);
        for i in 0..10 {
            assert!(values.contains(&format!("value{}", i)));
        }

        // Test entries
        let entries = cache.entries();
        assert_eq!(entries.len(), 10);
        for i in 0..10 {
            assert!(entries.contains(&(format!("key{}", i), format!("value{}", i))));
        }
    }

    #[test]
    fn test_for_each_operations() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        for i in 0..10 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Test for_each_value
        cache.for_each_value(|value| {
            *value = format!("{}_updated", value);
        });

        for i in 0..10 {
            assert_eq!(
                cache.get(&format!("key{}", i)),
                Some(format!("value{}_updated", i))
            );
        }

        // Test for_each_entry
        cache.for_each_entry(|(key, value)| {
            if key.ends_with("5") {
                *value = format!("{}_special", value);
            }
        });

        assert_eq!(
            cache.get(&"key5".to_string()),
            Some("value5_updated_special".to_string())
        );
        assert_eq!(
            cache.get(&"key1".to_string()),
            Some("value1_updated".to_string())
        );
    }

    #[test]
    fn test_multithreaded_operations() {
        let cache = Arc::new(ShardedSieveCache::with_shards(100, 8).unwrap());

        // Fill the cache
        for i in 0..20 {
            cache.insert(format!("key{}", i), format!("value{}", i));
        }

        // Clear while concurrent accesses happen
        let cache_clone = Arc::clone(&cache);
        let handle = thread::spawn(move || {
            // This thread tries to access the cache while the main thread clears it
            thread::sleep(Duration::from_millis(10));

            for i in 0..20 {
                let _ = cache_clone.get(&format!("key{}", i));
                thread::sleep(Duration::from_micros(100));
            }
        });

        // Main thread clears the cache
        thread::sleep(Duration::from_millis(5));
        cache.clear();

        // Add new entries
        for i in 30..40 {
            cache.insert(format!("newkey{}", i), format!("newvalue{}", i));
        }

        // Wait for the thread to complete
        handle.join().unwrap();

        // Verify final state
        assert_eq!(cache.len(), 10);
        for i in 30..40 {
            assert_eq!(
                cache.get(&format!("newkey{}", i)),
                Some(format!("newvalue{}", i))
            );
        }
    }

    #[test]
    fn test_retain() {
        let cache = ShardedSieveCache::with_shards(100, 4).unwrap();

        // Add entries to various shards
        cache.insert("even1".to_string(), 2);
        cache.insert("even2".to_string(), 4);
        cache.insert("odd1".to_string(), 1);
        cache.insert("odd2".to_string(), 3);

        assert_eq!(cache.len(), 4);

        // Keep only entries with even values
        cache.retain(|_, v| v % 2 == 0);

        assert_eq!(cache.len(), 2);
        assert!(cache.contains_key(&"even1".to_string()));
        assert!(cache.contains_key(&"even2".to_string()));
        assert!(!cache.contains_key(&"odd1".to_string()));
        assert!(!cache.contains_key(&"odd2".to_string()));

        // Keep only entries with keys containing '1'
        cache.retain(|k, _| k.contains('1'));

        assert_eq!(cache.len(), 1);
        assert!(cache.contains_key(&"even1".to_string()));
        assert!(!cache.contains_key(&"even2".to_string()));
    }

    #[test]
    fn test_retain_concurrent() {
        // Create a well-controlled test case that avoids race conditions
        let cache = ShardedSieveCache::with_shards(100, 8).unwrap();

        // Add a known set of entries
        for i in 0..10 {
            cache.insert(format!("even{}", i * 2), i * 2);
            cache.insert(format!("odd{}", i * 2 + 1), i * 2 + 1);
        }

        // Retain only odd values
        cache.retain(|_, value| value % 2 == 1);

        // Check that we have the right number of entries
        assert_eq!(cache.len(), 10, "Should have 10 odd-valued entries");

        // Verify that all remaining entries have odd values
        for (_, value) in cache.entries() {
            assert_eq!(
                value % 2,
                1,
                "Found an even value {value} which should have been removed"
            );
        }

        // Verify the specific entries we expect
        for i in 0..10 {
            let odd_key = format!("odd{}", i * 2 + 1);
            assert!(cache.contains_key(&odd_key), "Missing odd entry: {odd_key}");
            assert_eq!(cache.get(&odd_key), Some(i * 2 + 1));
        }
    }
}