sieve_cache/
sync.rs

1use crate::SieveCache;
2use std::borrow::Borrow;
3use std::fmt;
4use std::hash::Hash;
5use std::sync::{Arc, Mutex, MutexGuard, PoisonError};
6
7/// A thread-safe wrapper around `SieveCache`.
8///
9/// This provides a thread-safe implementation of the SIEVE cache algorithm by wrapping
10/// the standard `SieveCache` in an `Arc<Mutex<>>`. It offers the same functionality as
11/// the underlying cache but with thread safety guarantees. Cloning a `SyncSieveCache` is cheap: clones share the same underlying cache through the `Arc`.
12///
13/// # Thread Safety
14///
15/// All operations acquire a lock on the entire cache, which provides strong consistency
16/// but may become a bottleneck under high contention. For workloads with high concurrency,
17/// consider using [`ShardedSieveCache`](crate::ShardedSieveCache) instead, which partitions
18/// the cache into multiple independently-locked segments.
19///
20/// ## Lock Behavior
21///
22/// The thread safety mechanism works as follows:
23///
24/// - Simple query operations (e.g., `get`, `contains_key`) hold the lock only long enough to
25///   read and clone the value
26/// - Modification operations (e.g., `insert`, `remove`) hold the lock for the duration of the change
27/// - Operations that accept callbacks have specific lock behavior:
28/// - `get_mut` uses a clone-modify-update pattern: it briefly locks to clone the value, runs the callback with the lock released, then re-locks to write the result back
29///   - `for_each_value`, `for_each_entry`, and `retain` collect data under the lock, then
30///     release it before processing to avoid blocking other threads
31///
32/// ## Deadlock Prevention
33///
34/// This implementation prevents deadlocks by:
35///
36/// - Never allowing callbacks to execute while holding the cache lock
37/// - Using a clone-modify-update pattern for all callbacks that need to modify values
38/// - Holding at most one lock at a time, so there is no nested locking and no lock-ordering hazard
39/// - Providing explicit transaction methods that make locking transparent to the user
40///
41/// ## Consistency Guarantees
42///
43/// - Operations on individual keys are atomic and isolated
44/// - Snapshot-based operations (e.g., iteration, bulk operations) may not reflect
45///   concurrent modifications
46/// - When using callback functions, be aware they execute outside the lock which means
47///   the cache state may change between lock acquisitions
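///
/// When a multi-step invariant must hold (for example, check-then-insert), perform all of the
/// steps under a single lock using `with_lock`. A minimal sketch:
///
/// ```
/// # use sieve_cache::SyncSieveCache;
/// let cache = SyncSieveCache::new(100).unwrap();
///
/// // Insert a default value only if the key is still absent, as one atomic step.
/// cache.with_lock(|inner| {
///     if !inner.contains_key(&"config".to_string()) {
///         inner.insert("config".to_string(), "default".to_string());
///     }
/// });
///
/// assert_eq!(cache.get(&"config".to_string()), Some("default".to_string()));
/// ```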
48///
49/// # Examples
50///
51/// ```
52/// # use sieve_cache::SyncSieveCache;
53/// let cache = SyncSieveCache::new(100).unwrap();
54///
55/// // Multiple threads can safely access the cache
56/// cache.insert("key1".to_string(), "value1".to_string());
57/// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
58/// ```
59///
60/// Example with callbacks:
61///
62/// ```
63/// # use sieve_cache::SyncSieveCache;
64/// # use std::thread;
65/// let cache = SyncSieveCache::new(100).unwrap();
66/// cache.insert("key1".to_string(), 1);
67/// cache.insert("key2".to_string(), 2);
68///
69/// // Create a clone to move into another thread
70/// let cache_clone = cache.clone();
71///
72/// // Spawn a thread that modifies values
73/// let handle = thread::spawn(move || {
74///     // The cache is safely accessible from multiple threads
75///     cache_clone.for_each_value(|value| {
76///         *value += 10;  // Add 10 to each value
77///     });
78/// });
79///
80/// // Wait for the background thread to complete
81/// handle.join().unwrap();
82///
83/// // Values have been updated
84/// assert_eq!(cache.get(&"key1".to_string()), Some(11));
85/// assert_eq!(cache.get(&"key2".to_string()), Some(12));
86/// ```
87#[derive(Clone)]
88pub struct SyncSieveCache<K, V>
89where
90    K: Eq + Hash + Clone + Send + Sync,
91    V: Send + Sync,
92{
93    inner: Arc<Mutex<SieveCache<K, V>>>,
94}
95
96impl<K, V> Default for SyncSieveCache<K, V>
97where
98    K: Eq + Hash + Clone + Send + Sync,
99    V: Send + Sync,
100{
101    /// Creates a new cache with a default capacity of 100 entries.
102    ///
103    /// # Panics
104    ///
105    /// Panics if the underlying `SieveCache::new()` returns an error, which should never
106    /// happen for a non-zero capacity.
107    ///
108    /// # Examples
109    ///
110    /// ```
111    /// # use sieve_cache::SyncSieveCache;
112    /// # use std::default::Default;
113    /// let cache: SyncSieveCache<String, u32> = Default::default();
114    /// assert_eq!(cache.capacity(), 100);
115    /// ```
116    fn default() -> Self {
117        Self::new(100).expect("Failed to create cache with default capacity")
118    }
119}
120
121impl<K, V> fmt::Debug for SyncSieveCache<K, V>
122where
123    K: Eq + Hash + Clone + Send + Sync + fmt::Debug,
124    V: Send + Sync + fmt::Debug,
125{
126    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
127        let guard = self.locked_cache();
128        f.debug_struct("SyncSieveCache")
129            .field("capacity", &guard.capacity())
130            .field("len", &guard.len())
131            .finish()
132    }
133}
134
135impl<K, V> From<SieveCache<K, V>> for SyncSieveCache<K, V>
136where
137    K: Eq + Hash + Clone + Send + Sync,
138    V: Send + Sync,
139{
140    /// Creates a new thread-safe cache from an existing `SieveCache`.
141    ///
142    /// This allows for easily converting a single-threaded cache to a thread-safe version.
143    ///
144    /// # Examples
145    ///
146    /// ```
147    /// # use sieve_cache::{SieveCache, SyncSieveCache};
148    /// let mut single_threaded = SieveCache::new(100).unwrap();
149    /// single_threaded.insert("key".to_string(), "value".to_string());
150    ///
151    /// // Convert to thread-safe version
152    /// let thread_safe = SyncSieveCache::from(single_threaded);
153    /// assert_eq!(thread_safe.get(&"key".to_string()), Some("value".to_string()));
154    /// ```
155    fn from(cache: SieveCache<K, V>) -> Self {
156        Self {
157            inner: Arc::new(Mutex::new(cache)),
158        }
159    }
160}
161
162impl<K, V> IntoIterator for SyncSieveCache<K, V>
163where
164    K: Eq + Hash + Clone + Send + Sync,
165    V: Clone + Send + Sync,
166{
167    type Item = (K, V);
168    type IntoIter = std::vec::IntoIter<(K, V)>;
169
170    /// Converts the cache into an iterator over its key-value pairs.
171    ///
172    /// This collects all entries into a Vec and returns an iterator over that Vec.
173    ///
174    /// # Examples
175    ///
176    /// ```
177    /// # use sieve_cache::SyncSieveCache;
178    /// # use std::collections::HashMap;
179    /// let cache = SyncSieveCache::new(100).unwrap();
180    /// cache.insert("key1".to_string(), "value1".to_string());
181    /// cache.insert("key2".to_string(), "value2".to_string());
182    ///
183    /// // Collect into a HashMap
184    /// let map: HashMap<_, _> = cache.into_iter().collect();
185    /// assert_eq!(map.len(), 2);
186    /// assert_eq!(map.get("key1"), Some(&"value1".to_string()));
187    /// ```
188    fn into_iter(self) -> Self::IntoIter {
189        self.entries().into_iter()
190    }
191}
192
193impl<K, V> SyncSieveCache<K, V>
194where
195    K: Eq + Hash + Clone + Send + Sync,
196    V: Send + Sync,
197{
198    /// Creates a new thread-safe cache with the given capacity.
199    ///
200    /// # Errors
201    ///
202    /// Returns an error if the capacity is zero.
203    ///
204    /// # Examples
205    ///
206    /// ```
207    /// # use sieve_cache::SyncSieveCache;
208    /// let cache = SyncSieveCache::<String, String>::new(100).unwrap();
209    /// ```
210    pub fn new(capacity: usize) -> Result<Self, &'static str> {
211        let cache = SieveCache::new(capacity)?;
212        Ok(Self {
213            inner: Arc::new(Mutex::new(cache)),
214        })
215    }
216
217    /// Returns a locked reference to the underlying cache.
218    ///
219    /// This is an internal helper method to abstract away the lock handling.
220    /// If the mutex is poisoned due to a panic in another thread, the poison
221    /// error is recovered from by calling `into_inner()` to access the underlying data.
222    #[inline]
223    fn locked_cache(&self) -> MutexGuard<'_, SieveCache<K, V>> {
224        self.inner.lock().unwrap_or_else(PoisonError::into_inner)
225    }
226
227    /// Returns the capacity of the cache.
228    ///
229    /// # Examples
230    ///
231    /// ```
232    /// # use sieve_cache::SyncSieveCache;
233    /// let cache = SyncSieveCache::<String, u32>::new(100).unwrap();
234    /// assert_eq!(cache.capacity(), 100);
235    /// ```
236    pub fn capacity(&self) -> usize {
237        self.locked_cache().capacity()
238    }
239
240    /// Returns the number of cached values.
241    ///
242    /// # Examples
243    ///
244    /// ```
245    /// # use sieve_cache::SyncSieveCache;
246    /// let cache = SyncSieveCache::new(100).unwrap();
247    /// cache.insert("key".to_string(), "value".to_string());
248    /// assert_eq!(cache.len(), 1);
249    /// ```
250    pub fn len(&self) -> usize {
251        self.locked_cache().len()
252    }
253
254    /// Returns `true` when no values are currently cached.
255    ///
256    /// # Examples
257    ///
258    /// ```
259    /// # use sieve_cache::SyncSieveCache;
260    /// let cache = SyncSieveCache::<String, String>::new(100).unwrap();
261    /// assert!(cache.is_empty());
262    ///
263    /// cache.insert("key".to_string(), "value".to_string());
264    /// assert!(!cache.is_empty());
265    /// ```
266    pub fn is_empty(&self) -> bool {
267        self.locked_cache().is_empty()
268    }
269
270    /// Returns `true` if there is a value in the cache mapped to by `key`.
271    ///
272    /// # Examples
273    ///
274    /// ```
275    /// # use sieve_cache::SyncSieveCache;
276    /// let cache = SyncSieveCache::new(100).unwrap();
277    /// cache.insert("key".to_string(), "value".to_string());
278    ///
279    /// assert!(cache.contains_key(&"key".to_string()));
280    /// assert!(!cache.contains_key(&"missing".to_string()));
281    /// ```
282    pub fn contains_key<Q>(&self, key: &Q) -> bool
283    where
284        Q: Hash + Eq + ?Sized,
285        K: Borrow<Q>,
286    {
287        let guard = self.locked_cache();
288        guard.contains_key(key)
289    }
290
291    /// Gets a clone of the value in the cache mapped to by `key`.
292    ///
293    /// If no value exists for `key`, this returns `None`.
294    ///
295    /// # Note
296    ///
297    /// Unlike the unwrapped `SieveCache`, this returns a clone of the value
298    /// rather than a reference, since the mutex guard would be dropped after
299    /// this method returns. This means that `V` must implement `Clone`.
300    ///
301    /// # Examples
302    ///
303    /// ```
304    /// # use sieve_cache::SyncSieveCache;
305    /// let cache = SyncSieveCache::new(100).unwrap();
306    /// cache.insert("key".to_string(), "value".to_string());
307    ///
308    /// assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
309    /// assert_eq!(cache.get(&"missing".to_string()), None);
310    /// ```
311    pub fn get<Q>(&self, key: &Q) -> Option<V>
312    where
313        Q: Hash + Eq + ?Sized,
314        K: Borrow<Q>,
315        V: Clone,
316    {
317        let mut guard = self.locked_cache();
318        guard.get(key).cloned()
319    }
320
321    /// Updates the value in the cache mapped to by `key` via a callback function.
322    ///
323    /// If no value exists for `key`, the callback is not invoked and this returns `false`.
324    /// Otherwise, the callback is invoked with a mutable reference to a clone of the value, the clone is written back to the cache, and this returns `true` (or `false` if the entry was removed while the callback ran).
325    ///
326    /// This operation marks the entry as "visited" in the SIEVE algorithm,
327    /// which affects eviction decisions.
328    ///
329    /// # Thread Safety
330    ///
331    /// This method operates safely with recursive calls by:
332    ///
333    /// 1. Cloning the value with a short-lived lock
334    /// 2. Releasing the lock during callback execution
335    /// 3. Re-acquiring the lock to update the original value
336    ///
337    /// This approach means:
338    ///
339    /// - The callback can safely make other calls to the same cache instance
340    /// - The value can be modified by other threads during the callback execution
341    /// - Changes are not visible to other threads until the callback completes
342    /// - Last writer wins if multiple threads modify the same key concurrently (see the second example below)
343    ///
344    /// # Examples
345    ///
346    /// ```
347    /// # use sieve_cache::SyncSieveCache;
348    /// let cache = SyncSieveCache::new(100).unwrap();
349    /// cache.insert("key".to_string(), "value".to_string());
350    ///
351    /// // Modify the value in-place
352    /// cache.get_mut(&"key".to_string(), |value| {
353    ///     *value = "new_value".to_string();
354    /// });
355    ///
356    /// assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
357    /// ```
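    ///
    /// Because the callback operates on a clone that is written back afterwards, concurrent
    /// read-modify-write updates to the same key can overwrite each other (last writer wins).
    /// When such updates must not be lost, a sketch using `with_lock` keeps the whole update
    /// under a single lock acquisition:
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("counter".to_string(), 0u64);
    ///
    /// // The lock is held for the entire read-modify-write, so no update is lost.
    /// cache.with_lock(|inner| {
    ///     if let Some(v) = inner.get_mut(&"counter".to_string()) {
    ///         *v += 1;
    ///     }
    /// });
    ///
    /// assert_eq!(cache.get(&"counter".to_string()), Some(1));
    /// ```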
358    pub fn get_mut<Q, F>(&self, key: &Q, f: F) -> bool
359    where
360        Q: Hash + Eq + ?Sized,
361        K: Borrow<Q>,
362        F: FnOnce(&mut V),
363        V: Clone,
364    {
365        // Get a clone of the value if it exists, to avoid deadlocks
366        // if the callback tries to use other methods on this cache
367        let value_opt = {
368            let mut guard = self.locked_cache();
369            // Clone the value before releasing the lock
370            guard.get_mut(key).cloned()
371        };
372
373        if let Some(mut value) = value_opt {
374            // Execute the callback on the cloned value without holding the lock
375            f(&mut value);
376
377            // Update the value back to the cache
378            let mut guard = self.locked_cache();
379            if let Some(original) = guard.get_mut(key) {
380                *original = value;
381                true
382            } else {
383                // Key was removed while callback was executing
384                false
385            }
386        } else {
387            false
388        }
389    }
390
391    /// Maps `key` to `value` in the cache, possibly evicting old entries.
392    ///
393    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
394    /// updated.
395    ///
396    /// # Examples
397    ///
398    /// ```
399    /// # use sieve_cache::SyncSieveCache;
400    /// let cache = SyncSieveCache::new(100).unwrap();
401    ///
402    /// // Insert a new key
403    /// assert!(cache.insert("key1".to_string(), "value1".to_string()));
404    ///
405    /// // Update an existing key
406    /// assert!(!cache.insert("key1".to_string(), "updated_value".to_string()));
407    /// ```
408    pub fn insert(&self, key: K, value: V) -> bool {
409        let mut guard = self.locked_cache();
410        guard.insert(key, value)
411    }
412
413    /// Removes the cache entry mapped to by `key`.
414    ///
415    /// This method returns the value removed from the cache. If `key` did not map to any value,
416    /// then this returns `None`.
417    ///
418    /// # Examples
419    ///
420    /// ```
421    /// # use sieve_cache::SyncSieveCache;
422    /// let cache = SyncSieveCache::new(100).unwrap();
423    /// cache.insert("key".to_string(), "value".to_string());
424    ///
425    /// // Remove an existing key
426    /// assert_eq!(cache.remove(&"key".to_string()), Some("value".to_string()));
427    ///
428    /// // Attempt to remove a missing key
429    /// assert_eq!(cache.remove(&"key".to_string()), None);
430    /// ```
431    pub fn remove<Q>(&self, key: &Q) -> Option<V>
432    where
433        K: Borrow<Q>,
434        Q: Eq + Hash + ?Sized,
435    {
436        let mut guard = self.locked_cache();
437        guard.remove(key)
438    }
439
440    /// Removes and returns a value from the cache that was not recently accessed.
441    ///
442    /// This implements the SIEVE eviction algorithm to select an entry for removal.
443    /// If no suitable value exists, this returns `None`.
444    ///
445    /// # Examples
446    ///
447    /// ```
448    /// # use sieve_cache::SyncSieveCache;
449    /// let cache = SyncSieveCache::new(2).unwrap();
450    /// cache.insert("key1".to_string(), "value1".to_string());
451    /// cache.insert("key2".to_string(), "value2".to_string());
452    ///
453    /// // Accessing key1 marks it as visited in the SIEVE algorithm
454    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
455    ///
456    /// // Explicitly evict an entry that was not recently accessed
457    /// let evicted = cache.evict();
458    /// assert!(evicted.is_some());
459    ///
460    /// assert_eq!(cache.len(), 1);
461    /// ```
462    pub fn evict(&self) -> Option<V> {
463        let mut guard = self.locked_cache();
464        guard.evict()
465    }
466
467    /// Removes all entries from the cache.
468    ///
469    /// This operation clears all stored values and resets the cache to an empty state,
470    /// while maintaining the original capacity.
471    ///
472    /// # Examples
473    ///
474    /// ```
475    /// # use sieve_cache::SyncSieveCache;
476    /// let cache = SyncSieveCache::new(100).unwrap();
477    /// cache.insert("key1".to_string(), "value1".to_string());
478    /// cache.insert("key2".to_string(), "value2".to_string());
479    ///
480    /// assert_eq!(cache.len(), 2);
481    ///
482    /// cache.clear();
483    /// assert_eq!(cache.len(), 0);
484    /// assert!(cache.is_empty());
485    /// ```
486    pub fn clear(&self) {
487        let mut guard = self.locked_cache();
488        guard.clear();
489    }
490
491    /// Returns an iterator over all keys in the cache.
492    ///
493    /// The order of keys is not specified and should not be relied upon.
494    ///
495    /// # Examples
496    ///
497    /// ```
498    /// # use sieve_cache::SyncSieveCache;
499    /// # use std::collections::HashSet;
500    /// let cache = SyncSieveCache::new(100).unwrap();
501    /// cache.insert("key1".to_string(), "value1".to_string());
502    /// cache.insert("key2".to_string(), "value2".to_string());
503    ///
504    /// let keys: HashSet<_> = cache.keys().into_iter().collect();
505    /// assert_eq!(keys.len(), 2);
506    /// assert!(keys.contains(&"key1".to_string()));
507    /// assert!(keys.contains(&"key2".to_string()));
508    /// ```
509    pub fn keys(&self) -> Vec<K> {
510        let guard = self.locked_cache();
511        guard.keys().cloned().collect()
512    }
513
514    /// Returns all values in the cache.
515    ///
516    /// The order of values is not specified and should not be relied upon.
517    ///
518    /// # Examples
519    ///
520    /// ```
521    /// # use sieve_cache::SyncSieveCache;
522    /// # use std::collections::HashSet;
523    /// let cache = SyncSieveCache::new(100).unwrap();
524    /// cache.insert("key1".to_string(), "value1".to_string());
525    /// cache.insert("key2".to_string(), "value2".to_string());
526    ///
527    /// let values: HashSet<_> = cache.values().into_iter().collect();
528    /// assert_eq!(values.len(), 2);
529    /// assert!(values.contains(&"value1".to_string()));
530    /// assert!(values.contains(&"value2".to_string()));
531    /// ```
532    pub fn values(&self) -> Vec<V>
533    where
534        V: Clone,
535    {
536        let guard = self.locked_cache();
537        guard.values().cloned().collect()
538    }
539
540    /// Returns all key-value pairs in the cache.
541    ///
542    /// The order of pairs is not specified and should not be relied upon.
543    ///
544    /// # Examples
545    ///
546    /// ```
547    /// # use sieve_cache::SyncSieveCache;
548    /// # use std::collections::HashMap;
549    /// let cache = SyncSieveCache::new(100).unwrap();
550    /// cache.insert("key1".to_string(), "value1".to_string());
551    /// cache.insert("key2".to_string(), "value2".to_string());
552    ///
553    /// let entries: HashMap<_, _> = cache.entries().into_iter().collect();
554    /// assert_eq!(entries.len(), 2);
555    /// assert_eq!(entries.get(&"key1".to_string()), Some(&"value1".to_string()));
556    /// assert_eq!(entries.get(&"key2".to_string()), Some(&"value2".to_string()));
557    /// ```
558    pub fn entries(&self) -> Vec<(K, V)>
559    where
560        V: Clone,
561    {
562        let guard = self.locked_cache();
563        guard.iter().map(|(k, v)| (k.clone(), v.clone())).collect()
564    }
565
566    /// Applies a function to all values in the cache.
567    ///
568    /// This method safely processes values by collecting them with the lock held,
569    /// then releasing the lock before applying the function to each value individually.
570    /// If the function modifies the values, the changes are saved back to the cache.
571    ///
572    /// # Thread Safety
573    ///
574    /// This method operates in three phases:
575    /// 1. It acquires the lock and creates a complete snapshot of the cache
576    /// 2. It releases the lock and applies the callback to each value
577    /// 3. It acquires the lock again individually for each value when writing changes back
578    ///
579    /// This approach means:
580    /// - The lock is not held during callback execution, preventing lock contention
581    /// - If other threads modify the cache between steps 1 and 3, those changes might be overwritten
582    /// - The callback sees a point-in-time snapshot that might not reflect the latest state
583    /// - For long-running operations, consider using individual key operations instead
584    ///
585    /// # Examples
586    ///
587    /// ```
588    /// # use sieve_cache::SyncSieveCache;
589    /// let cache = SyncSieveCache::new(100).unwrap();
590    /// cache.insert("key1".to_string(), "value1".to_string());
591    /// cache.insert("key2".to_string(), "value2".to_string());
592    ///
593    /// // Update all values by appending text
594    /// cache.for_each_value(|value| {
595    ///     *value = format!("{}_updated", value);
596    /// });
597    ///
598    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_updated".to_string()));
599    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2_updated".to_string()));
600    /// ```
601    pub fn for_each_value<F>(&self, mut f: F)
602    where
603        F: FnMut(&mut V),
604        V: Clone,
605    {
606        // First, safely collect all keys and values while holding the lock
607        let entries = self.with_lock(|inner_cache| {
608            inner_cache
609                .iter()
610                .map(|(k, v)| (k.clone(), v.clone()))
611                .collect::<Vec<(K, V)>>()
612        });
613
614        // Process each value outside the lock
615        let mut updated_entries = Vec::new();
616        for (key, mut value) in entries {
617            f(&mut value);
618            updated_entries.push((key, value));
619        }
620
621        // Update any changed values back to the cache
622        for (key, value) in updated_entries {
623            self.insert(key, value);
624        }
625    }
626
627    /// Applies a function to all key-value pairs in the cache.
628    ///
629    /// This method safely processes key-value pairs by collecting them with the lock held,
630    /// then releasing the lock before applying the function to each pair individually.
631    /// If the function modifies the values, the changes are saved back to the cache.
632    ///
633    /// # Thread Safety
634    ///
635    /// This method operates in three phases:
636    /// 1. It acquires the lock and creates a complete snapshot of the cache
637    /// 2. It releases the lock and applies the callback to each key-value pair
638    /// 3. It acquires the lock again individually for each entry when writing changes back
639    ///
640    /// This approach means:
641    /// - The lock is not held during callback execution, preventing lock contention
642    /// - If other threads modify the cache between steps 1 and 3, those changes might be overwritten
643    /// - The callback sees a point-in-time snapshot that might not reflect the latest state
644    /// - For long-running operations, consider using individual key operations instead
645    ///
646    /// # Examples
647    ///
648    /// ```
649    /// # use sieve_cache::SyncSieveCache;
650    /// let cache = SyncSieveCache::new(100).unwrap();
651    /// cache.insert("key1".to_string(), "value1".to_string());
652    /// cache.insert("key2".to_string(), "value2".to_string());
653    ///
654    /// // Update all values associated with keys containing '1'
655    /// cache.for_each_entry(|(key, value)| {
656    ///     if key.contains('1') {
657    ///         *value = format!("{}_special", value);
658    ///     }
659    /// });
660    ///
661    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1_special".to_string()));
662    /// assert_eq!(cache.get(&"key2".to_string()), Some("value2".to_string()));
663    /// ```
664    pub fn for_each_entry<F>(&self, mut f: F)
665    where
666        F: FnMut((&K, &mut V)),
667        V: Clone,
668    {
669        // First, safely collect all keys and values while holding the lock
670        let entries = self.with_lock(|inner_cache| {
671            inner_cache
672                .iter()
673                .map(|(k, v)| (k.clone(), v.clone()))
674                .collect::<Vec<(K, V)>>()
675        });
676
677        // Process each entry outside the lock
678        let mut updated_entries = Vec::new();
679        for (key, mut value) in entries {
680            let key_ref = &key;
681            f((key_ref, &mut value));
682            updated_entries.push((key, value));
683        }
684
685        // Update any changed values back to the cache
686        for (key, value) in updated_entries {
687            self.insert(key, value);
688        }
689    }
690
691    /// Gets exclusive access to the underlying cache to perform multiple operations atomically.
692    ///
693    /// This is useful when you need to perform a series of operations that depend on each other
694    /// and you want to ensure that no other thread can access the cache between operations.
695    ///
696    /// # Thread Safety
697    ///
698    /// This method provides the strongest thread safety guarantees by:
699    ///
700    /// - Acquiring the lock once for the entire duration of the callback
701    /// - Allowing multiple operations to be performed atomically within a single lock scope
702    /// - Ensuring all operations within the callback are fully isolated from other threads
703    ///
704    /// However, this also means:
705    ///
706    /// - The entire cache is locked during the callback execution
707    /// - Other threads will be blocked from accessing the cache until the callback completes
708    /// - Long-running operations within the callback will cause high contention
709    /// - Callbacks must never try to access the same cache instance recursively (deadlock risk)
710    ///
711    /// This method should be used when you need atomic multi-step operations but used
712    /// sparingly in high-concurrency environments.
713    ///
714    /// # Examples
715    ///
716    /// ```
717    /// # use sieve_cache::SyncSieveCache;
718    /// let cache = SyncSieveCache::new(100).unwrap();
719    ///
720    /// cache.with_lock(|inner_cache| {
721    ///     // Perform multiple operations atomically
722    ///     inner_cache.insert("key1".to_string(), "value1".to_string());
723    ///     inner_cache.insert("key2".to_string(), "value2".to_string());
724    ///
725    ///     // We can check internal state mid-transaction
726    ///     assert_eq!(inner_cache.len(), 2);
727    /// });
728    /// ```
729    pub fn with_lock<F, T>(&self, f: F) -> T
730    where
731        F: FnOnce(&mut SieveCache<K, V>) -> T,
732    {
733        let mut guard = self.locked_cache();
734        f(&mut guard)
735    }
736
737    /// Retains only the elements specified by the predicate.
738    ///
739    /// Removes all entries for which the provided function returns `false`.
740    /// The elements are visited in arbitrary, unspecified order.
741    ///
742    /// Two variants are available:
743    /// - `retain`: evaluates the predicate outside the lock and removes each non-matching entry with a separate lock acquisition
744    /// - `retain_batch`: evaluates the predicate outside the lock, then removes all non-matching entries under a single lock acquisition
745    ///
746    /// # Thread Safety
747    ///
748    /// This method operates in three phases:
749    /// 1. It acquires the lock and creates a complete snapshot of the cache
750    /// 2. It releases the lock and applies the predicate to each entry
751    /// 3. It either:
752    ///    - Acquires the lock again individually for each entry to be removed (default), or
753    ///    - Acquires the lock once and removes all entries in a batch (batch mode)
754    ///
755    /// This approach means:
756    /// - The lock is not held during predicate execution, preventing lock contention
757    /// - If other threads modify the cache between steps 1 and 3, the method may:
758    ///   - Remove entries that were modified and would now pass the predicate
759    ///   - Miss removing entries added after the snapshot was taken
760    /// - The predicate sees a point-in-time snapshot that might not reflect the latest state
761    /// - For strict consistency requirements, use `with_lock` instead (see the sketch after the examples below)
762    ///
763    /// # Examples
764    ///
765    /// Basic usage:
766    ///
767    /// ```
768    /// # use sieve_cache::SyncSieveCache;
769    /// let cache = SyncSieveCache::new(100).unwrap();
770    /// cache.insert("key1".to_string(), 100);
771    /// cache.insert("key2".to_string(), 200);
772    /// cache.insert("key3".to_string(), 300);
773    ///
774    /// // Keep only entries with values greater than 150
775    /// cache.retain(|_, value| *value > 150);
776    ///
777    /// assert_eq!(cache.len(), 2);
778    /// assert!(!cache.contains_key(&"key1".to_string()));
779    /// assert!(cache.contains_key(&"key2".to_string()));
780    /// assert!(cache.contains_key(&"key3".to_string()));
781    /// ```
782    ///
783    /// Using batch mode (more efficient):
784    ///
785    /// ```
786    /// # use sieve_cache::SyncSieveCache;
787    /// let cache = SyncSieveCache::new(100).unwrap();
788    /// cache.insert("key1".to_string(), 100);
789    /// cache.insert("key2".to_string(), 200);
790    /// cache.insert("key3".to_string(), 300);
791    ///
792    /// // Keep only entries with values greater than 150 (batch mode)
793    /// cache.retain_batch(|_, value| *value > 150);
794    ///
795    /// assert_eq!(cache.len(), 2);
796    /// assert!(!cache.contains_key(&"key1".to_string()));
797    /// assert!(cache.contains_key(&"key2".to_string()));
798    /// assert!(cache.contains_key(&"key3".to_string()));
799    /// ```
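    ///
    /// When the point-in-time snapshot semantics described above are not acceptable, the same
    /// filtering can be performed entirely under the lock with `with_lock` (note that the
    /// predicate then runs while the lock is held). A minimal sketch:
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("key1".to_string(), 100);
    /// cache.insert("key2".to_string(), 200);
    ///
    /// cache.with_lock(|inner| {
    ///     // Collect the keys to drop, then remove them, all in one critical section.
    ///     let doomed: Vec<String> = inner
    ///         .iter()
    ///         .filter(|(_, v)| **v <= 150)
    ///         .map(|(k, _)| k.clone())
    ///         .collect();
    ///     for key in doomed {
    ///         inner.remove(&key);
    ///     }
    /// });
    ///
    /// assert_eq!(cache.len(), 1);
    /// assert!(cache.contains_key(&"key2".to_string()));
    /// ```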
800    pub fn retain<F>(&self, mut f: F)
801    where
802        F: FnMut(&K, &V) -> bool,
803        V: Clone,
804    {
805        // First, safely collect all entries while holding the lock
806        let entries = self.with_lock(|inner_cache| {
807            inner_cache
808                .iter()
809                .map(|(k, v)| (k.clone(), v.clone()))
810                .collect::<Vec<(K, V)>>()
811        });
812
813        // Now check each entry against the predicate without holding the lock
814        for (key, value) in entries {
815            if !f(&key, &value) {
816                // Remove entries that don't match the predicate
817                // This acquires the lock for each removal operation
818                self.remove(&key);
819            }
820        }
821    }
822
823    /// Retains only the elements specified by the predicate, using batch processing.
824    ///
825    /// This is an optimized version of `retain()` that collects all keys to remove first,
826    /// then removes them in a single batch operation with a single lock acquisition.
827    /// This is more efficient when removing many entries, especially in high-contention scenarios.
828    ///
829    /// # Thread Safety
830    ///
831    /// This method has improved performance characteristics:
832    /// - Only acquires the lock twice (once for collection, once for removal)
833    /// - Performs all removals in a single atomic operation
834    /// - Reduces lock contention compared to standard `retain()`
835    ///
836    /// However, it still has the same consistency characteristics as `retain()`:
837    /// - The snapshot might not reflect concurrent modifications
838    /// - There's a window between collecting entries and removing them where
839    ///   other threads might modify the cache
840    ///
841    /// # Examples
842    ///
843    /// ```
844    /// # use sieve_cache::SyncSieveCache;
845    /// let cache = SyncSieveCache::new(100).unwrap();
846    /// cache.insert("key1".to_string(), 100);
847    /// cache.insert("key2".to_string(), 200);
848    /// cache.insert("key3".to_string(), 300);
849    ///
850    /// // Keep only entries with values greater than 150
851    /// cache.retain_batch(|_, value| *value > 150);
852    ///
853    /// assert_eq!(cache.len(), 2);
854    /// assert!(!cache.contains_key(&"key1".to_string()));
855    /// assert!(cache.contains_key(&"key2".to_string()));
856    /// assert!(cache.contains_key(&"key3".to_string()));
857    /// ```
858    pub fn retain_batch<F>(&self, mut f: F)
859    where
860        F: FnMut(&K, &V) -> bool,
861        V: Clone,
862    {
863        // First, safely collect all entries while holding the lock
864        let entries = self.with_lock(|inner_cache| {
865            inner_cache
866                .iter()
867                .map(|(k, v)| (k.clone(), v.clone()))
868                .collect::<Vec<(K, V)>>()
869        });
870
871        // Collect keys to remove without holding the lock
872        let mut keys_to_remove = Vec::new();
873        for (key, value) in entries {
874            if !f(&key, &value) {
875                keys_to_remove.push(key);
876            }
877        }
878
879        // If there are keys to remove, do it in a single batch operation
880        if !keys_to_remove.is_empty() {
881            self.with_lock(|inner_cache| {
882                for key in keys_to_remove {
883                    inner_cache.remove(&key);
884                }
885            });
886        }
887    }
888}
889
890#[cfg(test)]
891mod tests {
892    use super::*;
893    use std::thread;
894
895    #[test]
896    fn test_sync_cache() {
897        let cache = SyncSieveCache::new(100).unwrap();
898
899        // Insert a value
900        assert!(cache.insert("key1".to_string(), "value1".to_string()));
901
902        // Read back the value
903        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
904
905        // Check contains_key
906        assert!(cache.contains_key(&"key1".to_string()));
907
908        // Check capacity and length
909        assert_eq!(cache.capacity(), 100);
910        assert_eq!(cache.len(), 1);
911
912        // Remove a value
913        assert_eq!(
914            cache.remove(&"key1".to_string()),
915            Some("value1".to_string())
916        );
917        assert_eq!(cache.len(), 0);
918        assert!(cache.is_empty());
919    }
920
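    #[test]
    fn test_default_and_from() {
        // A small sketch covering the `Default` and `From<SieveCache>` constructors.
        let cache: SyncSieveCache<String, u32> = Default::default();
        assert_eq!(cache.capacity(), 100);
        assert!(cache.is_empty());

        let mut single_threaded = SieveCache::new(10).unwrap();
        single_threaded.insert("key".to_string(), 1u32);

        let shared = SyncSieveCache::from(single_threaded);
        assert_eq!(shared.capacity(), 10);
        assert_eq!(shared.get(&"key".to_string()), Some(1));
    }
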
921    #[test]
922    fn test_multithreaded_access() {
923        let cache = SyncSieveCache::new(100).unwrap();
924        let cache_clone = cache.clone();
925
926        // Add some initial data
927        cache.insert("shared".to_string(), "initial".to_string());
928
929        // Spawn a thread that updates the cache
930        let thread = thread::spawn(move || {
931            cache_clone.insert("shared".to_string(), "updated".to_string());
932            cache_clone.insert("thread_only".to_string(), "thread_value".to_string());
933        });
934
935        // Main thread operations
936        cache.insert("main_only".to_string(), "main_value".to_string());
937
938        // Wait for thread to complete
939        thread.join().unwrap();
940
941        // Verify results
942        assert_eq!(
943            cache.get(&"shared".to_string()),
944            Some("updated".to_string())
945        );
946        assert_eq!(
947            cache.get(&"thread_only".to_string()),
948            Some("thread_value".to_string())
949        );
950        assert_eq!(
951            cache.get(&"main_only".to_string()),
952            Some("main_value".to_string())
953        );
954        assert_eq!(cache.len(), 3);
955    }
956
957    #[test]
958    fn test_with_lock() {
959        let cache = SyncSieveCache::new(100).unwrap();
960
961        // Perform multiple operations atomically
962        cache.with_lock(|inner_cache| {
963            inner_cache.insert("key1".to_string(), "value1".to_string());
964            inner_cache.insert("key2".to_string(), "value2".to_string());
965            inner_cache.insert("key3".to_string(), "value3".to_string());
966        });
967
968        assert_eq!(cache.len(), 3);
969    }
970
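    #[test]
    fn test_evict() {
        // A small sketch of explicit eviction: an entry that was not recently
        // accessed is evicted before a recently visited one.
        let cache = SyncSieveCache::new(2).unwrap();
        cache.insert("key1".to_string(), "value1".to_string());
        cache.insert("key2".to_string(), "value2".to_string());

        // Mark key1 as visited.
        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));

        // Evicting removes one entry and shrinks the cache.
        let evicted = cache.evict();
        assert!(evicted.is_some());
        assert_eq!(cache.len(), 1);

        // Evicting from an empty cache yields `None`.
        cache.clear();
        assert_eq!(cache.evict(), None);
    }
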
971    #[test]
972    fn test_get_mut() {
973        let cache = SyncSieveCache::new(100).unwrap();
974        cache.insert("key".to_string(), "value".to_string());
975
976        // Modify the value in-place
977        let modified = cache.get_mut(&"key".to_string(), |value| {
978            *value = "new_value".to_string();
979        });
980        assert!(modified);
981
982        // Verify the value was updated
983        assert_eq!(cache.get(&"key".to_string()), Some("new_value".to_string()));
984
985        // Try to modify a non-existent key
986        let modified = cache.get_mut(&"missing".to_string(), |_| {
987            panic!("This should not be called");
988        });
989        assert!(!modified);
990    }
991
992    #[test]
993    fn test_clear() {
994        let cache = SyncSieveCache::new(10).unwrap();
995        cache.insert("key1".to_string(), "value1".to_string());
996        cache.insert("key2".to_string(), "value2".to_string());
997
998        assert_eq!(cache.len(), 2);
999        assert!(!cache.is_empty());
1000
1001        cache.clear();
1002
1003        assert_eq!(cache.len(), 0);
1004        assert!(cache.is_empty());
1005        assert_eq!(cache.get(&"key1".to_string()), None);
1006        assert_eq!(cache.get(&"key2".to_string()), None);
1007    }
1008
1009    #[test]
1010    fn test_keys_values_entries() {
1011        let cache = SyncSieveCache::new(10).unwrap();
1012        cache.insert("key1".to_string(), "value1".to_string());
1013        cache.insert("key2".to_string(), "value2".to_string());
1014
1015        // Test keys
1016        let keys = cache.keys();
1017        assert_eq!(keys.len(), 2);
1018        assert!(keys.contains(&"key1".to_string()));
1019        assert!(keys.contains(&"key2".to_string()));
1020
1021        // Test values
1022        let values = cache.values();
1023        assert_eq!(values.len(), 2);
1024        assert!(values.contains(&"value1".to_string()));
1025        assert!(values.contains(&"value2".to_string()));
1026
1027        // Test entries
1028        let entries = cache.entries();
1029        assert_eq!(entries.len(), 2);
1030        assert!(entries.contains(&("key1".to_string(), "value1".to_string())));
1031        assert!(entries.contains(&("key2".to_string(), "value2".to_string())));
1032    }
1033
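    #[test]
    fn test_into_iter() {
        // Mirrors the `IntoIterator` doc example: collect every entry into a HashMap.
        use std::collections::HashMap;

        let cache = SyncSieveCache::new(10).unwrap();
        cache.insert("key1".to_string(), "value1".to_string());
        cache.insert("key2".to_string(), "value2".to_string());

        let map: HashMap<_, _> = cache.into_iter().collect();
        assert_eq!(map.len(), 2);
        assert_eq!(map.get("key1"), Some(&"value1".to_string()));
        assert_eq!(map.get("key2"), Some(&"value2".to_string()));
    }
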
1034    #[test]
1035    fn test_for_each_methods() {
1036        let cache = SyncSieveCache::new(10).unwrap();
1037        cache.insert("key1".to_string(), "value1".to_string());
1038        cache.insert("key2".to_string(), "value2".to_string());
1039
1040        // Test for_each_value
1041        cache.for_each_value(|value| {
1042            *value = format!("{}_updated", value);
1043        });
1044
1045        assert_eq!(
1046            cache.get(&"key1".to_string()),
1047            Some("value1_updated".to_string())
1048        );
1049        assert_eq!(
1050            cache.get(&"key2".to_string()),
1051            Some("value2_updated".to_string())
1052        );
1053
1054        // Test for_each_entry
1055        cache.for_each_entry(|(key, value)| {
1056            if key == "key1" {
1057                *value = format!("{}_special", value);
1058            }
1059        });
1060
1061        assert_eq!(
1062            cache.get(&"key1".to_string()),
1063            Some("value1_updated_special".to_string())
1064        );
1065        assert_eq!(
1066            cache.get(&"key2".to_string()),
1067            Some("value2_updated".to_string())
1068        );
1069    }
1070
1071    #[test]
1072    fn test_retain() {
1073        let cache = SyncSieveCache::new(10).unwrap();
1074
1075        // Add some entries
1076        cache.insert("even1".to_string(), 2);
1077        cache.insert("even2".to_string(), 4);
1078        cache.insert("odd1".to_string(), 1);
1079        cache.insert("odd2".to_string(), 3);
1080
1081        assert_eq!(cache.len(), 4);
1082
1083        // Keep only entries with even values
1084        cache.retain(|_, v| v % 2 == 0);
1085
1086        assert_eq!(cache.len(), 2);
1087        assert!(cache.contains_key(&"even1".to_string()));
1088        assert!(cache.contains_key(&"even2".to_string()));
1089        assert!(!cache.contains_key(&"odd1".to_string()));
1090        assert!(!cache.contains_key(&"odd2".to_string()));
1091
1092        // Keep only entries with keys containing '1'
1093        cache.retain(|k, _| k.contains('1'));
1094
1095        assert_eq!(cache.len(), 1);
1096        assert!(cache.contains_key(&"even1".to_string()));
1097        assert!(!cache.contains_key(&"even2".to_string()));
1098    }
1099
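    #[test]
    fn test_retain_batch() {
        // Mirrors `test_retain` above, exercising the single-batch removal variant.
        let cache = SyncSieveCache::new(10).unwrap();

        cache.insert("even1".to_string(), 2);
        cache.insert("even2".to_string(), 4);
        cache.insert("odd1".to_string(), 1);
        cache.insert("odd2".to_string(), 3);

        // Keep only entries with even values, removing the rest in one batch.
        cache.retain_batch(|_, v| v % 2 == 0);

        assert_eq!(cache.len(), 2);
        assert!(cache.contains_key(&"even1".to_string()));
        assert!(cache.contains_key(&"even2".to_string()));
        assert!(!cache.contains_key(&"odd1".to_string()));
        assert!(!cache.contains_key(&"odd2".to_string()));
    }
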
1100    #[test]
1101    fn test_deadlock_prevention() {
1102        let cache = Arc::new(SyncSieveCache::new(100).unwrap());
1103
1104        // Add some initial data
1105        cache.insert("key1".to_string(), 1);
1106        cache.insert("key2".to_string(), 2);
1107
1108        // Clone for use in threads
1109        let cache_clone1 = cache.clone();
1110        let cache_clone2 = cache.clone();
1111
1112        // Thread 1: Recursively accesses the cache within get_mut callback
1113        let thread1 = thread::spawn(move || {
1114            cache_clone1.get_mut(&"key1".to_string(), |value| {
1115                // This would deadlock with an unsafe implementation!
1116                // Attempt to get another value while holding the lock
1117                let value2 = cache_clone1.get(&"key2".to_string());
1118                assert_eq!(value2, Some(2));
1119
1120                // Even modify another value
1121                cache_clone1.insert("key3".to_string(), 3);
1122
1123                *value += 10;
1124            });
1125        });
1126
1127        // Thread 2: Also performs operations that would deadlock with unsafe impl
1128        let thread2 = thread::spawn(move || {
1129            // Sleep to ensure thread1 starts first
1130            thread::sleep(std::time::Duration::from_millis(10));
1131
1132            // These operations would deadlock if thread1 held a lock during its callback
1133            cache_clone2.insert("key4".to_string(), 4);
1134            assert_eq!(cache_clone2.get(&"key2".to_string()), Some(2));
1135        });
1136
1137        // Both threads should complete without deadlock
1138        thread1.join().unwrap();
1139        thread2.join().unwrap();
1140
1141        // Verify final state
1142        assert_eq!(cache.get(&"key1".to_string()), Some(11)); // 1 + 10
1143        assert_eq!(cache.get(&"key3".to_string()), Some(3));
1144        assert_eq!(cache.get(&"key4".to_string()), Some(4));
1145    }
1146}