// sieve_cache/sync.rs

use crate::SieveCache;
use std::borrow::Borrow;
use std::hash::Hash;
use std::sync::{Arc, Mutex, MutexGuard, PoisonError};

/// A thread-safe wrapper around `SieveCache`.
///
/// This provides a thread-safe implementation of the SIEVE cache algorithm by wrapping
/// the standard `SieveCache` in an `Arc<Mutex<...>>`. It offers the same functionality as
/// the underlying cache but with thread safety guarantees.
///
/// # Thread Safety
///
/// All operations acquire a lock on the entire cache, which provides strong consistency
/// but may become a bottleneck under high contention. For workloads with high concurrency,
/// consider using [`ShardedSieveCache`](crate::ShardedSieveCache) instead, which partitions
/// the cache into multiple independently-locked segments.
///
/// # Examples
///
/// ```
/// # use sieve_cache::SyncSieveCache;
/// let cache = SyncSieveCache::new(100).unwrap();
///
/// // Multiple threads can safely access the cache
/// cache.insert("key1".to_string(), "value1".to_string());
/// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
/// ```
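///
/// Clones share the same underlying storage (the handle wraps an `Arc`), so the
/// cache can be shared between threads simply by cloning it. A small sketch, with
/// an arbitrary key and value:
///
/// ```
/// # use sieve_cache::SyncSieveCache;
/// use std::thread;
///
/// let cache = SyncSieveCache::new(100).unwrap();
/// let worker_cache = cache.clone();
///
/// let worker = thread::spawn(move || {
///     worker_cache.insert("from_worker".to_string(), "value".to_string());
/// });
/// worker.join().unwrap();
///
/// // The insert performed by the worker thread is visible through the original handle.
/// assert_eq!(cache.get(&"from_worker".to_string()), Some("value".to_string()));
/// ```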
#[derive(Clone)]
pub struct SyncSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    inner: Arc<Mutex<SieveCache<K, V>>>,
}

impl<K, V> SyncSieveCache<K, V>
where
    K: Eq + Hash + Clone + Send + Sync,
    V: Send + Sync,
{
    /// Creates a new thread-safe cache with the given capacity.
    ///
    /// # Errors
    ///
    /// Returns an error if the capacity is zero.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::<String, String>::new(100).unwrap();
    /// ```
    pub fn new(capacity: usize) -> Result<Self, &'static str> {
        let cache = SieveCache::new(capacity)?;
        Ok(Self {
            inner: Arc::new(Mutex::new(cache)),
        })
    }

    /// Returns a locked reference to the underlying cache.
    ///
    /// This is an internal helper that centralizes lock handling. If the mutex
    /// has been poisoned by a panic in another thread, the poisoning is ignored
    /// and the guard is recovered via `PoisonError::into_inner()`, so the cache
    /// remains usable.
    #[inline]
    fn locked_cache(&self) -> MutexGuard<'_, SieveCache<K, V>> {
        self.inner.lock().unwrap_or_else(PoisonError::into_inner)
    }

    /// Returns the capacity of the cache.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::<String, u32>::new(100).unwrap();
    /// assert_eq!(cache.capacity(), 100);
    /// ```
    pub fn capacity(&self) -> usize {
        self.locked_cache().capacity()
    }

    /// Returns the number of cached values.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    /// assert_eq!(cache.len(), 1);
    /// ```
    pub fn len(&self) -> usize {
        self.locked_cache().len()
    }

    /// Returns `true` when no values are currently cached.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::<String, String>::new(100).unwrap();
    /// assert!(cache.is_empty());
    ///
    /// cache.insert("key".to_string(), "value".to_string());
    /// assert!(!cache.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.locked_cache().is_empty()
    }

    /// Returns `true` if there is a value in the cache mapped to by `key`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert!(cache.contains_key(&"key".to_string()));
    /// assert!(!cache.contains_key(&"missing".to_string()));
    /// ```
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
    {
        let mut guard = self.locked_cache();
        guard.contains_key(key)
    }

    /// Gets a clone of the value in the cache mapped to by `key`.
    ///
    /// If no value exists for `key`, this returns `None`.
    ///
    /// # Note
    ///
    /// Unlike the unwrapped `SieveCache`, this returns a clone of the value
    /// rather than a reference: the mutex guard is released when this method
    /// returns, so a reference into the cache could not be handed out. This
    /// means that `V` must implement `Clone`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
    /// assert_eq!(cache.get(&"missing".to_string()), None);
    /// ```
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        Q: Hash + Eq + ?Sized,
        K: Borrow<Q>,
        V: Clone,
    {
        let mut guard = self.locked_cache();
        guard.get(key).cloned()
    }

    /// Maps `key` to `value` in the cache, possibly evicting old entries.
    ///
    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
    /// updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    ///
    /// // Insert a new key
    /// assert!(cache.insert("key1".to_string(), "value1".to_string()));
    ///
    /// // Update an existing key
    /// assert!(!cache.insert("key1".to_string(), "updated_value".to_string()));
    /// ```
    pub fn insert(&self, key: K, value: V) -> bool {
        let mut guard = self.locked_cache();
        guard.insert(key, value)
    }

    /// Removes the cache entry mapped to by `key`.
    ///
    /// This method returns the value removed from the cache. If `key` did not map to any value,
    /// then this returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    /// cache.insert("key".to_string(), "value".to_string());
    ///
    /// // Remove an existing key
    /// assert_eq!(cache.remove(&"key".to_string()), Some("value".to_string()));
    ///
    /// // Attempt to remove a missing key
    /// assert_eq!(cache.remove(&"key".to_string()), None);
    /// ```
    pub fn remove<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        let mut guard = self.locked_cache();
        guard.remove(key)
    }

    /// Removes and returns a value from the cache that was not recently accessed.
    ///
    /// This implements the SIEVE eviction algorithm to select an entry for removal.
    /// If no suitable value exists, this returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(2).unwrap();
    /// cache.insert("key1".to_string(), "value1".to_string());
    /// cache.insert("key2".to_string(), "value2".to_string());
    ///
    /// // Accessing key1 marks it as recently used
    /// assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));
    ///
    /// // Insert a new key, which should evict key2
    /// cache.insert("key3".to_string(), "value3".to_string());
    ///
    /// // key2 should have been evicted
    /// assert_eq!(cache.get(&"key2".to_string()), None);
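    ///
    /// // `evict` can also be called directly to remove another entry that was
    /// // not recently accessed (either remaining key qualifies here)
    /// assert!(cache.evict().is_some());
    /// assert_eq!(cache.len(), 1);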
    /// ```
    pub fn evict(&self) -> Option<V> {
        let mut guard = self.locked_cache();
        guard.evict()
    }

    /// Gets exclusive access to the underlying cache to perform multiple operations atomically.
    ///
    /// This is useful when you need to perform a series of operations that depend on each other
    /// and you want to ensure that no other thread can access the cache between operations.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    ///
    /// cache.with_lock(|inner_cache| {
    ///     // Perform multiple operations atomically
    ///     inner_cache.insert("key1".to_string(), "value1".to_string());
    ///     inner_cache.insert("key2".to_string(), "value2".to_string());
    ///
    ///     // We can check internal state mid-transaction
    ///     assert_eq!(inner_cache.len(), 2);
    /// });
    /// ```
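    ///
    /// Because the closure runs while the lock is held, read-modify-write
    /// sequences cannot be interleaved with other threads. A sketch of a
    /// "get or insert a default" pattern (the key and default value here are
    /// illustrative):
    ///
    /// ```
    /// # use sieve_cache::SyncSieveCache;
    /// let cache = SyncSieveCache::new(100).unwrap();
    ///
    /// let value = cache.with_lock(|inner_cache| {
    ///     if !inner_cache.contains_key("config") {
    ///         inner_cache.insert("config".to_string(), "default".to_string());
    ///     }
    ///     inner_cache.get("config").cloned()
    /// });
    /// assert_eq!(value, Some("default".to_string()));
    /// ```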
    pub fn with_lock<F, T>(&self, f: F) -> T
    where
        F: FnOnce(&mut SieveCache<K, V>) -> T,
    {
        let mut guard = self.locked_cache();
        f(&mut guard)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    #[test]
    fn test_sync_cache() {
        let cache = SyncSieveCache::new(100).unwrap();

        // Insert a value
        assert!(cache.insert("key1".to_string(), "value1".to_string()));

        // Read back the value
        assert_eq!(cache.get(&"key1".to_string()), Some("value1".to_string()));

        // Check contains_key
        assert!(cache.contains_key(&"key1".to_string()));

        // Check capacity and length
        assert_eq!(cache.capacity(), 100);
        assert_eq!(cache.len(), 1);

        // Remove a value
        assert_eq!(
            cache.remove(&"key1".to_string()),
            Some("value1".to_string())
        );
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_multithreaded_access() {
        let cache = SyncSieveCache::new(100).unwrap();
        let cache_clone = cache.clone();

        // Add some initial data
        cache.insert("shared".to_string(), "initial".to_string());

        // Spawn a thread that updates the cache
        let thread = thread::spawn(move || {
            cache_clone.insert("shared".to_string(), "updated".to_string());
            cache_clone.insert("thread_only".to_string(), "thread_value".to_string());
        });

        // Main thread operations
        cache.insert("main_only".to_string(), "main_value".to_string());

        // Wait for thread to complete
        thread.join().unwrap();

        // Verify results
        assert_eq!(
            cache.get(&"shared".to_string()),
            Some("updated".to_string())
        );
        assert_eq!(
            cache.get(&"thread_only".to_string()),
            Some("thread_value".to_string())
        );
        assert_eq!(
            cache.get(&"main_only".to_string()),
            Some("main_value".to_string())
        );
        assert_eq!(cache.len(), 3);
    }
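
    // A sketch of the poison-recovery behavior documented on `locked_cache`:
    // a panic while the lock is held poisons the mutex, but the cache stays
    // usable because later accesses ignore the poison error.
    #[test]
    fn test_lock_recovery_after_panic() {
        let cache = SyncSieveCache::new(10).unwrap();
        cache.insert("key".to_string(), "value".to_string());

        // Poison the mutex by panicking while holding the lock.
        let cache_clone = cache.clone();
        let result = thread::spawn(move || {
            cache_clone.with_lock(|_inner| {
                panic!("panic while holding the cache lock");
            });
        })
        .join();
        assert!(result.is_err());

        // Subsequent operations recover the lock and still see the data.
        assert_eq!(cache.get(&"key".to_string()), Some("value".to_string()));
        assert!(cache.insert("key2".to_string(), "value2".to_string()));
    }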

    #[test]
    fn test_with_lock() {
        let cache = SyncSieveCache::new(100).unwrap();

        // Perform multiple operations atomically
        cache.with_lock(|inner_cache| {
            inner_cache.insert("key1".to_string(), "value1".to_string());
            inner_cache.insert("key2".to_string(), "value2".to_string());
            inner_cache.insert("key3".to_string(), "value3".to_string());
        });

        assert_eq!(cache.len(), 3);
    }
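
    // A sketch of atomic read-modify-write under contention: every increment
    // runs inside `with_lock`, so no updates are lost. The thread and
    // iteration counts are arbitrary.
    #[test]
    fn test_with_lock_is_atomic_across_threads() {
        let cache = SyncSieveCache::new(10).unwrap();
        cache.insert("counter".to_string(), 0u64);

        let mut handles = Vec::new();
        for _ in 0..4 {
            let cache = cache.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    cache.with_lock(|inner_cache| {
                        let current = inner_cache
                            .get(&"counter".to_string())
                            .copied()
                            .unwrap_or(0);
                        inner_cache.insert("counter".to_string(), current + 1);
                    });
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }

        // 4 threads * 100 increments each.
        assert_eq!(cache.get(&"counter".to_string()), Some(400));
    }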
}