quick_cache/
unsync.rs

use crate::{
    linked_slab::Token,
    options::*,
    shard::{self, CacheShard, InsertStrategy},
    DefaultHashBuilder, Equivalent, Lifecycle, MemoryUsed, UnitWeighter, Weighter,
};
use std::hash::{BuildHasher, Hash};

/// A non-concurrent cache.
#[derive(Clone)]
pub struct Cache<
    Key,
    Val,
    We = UnitWeighter,
    B = DefaultHashBuilder,
    L = DefaultLifecycle<Key, Val>,
> {
    shard: CacheShard<Key, Val, We, B, L, SharedPlaceholder>,
}

impl<Key: Eq + Hash, Val> Cache<Key, Val> {
    /// Creates a new cache that holds up to `items_capacity` items (approximately).
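    ///
    /// # Example
    ///
    /// A minimal usage sketch; the key and value types here are illustrative:
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(100);
    /// cache.insert("apple".to_string(), 42u32);
    /// assert_eq!(cache.get("apple"), Some(&42));
    /// ```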
    pub fn new(items_capacity: usize) -> Self {
        Self::with(
            items_capacity,
            items_capacity as u64,
            Default::default(),
            Default::default(),
            Default::default(),
        )
    }
}

impl<Key: Eq + Hash, Val, We: Weighter<Key, Val>> Cache<Key, Val, We> {
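    /// Creates a new cache that can hold up to `weight_capacity` in weight, using `weighter`
    /// to compute the weight of each item.
    ///
    /// # Example
    ///
    /// A small sketch with a hypothetical `StringWeighter` that weighs values by their length:
    ///
    /// ```rust
    /// use quick_cache::{unsync::Cache, Weighter};
    ///
    /// #[derive(Clone)]
    /// struct StringWeighter;
    ///
    /// impl Weighter<u64, String> for StringWeighter {
    ///     fn weight(&self, _key: &u64, val: &String) -> u64 {
    ///         val.len() as u64
    ///     }
    /// }
    ///
    /// let mut cache = Cache::with_weighter(100, 100_000, StringWeighter);
    /// cache.insert(1, "one".to_string());
    /// assert_eq!(cache.get(&1).map(String::as_str), Some("one"));
    /// ```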
    pub fn with_weighter(
        estimated_items_capacity: usize,
        weight_capacity: u64,
        weighter: We,
    ) -> Self {
        Self::with(
            estimated_items_capacity,
            weight_capacity,
            weighter,
            Default::default(),
            Default::default(),
        )
    }
}

impl<Key: Eq + Hash, Val, We: Weighter<Key, Val>, B: BuildHasher, L: Lifecycle<Key, Val>>
    Cache<Key, Val, We, B, L>
{
    /// Creates a new cache that can hold up to `weight_capacity` in weight.
    /// `estimated_items_capacity` is the estimated number of items the cache is expected to hold,
    /// roughly equivalent to `weight_capacity / average item weight`.
    pub fn with(
        estimated_items_capacity: usize,
        weight_capacity: u64,
        weighter: We,
        hash_builder: B,
        lifecycle: L,
    ) -> Self {
        Self::with_options(
            OptionsBuilder::new()
                .estimated_items_capacity(estimated_items_capacity)
                .weight_capacity(weight_capacity)
                .build()
                .unwrap(),
            weighter,
            hash_builder,
            lifecycle,
        )
    }

    /// Constructs a cache based on [OptionsBuilder].
    ///
    /// # Example
    ///
    /// ```rust
    /// use quick_cache::{unsync::{Cache, DefaultLifecycle}, OptionsBuilder, UnitWeighter, DefaultHashBuilder};
    ///
    /// Cache::<(String, u64), String>::with_options(
    ///     OptionsBuilder::new()
    ///         .estimated_items_capacity(10000)
    ///         .weight_capacity(10000)
    ///         .build()
    ///         .unwrap(),
    ///     UnitWeighter,
    ///     DefaultHashBuilder::default(),
    ///     DefaultLifecycle::default(),
    /// );
    /// ```
    pub fn with_options(options: Options, weighter: We, hash_builder: B, lifecycle: L) -> Self {
        let shard = CacheShard::new(
            options.hot_allocation,
            options.ghost_allocation,
            options.estimated_items_capacity,
            options.weight_capacity,
            weighter,
            hash_builder,
            lifecycle,
        );
        Self { shard }
    }

    /// Returns whether the cache is empty.
    pub fn is_empty(&self) -> bool {
        self.shard.len() == 0
    }

    /// Returns the number of cached items
    pub fn len(&self) -> usize {
        self.shard.len()
    }

    /// Returns the total weight of cached items
    pub fn weight(&self) -> u64 {
        self.shard.weight()
    }

    /// Returns the maximum weight of cached items
    pub fn capacity(&self) -> u64 {
        self.shard.capacity()
    }

    /// Returns the number of misses
    #[cfg(feature = "stats")]
    pub fn misses(&self) -> u64 {
        self.shard.misses()
    }

    /// Returns the number of hits
    #[cfg(feature = "stats")]
    pub fn hits(&self) -> u64 {
        self.shard.hits()
    }

    /// Reserves additional space for `additional` entries.
    /// Note that this is counted in entries, and is not weighted.
    pub fn reserve(&mut self, additional: usize) {
        self.shard.reserve(additional);
    }

    /// Checks whether a key exists in the cache.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.contains(self.shard.hash(key), key)
    }

    /// Fetches an item from the cache.
    pub fn get<Q>(&self, key: &Q) -> Option<&Val>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.get(self.shard.hash(key), key)
    }

    /// Fetches an item from the cache, returning a mutable reference to the value.
    ///
    /// Note: Leaking the returned RefMut might cause cache weight tracking to be inaccurate.
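    ///
    /// # Example
    ///
    /// A small sketch of in-place mutation (types are illustrative):
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(10);
    /// cache.insert("counter".to_string(), 0u32);
    /// if let Some(mut v) = cache.get_mut("counter") {
    ///     *v += 1;
    /// }
    /// assert_eq!(cache.get("counter"), Some(&1));
    /// ```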
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<RefMut<'_, Key, Val, We, B, L>>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.get_mut(self.shard.hash(key), key).map(RefMut)
    }

    /// Peeks an item from the cache. Unlike gets, peeks don't alter the key "hotness".
    pub fn peek<Q>(&self, key: &Q) -> Option<&Val>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.peek(self.shard.hash(key), key)
    }

    /// Peeks an item from the cache. Unlike gets, peeks don't alter the key "hotness".
    ///
    /// Note: Leaking the returned RefMut might cause cache weight tracking to be inaccurate.
    pub fn peek_mut<Q>(&mut self, key: &Q) -> Option<RefMut<'_, Key, Val, We, B, L>>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.peek_mut(self.shard.hash(key), key).map(RefMut)
    }

    /// Remove an item from the cache whose key is `key`.
    /// Returns the removed entry, if any.
    pub fn remove<Q>(&mut self, key: &Q) -> Option<(Key, Val)>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
    {
        self.shard.remove(self.shard.hash(key), key)
    }

    /// Remove an item from the cache whose key is `key` if `f(&value)` returns `true` for that entry.
    /// Compared to peek and remove, this method is more efficient as it requires only 1 lookup.
    ///
    /// Returns the removed entry, if any.
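    ///
    /// # Example
    ///
    /// A small sketch of conditional removal (types are illustrative):
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(10);
    /// cache.insert("a".to_string(), 1u32);
    /// // The predicate doesn't match, so the entry is kept.
    /// assert_eq!(cache.remove_if("a", |v| *v > 1), None);
    /// // The predicate matches, so the entry is removed and returned.
    /// assert_eq!(cache.remove_if("a", |v| *v == 1), Some(("a".to_string(), 1)));
    /// ```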
    pub fn remove_if<Q, F>(&mut self, key: &Q, f: F) -> Option<(Key, Val)>
    where
        Q: Hash + Equivalent<Key> + ?Sized,
        F: FnOnce(&Val) -> bool,
    {
        self.shard.remove_if(self.shard.hash(key), key, f)
    }

    /// Replaces an item in the cache, but only if it already exists.
    /// If `soft` is set, the replace operation won't affect the "hotness" of the key,
    /// even if the value is replaced.
    ///
    /// Returns `Ok` if the entry was admitted and `Err(_)` if it wasn't.
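    ///
    /// # Example
    ///
    /// A small sketch of the replace-only-if-present behavior:
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(10);
    /// // The key isn't cached yet, so nothing is replaced.
    /// assert!(cache.replace("a".to_string(), 1u32, false).is_err());
    /// cache.insert("a".to_string(), 1);
    /// // Now the key exists and the value is replaced.
    /// assert_eq!(cache.replace("a".to_string(), 2, false), Ok(()));
    /// assert_eq!(cache.get("a"), Some(&2));
    /// ```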
    pub fn replace(&mut self, key: Key, value: Val, soft: bool) -> Result<(), (Key, Val)> {
        let lcs = self.replace_with_lifecycle(key, value, soft)?;
        self.shard.lifecycle.end_request(lcs);
        Ok(())
    }

    /// Replaces an item in the cache, but only if it already exists.
    /// If `soft` is set, the replace operation won't affect the "hotness" of the key,
    /// even if the value is replaced.
    ///
    /// Returns `Ok` if the entry was admitted and `Err(_)` if it wasn't.
    pub fn replace_with_lifecycle(
        &mut self,
        key: Key,
        value: Val,
        soft: bool,
    ) -> Result<L::RequestState, (Key, Val)> {
        let mut lcs = self.shard.lifecycle.begin_request();
        self.shard.insert(
            &mut lcs,
            self.shard.hash(&key),
            key,
            value,
            InsertStrategy::Replace { soft },
        )?;
        Ok(lcs)
    }

    /// Retains only the items specified by the predicate.
    /// In other words, remove all items for which `f(&key, &value)` returns `false`. The
    /// elements are visited in arbitrary order.
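    ///
    /// # Example
    ///
    /// A small sketch that keeps only the even values:
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(10);
    /// for i in 0u32..6 {
    ///     cache.insert(i, i);
    /// }
    /// cache.retain(|_key, val| val % 2 == 0);
    /// assert!(cache.get(&1).is_none());
    /// assert!(cache.get(&3).is_none());
    /// ```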
    pub fn retain<F>(&mut self, f: F)
    where
        F: Fn(&Key, &Val) -> bool,
    {
        self.shard.retain(f);
    }

    /// Gets or inserts an item in the cache with key `key`.
    /// Returns a reference to the inserted `value` if it was admitted to the cache.
    ///
    /// See also `get_ref_or_guard`.
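    ///
    /// # Example
    ///
    /// A small sketch with an infallible constructor; the error type is only illustrative:
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache: Cache<String, u32> = Cache::new(10);
    /// let value = cache
    ///     .get_or_insert_with("answer", || Ok::<_, std::io::Error>(42))
    ///     .unwrap();
    /// assert_eq!(value, Some(&42));
    /// ```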
    pub fn get_or_insert_with<Q, E>(
        &mut self,
        key: &Q,
        with: impl FnOnce() -> Result<Val, E>,
    ) -> Result<Option<&Val>, E>
    where
        Q: Hash + Equivalent<Key> + ToOwned<Owned = Key> + ?Sized,
    {
        let idx = match self.shard.upsert_placeholder(self.shard.hash(key), key) {
            Ok((idx, _)) => idx,
            Err((plh, _)) => {
                let v = with()?;
                let mut lcs = self.shard.lifecycle.begin_request();
                let replaced = self.shard.replace_placeholder(&mut lcs, &plh, false, v);
                self.shard.lifecycle.end_request(lcs);
                debug_assert!(replaced.is_ok(), "unsync replace_placeholder can't fail");
                plh.idx
            }
        };
        Ok(self.shard.peek_token(idx))
    }

    /// Gets or inserts an item in the cache with key `key`.
    /// Returns a mutable reference to the inserted `value` if it was admitted to the cache.
    ///
    /// See also `get_mut_or_guard`.
    pub fn get_mut_or_insert_with<'a, Q, E>(
        &'a mut self,
        key: &Q,
        with: impl FnOnce() -> Result<Val, E>,
    ) -> Result<Option<RefMut<'a, Key, Val, We, B, L>>, E>
    where
        Q: Hash + Equivalent<Key> + ToOwned<Owned = Key> + ?Sized,
    {
        let idx = match self.shard.upsert_placeholder(self.shard.hash(key), key) {
            Ok((idx, _)) => idx,
            Err((plh, _)) => {
                let v = with()?;
                let mut lcs = self.shard.lifecycle.begin_request();
                let replaced = self.shard.replace_placeholder(&mut lcs, &plh, false, v);
                debug_assert!(replaced.is_ok(), "unsync replace_placeholder can't fail");
                self.shard.lifecycle.end_request(lcs);
                plh.idx
            }
        };
        Ok(self.shard.peek_token_mut(idx).map(RefMut))
    }

    /// Gets an item from the cache with key `key`.
    /// If the corresponding value isn't present in the cache, this function returns a guard
    /// that can be used to insert the value once it's computed.
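    ///
    /// # Example
    ///
    /// A small sketch of filling a miss through the returned guard:
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache: Cache<String, u32> = Cache::new(10);
    /// match cache.get_ref_or_guard("answer") {
    ///     Ok(value) => println!("hit: {value}"),
    ///     Err(guard) => {
    ///         // Compute the value and insert it through the guard.
    ///         guard.insert(42);
    ///     }
    /// }
    /// assert_eq!(cache.get("answer"), Some(&42));
    /// ```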
    pub fn get_ref_or_guard<Q>(&mut self, key: &Q) -> Result<&Val, Guard<'_, Key, Val, We, B, L>>
    where
        Q: Hash + Equivalent<Key> + ToOwned<Owned = Key> + ?Sized,
    {
        // TODO: this could be using a simpler entry API
        match self.shard.upsert_placeholder(self.shard.hash(key), key) {
            Ok((_, v)) => unsafe {
                // Rustc gets insanely confused about returning from mut borrows
                // Safety: v has the same lifetime as self
                let v: *const Val = v;
                Ok(&*v)
            },
            Err((placeholder, _)) => Err(Guard {
                cache: self,
                placeholder,
                inserted: false,
            }),
        }
    }

    /// Gets an item from the cache with key `key`.
    /// If the corresponding value isn't present in the cache, this function returns a guard
    /// that can be used to insert the value once it's computed.
    ///
    /// Note: Leaking the returned RefMut might cause cache weight tracking to be inaccurate.
    pub fn get_mut_or_guard<'a, Q>(
        &'a mut self,
        key: &Q,
    ) -> Result<Option<RefMut<'a, Key, Val, We, B, L>>, Guard<'a, Key, Val, We, B, L>>
    where
        Q: Hash + Equivalent<Key> + ToOwned<Owned = Key> + ?Sized,
    {
        // TODO: this could be using a simpler entry API
        match self.shard.upsert_placeholder(self.shard.hash(key), key) {
            Ok((idx, _)) => Ok(self.shard.peek_token_mut(idx).map(RefMut)),
            Err((placeholder, _)) => Err(Guard {
                cache: self,
                placeholder,
                inserted: false,
            }),
        }
    }

    /// Inserts an item in the cache with key `key`.
    pub fn insert(&mut self, key: Key, value: Val) {
        let lcs = self.insert_with_lifecycle(key, value);
        self.shard.lifecycle.end_request(lcs);
    }

    /// Inserts an item in the cache with key `key`.
    pub fn insert_with_lifecycle(&mut self, key: Key, value: Val) -> L::RequestState {
        let mut lcs = self.shard.lifecycle.begin_request();
        let result = self.shard.insert(
            &mut lcs,
            self.shard.hash(&key),
            key,
            value,
            InsertStrategy::Insert,
        );
        // result cannot err with the Insert strategy
        debug_assert!(result.is_ok());
        lcs
    }

    /// Clear all items from the cache
    pub fn clear(&mut self) {
        self.shard.clear();
    }

    /// Iterator for the items in the cache
    pub fn iter(&self) -> impl Iterator<Item = (&'_ Key, &'_ Val)> + '_ {
        // TODO: add a concrete type, impl trait in the public api is really bad.
        self.shard.iter()
    }

    /// Drain all items from the cache
    ///
    /// The cache will be emptied even if the returned iterator isn't fully consumed.
    pub fn drain(&mut self) -> impl Iterator<Item = (Key, Val)> + '_ {
        // TODO: add a concrete type, impl trait in the public api is really bad.
        self.shard.drain()
    }

    /// Sets the cache to a new weight capacity.
    ///
    /// If the new capacity is smaller than the current weight, items will be evicted
    /// to bring the cache within the new limit.
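    ///
    /// # Example
    ///
    /// A small sketch of shrinking the capacity (unit weights, so weight equals item count):
    ///
    /// ```rust
    /// use quick_cache::unsync::Cache;
    ///
    /// let mut cache = Cache::new(100);
    /// for i in 0u32..80 {
    ///     cache.insert(i, i);
    /// }
    /// cache.set_capacity(50);
    /// assert!(cache.weight() <= 50);
    /// ```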
    pub fn set_capacity(&mut self, new_weight_capacity: u64) {
        self.shard.set_capacity(new_weight_capacity);
    }

    #[cfg(any(fuzzing, test))]
    pub fn validate(&self, accept_overweight: bool) {
        self.shard.validate(accept_overweight);
    }

    /// Returns the total memory used by the cache's data structures.
    ///
    /// Note that heap memory owned by keys or values (e.g. the contents of a `Vec<T>`)
    /// is not counted.
    pub fn memory_used(&self) -> MemoryUsed {
        self.shard.memory_used()
    }
}

impl<Key, Val, We, B, L> std::fmt::Debug for Cache<Key, Val, We, B, L> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Cache").finish_non_exhaustive()
    }
}

/// Default `Lifecycle` for the unsync cache.
pub struct DefaultLifecycle<Key, Val>(std::marker::PhantomData<(Key, Val)>);

impl<Key, Val> std::fmt::Debug for DefaultLifecycle<Key, Val> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("DefaultLifecycle").finish()
    }
}

impl<Key, Val> Default for DefaultLifecycle<Key, Val> {
    #[inline]
    fn default() -> Self {
        Self(Default::default())
    }
}

impl<Key, Val> Clone for DefaultLifecycle<Key, Val> {
    #[inline]
    fn clone(&self) -> Self {
        Self(Default::default())
    }
}

impl<Key, Val> Lifecycle<Key, Val> for DefaultLifecycle<Key, Val> {
    type RequestState = ();

    #[inline]
    fn begin_request(&self) -> Self::RequestState {}

    #[inline]
    fn on_evict(&self, _state: &mut Self::RequestState, _key: Key, _val: Val) {}
}

#[derive(Debug, Clone)]
pub(crate) struct SharedPlaceholder {
    hash: u64,
    idx: Token,
}

pub struct Guard<'a, Key, Val, We, B, L> {
    cache: &'a mut Cache<Key, Val, We, B, L>,
    placeholder: SharedPlaceholder,
    inserted: bool,
}

impl<Key: Eq + Hash, Val, We: Weighter<Key, Val>, B: BuildHasher, L: Lifecycle<Key, Val>>
    Guard<'_, Key, Val, We, B, L>
{
    /// Inserts the value into the placeholder
    pub fn insert(self, value: Val) {
        self.insert_internal(value, false);
    }

    /// Inserts the value into the placeholder
    pub fn insert_with_lifecycle(self, value: Val) -> L::RequestState {
        self.insert_internal(value, true).unwrap()
    }

    #[inline]
    fn insert_internal(mut self, value: Val, return_lcs: bool) -> Option<L::RequestState> {
        let mut lcs = self.cache.shard.lifecycle.begin_request();
        let replaced =
            self.cache
                .shard
                .replace_placeholder(&mut lcs, &self.placeholder, false, value);
        debug_assert!(replaced.is_ok(), "unsync replace_placeholder can't fail");
        self.inserted = true;
        if return_lcs {
            Some(lcs)
        } else {
            self.cache.shard.lifecycle.end_request(lcs);
            None
        }
    }
}

impl<Key, Val, We, B, L> Drop for Guard<'_, Key, Val, We, B, L> {
    #[inline]
    fn drop(&mut self) {
        #[cold]
        fn drop_slow<Key, Val, We, B, L>(this: &mut Guard<'_, Key, Val, We, B, L>) {
            this.cache.shard.remove_placeholder(&this.placeholder);
        }
        if !self.inserted {
            drop_slow(self);
        }
    }
}

pub struct RefMut<'cache, Key, Val, We: Weighter<Key, Val>, B, L>(
    crate::shard::RefMut<'cache, Key, Val, We, B, L, SharedPlaceholder>,
);

impl<Key, Val, We: Weighter<Key, Val>, B, L> std::ops::Deref for RefMut<'_, Key, Val, We, B, L> {
    type Target = Val;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.0.pair().1
    }
}

impl<Key, Val, We: Weighter<Key, Val>, B, L> std::ops::DerefMut for RefMut<'_, Key, Val, We, B, L> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0.value_mut()
    }
}

impl shard::SharedPlaceholder for SharedPlaceholder {
    #[inline]
    fn new(hash: u64, idx: Token) -> Self {
        Self { hash, idx }
    }

    #[inline]
    fn same_as(&self, _other: &Self) -> bool {
        true
    }

    #[inline]
    fn hash(&self) -> u64 {
        self.hash
    }

    #[inline]
    fn idx(&self) -> Token {
        self.idx
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    struct Weighter;

    impl crate::Weighter<u32, u32> for Weighter {
        fn weight(&self, _key: &u32, val: &u32) -> u64 {
            *val as u64
        }
    }

    #[test]
    fn test_zero_weights() {
        let mut cache = Cache::with_weighter(100, 100, Weighter);
        cache.insert(0, 0);
        assert_eq!(cache.weight(), 0);
        for i in 1..100 {
            cache.insert(i, i);
            cache.insert(i, i);
        }
        assert_eq!(cache.get(&0).copied(), Some(0));
        assert!(cache.contains_key(&0));
        let a = cache.weight();
        *cache.get_mut(&0).unwrap() += 1;
        assert_eq!(cache.weight(), a + 1);
        for i in 1..100 {
            cache.insert(i, i);
            cache.insert(i, i);
        }
        assert_eq!(cache.get(&0), None);
        assert!(!cache.contains_key(&0));

        cache.insert(0, 1);
        let a = cache.weight();
        *cache.get_mut(&0).unwrap() -= 1;
        assert_eq!(cache.weight(), a - 1);
        for i in 1..100 {
            cache.insert(i, i);
            cache.insert(i, i);
        }
        assert_eq!(cache.get(&0).copied(), Some(0));
        assert!(cache.contains_key(&0));
    }

    #[test]
    fn test_set_capacity() {
        let mut cache = Cache::new(100);
        for i in 0..80 {
            cache.insert(i, i);
        }
        let initial_len = cache.len();
        assert!(initial_len <= 80);

        // Set to smaller capacity
        cache.set_capacity(50);
        assert!(cache.len() <= 50);
        assert!(cache.weight() <= 50);
        cache.validate(false);

        // Set to larger capacity
        cache.set_capacity(200);
        assert_eq!(cache.capacity(), 200);
        cache.validate(false);

        // Insert more items
        for i in 100..180 {
            cache.insert(i, i);
        }
        assert!(cache.len() <= 180);
        assert!(cache.weight() <= 200);
        cache.validate(false);
    }

    #[test]
    fn test_set_capacity_with_ghosts() {
        // Create a cache that will generate ghost entries
        let mut cache = Cache::new(50);

        // Insert items to fill the cache
        for i in 0..100 {
            cache.insert(i, i);
        }
        cache.validate(false);

        // Set to smaller capacity - should trim both resident and ghost entries
        cache.set_capacity(25);
        assert!(cache.weight() <= 25);
        cache.validate(false);

        // Set back to larger capacity
        cache.set_capacity(100);
        assert_eq!(cache.capacity(), 100);
        cache.validate(false);

        // Insert more items
        for i in 100..150 {
            cache.insert(i, i);
        }
        cache.validate(false);
    }

    #[test]
    fn test_remove_if() {
        let mut cache = Cache::new(100);

        // Insert test data
        cache.insert(1, 10);
        cache.insert(2, 20);
        cache.insert(3, 30);

        // Test removing with predicate that returns true
        let removed = cache.remove_if(&2, |v| *v == 20);
        assert_eq!(removed, Some((2, 20)));
        assert_eq!(cache.get(&2), None);

        // Test removing with predicate that returns false
        let not_removed = cache.remove_if(&3, |v| *v == 999);
        assert_eq!(not_removed, None);
        assert_eq!(cache.get(&3), Some(&30));

        // Test removing non-existent key
        let not_found = cache.remove_if(&999, |_| true);
        assert_eq!(not_found, None);

        cache.validate(false);
    }
676}