// mini_moka_wasm/sync/cache.rs

1use super::{base_cache::BaseCache, CacheBuilder, ConcurrentCacheExt, EntryRef, Iter};
2use crate::{
3    common::{
4        concurrent::{
5            constants::{MAX_SYNC_REPEATS, WRITE_RETRY_INTERVAL_MICROS},
6            housekeeper::{Housekeeper, InnerSync},
7            Weigher, WriteOp,
8        },
9        time::Duration,
10        time::Instant,
11    },
12    Policy,
13};
14
15use crossbeam_channel::{Sender, TrySendError};
16use std::{
17    borrow::Borrow,
18    collections::hash_map::RandomState,
19    fmt,
20    hash::{BuildHasher, Hash},
21    sync::Arc,
22};
23
24/// A thread-safe concurrent in-memory cache built upon [`dashmap::DashMap`][dashmap].
25///
26/// The `Cache` uses `DashMap` as the central key-value storage. It performs a
27/// best-effort bounding of the map using an entry replacement algorithm to determine
28/// which entries to evict when the capacity is exceeded.
29///
30/// To use this cache, enable a crate feature called "dash" in your Cargo.toml.
/// Please note that the API of `dash` cache will _be changed very often_ in the
32/// releases as this is yet an experimental component.
33///
34/// # Examples
35///
36/// Cache entries are manually added using [`insert`](#method.insert) method, and are
37/// stored in the cache until either evicted or manually invalidated.
38///
39/// Here's an example of reading and updating a cache by using multiple threads:
40///
41/// ```rust
42/// use mini_moka::sync::Cache;
43///
44/// use std::thread;
45///
46/// fn value(n: usize) -> String {
47///     format!("value {}", n)
48/// }
49///
50/// const NUM_THREADS: usize = 16;
51/// const NUM_KEYS_PER_THREAD: usize = 64;
52///
53/// // Create a cache that can store up to 10,000 entries.
54/// let cache = Cache::new(10_000);
55///
56/// // Spawn threads and read and update the cache simultaneously.
57/// let threads: Vec<_> = (0..NUM_THREADS)
58///     .map(|i| {
59///         // To share the same cache across the threads, clone it.
60///         // This is a cheap operation.
61///         let my_cache = cache.clone();
62///         let start = i * NUM_KEYS_PER_THREAD;
63///         let end = (i + 1) * NUM_KEYS_PER_THREAD;
64///
65///         thread::spawn(move || {
66///             // Insert 64 entries. (NUM_KEYS_PER_THREAD = 64)
67///             for key in start..end {
68///                 my_cache.insert(key, value(key));
69///                 // get() returns Option<String>, a clone of the stored value.
70///                 assert_eq!(my_cache.get(&key), Some(value(key)));
71///             }
72///
///             // Invalidate every 4th element of the inserted entries.
74///             for key in (start..end).step_by(4) {
75///                 my_cache.invalidate(&key);
76///             }
77///         })
78///     })
79///     .collect();
80///
81/// // Wait for all threads to complete.
82/// threads.into_iter().for_each(|t| t.join().expect("Failed"));
83///
84/// // Verify the result.
85/// for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) {
86///     if key % 4 == 0 {
87///         assert_eq!(cache.get(&key), None);
88///     } else {
89///         assert_eq!(cache.get(&key), Some(value(key)));
90///     }
91/// }
92/// ```
93///
94/// # Avoiding to clone the value at `get`
95///
96/// The return type of `get` method is `Option<V>` instead of `Option<&V>`. Every
97/// time `get` is called for an existing key, it creates a clone of the stored value
98/// `V` and returns it. This is because the `Cache` allows concurrent updates from
99/// threads so a value stored in the cache can be dropped or replaced at any time by
100/// any other thread. `get` cannot return a reference `&V` as it is impossible to
101/// guarantee the value outlives the reference.
102///
103/// If you want to store values that will be expensive to clone, wrap them by
104/// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a
105/// thread-safe reference-counted pointer and its `clone()` method is cheap.
106///
107/// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
108///
109/// # Size-based Eviction
110///
111/// ```rust
112/// use std::convert::TryInto;
113/// use mini_moka::sync::Cache;
114///
115/// // Evict based on the number of entries in the cache.
116/// let cache = Cache::builder()
117///     // Up to 10,000 entries.
118///     .max_capacity(10_000)
119///     // Create the cache.
120///     .build();
121/// cache.insert(1, "one".to_string());
122///
123/// // Evict based on the byte length of strings in the cache.
124/// let cache = Cache::builder()
125///     // A weigher closure takes &K and &V and returns a u32
126///     // representing the relative size of the entry.
127///     .weigher(|_key, value: &String| -> u32 {
128///         value.len().try_into().unwrap_or(u32::MAX)
129///     })
130///     // This cache will hold up to 32MiB of values.
131///     .max_capacity(32 * 1024 * 1024)
132///     .build();
133/// cache.insert(2, "two".to_string());
134/// ```
135///
136/// If your cache should not grow beyond a certain size, use the `max_capacity`
137/// method of the [`CacheBuilder`][builder-struct] to set the upper bound. The cache
138/// will try to evict entries that have not been used recently or very often.
139///
140/// At the cache creation time, a weigher closure can be set by the `weigher` method
141/// of the `CacheBuilder`. A weigher closure takes `&K` and `&V` as the arguments and
142/// returns a `u32` representing the relative size of the entry:
143///
/// - If the `weigher` is _not_ set, the cache will treat each entry as having the same
145///   size of `1`. This means the cache will be bounded by the number of entries.
146/// - If the `weigher` is set, the cache will call the weigher to calculate the
147///   weighted size (relative size) on an entry. This means the cache will be bounded
148///   by the total weighted size of entries.
149///
150/// Note that weighted sizes are not used when making eviction selections.
151///
152/// [builder-struct]: ./struct.CacheBuilder.html
153///
154/// # Time-based Expirations
155///
156/// `Cache` supports the following expiration policies:
157///
158/// - **Time to live**: A cached entry will be expired after the specified duration
159///   past from `insert`.
160/// - **Time to idle**: A cached entry will be expired after the specified duration
161///   past from `get` or `insert`.
162///
163/// ```rust
164/// use mini_moka::sync::Cache;
165/// use std::time::Duration;
166///
167/// let cache = Cache::builder()
168///     // Time to live (TTL): 30 minutes
169///     .time_to_live(Duration::from_secs(30 * 60))
170///     // Time to idle (TTI):  5 minutes
171///     .time_to_idle(Duration::from_secs( 5 * 60))
172///     // Create the cache.
173///     .build();
174///
175/// // This entry will expire after 5 minutes (TTI) if there is no get().
176/// cache.insert(0, "zero");
177///
178/// // This get() will extend the entry life for another 5 minutes.
179/// cache.get(&0);
180///
181/// // Even though we keep calling get(), the entry will expire
182/// // after 30 minutes (TTL) from the insert().
183/// ```
184///
185/// # Thread Safety
186///
187/// All methods provided by the `Cache` are considered thread-safe, and can be safely
188/// accessed by multiple concurrent threads.
189///
190/// - `Cache<K, V, S>` requires trait bounds `Send`, `Sync` and `'static` for `K`
191///   (key), `V` (value) and `S` (hasher state).
192/// - `Cache<K, V, S>` will implement `Send` and `Sync`.
193///
194/// # Sharing a cache across threads
195///
196/// To share a cache across threads, do one of the followings:
197///
198/// - Create a clone of the cache by calling its `clone` method and pass it to other
199///   thread.
200/// - Wrap the cache by a `sync::OnceCell` or `sync::Lazy` from
///   [once_cell][once-cell-crate] crate, and set it to a `static` variable.
202///
203/// Cloning is a cheap operation for `Cache` as it only creates thread-safe
204/// reference-counted pointers to the internal data structures.
205///
206/// [once-cell-crate]: https://crates.io/crates/once_cell
207///
208/// # Hashing Algorithm
209///
210/// By default, `Cache` uses a hashing algorithm selected to provide resistance
211/// against HashDoS attacks. It will be the same one used by
212/// `std::collections::HashMap`, which is currently SipHash 1-3.
213///
214/// While SipHash's performance is very competitive for medium sized keys, other
215/// hashing algorithms will outperform it for small keys such as integers as well as
216/// large keys such as long strings. However those algorithms will typically not
217/// protect against attacks such as HashDoS.
218///
219/// The hashing algorithm can be replaced on a per-`Cache` basis using the
220/// [`build_with_hasher`][build-with-hasher-method] method of the
221/// `CacheBuilder`. Many alternative algorithms are available on crates.io, such
222/// as the [aHash][ahash-crate] crate.
223///
224/// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher
225/// [ahash-crate]: https://crates.io/crates/ahash
226///
pub struct Cache<K, V, S = RandomState> {
    // All shared state (the map, policy data and channels) lives in `BaseCache`;
    // `Cache` is a thin, cheaply-cloneable handle around it.
    base: BaseCache<K, V, S>,
}
230
// TODO: https://github.com/moka-rs/moka/issues/54
//
// NOTE(review): soundness of this manual `Send` impl relies on `BaseCache`'s
// internal structures being safe to transfer across threads when `K`, `V` and
// `S` satisfy the bounds below — see the linked issue for the clippy lint this
// `allow` works around; confirm against `BaseCache` before changing the bounds.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl<K, V, S> Send for Cache<K, V, S>
where
    K: Send + Sync,
    V: Send + Sync,
    S: Send,
{
}
240
// NOTE(review): as with the `Send` impl above, this relies on `BaseCache`'s
// internals being safe for shared access from multiple threads under these
// bounds — verify against `BaseCache` if the internals change.
unsafe impl<K, V, S> Sync for Cache<K, V, S>
where
    K: Send + Sync,
    V: Send + Sync,
    S: Sync,
{
}
248
249// NOTE: We cannot do `#[derive(Clone)]` because it will add `Clone` bound to `K`.
250impl<K, V, S> Clone for Cache<K, V, S> {
251    /// Makes a clone of this shared cache.
252    ///
253    /// This operation is cheap as it only creates thread-safe reference counted
254    /// pointers to the shared internal data structures.
255    fn clone(&self) -> Self {
256        Self {
257            base: self.base.clone(),
258        }
259    }
260}
261
262impl<K, V, S> fmt::Debug for Cache<K, V, S>
263where
264    K: Eq + Hash + fmt::Debug,
265    V: fmt::Debug,
266    S: BuildHasher + Clone,
267{
268    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
269        let mut d_map = f.debug_map();
270
271        for r in self.iter() {
272            let (k, v) = r.pair();
273            d_map.entry(k, v);
274        }
275
276        d_map.finish()
277    }
278}
279
280impl<K, V> Cache<K, V, RandomState>
281where
282    K: Hash + Eq + Send + Sync + 'static,
283    V: Clone + Send + Sync + 'static,
284{
285    /// Constructs a new `Cache<K, V>` that will store up to the `max_capacity`.
286    ///
287    /// To adjust various configuration knobs such as `initial_capacity` or
288    /// `time_to_live`, use the [`CacheBuilder`][builder-struct].
289    ///
290    /// [builder-struct]: ./struct.CacheBuilder.html
291    pub fn new(max_capacity: u64) -> Self {
292        let build_hasher = RandomState::default();
293        Self::with_everything(Some(max_capacity), None, build_hasher, None, None, None)
294    }
295
296    /// Returns a [`CacheBuilder`][builder-struct], which can builds a `Cache` with
297    /// various configuration knobs.
298    ///
299    /// [builder-struct]: ./struct.CacheBuilder.html
300    pub fn builder() -> CacheBuilder<K, V, Cache<K, V, RandomState>> {
301        CacheBuilder::default()
302    }
303}
304
impl<K, V, S> Cache<K, V, S> {
    /// Returns a read-only cache policy of this cache.
    ///
    /// At this time, cache policy cannot be modified after cache creation.
    /// A future version may support modifying it.
    pub fn policy(&self) -> Policy {
        self.base.policy()
    }

    /// Returns an approximate number of entries in this cache.
    ///
    /// The value returned is _an estimate_; the actual count may differ if there are
    /// concurrent insertions or removals, or if some entries are pending removal due
    /// to expiration. This inaccuracy can be mitigated by performing a `sync()`
    /// first.
    ///
    /// # Example
    ///
    /// ```rust
    /// use mini_moka::sync::Cache;
    ///
    /// let cache = Cache::new(10);
    /// cache.insert('n', "Netherland Dwarf");
    /// cache.insert('l', "Lop Eared");
    /// cache.insert('d', "Dutch");
    ///
    /// // Ensure an entry exists.
    /// assert!(cache.contains_key(&'n'));
    ///
    /// // However, followings may print stale number zeros instead of threes.
    /// println!("{}", cache.entry_count());   // -> 0
    /// println!("{}", cache.weighted_size()); // -> 0
    ///
    /// // To mitigate the inaccuracy, bring `ConcurrentCacheExt` trait to
    /// // the scope so we can use `sync` method.
    /// use mini_moka::sync::ConcurrentCacheExt;
    /// // Call `sync` to run pending internal tasks.
    /// cache.sync();
    ///
    /// // Followings will print the actual numbers.
    /// println!("{}", cache.entry_count());   // -> 3
    /// println!("{}", cache.weighted_size()); // -> 3
    /// ```
    ///
    pub fn entry_count(&self) -> u64 {
        self.base.entry_count()
    }

    /// Returns an approximate total weighted size of entries in this cache.
    ///
    /// The value returned is _an estimate_; the actual size may differ if there are
    /// concurrent insertions or removals, or if some entries are pending removal due
    /// to expiration. This inaccuracy can be mitigated by performing a `sync()`
    /// first. See [`entry_count`](#method.entry_count) for a sample code.
    ///
    /// Note: when no weigher is configured, each entry weighs `1`, so this
    /// equals the entry count.
    pub fn weighted_size(&self) -> u64 {
        self.base.weighted_size()
    }
}
363
364impl<K, V, S> Cache<K, V, S>
365where
366    K: Hash + Eq + Send + Sync + 'static,
367    V: Clone + Send + Sync + 'static,
368    S: BuildHasher + Clone + Send + Sync + 'static,
369{
370    pub(crate) fn with_everything(
371        max_capacity: Option<u64>,
372        initial_capacity: Option<usize>,
373        build_hasher: S,
374        weigher: Option<Weigher<K, V>>,
375        time_to_live: Option<Duration>,
376        time_to_idle: Option<Duration>,
377    ) -> Self {
378        Self {
379            base: BaseCache::new(
380                max_capacity,
381                initial_capacity,
382                build_hasher,
383                weigher,
384                time_to_live,
385                time_to_idle,
386            ),
387        }
388    }
389
390    /// Returns `true` if the cache contains a value for the key.
391    ///
392    /// Unlike the `get` method, this method is not considered a cache read operation,
393    /// so it does not update the historic popularity estimator or reset the idle
394    /// timer for the key.
395    ///
396    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
397    /// on the borrowed form _must_ match those for the key type.
398    pub fn contains_key<Q>(&self, key: &Q) -> bool
399    where
400        Arc<K>: Borrow<Q>,
401        Q: Hash + Eq + ?Sized,
402    {
403        self.base.contains_key(key)
404    }
405
406    /// Returns a _clone_ of the value corresponding to the key.
407    ///
408    /// If you want to store values that will be expensive to clone, wrap them by
409    /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a
410    /// thread-safe reference-counted pointer and its `clone()` method is cheap.
411    ///
412    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
413    /// on the borrowed form _must_ match those for the key type.
414    ///
415    /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
416    pub fn get<Q>(&self, key: &Q) -> Option<V>
417    where
418        Arc<K>: Borrow<Q>,
419        Q: Hash + Eq + ?Sized,
420    {
421        self.base.get_with_hash(key, self.base.hash(key))
422    }
423
424    /// Deprecated, replaced with [`get`](#method.get)
425    #[doc(hidden)]
426    #[deprecated(since = "0.8.0", note = "Replaced with `get`")]
427    pub fn get_if_present<Q>(&self, key: &Q) -> Option<V>
428    where
429        Arc<K>: Borrow<Q>,
430        Q: Hash + Eq + ?Sized,
431    {
432        self.get(key)
433    }
434
435    /// Inserts a key-value pair into the cache.
436    ///
437    /// If the cache has this key present, the value is updated.
438    pub fn insert(&self, key: K, value: V) {
439        let hash = self.base.hash(&key);
440        let key = Arc::new(key);
441        self.insert_with_hash(key, hash, value)
442    }
443
444    pub(crate) fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
445        let (op, now) = self.base.do_insert_with_hash(key, hash, value);
446        let hk = self.base.housekeeper.as_ref();
447        Self::schedule_write_op(
448            self.base.inner.as_ref(),
449            &self.base.write_op_ch,
450            op,
451            now,
452            hk,
453        )
454        .expect("Failed to insert");
455    }
456
457    /// Discards any cached value for the key.
458    ///
459    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
460    /// on the borrowed form _must_ match those for the key type.
461    pub fn invalidate<Q>(&self, key: &Q)
462    where
463        Arc<K>: Borrow<Q>,
464        Q: Hash + Eq + ?Sized,
465    {
466        if let Some(kv) = self.base.remove_entry(key) {
467            let op = WriteOp::Remove(kv);
468            let now = self.base.current_time_from_expiration_clock();
469            let hk = self.base.housekeeper.as_ref();
470            Self::schedule_write_op(
471                self.base.inner.as_ref(),
472                &self.base.write_op_ch,
473                op,
474                now,
475                hk,
476            )
477            .expect("Failed to remove");
478        }
479    }
480
481    /// Discards all cached values.
482    ///
483    /// This method returns immediately and a background thread will evict all the
484    /// cached values inserted before the time when this method was called. It is
485    /// guaranteed that the `get` method must not return these invalidated values
486    /// even if they have not been evicted.
487    ///
488    /// Like the `invalidate` method, this method does not clear the historic
489    /// popularity estimator of keys so that it retains the client activities of
490    /// trying to retrieve an item.
491    pub fn invalidate_all(&self) {
492        self.base.invalidate_all();
493    }
494}
495
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
// This seems a false positive. The lifetimes are used in the trait bounds.
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
#[allow(clippy::extra_unused_lifetimes)]
impl<'a, K, V, S> Cache<K, V, S>
where
    K: 'a + Eq + Hash,
    V: 'a,
    S: BuildHasher + Clone,
{
    /// Creates an iterator visiting all key-value pairs in arbitrary order. The
    /// iterator element type is [`EntryRef<'a, K, V, S>`][moka-entry-ref].
    ///
    /// Unlike the `get` method, visiting entries via an iterator does not update
    /// the historic popularity estimator or reset idle timers for keys.
    ///
    /// # Locking behavior
    ///
    /// This iterator relies on the iterator of [`dashmap::DashMap`][dashmap-iter],
    /// which employs read-write locks. May deadlock if the thread holding an
    /// iterator attempts to update the cache.
    ///
    /// [moka-entry-ref]: ./struct.EntryRef.html
    /// [dashmap-iter]: <https://docs.rs/dashmap/*/dashmap/struct.DashMap.html#method.iter>
    ///
    /// # Examples
    ///
    /// ```rust
    /// use mini_moka::sync::Cache;
    ///
    /// let cache = Cache::new(100);
    /// cache.insert("Julia", 14);
    ///
    /// let mut iter = cache.iter();
    /// let entry_ref = iter.next().unwrap();
    /// assert_eq!(entry_ref.pair(), (&"Julia", &14));
    /// assert_eq!(entry_ref.key(), &"Julia");
    /// assert_eq!(entry_ref.value(), &14);
    /// assert_eq!(*entry_ref, 14);
    ///
    /// assert!(iter.next().is_none());
    /// ```
    ///
    pub fn iter(&self) -> Iter<'_, K, V, S> {
        self.base.iter()
    }
}
543
impl<K, V, S> ConcurrentCacheExt<K, V> for Cache<K, V, S>
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    /// Runs pending internal (housekeeping) tasks.
    ///
    /// `MAX_SYNC_REPEATS` bounds how many times the internal sync loop repeats
    /// in one call — see `InnerSync::sync`.
    fn sync(&self) {
        self.base.inner.sync(MAX_SYNC_REPEATS);
    }
}
554
// Allows `for entry in &cache { ... }`; delegates to `Cache::iter`, so the
// same locking caveats apply.
impl<'a, K, V, S> IntoIterator for &'a Cache<K, V, S>
where
    K: 'a + Eq + Hash,
    V: 'a,
    S: BuildHasher + Clone,
{
    type Item = EntryRef<'a, K, V>;

    type IntoIter = Iter<'a, K, V, S>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
569
570// private methods
571impl<K, V, S> Cache<K, V, S>
572where
573    K: Hash + Eq + Send + Sync + 'static,
574    V: Clone + Send + Sync + 'static,
575    S: BuildHasher + Clone + Send + Sync + 'static,
576{
577    #[inline]
578    fn schedule_write_op(
579        inner: &impl InnerSync,
580        ch: &Sender<WriteOp<K, V>>,
581        op: WriteOp<K, V>,
582        now: Instant,
583        housekeeper: Option<&Arc<Housekeeper>>,
584    ) -> Result<(), TrySendError<WriteOp<K, V>>> {
585        let mut op = op;
586
587        // NOTES:
588        // - This will block when the channel is full.
589        // - We are doing a busy-loop here. We were originally calling `ch.send(op)?`,
590        //   but we got a notable performance degradation.
591        loop {
592            BaseCache::<K, V, S>::apply_reads_writes_if_needed(inner, ch, now, housekeeper);
593            match ch.try_send(op) {
594                Ok(()) => break,
595                Err(TrySendError::Full(op1)) => {
596                    op = op1;
597                    std::thread::sleep(Duration::from_micros(WRITE_RETRY_INTERVAL_MICROS));
598                }
599                Err(e @ TrySendError::Disconnected(_)) => return Err(e),
600            }
601        }
602        Ok(())
603    }
604}
605
// For unit tests.
#[cfg(test)]
impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    /// Returns `true` when the entry count is zero. Note that `entry_count` is
    /// an estimate; tests should call `sync()` first for an accurate answer.
    pub(crate) fn is_table_empty(&self) -> bool {
        self.entry_count() == 0
    }

    /// Reconfigures the cache internals for deterministic testing
    /// (delegates to `BaseCache::reconfigure_for_testing`).
    pub(crate) fn reconfigure_for_testing(&mut self) {
        self.base.reconfigure_for_testing();
    }

    /// Sets the expiration clock; tests pass a mocked `Clock` so they can
    /// advance time manually instead of sleeping.
    pub(crate) fn set_expiration_clock(&self, clock: Option<crate::common::time::Clock>) {
        self.base.set_expiration_clock(clock);
    }
}
626
627// To see the debug prints, run test as `cargo test -- --nocapture`
628#[cfg(test)]
629mod tests {
630    use super::{Cache, ConcurrentCacheExt};
631    use crate::common::time::Clock;
632
633    use std::{sync::Arc, time::Duration};
634
    // Exercises insert/get/contains_key/invalidate on a capacity-3 cache,
    // including the frequency-based admission of "d": it is rejected until its
    // access count is high enough to displace an existing entry.
    #[test]
    fn basic_single_thread() {
        let mut cache = Cache::new(3);
        cache.reconfigure_for_testing();

        // Make the cache exterior immutable.
        let cache = cache;

        cache.insert("a", "alice");
        cache.insert("b", "bob");
        assert_eq!(cache.get(&"a"), Some("alice"));
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert_eq!(cache.get(&"b"), Some("bob"));
        cache.sync();
        // counts: a -> 1, b -> 1

        cache.insert("c", "cindy");
        assert_eq!(cache.get(&"c"), Some("cindy"));
        assert!(cache.contains_key(&"c"));
        // counts: a -> 1, b -> 1, c -> 1
        cache.sync();

        assert!(cache.contains_key(&"a"));
        assert_eq!(cache.get(&"a"), Some("alice"));
        assert_eq!(cache.get(&"b"), Some("bob"));
        assert!(cache.contains_key(&"b"));
        cache.sync();
        // counts: a -> 2, b -> 2, c -> 1

        // "d" should not be admitted because its frequency is too low.
        cache.insert("d", "david"); //   count: d -> 0
        cache.sync();
        assert_eq!(cache.get(&"d"), None); //   d -> 1
        assert!(!cache.contains_key(&"d"));

        cache.insert("d", "david");
        cache.sync();
        assert!(!cache.contains_key(&"d"));
        assert_eq!(cache.get(&"d"), None); //   d -> 2

        // "d" should be admitted and "c" should be evicted
        // because d's frequency is higher than c's.
        cache.insert("d", "dennis");
        cache.sync();
        assert_eq!(cache.get(&"a"), Some("alice"));
        assert_eq!(cache.get(&"b"), Some("bob"));
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), Some("dennis"));
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert!(!cache.contains_key(&"c"));
        assert!(cache.contains_key(&"d"));

        cache.invalidate(&"b");
        assert_eq!(cache.get(&"b"), None);
        assert!(!cache.contains_key(&"b"));
    }
693
    // Same admission/eviction scenario as `basic_single_thread`, but with a
    // weigher so capacity (31) is a total weight budget rather than a count.
    #[test]
    fn size_aware_eviction() {
        let weigher = |_k: &&str, v: &(&str, u32)| v.1;

        let alice = ("alice", 10);
        let bob = ("bob", 15);
        let bill = ("bill", 20);
        let cindy = ("cindy", 5);
        let david = ("david", 15);
        let dennis = ("dennis", 15);

        let mut cache = Cache::builder().max_capacity(31).weigher(weigher).build();
        cache.reconfigure_for_testing();

        // Make the cache exterior immutable.
        let cache = cache;

        cache.insert("a", alice);
        cache.insert("b", bob);
        assert_eq!(cache.get(&"a"), Some(alice));
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert_eq!(cache.get(&"b"), Some(bob));
        cache.sync();
        // order (LRU -> MRU) and counts: a -> 1, b -> 1

        cache.insert("c", cindy);
        assert_eq!(cache.get(&"c"), Some(cindy));
        assert!(cache.contains_key(&"c"));
        // order and counts: a -> 1, b -> 1, c -> 1
        cache.sync();

        assert!(cache.contains_key(&"a"));
        assert_eq!(cache.get(&"a"), Some(alice));
        assert_eq!(cache.get(&"b"), Some(bob));
        assert!(cache.contains_key(&"b"));
        cache.sync();
        // order and counts: c -> 1, a -> 2, b -> 2

        // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10).
        // "d" must have higher count than 3, which is the aggregated count
        // of "a" and "c".
        cache.insert("d", david); //   count: d -> 0
        cache.sync();
        assert_eq!(cache.get(&"d"), None); //   d -> 1
        assert!(!cache.contains_key(&"d"));

        cache.insert("d", david);
        cache.sync();
        assert!(!cache.contains_key(&"d"));
        assert_eq!(cache.get(&"d"), None); //   d -> 2

        cache.insert("d", david);
        cache.sync();
        assert_eq!(cache.get(&"d"), None); //   d -> 3
        assert!(!cache.contains_key(&"d"));

        cache.insert("d", david);
        cache.sync();
        assert!(!cache.contains_key(&"d"));
        assert_eq!(cache.get(&"d"), None); //   d -> 4

        // Finally "d" should be admitted by evicting "c" and "a".
        cache.insert("d", dennis);
        cache.sync();
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), Some(bob));
        assert_eq!(cache.get(&"c"), None);
        assert_eq!(cache.get(&"d"), Some(dennis));
        assert!(!cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert!(!cache.contains_key(&"c"));
        assert!(cache.contains_key(&"d"));

        // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15).
        cache.insert("b", bill);
        cache.sync();
        assert_eq!(cache.get(&"b"), Some(bill));
        assert_eq!(cache.get(&"d"), None);
        assert!(cache.contains_key(&"b"));
        assert!(!cache.contains_key(&"d"));

        // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15).
        cache.insert("a", alice);
        cache.insert("b", bob);
        cache.sync();
        assert_eq!(cache.get(&"a"), Some(alice));
        assert_eq!(cache.get(&"b"), Some(bob));
        assert_eq!(cache.get(&"d"), None);
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert!(!cache.contains_key(&"d"));

        // Verify the sizes.
        assert_eq!(cache.entry_count(), 2);
        assert_eq!(cache.weighted_size(), 25); // alice (10) + bob (15)
    }
791
    // Smoke test: four threads share one cache handle, each inserting,
    // reading, and invalidating concurrently; afterwards only key 20 remains.
    #[test]
    fn basic_multi_threads() {
        let num_threads = 4;
        let cache = Cache::new(100);

        // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect
        #[allow(clippy::needless_collect)]
        let handles = (0..num_threads)
            .map(|id| {
                // Cloning is cheap; each thread gets its own handle.
                let cache = cache.clone();
                std::thread::spawn(move || {
                    cache.insert(10, format!("{}-100", id));
                    cache.get(&10);
                    cache.insert(20, format!("{}-200", id));
                    cache.invalidate(&10);
                })
            })
            .collect::<Vec<_>>();

        handles.into_iter().for_each(|h| h.join().expect("Failed"));

        assert!(cache.get(&10).is_none());
        assert!(cache.get(&20).is_some());
        assert!(!cache.contains_key(&10));
        assert!(cache.contains_key(&20));
    }
818
    // Verifies that `invalidate_all` removes every entry inserted before the
    // call, while entries inserted afterwards ("d") survive.
    #[test]
    fn invalidate_all() {
        let mut cache = Cache::new(100);
        cache.reconfigure_for_testing();

        // Make the cache exterior immutable.
        let cache = cache;

        cache.insert("a", "alice");
        cache.insert("b", "bob");
        cache.insert("c", "cindy");
        assert_eq!(cache.get(&"a"), Some("alice"));
        assert_eq!(cache.get(&"b"), Some("bob"));
        assert_eq!(cache.get(&"c"), Some("cindy"));
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        assert!(cache.contains_key(&"c"));

        // `cache.sync()` is no longer needed here before invalidating. The last
        // modified timestamp of the entries were updated when they were inserted.
        // https://github.com/moka-rs/moka/issues/155

        cache.invalidate_all();
        cache.sync();

        cache.insert("d", "david");
        cache.sync();

        assert!(cache.get(&"a").is_none());
        assert!(cache.get(&"b").is_none());
        assert!(cache.get(&"c").is_none());
        assert_eq!(cache.get(&"d"), Some("david"));
        assert!(!cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"b"));
        assert!(!cache.contains_key(&"c"));
        assert!(cache.contains_key(&"d"));
    }
856
    // Verifies TTL expiration (10s) with a mocked clock: entries become
    // invisible exactly at their TTL, and re-inserting resets the TTL.
    #[test]
    fn time_to_live() {
        let mut cache = Cache::builder()
            .max_capacity(100)
            .time_to_live(Duration::from_secs(10))
            .build();

        cache.reconfigure_for_testing();

        // Install a mock clock so the test can advance time without sleeping.
        let (clock, mock) = Clock::mock();
        cache.set_expiration_clock(Some(clock));

        // Make the cache exterior immutable.
        let cache = cache;

        cache.insert("a", "alice");
        cache.sync();

        mock.increment(Duration::from_secs(5)); // 5 secs from the start.
        cache.sync();

        assert_eq!(cache.get(&"a"), Some("alice"));
        assert!(cache.contains_key(&"a"));

        mock.increment(Duration::from_secs(5)); // 10 secs.
        assert_eq!(cache.get(&"a"), None);
        assert!(!cache.contains_key(&"a"));

        assert_eq!(cache.iter().count(), 0);

        cache.sync();
        assert!(cache.is_table_empty());

        cache.insert("b", "bob");
        cache.sync();

        assert_eq!(cache.entry_count(), 1);

        mock.increment(Duration::from_secs(5)); // 15 secs.
        cache.sync();

        assert_eq!(cache.get(&"b"), Some("bob"));
        assert!(cache.contains_key(&"b"));
        assert_eq!(cache.entry_count(), 1);

        // Updating the value resets the entry's TTL.
        cache.insert("b", "bill");
        cache.sync();

        mock.increment(Duration::from_secs(5)); // 20 secs
        cache.sync();

        assert_eq!(cache.get(&"b"), Some("bill"));
        assert!(cache.contains_key(&"b"));
        assert_eq!(cache.entry_count(), 1);

        mock.increment(Duration::from_secs(5)); // 25 secs
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert!(!cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"b"));

        assert_eq!(cache.iter().count(), 0);

        cache.sync();
        assert!(cache.is_table_empty());
    }
923
    /// Verifies time-to-idle (TTI) expiration using a mock clock: an entry
    /// expires 10 seconds after its last *read or write*. A `get` refreshes
    /// the idle timer, while `contains_key` does not.
    #[test]
    fn time_to_idle() {
        let mut cache = Cache::builder()
            .max_capacity(100)
            .time_to_idle(Duration::from_secs(10))
            .build();

        cache.reconfigure_for_testing();

        // Use a mock clock so the test can advance time deterministically.
        let (clock, mock) = Clock::mock();
        cache.set_expiration_clock(Some(clock));

        // Make the cache exterior immutable.
        let cache = cache;

        cache.insert("a", "alice");
        cache.sync();

        mock.increment(Duration::from_secs(5)); // 5 secs from the start.
        cache.sync();

        // This read resets "a"'s idle timer; it will now expire at 15 secs.
        assert_eq!(cache.get(&"a"), Some("alice"));

        mock.increment(Duration::from_secs(5)); // 10 secs.
        cache.sync();

        cache.insert("b", "bob");
        cache.sync();

        assert_eq!(cache.entry_count(), 2);

        mock.increment(Duration::from_secs(2)); // 12 secs.
        cache.sync();

        // contains_key does not reset the idle timer for the key.
        assert!(cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));
        cache.sync();

        assert_eq!(cache.entry_count(), 2);

        mock.increment(Duration::from_secs(3)); // 15 secs.
        // "a" was last read at 5 secs, so it is now idle-expired; "b" was
        // inserted at 10 secs and is still live. This read refreshes "b".
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), Some("bob"));
        assert!(!cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));

        assert_eq!(cache.iter().count(), 1);

        cache.sync();
        assert_eq!(cache.entry_count(), 1);

        mock.increment(Duration::from_secs(10)); // 25 secs
        // "b" was last read at 15 secs; 10 idle secs later it is expired too.
        assert_eq!(cache.get(&"a"), None);
        assert_eq!(cache.get(&"b"), None);
        assert!(!cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"b"));

        assert_eq!(cache.iter().count(), 0);

        cache.sync();
        assert!(cache.is_table_empty());
    }
987
988    #[test]
989    fn test_iter() {
990        const NUM_KEYS: usize = 50;
991
992        fn make_value(key: usize) -> String {
993            format!("val: {}", key)
994        }
995
996        let cache = Cache::builder()
997            .max_capacity(100)
998            .time_to_idle(Duration::from_secs(10))
999            .build();
1000
1001        for key in 0..NUM_KEYS {
1002            cache.insert(key, make_value(key));
1003        }
1004
1005        let mut key_set = std::collections::HashSet::new();
1006
1007        for entry in &cache {
1008            let (key, value) = entry.pair();
1009            assert_eq!(value, &make_value(*key));
1010
1011            key_set.insert(*key);
1012        }
1013
1014        // Ensure there are no missing or duplicate keys in the iteration.
1015        assert_eq!(key_set.len(), NUM_KEYS);
1016
1017        // DO NOT REMOVE THE COMMENT FROM THIS BLOCK.
1018        // This block demonstrates how you can write a code to get a deadlock.
1019        // {
1020        //     let mut iter = cache.iter();
1021        //     let _ = iter.next();
1022
1023        //     for key in 0..NUM_KEYS {
1024        //         cache.insert(key, make_value(key));
1025        //         println!("{}", key);
1026        //     }
1027
1028        //     let _ = iter.next();
1029        // }
1030    }
1031
    /// Runs 16 threads at the same time and ensures no deadlock occurs.
    ///
    /// - Eight of the threads will update key-values in the cache.
    /// - Eight others will iterate the cache.
    ///
    #[test]
    fn test_iter_multi_threads() {
        use std::collections::HashSet;

        const NUM_KEYS: usize = 1024;
        const NUM_THREADS: usize = 16;

        fn make_value(key: usize) -> String {
            format!("val: {}", key)
        }

        let cache = Cache::builder()
            .max_capacity(2048)
            .time_to_idle(Duration::from_secs(10))
            .build();

        // Initialize the cache.
        for key in 0..NUM_KEYS {
            cache.insert(key, make_value(key));
        }

        // Hold the write lock while spawning so that no worker thread can
        // start its work until all of them have been spawned: each worker
        // first blocks on acquiring a read lock.
        let rw_lock = Arc::new(std::sync::RwLock::<()>::default());
        let write_lock = rw_lock.write().unwrap();

        // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect
        #[allow(clippy::needless_collect)]
        let handles = (0..NUM_THREADS)
            .map(|n| {
                let cache = cache.clone();
                let rw_lock = Arc::clone(&rw_lock);

                if n % 2 == 0 {
                    // This thread will update the cache.
                    std::thread::spawn(move || {
                        let read_lock = rw_lock.read().unwrap();
                        for key in 0..NUM_KEYS {
                            // TODO: Update keys in a random order?
                            cache.insert(key, make_value(key));
                        }
                        std::mem::drop(read_lock);
                    })
                } else {
                    // This thread will iterate the cache.
                    std::thread::spawn(move || {
                        let read_lock = rw_lock.read().unwrap();
                        let mut key_set = HashSet::new();
                        for entry in &cache {
                            let (key, value) = entry.pair();
                            assert_eq!(value, &make_value(*key));
                            key_set.insert(*key);
                        }
                        // Ensure there are no missing or duplicate keys in the iteration.
                        assert_eq!(key_set.len(), NUM_KEYS);
                        std::mem::drop(read_lock);
                    })
                }
            })
            .collect::<Vec<_>>();

        // Let these threads run by releasing the write lock.
        std::mem::drop(write_lock);

        handles.into_iter().for_each(|h| h.join().expect("Failed"));

        // Ensure there are no missing or duplicate keys in the iteration.
        let key_set = cache.iter().map(|ent| *ent.key()).collect::<HashSet<_>>();
        assert_eq!(key_set.len(), NUM_KEYS);
    }
1105
1106    #[test]
1107    fn test_debug_format() {
1108        let cache = Cache::new(10);
1109        cache.insert('a', "alice");
1110        cache.insert('b', "bob");
1111        cache.insert('c', "cindy");
1112
1113        let debug_str = format!("{:?}", cache);
1114        assert!(debug_str.starts_with('{'));
1115        assert!(debug_str.contains(r#"'a': "alice""#));
1116        assert!(debug_str.contains(r#"'b': "bob""#));
1117        assert!(debug_str.contains(r#"'c': "cindy""#));
1118        assert!(debug_str.ends_with('}'));
1119    }
1120}