aba_cache/lru/asynchronous.rs
use super::Cache as InnerCache;
use std::{borrow::Borrow, hash::Hash, sync::Arc, time::Duration};
use tokio::{sync::Mutex, task, time};

/// Async version of `Cache` with an LRU eviction strategy.
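///
/// A minimal usage sketch (mirroring the method doctests below):
///
/// ```
/// use aba_cache as cache;
/// use cache::LruAsyncCache;
///
/// #[tokio::main]
/// async fn main() {
///     let cache = LruAsyncCache::new(2, 60);
///
///     assert_eq!(cache.put(String::from("1"), "a").await, None);
///     assert_eq!(cache.get(&String::from("1")).await, Some("a"));
/// }
/// ```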
pub struct Cache<K, V>(Mutex<InnerCache<K, V>>);

#[allow(clippy::needless_doctest_main)]
impl<K: 'static + Hash + Eq + Sync + Send, V: 'static + Clone + Send> Cache<K, V> {
    /// Creates a new `Cache` whose entries expire after `timeout_secs` seconds,
    /// allocating a new slab with capacity `multiply_cap` when no space is
    /// available and no entry has expired.
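    ///
    /// # Example
    ///
    /// A minimal construction sketch (the background eviction task is spawned internally):
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///
    ///     cache.put(String::from("1"), "a").await;
    ///     assert_eq!(cache.len().await, 1);
    ///     assert_eq!(cache.capacity().await, 2);
    /// }
    /// ```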
    pub fn new(multiply_cap: usize, timeout_secs: u64) -> Arc<Self> {
        let cache = Arc::new(Cache(Mutex::new(InnerCache::new(
            multiply_cap,
            timeout_secs,
        ))));
        // Spawn a background task that evicts expired entries once every
        // `timeout_secs` seconds for as long as the runtime is alive.
        let cache_async = cache.clone();
        task::spawn(async move {
            let duration = Duration::from_secs(timeout_secs);
            loop {
                time::delay_for(duration).await;
                cache_async.evict().await
            }
        });
        cache
    }

    /// Returns a clone of the value corresponding to the key, or `None` if the
    /// key is not present in the cache. Moves the key to the head of the LRU
    /// list if it exists.
    ///
    /// # Example
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///
    ///     assert_eq!(cache.put(String::from("1"), "a").await, None);
    ///     assert_eq!(cache.put(String::from("2"), "b").await, None);
    ///     assert_eq!(cache.put(String::from("2"), "c").await, Some("b"));
    ///     assert_eq!(cache.put(String::from("3"), "d").await, None);
    ///
    ///     assert_eq!(cache.get(&String::from("1")).await, Some("a"));
    ///     assert_eq!(cache.get(&String::from("2")).await, Some("c"));
    ///     assert_eq!(cache.get(&String::from("3")).await, Some("d"));
    /// }
    /// ```
    pub async fn get<Q: ?Sized>(&self, key: &Q) -> Option<V>
    where
        Arc<K>: Borrow<Q>,
        Q: Hash + Eq,
    {
        let mut cache = self.0.lock().await;
        cache.get(key).cloned()
    }

    /// Puts a key-value pair into the cache. If the key already exists in the
    /// cache, updates the key's value and returns the old value. Otherwise,
    /// `None` is returned.
    ///
    /// # Example
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///
    ///     assert_eq!(None, cache.put(String::from("1"), "a").await);
    ///     assert_eq!(None, cache.put(String::from("2"), "b").await);
    ///     assert_eq!(Some("b"), cache.put(String::from("2"), "beta").await);
    ///
    ///     assert_eq!(cache.get(&String::from("1")).await, Some("a"));
    ///     assert_eq!(cache.get(&String::from("2")).await, Some("beta"));
    /// }
    /// ```
    pub async fn put(&self, key: K, value: V) -> Option<V> {
        let mut cache = self.0.lock().await;
        cache.put(key, value)
    }

    /// Removes expired entries.
    /// This operation also deallocates any slab left empty by the removal.
    async fn evict(&self) {
        let mut cache = self.0.lock().await;
        cache.evict();
    }

    /// Returns the maximum number of key-value pairs the cache can hold.
    /// Note that on insertion, when no space is available and no entry has
    /// timed out, the capacity grows by `multiply_cap` to accommodate the
    /// new entry.
    ///
    /// # Example
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///     assert_eq!(cache.capacity().await, 2);
    ///
    ///     cache.put(1, "a").await;
    ///     assert_eq!(cache.capacity().await, 2);
    ///
    ///     cache.put(2, "b").await;
    ///     assert_eq!(cache.capacity().await, 2);
    ///
    ///     cache.put(3, "c").await;
    ///     assert_eq!(cache.capacity().await, 4);
    /// }
    /// ```
    pub async fn capacity(&self) -> usize {
        let cache = self.0.lock().await;
        cache.capacity()
    }

    /// Returns the number of key-value pairs currently in the cache.
    /// Note that `len` is always less than or equal to `capacity`.
    ///
    /// # Example
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///     assert_eq!(cache.len().await, 0);
    ///
    ///     cache.put(1, "a").await;
    ///     assert_eq!(cache.len().await, 1);
    ///
    ///     cache.put(2, "b").await;
    ///     assert_eq!(cache.len().await, 2);
    ///     assert_eq!(cache.capacity().await, 2);
    ///
    ///     cache.put(3, "c").await;
    ///     assert_eq!(cache.len().await, 3);
    ///     assert_eq!(cache.capacity().await, 4);
    /// }
    /// ```
    pub async fn len(&self) -> usize {
        let cache = self.0.lock().await;
        cache.len()
    }

    /// Returns a bool indicating whether the cache is empty or not.
    ///
    /// # Example
    ///
    /// ```
    /// use aba_cache as cache;
    /// use cache::LruAsyncCache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache = LruAsyncCache::new(2, 60);
    ///     assert!(cache.is_empty().await);
    ///
    ///     cache.put(String::from("1"), "a").await;
    ///     assert!(!cache.is_empty().await);
    /// }
    /// ```
    pub async fn is_empty(&self) -> bool {
        let cache = self.0.lock().await;
        cache.is_empty()
    }
}
177}