// cachers/lib.rs

// Copyright 2018 Alex Snaps
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! # Cachers [WIP!]
//!
//! `cachers` is a library for in-memory caching in Rust.
//!
//! ## What's in it?
//!
//! It currently only consists of a single `CacheThrough` type, that internally uses a `ClockEvictor`
//! for freeing memory when capacity is reached.
//!
//! Eventually this will hopefully become a full set of out-of-the-box, ready-to-use caching tools.
//!
//! ## Word of warning
//!
//! This is all very much _work in progress_. Fundamentally, it's just me having fun with Rust...
//!

31mod eviction;
32mod segment;
33
34use std::ops::Fn;
35use std::sync::{Arc, RwLock};
36
37use segment::Segment;
38
39/// A thread-safe cache that will populate entries on misses using the provided
40/// function, aka a cache-through cache. Uses interior mutability, which means you can simply
41/// share a non-mutable reference to both read & insert/update entries to the cache.
42///
43/// The `CacheThrough` cache uses clock eviction to free elements when it reaches capacity.
44///
45///
46/// In the example below, we create a `CacheThrough` shared across the main thread and another
47/// thread we span ourselves. The main thread will populate the `42` entry and the other thread
48/// will read it back:
49///
50/// ```
51/// use std::sync::{Arc, Barrier};
52/// use std::thread;
53///
54/// use cachers::CacheThrough;
55///
56/// let cache: Arc<CacheThrough<i32, String>> = Arc::new(CacheThrough::new(100));
57/// let other_cache = cache.clone();
58/// let our_key = 42;
59///
60/// let barrier = Arc::new(Barrier::new(2));
61/// let other_barrier = barrier.clone();
62///
63/// let t = thread::spawn(move || {
64///   other_barrier.wait(); // wait for main thread to populate
65///   let value = other_cache.get(our_key, |_| unimplemented!() ); // entry should be there!
66///   assert_eq!(*value.unwrap(), "42");
67/// });
68///
69/// let value = cache.get(our_key, |key| Some(key.to_string()) ); // miss, so populating
70/// assert_eq!(*value.unwrap(), "42");
71/// barrier.wait(); // let the other thread proceed
72///
73/// t.join().unwrap();
74/// ```
75pub struct CacheThrough<K, V> {
76  data: RwLock<Segment<K, V>>,
77}
78
79impl<K, V> CacheThrough<K, V>
80where
81  K: std::cmp::Eq + std::hash::Hash + Copy,
82{
83  /// Creates a new `CacheThrough` instance of the given `capacity`
84  ///
85  /// ```
86  /// use cachers::CacheThrough;
87  ///
88  /// let cache = CacheThrough::<usize, String>::new(100);
89  /// ```
90  pub fn new(capacity: usize) -> CacheThrough<K, V> {
91    CacheThrough {
92      data: RwLock::new(Segment::new(capacity)),
93    }
94  }
95
96  /// Retrieves a shared reference to the `V` for the given `key`.
97  /// The `populating_fn` will be invoked to populate the cache, should there be no mapping
98  /// for the `key` already present. The `populating_fn` receives the `key` as an argument.
99  ///
100  /// It is guaranteed that `populating_fn` will only be invoked once. That is if multiple thread
101  /// race to populate the cache for a given `key`, only one thread will invoke the `populating_fn`.
102  /// Once `populating_fn` has returned `Some<V>`, the other threads waiting for the entry to
103  /// be populated will get the `Arc<V>` returned.
104  ///
105  /// In the case where `populating_fn` yield no results (i.e. returns `Option::None`), no
106  /// guarantees are made about how many times the `populating_fn` may be called.
107  ///
108  /// If you want to cache misses, consider wrapping your `V` into an `Option`.
109  pub fn get<F>(&self, key: K, populating_fn: F) -> Option<Arc<V>>
110  where
111    F: Fn(&K) -> Option<V>,
112  {
113    if let Some(value) = self.data.read().unwrap().get(&key) {
114      return Some(value);
115    }
116
117    self.data.write().unwrap().get_or_populate(key, populating_fn)
118  }
119
120  /// Updates an entry in the cache, or populates it if absent.
121  ///
122  /// The update can be an actual update or a remove, should the `updating_fn` return a `None`.
123  /// The `updating_fn` receives the `key`, as well as an `Option<Arc<V>>` which holds the previous
124  /// value for the `key`, which would be `None` if the function is about to populate the cache.
125  ///
126  /// It is guaranteed that the mapping will not be altered by another thread while the
127  /// `populating_fn` executes.
128  pub fn update<F>(&self, key: K, updating_fn: F) -> Option<Arc<V>>
129  where
130    F: Fn(&K, Option<Arc<V>>) -> Option<V>,
131  {
132    self.data.write().unwrap().update(key, updating_fn)
133  }
134
135  /// Removes the entry for `key` from the cache.
136  /// This is the equivalent of `cache.update(key, |_, _| None)`. Consider this a convenience method.
137  pub fn remove(&self, key: K) {
138    self.data.write().unwrap().update(key, |_, _| None);
139  }
140
141  #[cfg(test)]
142  fn len(&self) -> usize {
143    self.data.read().unwrap().len()
144  }
145}
146
#[cfg(test)]
mod tests {
  use super::CacheThrough;
  use std::sync::Arc;

  fn test_cache() -> CacheThrough<i32, String> {
    CacheThrough::new(3)
  }

  #[test]
  fn hit_populates() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;
    {
      let value = cache.get(our_key, populate);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }

    {
      let value = cache.get(our_key, do_not_invoke);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }
  }

  #[test]
  fn miss_populates_not() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;
    {
      let value = cache.get(our_key, miss);
      assert_eq!(value, None);
      assert_eq!(cache.len(), 0);
    }

    {
      let value = cache.get(our_key, populate);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
      cache.get(2, populate);
      cache.get(3, populate);
      // cache.get(4, populate); // todo: don't panic, evict!!!
    }
  }

  #[test]
  fn update_populates() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;

    {
      let value = cache.update(our_key, upsert);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }

    {
      let value = cache.get(our_key, do_not_invoke);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }
  }

  #[test]
  fn update_updates() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;

    {
      let value = cache.get(our_key, populate);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }

    {
      let value = cache.update(our_key, update);
      assert_eq!(*value.unwrap(), "42 updated!");
      assert_eq!(cache.len(), 1);
    }

    {
      let value = cache.get(our_key, do_not_invoke);
      assert_eq!(*value.unwrap(), "42 updated!");
      assert_eq!(cache.len(), 1);
    }
  }

  #[test]
  fn update_removes() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;

    {
      let value = cache.get(our_key, populate);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }

    {
      let value = cache.update(our_key, updel);
      assert_eq!(value, None);
      assert_eq!(cache.len(), 0);
    }

    {
      let value = cache.get(our_key, miss);
      assert_eq!(value, None);
      assert_eq!(cache.len(), 0);
    }
  }

  #[test]
  fn remove_removes() {
    let cache: CacheThrough<i32, String> = test_cache();
    let our_key = 42;

    {
      let value = cache.get(our_key, populate);
      assert_eq!(*value.unwrap(), "42");
      assert_eq!(cache.len(), 1);
    }

    {
      cache.remove(our_key);
      assert_eq!(cache.len(), 0);
    }
  }

  #[test]
  fn evicts() {
    let cache: CacheThrough<i32, String> = test_cache();

    {
      assert_eq!(*cache.get(1, populate).unwrap(), "1"); // eviction candidate
      assert_eq!(cache.len(), 1);
      assert_eq!(*cache.get(2, populate).unwrap(), "2");
      assert_eq!(cache.len(), 2);
      assert_eq!(*cache.get(3, populate).unwrap(), "3");
      assert_eq!(cache.len(), 3);

      // Clock state & hand:
      // _
      // 111
    }

    {
      assert_eq!(*cache.get(4, populate).unwrap(), "4"); // evicts 1
      assert_eq!(cache.len(), 3);
      //  _
      // 100

      assert_eq!(*cache.get(2, do_not_invoke).unwrap(), "2");
      assert_eq!(cache.len(), 3);
      //  _
      // 110

      assert_eq!(*cache.get(3, do_not_invoke).unwrap(), "3");
      assert_eq!(cache.len(), 3);
      //  _
      // 111
    }

    {
      assert_eq!(*cache.get(5, populate).unwrap(), "5"); // evicts 3
      assert_eq!(cache.len(), 3);
      //   _
      // 010

      assert_eq!(*cache.get(2, do_not_invoke).unwrap(), "2"); // 011
      assert_eq!(cache.len(), 3);
      assert_eq!(*cache.get(4, do_not_invoke).unwrap(), "4"); // 111
      assert_eq!(cache.len(), 3);
    }

    {
      assert_eq!(*cache.get(6, populate).unwrap(), "6"); // evicts 4
      assert_eq!(cache.len(), 3);
      assert_eq!(*cache.get(5, do_not_invoke).unwrap(), "5");
      assert_eq!(cache.len(), 3);
      assert_eq!(*cache.get(2, do_not_invoke).unwrap(), "2");
      assert_eq!(cache.len(), 3);
    }
  }

  // Populating function that simulates a miss in the backing store.
  fn miss(_key: &i32) -> Option<String> {
    None
  }

  // Populating function that "loads" the key's string form.
  fn populate(key: &i32) -> Option<String> {
    Some(key.to_string())
  }

  // Updating function that asserts there was no previous mapping, then populates.
  fn upsert(key: &i32, value: Option<Arc<String>>) -> Option<String> {
    assert_eq!(value, None);
    populate(key)
  }

  // Updating function that appends to the existing value.
  fn update(_key: &i32, value: Option<Arc<String>>) -> Option<String> {
    let previous = &*value.unwrap();
    Some(previous.clone() + " updated!")
  }

  // Updating function that asserts a mapping existed, then removes it.
  fn updel(_key: &i32, value: Option<Arc<String>>) -> Option<String> {
    assert!(value.is_some());
    None
  }

  // Populating function for paths that must be served from the cache:
  // being called at all is a test failure.
  fn do_not_invoke(_key: &i32) -> Option<String> {
    panic!("I shall not be invoked!")
  }
}