k_cache/segmented.rs

use std::{borrow::Borrow, hash::BuildHasher};

use crate::{
    cache::{DefaultLifecycle, Lifecycle},
    Cache, One, Weigher,
};

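/// A cache sharded into independently locked segments.
///
/// Keys are hashed with `S` to pick a segment; each segment is its own
/// `Cache` behind a `k_lock::Mutex`, so operations that land in different
/// segments do not contend with each other.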
#[derive(Debug)]
pub struct SegmentedCache<
    K,
    V,
    S: BuildHasher = std::hash::RandomState,
    W: Weigher<K, V> = One,
    L: Lifecycle<K, V> = DefaultLifecycle,
> {
    segments: Vec<k_lock::Mutex<Cache<K, V, S, W, L>>>,
    hasher: S,
}

impl<K, V, S, W, L> SegmentedCache<K, V, S, W, L>
where
    K: Eq + std::hash::Hash + Clone,
    V: Clone,
    S: BuildHasher + Default,
    W: Weigher<K, V> + Clone,
    L: Lifecycle<K, V> + Default,
{
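    /// Creates a cache with `segments` independently locked segments sharing
    /// a total weight budget of `max_weight`.
    ///
    /// Each segment gets `max_weight / segments`, so the effective total is
    /// rounded down to a multiple of `segments`. `segments` must be non-zero,
    /// otherwise the division below panics.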
    pub fn new(segments: usize, max_weight: usize) -> Self {
        let weight_per_segment = max_weight / segments;
        let segments = (0..segments)
            .map(|_| {
                k_lock::Mutex::new(Cache::new_with_lifecycle(
                    S::default(),
                    weight_per_segment,
                    L::default(),
                ))
            })
            .collect();
        Self {
            segments,
            hasher: S::default(),
        }
    }
}

impl<K, V, S, W, L> SegmentedCache<K, V, S, W, L>
where
    K: Eq + std::hash::Hash + Clone,
    V: Clone,
    S: BuildHasher + Default,
    W: Weigher<K, V> + Clone,
    L: Lifecycle<K, V> + Clone,
{
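    /// Like `new`, but every segment receives a clone of the provided
    /// `lifecycle` instead of `L::default()`.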
    pub fn new_with_lifecycle(segments: usize, max_weight: usize, lifecycle: L) -> Self {
        let weight_per_segment = max_weight / segments;
        let segments = (0..segments)
            .map(|_| {
                k_lock::Mutex::new(Cache::new_with_lifecycle(
                    S::default(),
                    weight_per_segment,
                    lifecycle.clone(),
                ))
            })
            .collect();
        Self {
            segments,
            hasher: S::default(),
        }
    }

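    /// Inserts `key` and `value` into the segment chosen by hashing the key,
    /// locking only that segment.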
    pub fn put(&self, key: K, value: V) {
        let slot = self.hasher.hash_one(&key) as usize % self.segments.len();
        self.segments[slot]
            .lock()
            .expect("mutex must not be poisoned")
            .put(key, value)
    }

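    /// Removes `key` from its segment, returning the removed value if the
    /// key was present.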
    pub fn remove(&self, key: &K) -> Option<V> {
        let slot = self.hasher.hash_one(key) as usize % self.segments.len();
        self.segments[slot]
            .lock()
            .expect("mutex must not be poisoned")
            .remove(key)
    }

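    /// Returns a clone of the value for `key`, if present.
    ///
    /// Accepts any borrowed form of the key; `Borrow` guarantees the borrowed
    /// form hashes like the owned key, so the lookup lands in the same
    /// segment that `put` used.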
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: std::hash::Hash + Eq + ?Sized,
    {
        let slot = self.hasher.hash_one(key) as usize % self.segments.len();
        self.segments[slot]
            .lock()
            .expect("mutex must not be poisoned")
            .get(key)
            .cloned()
    }

    /// Clears the cache via eviction.
    ///
    /// If you want to handle the evicted entries, use the `Lifecycle` trait.
    /// The order of eviction is unspecified.
    ///
    /// In the presence of concurrent writes, this function does not guarantee
    /// that the cache is empty afterwards. If you want to empty the cache,
    /// stop writes first and then call this function. If you just want to
    /// reset the cache, calling it under concurrent writes is safe; the cache
    /// just may not end up empty.
    ///
    /// It locks `1/segments` of the cache at a time, for as long as that
    /// segment takes to evict, so it's potentially fairly intrusive with
    /// large segments, especially if you have an expensive `on_eviction`
    /// `Lifecycle`.
    pub fn evict_all(&self) {
        for segment in &self.segments {
            segment
                .lock()
                .expect("mutex must not be poisoned")
                .evict_all();
        }
    }
}
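
// A minimal usage sketch, assuming the default parameters above
// (`std::hash::RandomState`, `One`, `DefaultLifecycle`) satisfy the bounds on
// `new`, and that an entry whose weight fits well under `max_weight` is
// admitted by `put`.
#[cfg(test)]
mod usage_sketch {
    use super::SegmentedCache;

    #[test]
    fn put_get_remove_and_evict_all() {
        // Four segments sharing a weight budget of 1024; with the default
        // `One` weigher every entry weighs 1, so capacity is ample here.
        let cache: SegmentedCache<String, u32> = SegmentedCache::new(4, 1024);

        cache.put("answer".to_string(), 42);
        // Assumes a freshly inserted entry well under capacity is readable back.
        assert_eq!(cache.get("answer"), Some(42));

        // `remove` hands back the removed value when the key was present.
        assert_eq!(cache.remove(&"answer".to_string()), Some(42));
        assert_eq!(cache.get("answer"), None);

        // With no concurrent writers, `evict_all` leaves the cache empty.
        cache.put("reset".to_string(), 1);
        cache.evict_all();
        assert_eq!(cache.get("reset"), None);
    }
}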