1use lru::LruCache;
2use std::{hash::Hash, num::NonZeroUsize};
3
/// Storage wrapper tagging a cached value with its eviction state.
enum Value<V> {
    /// Placeholder left in the cache while the real value is checked out.
    Detached,
    /// Value is present and protected from eviction.
    Pinned(V),
    /// Value is present and evictable.
    Regular(V),
}

impl<V> Value<V> {
    /// Borrows the inner value.
    ///
    /// Panics if the value is detached.
    fn get(&self) -> &V {
        match self {
            Value::Pinned(inner) | Value::Regular(inner) => inner,
            Value::Detached => panic!("Value is detached"),
        }
    }

    /// Mutably borrows the inner value.
    ///
    /// Panics if the value is detached.
    fn get_mut(&mut self) -> &mut V {
        match self {
            Value::Pinned(inner) | Value::Regular(inner) => inner,
            Value::Detached => panic!("Value is detached"),
        }
    }

    /// Consumes the wrapper and returns the inner value, releasing the
    /// caller's pin bookkeeping when the value was pinned.
    ///
    /// Panics if the value is detached.
    fn out(self, pin_count: &mut usize) -> V {
        match self {
            Value::Regular(inner) => inner,
            Value::Pinned(inner) => {
                // A pinned value leaving its slot gives its pin back.
                *pin_count -= 1;
                inner
            }
            Value::Detached => panic!("Value is detached"),
        }
    }
}
38
/// An LRU cache that additionally supports *pinning* entries (kept cached,
/// exempt from eviction) and *detaching* values (checked out by the caller
/// while a placeholder keeps the slot reserved under the same key).
pub struct Cache<K: Clone + PartialEq + Eq + Hash, V> {
    // Underlying LRU store; values are wrapped to track pin/detach state.
    entries: LruCache<K, Value<V>>,
    // Number of entries currently in the `Value::Detached` state.
    detach_count: usize,
    // Number of entries currently in the `Value::Pinned` state.
    pin_count: usize,
}
44
45impl<K: Clone + PartialEq + Eq + Hash, V> Cache<K, V> {
46 pub fn attach(&mut self, key: &K, val: V) {
47 let (key, d_val) = self
48 .entries
49 .pop_entry(key)
50 .expect("Cannot attach: Requested entry is not cached");
51 assert!(
52 matches!(d_val, Value::Detached),
53 "Only detached entries can be attached"
54 );
55 self.entries.push(key, Value::Regular(val));
56 self.detach_count -= 1;
57 }
58
59 pub fn detach(&mut self, key: &K) -> V {
61 let (key, val) = self
62 .entries
63 .pop_entry(key)
64 .expect("Cannot detach: Requested entry is not cached");
65 self.entries.push(key, Value::Detached);
66 self.detach_count += 1;
67 val.out(&mut self.pin_count)
68 }
69
70 pub fn empty(&mut self) -> Vec<(K, V)> {
72 let mut entries = Vec::new();
73 while let Some((key, val)) = self.entries.pop_lru() {
74 entries.push((key, val.out(&mut self.pin_count)))
75 }
76 entries
77 }
78
79 pub fn get(&mut self, key: &K) -> &V {
80 self.entries
81 .get(key)
82 .expect("Cannot get: Requested entry is not cached")
83 .get()
84 }
85
86 pub fn get_mut(&mut self, key: &K) -> &mut V {
87 self.entries
88 .get_mut(key)
89 .expect("Cannot get_mut: Requested entry is not cached")
90 .get_mut()
91 }
92
93 pub fn is_cached(&self, key: &K) -> bool {
94 self.entries.contains(key)
95 }
96
97 pub fn len(&self) -> usize {
98 self.entries.len()
99 }
100
101 pub fn margin(&self) -> usize {
102 self.entries.cap().get() - self.detach_count - self.pin_count
103 }
104
105 pub fn new(cap: NonZeroUsize) -> Self {
106 Self {
107 entries: LruCache::new(cap),
108 detach_count: 0,
109 pin_count: 0,
110 }
111 }
112
113 pub fn pin(&mut self, key: &K) {
114 let (key, val) = self
115 .entries
116 .pop_entry(key)
117 .expect("Cannot pin: Requested entry is not cached");
118 self.entries
119 .push(key, Value::Pinned(val.out(&mut self.pin_count)));
120 self.pin_count += 1;
121 }
122
123 pub fn pop(&mut self, key: &K) -> V {
124 self.entries
125 .pop(key)
126 .expect("Cannot pop: Requested entry is not cached")
127 .out(&mut self.pin_count)
128 }
129
130 pub fn pop_lru(&mut self) -> Option<(K, V)> {
131 self.refresh();
132 self.entries.pop_lru().map(|(k, v)| (k, v.out(&mut 0)))
133 }
134
135 pub fn push(&mut self, key: K, val: V) -> Option<(K, V)> {
136 self.refresh();
137 self.entries
138 .push(key, Value::Regular(val))
139 .map(|(k, v)| (k, v.out(&mut 0)))
140 }
141
142 fn refresh(&mut self) {
143 if self.entries.len() < self.entries.cap().get() {
144 return;
145 }
146 assert!(
147 self.margin() > 0,
148 "Cache Overflow -- Capacity: {}, Detached: {}, Pinned: {}",
149 self.entries.cap().get(),
150 self.detach_count,
151 self.pin_count
152 );
153 loop {
154 let key = match self.entries.peek_lru() {
155 Some((key, val)) if !matches!(val, Value::Regular(_)) => key.clone(),
156 _ => break,
157 };
158 self.entries.promote(&key);
159 }
160 }
161
162 pub fn unpin(&mut self, key: &K) {
163 let (key, val) = self
164 .entries
165 .pop_entry(key)
166 .expect("Cannot unpin: Requested entry is not cached");
167 self.entries
168 .push(key, Value::Regular(val.out(&mut self.pin_count)));
169 }
170}