//! Storing tokens sent from servers in NEW_TOKEN frames and using them in subsequent connections
use std::{
collections::{HashMap, VecDeque, hash_map},
sync::{Arc, Mutex},
};
use bytes::Bytes;
use lru_slab::LruSlab;
use tracing::trace;
use crate::token::TokenStore;
/// `TokenStore` implementation that stores tokens in memory, keeping a bounded number of
/// tokens per server name for a bounded number of server names
///
/// Server names beyond the limit are evicted in least-recently-used order; tokens beyond
/// the per-server limit are evicted oldest-first. All state lives behind a single `Mutex`.
#[derive(Debug)]
pub struct TokenMemoryCache(Mutex<State>);
impl TokenMemoryCache {
    /// Construct an empty cache holding at most `max_server_names` server names, with at
    /// most `max_tokens_per_server` tokens retained for each
    pub fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self {
        let state = State::new(max_server_names, max_tokens_per_server);
        Self(Mutex::new(state))
    }
}
impl TokenStore for TokenMemoryCache {
    /// Store `token` for later use when connecting to `server_name`
    fn insert(&self, server_name: &str, token: Bytes) {
        trace!(%server_name, "storing token");
        // unwrap: propagate mutex poisoning as a panic
        self.0.lock().unwrap().store(server_name, token)
    }
    /// Remove and return the oldest stored token for `server_name`, if any
    fn take(&self, server_name: &str) -> Option<Bytes> {
        // take the token before tracing so the lock is released as early as possible
        let token = self.0.lock().unwrap().take(server_name);
        trace!(%server_name, found=%token.is_some(), "taking token");
        token
    }
}
/// Defaults to a maximum of 256 servers and 2 tokens per server
impl Default for TokenMemoryCache {
fn default() -> Self {
Self::new(256, 2)
}
}
/// Lockable inner state of `TokenMemoryCache`
/// Lockable inner state of `TokenMemoryCache`
#[derive(Debug)]
struct State {
    // maximum number of distinct server names retained; older names are LRU-evicted
    max_server_names: u32,
    // maximum number of tokens queued per server name; older tokens are dropped first
    max_tokens_per_server: usize,
    // map from server name to index in lru
    lookup: HashMap<Arc<str>, u32>,
    // LRU slab of per-server entries; `lookup` holds each entry's slot index, and each
    // entry carries its `Arc<str>` key so eviction can remove the `lookup` entry too
    lru: LruSlab<CacheEntry>,
}
impl State {
    /// Construct empty state with the given capacity limits
    fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self {
        Self {
            max_server_names,
            max_tokens_per_server,
            lookup: HashMap::new(),
            lru: LruSlab::default(),
        }
    }
    /// Store `token` under `server_name`, evicting the oldest token for that server or
    /// the least-recently-used server entry as needed to respect the capacity limits
    fn store(&mut self, server_name: &str, token: Bytes) {
        if self.max_server_names == 0 {
            // the rest of this method assumes that we can always insert a new entry so long as
            // we're willing to evict a pre-existing entry. thus, an entry limit of 0 is an edge
            // case we must short-circuit on now.
            return;
        }
        if self.max_tokens_per_server == 0 {
            // similarly to above, the rest of this method assumes that we can always push a new
            // token to a queue so long as we're willing to evict a pre-existing token, so we
            // short-circuit on the edge case of a token limit of 0.
            return;
        }
        let server_name = Arc::<str>::from(server_name);
        match self.lookup.entry(server_name.clone()) {
            hash_map::Entry::Occupied(hmap_entry) => {
                // key already exists, push the new token to its token queue
                // NOTE(review): assumes `LruSlab::get_mut` also marks the entry as
                // most-recently-used — confirm against lru-slab docs
                let tokens = &mut self.lru.get_mut(*hmap_entry.get()).tokens;
                if tokens.len() >= self.max_tokens_per_server {
                    // the queue never exceeds the limit, so at most one pop is needed
                    debug_assert!(tokens.len() == self.max_tokens_per_server);
                    tokens.pop_front().unwrap();
                }
                tokens.push_back(token);
            }
            hash_map::Entry::Vacant(hmap_entry) => {
                // key does not yet exist, create a new one, evicting the oldest if necessary
                let removed_key = if self.lru.len() >= self.max_server_names {
                    // unwrap safety: max_server_names is > 0, so there's at least one entry, so
                    // lru() is some
                    Some(self.lru.remove(self.lru.lru().unwrap()).server_name)
                } else {
                    None
                };
                hmap_entry.insert(self.lru.insert(CacheEntry::new(server_name, token)));
                // for borrowing reasons, we must defer removing the evicted hmap entry to here
                if let Some(removed_slot) = removed_key {
                    let removed = self.lookup.remove(&removed_slot);
                    debug_assert!(removed.is_some());
                }
            }
        };
    }
    /// Remove and return the oldest token stored for `server_name`, dropping the whole
    /// entry once its queue is empty so the "tokens is never empty" invariant holds
    fn take(&mut self, server_name: &str) -> Option<Bytes> {
        let slab_key = *self.lookup.get(server_name)?;
        // pop from entry's token queue
        let entry = self.lru.get_mut(slab_key);
        // unwrap safety: we never leave tokens empty
        let token = entry.tokens.pop_front().unwrap();
        if entry.tokens.is_empty() {
            // token stack emptied, remove entry
            self.lru.remove(slab_key);
            self.lookup.remove(server_name);
        }
        Some(token)
    }
}
/// Cache entry within `TokenMemoryCache`'s LRU slab
#[derive(Debug)]
struct CacheEntry {
    // the server name this entry belongs to, mirrored here so that evicting the slab
    // entry also allows removing the corresponding `lookup` key
    server_name: Arc<str>,
    // invariant: tokens is never empty
    tokens: VecDeque<Bytes>,
}
impl CacheEntry {
    /// Construct an entry whose token queue holds exactly one token, upholding the
    /// invariant that `tokens` is never empty
    fn new(server_name: Arc<str>, token: Bytes) -> Self {
        Self {
            server_name,
            tokens: VecDeque::from([token]),
        }
    }
}
#[cfg(test)]
mod tests {
    use std::collections::VecDeque;
    use super::*;
    use rand::prelude::*;
    use rand_pcg::Pcg32;
    use tracing::info;
    /// Deterministically seeded RNG so test failures are reproducible
    fn new_rng() -> impl Rng {
        Pcg32::from_seed(0xdeadbeefdeadbeefdeadbeefdeadbeefu128.to_le_bytes())
    }
    /// Randomized differential test: drive `TokenMemoryCache` and a naive model
    /// implementation with the same operation sequence and assert they always agree
    #[test]
    fn cache_test() {
        let mut rng = new_rng();
        // capacity limits, shared by the cache under test and the model
        const N: usize = 2; // max tokens per server name
        const MAX_SERVER_NAMES: u32 = 20; // max server names before LRU eviction
        for _ in 0..10 {
            // model: (server name, token queue) pairs, kept sorted oldest to newest
            let mut cache_1: Vec<(u32, VecDeque<Bytes>)> = Vec::new();
            let cache_2 = TokenMemoryCache::new(MAX_SERVER_NAMES, N);
            for i in 0..200 {
                let server_name = rng.random::<u32>() % 10;
                if rng.random_bool(0.666) {
                    // store
                    let token = Bytes::from(vec![i]);
                    info!("STORE {server_name} {token:?}");
                    if let Some((j, _)) = cache_1
                        .iter()
                        .enumerate()
                        .find(|&(_, &(server_name_2, _))| server_name_2 == server_name)
                    {
                        // existing name: move it to the newest position and push the
                        // token, evicting the oldest token past the per-server limit
                        let (_, mut queue) = cache_1.remove(j);
                        queue.push_back(token.clone());
                        if queue.len() > N {
                            queue.pop_front();
                        }
                        cache_1.push((server_name, queue));
                    } else {
                        // new name: append, evicting the oldest name past the limit
                        let mut queue = VecDeque::new();
                        queue.push_back(token.clone());
                        cache_1.push((server_name, queue));
                        if cache_1.len() > MAX_SERVER_NAMES as usize {
                            cache_1.remove(0);
                        }
                    }
                    cache_2.insert(&server_name.to_string(), token);
                } else {
                    // take
                    info!("TAKE {server_name}");
                    let expecting = cache_1
                        .iter()
                        .enumerate()
                        .find(|&(_, &(server_name_2, _))| server_name_2 == server_name)
                        .map(|(j, _)| j)
                        .map(|j| {
                            // pop the oldest token; drop the entry once its queue empties
                            let (_, mut queue) = cache_1.remove(j);
                            let token = queue.pop_front().unwrap();
                            if !queue.is_empty() {
                                cache_1.push((server_name, queue));
                            }
                            token
                        });
                    info!("EXPECTING {expecting:?}");
                    assert_eq!(cache_2.take(&server_name.to_string()), expecting);
                }
            }
        }
    }
    /// A server-name limit of 0 must neither panic nor retain anything
    #[test]
    fn zero_max_server_names() {
        let cache = TokenMemoryCache::new(0, 2);
        for i in 0..10 {
            cache.insert(&i.to_string(), Bytes::from(vec![i]));
            for j in 0..10 {
                assert!(cache.take(&j.to_string()).is_none());
            }
        }
    }
    /// A per-server token limit of 0 must neither panic nor retain anything
    #[test]
    fn zero_queue_length() {
        let cache = TokenMemoryCache::new(256, 0);
        for i in 0..10 {
            cache.insert(&i.to_string(), Bytes::from(vec![i]));
            for j in 0..10 {
                assert!(cache.take(&j.to_string()).is_none());
            }
        }
    }
}