simple_rate_limiter/
lib.rs

1#[cfg(test)]
2mod test;
3
4use parking_lot::RwLock;
5use std::{
6    collections::{hash_map::Entry, HashMap, VecDeque},
7    hash::Hash,
8    iter, mem,
9    ops::DerefMut as _,
10    sync::{
11        atomic::{AtomicUsize, Ordering as MemoryOrdering},
12        Arc,
13    },
14};
15
/// A sliding-window rate limiter: each key gets up to `limit` hits across a
/// window made of several time slots, rotated externally via `rotate_slots`.
pub struct RateLimiter<K: Eq + Hash> {
    // Maximum number of hits allowed per key across the whole window.
    limit: usize,
    // Completed slots, oldest at the front. Mutated only during rotation, so
    // reads (summing a key's prior counts) rarely contend with writers.
    prior_slots: RwLock<VecDeque<HashMap<K, AtomicUsize>>>,
    // Counters for the slot currently being filled; incremented on each check.
    current_slot: RwLock<HashMap<K, AtomicUsize>>,
}
21
22impl<K: Eq + Hash> RateLimiter<K> {
23    pub fn new(limit: usize, slots: usize) -> Arc<Self> {
24        assert!(slots > 0);
25        let rate_limiter = Arc::new(Self {
26            limit,
27            prior_slots: RwLock::new(VecDeque::from_iter(
28                iter::repeat_with(|| HashMap::new()).take(slots - 1),
29            )),
30            current_slot: RwLock::default(),
31        });
32        rate_limiter
33    }
34
35    pub fn rotate_slots(&self) {
36        // Take the current slot data. Rotate the previous slots and place the current slot at the
37        // back. This automatically prunes entries that are infrequently used.
38        let mut prior_slots = self.prior_slots.write();
39        let front = prior_slots
40            .pop_front()
41            .map(|mut front| {
42                front.clear();
43                front
44            })
45            .unwrap_or_default();
46        let current_slot = mem::replace(self.current_slot.write().deref_mut(), front);
47        prior_slots.push_back(current_slot);
48    }
49
50    pub fn check_limited(&self, key: K) -> bool {
51        // We want to avoid a situation where a maliciously overactive client can degrade the
52        // ability to serve others. So we limit the contention and mutually exclusive locking
53        // that can be caused by such a client. A malicious client will most often be limited based
54        // on their count from prior slots, which are infrequently modified. If we need to check the
55        // current slot, then the malicious client will only be able to trigger a write lock
56        // acquisition up to `limit` times in the worst case for the entire window.
57        let mut sum: usize = self
58            .prior_slots
59            .read()
60            .iter()
61            .map(|slot| {
62                slot.get(&key)
63                    .map(|counter| counter.load(MemoryOrdering::Relaxed))
64                    .unwrap_or(0)
65            })
66            .sum();
67        // Don't increment if the limit is already reached from prior slots.
68        if sum > self.limit {
69            return true;
70        }
71        sum += self.increment(key);
72        sum >= self.limit
73    }
74
75    #[inline]
76    fn increment(&self, key: K) -> usize {
77        if let Some(map) = self.current_slot.try_read() {
78            if let Some(counter) = map.get(&key) {
79                return counter.fetch_add(1, MemoryOrdering::Relaxed);
80            }
81        }
82        match self.current_slot.write().entry(key) {
83            Entry::Occupied(entry) => entry.get().fetch_add(1, MemoryOrdering::Relaxed),
84            Entry::Vacant(entry) => {
85                entry.insert(AtomicUsize::new(1));
86                1
87            }
88        }
89    }
90}