
pingora_cache/hashtable.rs

// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Concurrent hash tables and LRUs

use lru::LruCache;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::collections::HashMap;

// There are probably off-the-shelf crates for this, DashMap?
/// A hash table that shards to a constant number of tables to reduce lock contention
#[derive(Debug)]
pub struct ConcurrentHashTable<V, const N: usize> {
    tables: [RwLock<HashMap<u128, V>>; N],
}

#[inline]
fn get_shard(key: u128, n_shards: usize) -> usize {
    (key % n_shards as u128) as usize
}

impl<V, const N: usize> ConcurrentHashTable<V, N>
where
    [RwLock<HashMap<u128, V>>; N]: Default,
{
    pub fn new() -> Self {
        ConcurrentHashTable {
            tables: Default::default(),
        }
    }
    pub fn get(&self, key: u128) -> &RwLock<HashMap<u128, V>> {
        &self.tables[get_shard(key, N)]
    }

    #[allow(dead_code)]
    pub fn get_shard_at_idx(&self, idx: usize) -> Option<&RwLock<HashMap<u128, V>>> {
        self.tables.get(idx)
    }

    #[allow(dead_code)]
    pub fn read(&self, key: u128) -> RwLockReadGuard<'_, HashMap<u128, V>> {
        self.get(key).read()
    }

    pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, HashMap<u128, V>> {
        self.get(key).write()
    }

    #[allow(dead_code)]
    pub fn for_each<F>(&self, mut f: F)
    where
        F: FnMut(&u128, &V),
    {
        for shard in &self.tables {
            let guard = shard.read();
            for (key, value) in guard.iter() {
                f(key, value);
            }
        }
    }

    // TODO: work out the lifetimes to provide get/set directly
}

impl<V, const N: usize> Default for ConcurrentHashTable<V, N>
where
    [RwLock<HashMap<u128, V>>; N]: Default,
{
    fn default() -> Self {
        Self::new()
    }
}
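// A minimal usage sketch, not part of the original file: it shows callers going
// through the per-shard lock guards returned by `read()`/`write()`. The shard
// count (16) and the keys/values are arbitrary choices for illustration.
#[cfg(test)]
mod concurrent_hash_table_example {
    use super::*;

    #[test]
    fn shards_are_independent_maps() {
        let table: ConcurrentHashTable<u32, 16> = ConcurrentHashTable::new();

        // Writing goes through the write lock of the shard the key maps to
        // (key % 16), so keys 1 and 2 land in different shards.
        table.write(1).insert(1, 100);
        table.write(2).insert(2, 200);

        // Reading only needs the shard's read lock.
        assert_eq!(table.read(1).get(&1u128), Some(&100));
        assert_eq!(table.read(2).get(&2u128), Some(&200));

        // for_each visits every shard under its read lock.
        let mut sum = 0;
        table.for_each(|_key, value| sum += *value);
        assert_eq!(sum, 300);
    }
}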

#[doc(hidden)] // not needed in the public API
pub struct LruShard<V>(RwLock<LruCache<u128, V>>);
impl<V> Default for LruShard<V> {
    fn default() -> Self {
        // help satisfy default construction of arrays
        LruShard(RwLock::new(LruCache::unbounded()))
    }
}

/// Sharded concurrent data structure for LruCache
pub struct ConcurrentLruCache<V, const N: usize> {
    lrus: [LruShard<V>; N],
}

impl<V, const N: usize> ConcurrentLruCache<V, N>
where
    [LruShard<V>; N]: Default,
{
    pub fn new(shard_capacity: usize) -> Self {
        use std::num::NonZeroUsize;
        // safe, 1 != 0
        const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();
        let mut cache = ConcurrentLruCache {
            lrus: Default::default(),
        };
        for lru in &mut cache.lrus {
            lru.0
                .write()
                .resize(shard_capacity.try_into().unwrap_or(ONE));
        }
        cache
    }
    pub fn get(&self, key: u128) -> &RwLock<LruCache<u128, V>> {
        &self.lrus[get_shard(key, N)].0
    }

    #[allow(dead_code)]
    pub fn read(&self, key: u128) -> RwLockReadGuard<'_, LruCache<u128, V>> {
        self.get(key).read()
    }

    pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, LruCache<u128, V>> {
        self.get(key).write()
    }

    // TODO: work out the lifetimes to provide get/set directly
}
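
// A minimal usage sketch, not part of the original file: it shows per-shard LRU
// eviction through the guards. Note that `lru::LruCache::get` requires the write
// guard because it updates recency, while `peek` works through the read guard.
// The shard count (4) and per-shard capacity (2) are arbitrary choices for
// illustration.
#[cfg(test)]
mod concurrent_lru_cache_example {
    use super::*;

    #[test]
    fn shard_capacity_evicts_least_recently_used() {
        let cache: ConcurrentLruCache<u32, 4> = ConcurrentLruCache::new(2);

        {
            // Keys 0, 4 and 8 all map to shard 0 (key % 4 == 0), so they
            // compete for the same 2-entry LRU.
            let mut shard = cache.write(0);
            shard.put(0, 10);
            shard.put(4, 40);
            shard.put(8, 80); // evicts key 0, the least recently used entry
        }

        assert_eq!(cache.read(0).peek(&0u128), None);
        assert_eq!(cache.read(4).peek(&4u128), Some(&40));
        assert_eq!(cache.read(8).peek(&8u128), Some(&80));
    }
}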