1use crate::table::block::Block;
27use std::collections::{HashMap, VecDeque};
28use std::sync::atomic::{AtomicU64, Ordering};
29use std::sync::{Arc, Mutex};
30
/// Default capacity for a block cache, in bytes of block data (8 MiB).
pub const DEFAULT_BLOCK_CACHE_CAPACITY: usize = 8 * 1024 * 1024;
33
/// Mutable state of the LRU block cache; accessed only behind the
/// `Mutex` held by `BlockCache`.
struct CacheInner {
    // Maximum total charge, in bytes of block data, the cache may hold.
    capacity: usize,
    // Current total charge: sum of `data().len()` over all cached blocks.
    usage: usize,
    // Recency list of `(cache_id, offset)` keys. Front is the
    // least-recently-used entry (next eviction victim); back is the
    // most-recently-used.
    order: VecDeque<(u64, u64)>,
    // Cached blocks keyed by `(cache_id, offset)`.
    map: HashMap<(u64, u64), Block>,
}
44
45impl CacheInner {
46 fn get(&mut self, cache_id: u64, offset: u64) -> Option<Block> {
47 let key = (cache_id, offset);
48 let block = self.map.get(&key)?.clone();
49 if let Some(pos) = self.order.iter().position(|&k| k == key) {
51 self.order.remove(pos);
52 }
53 self.order.push_back(key);
54 Some(block)
55 }
56
57 fn insert(&mut self, cache_id: u64, offset: u64, block: Block) {
58 let key = (cache_id, offset);
59 let charge = block.data().len();
60
61 if let Some(old) = self.map.remove(&key) {
63 self.usage = self.usage.saturating_sub(old.data().len());
64 if let Some(pos) = self.order.iter().position(|&k| k == key) {
65 self.order.remove(pos);
66 }
67 }
68
69 while self.usage + charge > self.capacity && !self.order.is_empty() {
71 if let Some(evict_key) = self.order.pop_front() {
72 if let Some(evicted) = self.map.remove(&evict_key) {
73 self.usage = self.usage.saturating_sub(evicted.data().len());
74 }
75 }
76 }
77
78 self.order.push_back(key);
79 self.map.insert(key, block);
80 self.usage += charge;
81 }
82}
83
/// Shared, thread-safe LRU cache of table blocks, keyed by
/// `(cache_id, offset)`.
///
/// Cloning the handle is cheap and shares the same underlying cache and
/// id counter (both live behind `Arc`s).
#[derive(Clone)]
pub struct BlockCache {
    // Source of unique cache ids handed out by `new_id`.
    next_id: Arc<AtomicU64>,
    // The actual LRU state, serialized by a mutex.
    inner: Arc<Mutex<CacheInner>>,
}
94
95impl BlockCache {
96 pub fn new(capacity: usize) -> Self {
98 BlockCache {
99 next_id: Arc::new(AtomicU64::new(1)),
100 inner: Arc::new(Mutex::new(CacheInner {
101 capacity,
102 usage: 0,
103 order: VecDeque::new(),
104 map: HashMap::new(),
105 })),
106 }
107 }
108
109 pub(crate) fn new_id(&self) -> u64 {
114 self.next_id.fetch_add(1, Ordering::Relaxed)
115 }
116
117 pub(crate) fn get(&self, cache_id: u64, offset: u64) -> Option<Block> {
121 self.inner.lock().unwrap().get(cache_id, offset)
122 }
123
124 pub(crate) fn insert(&self, cache_id: u64, offset: u64, block: Block) {
126 self.inner.lock().unwrap().insert(cache_id, offset, block);
127 }
128}
129
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a block of `size` zero bytes, so its cache charge is `size`.
    fn make_block(size: usize) -> Block {
        Block::new(vec![0u8; size])
    }

    #[test]
    fn hit_after_insert() {
        let cache = BlockCache::new(1024);
        let id = cache.new_id();
        cache.insert(id, 0, make_block(100));
        assert!(cache.get(id, 0).is_some());
    }

    #[test]
    fn miss_on_absent_key() {
        let cache = BlockCache::new(1024);
        assert!(cache.get(1, 0).is_none());
    }

    #[test]
    fn evicts_lru_when_over_capacity() {
        // Capacity fits exactly two 100-byte blocks.
        let cache = BlockCache::new(200);
        let id = cache.new_id();
        cache.insert(id, 0, make_block(100)); // A
        cache.insert(id, 100, make_block(100)); // B
        cache.insert(id, 200, make_block(100)); // C pushes A out
        assert!(cache.get(id, 0).is_none(), "A should have been evicted");
        assert!(cache.get(id, 100).is_some(), "B should still be present");
        assert!(cache.get(id, 200).is_some(), "C should be present");
    }

    #[test]
    fn lru_promotes_on_get() {
        // Capacity fits exactly two 100-byte blocks.
        let cache = BlockCache::new(200);
        let id = cache.new_id();
        cache.insert(id, 0, make_block(100)); // A
        cache.insert(id, 100, make_block(100)); // B
        cache.get(id, 0); // touch A, making B the LRU entry
        cache.insert(id, 200, make_block(100)); // C evicts B, not A
        assert!(
            cache.get(id, 0).is_some(),
            "A should still be present (was promoted)"
        );
        assert!(cache.get(id, 100).is_none(), "B should have been evicted");
        assert!(cache.get(id, 200).is_some(), "C should be present");
    }

    #[test]
    fn unique_ids_per_new_id_call() {
        let cache = BlockCache::new(1024);
        let id1 = cache.new_id();
        let id2 = cache.new_id();
        assert_ne!(id1, id2);
    }

    #[test]
    fn clone_shares_state() {
        // A cloned handle must observe inserts made through the original.
        let cache = BlockCache::new(1024);
        let shared = cache.clone();
        let id = cache.new_id();
        cache.insert(id, 0, make_block(64));
        assert!(shared.get(id, 0).is_some());
    }
}