dualcache_ff/filters.rs

#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, vec::Vec};

use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

use crate::storage::Node;

/// T1 — Hottest tier: direct-mapped L1 filter (sized to fit in the CPU's L1 cache).
///
/// Each slot stores a raw pointer to the most recently seen node for
/// that hash bucket. Slot assignment is pure bitmask: `hash & mask`.
/// Collisions simply overwrite; no chaining, no locks.
pub struct T1<K, V> {
    pub(crate) mask: usize,
    pub(crate) slots: Box<[AtomicPtr<Node<K, V>>]>,
}

// SAFETY: `T1` only stores, loads, and compares these raw pointers
// atomically; it never dereferences them, so sending or sharing the filter
// across threads is sound whenever the node data itself is Send/Sync.
unsafe impl<K: Send, V: Send> Send for T1<K, V> {}
unsafe impl<K: Send + Sync, V: Send + Sync> Sync for T1<K, V> {}
impl<K, V> T1<K, V> {
    /// Creates a filter with `slots_count` empty (null) slots.
    ///
    /// `slots_count` must be a non-zero power of two, since indexing relies
    /// on `hash & (slots_count - 1)` reaching every slot.
    pub fn new(slots_count: usize) -> Self {
        assert!(
            slots_count.is_power_of_two(),
            "slots_count must be a non-zero power of two"
        );
        let mut slots = Vec::with_capacity(slots_count);
        for _ in 0..slots_count {
            slots.push(AtomicPtr::new(ptr::null_mut()));
        }
        Self {
            mask: slots_count - 1,
            slots: slots.into_boxed_slice(),
        }
    }

    /// Reads the slot for `hash`. Returns null if nothing is cached there.
    #[inline(always)]
    pub fn load_slot(&self, hash: u64) -> *mut Node<K, V> {
        let idx = hash as usize & self.mask;
        self.slots[idx].load(Ordering::Acquire)
    }

    /// Publishes `ptr` into the slot for `hash`, overwriting any collision.
    #[inline(always)]
    pub fn store_slot(&self, hash: u64, ptr: *mut Node<K, V>) {
        let idx = hash as usize & self.mask;
        self.slots[idx].store(ptr, Ordering::Release);
    }

    /// Clears the slot for `hash` only if it still holds `expected_ptr`,
    /// so a concurrent writer that already replaced the entry is not undone.
    #[inline(always)]
    pub fn clear_if_matches(&self, hash: u64, expected_ptr: *mut Node<K, V>) {
        let idx = hash as usize & self.mask;
        let _ = self.slots[idx].compare_exchange(
            expected_ptr,
            ptr::null_mut(),
            Ordering::Release,
            Ordering::Relaxed,
        );
    }

    /// Unconditionally nulls the slot at `idx` (e.g., during a bulk clear).
    #[inline(always)]
    pub fn clear_at(&self, idx: usize) {
        self.slots[idx].store(ptr::null_mut(), Ordering::Relaxed);
    }

    /// Number of slots (fixed at construction), not the number of occupied entries.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.slots.len()
    }
}
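// A minimal usage sketch (not part of the upstream API): exercises the slot
// contract — store, read back, conditional clear. The node pointer below is a
// dangling placeholder from `NonNull::dangling()`; that is fine here because
// the filter never dereferences slot contents, only compares and swaps them.
#[cfg(test)]
mod t1_slot_sketch {
    use super::*;
    use core::ptr::NonNull;

    #[test]
    fn store_load_clear() {
        let t1: T1<u64, u64> = T1::new(8);
        let p: *mut Node<u64, u64> = NonNull::dangling().as_ptr();
        let hash = 0xDEAD_BEEF_u64;

        assert!(t1.load_slot(hash).is_null()); // slots start out null
        t1.store_slot(hash, p);
        assert_eq!(t1.load_slot(hash), p);

        // CAS-based clear succeeds only while the slot still holds `p`.
        t1.clear_if_matches(hash, p);
        assert!(t1.load_slot(hash).is_null());
    }
}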
/// T2 — Warm tier: direct-mapped L2 filter (intercepts warm data).
///
/// Structurally identical to T1 but sized differently (20% of capacity).
/// Kept as a separate type so future experiments can apply different
/// policies per tier (e.g., CLOCK-Pro promotion in T2 while T1 uses
/// LRU-hot demotion); see the sizing sketch at the bottom of this file.
pub struct T2<K, V> {
    pub(crate) mask: usize,
    pub(crate) slots: Box<[AtomicPtr<Node<K, V>>]>,
}

// SAFETY: as with `T1`, the filter only stores, loads, and compares raw
// pointers atomically and never dereferences them, so the same reasoning
// applies.
unsafe impl<K: Send, V: Send> Send for T2<K, V> {}
unsafe impl<K: Send + Sync, V: Send + Sync> Sync for T2<K, V> {}

impl<K, V> T2<K, V> {
    /// Creates a filter with `slots_count` empty (null) slots.
    ///
    /// `slots_count` must be a non-zero power of two, since indexing relies
    /// on `hash & (slots_count - 1)` reaching every slot.
    pub fn new(slots_count: usize) -> Self {
        assert!(
            slots_count.is_power_of_two(),
            "slots_count must be a non-zero power of two"
        );
        let mut slots = Vec::with_capacity(slots_count);
        for _ in 0..slots_count {
            slots.push(AtomicPtr::new(ptr::null_mut()));
        }
        Self {
            mask: slots_count - 1,
            slots: slots.into_boxed_slice(),
        }
    }

    /// Reads the slot for `hash`. Returns null if nothing is cached there.
    #[inline(always)]
    pub fn load_slot(&self, hash: u64) -> *mut Node<K, V> {
        let idx = hash as usize & self.mask;
        self.slots[idx].load(Ordering::Acquire)
    }

    /// Publishes `ptr` into the slot for `hash`, overwriting any collision.
    #[inline(always)]
    pub fn store_slot(&self, hash: u64, ptr: *mut Node<K, V>) {
        let idx = hash as usize & self.mask;
        self.slots[idx].store(ptr, Ordering::Release);
    }

    /// Clears the slot for `hash` only if it still holds `expected_ptr`,
    /// so a concurrent writer that already replaced the entry is not undone.
    #[inline(always)]
    pub fn clear_if_matches(&self, hash: u64, expected_ptr: *mut Node<K, V>) {
        let idx = hash as usize & self.mask;
        let _ = self.slots[idx].compare_exchange(
            expected_ptr,
            ptr::null_mut(),
            Ordering::Release,
            Ordering::Relaxed,
        );
    }

    /// Unconditionally nulls the slot at `idx` (e.g., during a bulk clear).
    #[inline(always)]
    pub fn clear_at(&self, idx: usize) {
        self.slots[idx].store(ptr::null_mut(), Ordering::Relaxed);
    }

    /// Number of slots (fixed at construction), not the number of occupied entries.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.slots.len()
    }
}
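// Sketch only: one way a caller might derive T2's slot count from total cache
// capacity. The 20% figure comes from the doc comment above; the rounding step
// exists because `new` asserts a power-of-two slot count. `t2_slots_for` is a
// hypothetical helper, not part of this module's API.
#[cfg(test)]
mod t2_sizing_sketch {
    use super::*;

    fn t2_slots_for(cache_capacity: usize) -> usize {
        // 20% of capacity, rounded up to the next power of two (minimum 1).
        (cache_capacity / 5).max(1).next_power_of_two()
    }

    #[test]
    fn sized_to_a_fifth_of_capacity() {
        let t2: T2<u64, u64> = T2::new(t2_slots_for(1000));
        assert_eq!(t2.len(), 256); // 1000 / 5 = 200 → rounds up to 256
    }
}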