1#[cfg(not(feature = "std"))]
2use alloc::{boxed::Box, vec::Vec};
3
4use core::sync::atomic::{AtomicU64, AtomicPtr, Ordering};
5use core::ptr;
6
/// A single cache entry: a key/value pair plus expiry and index metadata.
/// Stored behind an `AtomicPtr` in `Cache::nodes`.
pub struct Node<K, V> {
    /// Lookup key; compared with `==` on reads (see `Cache::node_get_full`).
    pub key: K,
    /// Stored value; cloned out to the caller on a successful lookup.
    pub value: V,
    /// Expiry epoch. `0` means "never expires"; otherwise the entry is
    /// treated as stale once the caller's epoch exceeds this value
    /// (checked lazily in `Cache::node_get_full`).
    pub expire_at: u32,
    /// Index of this node within the cache's slot arrays — presumably used
    /// to match index entries back to nodes (see `Cache::index_remove`);
    /// exact write-side semantics are outside this view. TODO confirm.
    pub g_idx: u32,
}
16
/// Fixed-capacity concurrent cache built from two parallel arrays:
/// an open-addressed hash index of packed `AtomicU64` entries, and a slab
/// of raw node pointers. Both sizes are fixed at construction (`Cache::new`).
///
/// Index entry encoding (see `index_probe` / `index_store` / `index_remove`):
/// top 16 bits = key tag, low 48 bits = node slot index;
/// `0` = empty slot, `u64::MAX` = tombstone (removed entry).
pub struct Cache<K, V> {
    /// `index.len() - 1`. The index length is a power of two, so slot
    /// selection is the branch-free `hash as usize & index_mask`.
    pub(crate) index_mask: usize,
    /// Open-addressed hash index of packed entries (encoding above).
    pub(crate) index: Box<[AtomicU64]>,
    /// Node slot array; a null pointer marks a vacant slot.
    pub(crate) nodes: Box<[AtomicPtr<Node<K, V>>]>,
}
28
// SAFETY(review): the raw `Node` pointers inside `AtomicPtr` suppress the
// auto traits, so they are asserted manually. Moving the cache to another
// thread moves ownership of all nodes with it, so `K: Send, V: Send`
// suffices for `Send`.
unsafe impl<K: Send, V: Send> Send for Cache<K, V> {}
// SAFETY(review): shared access reads `&node.key` and clones `node.value`
// from any thread (see `node_get_full`), hence `Send + Sync` on both K and V.
// NOTE(review): this alone does not prevent a reader from racing with a
// writer that frees a node; the reclamation protocol that makes the unsafe
// deref in `node_get_full` sound is established outside this view — confirm.
unsafe impl<K: Send + Sync, V: Send + Sync> Sync for Cache<K, V> {}
31
impl<K, V> Cache<K, V> {
    /// Creates a cache with `capacity` node slots and an index sized to the
    /// next power of two >= `2 * capacity` — the index is kept at most half
    /// full so 16-step linear probes usually terminate early.
    ///
    /// All index slots start empty (`0`) and all node slots start null.
    // NOTE(review): `capacity * 2` can overflow for huge capacities and
    // `capacity == 0` yields a 1-slot index with an empty node slab
    // (lookups just return None) — presumably callers pass sane sizes.
    pub fn new(capacity: usize) -> Self {
        // Power-of-two index length lets `hash & index_mask` replace modulo.
        let index_size = (capacity * 2).next_power_of_two();
        let mut index = Vec::with_capacity(index_size);
        for _ in 0..index_size {
            // 0 == empty slot.
            index.push(AtomicU64::new(0));
        }

        let mut nodes = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            // Null == vacant node slot.
            nodes.push(AtomicPtr::new(ptr::null_mut()));
        }

        Self {
            index_mask: index_size - 1,
            index: index.into_boxed_slice(),
            nodes: nodes.into_boxed_slice(),
        }
    }

    /// Looks up `tag` in the index by linear probing up to 16 slots starting
    /// at `hash & index_mask`. Returns the 48-bit node slot index on a tag
    /// match, or `None` if an empty slot is reached or the probe window is
    /// exhausted. Tombstones (`u64::MAX`) are skipped, empty slots (`0`)
    /// terminate the probe.
    #[inline(always)]
    pub fn index_probe(&self, hash: u64, tag: u16) -> Option<usize> {
        let mut idx = hash as usize & self.index_mask;
        for _ in 0..16 {
            let entry = self.index[idx].load(Ordering::Acquire);
            if entry == 0 {
                // Empty slot: the key was never stored past this point.
                return None;
            }
            // Skip tombstones; match on the 16-bit tag in the top bits.
            if entry != u64::MAX && (entry >> 48) as u16 == tag {
                // Low 48 bits hold the node slot index.
                return Some((entry & 0x0000_FFFF_FFFF_FFFF) as usize);
            }
            idx = (idx + 1) & self.index_mask;
        }
        None
    }

    /// Inserts `entry` (pre-packed tag + node index) into the first probed
    /// slot that is empty, tombstoned, or already carries the same tag
    /// (overwrite). If all 16 probed slots belong to other tags, the home
    /// slot (`hash & index_mask`) is clobbered as a last resort — the
    /// displaced entry's key becomes unreachable through the index.
    // NOTE(review): the load-then-store pair is not a CAS, so two concurrent
    // stores can claim the same slot and one insertion is silently lost;
    // presumably acceptable for a best-effort cache — confirm intent.
    #[inline(always)]
    pub fn index_store(&self, hash: u64, tag: u16, entry: u64) {
        let mut idx = hash as usize & self.index_mask;
        for i in 0..16 {
            let prev = self.index[idx].load(Ordering::Acquire);
            // Reusable slot: empty, tombstone, or same-tag overwrite.
            if prev == 0 || prev == u64::MAX || (prev >> 48) == (tag as u64) {
                self.index[idx].store(entry, Ordering::Release);
                return;
            }
            if i == 15 {
                // Probe window exhausted: forcibly evict the home slot.
                self.index[hash as usize & self.index_mask].store(entry, Ordering::Release);
            }
            idx = (idx + 1) & self.index_mask;
        }
    }

    /// Tombstones the index entry whose tag AND node index both match,
    /// probing up to 16 slots from the home slot. Requiring `g_idx` to match
    /// avoids removing a different key that merely shares the 16-bit tag.
    /// Stops early at an empty slot (the entry cannot be further along).
    #[inline(always)]
    pub fn index_remove(&self, hash: u64, tag: u16, g_idx: usize) {
        let mut idx = hash as usize & self.index_mask;
        for _ in 0..16 {
            let entry = self.index[idx].load(Ordering::Acquire);
            if entry == 0 {
                return;
            }
            if entry != u64::MAX
                && (entry >> 48) as u16 == tag
                && (entry & 0x0000_FFFF_FFFF_FFFF) == (g_idx as u64)
            {
                // u64::MAX == tombstone: probes skip it, stores may reuse it.
                self.index[idx].store(u64::MAX, Ordering::Release);
                return;
            }
            idx = (idx + 1) & self.index_mask;
        }
    }

    /// Resets index slot `idx` to empty (`0`). Relaxed ordering: this
    /// publishes no data, it only frees the slot.
    #[inline(always)]
    pub fn index_clear_at(&self, idx: usize) {
        self.index[idx].store(0, Ordering::Relaxed);
    }

    /// Number of slots in the hash index (a power of two, `index_mask + 1`).
    #[inline(always)]
    pub fn index_len(&self) -> usize {
        self.index.len()
    }

    /// Reads node slot `idx` and returns a clone of its value when the slot
    /// is occupied, the stored key equals `key`, and the entry has not
    /// expired (`expire_at == 0` means no expiry; otherwise the entry is
    /// stale once `expire_at < current_epoch`). Returns `None` otherwise.
    // NOTE(review): the unsafe deref assumes the pointer loaded here is not
    // freed by a concurrent writer for the duration of this read; nothing in
    // this file enforces that (no epoch/hazard guard visible) — the external
    // reclamation protocol must guarantee it. Confirm.
    #[inline(always)]
    pub fn node_get_full(&self, idx: usize, key: &K, current_epoch: u32) -> Option<V>
    where
        K: Eq,
        V: Clone,
    {
        let ptr = self.nodes[idx].load(Ordering::Acquire);
        if ptr.is_null() {
            return None;
        }
        let node = unsafe { &*ptr };
        // Full key comparison guards against 16-bit tag collisions.
        if node.key == *key {
            if node.expire_at > 0 && node.expire_at < current_epoch {
                // Lazily treat expired entries as absent; slot reclamation
                // happens elsewhere.
                return None;
            }
            Some(node.value.clone())
        } else {
            None
        }
    }

    /// Empties the cache: zeroes every index slot, then nulls every node
    /// pointer. Index is cleared first so readers stop resolving entries
    /// before the node pointers disappear.
    // NOTE(review): the old node pointers are dropped without being freed
    // here — presumably deallocation is the caller's/owner's responsibility,
    // otherwise this leaks every live node. Confirm against the drop path.
    pub fn clear(&self) {
        for i in 0..self.index.len() {
            self.index[i].store(0, Ordering::Relaxed);
        }
        for i in 0..self.nodes.len() {
            self.nodes[i].store(ptr::null_mut(), Ordering::Release);
        }
    }
}