// hopper_core/collections/slab.rs

use crate::account::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;

/// Size in bytes of the slab header: count (u32) + capacity (u32) +
/// free-list head (u32) + reserved (u32), all little-endian.
pub const SLAB_HEADER_SIZE: usize = 16;

/// Sentinel free-list index (`u32::MAX`) meaning "no free slot".
const NO_FREE: u32 = 0xFFFF_FFFF;

/// Number of bytes needed for an allocation bitmap covering `capacity`
/// slots: one bit per slot, rounded up to a whole byte.
#[inline(always)]
pub const fn bitmap_bytes(capacity: usize) -> usize {
    let whole = capacity / 8;
    if capacity % 8 == 0 {
        whole
    } else {
        whole + 1
    }
}

/// Fixed-capacity slab allocator stored in a flat byte buffer.
///
/// Buffer layout (all integers little-endian):
///   bytes 0..4   — count of allocated slots (u32)
///   bytes 4..8   — capacity (u32)
///   bytes 8..12  — free-list head index (u32; `NO_FREE` when exhausted)
///   bytes 12..16 — reserved (zeroed by `init`)
///   then a bitmap of `bitmap_bytes(capacity)` bytes (bit set = allocated),
///   then `capacity` slots of `T::SIZE` bytes each. A free slot stores the
///   index of the next free slot in its first four bytes.
pub struct Slab<'a, T: Pod + FixedLayout> {
    // Backing bytes: header + bitmap + slot array.
    data: &'a mut [u8],
    // Capacity cached from the header at construction time.
    capacity: usize,
    // Ties the slab to its element type without storing a `T`.
    _phantom: core::marker::PhantomData<T>,
}

59impl<'a, T: Pod + FixedLayout> Slab<'a, T> {
60 #[inline]
62 pub fn from_bytes_mut(data: &'a mut [u8]) -> Result<Self, ProgramError> {
63 if data.len() < SLAB_HEADER_SIZE {
64 return Err(ProgramError::AccountDataTooSmall);
65 }
66 if T::SIZE < 4 {
67 return Err(ProgramError::InvalidArgument);
68 }
69 let capacity = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
70 let needed = SLAB_HEADER_SIZE + bitmap_bytes(capacity) + capacity * T::SIZE;
71 if data.len() < needed {
72 return Err(ProgramError::AccountDataTooSmall);
73 }
74 Ok(Self {
75 data,
76 capacity,
77 _phantom: core::marker::PhantomData,
78 })
79 }
80
81 #[inline]
86 pub fn init(data: &mut [u8], capacity: usize) -> Result<(), ProgramError> {
87 if T::SIZE < 4 {
88 return Err(ProgramError::InvalidArgument);
89 }
90 let bmap_len = bitmap_bytes(capacity);
91 let needed = SLAB_HEADER_SIZE + bmap_len + capacity * T::SIZE;
92 if data.len() < needed {
93 return Err(ProgramError::AccountDataTooSmall);
94 }
95
96 data[0..4].copy_from_slice(&0u32.to_le_bytes());
98 data[4..8].copy_from_slice(&(capacity as u32).to_le_bytes());
99 data[8..12].copy_from_slice(&0u32.to_le_bytes()); data[12..16].copy_from_slice(&0u32.to_le_bytes());
101
102 let bmap_start = SLAB_HEADER_SIZE;
104 let mut i = 0;
105 while i < bmap_len {
106 data[bmap_start + i] = 0;
107 i += 1;
108 }
109
110 let slots_start = SLAB_HEADER_SIZE + bmap_len;
112 i = 0;
113 while i < capacity {
114 let slot_offset = slots_start + i * T::SIZE;
115 let next = if i + 1 < capacity {
116 (i + 1) as u32
117 } else {
118 NO_FREE
119 };
120 data[slot_offset..slot_offset + 4].copy_from_slice(&next.to_le_bytes());
121 i += 1;
122 }
123
124 Ok(())
125 }
126
127 #[inline(always)]
129 fn bitmap_offset(&self) -> usize {
130 SLAB_HEADER_SIZE
131 }
132
133 #[inline(always)]
135 fn slots_offset(&self) -> usize {
136 SLAB_HEADER_SIZE + bitmap_bytes(self.capacity)
137 }
138
139 #[inline(always)]
141 fn is_allocated(&self, index: usize) -> bool {
142 let bmap = self.bitmap_offset();
143 let byte_idx = index / 8;
144 let bit_idx = index % 8;
145 (self.data[bmap + byte_idx] >> bit_idx) & 1 == 1
146 }
147
148 #[inline(always)]
150 fn mark_allocated(&mut self, index: usize) {
151 let bmap = self.bitmap_offset();
152 let byte_idx = index / 8;
153 let bit_idx = index % 8;
154 self.data[bmap + byte_idx] |= 1 << bit_idx;
155 }
156
157 #[inline(always)]
159 fn mark_free(&mut self, index: usize) {
160 let bmap = self.bitmap_offset();
161 let byte_idx = index / 8;
162 let bit_idx = index % 8;
163 self.data[bmap + byte_idx] &= !(1 << bit_idx);
164 }
165
166 #[inline(always)]
168 pub fn count(&self) -> u32 {
169 u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]])
170 }
171
172 #[inline(always)]
174 pub fn capacity(&self) -> usize {
175 self.capacity
176 }
177
178 #[inline(always)]
180 fn free_head(&self) -> u32 {
181 u32::from_le_bytes([self.data[8], self.data[9], self.data[10], self.data[11]])
182 }
183
184 #[inline(always)]
186 pub fn is_full(&self) -> bool {
187 self.free_head() == NO_FREE
188 }
189
190 #[inline(always)]
192 pub fn is_slot_allocated(&self, index: u32) -> bool {
193 let idx = index as usize;
194 idx < self.capacity && self.is_allocated(idx)
195 }
196
197 #[inline]
199 pub fn alloc(&mut self, value: T) -> Result<u32, ProgramError> {
200 let head = self.free_head();
201 if head == NO_FREE {
202 return Err(ProgramError::AccountDataTooSmall);
203 }
204
205 let idx = head as usize;
206 let slot_offset = self.slots_offset() + idx * T::SIZE;
207
208 let next_free = u32::from_le_bytes([
210 self.data[slot_offset],
211 self.data[slot_offset + 1],
212 self.data[slot_offset + 2],
213 self.data[slot_offset + 3],
214 ]);
215
216 unsafe {
219 core::ptr::copy_nonoverlapping(
220 &value as *const T as *const u8,
221 self.data.as_mut_ptr().add(slot_offset),
222 T::SIZE,
223 );
224 }
225
226 self.mark_allocated(idx);
228
229 self.data[8..12].copy_from_slice(&next_free.to_le_bytes());
231
232 let count = self.count() + 1;
234 self.data[0..4].copy_from_slice(&count.to_le_bytes());
235
236 Ok(head)
237 }
238
239 #[inline]
243 pub fn free(&mut self, index: u32) -> Result<(), ProgramError> {
244 let idx = index as usize;
245 if idx >= self.capacity {
246 return Err(ProgramError::InvalidArgument);
247 }
248 if !self.is_allocated(idx) {
249 return Err(ProgramError::InvalidArgument);
250 }
251
252 let slot_offset = self.slots_offset() + idx * T::SIZE;
253
254 let current_head = self.free_head();
256 self.data[slot_offset..slot_offset + 4].copy_from_slice(¤t_head.to_le_bytes());
257
258 let mut i = 4;
260 while i < T::SIZE {
261 self.data[slot_offset + i] = 0;
262 i += 1;
263 }
264
265 self.mark_free(idx);
267
268 self.data[8..12].copy_from_slice(&index.to_le_bytes());
270
271 let count = self.count().saturating_sub(1);
273 self.data[0..4].copy_from_slice(&count.to_le_bytes());
274
275 Ok(())
276 }
277
278 #[inline]
282 pub fn get(&self, index: u32) -> Result<T, ProgramError> {
283 let idx = index as usize;
284 if idx >= self.capacity || !self.is_allocated(idx) {
285 return Err(ProgramError::InvalidArgument);
286 }
287 let slot_offset = self.slots_offset() + idx * T::SIZE;
288 Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(slot_offset) as *const T) })
290 }
291
292 #[inline]
296 pub fn get_ref(&self, index: u32) -> Result<&T, ProgramError> {
297 let idx = index as usize;
298 if idx >= self.capacity || !self.is_allocated(idx) {
299 return Err(ProgramError::InvalidArgument);
300 }
301 let slot_offset = self.slots_offset() + idx * T::SIZE;
302 Ok(unsafe { &*(self.data.as_ptr().add(slot_offset) as *const T) })
304 }
305
306 #[inline]
310 pub fn get_mut(&mut self, index: u32) -> Result<&mut T, ProgramError> {
311 let idx = index as usize;
312 if idx >= self.capacity || !self.is_allocated(idx) {
313 return Err(ProgramError::InvalidArgument);
314 }
315 let slot_offset = self.slots_offset() + idx * T::SIZE;
316 Ok(unsafe { &mut *(self.data.as_mut_ptr().add(slot_offset) as *mut T) })
318 }
319
320 #[inline]
324 pub fn set(&mut self, index: u32, value: T) -> Result<(), ProgramError> {
325 let idx = index as usize;
326 if idx >= self.capacity || !self.is_allocated(idx) {
327 return Err(ProgramError::InvalidArgument);
328 }
329 let slot_offset = self.slots_offset() + idx * T::SIZE;
330 unsafe {
332 core::ptr::copy_nonoverlapping(
333 &value as *const T as *const u8,
334 self.data.as_mut_ptr().add(slot_offset),
335 T::SIZE,
336 );
337 }
338 Ok(())
339 }
340
341 #[inline(always)]
343 pub const fn required_bytes(capacity: usize) -> usize {
344 SLAB_HEADER_SIZE + bitmap_bytes(capacity) + capacity * T::SIZE
345 }
346}