use crate::account::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;
/// Byte length of the slab header: count (u32) | capacity (u32) |
/// free_head (u32) | 4 bytes written as zero and otherwise unused here.
/// All fields little-endian.
pub const SLAB_HEADER_SIZE: usize = 16;
/// Sentinel index (u32::MAX) terminating the intrusive free list; also the
/// free-head value of a full slab.
const NO_FREE: u32 = 0xFFFF_FFFF;
/// Number of bytes needed for a one-bit-per-slot allocation bitmap covering
/// `capacity` slots: eight slots per byte, rounded up.
#[inline(always)]
pub const fn bitmap_bytes(capacity: usize) -> usize {
    let full_bytes = capacity / 8;
    if capacity % 8 == 0 {
        full_bytes
    } else {
        full_bytes + 1
    }
}
/// Zero-copy fixed-size slot allocator ("slab") over a borrowed byte buffer.
///
/// Layout (as written by `init`): 16-byte header, then an allocation bitmap
/// with one bit per slot, then `capacity` slots of `T::SIZE` bytes each.
/// A free slot reuses its first 4 bytes as the little-endian `u32` index of
/// the next free slot, forming an intrusive free list headed by header
/// bytes `[8..12]`.
pub struct Slab<'a, T: Pod + FixedLayout> {
    // Backing bytes: header + bitmap + slot area; length validated on attach.
    data: &'a mut [u8],
    // Slot count, cached from header bytes [4..8] by `from_bytes_mut`.
    capacity: usize,
    // Ties the element type to the slab without storing a T.
    _phantom: core::marker::PhantomData<T>,
}
impl<'a, T: Pod + FixedLayout> Slab<'a, T> {
    /// Bytes needed for a slab of `capacity` slots, computed with checked
    /// arithmetic. Returns `None` on overflow — essential because
    /// `from_bytes_mut` feeds it a capacity read from untrusted bytes, where
    /// a wrapping multiply could shrink `needed` below the real size.
    #[inline(always)]
    fn checked_required_bytes(capacity: usize) -> Option<usize> {
        capacity
            .checked_mul(T::SIZE)?
            .checked_add(SLAB_HEADER_SIZE)?
            .checked_add(bitmap_bytes(capacity))
    }
    /// Borrows an already-initialized slab from `data`, reading the capacity
    /// stored at header bytes `[4..8]` and validating the buffer length.
    ///
    /// # Errors
    /// * `AccountDataTooSmall` — buffer shorter than the header, or shorter
    ///   than the size implied by the stored capacity.
    /// * `InvalidArgument` — `T::SIZE < 4` (a free slot must hold a `u32`
    ///   next-index), or the stored capacity overflows the size computation
    ///   (corrupted or hostile data).
    #[inline]
    pub fn from_bytes_mut(data: &'a mut [u8]) -> Result<Self, ProgramError> {
        if data.len() < SLAB_HEADER_SIZE {
            return Err(ProgramError::AccountDataTooSmall);
        }
        if T::SIZE < 4 {
            return Err(ProgramError::InvalidArgument);
        }
        // NOTE(review): the unsafe copies below assume FixedLayout implies
        // T::SIZE == size_of::<T>() — confirm against the trait's contract.
        debug_assert_eq!(T::SIZE, core::mem::size_of::<T>());
        let capacity = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
        // Checked math: a hostile capacity must not wrap `needed` into a
        // small value that passes the length check (release builds wrap).
        let needed =
            Self::checked_required_bytes(capacity).ok_or(ProgramError::InvalidArgument)?;
        if data.len() < needed {
            return Err(ProgramError::AccountDataTooSmall);
        }
        Ok(Self {
            data,
            capacity,
            _phantom: core::marker::PhantomData,
        })
    }
    /// Formats `data` as an empty slab: count 0, zeroed bitmap, and all
    /// `capacity` slots threaded onto the free list (`0 -> 1 -> … -> NO_FREE`).
    ///
    /// # Errors
    /// * `InvalidArgument` — `T::SIZE < 4`, `capacity` does not fit in the
    ///   `u32` header field, or the size computation overflows.
    /// * `AccountDataTooSmall` — buffer cannot hold the requested layout.
    #[inline]
    pub fn init(data: &mut [u8], capacity: usize) -> Result<(), ProgramError> {
        if T::SIZE < 4 {
            return Err(ProgramError::InvalidArgument);
        }
        // The header stores capacity as a u32; a bare `as` cast would
        // silently truncate larger values on 64-bit targets.
        if capacity > u32::MAX as usize {
            return Err(ProgramError::InvalidArgument);
        }
        let bmap_len = bitmap_bytes(capacity);
        let needed =
            Self::checked_required_bytes(capacity).ok_or(ProgramError::InvalidArgument)?;
        if data.len() < needed {
            return Err(ProgramError::AccountDataTooSmall);
        }
        data[0..4].copy_from_slice(&0u32.to_le_bytes()); // count = 0
        data[4..8].copy_from_slice(&(capacity as u32).to_le_bytes());
        // Free head: slot 0, or NO_FREE when there are no slots at all.
        // Writing 0 for a zero-capacity slab would hand alloc() an index
        // into a slot area that does not exist.
        let head: u32 = if capacity == 0 { NO_FREE } else { 0 };
        data[8..12].copy_from_slice(&head.to_le_bytes());
        data[12..16].copy_from_slice(&0u32.to_le_bytes()); // reserved, unused here
        let bmap_start = SLAB_HEADER_SIZE;
        data[bmap_start..bmap_start + bmap_len].fill(0);
        // Thread each slot onto the free list via its first 4 bytes.
        let slots_start = bmap_start + bmap_len;
        for i in 0..capacity {
            let slot_offset = slots_start + i * T::SIZE;
            let next = if i + 1 < capacity { (i + 1) as u32 } else { NO_FREE };
            data[slot_offset..slot_offset + 4].copy_from_slice(&next.to_le_bytes());
        }
        Ok(())
    }
    /// Byte offset of the allocation bitmap (immediately after the header).
    #[inline(always)]
    fn bitmap_offset(&self) -> usize {
        SLAB_HEADER_SIZE
    }
    /// Byte offset of the first slot (after header and bitmap).
    #[inline(always)]
    fn slots_offset(&self) -> usize {
        SLAB_HEADER_SIZE + bitmap_bytes(self.capacity)
    }
    /// True if the bitmap bit for `index` is set; caller guarantees
    /// `index < capacity`.
    #[inline(always)]
    fn is_allocated(&self, index: usize) -> bool {
        let bmap = self.bitmap_offset();
        (self.data[bmap + index / 8] >> (index % 8)) & 1 == 1
    }
    /// Sets the bitmap bit for `index`.
    #[inline(always)]
    fn mark_allocated(&mut self, index: usize) {
        let bmap = self.bitmap_offset();
        self.data[bmap + index / 8] |= 1 << (index % 8);
    }
    /// Clears the bitmap bit for `index`.
    #[inline(always)]
    fn mark_free(&mut self, index: usize) {
        let bmap = self.bitmap_offset();
        self.data[bmap + index / 8] &= !(1 << (index % 8));
    }
    /// Number of currently allocated slots (header bytes `[0..4]`).
    #[inline(always)]
    pub fn count(&self) -> u32 {
        u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]])
    }
    /// Total number of slots in the slab.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        self.capacity
    }
    /// Index of the first free slot, or `NO_FREE` (header bytes `[8..12]`).
    #[inline(always)]
    fn free_head(&self) -> u32 {
        u32::from_le_bytes([self.data[8], self.data[9], self.data[10], self.data[11]])
    }
    /// True when the free list is empty, i.e. no further `alloc` can succeed.
    #[inline(always)]
    pub fn is_full(&self) -> bool {
        self.free_head() == NO_FREE
    }
    /// True if `index` is in range and currently allocated.
    #[inline(always)]
    pub fn is_slot_allocated(&self, index: u32) -> bool {
        let idx = index as usize;
        idx < self.capacity && self.is_allocated(idx)
    }
    /// Pops the free-list head, writes `value` into that slot, and returns
    /// the slot index.
    ///
    /// # Errors
    /// * `AccountDataTooSmall` — slab is full (variant kept for caller
    ///   compatibility even though the name is a loose fit).
    /// * `InvalidArgument` — stored free-list head is out of range
    ///   (corrupted account data).
    #[inline]
    pub fn alloc(&mut self, value: T) -> Result<u32, ProgramError> {
        let head = self.free_head();
        if head == NO_FREE {
            return Err(ProgramError::AccountDataTooSmall);
        }
        let idx = head as usize;
        // Defensive: a corrupted head must not index past the slot area.
        if idx >= self.capacity {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // A free slot stores the next free index in its first 4 bytes.
        let next_free = u32::from_le_bytes([
            self.data[slot_offset],
            self.data[slot_offset + 1],
            self.data[slot_offset + 2],
            self.data[slot_offset + 3],
        ]);
        // SAFETY: `from_bytes_mut`/`init` verified slot_offset + T::SIZE <=
        // data.len(); the byte-level copy has no alignment requirement, and
        // `T: Pod` makes its bytes safe to duplicate.
        unsafe {
            core::ptr::copy_nonoverlapping(
                &value as *const T as *const u8,
                self.data.as_mut_ptr().add(slot_offset),
                T::SIZE,
            );
        }
        self.mark_allocated(idx);
        self.data[8..12].copy_from_slice(&next_free.to_le_bytes());
        // Saturate so corrupted state cannot panic on overflow in debug.
        let count = self.count().saturating_add(1);
        self.data[0..4].copy_from_slice(&count.to_le_bytes());
        Ok(head)
    }
    /// Returns `index` to the free list, scrubbing the slot payload beyond
    /// the embedded next-free pointer so stale data cannot leak.
    ///
    /// # Errors
    /// `InvalidArgument` — `index` out of range, or not currently allocated
    /// (double-free guard).
    #[inline]
    pub fn free(&mut self, index: u32) -> Result<(), ProgramError> {
        let idx = index as usize;
        if idx >= self.capacity || !self.is_allocated(idx) {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // Link this slot in front of the current free list.
        let current_head = self.free_head();
        self.data[slot_offset..slot_offset + 4].copy_from_slice(&current_head.to_le_bytes());
        self.data[slot_offset + 4..slot_offset + T::SIZE].fill(0);
        self.mark_free(idx);
        self.data[8..12].copy_from_slice(&index.to_le_bytes());
        let count = self.count().saturating_sub(1);
        self.data[0..4].copy_from_slice(&count.to_le_bytes());
        Ok(())
    }
    /// Copies the value out of an allocated slot.
    ///
    /// # Errors
    /// `InvalidArgument` — `index` out of range or slot not allocated.
    #[inline]
    pub fn get(&self, index: u32) -> Result<T, ProgramError> {
        let idx = index as usize;
        if idx >= self.capacity || !self.is_allocated(idx) {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // SAFETY: bounds proven at construction; `read_unaligned` tolerates
        // any slot alignment, and `T: Pod` accepts any byte pattern.
        Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(slot_offset) as *const T) })
    }
    /// Borrows an allocated slot immutably.
    ///
    /// # Errors
    /// `InvalidArgument` — `index` out of range or slot not allocated.
    #[inline]
    pub fn get_ref(&self, index: u32) -> Result<&T, ProgramError> {
        let idx = index as usize;
        if idx >= self.capacity || !self.is_allocated(idx) {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // NOTE(review): a `&T` must be aligned; this is only sound if T's
        // alignment is 1 or the layout otherwise guarantees alignment —
        // the assert documents that assumption for debug builds.
        debug_assert_eq!(
            (self.data.as_ptr() as usize + slot_offset) % core::mem::align_of::<T>(),
            0
        );
        // SAFETY: in-bounds (checked at construction), marked allocated,
        // and assumed suitably aligned per the note above.
        Ok(unsafe { &*(self.data.as_ptr().add(slot_offset) as *const T) })
    }
    /// Borrows an allocated slot mutably.
    ///
    /// # Errors
    /// `InvalidArgument` — `index` out of range or slot not allocated.
    #[inline]
    pub fn get_mut(&mut self, index: u32) -> Result<&mut T, ProgramError> {
        let idx = index as usize;
        if idx >= self.capacity || !self.is_allocated(idx) {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // NOTE(review): same alignment assumption as `get_ref`.
        debug_assert_eq!(
            (self.data.as_ptr() as usize + slot_offset) % core::mem::align_of::<T>(),
            0
        );
        // SAFETY: in-bounds, allocated, and assumed suitably aligned; the
        // &mut self receiver guarantees exclusive access to the bytes.
        Ok(unsafe { &mut *(self.data.as_mut_ptr().add(slot_offset) as *mut T) })
    }
    /// Overwrites an already-allocated slot with `value`.
    ///
    /// # Errors
    /// `InvalidArgument` — `index` out of range or slot not allocated.
    #[inline]
    pub fn set(&mut self, index: u32, value: T) -> Result<(), ProgramError> {
        let idx = index as usize;
        if idx >= self.capacity || !self.is_allocated(idx) {
            return Err(ProgramError::InvalidArgument);
        }
        let slot_offset = self.slots_offset() + idx * T::SIZE;
        // SAFETY: in-bounds per construction-time check; byte copy has no
        // alignment requirement and `T: Pod` bytes are freely copyable.
        unsafe {
            core::ptr::copy_nonoverlapping(
                &value as *const T as *const u8,
                self.data.as_mut_ptr().add(slot_offset),
                T::SIZE,
            );
        }
        Ok(())
    }
    /// Total bytes a caller must allocate for a slab of `capacity` slots.
    /// Uses unchecked arithmetic — intended for trusted capacities (overflow
    /// panics in debug builds); untrusted sizes go through the checked path
    /// inside `from_bytes_mut`/`init`.
    #[inline(always)]
    pub const fn required_bytes(capacity: usize) -> usize {
        SLAB_HEADER_SIZE + bitmap_bytes(capacity) + capacity * T::SIZE
    }
}