use core::ptr::NonNull;
use core::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
use super::size_class::SizeClass;
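/// Magic value stamped into every slab header: the ASCII bytes "SLAB".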
pub const SLAB_MAGIC: u32 = 0x534C_4142;
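/// Upper bound on the number of objects a single slab's bitmap can track.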
pub const MAX_OBJECTS_PER_SLAB: usize = 512;
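/// Number of 64-bit bitmap words needed for one free bit per object.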
pub const BITMAP_WORDS: usize = MAX_OBJECTS_PER_SLAB / 64;
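/// Largest slab size in pages; bounds the backward header scan when the slab
/// size is not known up front.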
pub const MAX_SLAB_PAGES: usize = 4;
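/// Header placed at the base of every slab. The owning CPU allocates and
/// frees through `local_bitmap` (bit set = object free) with no
/// synchronization; other CPUs return objects through the lock-free
/// `remote_free_head` stack, which the owner later drains back into the
/// bitmap.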
#[repr(C)]
pub struct SlabPageHeader {
pub magic: u32,
pub size_class: SizeClass,
pub object_count: u16,
pub local_free_count: u16,
pub owner_cpu: u16,
_pad: u16,
pub slab_bytes: u32,
pub list_prev: usize,
pub list_next: usize,
pub local_bitmap: [u64; BITMAP_WORDS],
pub remote_free_head: AtomicUsize,
pub remote_free_count: AtomicU32,
}
impl SlabPageHeader {
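    /// Size in bytes of the header itself; `data_offset` rounds it up to
    /// place the first object.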
pub const HEADER_SIZE: usize = core::mem::size_of::<Self>();
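    /// Initializes the header in place for a slab of `bytes` bytes owned by
    /// `owner_cpu`, marking every object free. The caller must have exclusive
    /// access to the slab memory; any previous header state is overwritten.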
pub fn init(&mut self, size_class: SizeClass, bytes: usize, owner_cpu: u16) {
let obj_size = size_class.size();
let data_start = Self::data_offset(obj_size);
let usable = bytes.saturating_sub(data_start);
let count = (usable / obj_size).min(MAX_OBJECTS_PER_SLAB);
self.magic = SLAB_MAGIC;
self.size_class = size_class;
self.object_count = count as u16;
self.local_free_count = count as u16;
self.owner_cpu = owner_cpu;
self._pad = 0;
self.slab_bytes = bytes as u32;
self.list_prev = 0;
self.list_next = 0;
self.local_bitmap = [0u64; BITMAP_WORDS];
self.remote_free_head = AtomicUsize::new(0);
self.remote_free_count = AtomicU32::new(0);
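        // Mark the first `count` objects free (bit set = free): whole words
        // first, then the partial trailing word.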
let full_words = count / 64;
let remaining_bits = count % 64;
for w in self.local_bitmap.iter_mut().take(full_words) {
*w = u64::MAX;
}
if remaining_bits > 0 {
self.local_bitmap[full_words] = (1u64 << remaining_bits) - 1;
}
}
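    /// Offset from the slab base to the first object: the header size rounded
    /// up to the next multiple of `obj_size`. The mask trick requires object
    /// sizes to be powers of two.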
    pub fn data_offset(obj_size: usize) -> usize {
        debug_assert!(obj_size.is_power_of_two());
        let raw = Self::HEADER_SIZE;
        (raw + obj_size - 1) & !(obj_size - 1)
    }
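    /// Address of the first object in a slab based at `base`.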
#[inline]
pub fn data_start(&self, base: usize) -> usize {
base + Self::data_offset(self.size_class.size())
}
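    /// Address of the object at `index` in a slab based at `base`.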
#[inline]
pub fn object_addr(&self, base: usize, index: usize) -> usize {
self.data_start(base) + index * self.size_class.size()
}
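    /// Index of the object containing `addr` in a slab based at `base`.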
    #[inline]
    pub fn object_index(&self, base: usize, addr: usize) -> usize {
        debug_assert!(addr >= self.data_start(base));
        (addr - self.data_start(base)) / self.size_class.size()
    }
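    /// Recovers the slab base from an object address when the slab size is
    /// known, scanning backward one page at a time for a header with a
    /// matching magic and size. In release builds an unmatched address falls
    /// back to the containing page base.
    ///
    /// # Safety
    ///
    /// Every page probed by the scan must be mapped and readable, since
    /// candidate headers are read through raw pointers.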
#[inline]
    pub unsafe fn base_from_obj_addr<const PAGE_SIZE: usize>(
        addr: usize,
        slab_bytes: usize,
    ) -> usize {
        debug_assert!(PAGE_SIZE.is_power_of_two());
        let slab_pages = slab_bytes / PAGE_SIZE;
        debug_assert!(slab_pages > 0);
        let page_base = addr & !(PAGE_SIZE - 1);
for page_idx in 0..slab_pages {
let Some(candidate) = page_base.checked_sub(page_idx * PAGE_SIZE) else {
break;
};
let hdr = unsafe { &*(candidate as *const SlabPageHeader) };
if hdr.magic == SLAB_MAGIC
&& hdr.slab_bytes as usize == slab_bytes
&& addr >= candidate
&& addr < candidate + slab_bytes
{
return candidate;
}
}
debug_assert!(false, "object address does not belong to a live slab");
page_base
}
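    /// As `base_from_obj_addr`, but for callers that do not know the slab
    /// size: a candidate header is trusted only if it also carries a
    /// plausible `slab_bytes` (non-zero, a multiple of the page size, at most
    /// `MAX_SLAB_PAGES` pages).
    ///
    /// # Safety
    ///
    /// Every page probed by the scan must be mapped and readable.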
    unsafe fn base_from_obj_addr_unknown_with_page_size(
        addr: usize,
        page_size: usize,
    ) -> Option<usize> {
if page_size == 0 || !page_size.is_power_of_two() {
return None;
}
let page_base = addr & !(page_size - 1);
for page_idx in 0..MAX_SLAB_PAGES {
let Some(candidate) = page_base.checked_sub(page_idx * page_size) else {
break;
};
let hdr = unsafe { &*(candidate as *const SlabPageHeader) };
let slab_bytes = hdr.slab_bytes as usize;
if hdr.magic != SLAB_MAGIC
|| slab_bytes == 0
|| !slab_bytes.is_multiple_of(page_size)
|| slab_bytes / page_size > MAX_SLAB_PAGES
{
continue;
}
if addr >= candidate && addr < candidate + slab_bytes {
return Some(candidate);
}
}
None
}
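    /// Const-generic wrapper around `base_from_obj_addr_unknown_with_page_size`.
    ///
    /// # Safety
    ///
    /// Same contract: every page probed by the scan must be mapped and
    /// readable.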
#[inline]
    pub unsafe fn base_from_obj_addr_unknown<const PAGE_SIZE: usize>(addr: usize) -> Option<usize> {
        unsafe { Self::base_from_obj_addr_unknown_with_page_size(addr, PAGE_SIZE) }
}
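    /// Frees an object on behalf of a CPU that does not own its slab:
    /// locates the slab header, then pushes the object onto the owner's
    /// remote-free stack.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a live object previously handed out from a slab,
    /// and the pages the header scan probes must be mapped and readable.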
pub unsafe fn remote_free_object(ptr: NonNull<u8>, owner_cpu: u16, page_size: usize) {
let obj_addr = ptr.as_ptr() as usize;
        let Some(base) =
            (unsafe { Self::base_from_obj_addr_unknown_with_page_size(obj_addr, page_size) })
        else {
debug_assert!(false, "object address does not belong to a live slab");
return;
};
let hdr = unsafe { &*(base as *const SlabPageHeader) };
debug_assert_eq!(hdr.magic, SLAB_MAGIC);
debug_assert_eq!(hdr.owner_cpu, owner_cpu);
unsafe { hdr.remote_free(obj_addr) };
}
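    /// Pops one free object off the local bitmap and returns its index, or
    /// `None` if the slab is locally full. Owner CPU only; unsynchronized.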
pub fn local_alloc(&mut self) -> Option<usize> {
for (wi, word) in self.local_bitmap.iter_mut().enumerate() {
if *word != 0 {
let bit = word.trailing_zeros() as usize;
*word &= !(1u64 << bit);
self.local_free_count -= 1;
return Some(wi * 64 + bit);
}
}
None
}
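    /// Returns the object at `index` to the local bitmap. Owner CPU only.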
pub fn local_free(&mut self, index: usize) {
let wi = index / 64;
let bit = index % 64;
debug_assert!(self.local_bitmap[wi] & (1u64 << bit) == 0, "double free");
self.local_bitmap[wi] |= 1u64 << bit;
self.local_free_count += 1;
}
#[inline]
pub fn has_local_free(&self) -> bool {
self.local_free_count > 0
}
#[inline]
pub fn is_all_free(&self) -> bool {
self.local_free_count == self.object_count
}
#[inline]
pub fn is_local_full(&self) -> bool {
self.local_free_count == 0
}
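    /// Pushes a freed object onto the lock-free remote-free stack. Called by
    /// non-owner CPUs; the stack's next pointer is stored inline in the freed
    /// object itself.
    ///
    /// # Safety
    ///
    /// `obj_addr` must be a live, unaliased object of this slab, large and
    /// aligned enough to hold a `usize` link.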
    pub unsafe fn remote_free(&self, obj_addr: usize) {
        // The freed object holds the intrusive next pointer, so it must be
        // large enough and aligned for a `usize` store.
        debug_assert!(obj_addr % core::mem::align_of::<usize>() == 0);
        let mut old_head = self.remote_free_head.load(Ordering::Acquire);
        loop {
            unsafe { (obj_addr as *mut usize).write(old_head) };
            match self.remote_free_head.compare_exchange_weak(
                old_head,
                obj_addr,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break,
                Err(current) => old_head = current,
            }
        }
        self.remote_free_count.fetch_add(1, Ordering::Relaxed);
    }
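    /// Owner-CPU path: moves every remotely freed object back into the local
    /// bitmap. `base` is the slab base address.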
    pub fn drain_remote_frees(&mut self, base: usize) {
        let head = self.remote_free_head.swap(0, Ordering::AcqRel);
        if head == 0 {
            return;
        }
        let mut drained: u32 = 0;
        let mut ptr = head;
        while ptr != 0 {
            let next = unsafe { *(ptr as *const usize) };
            let idx = self.object_index(base, ptr);
            let wi = idx / 64;
            let bit = idx % 64;
            debug_assert!(
                self.local_bitmap[wi] & (1u64 << bit) == 0,
                "remote double free"
            );
            self.local_bitmap[wi] |= 1u64 << bit;
            self.local_free_count += 1;
            drained += 1;
            ptr = next;
        }
        // Subtract what was drained rather than storing 0: a racing
        // `remote_free` can push (and count) a new object after the swap
        // above, and storing 0 would wipe that increment, leaving the pending
        // object invisible to `has_remote_frees`. The counter may wrap
        // transiently when a racing push's increment lands late, which only
        // costs one spurious drain.
        self.remote_free_count.fetch_sub(drained, Ordering::Relaxed);
    }
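    /// Approximate hint: the counter is maintained with relaxed atomics and
    /// can transiently disagree with the stack, so treat `true` as a cue to
    /// call `drain_remote_frees`, not as a guarantee.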
#[inline]
pub fn has_remote_frees(&self) -> bool {
self.remote_free_count.load(Ordering::Relaxed) > 0
}
}
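#[cfg(test)]
mod tests {
    use super::*;

    // A minimal round-trip sketch, not an exhaustive test. It assumes a
    // `SizeClass` for 64-byte objects can be obtained via a hypothetical
    // `SizeClass::for_size(64)`; substitute whatever constructor the real
    // `size_class` module provides. It also assumes the all-zero byte
    // pattern is a valid (if meaningless) `SlabPageHeader`, which `init`
    // immediately overwrites.
    #[test]
    fn local_alloc_free_round_trip() {
        #[repr(align(4096))]
        struct Page([u8; 4096]);
        let mut page = Page([0u8; 4096]);
        let base = page.0.as_mut_ptr() as usize;

        let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
        hdr.init(SizeClass::for_size(64), 4096, 0);
        assert!(hdr.is_all_free());

        let idx = hdr.local_alloc().expect("fresh slab has free objects");
        let addr = hdr.object_addr(base, idx);
        assert_eq!(hdr.object_index(base, addr), idx);
        assert!(!hdr.is_all_free());

        hdr.local_free(idx);
        assert!(hdr.is_all_free());
    }
}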