use std::sync::atomic::{AtomicU32, Ordering};
/// Default size of one slab page in bytes (64 KiB).
pub const DEFAULT_PAGE_SIZE: usize = 64 * 1024;
/// Default number of pages in a `SlabPool`.
pub const DEFAULT_POOL_SIZE: usize = 256;
/// Compact handle for one allocation inside a `SlabPool`.
///
/// `core_id` and `page_index` are each 16 bits so the pair packs into a
/// single `u32` (see `SlabReturnQueue::push`, which drops `len`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SlabId {
    // Pool (core) that owns the page; checked by `read`/`free`.
    pub core_id: u16,
    // Index of the page within the owning pool.
    pub page_index: u16,
    // Number of valid bytes stored in the page.
    pub len: u32,
}
/// Fixed-size, per-core page pool backed by one contiguous allocation.
/// Not thread-safe; each core is expected to own its pool exclusively.
pub struct SlabPool {
    // Backing storage: `pool_size` pages of `page_size` bytes each.
    memory: Vec<u8>,
    // Size of each page in bytes.
    page_size: usize,
    // Total number of pages.
    pool_size: usize,
    // Stack of free page indices; `pop` yields the next page to hand out.
    free_list: Vec<u16>,
    // Identifier stamped into every `SlabId` this pool produces.
    core_id: u16,
    // Monotonic count of successful allocations (never decremented by `free`).
    alloc_count: u64,
}
impl SlabPool {
    /// Creates a pool of `pool_size` pages of `page_size` bytes each,
    /// all initially free and zeroed, owned by `core_id`.
    ///
    /// The free list is built in reverse so `pop` hands pages out in
    /// ascending index order (0, 1, 2, ...).
    ///
    /// NOTE(review): `pool_size` must fit in `u16` because page indices
    /// are 16-bit; larger values would silently truncate below.
    pub fn new(core_id: u16, page_size: usize, pool_size: usize) -> Self {
        let memory = vec![0u8; page_size * pool_size];
        let free_list = (0..pool_size as u16).rev().collect();
        Self {
            memory,
            page_size,
            pool_size,
            free_list,
            core_id,
            alloc_count: 0,
        }
    }

    /// Copies `data` into a free page and returns a handle to it.
    ///
    /// Returns `None` when `data` exceeds the page size or no page is
    /// free.
    pub fn alloc(&mut self, data: &[u8]) -> Option<SlabId> {
        if data.len() > self.page_size {
            return None;
        }
        let page_index = self.free_list.pop()?;
        let offset = page_index as usize * self.page_size;
        self.memory[offset..offset + data.len()].copy_from_slice(data);
        self.alloc_count += 1;
        Some(SlabId {
            core_id: self.core_id,
            page_index,
            len: data.len() as u32,
        })
    }

    /// Returns the bytes stored for `id`.
    ///
    /// # Panics
    /// Panics if `id` belongs to another core or its page index is out
    /// of range for this pool.
    pub fn read(&self, id: SlabId) -> &[u8] {
        assert_eq!(id.core_id, self.core_id, "slab ID core mismatch");
        assert!(
            (id.page_index as usize) < self.pool_size,
            "slab ID page index out of range"
        );
        let offset = id.page_index as usize * self.page_size;
        &self.memory[offset..offset + id.len as usize]
    }

    /// Returns `id`'s page to the free list.
    ///
    /// The page index is validated before being pushed: an out-of-range
    /// index would otherwise poison the free list and only fail much
    /// later, inside an unrelated `alloc`. A debug-build check also
    /// rejects double frees, which would let `alloc` hand the same page
    /// out twice.
    ///
    /// # Panics
    /// Panics if `id` belongs to another core or its page index is out
    /// of range; in debug builds, also panics on a double free.
    pub fn free(&mut self, id: SlabId) {
        assert_eq!(id.core_id, self.core_id, "slab ID core mismatch");
        assert!(
            (id.page_index as usize) < self.pool_size,
            "slab ID page index out of range"
        );
        debug_assert!(
            !self.free_list.contains(&id.page_index),
            "double free of slab page"
        );
        self.free_list.push(id.page_index);
    }

    /// Number of pages currently free.
    pub fn available(&self) -> usize {
        self.free_list.len()
    }

    /// Total number of successful allocations over the pool's lifetime.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }

    /// Fraction of pages currently in use, in `[0.0, 1.0]`.
    pub fn utilization(&self) -> f32 {
        1.0 - (self.available() as f32 / self.pool_size as f32)
    }
}
/// Lock-free ring buffer for returning slab pages across threads.
/// Slots hold a packed `(core_id << 16) | page_index`; `u32::MAX`
/// marks an empty slot.
pub struct SlabReturnQueue {
    // One slot per entry; `u32::MAX` means "empty".
    buffer: Vec<AtomicU32>,
    // Monotonically increasing producer cursor (wrapping).
    write_pos: AtomicU32,
    // Monotonically increasing consumer cursor (wrapping).
    read_pos: AtomicU32,
    // Number of slots; cursors are reduced modulo this.
    capacity: u32,
}
impl SlabReturnQueue {
    /// Creates a queue with `capacity` slots, all initialized to the
    /// `u32::MAX` "empty" sentinel.
    pub fn new(capacity: usize) -> Self {
        let buffer = (0..capacity).map(|_| AtomicU32::new(u32::MAX)).collect();
        Self {
            buffer,
            write_pos: AtomicU32::new(0),
            read_pos: AtomicU32::new(0),
            capacity: capacity as u32,
        }
    }

    /// Enqueues `id` for its owning core to reclaim. Only `core_id` and
    /// `page_index` are carried; `len` is dropped (the pool does not
    /// need it to free the page).
    ///
    /// Returns `false` when the queue is full. An unconditional
    /// `fetch_add` here would wrap around and overwrite entries the
    /// consumer has not drained yet, permanently leaking those pages —
    /// so the slot is claimed with a CAS only after a capacity check.
    pub fn push(&self, id: SlabId) -> bool {
        let packed = ((id.core_id as u32) << 16) | (id.page_index as u32);
        // The packed value must not collide with the empty-slot sentinel,
        // so core 0xFFFF / page 0xFFFF is not representable in the queue.
        debug_assert!(packed != u32::MAX, "slab ID collides with empty sentinel");
        loop {
            let write = self.write_pos.load(Ordering::Relaxed);
            let read = self.read_pos.load(Ordering::Acquire);
            if write.wrapping_sub(read) >= self.capacity {
                return false; // full — refuse rather than clobber
            }
            // Claim the slot; retry if another producer raced us to it.
            if self
                .write_pos
                .compare_exchange_weak(
                    write,
                    write.wrapping_add(1),
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                let idx = (write % self.capacity) as usize;
                self.buffer[idx].store(packed, Ordering::Release);
                return true;
            }
        }
    }

    /// Drains all published entries, resetting each slot to the empty
    /// sentinel. The returned ids have `len == 0` because `push` does
    /// not carry the length.
    ///
    /// Intended for a single consumer: two concurrent `drain` calls
    /// could both advance `read_pos` — TODO confirm the caller upholds
    /// this.
    pub fn drain(&self) -> Vec<SlabId> {
        let write = self.write_pos.load(Ordering::Acquire);
        let read = self.read_pos.load(Ordering::Relaxed);
        if write == read {
            return Vec::new();
        }
        let count = write.wrapping_sub(read) as usize;
        let mut ids = Vec::with_capacity(count.min(self.capacity as usize));
        for i in 0..count {
            let idx = ((read.wrapping_add(i as u32)) % self.capacity) as usize;
            let packed = self.buffer[idx].load(Ordering::Acquire);
            if packed == u32::MAX {
                // Slot claimed by a producer but not yet published; stop
                // here and pick it up on the next drain.
                break;
            }
            ids.push(SlabId {
                core_id: (packed >> 16) as u16,
                page_index: (packed & 0xFFFF) as u16,
                len: 0,
            });
            self.buffer[idx].store(u32::MAX, Ordering::Release);
        }
        // Advance only past entries actually consumed.
        self.read_pos
            .store(read.wrapping_add(ids.len() as u32), Ordering::Release);
        ids
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_and_read() {
        let payload = b"hello slab allocator";
        let mut pool = SlabPool::new(0, 1024, 4);
        let handle = pool.alloc(payload).expect("pool has free pages");
        assert_eq!(handle.core_id, 0);
        assert_eq!(handle.len, payload.len() as u32);
        assert_eq!(pool.read(handle), payload);
    }

    #[test]
    fn alloc_exhaustion() {
        // Two pages only: the third allocation must fail.
        let mut pool = SlabPool::new(0, 64, 2);
        assert!(pool.alloc(b"first").is_some());
        assert!(pool.alloc(b"second").is_some());
        assert!(pool.alloc(b"third").is_none());
    }

    #[test]
    fn free_and_reuse() {
        let mut pool = SlabPool::new(0, 64, 1);
        let handle = pool.alloc(b"data").expect("single page is free");
        pool.free(handle);
        assert_eq!(pool.available(), 1);
        let reused = pool.alloc(b"reused").expect("freed page is reusable");
        assert_eq!(pool.read(reused), b"reused");
    }

    #[test]
    fn oversized_rejected() {
        // Payload twice the page size must be refused.
        let mut pool = SlabPool::new(0, 16, 4);
        assert!(pool.alloc(&vec![0u8; 32]).is_none());
    }

    #[test]
    fn utilization() {
        let mut pool = SlabPool::new(0, 64, 4);
        assert_eq!(pool.utilization(), 0.0);
        for chunk in [&b"a"[..], &b"b"[..]] {
            pool.alloc(chunk).unwrap();
        }
        // 2 of 4 pages used.
        assert!((pool.utilization() - 0.5).abs() < 0.01);
    }

    #[test]
    fn return_queue_push_drain() {
        let queue = SlabReturnQueue::new(16);
        queue.push(SlabId {
            core_id: 1,
            page_index: 5,
            len: 100,
        });
        let drained = queue.drain();
        assert_eq!(drained.len(), 1);
        assert_eq!(drained[0].core_id, 1);
        assert_eq!(drained[0].page_index, 5);
    }

    #[test]
    fn return_queue_empty_drain() {
        assert!(SlabReturnQueue::new(16).drain().is_empty());
    }
}