use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::Ordering;
use core::{marker::PhantomData, ptr::NonNull, result::Result, sync::atomic::AtomicU8};
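/// Atomically sets bit `idx` (test-and-set) and returns its previous value,
/// or `None` if `idx` is out of range.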
fn atomic_bts(arr: &[AtomicU8], idx: usize) -> Option<bool> {
let el = arr.get(idx / 8)?;
let bit = (idx % 8) as u8;
    // Acquire pairs with the Release in `atomic_btc`: a thread that claims a
    // slot must observe all writes made before that slot was freed.
    let val = el.fetch_or(1 << bit, Ordering::Acquire);
Some((val & (1 << bit)) != 0)
}
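/// Atomically clears bit `idx` (test-and-clear) and returns its previous
/// value, or `None` if `idx` is out of range.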
fn atomic_btc(arr: &[AtomicU8], idx: usize) -> Option<bool> {
let el = arr.get(idx / 8)?;
let bit = (idx % 8) as u8;
    // Release publishes all prior writes to the slot (e.g. the destructor's)
    // before a claiming thread can observe the bit as clear.
    let val = el.fetch_and(!(1 << bit), Ordering::Release);
Some((val & (1 << bit)) != 0)
}
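/// Divides `num` by `den`, rounding up.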
fn div_ceil(num: usize, den: usize) -> usize {
    (num + den - 1) / den
}
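/// Errors from constructing a [`SlabAllocator`].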
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SlabError {
BadBaseAlignment,
}
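/// Errors from allocating out of a [`SlabAllocator`].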
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SlabAllocError {
HeapExhausted,
}
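// SAFETY: the bitmap is only mutated through atomic read-modify-write
// operations, so allocation and deallocation may race through a shared
// reference. `T: Send` is required because values of `T` are created,
// handed out, and dropped through the allocator.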
unsafe impl<T: Send> Send for SlabAllocator<T> {}
unsafe impl<T: Send> Sync for SlabAllocator<T> {}
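/// A fixed-capacity slab allocator over a caller-provided memory region.
/// Element slots occupy the front of the region; a one-bit-per-slot
/// occupancy bitmap occupies the tail.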
#[derive(Debug)]
pub struct SlabAllocator<T: Sized> {
_data: PhantomData<T>,
num_elems: usize,
bitmap: &'static mut [AtomicU8],
data: *mut MaybeUninit<T>,
}
impl<T: Sized> SlabAllocator<T> {
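    /// Creates an allocator over the `size` bytes starting at `mem`. The
    /// region must be valid for reads and writes, unaliased, and must outlive
    /// the allocator; `mem` must be aligned for `T` or
    /// `SlabError::BadBaseAlignment` is returned.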
    pub fn new(mem: *mut MaybeUninit<u8>, size: usize) -> Result<Self, SlabError> {
        if (mem as usize) & (core::mem::align_of::<T>() - 1) != 0 {
            return Err(SlabError::BadBaseAlignment);
        }
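        // One stride per element, never smaller than the alignment (and
        // non-zero even for zero-sized types). Reserve a worst-case bitmap of
        // one bit per element that could fit in `size`, then carve the
        // element slots out of what remains.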
let element_size = core::cmp::max(core::mem::size_of::<T>(), core::mem::align_of::<T>());
let data_size = size - div_ceil(size / element_size, 8);
let data = mem as *mut MaybeUninit<T>;
let num_elems = data_size / element_size;
let bitmap_size = div_ceil(num_elems, 8);
        let bitmap = {
            let base = unsafe { mem.add(size - bitmap_size) as *mut AtomicU8 };
            // Zero the bitmap (all slots free) before forming a reference to it.
            for i in 0..bitmap_size {
                unsafe { base.add(i).write(AtomicU8::new(0)) };
            }
            unsafe { core::slice::from_raw_parts_mut(base, bitmap_size) }
        };
Ok(Self {
_data: PhantomData,
num_elems,
bitmap,
data,
})
}
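    /// First-fit scan over the bitmap; the atomic test-and-set makes each
    /// claim race-free, so concurrent callers never receive the same slot.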
fn allocate_slot(&self) -> Option<usize> {
for i in 0..self.num_elems {
if let Some(false) = atomic_bts(self.bitmap, i) {
return Some(i);
}
}
None
}
    fn deallocate_slot(&self, slot: usize) {
        // The slot being freed must currently be marked allocated; anything
        // else indicates a double free or a foreign pointer.
        let was_set = atomic_btc(self.bitmap, slot);
        debug_assert_eq!(Some(true), was_set);
    }
    fn allocate_raw(&self) -> Result<&mut MaybeUninit<T>, SlabAllocError> {
        let slot = self.allocate_slot().ok_or(SlabAllocError::HeapExhausted)?;
        Ok(unsafe { &mut *self.data.add(slot) })
    }
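    /// # Safety
    ///
    /// `elm` must point into this allocator's region, at a slot previously
    /// returned by `allocate_raw` and not yet deallocated.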
unsafe fn deallocate_raw(&self, elm: *mut T) {
let slot = elm.offset_from(self.data as *const T);
if slot < 0 || (slot as usize) >= self.num_elems {
panic!("Deallocate given an element that does not belong to us!");
}
self.deallocate_slot(slot as usize);
}
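    /// Moves `item` into a free slot, returning an RAII handle that drops
    /// the value and releases the slot when it goes out of scope.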
    pub fn allocate<'a>(&'a self, item: T) -> Result<SlabAlloc<'a, T>, SlabAllocError> {
        let slot = self.allocate_raw()?;
        // `MaybeUninit::write` moves `item` in and returns `&mut T`.
        let elem = slot.write(item);
        Ok(SlabAlloc(self, NonNull::from(elem)))
    }
}
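/// An owning handle to a slab-allocated `T`. It dereferences to the value
/// and returns the slot to its allocator on drop.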
#[derive(Debug)]
pub struct SlabAlloc<'a, T>(&'a SlabAllocator<T>, NonNull<T>);
impl<T> Drop for SlabAlloc<'_, T> {
fn drop(&mut self) {
unsafe {
core::ptr::drop_in_place(self.1.as_ptr());
self.0.deallocate_raw(self.1.as_ptr())
};
}
}
impl<T> Deref for SlabAlloc<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { self.1.as_ref() }
}
}
impl<T> DerefMut for SlabAlloc<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.1.as_mut() }
}
}
#[cfg(test)]
mod test {
extern crate std;
use core::alloc::Layout;
use std::vec;
use std::vec::Vec;
#[test]
fn test_atomic_bt() {
use super::*;
let arr = vec![AtomicU8::new(0), AtomicU8::new(0)];
assert_eq!(Some(false), atomic_bts(&arr, 0));
assert_eq!(Some(true), atomic_bts(&arr, 0));
assert_eq!(Some(true), atomic_btc(&arr, 0));
assert_eq!(Some(false), atomic_bts(&arr, 0));
assert_eq!(Some(true), atomic_btc(&arr, 0));
assert_eq!(Some(false), atomic_bts(&arr, 1));
assert_eq!(Some(false), atomic_bts(&arr, 2));
assert_eq!(Some(true), atomic_bts(&arr, 2));
assert_eq!(Some(true), atomic_btc(&arr, 2));
assert_eq!(Some(true), atomic_bts(&arr, 1));
assert_eq!(Some(true), atomic_btc(&arr, 1));
assert_eq!(Some(false), atomic_bts(&arr, 0));
assert_eq!(Some(false), atomic_bts(&arr, 8));
assert_eq!(Some(true), atomic_bts(&arr, 8));
assert_eq!(Some(true), atomic_btc(&arr, 8));
assert_eq!(Some(true), atomic_bts(&arr, 0));
assert_eq!(Some(true), atomic_btc(&arr, 0));
assert_eq!(None, atomic_bts(&arr, 20));
}
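    #[test]
    fn test_div_ceil() {
        use super::*;
        // Pins the rounding of the ceiling-division helper, including the
        // exact-multiple case that a naive `(num + den) / den` gets wrong.
        assert_eq!(0, div_ceil(0, 8));
        assert_eq!(1, div_ceil(1, 8));
        assert_eq!(1, div_ceil(7, 8));
        assert_eq!(1, div_ceil(8, 8));
        assert_eq!(2, div_ceil(9, 8));
    }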
#[test]
fn test_basic() {
use super::*;
#[derive(Debug)]
struct Element {
data: usize,
}
let layout = Layout::from_size_align(16384, 128).unwrap();
let mem = unsafe { std::alloc::alloc(layout) };
let alloc: SlabAllocator<Element> =
SlabAllocator::new(mem as *mut MaybeUninit<u8>, layout.size()).unwrap();
let mut allocs = Vec::new();
for i in 0..32 {
allocs.push(alloc.allocate(Element { data: i as usize }).unwrap());
}
drop(allocs);
drop(alloc);
unsafe { std::alloc::dealloc(mem, layout) };
}
#[test]
fn test_small() {
use super::*;
#[derive(Debug)]
struct Element {
data: usize,
}
let layout = Layout::from_size_align(32, 128).unwrap();
let mem = unsafe { std::alloc::alloc(layout) };
let alloc: SlabAllocator<Element> =
SlabAllocator::new(mem as *mut MaybeUninit<u8>, layout.size()).unwrap();
let mut allocs = Vec::new();
        allocs.push(alloc.allocate(Element { data: 0 }).unwrap());
        allocs.push(alloc.allocate(Element { data: 1 }).unwrap());
        allocs.push(alloc.allocate(Element { data: 2 }).unwrap());
        // The fourth allocation must fail: only three 8-byte slots fit once
        // the bitmap byte is carved out of the 32-byte region.
        assert_eq!(
            SlabAllocError::HeapExhausted,
            alloc.allocate(Element { data: 3 }).unwrap_err()
        );
assert_eq!(allocs[0].data, 0);
assert_eq!(allocs[1].data, 1);
assert_eq!(allocs[2].data, 2);
drop(allocs);
drop(alloc);
unsafe { std::alloc::dealloc(mem, layout) };
}
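    #[test]
    fn test_concurrent() {
        use super::*;
        // A minimal sketch of concurrent use, assuming the `T: Send` bounds
        // on the `Send`/`Sync` impls above: scoped threads share one
        // allocator by reference and hammer the atomic bitmap.
        let layout = Layout::from_size_align(16384, 128).unwrap();
        let mem = unsafe { std::alloc::alloc(layout) };
        let alloc: SlabAllocator<usize> =
            SlabAllocator::new(mem as *mut MaybeUninit<u8>, layout.size()).unwrap();
        std::thread::scope(|s| {
            for _ in 0..4 {
                s.spawn(|| {
                    for i in 0..64usize {
                        let a = alloc.allocate(i).unwrap();
                        assert_eq!(*a, i);
                        // `a` drops here, returning the slot to the pool.
                    }
                });
            }
        });
        drop(alloc);
        unsafe { std::alloc::dealloc(mem, layout) };
    }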
}