pub mod cache;
pub mod page;
pub mod size_class;
pub use page::SlabPageHeader;
pub use size_class::SizeClass;
use cache::{CacheDeallocResult, SlabCache};
use core::alloc::Layout;
use core::ptr::NonNull;
use spin::Mutex as SpinMutex;
use crate::error::{AllocError, AllocResult};
/// Outcome of an allocation request against a slab.
pub enum SlabAllocResult {
/// The object was carved out of an existing slab; pointer is ready to use.
Allocated(NonNull<u8>),
/// The matching size-class cache is empty: the caller should map `pages`
/// fresh pages and donate them via `add_slab` for `size_class`, then retry.
NeedsSlab { size_class: SizeClass, pages: usize },
}
/// Outcome of a local (owner-CPU) deallocation at the slab level.
pub enum SlabDeallocResult {
/// Object returned to its cache; nothing further to do.
Done,
/// The slab starting at `base` (spanning `pages` pages) became entirely
/// free and can be unmapped / returned to the page allocator by the caller.
FreeSlab { base: usize, pages: usize },
}
/// Outcome of a deallocation routed through the pool layer.
pub enum SlabPoolDeallocResult {
/// Freed on the local fast path; nothing further to do.
Done,
/// The object belonged to another CPU and was queued on that owner's
/// remote-free list instead of being freed immediately.
RemoteQueued,
/// A slab at `base` (spanning `pages` pages) became entirely free.
FreeSlab { base: usize, pages: usize },
}
/// Object-safe interface a per-CPU slab exposes to the pool layer.
pub trait SlabTrait: Sync {
    /// Index of the CPU that owns this slab.
    fn cpu_id(&self) -> usize;
    /// Size in bytes of the pages backing this slab.
    fn page_size(&self) -> usize;
    /// Attempts to allocate an object satisfying `layout`.
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult>;
    /// Donates the mapped region `[base, base + bytes)` to `size_class`.
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize);
    /// Frees an object on the owning CPU's fast path.
    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult;
    /// Queues an object freed on a different CPU onto this owner's
    /// remote-free list.
    fn dealloc_remote(&self, ptr: NonNull<u8>) {
        let owner = self.cpu_id();
        let owner_cpu = u16::try_from(owner).expect("CPU id exceeds slab owner range");
        // SAFETY: presumably `ptr` must point into a slab page owned by this
        // CPU — NOTE(review): confirm against `remote_free_object`'s contract.
        unsafe { SlabPageHeader::remote_free_object(ptr, owner_cpu, self.page_size()) };
    }
}
/// A collection of per-CPU slabs with cross-CPU deallocation routing.
pub trait SlabPoolTrait: Sync {
    /// Slab belonging to the CPU currently executing.
    fn current_slab(&self) -> &dyn SlabTrait;
    /// Slab belonging to the CPU at index `cpu_idx`.
    fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait;
    /// CPU id of the executing CPU, as reported by its slab.
    fn current_cpu_id(&self) -> usize {
        self.current_slab().cpu_id()
    }
    /// Allocates from the current CPU's slab.
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        self.current_slab().alloc(layout)
    }
    /// Donates backing memory to the current CPU's slab.
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        self.current_slab().add_slab(size_class, base, bytes)
    }
    /// Routes a free to the local fast path when `owner_cpu` is the current
    /// CPU, otherwise to the owner's remote queue.
    fn dealloc(&self, ptr: NonNull<u8>, layout: Layout, owner_cpu: usize) -> SlabPoolDeallocResult {
        // Cross-CPU free: hand the object back to its owner's remote queue.
        if owner_cpu != self.current_cpu_id() {
            self.owner_slab(owner_cpu).dealloc_remote(ptr);
            return SlabPoolDeallocResult::RemoteQueued;
        }
        // Local free: lift the slab-level result to the pool-level one.
        match self.current_slab().dealloc_local(ptr, layout) {
            SlabDeallocResult::Done => SlabPoolDeallocResult::Done,
            SlabDeallocResult::FreeSlab { base, pages } => {
                SlabPoolDeallocResult::FreeSlab { base, pages }
            }
        }
    }
}
/// Closure-based convenience accessors layered on [`SlabPoolTrait`].
pub trait SlabPoolExt: SlabPoolTrait {
    /// Runs `f` with the current CPU's slab.
    fn with_current_slab<R>(&self, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
        let slab = self.current_slab();
        f(slab)
    }
    /// Runs `f` with the slab owned by CPU `cpu_idx`.
    fn with_owner_slab<R>(&self, cpu_idx: usize, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
        let slab = self.owner_slab(cpu_idx);
        f(slab)
    }
}
/// Blanket impl: every pool (sized or not) gains the helpers for free.
impl<T: ?Sized + SlabPoolTrait> SlabPoolExt for T {}
/// Per-size-class object caches for one CPU.
///
/// `PAGE_SIZE` is the backing page granularity in bytes (default 4 KiB).
pub struct SlabAllocator<const PAGE_SIZE: usize = 0x1000> {
// One cache per size class, indexed by `SizeClass::index()`.
caches: [SlabCache; SizeClass::COUNT],
}
/// A [`SlabAllocator`] bound to a single CPU and guarded by a spinlock.
pub struct PerCpuSlab<const PAGE_SIZE: usize = 0x1000> {
// Owning CPU, stored narrow; widened to `usize` by `cpu_id()`.
cpu_id: u16,
// Lock-protected allocator state; remote frees bypass this lock.
inner: SpinMutex<SlabAllocator<PAGE_SIZE>>,
}
/// Statically sized pool of `N` per-CPU slabs plus a callback that reports
/// the executing CPU's index into `slabs`.
pub struct StaticSlabPool<const PAGE_SIZE: usize = 0x1000, const N: usize = 1> {
slabs: [PerCpuSlab<PAGE_SIZE>; N],
// Must return an index in `0..N`; an out-of-range value panics on indexing.
current_cpu_id: fn() -> usize,
}
impl<const PAGE_SIZE: usize> PerCpuSlab<PAGE_SIZE> {
    /// Builds an empty slab owned by `cpu_id`.
    pub const fn new(cpu_id: u16) -> Self {
        Self {
            cpu_id,
            inner: SpinMutex::new(SlabAllocator::new()),
        }
    }
    /// Discards all cached state by installing a brand-new allocator.
    pub fn reset(&self) {
        let mut guard = self.inner.lock();
        *guard = SlabAllocator::new();
    }
    /// Owning CPU index, widened to `usize`.
    pub const fn cpu_id(&self) -> usize {
        self.cpu_id as usize
    }
    /// Allocates under the lock; see [`SlabAllocator::alloc`].
    pub fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        let mut inner = self.inner.lock();
        inner.alloc(layout)
    }
    /// Donates `[base, base + bytes)` to the cache for `size_class`,
    /// tagged with this slab's CPU for remote-free routing.
    pub fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        let mut inner = self.inner.lock();
        inner.add_slab(size_class, base, bytes, self.cpu_id);
    }
    /// Frees an object on the owning CPU's locked fast path.
    pub fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        let mut inner = self.inner.lock();
        inner.dealloc(ptr, layout)
    }
    /// Queues an object freed on another CPU; note this does not take the
    /// spinlock.
    pub fn dealloc_remote(&self, ptr: NonNull<u8>) {
        // SAFETY: presumably `ptr` must lie inside a slab page owned by
        // `self.cpu_id` — NOTE(review): confirm `remote_free_object`'s contract.
        unsafe { SlabPageHeader::remote_free_object(ptr, self.cpu_id, PAGE_SIZE) };
    }
}
impl<const PAGE_SIZE: usize, const N: usize> StaticSlabPool<PAGE_SIZE, N> {
    /// Assembles a pool from pre-built per-CPU slabs and a callback that
    /// reports the executing CPU's index (must be in `0..N` when used).
    pub const fn new(slabs: [PerCpuSlab<PAGE_SIZE>; N], current_cpu_id: fn() -> usize) -> Self {
        Self { slabs, current_cpu_id }
    }
}
impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
pub const fn new() -> Self {
Self {
caches: [
SlabCache::new(SizeClass::Bytes8),
SlabCache::new(SizeClass::Bytes16),
SlabCache::new(SizeClass::Bytes32),
SlabCache::new(SizeClass::Bytes64),
SlabCache::new(SizeClass::Bytes128),
SlabCache::new(SizeClass::Bytes256),
SlabCache::new(SizeClass::Bytes512),
SlabCache::new(SizeClass::Bytes1024),
SlabCache::new(SizeClass::Bytes2048),
],
}
}
}
impl<const PAGE_SIZE: usize> Default for SlabAllocator<PAGE_SIZE> {
fn default() -> Self {
Self::new()
}
}
impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
    /// Allocates an object satisfying `layout` from the matching size class.
    ///
    /// Returns `SlabAllocResult::NeedsSlab` when the cache has no free
    /// object, so the caller can map `pages` fresh pages and hand them back
    /// through [`Self::add_slab`], then retry.
    ///
    /// # Errors
    /// `AllocError::InvalidParam` if `layout` does not fit any size class.
    pub fn alloc(&mut self, layout: Layout) -> AllocResult<SlabAllocResult> {
        let sc = SizeClass::from_layout(layout).ok_or(AllocError::InvalidParam)?;
        let cache = &mut self.caches[sc.index()];
        match cache.alloc_object::<PAGE_SIZE>() {
            Some(addr) => {
                // Checked conversion instead of the previous (uncommented)
                // `NonNull::new_unchecked`: a zero address coming out of the
                // cache would be a cache bug, and a panic here is diagnosable
                // whereas a forged `NonNull` is undefined behavior. The cost
                // is a single predictable branch.
                let ptr =
                    NonNull::new(addr as *mut u8).expect("slab cache returned a null object");
                Ok(SlabAllocResult::Allocated(ptr))
            }
            None => Ok(SlabAllocResult::NeedsSlab {
                size_class: sc,
                pages: sc.slab_pages(PAGE_SIZE),
            }),
        }
    }
    /// Returns an object to its size-class cache.
    ///
    /// # Panics
    /// If `layout` maps to no size class — such a pointer can never have
    /// been produced by `alloc`, so this indicates a caller bug.
    pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        let sc = SizeClass::from_layout(layout).expect("layout exceeds slab size");
        let cache = &mut self.caches[sc.index()];
        match cache.dealloc_object::<PAGE_SIZE>(ptr.as_ptr() as usize) {
            CacheDeallocResult::Done => SlabDeallocResult::Done,
            CacheDeallocResult::FreeSlab { base, pages } => {
                SlabDeallocResult::FreeSlab { base, pages }
            }
        }
    }
    /// Feeds the region `[base, base + bytes)` to the cache for `size_class`,
    /// tagging its pages with `owner_cpu` for remote-free routing.
    pub fn add_slab(&mut self, size_class: SizeClass, base: usize, bytes: usize, owner_cpu: u16) {
        self.caches[size_class.index()].add_slab(base, bytes, owner_cpu);
    }
}
/// Exposes [`PerCpuSlab`] through the object-safe [`SlabTrait`] interface.
///
/// Delegating bodies use fully-qualified inherent-method paths so they can
/// never resolve back into this trait and recurse.
impl<const PAGE_SIZE: usize> SlabTrait for PerCpuSlab<PAGE_SIZE> {
    fn cpu_id(&self) -> usize {
        // Same widening the inherent accessor performs, reading the field
        // directly (we are in the defining module).
        usize::from(self.cpu_id)
    }
    fn page_size(&self) -> usize {
        PAGE_SIZE
    }
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        PerCpuSlab::alloc(self, layout)
    }
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        PerCpuSlab::add_slab(self, size_class, base, bytes)
    }
    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        PerCpuSlab::dealloc_local(self, ptr, layout)
    }
}
impl<const PAGE_SIZE: usize, const N: usize> SlabPoolTrait for StaticSlabPool<PAGE_SIZE, N> {
    /// Picks the slab for the CPU reported by the `current_cpu_id` callback.
    ///
    /// # Panics
    /// If the callback returns an index outside `0..N`.
    fn current_slab(&self) -> &dyn SlabTrait {
        let idx = (self.current_cpu_id)();
        &self.slabs[idx]
    }
    /// Slab owned by `cpu_idx`; panics if `cpu_idx >= N`.
    fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait {
        &self.slabs[cpu_idx]
    }
}