use alloc::allocator::Layout;
use core::nonzero::NonZero;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::*;
/// A lock-free, fixed-block memory pool.
///
/// Memory is served from two sources: an intrusive singly-linked
/// free-list of previously returned blocks (`free`), and a bump
/// pointer (`head`) that carves never-used blocks out of the region
/// ending at `edge`. All blocks are exactly `size` bytes.
pub struct Pool {
// Head of the intrusive free-list; null when no block has been freed.
// The "next" link of each list node is stored in the block's own bytes.
free: AtomicPtr<u8>,
// Bump pointer into the untouched tail of the region; advances by
// `size` on each fresh allocation until it reaches `edge`.
head: AtomicPtr<u8>,
// One-past-the-end of the pool's region; `head == edge` means the
// bump area is exhausted.
edge: *mut u8,
// Fixed size in bytes of every block served by this pool.
size: usize,
}
/// Predicate for matching a value against a [`Pool`].
///
/// Implemented for `&Layout` (does a requested allocation fit within
/// the pool's block size?) and for `*mut u8` (does a pointer being
/// deallocated lie within this pool's address range?).
pub trait Fits: Copy {
/// Returns `true` if `self` is served by / belongs to `pool`.
fn fits(self, pool: &Pool) -> bool;
}
/// A layout fits a pool when the requested size does not exceed the
/// pool's fixed block size. Note that alignment is not examined here —
/// presumably block placement guarantees it; confirm against the caller.
impl<'a> Fits for &'a Layout {
    #[inline(always)]
    fn fits(self, pool: &Pool) -> bool {
        pool.size >= self.size()
    }
}
/// A pointer fits a pool when it lies strictly below the pool's upper
/// edge. Only the upper bound is tested — presumably pools are probed
/// in ascending address order so the lower bound is implied by the
/// previous pool's edge; verify against the call site.
impl Fits for *mut u8 {
    #[inline(always)]
    fn fits(self, pool: &Pool) -> bool {
        pool.edge > self
    }
}
impl Pool {
/// Creates a pool of `capacity` blocks of `size` bytes each, whose
/// region starts at the *relative* address `offset`. `head` and `edge`
/// remain relative until [`Pool::init`] rebases them onto real memory.
#[inline(always)]
pub const fn new(offset: usize, size: usize, capacity: usize) -> Self {
Self {
// No block has ever been freed, so the free-list starts empty.
free: AtomicPtr::new(ptr::null_mut()),
head: AtomicPtr::new(offset as *mut u8),
edge: (offset + size * capacity) as *mut u8,
size,
}
}
/// Rebases the pool onto real memory: adds the address of `start` to
/// the relative `head` and `edge` established in [`Pool::new`].
///
/// # Safety
///
/// Must be called before any allocation, exactly once, and the memory
/// at `start` must cover the whole pool region. NOTE(review): neither
/// invariant is checked here — callers must uphold both.
#[inline(always)]
pub unsafe fn init(&mut self, start: &mut usize) {
let offset = start as *mut _ as usize;
let head = self.head.get_mut();
*head = head.add(offset);
self.edge = self.edge.add(offset);
}
/// Returns the fixed block size of this pool, in bytes.
#[inline(always)]
pub fn size(&self) -> usize {
self.size
}
/// Allocates one block: pops from the free-list first, then falls back
/// to bumping `head`. Returns `None` when the pool is exhausted.
#[inline(always)]
pub fn alloc(&self) -> Option<NonZero<*mut u8>> {
unsafe { self.alloc_free().or_else(|| self.alloc_head()) }
}
/// Returns a block to the pool by pushing it onto the lock-free
/// free-list (Treiber-stack push).
///
/// # Safety
///
/// `ptr` must have been obtained from [`Pool::alloc`] on this same
/// pool and must not be used after this call.
#[inline(always)]
pub unsafe fn dealloc(&self, ptr: *mut u8) {
loop {
let head = self.free.load(Relaxed);
// Store the current list head inside the freed block itself
// (intrusive "next" link) ...
ptr::write(ptr as *mut *mut u8, head);
// ... then publish the block with Release so that write is
// visible to the Acquire load in `alloc_free`. On CAS failure
// another thread changed `free`; retry with the new head.
if self.free.compare_and_swap(head, ptr, Release) == head {
break;
}
}
}
/// Pops one block from the free-list, or `None` if it is empty.
///
/// NOTE(review): this is a classic Treiber-stack pop and is subject to
/// the ABA problem — if, between the load and the CAS, another context
/// pops `head`, pops its successor, and pushes `head` back, the CAS
/// still succeeds and installs a stale `next`. Confirm the target's
/// execution model (e.g. single core, no preemption between these
/// lines) rules this interleaving out.
#[inline(always)]
unsafe fn alloc_free(&self) -> Option<NonZero<*mut u8>> {
loop {
// Acquire pairs with the Release CAS in `dealloc`, ensuring the
// intrusive "next" link written there is visible below.
let head = self.free.load(Acquire);
if head.is_null() {
break None;
}
// Read the "next" link stored in the block's first word.
let next = ptr::read(head as *const *mut u8);
if self.free.compare_and_swap(head, next, Relaxed) == head {
// CAS succeeded: `head` is now exclusively ours; non-null was
// checked above, so `new_unchecked` is sound.
break Some(NonZero::new_unchecked(head));
}
}
}
/// Carves a fresh block off the bump region, or `None` once `head`
/// has reached `edge`.
#[inline(always)]
unsafe fn alloc_head(&self) -> Option<NonZero<*mut u8>> {
loop {
let current = self.head.load(Relaxed);
// Bump area exhausted; only the free-list can serve now.
if current == self.edge {
break None;
}
let new = current.add(self.size);
// Relaxed is sufficient here: the bump pointer only ever moves
// forward and the memory it hands out has never been shared.
if self.head.compare_and_swap(current, new, Relaxed) == current {
// `current != edge` and the region base is non-null after
// `init`, so the pointer is non-null here.
break Some(NonZero::new_unchecked(current));
}
}
}
}