use core::{
alloc::Layout,
ptr::{self, NonNull},
sync::atomic::{AtomicPtr, Ordering},
};
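/// A lock-free pool of fixed-size memory blocks.
///
/// Freed blocks form an intrusive singly linked free list (a Treiber stack):
/// the first word of every free block stores the pointer to the next free
/// block. Memory that has never been handed out is carved off a bump
/// pointer, `uninit`, until it reaches `edge`.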
pub struct Pool {
    /// Block size in bytes.
    size: usize,
    /// Address just past the last block of the pool.
    edge: *mut u8,
    /// Head of the intrusive free list of deallocated blocks.
    free: AtomicPtr<u8>,
    /// Bump pointer into the never-yet-allocated tail of the pool.
    uninit: AtomicPtr<u8>,
}
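// SAFETY: `free` and `uninit` are only accessed through atomic operations,
// and `size` and `edge` are never mutated after construction.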
unsafe impl Sync for Pool {}
impl Pool {
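    /// Creates a new pool of `capacity` blocks of `size` bytes each,
    /// starting at `address`. The caller is responsible for ensuring that
    /// the range `address..address + size * capacity` is valid memory owned
    /// exclusively by this pool.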
pub const fn new(address: usize, size: usize, capacity: usize) -> Self {
Self {
size,
edge: (address + size * capacity) as *mut u8,
free: AtomicPtr::new(ptr::null_mut()),
uninit: AtomicPtr::new(address as *mut u8),
}
}
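    /// Returns the block size of this pool in bytes.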
#[inline]
pub fn size(&self) -> usize {
self.size
}
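    /// Allocates a block, trying the free list first and falling back to
    /// the uninitialized region. Returns `None` when the pool is exhausted.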
pub fn allocate(&self) -> Option<NonNull<u8>> {
unsafe { self.alloc_free().or_else(|| self.alloc_uninit()) }
}
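    /// Returns a block to the pool by pushing it onto the free list.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a block previously returned by
    /// [`Pool::allocate`] on this same pool, and it must not be used after
    /// this call.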
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn deallocate(&self, ptr: NonNull<u8>) {
loop {
let curr = self.free.load(Ordering::Acquire);
            // Store the current head in the first word of the freed block;
            // it becomes the new head's `next` link.
            unsafe { ptr::write(ptr.as_ptr().cast::<*mut u8>(), curr) };
            let next = ptr.as_ptr();
if self
.free
.compare_exchange_weak(curr, next, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
break;
}
}
}
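    /// Pops a block off the free list (a lock-free Treiber stack pop).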
#[allow(clippy::cast_ptr_alignment)]
unsafe fn alloc_free(&self) -> Option<NonNull<u8>> {
loop {
let curr = self.free.load(Ordering::Acquire);
if curr.is_null() {
break None;
}
            // The first word of a free block holds the pointer to the next
            // free block.
            let next = unsafe { curr.cast::<*mut u8>().read() };
if self
.free
.compare_exchange_weak(curr, next, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
break Some(unsafe { NonNull::new_unchecked(curr) });
}
}
}
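    /// Carves a fresh block off the never-yet-allocated tail of the pool by
    /// bumping the `uninit` pointer towards `edge`.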
unsafe fn alloc_uninit(&self) -> Option<NonNull<u8>> {
loop {
            // Relaxed ordering suffices: the returned memory is
            // uninitialized, so the pointer bump publishes no data.
            let curr = self.uninit.load(Ordering::Relaxed);
if curr == self.edge {
break None;
}
let next = unsafe { curr.add(self.size) };
if self
.uninit
.compare_exchange_weak(curr, next, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
{
break Some(unsafe { NonNull::new_unchecked(curr) });
}
}
}
}
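/// A checker of whether a layout or a pointer fits into a given [`Pool`].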
pub trait Fits: Copy {
fn fits(self, pool: &Pool) -> bool;
}
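// A layout fits into a pool if its size does not exceed the pool's block size.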
impl Fits for &Layout {
#[inline]
fn fits(self, pool: &Pool) -> bool {
self.size() <= pool.size
}
}
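// A pointer fits into a pool if it lies below the pool's upper bound, `edge`;
// this assumes pools are checked in ascending address order.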
impl Fits for NonNull<u8> {
#[inline]
fn fits(self, pool: &Pool) -> bool {
        self.as_ptr() < pool.edge
}
}