use std::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use std::cell::UnsafeCell;
use std::mem::size_of;
use std::ptr as StdPtr;
use seize::Collector;
use crate::internode::InternodeNode;
use crate::leaf15::LeafNode15;
use crate::policy::LeafPolicy;
/// Granularity of pooled allocations; every bucket is a multiple of this and
/// every bucket is aligned to exactly this.
const CACHE_LINE: usize = 64;
/// Number of distinct size classes (1..=20 cache lines, i.e. up to 1280 bytes).
const MAX_SIZE_CLASSES: usize = 20;
/// Maximum number of free blocks retained per bucket; beyond this, freed
/// blocks go straight back to the global allocator.
const POOL_CAPACITY: usize = 512;
/// Number of fresh blocks requested from the global allocator when a bucket
/// runs empty.
const REFILL_BATCH: usize = 128;
/// Maps `layout` to its 1-based pool size class (number of cache lines).
///
/// Returns `None` when the pool cannot serve the layout — zero-sized, larger
/// than the biggest bucket, or over-aligned — so callers fall back to the
/// global allocator. Rejecting `align > CACHE_LINE` is load-bearing: buckets
/// are only `CACHE_LINE`-aligned (see `bucket_layout`), so serving a stricter
/// layout from a bucket would hand out under-aligned memory.
#[cfg(test)]
#[inline]
const fn size_class(layout: Layout) -> Option<usize> {
    if layout.align() > CACHE_LINE {
        return None;
    }
    let nl: usize = layout.size().div_ceil(CACHE_LINE);
    if nl == 0 || nl > MAX_SIZE_CLASSES {
        None
    } else {
        Some(nl)
    }
}
/// Compile-time size class (cache-line count) for node type `T`.
///
/// Panics during const evaluation when `T` is zero-sized or does not fit in
/// the largest pool bucket, so an oversized node type fails the build rather
/// than misbehaving at runtime.
#[inline(always)]
const fn node_size_class<T>() -> usize {
    let lines: usize = size_of::<T>().div_ceil(CACHE_LINE);
    if lines == 0 || lines > MAX_SIZE_CLASSES {
        panic!("node type too large for pool");
    }
    lines
}
/// Layout of a pool bucket spanning `nl` cache lines.
///
/// # Safety
/// `nl` must be in `1..=MAX_SIZE_CLASSES`; all callers derive it from
/// `size_class` / `node_size_class`, which enforce that range.
#[inline]
unsafe fn bucket_layout(nl: usize) -> Layout {
    debug_assert!(nl > 0 && nl <= MAX_SIZE_CLASSES);
    // SAFETY: CACHE_LINE (64) is a nonzero power of two, and nl * CACHE_LINE
    // is at most MAX_SIZE_CLASSES * CACHE_LINE = 1280, so the size cannot
    // overflow isize when rounded up to the alignment.
    unsafe { Layout::from_size_align_unchecked(nl * CACHE_LINE, CACHE_LINE) }
}
/// Intrusive LIFO free list: each free block stores the pointer to the next
/// free block in its own first word, so the list needs no side storage.
struct Freelist {
    // Top of the stack; null when the list is empty.
    head: *mut u8,
    // Number of blocks currently cached (compared against POOL_CAPACITY).
    count: usize,
}
impl Freelist {
    /// Creates an empty free list.
    const fn new() -> Self {
        Self {
            head: StdPtr::null_mut(),
            count: 0,
        }
    }
    /// Pops the most recently pushed block, or `None` when empty.
    #[inline(always)]
    const fn pop(&mut self) -> Option<*mut u8> {
        if self.head.is_null() {
            return None;
        }
        let ptr: *mut u8 = self.head;
        // SAFETY: every block on the list was inserted via `push`, which wrote
        // a valid next-link into the block's first word; that word is
        // initialized memory owned by this list.
        self.head = unsafe { StdPtr::read(ptr.cast::<*mut u8>()) };
        self.count -= 1;
        Some(ptr)
    }
    /// Pushes `ptr` onto the list, storing the previous head in its first word.
    ///
    /// # Safety
    /// `ptr` must point to a writable allocation of at least pointer size that
    /// this list takes ownership of (freshly allocated, or freed by a caller
    /// that will not touch it again).
    #[inline(always)]
    const unsafe fn push(&mut self, ptr: *mut u8) {
        // SAFETY: per this function's contract the first word behind `ptr` is
        // writable; it becomes the intrusive next-link.
        unsafe { StdPtr::write(ptr.cast::<*mut u8>(), self.head) };
        self.head = ptr;
        self.count += 1;
    }
    /// Acquires up to `REFILL_BATCH` fresh blocks of `layout` from the global
    /// allocator. Stops early (without panicking) on allocation failure; the
    /// caller decides what an empty list means.
    #[cold]
    fn refill(&mut self, layout: Layout) {
        for _ in 0..REFILL_BATCH {
            // SAFETY: callers pass a bucket layout (see `bucket_layout`),
            // whose size is at least one cache line, i.e. nonzero.
            let ptr: *mut u8 = unsafe { alloc(layout) };
            if ptr.is_null() {
                break;
            }
            // SAFETY: `ptr` is non-null, freshly allocated with `layout`, and
            // at least CACHE_LINE bytes — large enough for the next-link.
            unsafe { self.push(ptr) };
        }
    }
    /// Returns every cached block to the global allocator.
    fn drain(&mut self, layout: Layout) {
        while let Some(ptr) = self.pop() {
            // SAFETY: every block on this list came from the global allocator
            // with this bucket's `layout` (see `refill` / the dealloc paths).
            unsafe { dealloc(ptr, layout) };
        }
    }
    /// True while the list may accept another block without exceeding
    /// `POOL_CAPACITY`.
    #[inline(always)]
    const fn has_capacity(&self) -> bool {
        self.count < POOL_CAPACITY
    }
}
/// Per-thread cache of free blocks: one intrusive free list per size class.
struct ThreadPool {
    // buckets[i] serves allocations of (i + 1) cache lines.
    buckets: [Freelist; MAX_SIZE_CLASSES],
}
impl ThreadPool {
    /// Creates a pool with every bucket empty.
    const fn new() -> Self {
        const EMPTY: Freelist = Freelist::new();
        Self {
            buckets: [EMPTY; MAX_SIZE_CLASSES],
        }
    }
    /// Allocates for an arbitrary `layout`: from a bucket when the layout has
    /// a size class, otherwise straight from the global allocator. May return
    /// null on exhaustion; callers turn that into `handle_alloc_error`.
    #[cfg(test)]
    #[inline]
    fn alloc(&mut self, layout: Layout) -> *mut u8 {
        let Some(nl) = size_class(layout) else {
            // NOTE(review): a zero-sized `layout` would reach `alloc` here,
            // which is UB per GlobalAlloc's contract — presumably callers
            // never pass ZST layouts; confirm at the call sites.
            // SAFETY: layouts without a size class bypass the pool entirely.
            return unsafe { alloc(layout) };
        };
        self.alloc_from_bucket(nl)
    }
    /// Pops a block from bucket `nl`, refilling from the global allocator
    /// when the bucket is empty. Returns null only if the refill failed.
    #[inline(always)]
    fn alloc_from_bucket(&mut self, nl: usize) -> *mut u8 {
        // SAFETY: `nl` comes from size_class / node_size_class, both of which
        // guarantee 1 <= nl <= MAX_SIZE_CLASSES, so nl - 1 is in bounds.
        let bucket: &mut Freelist = unsafe { self.buckets.get_unchecked_mut(nl - 1) };
        // SAFETY: same bound on `nl` satisfies bucket_layout's contract.
        let bl: Layout = unsafe { bucket_layout(nl) };
        if let Some(ptr) = bucket.pop() {
            return ptr;
        }
        bucket.refill(bl);
        bucket.pop().unwrap_or(StdPtr::null_mut())
    }
    /// Returns `ptr` to the pool, or to the global allocator for layouts the
    /// pool does not manage.
    ///
    /// # Safety
    /// `ptr` must have been allocated with this exact `layout` through this
    /// module's alloc paths and must not be used afterwards.
    #[cfg(test)]
    #[inline]
    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        let Some(nl) = size_class(layout) else {
            // SAFETY: non-pooled layouts were served by the global allocator
            // with `layout` itself (see `alloc` above).
            unsafe { dealloc(ptr, layout) };
            return;
        };
        // SAFETY: `nl` is the size class `ptr` was allocated under.
        unsafe { self.dealloc_to_bucket(ptr, nl) };
    }
    /// Caches `ptr` in bucket `nl`, or frees it when the bucket is full.
    ///
    /// # Safety
    /// `ptr` must be a block of size class `nl` (1 <= nl <= MAX_SIZE_CLASSES)
    /// previously returned by `alloc_from_bucket` with the same `nl`, and it
    /// must not be used afterwards.
    #[inline(always)]
    unsafe fn dealloc_to_bucket(&mut self, ptr: *mut u8, nl: usize) {
        // SAFETY: `nl` is in 1..=MAX_SIZE_CLASSES per this fn's contract.
        let bucket: &mut Freelist = unsafe { self.buckets.get_unchecked_mut(nl - 1) };
        if bucket.has_capacity() {
            // SAFETY: `ptr` is a live block (>= one cache line) that the pool
            // now owns exclusively.
            unsafe { bucket.push(ptr) };
        } else {
            // SAFETY: `ptr` was allocated with this bucket's layout.
            unsafe { dealloc(ptr, bucket_layout(nl)) };
        }
    }
}
impl Drop for ThreadPool {
    /// Hands every cached block back to the global allocator when the
    /// owning thread's pool is torn down.
    fn drop(&mut self) {
        // Pair each bucket with its 1-based size class.
        for (nl, bucket) in (1..).zip(self.buckets.iter_mut()) {
            // SAFETY: `nl` ranges over 1..=MAX_SIZE_CLASSES in lockstep with
            // the buckets, which is exactly bucket_layout's precondition.
            bucket.drain(unsafe { bucket_layout(nl) });
        }
    }
}
thread_local! {
    // One pool per thread. The UnsafeCell is sound here because a thread-local
    // is only reachable from its owning thread, and every access site takes a
    // short-lived exclusive borrow inside a single `with` closure that does
    // not re-enter POOL (no pool method calls back into these wrappers).
    static POOL: UnsafeCell<ThreadPool> = const { UnsafeCell::new(ThreadPool::new()) };
}
/// Allocates memory for `layout` from the current thread's pool, falling back
/// to the global allocator for layouts outside the pool's size classes.
/// Aborts via `handle_alloc_error` when memory is exhausted.
#[cfg(test)]
#[inline]
#[must_use]
pub fn pool_alloc(layout: Layout) -> *mut u8 {
    // Mirror the check in pool_dealloc / pool_teardown_dealloc: buckets are
    // only CACHE_LINE-aligned, so a stricter layout must never be served from
    // the pool. Asserting on the alloc side catches the misuse where the
    // under-aligned memory would actually be produced.
    debug_assert!(
        layout.align() <= CACHE_LINE,
        "pool_alloc: layout alignment ({}) exceeds CACHE_LINE ({})",
        layout.align(),
        CACHE_LINE,
    );
    POOL.with(|cell: &UnsafeCell<ThreadPool>| {
        // SAFETY: the thread-local is only touched by this thread, and the
        // exclusive borrow ends with this closure.
        let pool: &mut ThreadPool = unsafe { &mut *cell.get() };
        let ptr: *mut u8 = pool.alloc(layout);
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        ptr
    })
}
/// Returns `ptr`, allocated via [`pool_alloc`] with `layout`, to the current
/// thread's pool.
///
/// # Safety
/// `ptr` must have been obtained from `pool_alloc` with the same `layout`,
/// and must not be used afterwards.
#[cfg(test)]
#[inline]
pub unsafe fn pool_dealloc(ptr: *mut u8, layout: Layout) {
    debug_assert!(
        layout.align() <= CACHE_LINE,
        "pool_dealloc: layout alignment ({}) exceeds CACHE_LINE ({})",
        layout.align(),
        CACHE_LINE,
    );
    POOL.with(|tls: &UnsafeCell<ThreadPool>| {
        // SAFETY: exclusive access to the thread-local is confined to this
        // closure on this thread.
        let pool: &mut ThreadPool = unsafe { &mut *tls.get() };
        // SAFETY: forwarded directly from this function's contract.
        unsafe { pool.dealloc(ptr, layout) };
    });
}
/// Allocates backing storage for one `LeafNode15<P>` from this thread's pool.
/// Aborts via `handle_alloc_error` when memory is exhausted.
#[inline]
#[must_use]
pub fn pool_alloc_leaf<P: LeafPolicy>() -> *mut u8 {
    // Size class is fixed at compile time for the concrete leaf type.
    let class: usize = const { node_size_class::<LeafNode15<P>>() };
    POOL.with(|tls: &UnsafeCell<ThreadPool>| {
        // SAFETY: thread-local; the exclusive borrow does not escape the
        // closure.
        let pool: &mut ThreadPool = unsafe { &mut *tls.get() };
        let block: *mut u8 = pool.alloc_from_bucket(class);
        if block.is_null() {
            // SAFETY: `class` came from node_size_class, which guarantees it
            // is a valid size class.
            handle_alloc_error(unsafe { bucket_layout(class) });
        }
        block
    })
}
/// Allocates backing storage for one `InternodeNode` from this thread's pool.
/// Aborts via `handle_alloc_error` when memory is exhausted.
#[inline]
#[must_use]
pub fn pool_alloc_internode() -> *mut u8 {
    // Size class is a compile-time constant for the internode type.
    const CLASS: usize = node_size_class::<InternodeNode>();
    POOL.with(|tls: &UnsafeCell<ThreadPool>| {
        // SAFETY: thread-local; the exclusive borrow does not escape the
        // closure.
        let pool: &mut ThreadPool = unsafe { &mut *tls.get() };
        let block: *mut u8 = pool.alloc_from_bucket(CLASS);
        if block.is_null() {
            // SAFETY: CLASS came from node_size_class, which guarantees it is
            // a valid size class.
            handle_alloc_error(unsafe { bucket_layout(CLASS) });
        }
        block
    })
}
/// Returns a block obtained from [`pool_alloc_leaf`] to this thread's pool.
///
/// # Safety
/// `ptr` must come from `pool_alloc_leaf::<P>` with the same `P`, and must
/// not be used afterwards.
#[inline]
pub unsafe fn pool_dealloc_leaf<P: LeafPolicy>(ptr: *mut u8) {
    let class: usize = const { node_size_class::<LeafNode15<P>>() };
    POOL.with(|tls: &UnsafeCell<ThreadPool>| {
        // SAFETY: thread-local exclusive borrow confined to this closure.
        let pool: &mut ThreadPool = unsafe { &mut *tls.get() };
        // SAFETY: `ptr` belongs to size class `class` per this fn's contract.
        unsafe { pool.dealloc_to_bucket(ptr, class) };
    });
}
/// Returns a block obtained from [`pool_alloc_internode`] to this thread's
/// pool.
///
/// # Safety
/// `ptr` must come from `pool_alloc_internode` and must not be used
/// afterwards.
#[inline]
pub unsafe fn pool_dealloc_internode(ptr: *mut u8) {
    const CLASS: usize = node_size_class::<InternodeNode>();
    POOL.with(|tls: &UnsafeCell<ThreadPool>| {
        // SAFETY: thread-local exclusive borrow confined to this closure.
        let pool: &mut ThreadPool = unsafe { &mut *tls.get() };
        // SAFETY: `ptr` belongs to size class CLASS per this fn's contract.
        unsafe { pool.dealloc_to_bucket(ptr, CLASS) };
    });
}
/// Frees `ptr` straight to the global allocator, bypassing the thread pool —
/// for teardown paths where the thread-local pool may already be destroyed.
///
/// # Safety
/// `ptr` must have been allocated with `layout` through this module's alloc
/// paths, and must not be used afterwards.
#[cfg(test)]
#[inline]
pub unsafe fn pool_teardown_dealloc(ptr: *mut u8, layout: Layout) {
    debug_assert!(
        layout.align() <= CACHE_LINE,
        "pool_teardown_dealloc: layout alignment ({}) exceeds CACHE_LINE ({})",
        layout.align(),
        CACHE_LINE,
    );
    match size_class(layout) {
        // SAFETY: pooled blocks were allocated with their bucket layout, not
        // with `layout` itself.
        Some(nl) => unsafe { dealloc(ptr, bucket_layout(nl)) },
        // SAFETY: non-pooled blocks were allocated with `layout` directly.
        None => unsafe { dealloc(ptr, layout) },
    }
}
/// Teardown variant of [`pool_dealloc_leaf`]: frees directly to the global
/// allocator, so it is usable after the thread-local pool is gone.
///
/// # Safety
/// `ptr` must come from `pool_alloc_leaf::<P>` with the same `P`, and must
/// not be used afterwards.
#[inline]
pub unsafe fn pool_teardown_dealloc_leaf<P: LeafPolicy>(ptr: *mut u8) {
    let class: usize = const { node_size_class::<LeafNode15<P>>() };
    // SAFETY: the block was allocated with exactly this bucket layout.
    unsafe { dealloc(ptr, bucket_layout(class)) };
}
/// Teardown variant of [`pool_dealloc_internode`]: frees directly to the
/// global allocator, so it is usable after the thread-local pool is gone.
///
/// # Safety
/// `ptr` must come from `pool_alloc_internode` and must not be used
/// afterwards.
#[inline]
pub unsafe fn pool_teardown_dealloc_internode(ptr: *mut u8) {
    const CLASS: usize = node_size_class::<InternodeNode>();
    // SAFETY: the block was allocated with exactly this bucket layout.
    unsafe { dealloc(ptr, bucket_layout(CLASS)) };
}
/// Drops a retired `LeafNode15<P>` in place and returns its memory to the
/// pool. The `Collector` argument is unused — presumably it exists so this
/// function's signature matches the reclaimer callback shape seize expects;
/// confirm against the registration site.
///
/// # Safety
/// `ptr` must point to a valid, initialized `LeafNode15<P>` allocated via
/// `pool_alloc_leaf::<P>`, with no remaining references to it; it must not
/// be used afterwards.
#[inline]
pub unsafe fn reclaim_leaf15<P: LeafPolicy>(ptr: *mut LeafNode15<P>, _collector: &Collector) {
    // SAFETY: caller guarantees `ptr` is valid and uniquely owned.
    unsafe { StdPtr::drop_in_place(ptr) };
    // SAFETY: the allocation originated from pool_alloc_leaf::<P>.
    unsafe { pool_dealloc_leaf::<P>(ptr.cast()) };
}
/// Drops a retired `InternodeNode` in place and returns its memory to the
/// pool. The `Collector` argument is unused — presumably it exists so this
/// function's signature matches the reclaimer callback shape seize expects;
/// confirm against the registration site.
///
/// # Safety
/// `ptr` must point to a valid, initialized `InternodeNode` allocated via
/// `pool_alloc_internode`, with no remaining references to it; it must not
/// be used afterwards.
#[inline]
pub unsafe fn reclaim_internode(ptr: *mut InternodeNode, _collector: &Collector) {
    // SAFETY: caller guarantees `ptr` is valid and uniquely owned.
    unsafe { StdPtr::drop_in_place(ptr) };
    // SAFETY: the allocation originated from pool_alloc_internode.
    unsafe { pool_dealloc_internode(ptr.cast()) };
}
// Unit tests live in a sibling file (unit_tests.rs), compiled only under
// `cfg(test)`; the lint expectations below apply to that module as a whole.
#[cfg(test)]
#[expect(clippy::similar_names)]
#[expect(clippy::unwrap_used, reason = "Fail fast in tests")]
mod unit_tests;