use crossbeam_epoch::{Atomic, Shared, pin};
use std::mem::{self, MaybeUninit};
use std::ptr::NonNull;
use std::sync::Mutex;
use std::sync::atomic::Ordering;
/// Number of object slots carved out of each slab.
const SLAB_SIZE: usize = 128;

/// A fixed-size, heap-allocated chunk of uninitialized storage for
/// `SLAB_SIZE` values of `T`. The allocator hands slots out raw; nothing
/// here ever constructs or drops a `T`.
struct Slab<T> {
    _memory: Box<[MaybeUninit<T>]>,
}

impl<T> Slab<T> {
    /// Allocates a slab of `SLAB_SIZE` uninitialized slots.
    ///
    /// This collects straight into a boxed slice instead of the original
    /// `Vec::with_capacity` + `set_len` pattern: that pattern is flagged by
    /// `clippy::uninit_vec` and needs no `unsafe` at all here, because a
    /// `MaybeUninit<T>` is valid in any byte state.
    fn new() -> Self {
        let memory: Box<[MaybeUninit<T>]> =
            (0..SLAB_SIZE).map(|_| MaybeUninit::uninit()).collect();
        Slab { _memory: memory }
    }

    /// Returns a raw pointer to the first slot.
    ///
    /// NOTE(review): this derives a `*mut T` from a shared borrow by casting
    /// away constness. Writes through it are only defensible because the
    /// buffer is `MaybeUninit` storage that is never accessed through the
    /// `Box` again; wrapping the buffer in `UnsafeCell` would make that
    /// contract explicit to the compiler — confirm with Miri.
    fn as_ptr(&self) -> *mut T {
        self._memory.as_ptr() as *mut T
    }
}
/// A link in the intrusive Treiber-stack free list.
///
/// Free object slots are reinterpreted in place as `FreeNode`s, which is why
/// `SlabAllocator::new` asserts that `T` is at least as large and as aligned
/// as this type. `#[repr(C)]` pins the layout so the `next` field sits at
/// offset 0 of every free slot.
#[repr(C)]
struct FreeNode {
next: Atomic<FreeNode>,
}
/// A slab allocator that hands out uninitialized slots of `T` from a
/// lock-free (Treiber stack) free list, growing by whole slabs on demand.
pub struct SlabAllocator<T> {
// Owns every slab so the backing memory stays alive as long as the
// allocator; only locked briefly when a new slab is pushed in `grow`.
slabs: Mutex<Vec<Slab<T>>>,
// Head of the intrusive free list of available slots.
head: Atomic<FreeNode>,
// Serializes `grow` so concurrent threads hitting an empty free list
// don't each allocate a slab.
grow_lock: Mutex<()>,
}
// SAFETY: these blanket impls deliberately omit `T: Send`/`T: Sync` — the
// allocator only manages raw `MaybeUninit<T>` storage and (in the code
// visible here) never constructs, reads, or drops a value of `T`, so `T`'s
// own thread-safety is not exercised by the allocator itself.
// NOTE(review): confirm no other code path ever materializes a `T` through
// the allocator; if one does, these impls need `T: Send` bounds.
unsafe impl<T> Send for SlabAllocator<T> {}
unsafe impl<T> Sync for SlabAllocator<T> {}
impl<T> SlabAllocator<T> {
/// Creates an empty allocator. No memory is reserved until the first
/// `alloc` finds the free list empty and triggers `grow`.
///
/// # Panics
///
/// Panics if `T` is smaller or less aligned than `FreeNode`: free slots
/// are reinterpreted in place as free-list links, so every slot must be
/// able to hold a `FreeNode` at its natural alignment.
pub fn new() -> Self {
assert!(
mem::size_of::<T>() >= mem::size_of::<FreeNode>(),
"Size of T must be >= size of FreeNode"
);
assert!(
mem::align_of::<T>() >= mem::align_of::<FreeNode>(),
"Alignment of T must be >= alignment of FreeNode"
);
SlabAllocator {
slabs: Mutex::new(Vec::new()),
head: Atomic::null(),
grow_lock: Mutex::new(()),
}
}
/// Allocates one slab and splices all of its slots onto the free list.
///
/// Callers are expected to serialize calls via `grow_lock` (see `alloc`);
/// concurrent `grow`s would be memory-safe but would over-allocate slabs.
fn grow(&self) {
let new_slab = Slab::new();
// Take the pointer before the slab is moved into the Vec: the move
// relocates only the `Slab` handle, not the heap buffer it owns, so
// `slab_ptr` remains valid afterwards.
let slab_ptr = new_slab.as_ptr();
self.slabs.lock().unwrap().push(new_slab);
// Thread slots 0 .. SLAB_SIZE-1 into a singly linked chain. Relaxed
// stores suffice: no other thread can observe these nodes until the
// release (AcqRel) CAS below publishes the chain.
for i in 0..(SLAB_SIZE - 1) {
let current_node = unsafe { slab_ptr.add(i) as *mut FreeNode };
let next_node = unsafe { slab_ptr.add(i + 1) as *mut FreeNode };
unsafe {
(*current_node)
.next
.store(Shared::from(next_node as *const _), Ordering::Relaxed);
}
}
let last_node = unsafe { slab_ptr.add(SLAB_SIZE - 1) as *mut FreeNode };
let new_head = Shared::from(slab_ptr as *const FreeNode);
let guard = &pin();
// Treiber-style push of the whole chain: point the chain's tail at the
// current head, then CAS the head over to the chain's first node.
loop {
let old_head = self.head.load(Ordering::Acquire, guard);
unsafe {
(*last_node).next.store(old_head, Ordering::Relaxed);
}
if self
.head
.compare_exchange(
old_head,
new_head,
Ordering::AcqRel,
Ordering::Acquire,
guard,
)
.is_ok()
{
break;
}
}
}
/// Pops one slot from the lock-free free list, growing when it is empty.
///
/// The returned memory is **uninitialized**: the caller must `write` a
/// `T` before any read, and must eventually return the slot via `free`.
///
/// NOTE(review): this pop is exposed to the classic ABA race. The epoch
/// `pin` does NOT protect it, because freed nodes are recycled
/// immediately by `free` rather than retired through the epoch
/// collector: thread A can load `head == X` with successor `Y`, be
/// preempted while another thread pops X and Y and then frees X (making
/// X head again), after which A's CAS on X *succeeds* and installs the
/// stale, in-use Y as the new head — a double allocation. A real fix
/// needs tagged/counted head pointers or epoch-deferred reuse of freed
/// slots; do not rely on this under contention until that is resolved.
pub fn alloc(&self) -> NonNull<T> {
let guard = &pin();
loop {
let head = self.head.load(Ordering::Acquire, guard);
if let Some(head_ref) = unsafe { head.as_ref() } {
// Read the successor, then try to swing `head` past it. Relaxed
// is enough for `next`: its value was made visible by the
// release CAS that published `head`, which the Acquire load
// above synchronizes with.
let next = head_ref.next.load(Ordering::Relaxed, guard);
if self
.head
.compare_exchange(head, next, Ordering::AcqRel, Ordering::Acquire, guard)
.is_ok()
{
// Won the pop; `head` is non-null here, so unwrap cannot fail.
return NonNull::new(head.as_raw() as *mut T).unwrap();
}
continue;
} else {
// Free list is empty. Serialize growth and re-check under the
// lock so only one thread allocates a slab per exhaustion; the
// lock is released at the end of this block and the outer loop
// retries the pop.
let _lock = self.grow_lock.lock().unwrap();
if self.head.load(Ordering::Relaxed, guard).is_null() {
self.grow();
}
}
}
}
/// Pushes a slot back onto the free list (Treiber push).
///
/// NOTE(review): this is a safe `fn` with an unchecked contract — `ptr`
/// must have come from `alloc` on this same allocator, must not be
/// freed twice, and any `T` written into the slot must already have
/// been dropped by the caller. Consider marking it `unsafe fn` with a
/// `# Safety` section in a future breaking release.
pub fn free(&self, ptr: NonNull<T>) {
// Reinterpret the slot as a free-list node; `new` asserted that `T`
// is large enough and sufficiently aligned for this to be sound.
let node_ptr = ptr.as_ptr() as *mut FreeNode;
let node_shared = Shared::from(node_ptr as *const FreeNode);
let guard = &pin();
loop {
let old_head = self.head.load(Ordering::Acquire, guard);
unsafe {
node_shared.deref().next.store(old_head, Ordering::Relaxed);
}
if self
.head
.compare_exchange(
old_head,
node_shared,
Ordering::AcqRel,
Ordering::Acquire,
guard,
)
.is_ok()
{
return;
}
}
}
}
impl<T> Default for SlabAllocator<T> {
fn default() -> Self {
Self::new()
}
}