use core::{
alloc::Layout, cell::Cell, hint::unreachable_unchecked, ptr::NonNull, sync::atomic::AtomicUsize,
};
use std::thread_local;
use allocator_api2::alloc::{AllocError, Allocator, Global};
use parking_lot::Mutex;
use crate::layout_max;
// Chunk specialized with an `AtomicUsize` use-counter so a chunk's remaining
// allocations can be released from threads other than the one that owns the
// ring it currently sits in. (Exact counter role lives in crate::chunk —
// TODO confirm.)
type Chunk<const N: usize> = crate::chunk::Chunk<AtomicUsize, N>;
// Size classes: an allocation whose `layout_max` is <= *_MAX_SIZE is served
// from chunks of the matching *_CHUNK_SIZE; anything larger falls through to
// the next class, and past LARGE_ALLOCATION_MAX_SIZE to the `Global` allocator
// (see `OneRingAlloc::allocate`).
const TINY_ALLOCATION_MAX_SIZE: usize = 16;
const TINY_ALLOCATION_CHUNK_SIZE: usize = 16384;
const SMALL_ALLOCATION_MAX_SIZE: usize = 256;
const SMALL_ALLOCATION_CHUNK_SIZE: usize = 65536;
const LARGE_ALLOCATION_MAX_SIZE: usize = 65536;
const LARGE_ALLOCATION_CHUNK_SIZE: usize = 2097152;
// Concrete chunk types, one per size class.
type TinyChunk = Chunk<{ TINY_ALLOCATION_CHUNK_SIZE }>;
type SmallChunk = Chunk<{ SMALL_ALLOCATION_CHUNK_SIZE }>;
type LargeChunk = Chunk<{ LARGE_ALLOCATION_CHUNK_SIZE }>;
/// Single-threaded intrusive list of chunks (head = chunk allocations are
/// currently served from, tail = last chunk in the list).
///
/// Uses `Cell` for interior mutability: a `LocalRing` is only ever touched
/// from the thread that owns it (it lives inside a `thread_local`).
struct LocalRing<T> {
    head: Cell<Option<NonNull<T>>>,
    tail: Cell<Option<NonNull<T>>>,
}
impl<T> LocalRing<T> {
    /// An empty ring: no head, no tail. `const` so it can be built inside a
    /// `thread_local! { … const { … } }` initializer.
    const fn new() -> Self {
        Self {
            head: Cell::new(None),
            tail: Cell::new(None),
        }
    }
}
/// Cross-thread counterpart of `LocalRing`: plain `Option` fields, because a
/// `GlobalRing` is always accessed through a `Mutex` (or `&mut` in `Drop`),
/// so no interior mutability is needed.
struct GlobalRing<T> {
    head: Option<NonNull<T>>,
    tail: Option<NonNull<T>>,
}
impl<T> GlobalRing<T> {
    /// An empty ring, usable in a `static` initializer.
    const fn new() -> Self {
        Self { head: None, tail: None }
    }
}
/// Process-wide fallback rings, one per size class. Threads park their
/// leftover chunks here on exit (`LocalRings::flush_all`) and steal from here
/// before allocating a fresh chunk (`_allocate`).
struct GlobalRings {
    tiny_ring: Mutex<GlobalRing<TinyChunk>>,
    small_ring: Mutex<GlobalRing<SmallChunk>>,
    large_ring: Mutex<GlobalRing<LargeChunk>>,
}
impl Drop for GlobalRings {
    /// Frees whatever unused chunks remain in the global rings.
    /// `get_mut` gives lock-free access since `&mut self` proves exclusivity.
    /// NOTE(review): chunks still reporting in-use allocations are not freed
    /// here — presumably intentional (outstanding allocations would dangle);
    /// confirm against `Chunk::unused`.
    fn drop(&mut self) {
        Self::clean(self.tiny_ring.get_mut());
        Self::clean(self.small_ring.get_mut());
        Self::clean(self.large_ring.get_mut());
    }
}
impl GlobalRings {
#[inline(never)]
fn clean_all(&self) {
Self::clean(&mut self.tiny_ring.lock());
Self::clean(&mut self.small_ring.lock());
Self::clean(&mut self.large_ring.lock());
}
#[inline(never)]
fn clean<const N: usize>(ring: &mut GlobalRing<Chunk<N>>) {
let mut chunk = &mut ring.head;
while let Some(mut c) = *chunk {
if unsafe { c.as_ref().unused() } {
*chunk = unsafe { c.as_mut().next() };
unsafe {
Chunk::free(c, Global);
}
} else {
chunk = unsafe { c.as_mut().next.get_mut() };
}
}
if ring.head.is_none() {
ring.tail = None;
}
}
}
// SAFETY(review): `GlobalRings` holds raw `NonNull` chunk pointers (which are
// `!Send`/`!Sync` by default), but all mutation goes through the `Mutex`es,
// and the chunks' shared counter is `AtomicUsize` — presumably making
// cross-thread access sound; confirm against `crate::chunk`'s invariants.
unsafe impl Send for GlobalRings {}
unsafe impl Sync for GlobalRings {}
/// Per-thread rings, one per size class; lives in the `LOCAL_RINGS`
/// thread-local and is drained into `GLOBAL_RINGS` when the thread exits.
struct LocalRings {
    tiny_ring: LocalRing<TinyChunk>,
    small_ring: LocalRing<SmallChunk>,
    large_ring: LocalRing<LargeChunk>,
}
impl Drop for LocalRings {
    /// Thread-exit path: first free chunks with no live allocations, then
    /// hand any remaining (still-in-use) chunks to the global rings so other
    /// threads can eventually reclaim them.
    fn drop(&mut self) {
        self.clean_all();
        self.flush_all();
    }
}
impl LocalRings {
#[inline(never)]
fn clean_all(&self) {
Self::clean(&self.tiny_ring);
Self::clean(&self.small_ring);
Self::clean(&self.large_ring);
}
#[inline(never)]
fn clean<const N: usize>(ring: &LocalRing<Chunk<N>>) {
let mut chunk = &ring.head;
while let Some(c) = chunk.get() {
if unsafe { c.as_ref().unused() } {
chunk.set(unsafe { c.as_ref().next() });
unsafe {
Chunk::free(c, Global);
}
} else {
chunk = unsafe { &c.as_ref().next };
}
}
if ring.head.get().is_none() {
ring.tail.set(None);
}
}
#[inline(never)]
fn flush_all(&mut self) {
Self::flush(&mut self.tiny_ring, &GLOBAL_RINGS.tiny_ring);
Self::flush(&mut self.small_ring, &GLOBAL_RINGS.small_ring);
Self::flush(&mut self.large_ring, &GLOBAL_RINGS.large_ring);
}
#[inline(never)]
fn flush<const N: usize>(ring: &mut LocalRing<Chunk<N>>, global: &Mutex<GlobalRing<Chunk<N>>>) {
match (ring.head.take(), ring.tail.take()) {
(None, None) => {}
(Some(head), Some(tail)) => {
let mut global = global.lock();
match (global.head, global.tail) {
(None, None) => {
global.head = Some(head);
global.tail = Some(tail);
}
(Some(_g_head), Some(mut g_tail)) => unsafe {
*g_tail.as_mut().next.get_mut() = Some(head);
global.tail = Some(tail);
},
_ => unsafe { unreachable_unchecked() },
}
}
_ => unsafe { unreachable_unchecked() },
}
}
}
thread_local! {
    // One set of rings per thread; the `const` initializer keeps thread-local
    // access on the fast path (no lazy-init check machinery beyond the
    // minimum). Dropped on thread exit, which triggers clean + flush.
    static LOCAL_RINGS: LocalRings = const { LocalRings {
        tiny_ring: LocalRing::new(),
        small_ring: LocalRing::new(),
        large_ring: LocalRing::new(),
    } };
}
// Process-wide fallback rings; receives chunks flushed by exiting threads and
// donates them to threads whose local ring cannot satisfy an allocation.
static GLOBAL_RINGS: GlobalRings = GlobalRings {
    tiny_ring: Mutex::new(GlobalRing::new()),
    small_ring: Mutex::new(GlobalRing::new()),
    large_ring: Mutex::new(GlobalRing::new()),
};
/// Zero-sized handle to the ring allocator: allocations are served from
/// per-thread chunk rings (falling back to the global rings, then to fresh
/// chunks from `Global`), with oversize requests forwarded to `Global`
/// directly. Copyable and free to construct — all state is in statics.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct OneRingAlloc;
/// Serves `layout` from `ring`, in order of preference:
/// 1. the current head chunk;
/// 2. the next local chunk, after rotating the exhausted head to the tail;
/// 3. the global ring's head chunk (splicing the whole global list in);
/// 4. a brand-new chunk from `Global`, pushed to the front of the ring.
///
/// Returns a slice pointer of exactly `layout.size()` bytes, or `AllocError`
/// if a fresh chunk could not be obtained.
#[inline]
fn _allocate<const N: usize>(
    ring: &LocalRing<Chunk<N>>,
    global: &Mutex<GlobalRing<Chunk<N>>>,
    layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
    if let Some(chunk_ptr) = ring.head.get() {
        let chunk = unsafe { chunk_ptr.as_ref() };
        match chunk.allocate(chunk_ptr, layout) {
            Some(ptr) => {
                // Fast path: head chunk had room.
                return Ok(unsafe {
                    NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
                        ptr.as_ptr(),
                        layout.size(),
                    ))
                });
            }
            None => match chunk.next.take() {
                None => {
                    // Single-chunk ring and it is full; fall through to the
                    // global ring / fresh-chunk paths below.
                    debug_assert_eq!(ring.tail.get(), ring.head.get());
                }
                Some(next_ptr) => {
                    // Rotate: move the exhausted head to the tail (its `next`
                    // was just taken, so it terminates the list) and promote
                    // its successor to head.
                    let tail_chunk = unsafe { ring.tail.get().unwrap().as_ref() };
                    debug_assert_eq!(tail_chunk.next(), None);
                    tail_chunk.next.set(Some(chunk_ptr));
                    ring.tail.set(Some(chunk_ptr));
                    ring.head.set(Some(next_ptr));
                    let next = unsafe { next_ptr.as_ref() };
                    if let Some(ptr) = next.allocate(next_ptr, layout) {
                        return Ok(unsafe {
                            NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
                                ptr.as_ptr(),
                                layout.size(),
                            ))
                        });
                    }
                    // New head is also full; fall through.
                }
            },
        }
    } else {
        debug_assert_eq!(ring.tail.get(), None);
    }
    // Steal the entire global list while holding the lock only briefly.
    let (g_head, g_tail) = {
        let mut global = global.lock();
        (global.head.take(), global.tail.take())
    };
    let ptr = match (g_head, g_tail) {
        (None, None) => None,
        (Some(g_head), Some(mut g_tail)) => {
            // Try the stolen head immediately, then splice the stolen list in
            // front of the local ring (stolen tail links to the old local
            // head; the old local tail — if any — remains the overall tail).
            let ptr = unsafe { g_head.as_ref().allocate(g_head, layout) };
            match (ring.head.get(), ring.tail.get()) {
                (None, None) => {
                    ring.head.set(Some(g_head));
                    ring.tail.set(Some(g_tail));
                }
                (Some(head), Some(_tail)) => unsafe {
                    *g_tail.as_mut().next.get_mut() = Some(head);
                    ring.head.set(Some(g_head));
                },
                _ => unsafe { unreachable_unchecked() },
            }
            ptr
        }
        _ => unsafe { unreachable_unchecked() },
    };
    let ptr = match ptr {
        None => {
            // Last resort: allocate a fresh chunk and push it to the front.
            let chunk_ptr = Chunk::<N>::new(Global)?;
            let chunk = unsafe { chunk_ptr.as_ref() };
            // A fresh chunk of size N always fits one allocation of this size
            // class, so failure here would be a bug.
            let ptr = chunk
                .allocate(chunk_ptr, layout)
                .expect("Failed to allocate from fresh chunk");
            chunk.next.set(ring.head.get());
            if ring.tail.get().is_none() {
                debug_assert_eq!(ring.head.get(), None);
                ring.tail.set(Some(chunk_ptr));
            } else {
                debug_assert!(ring.head.get().is_some());
            }
            ring.head.set(Some(chunk_ptr));
            ptr
        }
        Some(ptr) => ptr,
    };
    Ok(unsafe {
        NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
            ptr.as_ptr(),
            layout.size(),
        ))
    })
}
/// Returns `ptr` to the chunk that owns it (presumably located from the
/// pointer itself by `Chunk::deallocate` — confirm against `crate::chunk`).
///
/// # Safety
/// `ptr` must have been produced by `_allocate::<N>` (the same size class
/// `N`) with the same `layout`, and must not already have been freed.
#[inline(never)]
unsafe fn _deallocate<const N: usize>(ptr: NonNull<u8>, layout: Layout) {
    unsafe {
        Chunk::<N>::deallocate(ptr.as_ptr(), layout);
    }
}
impl OneRingAlloc {
#[inline(never)]
pub fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
if layout_max(layout) <= TINY_ALLOCATION_MAX_SIZE {
LOCAL_RINGS.with(|rings| _allocate(&rings.tiny_ring, &GLOBAL_RINGS.tiny_ring, layout))
} else if layout_max(layout) <= SMALL_ALLOCATION_MAX_SIZE {
LOCAL_RINGS.with(|rings| _allocate(&rings.small_ring, &GLOBAL_RINGS.small_ring, layout))
} else if layout_max(layout) <= LARGE_ALLOCATION_MAX_SIZE {
LOCAL_RINGS.with(|rings| _allocate(&rings.large_ring, &GLOBAL_RINGS.large_ring, layout))
} else {
Global.allocate(layout)
}
}
#[inline(never)]
pub unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout_max(layout) <= TINY_ALLOCATION_MAX_SIZE {
unsafe {
_deallocate::<{ TINY_ALLOCATION_CHUNK_SIZE }>(ptr, layout);
}
} else if layout_max(layout) <= SMALL_ALLOCATION_MAX_SIZE {
unsafe {
_deallocate::<{ SMALL_ALLOCATION_CHUNK_SIZE }>(ptr, layout);
}
} else if layout_max(layout) <= LARGE_ALLOCATION_MAX_SIZE {
unsafe {
_deallocate::<{ LARGE_ALLOCATION_CHUNK_SIZE }>(ptr, layout);
}
} else {
unsafe { Global.deallocate(ptr, layout) }
}
}
pub fn clean_global(&self) {
GLOBAL_RINGS.clean_all();
}
pub fn clean_local(&self) {
LOCAL_RINGS.with(|rings| rings.clean_all());
}
}
// Allocator trait front-end. Each method delegates to the inherent method of
// the same name; Rust resolves `self.allocate(..)` to the inherent impl
// (inherent methods take precedence over trait methods), so this does not
// recurse.
unsafe impl Allocator for OneRingAlloc {
    #[inline(never)]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.allocate(layout)
    }
    #[inline(never)]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        unsafe {
            self.deallocate(ptr, layout);
        }
    }
}