#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc = include_str!("../README.md")]
#![cfg_attr(nightly, feature(allocator_api))]
use core::cell::UnsafeCell;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
extern crate alloc;
#[cfg(not(nightly))]
use allocator_api2::alloc::{AllocError, Allocator, Layout};
#[cfg(nightly)]
use core::alloc::{AllocError, Allocator, Layout};
/// A fixed-size, lock-free bump ("stack") allocator backed by an inline
/// `N`-byte buffer.
///
/// Allocation advances an atomic offset through the buffer; deallocation is
/// best-effort and only reclaims memory when the freed block is the most
/// recent allocation (see the `Allocator` impl).
pub struct StackAllocator<const N: usize> {
    // Backing storage. `UnsafeCell` lets us hand out `*mut u8` into the
    // buffer from `&self`; `MaybeUninit` because the bytes start uninitialized.
    buf: UnsafeCell<MaybeUninit<[u8; N]>>,
    // Index of the first free byte in `buf`. Moves forward on allocate and
    // (only for the top allocation) backward on deallocate/shrink.
    offset: AtomicUsize,
}
impl<const N: usize> Default for StackAllocator<N> {
fn default() -> Self {
Self::new()
}
}
// SAFETY: all mutation of `offset` is atomic, and `allocate` reserves each
// byte range of `buf` for exactly one caller via a CAS on `offset`, so the
// allocator may be moved to another thread.
unsafe impl<const N: usize> Send for StackAllocator<N> {}
// SAFETY: same argument as `Send` — concurrent `&self` access races only on
// the atomic `offset`, and every allocated region has a single owner.
unsafe impl<const N: usize> Sync for StackAllocator<N> {}
impl<const N: usize> StackAllocator<N> {
    /// Creates an empty allocator: the `N`-byte buffer is uninitialized and
    /// the bump offset starts at zero.
    pub const fn new() -> Self {
        Self {
            buf: UnsafeCell::new(MaybeUninit::uninit()),
            offset: AtomicUsize::new(0),
        }
    }
    /// Resets the bump offset to zero, making the entire buffer available
    /// for reuse.
    ///
    /// # Safety
    ///
    /// No allocation previously handed out from this buffer may be accessed
    /// after the reset — their memory will be handed out again. (`&mut self`
    /// already rules out concurrent use of the allocator itself.)
    pub unsafe fn reset(&mut self) {
        self.offset.store(0, Ordering::Release);
    }
    /// Rounds `addr` up to the next multiple of `align`.
    ///
    /// `align` must be a nonzero power of two (guaranteed by
    /// `Layout::align` at the call sites). NOTE(review): `addr + align - 1`
    /// can overflow for addresses near `usize::MAX`.
    #[inline]
    const fn align_up(addr: usize, align: usize) -> usize {
        (addr + align - 1) & !(align - 1)
    }
    /// Number of buffer bytes currently handed out (the bump offset).
    pub fn current_offset(&self) -> usize {
        self.offset.load(Ordering::Acquire)
    }
}
unsafe impl<const N: usize> Allocator for StackAllocator<N> {
    /// Bump-allocates `layout` from the internal buffer.
    ///
    /// Lock-free: contending threads race on a CAS over `offset`; the loser
    /// retries with the value the CAS observed.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.buf.get() as usize;
        let mut current = self.offset.load(Ordering::Acquire);
        let start = loop {
            // Align the next free address to the request. `Layout` guarantees
            // the alignment is a power of two.
            let aligned_ptr = Self::align_up(base + current, layout.align());
            let start = aligned_ptr - base;
            let end = start.checked_add(layout.size()).ok_or(AllocError)?;
            if end > N {
                return Err(AllocError);
            }
            // Publish the new bump offset. On failure, reuse the offset the
            // CAS observed instead of issuing a redundant extra load.
            match self
                .offset
                .compare_exchange(current, end, Ordering::Release, Ordering::Acquire)
            {
                Ok(_) => break start,
                Err(observed) => current = observed,
            }
        };
        // SAFETY: `start <= end <= N` was checked above, so the pointer stays
        // within the buffer.
        let ptr = unsafe { self.buf.get().cast::<u8>().add(start) };
        // `buf` is a field of `self`, so the pointer is never null.
        Ok(NonNull::slice_from_raw_parts(
            NonNull::new(ptr).ok_or(AllocError)?,
            layout.size(),
        ))
    }

    /// Best-effort free: memory is reclaimed only when `ptr` is the most
    /// recent (top-of-stack) allocation; otherwise the bytes remain in use
    /// until [`StackAllocator::reset`].
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        let base = self.buf.get() as usize;
        let start = ptr.as_ptr() as usize - base;
        let end = start + layout.size();
        // Roll the bump offset back iff it still points at this block's end.
        let _ = self
            .offset
            .compare_exchange(end, start, Ordering::Release, Ordering::Relaxed);
    }

    /// Grows an allocation: extends in place when `ptr` is the top-of-stack
    /// allocation and already satisfies the new alignment; otherwise
    /// relocates to a fresh allocation and copies the old contents.
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // The `grow` contract requires new size >= old size; reject rather
        // than corrupt the bump offset if a caller violates it.
        if new_layout.size() < old_layout.size() {
            return Err(AllocError);
        }
        let base = self.buf.get() as usize;
        let old_start = ptr.as_ptr() as usize - base;
        // In-place extension is only sound if the existing address already
        // meets the (possibly stricter) new alignment.
        if ptr.as_ptr() as usize % new_layout.align() == 0 {
            let expected_offset = old_start + old_layout.size();
            let new_end = old_start.checked_add(new_layout.size()).ok_or(AllocError)?;
            if new_end <= N
                && self
                    .offset
                    .compare_exchange(
                        expected_offset,
                        new_end,
                        Ordering::Release,
                        Ordering::Relaxed,
                    )
                    .is_ok()
            {
                return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
            }
        }
        // Out-of-place path: allocate fresh, then COPY the old contents —
        // `grow` must preserve the first `old_layout.size()` bytes.
        let new_ptr = self.allocate(new_layout)?;
        // SAFETY: both regions are valid for `old_layout.size()` bytes and
        // disjoint (`allocate` never hands out live memory twice).
        unsafe {
            core::ptr::copy_nonoverlapping(
                ptr.as_ptr(),
                new_ptr.cast::<u8>().as_ptr(),
                old_layout.size(),
            );
            self.deallocate(ptr, old_layout);
        }
        Ok(new_ptr)
    }

    /// Shrinks an allocation. The tail is reclaimed only when `ptr` is the
    /// top-of-stack allocation; otherwise the same block is returned with the
    /// shorter length and the tail bytes are simply wasted.
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // The `shrink` contract requires new size <= old size.
        if new_layout.size() > old_layout.size() {
            return Err(AllocError);
        }
        // Reuse the block in place only if it satisfies the new alignment.
        if ptr.as_ptr() as usize % new_layout.align() == 0 {
            let base = self.buf.get() as usize;
            let old_start = ptr.as_ptr() as usize - base;
            let expected_offset = old_start + old_layout.size();
            let new_end = old_start + new_layout.size();
            // Best effort: hand the tail back iff this is the top allocation.
            let _ = self.offset.compare_exchange(
                expected_offset,
                new_end,
                Ordering::Release,
                Ordering::Relaxed,
            );
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }
        // Alignment increased: relocate to a properly aligned fresh block.
        let new_ptr = self.allocate(new_layout)?;
        // SAFETY: disjoint regions, both valid for `new_layout.size()` bytes.
        unsafe {
            core::ptr::copy_nonoverlapping(
                ptr.as_ptr(),
                new_ptr.cast::<u8>().as_ptr(),
                new_layout.size(),
            );
            self.deallocate(ptr, old_layout);
        }
        Ok(new_ptr)
    }

    // Identical to the trait's provided method; kept for API stability.
    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}
/// Two-tier allocator: requests are served from an inline
/// [`StackAllocator`] buffer of `N` bytes, falling back to `F` when the
/// buffer cannot satisfy them.
pub struct HybridAllocator<const N: usize, F: Allocator> {
    // Fast path: inline bump allocator.
    stack_alloc: StackAllocator<N>,
    // Slow path, used once the stack buffer is exhausted or unsuitable.
    fallback: F,
}
/// With the `alloc` feature, a [`HybridAllocator`] backed by the global heap
/// can be created via `Default`.
///
/// The `Global` type must come from the same source as the `Allocator` trait
/// in scope: on stable (`not(nightly)`) that is `allocator_api2::alloc::Global`
/// — `alloc::alloc::Global` implements only the unstable `core::alloc::Allocator`
/// trait and would not satisfy the `F: Allocator` bound here.
#[cfg(all(feature = "alloc", not(nightly)))]
impl<const N: usize> Default for HybridAllocator<N, allocator_api2::alloc::Global> {
    fn default() -> Self {
        Self::new(allocator_api2::alloc::Global)
    }
}
/// Nightly counterpart using the real `alloc::alloc::Global`.
#[cfg(all(feature = "alloc", nightly))]
impl<const N: usize> Default for HybridAllocator<N, alloc::alloc::Global> {
    fn default() -> Self {
        Self::new(alloc::alloc::Global)
    }
}
impl<const N: usize, F: Allocator> HybridAllocator<N, F> {
    /// Creates a hybrid allocator with an empty stack buffer and `fallback`
    /// for overflow allocations.
    pub const fn new(fallback: F) -> Self {
        Self {
            stack_alloc: StackAllocator::new(),
            fallback,
        }
    }
    /// Resets the stack portion, making the whole buffer available again.
    /// The fallback allocator is untouched.
    ///
    /// # Safety
    ///
    /// No allocation previously served from the stack buffer may be accessed
    /// afterwards — resetting hands their memory out again.
    pub unsafe fn reset(&mut self) {
        self.stack_alloc.reset();
    }
    /// Number of stack-buffer bytes currently in use.
    pub fn current_offset(&self) -> usize {
        self.stack_alloc.current_offset()
    }
    /// Returns a reference to the fallback allocator.
    pub fn fallback(&self) -> &F {
        &self.fallback
    }
    /// Returns `true` when the stack buffer is completely used up, so every
    /// further allocation is routed to the fallback.
    pub fn is_stack_exhausted(&self) -> bool {
        self.current_offset() >= N
    }
    /// Misspelled alias of [`Self::is_stack_exhausted`], kept so existing
    /// callers keep compiling.
    #[deprecated(note = "use `is_stack_exhausted` instead")]
    pub fn is_stack_exausted(&self) -> bool {
        self.is_stack_exhausted()
    }
}
unsafe impl<const N: usize, F: Allocator> Allocator for HybridAllocator<N, F> {
    /// Tries the stack first; falls back to `F` when the stack buffer
    /// cannot satisfy the layout.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.stack_alloc
            .allocate(layout)
            .or_else(|_| self.fallback.allocate(layout))
    }

    /// Routes the pointer back to whichever allocator owns it, decided by
    /// whether the address lies inside the stack buffer.
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;
        if (base..base + N).contains(&addr) {
            self.stack_alloc.deallocate(ptr, layout);
        } else {
            self.fallback.deallocate(ptr, layout);
        }
    }

    /// Grows in place on the owning allocator when possible; a stack block
    /// that cannot grow is migrated (allocate + copy) to the fallback.
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;
        if !(base..base + N).contains(&addr) {
            // Fallback-owned pointer: let the fallback handle the whole grow.
            return self.fallback.grow(ptr, old_layout, new_layout);
        }
        if let Ok(res) = self.stack_alloc.grow(ptr, old_layout, new_layout) {
            return Ok(res);
        }
        // The stack cannot grow this block: migrate it to the fallback.
        let new_ptr = self.fallback.allocate(new_layout)?;
        // SAFETY: the regions belong to different allocators, so they are
        // disjoint, and both are valid for `old_layout.size()` bytes. Copying
        // through raw pointers avoids materializing a `&mut [u8]` over
        // uninitialized memory.
        core::ptr::copy_nonoverlapping(
            ptr.as_ptr(),
            new_ptr.cast::<u8>().as_ptr(),
            old_layout.size(),
        );
        self.stack_alloc.deallocate(ptr, old_layout);
        Ok(new_ptr)
    }

    /// Shrinks in place on the owning allocator when possible; a stack block
    /// the stack refuses to shrink is migrated to the fallback — it must
    /// never be handed to `fallback.shrink`, which did not allocate it.
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;
        if !(base..base + N).contains(&addr) {
            return self.fallback.shrink(ptr, old_layout, new_layout);
        }
        if let Ok(res) = self.stack_alloc.shrink(ptr, old_layout, new_layout) {
            return Ok(res);
        }
        // Stack shrink failed (e.g. stricter alignment or non-top block):
        // relocate to the fallback instead of passing a stack-owned pointer
        // to `fallback.shrink` (undefined behavior).
        let new_ptr = self.fallback.allocate(new_layout)?;
        // SAFETY: disjoint regions, both valid for `new_layout.size()` bytes.
        core::ptr::copy_nonoverlapping(
            ptr.as_ptr(),
            new_ptr.cast::<u8>().as_ptr(),
            new_layout.size(),
        );
        self.stack_alloc.deallocate(ptr, old_layout);
        Ok(new_ptr)
    }

    // Identical to the trait's provided method; kept for API stability.
    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}