#![no_std]
#![deny(missing_docs)]
#![cfg_attr(feature = "allocator-api", feature(allocator_api))]
#![warn(clippy::nursery, clippy::pedantic)]
use core::cell::UnsafeCell;
use core::fmt::{self, Debug, Formatter};
use core::hint::assert_unchecked;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
mod align;
pub use align::*;
mod unsafestalloc;
pub use unsafestalloc::*;
mod chain;
pub use chain::*;
mod alloc;
#[allow(clippy::wildcard_imports)]
use alloc::*;
#[cfg(feature = "std")]
mod syncstalloc;
#[cfg(feature = "std")]
pub use syncstalloc::*;
#[cfg(test)]
#[cfg(feature = "allocator-api")]
mod tests;
#[derive(Clone, Copy)]
#[repr(C)]
struct Header {
next: u16,
length: u16,
}
#[derive(Clone, Copy)]
#[repr(C)]
union Block<const B: usize>
where
Align<B>: Alignment,
{
header: Header,
bytes: [MaybeUninit<u8>; B],
_align: Align<B>,
}
fn header_in_block<const B: usize>(ptr: *mut Block<B>) -> *mut Header
where
Align<B>: Alignment,
{
unsafe { &raw mut (*ptr).header }
}
#[allow(clippy::cast_possible_truncation)]
const unsafe fn as_u16(val: usize) -> u16 {
unsafe {
assert_unchecked(val <= 0xffff);
}
val as u16
}
const OOM_MARKER: u16 = u16::MAX;
#[repr(C)]
pub struct Stalloc<const L: usize, const B: usize>
where
Align<B>: Alignment,
{
data: UnsafeCell<[Block<B>; L]>,
base: UnsafeCell<Header>,
}
impl<const L: usize, const B: usize> Stalloc<L, B>
where
Align<B>: Alignment,
{
#[must_use]
#[inline]
pub const fn new() -> Self {
const {
assert!(L >= 1 && L <= 0xffff, "block count must be in 1..65536");
assert!(B >= 4, "block size must be at least 4 bytes");
}
let mut blocks = [Block {
bytes: const { [MaybeUninit::uninit(); B] },
}; L];
blocks[0].header = Header {
next: 0,
length: unsafe { as_u16(L) },
};
Self {
base: UnsafeCell::new(Header { next: 0, length: 0 }),
data: UnsafeCell::new(blocks),
}
}
pub const fn is_oom(&self) -> bool {
unsafe { *self.base.get() }.length == OOM_MARKER
}
pub fn is_empty(&self) -> bool {
!self.is_oom() && unsafe { *self.base.get() }.next == 0
}
pub unsafe fn clear(&self) {
unsafe {
(*self.base.get()).next = 0;
(*self.base.get()).length = 0;
(*self.header_at(0)).next = 0;
(*self.header_at(0)).length = as_u16(L);
}
}
pub unsafe fn allocate_blocks(
&self,
size: usize,
align: usize,
) -> Result<NonNull<u8>, AllocError> {
unsafe {
assert_unchecked(size >= 1 && align.is_power_of_two() && align <= 2usize.pow(29) / B);
}
if self.is_oom() {
return Err(AllocError);
}
unsafe {
let base = self.base.get();
let mut prev = base;
let mut curr = self.header_at((*base).next.into());
loop {
let curr_idx = usize::from((*prev).next);
let next_idx = (*curr).next.into();
let curr_chunk_len = (*curr).length.into();
let spare_front = (curr.addr() / B).wrapping_neg() % align;
if spare_front + size <= curr_chunk_len {
let avail_blocks = curr_chunk_len - spare_front;
let avail_blocks_ptr = self.block_at(curr_idx + spare_front);
let spare_back = avail_blocks - size;
if spare_back > 0 {
let spare_back_idx = curr_idx + spare_front + size;
let spare_back_ptr = self.header_at(spare_back_idx);
(*spare_back_ptr).next = as_u16(next_idx);
(*spare_back_ptr).length = as_u16(spare_back);
if spare_front > 0 {
(*curr).next = as_u16(spare_back_idx);
(*curr).length = as_u16(spare_front);
} else {
(*prev).next = as_u16(spare_back_idx);
}
} else if spare_front > 0 {
(*curr).next = as_u16(curr_idx + spare_front + size);
(*curr).length = as_u16(spare_front);
(*prev).next = as_u16(next_idx);
} else {
(*prev).next = as_u16(next_idx);
if next_idx == 0 {
(*base).length = OOM_MARKER;
}
}
return Ok(NonNull::new_unchecked(avail_blocks_ptr.cast()));
}
if next_idx == 0 {
return Err(AllocError);
}
prev = curr;
curr = self.header_at(next_idx);
}
}
}
pub unsafe fn deallocate_blocks(&self, ptr: NonNull<u8>, size: usize) {
unsafe {
assert_unchecked(size >= 1 && size <= L);
}
let freed_ptr = header_in_block(ptr.as_ptr().cast());
let freed_idx = self.index_of(freed_ptr);
let base = self.base.get();
let before = self.header_before(freed_idx);
unsafe {
let prev_next = (*before).next.into();
(*freed_ptr).next = as_u16(prev_next);
(*freed_ptr).length = as_u16(size);
if freed_idx + size == prev_next {
let header_to_merge = self.header_at(prev_next);
(*freed_ptr).next = (*header_to_merge).next;
(*freed_ptr).length += (*header_to_merge).length;
}
if before.eq(&base) {
(*base).next = as_u16(freed_idx);
(*base).length = 0;
} else if self.index_of(before) + usize::from((*before).length) == freed_idx {
(*before).next = (*freed_ptr).next;
(*before).length += (*freed_ptr).length;
} else {
(*before).next = as_u16(freed_idx);
}
}
}
pub unsafe fn shrink_in_place(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) {
unsafe {
assert_unchecked(new_size > 0 && new_size < old_size);
}
let curr_block: *mut Block<B> = ptr.as_ptr().cast();
let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;
let new_idx = curr_idx + new_size;
let spare_blocks = old_size - new_size;
unsafe {
let prev_free_chunk = self.header_before(curr_idx);
let next_free_idx = (*prev_free_chunk).next.into(); let new_chunk = header_in_block(curr_block.add(new_size));
(*prev_free_chunk).next = as_u16(new_idx);
if new_idx + spare_blocks == next_free_idx {
let next_free_chunk = self.header_at(next_free_idx);
(*new_chunk).next = (*next_free_chunk).next;
(*new_chunk).length = as_u16(spare_blocks) + (*next_free_chunk).length;
} else {
(*new_chunk).next = as_u16(next_free_idx);
(*new_chunk).length = as_u16(spare_blocks);
}
(*self.base.get()).length = 0;
}
}
pub unsafe fn grow_in_place(
&self,
ptr: NonNull<u8>,
old_size: usize,
new_size: usize,
) -> Result<(), AllocError> {
unsafe {
assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size);
}
let curr_block: *mut Block<B> = ptr.as_ptr().cast();
let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;
let prev_free_chunk = self.header_before(curr_idx);
unsafe {
let next_free_idx = (*prev_free_chunk).next.into();
if curr_idx + old_size != next_free_idx {
return Err(AllocError);
}
let next_free_chunk = self.header_at(next_free_idx);
let room_to_grow = (*next_free_chunk).length.into();
let needed_blocks = new_size - old_size;
if needed_blocks > room_to_grow {
return Err(AllocError);
}
let blocks_left_over = room_to_grow - needed_blocks;
if blocks_left_over > 0 {
let new_chunk_idx = next_free_idx + needed_blocks;
let new_chunk_head = self.header_at(new_chunk_idx);
(*prev_free_chunk).next = as_u16(new_chunk_idx);
(*new_chunk_head).next = (*next_free_chunk).next;
(*new_chunk_head).length = as_u16(blocks_left_over);
} else {
(*prev_free_chunk).next = (*next_free_chunk).next;
let base = self.base.get();
if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 {
(*base).length = OOM_MARKER;
}
}
Ok(())
}
}
pub unsafe fn grow_up_to(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) -> usize {
unsafe {
assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size);
}
let curr_block: *mut Block<B> = ptr.as_ptr().cast();
let curr_idx = (curr_block.addr() - self.data.get().addr()) / B;
let prev_free_chunk = self.header_before(curr_idx);
unsafe {
let next_free_idx = (*prev_free_chunk).next.into();
if curr_idx + old_size != next_free_idx {
return old_size;
}
let next_free_chunk = self.header_at(next_free_idx);
let room_to_grow = (*next_free_chunk).length.into();
let needed_blocks = (new_size - old_size).min(room_to_grow);
let blocks_left_over = room_to_grow - needed_blocks;
if blocks_left_over > 0 {
let new_chunk_idx = next_free_idx + needed_blocks;
let new_chunk_head = self.header_at(new_chunk_idx);
(*prev_free_chunk).next = as_u16(new_chunk_idx);
(*new_chunk_head).next = (*next_free_chunk).next;
(*new_chunk_head).length = as_u16(blocks_left_over);
} else {
(*prev_free_chunk).next = (*next_free_chunk).next;
let base = self.base.get();
if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 {
(*base).length = OOM_MARKER;
}
}
old_size + needed_blocks
}
}
}
impl<const L: usize, const B: usize> Stalloc<L, B>
where
Align<B>: Alignment,
{
fn index_of(&self, ptr: *mut Header) -> usize {
(ptr.addr() - self.data.get().addr()) / B
}
const unsafe fn block_at(&self, idx: usize) -> *mut Block<B> {
let root: *mut Block<B> = self.data.get().cast();
unsafe { root.add(idx) }
}
unsafe fn header_at(&self, idx: usize) -> *mut Header {
header_in_block(unsafe { self.block_at(idx) })
}
fn header_before(&self, idx: usize) -> *mut Header {
let mut ptr = self.base.get();
unsafe {
if (*ptr).length == OOM_MARKER || usize::from((*ptr).next) >= idx {
return ptr;
}
loop {
ptr = self.header_at((*ptr).next.into());
let next_idx = usize::from((*ptr).next);
if next_idx == 0 || next_idx >= idx {
return ptr;
}
}
}
}
}
impl<const L: usize, const B: usize> Debug for Stalloc<L, B>
where
Align<B>: Alignment,
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Stallocator with {L} blocks of {B} bytes each")?;
let mut ptr = self.base.get();
if unsafe { (*ptr).length } == OOM_MARKER {
return write!(f, "\n\tNo free blocks (OOM)");
}
loop {
unsafe {
let idx = (*ptr).next.into();
ptr = self.header_at(idx);
let length = (*ptr).length;
if length == 1 {
write!(f, "\n\tindex {idx}: {length} free block")?;
} else {
write!(f, "\n\tindex {idx}: {length} free blocks")?;
}
if (*ptr).next == 0 {
return Ok(());
}
}
}
}
}
impl<const L: usize, const B: usize> Default for Stalloc<L, B>
where
Align<B>: Alignment,
{
fn default() -> Self {
Self::new()
}
}
#[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
unsafe impl<const L: usize, const B: usize> Allocator for &Stalloc<L, B>
where
Align<B>: Alignment,
{
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size().div_ceil(B);
let align = layout.align().div_ceil(B);
if size == 0 {
let addr = layout.align().try_into().unwrap();
let dangling = NonNull::without_provenance(addr);
return Ok(NonNull::slice_from_raw_parts(dangling, 0));
}
unsafe { self.allocate_blocks(size, align) }
.map(|p| NonNull::slice_from_raw_parts(p, size * B))
}
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let ptr = self.allocate(layout)?;
let ptr = NonNull::slice_from_raw_parts(ptr.cast(), layout.size());
unsafe { ptr.cast::<u8>().write_bytes(0, ptr.len()) }
Ok(ptr)
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let size = layout.size().div_ceil(B);
if size == 0 {
return;
}
unsafe { self.deallocate_blocks(ptr, size) };
}
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
let old_size = old_layout.size().div_ceil(B);
let new_size = new_layout.size().div_ceil(B);
let align = new_layout.align().div_ceil(B);
if new_size == old_size {
return Ok(NonNull::slice_from_raw_parts(ptr, new_size * B));
}
if old_size == 0 {
return unsafe {
self.allocate_blocks(new_size, align)
.map(|p| NonNull::slice_from_raw_parts(p, new_size * B))
};
}
unsafe {
if self.grow_in_place(ptr, old_size, new_size).is_ok() {
Ok(NonNull::slice_from_raw_parts(ptr, new_size * B))
} else {
let new = self.allocate_blocks(new_size, align)?;
ptr.copy_to_nonoverlapping(new, old_layout.size());
self.deallocate_blocks(ptr, old_size);
Ok(NonNull::slice_from_raw_parts(new, new_size * B))
}
}
}
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
unsafe {
let new_ptr = self.grow(ptr, old_layout, new_layout)?;
let count = new_ptr.len() - old_layout.size();
new_ptr
.cast::<u8>()
.add(old_layout.size())
.write_bytes(0, count);
Ok(new_ptr)
}
}
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
let old_size = old_layout.size().div_ceil(B);
let new_size = new_layout.size().div_ceil(B);
if new_size == 0 {
unsafe {
if old_size != 0 {
self.deallocate_blocks(ptr, old_size);
}
let addr = new_layout.align().try_into().unwrap();
let dangling = NonNull::without_provenance(addr);
return Ok(NonNull::slice_from_raw_parts(dangling, 0));
}
}
if !ptr.as_ptr().addr().is_multiple_of(new_layout.align()) {
let align = new_layout.align() / B;
unsafe {
let new = self.allocate_blocks(new_size, align)?;
ptr.copy_to_nonoverlapping(new, old_layout.size());
self.deallocate_blocks(ptr, old_size);
return Ok(NonNull::slice_from_raw_parts(new, new_size * B));
}
}
if old_size == new_size {
return Ok(NonNull::slice_from_raw_parts(ptr, old_size * B));
}
unsafe {
self.shrink_in_place(ptr, old_size, new_size);
}
Ok(NonNull::slice_from_raw_parts(ptr, new_size * B))
}
}
unsafe impl<const L: usize, const B: usize> ChainableAlloc for Stalloc<L, B>
where
Align<B>: Alignment,
{
#[inline]
fn claims(&self, ptr: *mut u8, layout: Layout) -> bool {
let base_addr = self.data.get().addr();
layout.size() == 0 || (ptr.addr() >= base_addr && ptr.addr() < base_addr + B * L)
}
}
impl<const L: usize, const B: usize> Stalloc<L, B>
where
Align<B>: Alignment,
{
pub const fn chain<T>(self, next: &T) -> AllocChain<'_, Self, T>
where
Self: Sized,
{
AllocChain::new(self, next)
}
}