use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::arena::Arena;
use crate::central_pool::CentralPool;
use crate::config::AllocatorConfig;
use crate::error::{AllocError, FreeError, InitError};
#[cfg(test)]
use crate::free_list::Batch;
use crate::header::{
AllocationHeader, AllocationKind, HEADER_ALIGNMENT, HEADER_SIZE, block_start_from_user_ptr,
header_from_user_ptr, user_ptr_from_block_start,
};
use crate::large_object::LargeObjectAllocator;
use crate::size_class::SizeClass;
use crate::thread_cache::ThreadCache;
// Monotonic counter handing out process-unique allocator ids; starts at 1
// (presumably so 0 can act as an "unbound" sentinel — TODO confirm).
static NEXT_ALLOCATOR_ID: AtomicUsize = AtomicUsize::new(1);
/// Arena-backed allocator with a small-object path (per-thread caches refilled
/// from a shared central pool or carved from the arena) and a separate
/// large-object path.
pub struct Allocator {
    // Process-unique id assigned from NEXT_ALLOCATOR_ID at construction.
    id: usize,
    // Configuration captured at construction (alignment, refill counts, ...).
    config: AllocatorConfig,
    // Backing memory: small-block spans and fresh large blocks are carved here.
    arena: Arena,
    // Shared pool that thread caches refill from and drain excess blocks to.
    central: CentralPool,
    // Reusable large blocks plus live large-allocation bookkeeping.
    large: LargeObjectAllocator,
}
// SAFETY: NOTE(review) — these impls assert that Arena, CentralPool and
// LargeObjectAllocator internally synchronize all shared mutation (they are
// used through `&self` from multiple threads). That property is not visible
// from this file; confirm each component's thread-safety before relying on it.
unsafe impl Send for Allocator {}
unsafe impl Sync for Allocator {}
impl Allocator {
    /// Builds an allocator from `config`: validates it, reserves the backing
    /// arena, and assigns a process-unique id.
    ///
    /// # Errors
    /// Returns [`InitError`] when the configuration is rejected or the arena
    /// cannot be created.
    pub fn new(config: AllocatorConfig) -> Result<Self, InitError> {
        validate_allocator_config(&config)?;
        let arena = Arena::new(&config)?;
        Ok(Self {
            // Relaxed suffices: the counter only has to hand out distinct
            // values, it does not order any other memory operations.
            id: NEXT_ALLOCATOR_ID.fetch_add(1, Ordering::Relaxed),
            config,
            arena,
            central: CentralPool::new(),
            large: LargeObjectAllocator::new(),
        })
    }

    /// Returns the configuration this allocator was constructed with.
    #[must_use]
    pub const fn config(&self) -> &AllocatorConfig {
        &self.config
    }

    /// Returns this allocator's process-unique id.
    #[must_use]
    pub(crate) const fn id(&self) -> usize {
        self.id
    }

    /// Drains every block held by `cache` back into the central pool
    /// (thread-exit teardown, per the name).
    pub(crate) fn drain_thread_cache_on_exit(&self, cache: &mut ThreadCache) {
        // SAFETY: NOTE(review) — relies on `drain_all_to_central`'s contract
        // (presumably: the cached blocks belong to this allocator's pools);
        // confirm against thread_cache's documentation.
        unsafe {
            cache.drain_all_to_central(&self.central);
        }
    }

    /// Test-only helper: removes up to `max` blocks of `class` from the
    /// central pool so tests can inspect them.
    #[cfg(test)]
    pub(crate) fn take_central_batch_for_test(&self, class: SizeClass, max: usize) -> Batch {
        self.central.take_batch(class, max)
    }

    /// Allocates `requested_size` bytes, serving size-classed requests from
    /// `cache` and everything larger through the large-object path.
    ///
    /// # Errors
    /// Returns [`AllocError::ZeroSize`] for a zero-byte request, or an
    /// out-of-memory / init error from the chosen path.
    pub fn allocate_with_cache(
        &self,
        cache: &mut ThreadCache,
        requested_size: usize,
    ) -> Result<NonNull<u8>, AllocError> {
        cache.bind_to_allocator(self);
        if requested_size == 0 {
            return Err(AllocError::ZeroSize);
        }
        // A request that maps to a SizeClass is "small"; anything else is
        // routed to the large-object allocator.
        SizeClass::from_request(requested_size).map_or_else(
            || self.allocate_large(requested_size),
            |class| self.allocate_small_with_cache(cache, class, requested_size),
        )
    }

    /// Frees the allocation behind `user_ptr`, returning small blocks to
    /// `cache` and large blocks to the large-object allocator.
    ///
    /// # Safety
    /// `user_ptr` must come from this allocator's allocation methods and must
    /// not already have been freed — TODO confirm the exact contract; the
    /// implementation validates header integrity and (for small blocks) arena
    /// membership, but cannot catch every misuse.
    ///
    /// # Errors
    /// Returns [`FreeError`] on a corrupt header or a foreign pointer.
    pub unsafe fn deallocate_with_cache(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
    ) -> Result<(), FreeError> {
        cache.bind_to_allocator(self);
        // SAFETY: forwarded from this function's own contract.
        unsafe { self.deallocate_impl(cache, user_ptr, None) }
    }

    /// Like [`Self::deallocate_with_cache`], but additionally verifies that
    /// `expected_size` matches the size recorded in the allocation header.
    ///
    /// # Safety
    /// Same contract as [`Self::deallocate_with_cache`].
    ///
    /// # Errors
    /// Additionally returns [`FreeError::SizeMismatch`] when the sizes differ.
    pub unsafe fn deallocate_with_size_checked(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
        expected_size: usize,
    ) -> Result<(), FreeError> {
        cache.bind_to_allocator(self);
        // SAFETY: forwarded from this function's own contract.
        unsafe { self.deallocate_impl(cache, user_ptr, Some(expected_size)) }
    }

    /// Small-object path: pops a block of `class` from the thread cache,
    /// refilling the cache first — preferring recycled central-pool blocks,
    /// then freshly carved arena blocks — when it runs low.
    fn allocate_small_with_cache(
        &self,
        cache: &mut ThreadCache,
        class: SizeClass,
        requested_size: usize,
    ) -> Result<NonNull<u8>, AllocError> {
        if cache.needs_refill(class) {
            // Recycled blocks from the central pool are preferred over
            // touching the arena.
            let moved = cache.refill_from_central(class, &self.central);
            if moved == 0 {
                let carved = self.refill_cache_from_arena(cache, class, requested_size)?;
                if carved == 0 {
                    return Err(AllocError::OutOfMemory {
                        requested: requested_size,
                        remaining: self.arena.remaining(),
                    });
                }
            }
        }
        let block_start = cache.pop(class).ok_or_else(|| AllocError::OutOfMemory {
            requested: requested_size,
            remaining: self.arena.remaining(),
        })?;
        // NOTE(review): if header construction fails here, the block already
        // popped above is not pushed back and would leak — confirm that
        // `new_small` cannot fail for a size that passed `SizeClass::from_request`.
        let header = AllocationHeader::new_small(class, requested_size).ok_or_else(|| {
            AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            }
        })?;
        // Write result deliberately ignored — TODO confirm `write_to_block`
        // cannot fail in a way that matters on this path.
        let _ = header.write_to_block(block_start);
        Ok(user_ptr_from_block_start(block_start))
    }

    /// Carves up to `refill_count` blocks of `class` out of a freshly reserved
    /// arena span and pushes them into `cache`; returns how many were carved.
    fn refill_cache_from_arena(
        &self,
        cache: &mut ThreadCache,
        class: SizeClass,
        requested_size: usize,
    ) -> Result<usize, AllocError> {
        let block_size = class.block_size_for_alignment(self.config.alignment);
        let refill_count = self.config.refill_count(class);
        let Some(span) = self.reserve_refill_span(block_size, refill_count, requested_size)? else {
            return Err(AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            });
        };
        // The reserved span may be smaller than requested; carve only whole
        // blocks out of whatever was actually granted.
        let carved = span.size() / block_size;
        let span_start = span.start().as_ptr();
        for index in 0..carved {
            let offset = index * block_size;
            let block_start = span_start.wrapping_add(offset);
            // SAFETY: `offset` stays within the reserved span, whose start is
            // non-null, so the resulting pointer is non-null.
            let block_start = unsafe { NonNull::new_unchecked(block_start) };
            // SAFETY: NOTE(review) — the block was just carved from a span
            // this call reserved, so it is exclusively owned here; confirm
            // this satisfies `push`'s documented contract.
            unsafe {
                cache.push(class, block_start);
            }
        }
        Ok(carved)
    }

    /// Large-object path: sizes the block (header + payload, rounded up to the
    /// configured alignment), reuses a previously released large block when
    /// one fits, otherwise carves a fresh block from the arena, then records
    /// the allocation as live.
    fn allocate_large(&self, requested_size: usize) -> Result<NonNull<u8>, AllocError> {
        // Header plus payload; arithmetic overflow is reported as OOM.
        let total_size =
            HEADER_SIZE
                .checked_add(requested_size)
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
        let block_size = align_up_checked(total_size, self.config.alignment).ok_or_else(|| {
            AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            }
        })?;
        // Padding introduced by the round-up becomes extra usable payload.
        let usable_size =
            block_size
                .checked_sub(HEADER_SIZE)
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
        let (block_start, actual_block_size, usable_size) =
            self.large.take_reusable_block(block_size).map_or_else(
                || {
                    // No reusable block: carve a fresh one, rewriting OOM so
                    // it reports the caller's requested size, not block_size.
                    self.arena
                        .allocate_block(block_size)
                        .map(|block_start| (block_start, block_size, usable_size))
                        .map_err(|error| match error {
                            AllocError::OutOfMemory { remaining, .. } => AllocError::OutOfMemory {
                                requested: requested_size,
                                remaining,
                            },
                            other => other,
                        })
                },
                // A reused block may be larger than asked for; keep its real
                // block and usable sizes for the bookkeeping below.
                |block| Ok((block.block_start(), block.block_size, block.usable_size())),
            )?;
        // NOTE(review): if header construction fails here, the block taken
        // above is returned to no pool and would leak — confirm `new_large`
        // cannot fail for sizes that survived the checked arithmetic above.
        let header = AllocationHeader::new_large(requested_size, usable_size).ok_or_else(|| {
            AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            }
        })?;
        // Write result deliberately ignored — TODO confirm `write_to_block`
        // cannot fail in a way that matters on this path.
        let _ = header.write_to_block(block_start);
        let user_ptr = user_ptr_from_block_start(block_start);
        self.large.record_live_allocation(
            user_ptr,
            block_start,
            actual_block_size,
            requested_size,
            usable_size,
        );
        Ok(user_ptr)
    }

    /// Shared free path: decodes and validates the header, optionally checks
    /// the caller-provided size, then routes to the small or large release
    /// logic.
    ///
    /// # Safety
    /// `user_ptr` must point at a live allocation produced by this allocator —
    /// TODO confirm the precise contract; the code validates what it can
    /// (header integrity, arena membership for small blocks).
    unsafe fn deallocate_impl(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
        expected_size: Option<usize>,
    ) -> Result<(), FreeError> {
        // SAFETY: forwarded from this function's own contract.
        let (header, kind) = unsafe { Self::decode_header(user_ptr)? };
        if let Some(expected_size) = expected_size {
            let recorded = header.requested_size();
            if expected_size != recorded {
                return Err(FreeError::SizeMismatch {
                    provided: expected_size,
                    recorded,
                });
            }
        }
        match kind {
            AllocationKind::Small(class) => {
                let block_start = block_start_from_user_ptr(user_ptr);
                // Reject blocks that do not live inside our arena.
                if !self.arena.contains_block_start(block_start) {
                    return Err(FreeError::ForeignPointer);
                }
                // SAFETY: NOTE(review) — the block lies in this arena and its
                // header decoded to `class`; confirm this satisfies `push`'s
                // contract.
                unsafe {
                    cache.push(class, block_start);
                }
                // Cap per-thread hoarding by spilling excess blocks back to
                // the shared central pool.
                if cache.should_drain(class) {
                    // SAFETY: NOTE(review) — same ownership argument as the
                    // push above; confirm against thread_cache's contract.
                    unsafe {
                        cache.drain_excess_to_central(class, &self.central);
                    }
                }
                Ok(())
            }
            AllocationKind::Large => {
                // The large-object allocator cross-checks the recorded sizes
                // against its own live-allocation bookkeeping.
                self.large.validate_and_release_live_allocation(
                    user_ptr,
                    header.requested_size(),
                    header.usable_size(),
                )?;
                Ok(())
            }
        }
    }

    /// Reads and validates the allocation header stored `HEADER_SIZE` bytes
    /// in front of `user_ptr`.
    ///
    /// Cheap address checks (underflow, header alignment) run before the raw
    /// read so obviously-bogus pointers fail without touching memory.
    ///
    /// # Safety
    /// The header slot in front of `user_ptr` must be readable — TODO confirm;
    /// the checks above narrow but do not eliminate the caller's obligation.
    unsafe fn decode_header(
        user_ptr: NonNull<u8>,
    ) -> Result<(AllocationHeader, AllocationKind), FreeError> {
        let user_addr = user_ptr.as_ptr().addr();
        // A user address below HEADER_SIZE cannot have a header in front.
        let Some(header_addr) = user_addr.checked_sub(HEADER_SIZE) else {
            return Err(FreeError::CorruptHeader);
        };
        if !header_addr.is_multiple_of(HEADER_ALIGNMENT) {
            return Err(FreeError::CorruptHeader);
        }
        let header_ptr = header_from_user_ptr(user_ptr);
        // SAFETY: forwarded from this function's contract; the checks above
        // only rule out underflow and misalignment.
        let header = unsafe { header_ptr.as_ptr().read() };
        let kind = header.validate()?;
        Ok((header, kind))
    }

    /// Reserves an arena span big enough for `refill_count` blocks of
    /// `block_size`, shrinking the request one block at a time on OOM.
    /// Returns `Ok(None)` when not even a single block fits.
    fn reserve_refill_span(
        &self,
        block_size: usize,
        refill_count: usize,
        requested_size: usize,
    ) -> Result<Option<crate::arena::ReservedSpan>, AllocError> {
        let requested_span_size =
            block_size
                .checked_mul(refill_count)
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
        match self.arena.reserve_span(requested_span_size) {
            Ok(span) => Ok(Some(span)),
            Err(AllocError::OutOfMemory { remaining, .. }) => {
                // Start from the largest count the reported remaining space
                // could hold and walk down; `remaining` may be stale under
                // concurrency, so every size is re-tried against the arena.
                let mut reduced_count = remaining / block_size;
                while reduced_count > 0 {
                    let reduced_span_size =
                        block_size
                            .checked_mul(reduced_count)
                            .ok_or(AllocError::OutOfMemory {
                                requested: requested_size,
                                remaining,
                            })?;
                    match self.arena.reserve_span(reduced_span_size) {
                        Ok(span) => return Ok(Some(span)),
                        Err(AllocError::OutOfMemory { .. }) => {
                            reduced_count -= 1;
                        }
                        Err(AllocError::GlobalInitFailed) => {
                            return Err(AllocError::GlobalInitFailed);
                        }
                        Err(AllocError::ZeroSize) => return Err(AllocError::ZeroSize),
                    }
                }
                Ok(None)
            }
            // Non-OOM errors are propagated unchanged; matching each variant
            // explicitly keeps this exhaustive if AllocError ever grows.
            Err(AllocError::GlobalInitFailed) => Err(AllocError::GlobalInitFailed),
            Err(AllocError::ZeroSize) => Err(AllocError::ZeroSize),
        }
    }
}
/// Rounds `value` up to the next multiple of `alignment`.
///
/// Returns `None` when the rounded value would overflow `usize`. Works for
/// any non-zero alignment, not just powers of two.
///
/// # Panics
/// Panics (division by zero) if `alignment` is zero; callers guarantee a
/// validated, non-zero alignment.
const fn align_up_checked(value: usize, alignment: usize) -> Option<usize> {
    match value % alignment {
        // Already aligned: nothing to add.
        0 => Some(value),
        // Bump by the shortfall, refusing to wrap past usize::MAX.
        excess => value.checked_add(alignment - excess),
    }
}
/// Checks that `config` is usable by [`Allocator::new`].
///
/// The alignment must be at least `HEADER_ALIGNMENT` so every block can carry
/// an `AllocationHeader` in front of the user pointer.
///
/// # Errors
/// Returns [`InitError::InvalidConfig`] when the alignment is too small.
const fn validate_allocator_config(config: &AllocatorConfig) -> Result<(), InitError> {
    if config.alignment < HEADER_ALIGNMENT {
        // Fix: the old message hard-coded "64 bytes" while the check compares
        // against HEADER_ALIGNMENT — the text would silently become wrong if
        // that constant ever changed. State the requirement symbolically
        // (a const fn cannot format the numeric value into a &'static str).
        return Err(InitError::InvalidConfig(
            "allocator alignment must be at least HEADER_ALIGNMENT",
        ));
    }
    Ok(())
}