use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::arena::Arena;
use crate::central_pool::CentralPool;
use crate::config::AllocatorConfig;
use crate::error::{AllocError, FreeError, InitError};
#[cfg(test)]
use crate::free_list::Batch;
use crate::header::{
AllocationHeader, AllocationKind, HEADER_ALIGNMENT, HEADER_SIZE, block_start_from_user_ptr,
user_ptr_from_block_start,
};
use crate::large_object::LargeObjectAllocator;
use crate::size_class::SizeClass;
use crate::stats::{AllocatorStats, SizeClassStats};
use crate::thread_cache::{BlockReuse, ThreadCache};
// Monotonic source of unique allocator ids; starts at 1 so 0 can never be a
// valid id.  Only uniqueness is needed, so Relaxed ordering is used at the
// single fetch_add site in `Allocator::new`.
static NEXT_ALLOCATOR_ID: AtomicUsize = AtomicUsize::new(1);
/// Top-level allocator: routes small requests through per-thread caches and a
/// shared central pool, and large requests through a dedicated large-object
/// allocator, all backed by a single arena.
pub struct Allocator {
    // Unique id drawn from `NEXT_ALLOCATOR_ID`; used by thread caches to bind
    // themselves to a specific allocator instance.
    id: usize,
    // Immutable configuration captured at construction time.
    config: AllocatorConfig,
    // Backing memory region every block is carved from.
    arena: Arena,
    // Shared pool that thread caches refill from and drain excess blocks into.
    central: CentralPool,
    // Bookkeeping for allocations too large for any small size class.
    large: LargeObjectAllocator,
}
// SAFETY: presumably `Arena`, `CentralPool`, and `LargeObjectAllocator` use
// internal synchronization (all of `Allocator`'s methods take `&self`), making
// cross-thread sharing sound — NOTE(review): confirm each field's own
// Send/Sync story; these blanket impls silence the compiler's check.
unsafe impl Send for Allocator {}
unsafe impl Sync for Allocator {}
impl Allocator {
    /// Creates a new allocator with a freshly created arena and a unique id.
    ///
    /// # Errors
    ///
    /// Returns [`InitError`] when the configuration fails validation or when
    /// the arena cannot be created.
    pub fn new(config: AllocatorConfig) -> Result<Self, InitError> {
        validate_allocator_config(&config)?;
        let arena = Arena::new(&config)?;
        Ok(Self {
            // Relaxed suffices: the counter only needs to hand out unique
            // values, not order other memory operations.
            id: NEXT_ALLOCATOR_ID.fetch_add(1, Ordering::Relaxed),
            config,
            arena,
            central: CentralPool::new(),
            large: LargeObjectAllocator::new(),
        })
    }
    /// Returns the configuration this allocator was built with.
    #[must_use]
    pub const fn config(&self) -> &AllocatorConfig {
        &self.config
    }
    /// Returns this allocator's unique id (used by thread caches to bind).
    #[must_use]
    pub(crate) const fn id(&self) -> usize {
        self.id
    }
    /// Returns every cached block to the central pool when a thread exits, so
    /// no blocks are stranded in a dead thread's cache.
    pub(crate) fn drain_thread_cache_on_exit(&self, cache: &mut ThreadCache) {
        // SAFETY: presumably the cache only holds blocks owned by this
        // allocator's arena, so handing them to `self.central` is sound —
        // NOTE(review): relies on `bind_to_allocator` having been enforced on
        // every earlier allocate/deallocate call; confirm.
        unsafe {
            cache.drain_all_to_central(&self.central);
        }
    }
    /// Test-only hook: pops up to `max` blocks of `class` from the central
    /// pool so tests can observe central occupancy.
    #[cfg(test)]
    pub(crate) fn take_central_batch_for_test(&self, class: SizeClass, max: usize) -> Batch {
        self.central.take_batch(class, max)
    }
    /// Snapshots arena usage, per-class central-pool occupancy, and
    /// large-object counters into an [`AllocatorStats`] value.
    #[must_use]
    pub fn stats(&self) -> AllocatorStats {
        let central_counts = self.central.block_counts();
        // One entry per size class, in `SizeClass::ALL` order; bytes are
        // derived from the configured per-class block size.
        let small_central = core::array::from_fn(|index| {
            let class = SizeClass::ALL[index];
            let blocks = central_counts[index];
            SizeClassStats {
                class,
                blocks,
                bytes: blocks * self.config.class_block_size(class),
            }
        });
        let large = self.large.stats();
        AllocatorStats {
            arena_capacity: self.arena.capacity(),
            arena_remaining: self.arena.remaining(),
            small_central,
            large_live_allocations: large.live_allocations,
            large_live_bytes: large.live_bytes,
            large_free_blocks: large.free_blocks,
            large_free_bytes: large.free_bytes,
        }
    }
    /// Allocates `requested_size` bytes, serving small requests from `cache`
    /// and everything else from the large-object path.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::ZeroSize`] for a zero-byte request and
    /// [`AllocError::OutOfMemory`] when the arena cannot satisfy it.
    pub fn allocate_with_cache(
        &self,
        cache: &mut ThreadCache,
        requested_size: usize,
    ) -> Result<NonNull<u8>, AllocError> {
        cache.bind_to_allocator(self);
        if requested_size == 0 {
            return Err(AllocError::ZeroSize);
        }
        // A size class match routes to the small path; otherwise fall back to
        // the large-object allocator.
        SizeClass::from_request(requested_size).map_or_else(
            || self.allocate_large(requested_size),
            |class| self.allocate_small_with_cache(cache, class, requested_size),
        )
    }
    /// Frees `user_ptr`, returning small blocks to `cache` (or the central
    /// pool) and large blocks to the large-object allocator.
    ///
    /// # Safety
    ///
    /// `user_ptr` must be a live pointer previously returned by this
    /// allocator's allocation methods: the header preceding it is read
    /// unconditionally, which is undefined behavior for arbitrary pointers.
    ///
    /// # Errors
    ///
    /// Returns a [`FreeError`] for corrupt headers, foreign pointers, or
    /// large-object bookkeeping mismatches.
    pub unsafe fn deallocate_with_cache(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
    ) -> Result<(), FreeError> {
        cache.bind_to_allocator(self);
        unsafe { self.deallocate_impl(cache, user_ptr, None) }
    }
    /// Like [`Self::deallocate_with_cache`], but additionally verifies that
    /// the caller's `expected_size` matches the size recorded in the header.
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::deallocate_with_cache`].
    ///
    /// # Errors
    ///
    /// Additionally returns [`FreeError::SizeMismatch`] when the sizes differ.
    pub unsafe fn deallocate_with_size_checked(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
        expected_size: usize,
    ) -> Result<(), FreeError> {
        cache.bind_to_allocator(self);
        unsafe { self.deallocate_impl(cache, user_ptr, Some(expected_size)) }
    }
    /// Small-path allocation: refill the cache if needed, pop a block, and
    /// bring its header up to date for this allocation.
    fn allocate_small_with_cache(
        &self,
        cache: &mut ThreadCache,
        class: SizeClass,
        requested_size: usize,
    ) -> Result<NonNull<u8>, AllocError> {
        if cache.needs_refill(class) {
            // Prefer recycled blocks from the central pool; carve fresh ones
            // from the arena only when the pool is empty.
            let moved = cache.refill_from_central(class, &self.central);
            if moved == 0 {
                let carved = self.refill_cache_from_arena(cache, class, requested_size)?;
                if carved == 0 {
                    return Err(AllocError::OutOfMemory {
                        requested: requested_size,
                        remaining: self.arena.remaining(),
                    });
                }
            }
        }
        let (block_start, reuse) = cache.pop(class).ok_or_else(|| AllocError::OutOfMemory {
            requested: requested_size,
            remaining: self.arena.remaining(),
        })?;
        // The popped block tells us how much of its header is still valid;
        // each arm performs the minimal header update for that state.  The
        // header helpers signal failure with `None`, surfaced here as
        // `OutOfMemory`.
        match reuse {
            // Header still owned by this cache: only the requested size needs
            // refreshing.
            BlockReuse::HotReuseRequestedOnly => {
                let _ = AllocationHeader::refresh_small_requested_size(block_start, requested_size)
                    .ok_or_else(|| AllocError::OutOfMemory {
                        requested: requested_size,
                        remaining: self.arena.remaining(),
                    })?;
            }
            // Freshly adopted block: stamp both the size and this cache as
            // the owner.
            BlockReuse::FreshNeedsOwnerRefresh => {
                let _ = AllocationHeader::refresh_small_requested_size_and_owner(
                    block_start,
                    requested_size,
                    cache.cache_id(),
                )
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
            }
            // Header cannot be trusted: rewrite it from scratch.
            BlockReuse::NeedsHeaderRewrite => {
                let _ = AllocationHeader::write_small_to_block(
                    block_start,
                    class,
                    requested_size,
                    cache.cache_id(),
                )
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
            }
        }
        Ok(user_ptr_from_block_start(block_start))
    }
    /// Carves a span of fresh blocks for `class` out of the arena, initializes
    /// their headers, and hands the slab to `cache`.  Returns the number of
    /// blocks carved.
    fn refill_cache_from_arena(
        &self,
        cache: &mut ThreadCache,
        class: SizeClass,
        requested_size: usize,
    ) -> Result<usize, AllocError> {
        let block_size = class.block_size_for_alignment(self.config.alignment);
        let refill_count = self.config.refill_count(class);
        let Some(span) = self.reserve_refill_span(block_size, refill_count, requested_size)? else {
            return Err(AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            });
        };
        // The reserved span may be smaller than requested; derive the real
        // block count from its actual size.
        let carved = span.size() / block_size;
        initialize_small_span_headers(span.start(), block_size, carved, class)?;
        cache.push_owned_slab(class, span.start(), block_size, carved);
        Ok(carved)
    }
    /// Large-path allocation: size the block (header + payload, rounded up to
    /// the configured alignment), reuse a freed large block when one fits,
    /// otherwise take fresh arena memory, then record the live allocation.
    fn allocate_large(&self, requested_size: usize) -> Result<NonNull<u8>, AllocError> {
        // Every arithmetic step is overflow-checked; any failure is reported
        // as OutOfMemory with the original request size.
        let total_size =
            HEADER_SIZE
                .checked_add(requested_size)
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
        let block_size = align_up_checked(total_size, self.config.alignment).ok_or_else(|| {
            AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            }
        })?;
        let usable_size =
            block_size
                .checked_sub(HEADER_SIZE)
                .ok_or_else(|| AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining: self.arena.remaining(),
                })?;
        // A reused block keeps its own (possibly larger) block/usable sizes;
        // a fresh arena block uses the ones computed above.
        let (block_start, actual_block_size, usable_size) =
            self.large.take_reusable_block(block_size).map_or_else(
                || {
                    self.arena
                        .allocate_block(block_size)
                        .map(|block_start| (block_start, block_size, usable_size))
                        .map_err(|error| match error {
                            // Re-tag the error with the caller's requested
                            // size rather than the padded block size.
                            AllocError::OutOfMemory { remaining, .. } => AllocError::OutOfMemory {
                                requested: requested_size,
                                remaining,
                            },
                            other => other,
                        })
                },
                |block| Ok((block.block_start(), block.block_size, block.usable_size())),
            )?;
        let _ = AllocationHeader::write_large_to_block(block_start, requested_size, usable_size)
            .ok_or_else(|| AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            })?;
        let user_ptr = user_ptr_from_block_start(block_start);
        self.large.record_live_allocation(
            user_ptr,
            block_start,
            actual_block_size,
            requested_size,
            usable_size,
        );
        Ok(user_ptr)
    }
    /// Shared free path: decode the header, optionally check the recorded
    /// size, then dispatch to the small or large release logic.
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::deallocate_with_cache`]: `user_ptr` must come
    /// from this allocator and still be live.
    unsafe fn deallocate_impl(
        &self,
        cache: &mut ThreadCache,
        user_ptr: NonNull<u8>,
        expected_size: Option<usize>,
    ) -> Result<(), FreeError> {
        let (header, kind) = unsafe { Self::decode_header(user_ptr)? };
        if let Some(expected_size) = expected_size {
            let recorded = header.requested_size();
            if expected_size != recorded {
                return Err(FreeError::SizeMismatch {
                    provided: expected_size,
                    recorded,
                });
            }
        }
        match kind {
            AllocationKind::Small(class) => {
                let block_start = block_start_from_user_ptr(user_ptr);
                // Reject pointers that do not land on a block inside our
                // arena before touching any cache state.
                if !self.arena.contains_block_start(block_start) {
                    return Err(FreeError::ForeignPointer);
                }
                let owner_cache_id = header
                    .small_owner_cache_id()
                    .ok_or(FreeError::CorruptHeader)?;
                if owner_cache_id == cache.cache_id() {
                    // Same-thread free: keep the block hot in the local
                    // cache, spilling to central only past the drain
                    // threshold.
                    unsafe {
                        cache.push(class, block_start);
                    }
                    if cache.should_drain(class) {
                        unsafe {
                            cache.drain_excess_to_central(class, &self.central);
                        }
                    }
                } else {
                    // Cross-thread free: stage the block on the remote list,
                    // flushing batches to central as it fills.
                    unsafe {
                        cache.push_remote_and_maybe_flush(class, block_start, &self.central);
                    }
                }
                Ok(())
            }
            AllocationKind::Large => {
                self.large.validate_and_release_live_allocation(
                    user_ptr,
                    header.requested_size(),
                    header.usable_size(),
                )?;
                Ok(())
            }
        }
    }
    /// Validates that a header can plausibly precede `user_ptr` (no address
    /// underflow, correct alignment) and then reads it.
    ///
    /// # Safety
    ///
    /// `user_ptr` must point just past a real allocation header written by
    /// this allocator; the checks here catch gross misuse, not all of it.
    unsafe fn decode_header(
        user_ptr: NonNull<u8>,
    ) -> Result<(AllocationHeader, AllocationKind), FreeError> {
        let user_addr = user_ptr.as_ptr().addr();
        let Some(header_addr) = user_addr.checked_sub(HEADER_SIZE) else {
            return Err(FreeError::CorruptHeader);
        };
        if !header_addr.is_multiple_of(HEADER_ALIGNMENT) {
            return Err(FreeError::CorruptHeader);
        }
        unsafe { AllocationHeader::read_from_user_ptr(user_ptr) }
    }
    /// Reserves a span of `refill_count` blocks of `block_size` bytes from
    /// the arena, re-tagging out-of-memory errors with the caller's original
    /// request size.
    fn reserve_refill_span(
        &self,
        block_size: usize,
        refill_count: usize,
        requested_size: usize,
    ) -> Result<Option<crate::arena::ReservedSpan>, AllocError> {
        // Overflow pre-check only: the product itself is unused because the
        // arena recomputes the span size internally.
        let _ = block_size
            .checked_mul(refill_count)
            .ok_or_else(|| AllocError::OutOfMemory {
                requested: requested_size,
                remaining: self.arena.remaining(),
            })?;
        self.arena
            .reserve_block_span(block_size, refill_count)
            .map_err(|error| match error {
                AllocError::OutOfMemory { remaining, .. } => AllocError::OutOfMemory {
                    requested: requested_size,
                    remaining,
                },
                other => other,
            })
    }
}
/// Writes an initial small-allocation header into each of the `blocks` blocks
/// of a freshly carved span.
///
/// Block `index` starts at `span_start + index * block_size`.  The
/// `remaining: 0` in the error values is a placeholder — the arena handle is
/// not available from this free function.
fn initialize_small_span_headers(
    span_start: NonNull<u8>,
    block_size: usize,
    blocks: usize,
    class: SizeClass,
) -> Result<(), AllocError> {
    for index in 0..blocks {
        // Overflow-checked offset for this block within the span.
        let offset = index
            .checked_mul(block_size)
            .ok_or(AllocError::OutOfMemory {
                requested: block_size,
                remaining: 0,
            })?;
        let block_start = span_start.as_ptr().wrapping_add(offset);
        // SAFETY: assumes `offset` stays inside the span reserved from the
        // arena, so the pointer cannot wrap to null — NOTE(review): this is
        // guaranteed by the caller passing `carved = span.size() / block_size`;
        // confirm no other caller exists.
        let block_start = unsafe { NonNull::new_unchecked(block_start) };
        // The header helper reports failure as `None`; map it to OutOfMemory.
        let _ =
            AllocationHeader::initialize_small_to_block(block_start, class).ok_or_else(|| {
                AllocError::OutOfMemory {
                    requested: class.payload_size(),
                    remaining: 0,
                }
            })?;
    }
    Ok(())
}
/// Rounds `value` up to the next multiple of `alignment`.
///
/// Works for any non-zero `alignment`, not just powers of two.  Returns
/// `None` when the rounded value would overflow `usize`, or when
/// `alignment` is zero (previously this divided by zero and panicked;
/// callers treat `None` as out-of-memory, which is the safer failure mode).
const fn align_up_checked(value: usize, alignment: usize) -> Option<usize> {
    // Guard the modulo below: `value % 0` would panic at runtime.
    if alignment == 0 {
        return None;
    }
    let remainder = value % alignment;
    if remainder == 0 {
        Some(value)
    } else {
        // `alignment - remainder` cannot underflow because
        // `remainder < alignment` here; only the final add can overflow.
        value.checked_add(alignment - remainder)
    }
}
/// Rejects configurations whose alignment is too small to hold the
/// allocation header that precedes every block.
///
/// NOTE(review): the error message hard-codes "64 bytes"; presumably
/// `HEADER_ALIGNMENT` is 64 — confirm the constant and the message stay in
/// sync if either changes.
const fn validate_allocator_config(config: &AllocatorConfig) -> Result<(), InitError> {
    if config.alignment >= HEADER_ALIGNMENT {
        Ok(())
    } else {
        Err(InitError::InvalidConfig(
            "allocator alignment must be at least 64 bytes",
        ))
    }
}
#[cfg(test)]
mod tests {
    use super::Allocator;
    use crate::config::AllocatorConfig;
    use crate::size_class::SizeClass;
    use crate::thread_cache::ThreadCache;
    // Shared configuration: small refill/cache targets keep the tests'
    // block counts predictable.
    const fn test_config() -> AllocatorConfig {
        AllocatorConfig {
            arena_size: 64 * 1024 * 1024,
            alignment: 64,
            refill_target_bytes: 256,
            local_cache_target_bytes: 384,
        }
    }
    #[test]
    fn stats_report_arena_capacity_and_small_central_occupancy() {
        let allocator = Allocator::new(test_config())
            .unwrap_or_else(|error| panic!("expected allocator to initialize: {error}"));
        let mut source = ThreadCache::new(test_config());
        // Allocate four B64-class blocks, free them, then force everything
        // into the central pool so stats() can observe them there.
        let mut pointers = [None; 4];
        for slot in &mut pointers {
            let ptr = allocator
                .allocate_with_cache(&mut source, 32)
                .unwrap_or_else(|error| panic!("expected small allocation to succeed: {error}"));
            *slot = Some(ptr);
        }
        for ptr in pointers.into_iter().flatten() {
            unsafe {
                allocator
                    .deallocate_with_cache(&mut source, ptr)
                    .unwrap_or_else(|error| panic!("expected small free to succeed: {error}"));
            }
        }
        allocator.drain_thread_cache_on_exit(&mut source);
        let stats = allocator.stats();
        let class_stats = stats.small_central[SizeClass::B64.index()];
        assert_eq!(stats.arena_capacity, test_config().arena_size);
        assert!(stats.arena_remaining < stats.arena_capacity);
        assert_eq!(class_stats.class, SizeClass::B64);
        assert_eq!(class_stats.blocks, 4);
        assert_eq!(
            class_stats.bytes,
            4 * SizeClass::B64.block_size_for_alignment(64)
        );
        assert_eq!(stats.total_small_central_blocks(), 4);
        assert_eq!(stats.total_small_central_bytes(), class_stats.bytes);
    }
    #[test]
    fn stats_report_large_live_and_reusable_bytes() {
        let allocator = Allocator::new(test_config())
            .unwrap_or_else(|error| panic!("expected allocator to initialize: {error}"));
        let mut cache = ThreadCache::new(test_config());
        // One byte past the largest small request forces the large path.
        let requested = SizeClass::max_small_request() + 1;
        let ptr = allocator
            .allocate_with_cache(&mut cache, requested)
            .unwrap_or_else(|error| panic!("expected large allocation to succeed: {error}"));
        let live_stats = allocator.stats();
        assert_eq!(live_stats.large_live_allocations, 1);
        assert!(live_stats.large_live_bytes >= requested);
        assert_eq!(live_stats.large_free_blocks, 0);
        unsafe {
            allocator
                .deallocate_with_cache(&mut cache, ptr)
                .unwrap_or_else(|error| panic!("expected large free to succeed: {error}"));
        }
        // After the free, the bytes move from "live" to "free" unchanged.
        let freed_stats = allocator.stats();
        assert_eq!(freed_stats.large_live_allocations, 0);
        assert_eq!(freed_stats.large_free_blocks, 1);
        assert_eq!(freed_stats.large_free_bytes, live_stats.large_live_bytes);
    }
    #[test]
    fn slab_refill_counts_each_fresh_block_once_and_reuses_freed_block() {
        let allocator = Allocator::new(test_config())
            .unwrap_or_else(|error| panic!("expected allocator to initialize: {error}"));
        let mut cache = ThreadCache::new(test_config());
        let class = SizeClass::B64;
        let request = 32;
        // Exhaust exactly one refill's worth of blocks so the cache is empty
        // again afterwards.
        let carved = test_config().refill_count(class);
        let mut allocated = Vec::with_capacity(carved);
        for _ in 0..carved {
            let ptr = allocator
                .allocate_with_cache(&mut cache, request)
                .unwrap_or_else(|error| panic!("expected slab allocation to succeed: {error}"));
            allocated.push(ptr);
        }
        let stats = cache.stats();
        assert_eq!(stats.local[class.index()].blocks, 0);
        assert!(cache.needs_refill(class));
        // Free one block: it should land in the local cache and be handed
        // straight back by the next allocation (LIFO reuse).
        let freed = allocated[0];
        unsafe {
            allocator
                .deallocate_with_cache(&mut cache, freed)
                .unwrap_or_else(|error| panic!("expected slab free to succeed: {error}"));
        }
        let stats = cache.stats();
        assert_eq!(stats.local[class.index()].blocks, 1);
        assert!(!cache.needs_refill(class));
        let reused = allocator
            .allocate_with_cache(&mut cache, request)
            .unwrap_or_else(|error| panic!("expected freed slab block to be reused: {error}"));
        assert_eq!(reused, freed);
        // Clean up every outstanding allocation so nothing leaks.
        for ptr in allocated.into_iter().skip(1) {
            unsafe {
                allocator
                    .deallocate_with_cache(&mut cache, ptr)
                    .unwrap_or_else(|error| {
                        panic!("expected remaining slab allocation free to succeed: {error}")
                    });
            }
        }
        unsafe {
            allocator
                .deallocate_with_cache(&mut cache, reused)
                .unwrap_or_else(|error| {
                    panic!("expected reused slab block free to succeed: {error}")
                });
        }
    }
}