use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicU32, Ordering};
/// log2 of the per-block size: 4 MiB blocks on 32-bit targets, 64 MiB elsewhere.
#[cfg(target_pointer_width = "32")]
const BLOCK_SHIFT: u32 = 22;
#[cfg(not(target_pointer_width = "32"))]
const BLOCK_SHIFT: u32 = 26;
/// Bytes in one lazily-allocated block.
const BLOCK_SIZE: u32 = 1 << BLOCK_SHIFT;
/// Mask extracting the intra-block offset from a packed handle.
const BLOCK_MASK: u32 = BLOCK_SIZE - 1;
/// Number of block slots; a handle is `block_idx << BLOCK_SHIFT | offset`.
const MAX_BLOCKS: usize = 1 << (32 - BLOCK_SHIFT);

/// Lock-free bump arena addressed by packed 32-bit offset handles.
///
/// A handle encodes `(block_index, intra_block_offset)` in one `u32`. Blocks
/// are zero-filled and allocated on demand; nothing is freed until the arena
/// itself is dropped. Offset `0` is never returned (the cursor starts at `1`),
/// so callers may use `0` as a null sentinel.
pub struct Arena {
    /// Lazily-populated block pointers; null means "not allocated yet".
    blocks: Box<[AtomicPtr<u8>]>,
    /// Packed bump cursor: `block_idx << BLOCK_SHIFT | next_free_offset`.
    cursor: AtomicU32,
}

impl Arena {
    /// Largest alignment `alloc` accepts. Blocks are themselves allocated with
    /// this alignment, so an intra-block offset that is a multiple of `align`
    /// also yields a pointer that is truly `align`-aligned.
    const MAX_ALIGN: u32 = 4096;

    /// Creates an empty arena; no block memory is allocated until needed.
    pub fn new() -> Self {
        let blocks: Vec<AtomicPtr<u8>> = (0..MAX_BLOCKS)
            .map(|_| AtomicPtr::new(ptr::null_mut()))
            .collect();
        Self {
            blocks: blocks.into_boxed_slice(),
            // Start at 1 so offset 0 is never handed out.
            cursor: AtomicU32::new(1),
        }
    }

    /// Bump-allocates `size` bytes with power-of-two alignment `align` and
    /// returns a packed offset handle.
    ///
    /// Returns `None` for invalid requests (`size == 0`, `size >= BLOCK_SIZE`,
    /// non-power-of-two `align`, or `align > 4096` — the strongest alignment
    /// the underlying blocks guarantee) and when the 32-bit handle space is
    /// exhausted. An allocation never straddles a block boundary: if it does
    /// not fit, the tail of the current block is abandoned and the cursor
    /// advances to the next block.
    pub fn alloc(&self, size: u32, align: u32) -> Option<u32> {
        // Rejecting align > MAX_ALIGN is a correctness guard: blocks are only
        // MAX_ALIGN-aligned, so a larger request could hand out memory that is
        // not actually aligned as promised to the caller.
        if !align.is_power_of_two() || align > Self::MAX_ALIGN || size == 0 || size >= BLOCK_SIZE {
            return None;
        }
        loop {
            let cur = self.cursor.load(Ordering::Acquire);
            let block_idx = cur >> BLOCK_SHIFT;
            let offset = cur & BLOCK_MASK;
            // Round the intra-block offset up to the requested alignment.
            let aligned = (offset + align - 1) & !(align - 1);
            let Some(new_end) = aligned.checked_add(size) else {
                return None;
            };
            if new_end < BLOCK_SIZE {
                // Materialize the block *before* publishing the new cursor so
                // every thread that observes the cursor also finds the block.
                self.ensure_block(block_idx as usize);
                let new_cursor = (block_idx << BLOCK_SHIFT) | new_end;
                if self
                    .cursor
                    .compare_exchange_weak(cur, new_cursor, Ordering::AcqRel, Ordering::Relaxed)
                    .is_ok()
                {
                    return Some((block_idx << BLOCK_SHIFT) | aligned);
                }
                // CAS failed: another thread moved the cursor; retry.
            } else {
                // Does not fit (or would exactly fill) this block: advance the
                // cursor to the start of the next block and retry.
                let new_block = block_idx + 1;
                if new_block as usize >= MAX_BLOCKS {
                    return None;
                }
                self.ensure_block(new_block as usize);
                let new_cursor = new_block << BLOCK_SHIFT;
                // A failed CAS just means someone else advanced it first.
                let _ = self.cursor.compare_exchange_weak(
                    cur,
                    new_cursor,
                    Ordering::AcqRel,
                    Ordering::Relaxed,
                );
            }
        }
    }

    /// Returns `len` bytes starting at handle `offset` as a shared slice.
    ///
    /// # Safety
    /// `offset` must come from a successful [`Arena::alloc`] of at least
    /// `len` bytes, and no overlapping `&mut` access may exist for the
    /// lifetime of the returned slice.
    pub unsafe fn get_bytes(&self, offset: u32, len: u32) -> &[u8] {
        let (ptr, off) = unsafe { self.decode(offset) };
        debug_assert!(
            off + len as usize <= BLOCK_SIZE as usize,
            "get_bytes: off={off} + len={len} exceeds BLOCK_SIZE={BLOCK_SIZE} (offset={offset})",
        );
        // SAFETY: per the contract above, the range lies inside one block.
        unsafe { std::slice::from_raw_parts(ptr.add(off), len as usize) }
    }

    /// Returns `len` bytes starting at handle `offset` as a mutable slice.
    ///
    /// # Safety
    /// Same as [`Arena::get_bytes`], plus the caller must guarantee exclusive
    /// access to the range for the lifetime of the returned slice.
    #[expect(
        clippy::mut_from_ref,
        reason = "interior mutability by design; caller guarantees exclusive access"
    )]
    pub unsafe fn get_bytes_mut(&self, offset: u32, len: u32) -> &mut [u8] {
        let (ptr, off) = unsafe { self.decode(offset) };
        // Mirror the bounds check done by `get_bytes` (previously missing).
        debug_assert!(
            off + len as usize <= BLOCK_SIZE as usize,
            "get_bytes_mut: off={off} + len={len} exceeds BLOCK_SIZE={BLOCK_SIZE} (offset={offset})",
        );
        // SAFETY: per the contract above, the range lies inside one block and
        // is not aliased for the borrow's duration.
        unsafe { std::slice::from_raw_parts_mut(ptr.add(off), len as usize) }
    }

    /// Views the four bytes at handle `offset` as an `AtomicU32`.
    ///
    /// # Safety
    /// `offset` must come from `alloc(4, 4)` (or stronger alignment) so the
    /// location is valid, 4-byte aligned, and only accessed atomically.
    pub unsafe fn get_atomic_u32(&self, offset: u32) -> &AtomicU32 {
        let (ptr, off) = unsafe { self.decode(offset) };
        #[expect(
            clippy::cast_ptr_alignment,
            reason = "caller guarantees 4-byte alignment via alloc(..., 4)"
        )]
        let atom_ptr = unsafe { ptr.add(off).cast::<u32>() };
        // SAFETY: valid, aligned, and atomically accessed per the contract.
        unsafe { AtomicU32::from_ptr(atom_ptr) }
    }

    /// Splits a handle into its block's base pointer and intra-block offset.
    ///
    /// If the block pointer is not yet visible (a racing `alloc` may still be
    /// publishing it), spins briefly and finally falls back to allocating the
    /// block itself, so the returned pointer is always non-null.
    #[inline]
    #[expect(
        clippy::indexing_slicing,
        reason = "block_idx < MAX_BLOCKS by construction (alloc enforces this)"
    )]
    unsafe fn decode(&self, offset: u32) -> (*mut u8, usize) {
        let block_idx = (offset >> BLOCK_SHIFT) as usize;
        let off = (offset & BLOCK_MASK) as usize;
        let mut ptr = self.blocks[block_idx].load(Ordering::Acquire);
        if ptr.is_null() {
            for _ in 0..1000 {
                std::hint::spin_loop();
                ptr = self.blocks[block_idx].load(Ordering::Acquire);
                if !ptr.is_null() {
                    return (ptr, off);
                }
            }
            // Last resort: allocate the block ourselves (idempotent).
            self.ensure_block(block_idx);
            ptr = self.blocks[block_idx].load(Ordering::Acquire);
        }
        (ptr, off)
    }

    /// Ensures `blocks[idx]` points at an allocated, zeroed block.
    ///
    /// Racing callers may both allocate; the CAS loser frees its copy, so
    /// exactly one block survives per slot.
    #[expect(
        clippy::indexing_slicing,
        reason = "idx < MAX_BLOCKS enforced by alloc()"
    )]
    fn ensure_block(&self, idx: usize) {
        if self.blocks[idx].load(Ordering::Acquire).is_null() {
            let layout = Self::block_layout();
            // SAFETY: `layout` has a nonzero size.
            let raw = unsafe { std::alloc::alloc_zeroed(layout) };
            if raw.is_null() {
                std::alloc::handle_alloc_error(layout);
            }
            if self.blocks[idx]
                .compare_exchange(ptr::null_mut(), raw, Ordering::AcqRel, Ordering::Acquire)
                .is_err()
            {
                // Another thread installed its block first; discard ours.
                // SAFETY: `raw` came from `alloc_zeroed` with this exact
                // layout and was never published to other threads.
                unsafe {
                    std::alloc::dealloc(raw, layout);
                }
            }
        }
    }

    /// Layout of one block: `BLOCK_SIZE` bytes, `MAX_ALIGN`-aligned so that
    /// intra-block alignment implies real pointer alignment.
    fn block_layout() -> std::alloc::Layout {
        // SAFETY: BLOCK_SIZE is a nonzero power of two far below isize::MAX
        // and MAX_ALIGN is a power of two, satisfying Layout's invariants.
        unsafe {
            std::alloc::Layout::from_size_align_unchecked(
                BLOCK_SIZE as usize,
                Self::MAX_ALIGN as usize,
            )
        }
    }
}
impl Default for Arena {
fn default() -> Self {
Self::new()
}
}
impl Drop for Arena {
    /// Frees every block that was lazily allocated during the arena's life.
    fn drop(&mut self) {
        let layout = Self::block_layout();
        // `&mut self` guarantees exclusive access, so Relaxed loads suffice.
        for slot in self.blocks.iter() {
            let block_ptr = slot.load(Ordering::Relaxed);
            if block_ptr.is_null() {
                continue;
            }
            // SAFETY: non-null entries were produced by `alloc_zeroed` with
            // exactly this layout in `ensure_block`, and each is freed once.
            unsafe { std::alloc::dealloc(block_ptr, layout) };
        }
    }
}
#[cfg(test)]
#[allow(clippy::expect_used, reason = "tests use expect for brevity")]
#[allow(
clippy::unwrap_used,
clippy::indexing_slicing,
clippy::useless_vec,
clippy::doc_markdown,
clippy::stable_sort_primitive,
reason = "test code"
)]
mod tests {
use super::*;
// Smoke test: a 4-byte, 4-aligned allocation can be written and read back,
// the handle is 4-aligned, and handle 0 is never returned (cursor starts at 1).
#[test]
fn basic_alloc_and_read() {
let arena = Arena::new();
let off = arena.alloc(4, 4).expect("should succeed");
assert!(off >= 1);
assert_eq!(off & 3, 0);
unsafe {
let bytes = arena.get_bytes_mut(off, 4);
bytes.copy_from_slice(&[1, 2, 3, 4]);
}
let read = unsafe { arena.get_bytes(off, 4) };
assert_eq!(read, &[1, 2, 3, 4]);
}
// A 4-aligned request after an unaligned one must round its offset up.
#[test]
fn alloc_respects_alignment() {
let arena = Arena::new();
let a = arena.alloc(1, 1).expect("ok");
let b = arena.alloc(4, 4).expect("ok");
assert_eq!(b & 3, 0);
assert!(b > a);
}
// An allocation that cannot fit in the remainder of block 0 must land at
// the start of block 1 (handles encode the block index in the high bits).
#[test]
fn alloc_crosses_block_boundary() {
let arena = Arena::new();
let big = BLOCK_SIZE - 64;
let off1 = arena.alloc(big, 1).expect("ok");
assert_eq!(off1 >> BLOCK_SHIFT, 0);
let off2 = arena.alloc(128, 4).expect("ok");
assert_eq!(off2 >> BLOCK_SHIFT, 1);
}
// get_atomic_u32 on a 4-aligned slot supports atomic store/load round trips.
#[test]
fn atomic_u32_round_trip() {
let arena = Arena::new();
let off = arena.alloc(4, 4).expect("ok");
unsafe {
let atom = arena.get_atomic_u32(off);
atom.store(42, Ordering::Relaxed);
assert_eq!(atom.load(Ordering::Relaxed), 42);
}
}
// 8 threads x 1000 allocations: every handle must be distinct (sort + dedup
// leaves all 8000), i.e. the CAS loop never hands the same range out twice.
#[test]
fn concurrent_alloc() {
use std::sync::Arc;
let arena = Arc::new(Arena::new());
let handles: Vec<_> = (0..8)
.map(|_| {
let arena = Arc::clone(&arena);
std::thread::spawn(move || {
let mut offsets = Vec::new();
for _ in 0..1000 {
if let Some(off) = arena.alloc(64, 4) {
offsets.push(off);
}
}
offsets
})
})
.collect();
let mut all_offsets: Vec<u32> = Vec::new();
for h in handles {
all_offsets.extend(h.join().expect("thread ok"));
}
all_offsets.sort();
all_offsets.dedup();
assert_eq!(all_offsets.len(), 8000);
}
// Invalid requests are rejected up front: non-power-of-two alignment, zero
// size, and any size >= BLOCK_SIZE (allocations may not span blocks).
#[test]
fn alloc_invalid_alignment_returns_none() {
let arena = Arena::new();
assert!(arena.alloc(100, 3).is_none()); assert!(arena.alloc(0, 4).is_none()); assert!(arena.alloc(BLOCK_SIZE, 1).is_none()); assert!(arena.alloc(BLOCK_SIZE + 1, 1).is_none()); }
// Arena::default() must behave like Arena::new().
#[test]
fn default_impl() {
let arena = Arena::default();
let off = arena.alloc(8, 4).expect("should work");
assert!(off > 0);
}
// Force two blocks to be allocated so Drop's per-block dealloc loop runs
// over multiple non-null entries (checked under Miri/ASan, not asserted).
#[test]
fn drop_with_multiple_blocks() {
let arena = Arena::new();
let big = BLOCK_SIZE - 8;
let _ = arena.alloc(big, 1).expect("block 0");
let _ = arena.alloc(64, 4).expect("block 1");
}
// Pins the exact-fill policy: an allocation whose end would coincide with
// the block boundary must NOT be placed in that block (new_end < BLOCK_SIZE
// is strict); the cursor advances and the alloc lands in the next block,
// leaving the previous block's data untouched. Writes cursor directly to
// start at a known block boundary.
#[test]
fn exact_block_fill_does_not_corrupt() {
let arena = Arena::new();
arena.cursor.store(1 << BLOCK_SHIFT, Ordering::Relaxed);
let filler = BLOCK_SIZE - 4;
let f = arena.alloc(filler, 1).expect("filler");
assert_eq!(f >> BLOCK_SHIFT, 1, "filler should be in block 1");
unsafe {
let bytes = arena.get_bytes_mut(f, filler);
bytes[filler as usize - 1] = 0xAB;
}
let boundary = arena.alloc(4, 4).expect("boundary alloc");
assert_eq!(
boundary >> BLOCK_SHIFT,
2,
"exact-fill allocation must advance to the next block"
);
let next = arena.alloc(8, 4).expect("next alloc");
assert_eq!(
next >> BLOCK_SHIFT,
2,
"subsequent allocation must stay in the advanced block"
);
// The sentinel byte written at the end of block 1 must survive the
// boundary-crossing allocations above.
let read_sentinel = unsafe { arena.get_bytes(f, filler) };
assert_eq!(
read_sentinel[filler as usize - 1],
0xAB,
"block 1 data must not be corrupted by subsequent allocations"
);
}
}