use std::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
const BLOCK_SIZE: usize = crate::constants::ARENA_BLOCK_SIZE;
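
/// One block of arena memory: a raw buffer plus a link to the previously
/// installed block, so every block stays reachable for cleanup.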
struct Block {
    data: *mut u8,
    /// Size in bytes of the buffer behind `data`. Regular blocks use
    /// `BLOCK_SIZE`; oversized blocks record the requested size so `Drop`
    /// can rebuild the exact layout for `dealloc`.
    size: usize,
    next: AtomicPtr<Block>,
}

impl Block {
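    /// Allocates a `BLOCK_SIZE`-byte buffer and returns a heap-allocated
    /// `Block` describing it as a raw pointer, ready to be stored in an
    /// `AtomicPtr`.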
    fn new() -> *mut Self {
        unsafe {
            let layout = Layout::from_size_align(BLOCK_SIZE, 8).unwrap();
            let data = alloc(layout);
            if data.is_null() {
                handle_alloc_error(layout);
            }
            let block = Box::new(Self {
                data,
                size: BLOCK_SIZE,
                next: AtomicPtr::new(ptr::null_mut()),
            });
            Box::into_raw(block)
        }
    }
}
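
// Dropping a `Block` frees only its own buffer; the chain of other blocks is
// released by `Arena::drop`, which walks the `next` pointers.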
impl Drop for Block {
    fn drop(&mut self) {
        unsafe {
            // Rebuild the same layout the buffer was allocated with; oversized
            // blocks are larger than `BLOCK_SIZE`.
            let layout = Layout::from_size_align(self.size, 8).unwrap();
            dealloc(self.data, layout);
        }
    }
}
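
/// A lock-free bump arena. Allocations are carved out of the current block by
/// atomically advancing an offset; when a block fills up, a fresh block is
/// installed and the old one is kept on a linked list until the arena drops.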
pub struct Arena {
    /// Total bytes handed out to callers (aligned request sizes).
    memory_usage: AtomicUsize,
    /// The block new allocations are currently carved from.
    current_block: AtomicPtr<Block>,
    /// Bump offset into `current_block`.
    current_block_offset: AtomicUsize,
}

impl Arena {
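    /// Creates an arena with a single empty block already installed.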
    pub fn new() -> Self {
        Self {
            memory_usage: AtomicUsize::new(0),
            current_block: AtomicPtr::new(Block::new()),
            current_block_offset: AtomicUsize::new(0),
        }
    }
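
    /// Allocates `size` bytes (rounded up to a multiple of 8) from the arena
    /// and returns a pointer into arena-owned memory. Requests larger than
    /// `BLOCK_SIZE` get a dedicated block; otherwise the current block is
    /// bump-allocated and replaced once it fills up.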
    pub fn allocate(&self, size: usize) -> *mut u8 {
        // Round up to a multiple of 8 so every returned offset stays 8-aligned
        // (blocks themselves are allocated with 8-byte alignment).
        let aligned_size = (size + 7) & !7;
        loop {
            let block = self.current_block.load(Ordering::Acquire);
            // Optimistically claim a range of the current block.
            let offset = self
                .current_block_offset
                .fetch_add(aligned_size, Ordering::SeqCst);
            if offset + aligned_size <= BLOCK_SIZE {
                // Fast path: the claimed range fits in the current block.
                self.memory_usage.fetch_add(aligned_size, Ordering::Relaxed);
                unsafe {
                    return (*block).data.add(offset);
                }
            }
            if aligned_size > BLOCK_SIZE {
                // Oversized request: give it a dedicated block and splice that
                // block into the chain behind the current one.
                unsafe {
                    let layout = Layout::from_size_align(aligned_size, 8).unwrap();
                    let huge_data = alloc(layout);
                    if huge_data.is_null() {
                        handle_alloc_error(layout);
                    }
                    let new_block = Box::into_raw(Box::new(Block {
                        data: huge_data,
                        size: aligned_size,
                        next: AtomicPtr::new(ptr::null_mut()),
                    }));
                    self.memory_usage.fetch_add(aligned_size, Ordering::Relaxed);
                    // Link the oversized block in *after* the current block,
                    // preserving whatever already followed it so no existing
                    // blocks are dropped from the chain.
                    let mut curr_next = (*block).next.load(Ordering::Acquire);
                    loop {
                        (*new_block).next.store(curr_next, Ordering::Relaxed);
                        match (*block).next.compare_exchange_weak(
                            curr_next,
                            new_block,
                            Ordering::SeqCst,
                            Ordering::Relaxed,
                        ) {
                            Ok(_) => break,
                            Err(actual_next) => curr_next = actual_next,
                        }
                    }
                    return huge_data;
                }
            }
            // Slow path: the current block is full. Try to install a fresh
            // block; only one thread wins the CAS, losers free theirs and retry.
            let new_block = Block::new();
            match self.current_block.compare_exchange(
                block,
                new_block,
                Ordering::SeqCst,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    unsafe {
                        // Chain the new head back to the previous block so it
                        // stays reachable for `Drop`.
                        (*new_block).next.store(block, Ordering::Release);
                    }
                    self.current_block_offset
                        .store(aligned_size, Ordering::SeqCst);
                    self.memory_usage.fetch_add(aligned_size, Ordering::Relaxed);
                    unsafe {
                        return (*new_block).data;
                    }
                }
                Err(_) => {
                    // Another thread already installed a new block; discard ours
                    // (its `Drop` frees the buffer) and retry.
                    unsafe {
                        let _ = Box::from_raw(new_block);
                    }
                }
            }
        }
    }
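
    /// Moves `value` into the arena and returns a pointer to it. Note that the
    /// value's destructor is never run; dropping the arena only reclaims the
    /// raw memory.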
    #[allow(dead_code)]
    pub fn allocate_obj<T>(&self, value: T) -> *mut T {
        // The arena only guarantees 8-byte alignment.
        debug_assert!(std::mem::align_of::<T>() <= 8);
        let size = std::mem::size_of::<T>();
        if size == 0 {
            // Zero-sized types need no storage; a dangling pointer is valid.
            return ptr::NonNull::dangling().as_ptr();
        }
        let ptr = self.allocate(size) as *mut T;
        unsafe {
            ptr::write(ptr, value);
        }
        ptr
    }
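
    /// Returns the total number of bytes handed out so far (aligned request
    /// sizes; unused space at the end of blocks is not counted).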
    pub fn memory_usage(&self) -> usize {
        self.memory_usage.load(Ordering::Relaxed)
    }
}
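
// Frees every block in the chain. Values stored in the arena are not
// individually dropped; only the raw memory is released.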
impl Drop for Arena {
    fn drop(&mut self) {
        let mut current = self.current_block.load(Ordering::Relaxed);
        while !current.is_null() {
            unsafe {
                let next = (*current).next.load(Ordering::Relaxed);
                let _ = Box::from_raw(current);
                current = next;
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn test_basic_allocation() {
        let arena = Arena::new();
        let p1 = arena.allocate_obj(42u64);
        let p2 = arena.allocate_obj(100u64);
        unsafe {
            assert_eq!(*p1, 42);
            assert_eq!(*p2, 100);
            assert_eq!((p2 as usize) - (p1 as usize), 8);
        }
        assert_eq!(arena.memory_usage(), 16);
    }

    #[test]
    fn test_block_overflow() {
        let arena = Arena::new();
        // Note: the pointer-difference assertions assume ARENA_BLOCK_SIZE is
        // large enough to hold several 1 MiB chunks within one block.
        let chunk_size = 1024 * 1024;
        let p1 = arena.allocate(chunk_size);
        let p2 = arena.allocate(chunk_size);
        let _p3 = arena.allocate(chunk_size);
        let p4 = arena.allocate(chunk_size);
        assert_eq!(arena.memory_usage(), 4 * chunk_size);
        let p5 = arena.allocate(100);
        // 100 bytes rounds up to 104 under 8-byte alignment.
        assert_eq!(arena.memory_usage(), (4 * chunk_size) + 104);
        assert_eq!((p2 as usize) - (p1 as usize), chunk_size);
        let diff = (p5 as isize - p4 as isize).abs() as usize;
        assert!(diff >= chunk_size);
    }

    #[test]
    fn test_concurrent_allocation() {
        let arena = Arc::new(Arena::new());
        let mut handles = vec![];
        for _ in 0..10 {
            let arena_clone = arena.clone();
            handles.push(thread::spawn(move || {
                for i in 0..100_000 {
                    arena_clone.allocate_obj(i as u64);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }
        // 10 threads * 100_000 allocations * 8 bytes each.
        assert_eq!(arena.memory_usage(), 8_000_000);
    }
}