#![allow(clippy::mut_from_ref)]
#![allow(clippy::expect_used)]
#![allow(unsafe_code)]
use crate::error::{OxiGdalError, Result};
use parking_lot::Mutex;
use std::alloc::{Layout, alloc, dealloc};
use std::cell::RefCell;
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
/// Default backing-buffer size for a new arena: 1 MiB.
pub const DEFAULT_ARENA_SIZE: usize = 1024 * 1024;
/// Lock-free counters describing arena usage.
///
/// All fields are updated with relaxed atomics: the values are advisory
/// metrics, not synchronization points.
#[derive(Debug, Default)]
pub struct ArenaStats {
    // Total number of successful allocations recorded.
    pub total_allocations: AtomicU64,
    // Bytes currently accounted as allocated (decremented on reset).
    pub bytes_allocated: AtomicUsize,
    // Number of arena resets recorded.
    pub resets: AtomicU64,
    // High-water mark of `bytes_allocated`.
    pub peak_usage: AtomicUsize,
}
impl ArenaStats {
    /// Creates a zeroed statistics block.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Records one allocation of `size` bytes and raises the peak-usage
    /// high-water mark if the new running total exceeds it.
    pub fn record_allocation(&self, size: usize) {
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        let before = self.bytes_allocated.fetch_add(size, Ordering::Relaxed);
        let now = before.saturating_add(size);
        // `fetch_max` performs atomically the same compare-exchange retry
        // loop the previous version spelled out by hand.
        self.peak_usage.fetch_max(now, Ordering::Relaxed);
    }

    /// Records a reset that released `bytes_freed` bytes.
    pub fn record_reset(&self, bytes_freed: usize) {
        self.resets.fetch_add(1, Ordering::Relaxed);
        self.bytes_allocated.fetch_sub(bytes_freed, Ordering::Relaxed);
    }

    /// Ratio of total allocations to peak usage; `0.0` before any usage.
    pub fn allocation_rate(&self) -> f64 {
        let total = self.total_allocations.load(Ordering::Relaxed) as f64;
        match self.peak_usage.load(Ordering::Relaxed) {
            0 => 0.0,
            peak => total / peak as f64,
        }
    }
}
/// Bump allocator over a single heap buffer.
pub struct Arena {
    // Start of the backing allocation; set once at construction.
    base: NonNull<u8>,
    // Current bump offset from `base`, in bytes.
    offset: AtomicUsize,
    // Total size of the backing allocation, in bytes.
    capacity: usize,
    // Shared usage counters.
    stats: Arc<ArenaStats>,
    // Alignment the backing buffer was allocated with; also the default
    // alignment used by `allocate`.
    alignment: usize,
}
impl Arena {
    /// Creates an arena with [`DEFAULT_ARENA_SIZE`] capacity.
    ///
    /// # Errors
    /// Returns an error if the backing buffer cannot be allocated.
    pub fn new() -> Result<Self> {
        Self::with_capacity(DEFAULT_ARENA_SIZE)
    }

    /// Creates an arena with the given capacity and 16-byte base alignment.
    ///
    /// # Errors
    /// Returns an error if `capacity` is zero or allocation fails.
    pub fn with_capacity(capacity: usize) -> Result<Self> {
        Self::with_capacity_and_alignment(capacity, 16)
    }

    /// Creates an arena with explicit capacity and base alignment.
    ///
    /// # Errors
    /// Returns an error if `capacity` is zero, `alignment` is not a power of
    /// two, or the backing buffer cannot be allocated.
    pub fn with_capacity_and_alignment(capacity: usize, alignment: usize) -> Result<Self> {
        if capacity == 0 {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Arena capacity must be non-zero".to_string(),
            ));
        }
        if !alignment.is_power_of_two() {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Alignment must be a power of 2".to_string(),
            ));
        }
        let layout = Layout::from_size_align(capacity, alignment)
            .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
        // SAFETY: `layout` has non-zero size (capacity != 0) and a valid
        // power-of-two alignment, as `alloc` requires.
        let base = unsafe {
            let ptr = alloc(layout);
            if ptr.is_null() {
                return Err(OxiGdalError::allocation_error(
                    "Failed to allocate arena".to_string(),
                ));
            }
            // SAFETY: null was just ruled out above.
            NonNull::new_unchecked(ptr)
        };
        Ok(Self {
            base,
            offset: AtomicUsize::new(0),
            capacity,
            stats: Arc::new(ArenaStats::new()),
            alignment,
        })
    }

    /// Allocates `size` bytes at the arena's default alignment.
    ///
    /// # Errors
    /// See [`Self::allocate_aligned`].
    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
        self.allocate_aligned(size, self.alignment)
    }

    /// Allocates `size` bytes aligned to `alignment` via a lock-free
    /// compare-exchange bump of the offset.
    ///
    /// # Errors
    /// Returns an error if `size` is zero, `alignment` is not a power of
    /// two, the aligned request overflows `usize`, or the arena lacks space.
    pub fn allocate_aligned(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        if size == 0 {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Allocation size must be non-zero".to_string(),
            ));
        }
        // Bug fix: `alignment == 0` previously underflowed `alignment - 1`,
        // and a non-power-of-two alignment produced a wrong mask. Validate
        // up front like the constructor does.
        if !alignment.is_power_of_two() {
            return Err(OxiGdalError::invalid_parameter(
                "parameter",
                "Alignment must be a power of 2".to_string(),
            ));
        }
        let mut current = self.offset.load(Ordering::Relaxed);
        loop {
            // Bug fix: `current + alignment - 1` and `aligned + size` were
            // unchecked; a pathological request could wrap around and pass
            // the capacity check with a bogus small offset.
            let aligned = current
                .checked_add(alignment - 1)
                .map(|v| v & !(alignment - 1))
                .ok_or_else(|| {
                    OxiGdalError::allocation_error("Arena allocation size overflow".to_string())
                })?;
            let new_offset = aligned.checked_add(size).ok_or_else(|| {
                OxiGdalError::allocation_error("Arena allocation size overflow".to_string())
            })?;
            if new_offset > self.capacity {
                return Err(OxiGdalError::allocation_error(format!(
                    "Arena exhausted: requested {}, available {}",
                    size,
                    self.capacity.saturating_sub(current)
                )));
            }
            match self.offset.compare_exchange_weak(
                current,
                new_offset,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    self.stats.record_allocation(size);
                    // SAFETY: `aligned + size <= capacity`, so the pointer
                    // stays inside the backing buffer and is non-null.
                    let ptr = unsafe { NonNull::new_unchecked(self.base.as_ptr().add(aligned)) };
                    return Ok(ptr);
                }
                // Another thread moved the offset; retry from its value.
                Err(actual) => current = actual,
            }
        }
    }

    /// Rewinds the bump offset to zero. Pointers handed out earlier become
    /// logically invalid; `Drop` impls of stored values are never run.
    pub fn reset(&self) {
        let freed = self.offset.swap(0, Ordering::Relaxed);
        self.stats.record_reset(freed);
    }

    /// Bytes currently consumed.
    pub fn usage(&self) -> usize {
        self.offset.load(Ordering::Relaxed)
    }

    /// Total capacity in bytes.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Bytes still available.
    pub fn available(&self) -> usize {
        self.capacity.saturating_sub(self.usage())
    }

    /// True when no bytes remain.
    pub fn is_exhausted(&self) -> bool {
        self.available() == 0
    }

    /// Shared handle to this arena's statistics.
    pub fn stats(&self) -> Arc<ArenaStats> {
        Arc::clone(&self.stats)
    }

    /// Allocates an uninitialized slice of `count` values of `T`.
    ///
    /// # Errors
    /// Fails if `count` is zero (or `T` is zero-sized), the byte size
    /// overflows `usize`, or the arena is exhausted.
    pub fn allocate_slice<T>(&self, count: usize) -> Result<&mut [T]> {
        // Bug fix: `count * size_of::<T>()` was unchecked and could wrap,
        // silently under-allocating the slice.
        let size = count
            .checked_mul(std::mem::size_of::<T>())
            .ok_or_else(|| {
                OxiGdalError::allocation_error("Arena slice size overflow".to_string())
            })?;
        let alignment = std::mem::align_of::<T>();
        let ptr = self.allocate_aligned(size, alignment)?;
        // SAFETY: the allocation is `count * size_of::<T>()` bytes, aligned
        // for `T`, and exclusively ours until the arena is reset.
        Ok(unsafe { std::slice::from_raw_parts_mut(ptr.as_ptr().cast::<T>(), count) })
    }

    /// Moves `value` into the arena and returns a mutable reference to it.
    /// The value's `Drop` impl is never invoked by the arena.
    ///
    /// # Errors
    /// Fails if `T` is zero-sized or the arena is exhausted.
    pub fn allocate_value<T>(&self, value: T) -> Result<&mut T> {
        let size = std::mem::size_of::<T>();
        let alignment = std::mem::align_of::<T>();
        let ptr = self.allocate_aligned(size, alignment)?;
        // SAFETY: the pointer is properly sized and aligned for `T`.
        unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            std::ptr::write(typed_ptr, value);
            Ok(&mut *typed_ptr)
        }
    }
}
impl Drop for Arena {
    fn drop(&mut self) {
        // SAFETY: `base` was allocated in `with_capacity_and_alignment` with
        // exactly this size/alignment pair, which was validated by
        // `Layout::from_size_align` at construction time.
        unsafe {
            let layout = Layout::from_size_align_unchecked(self.capacity, self.alignment);
            dealloc(self.base.as_ptr(), layout);
        }
    }
}
// SAFETY: the bump offset is an atomic and `base` is written only at
// construction, so sharing/sending the arena itself is sound.
// NOTE(review): `allocate_slice`/`allocate_value` hand out `&mut` data from
// `&self` (see the `clippy::mut_from_ref` allow); concurrent callers receive
// disjoint offsets, but calling `reset` while such borrows are live would
// alias them — confirm callers uphold this.
unsafe impl Send for Arena {}
unsafe impl Sync for Arena {}
/// Reusable pool of fixed-capacity arenas.
pub struct ArenaPool {
    // Idle arenas ready for checkout.
    available: Mutex<Vec<Arena>>,
    // Capacity used when the pool must create a fresh arena.
    capacity: usize,
    // Upper bound on retained arenas; extras are dropped on release.
    max_pool_size: usize,
    // Pool-level stats handle. NOTE(review): no pool method currently
    // records into these counters — verify whether that is intentional.
    stats: Arc<ArenaStats>,
}
impl ArenaPool {
    /// Creates a pool whose arenas hold `capacity` bytes each, retaining at
    /// most `max_pool_size` idle arenas.
    #[must_use]
    pub fn new(capacity: usize, max_pool_size: usize) -> Self {
        Self {
            available: Mutex::new(Vec::new()),
            capacity,
            max_pool_size,
            stats: Arc::new(ArenaStats::new()),
        }
    }

    /// Pool of [`DEFAULT_ARENA_SIZE`] arenas, retaining up to 16 of them.
    #[must_use]
    pub fn with_defaults() -> Self {
        Self::new(DEFAULT_ARENA_SIZE, 16)
    }

    /// Hands out an arena: a reset pooled one when available, otherwise a
    /// freshly allocated one.
    ///
    /// # Errors
    /// Fails only when a new arena must be created and allocation fails.
    pub fn acquire(&self) -> Result<Arena> {
        // The lock guard is a temporary, so it is released before the
        // (potentially slow) fresh allocation below.
        let recycled = self.available.lock().pop();
        match recycled {
            Some(arena) => {
                arena.reset();
                Ok(arena)
            }
            None => Arena::with_capacity(self.capacity),
        }
    }

    /// Returns an arena to the pool; drops it instead if the pool is full.
    pub fn release(&self, arena: Arena) {
        let mut pooled = self.available.lock();
        if pooled.len() >= self.max_pool_size {
            return; // pool full — `arena` is dropped here
        }
        pooled.push(arena);
    }

    /// Shared handle to the pool's statistics block.
    pub fn stats(&self) -> Arc<ArenaStats> {
        Arc::clone(&self.stats)
    }

    /// Number of idle arenas currently held.
    pub fn pool_size(&self) -> usize {
        self.available.lock().len()
    }

    /// Drops every idle arena.
    pub fn clear(&self) {
        self.available.lock().clear();
    }

    /// Alias for [`Self::acquire`].
    #[inline]
    pub fn checkout(&self) -> Result<Arena> {
        self.acquire()
    }

    /// Alias for [`Self::release`].
    #[inline]
    pub fn return_arena(&self, arena: Arena) {
        self.release(arena);
    }
}
impl Default for ArenaPool {
fn default() -> Self {
Self::with_defaults()
}
}
thread_local! {
    // Per-thread cached arena; populated lazily by `get_thread_arena`.
    static THREAD_ARENA: RefCell<Option<Arena>> = const { RefCell::new(None) };
}
/// Returns an arena for the current thread: the cached thread-local arena
/// (reset first) when one is present, otherwise a freshly created one.
///
/// The returned arena is owned by the caller and is not automatically put
/// back into the thread-local cache.
///
/// # Errors
/// Returns an error if a new arena must be created and allocation fails.
pub fn get_thread_arena() -> Result<Arena> {
    THREAD_ARENA.with(|slot| {
        // Bug fix: the previous implementation populated the cache and then
        // unconditionally returned `Arena::new()`, so it allocated TWO
        // arenas on first use and the cached one was never handed out.
        match slot.borrow_mut().take() {
            Some(cached) => {
                cached.reset();
                Ok(cached)
            }
            None => Arena::new(),
        }
    })
}
/// Resets the thread-local cached arena, if one exists.
///
/// # Errors
/// Currently infallible; always returns `Ok(())` (the `Result` is kept for
/// interface stability).
pub fn reset_thread_arena() -> Result<()> {
    THREAD_ARENA.with(|slot| {
        if let Some(cached) = slot.borrow().as_ref() {
            cached.reset();
        }
    });
    Ok(())
}
/// RAII scope marker that rewinds an arena's offset on drop, discarding
/// all allocations made while the guard was alive.
pub struct ArenaGuard<'a> {
    // Arena whose offset is restored on drop.
    arena: &'a Arena,
    // Offset captured when the guard was created.
    saved_offset: usize,
}
impl<'a> ArenaGuard<'a> {
    /// Captures the arena's current offset so it can be restored when the
    /// guard goes out of scope.
    pub fn new(arena: &'a Arena) -> Self {
        Self {
            saved_offset: arena.offset.load(Ordering::Relaxed),
            arena,
        }
    }

    /// Borrow of the guarded arena.
    #[must_use]
    pub fn arena(&self) -> &Arena {
        self.arena
    }
}
impl Drop for ArenaGuard<'_> {
    fn drop(&mut self) {
        // Restore the offset captured at guard creation, discarding every
        // allocation made since. NOTE(review): this stores unconditionally,
        // so allocations made by OTHER threads during the guard's lifetime
        // are discarded too — confirm guards are used single-threaded.
        self.arena
            .offset
            .store(self.saved_offset, Ordering::Relaxed);
    }
}
/// Growable vector whose storage lives inside an arena.
///
/// Growth allocates a new slab from the arena and abandons the old one;
/// the old slab is reclaimed only when the arena is reset.
pub struct ArenaVec<'a, T> {
    // Start of the current slab; null when `capacity == 0`.
    ptr: *mut T,
    // Number of initialized elements.
    len: usize,
    // Element capacity of the current slab.
    capacity: usize,
    // Arena that owns the backing storage.
    arena: &'a Arena,
}
// SAFETY: ArenaVec is the unique view over its slab, so it is as Send/Sync
// as its element type; the `&Arena` it holds is itself Send + Sync.
unsafe impl<T: Send> Send for ArenaVec<'_, T> {}
unsafe impl<T: Sync> Sync for ArenaVec<'_, T> {}
impl<'a, T: Copy> ArenaVec<'a, T> {
pub fn with_capacity_in(arena: &'a Arena, capacity: usize) -> Result<Self> {
if capacity == 0 {
return Ok(Self {
ptr: core::ptr::null_mut(),
len: 0,
capacity: 0,
arena,
});
}
let slice: &mut [T] = arena.allocate_slice(capacity)?;
Ok(Self {
ptr: slice.as_mut_ptr(),
len: 0,
capacity,
arena,
})
}
pub fn push(&mut self, value: T) -> Result<()> {
if self.len == self.capacity {
self.grow()?;
}
unsafe {
core::ptr::write(self.ptr.add(self.len), value);
}
self.len += 1;
Ok(())
}
#[must_use]
pub const fn len(&self) -> usize {
self.len
}
#[must_use]
pub const fn is_empty(&self) -> bool {
self.len == 0
}
#[must_use]
pub fn as_slice(&self) -> &[T] {
if self.len == 0 {
return &[];
}
unsafe { core::slice::from_raw_parts(self.ptr, self.len) }
}
fn grow(&mut self) -> Result<()> {
let new_cap = if self.capacity == 0 {
4
} else {
self.capacity * 2
};
let new_slab: &mut [T] = self.arena.allocate_slice(new_cap)?;
if self.len > 0 {
unsafe {
core::ptr::copy_nonoverlapping(self.ptr, new_slab.as_mut_ptr(), self.len);
}
}
self.ptr = new_slab.as_mut_ptr();
self.capacity = new_cap;
Ok(())
}
}
impl<T: core::fmt::Debug + Copy> core::fmt::Debug for ArenaVec<'_, T> {
    /// Formats like a standard `Vec`, e.g. `[1, 2, 3]`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let mut list = f.debug_list();
        for item in self.as_slice() {
            list.entry(item);
        }
        list.finish()
    }
}
#[cfg(test)]
#[allow(useless_ptr_null_checks)]
mod tests {
    use super::*;

    // Two allocations from an alignment-1 arena bump the offset by exactly
    // their sizes (no padding), so usage/available are exact.
    #[test]
    fn test_arena_basic() {
        let arena = Arena::with_capacity_and_alignment(1024, 1)
            .expect("Test setup failed: arena creation with custom alignment");
        let ptr1 = arena
            .allocate(100)
            .expect("Arena allocation should succeed in test");
        let ptr2 = arena
            .allocate(200)
            .expect("Arena allocation should succeed in test");
        assert!(!ptr1.as_ptr().is_null());
        assert!(!ptr2.as_ptr().is_null());
        assert_ne!(ptr1, ptr2);
        assert_eq!(arena.usage(), 300);
        assert_eq!(arena.available(), 724);
    }

    // reset() rewinds usage to zero and allows fresh allocations.
    #[test]
    fn test_arena_reset() {
        let arena = Arena::with_capacity(1024).expect("Test setup failed: arena creation");
        arena
            .allocate(500)
            .expect("Arena allocation should succeed in test");
        assert_eq!(arena.usage(), 500);
        arena.reset();
        assert_eq!(arena.usage(), 0);
        arena
            .allocate(300)
            .expect("Arena allocation should succeed after reset");
        assert_eq!(arena.usage(), 300);
    }

    // An allocation that would exceed capacity must fail, not wrap.
    #[test]
    fn test_arena_exhaustion() {
        let arena = Arena::with_capacity(100).expect("Test setup failed: arena creation");
        arena.allocate(50).expect("First allocation should succeed");
        arena
            .allocate(30)
            .expect("Second allocation should succeed");
        let result = arena.allocate(50);
        assert!(result.is_err());
    }

    // Released arenas are retained by the pool up to max_pool_size.
    #[test]
    fn test_arena_pool() {
        let pool = ArenaPool::new(1024, 4);
        let arena1 = pool.acquire().expect("Pool should acquire first arena");
        let arena2 = pool.acquire().expect("Pool should acquire second arena");
        assert_eq!(pool.pool_size(), 0);
        pool.release(arena1);
        pool.release(arena2);
        assert_eq!(pool.pool_size(), 2);
    }

    // Typed slice allocation is writable and has the requested length.
    #[test]
    fn test_arena_slice() {
        let arena = Arena::with_capacity(1024).expect("Test setup failed: arena creation");
        let slice: &mut [u32] = arena
            .allocate_slice(10)
            .expect("Arena slice allocation should succeed");
        assert_eq!(slice.len(), 10);
        slice[0] = 42;
        assert_eq!(slice[0], 42);
    }

    // allocate_value stores the value and returns a mutable reference.
    #[test]
    fn test_arena_value() {
        let arena = Arena::with_capacity(1024).expect("Test setup failed: arena creation");
        let value = arena
            .allocate_value(42u32)
            .expect("Arena value allocation should succeed");
        assert_eq!(*value, 42);
        *value = 100;
        assert_eq!(*value, 100);
    }

    // Dropping an ArenaGuard rolls usage back to the captured offset.
    #[test]
    fn test_arena_guard() {
        let arena = Arena::with_capacity_and_alignment(1024, 1)
            .expect("Test setup failed: arena creation with custom alignment");
        arena
            .allocate(100)
            .expect("Initial allocation should succeed");
        assert_eq!(arena.usage(), 100);
        {
            let _guard = ArenaGuard::new(&arena);
            arena
                .allocate(200)
                .expect("Allocation within guard should succeed");
            assert_eq!(arena.usage(), 300);
        }
        assert_eq!(arena.usage(), 100);
    }

    // checkout/return_arena aliases behave exactly like acquire/release.
    #[test]
    fn test_arena_pool_checkout_return_reuse() {
        let pool = ArenaPool::new(1024, 4);
        let a1 = pool.checkout().expect("checkout first arena");
        assert_eq!(pool.pool_size(), 0);
        pool.return_arena(a1);
        assert_eq!(pool.pool_size(), 1);
        let a2 = pool.checkout().expect("checkout second arena from pool");
        assert_eq!(pool.pool_size(), 0);
        pool.return_arena(a2);
    }

    // Pushes within the initial capacity preserve order.
    #[test]
    fn test_arena_vec_push_and_slice() {
        let arena = Arena::with_capacity(4096).expect("arena for ArenaVec test");
        let mut v = ArenaVec::<u32>::with_capacity_in(&arena, 4).expect("ArenaVec creation");
        assert!(v.is_empty());
        v.push(10).expect("push 10");
        v.push(20).expect("push 20");
        v.push(30).expect("push 30");
        assert_eq!(v.len(), 3);
        assert_eq!(v.as_slice(), &[10, 20, 30]);
    }

    // Pushing past the initial capacity triggers grow() and keeps contents.
    #[test]
    fn test_arena_vec_grows_beyond_initial_capacity() {
        let arena = Arena::with_capacity(65536).expect("arena large enough to grow");
        let mut v = ArenaVec::<u8>::with_capacity_in(&arena, 2).expect("ArenaVec small cap");
        for i in 0u8..8 {
            v.push(i).expect("push should succeed");
        }
        assert_eq!(v.len(), 8);
        assert_eq!(v.as_slice(), &[0, 1, 2, 3, 4, 5, 6, 7]);
    }

    // A zero-capacity ArenaVec allocates lazily on first push.
    #[test]
    fn test_arena_vec_zero_initial_capacity() {
        let arena = Arena::with_capacity(4096).expect("arena");
        let mut v = ArenaVec::<i32>::with_capacity_in(&arena, 0).expect("zero-cap ArenaVec");
        assert!(v.is_empty());
        v.push(42)
            .expect("push into zero-cap ArenaVec triggers grow");
        assert_eq!(v.as_slice(), &[42]);
    }
}