use crate::error::{Result, ZiporaError};
use std::alloc::{Layout, alloc, dealloc};
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
/// Lock-free bump (arena) allocator over a single heap-allocated buffer.
///
/// Allocation advances an atomic offset via CAS; individual allocations are
/// never freed. Memory is reclaimed only via `reset` or when the allocator
/// is dropped.
pub struct BumpAllocator {
// Base of the backing buffer; allocated in `new` with 8-byte alignment.
buffer: NonNull<u8>,
// Total size of the buffer in bytes (non-zero; `new` rejects zero).
capacity: usize,
// Offset of the next free byte; advanced atomically in `alloc_bytes`.
current: AtomicUsize,
// Total bytes handed out (statistics only; alignment padding not counted).
allocated_bytes: AtomicU64,
}
impl BumpAllocator {
    /// Creates a bump allocator backed by a single heap buffer of `capacity`
    /// bytes with 8-byte base alignment.
    ///
    /// # Errors
    /// Returns an error if `capacity` is zero, the layout is invalid, or the
    /// system allocation fails.
    pub fn new(capacity: usize) -> Result<Self> {
        if capacity == 0 {
            return Err(ZiporaError::invalid_data("capacity cannot be zero"));
        }
        let layout = Layout::from_size_align(capacity, 8)
            .map_err(|_| ZiporaError::invalid_data("invalid layout for bump allocator"))?;
        // SAFETY: `layout` has non-zero size (capacity > 0 checked above).
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(ZiporaError::out_of_memory(capacity));
        }
        Ok(Self {
            // SAFETY: `ptr` was just verified to be non-null.
            buffer: unsafe { NonNull::new_unchecked(ptr) },
            capacity,
            current: AtomicUsize::new(0),
            allocated_bytes: AtomicU64::new(0),
        })
    }

    /// Allocates uninitialized storage for one `T`.
    ///
    /// # Errors
    /// Fails for zero-sized `T` (zero-byte requests are rejected) or when the
    /// buffer is exhausted.
    pub fn alloc<T>(&self) -> Result<NonNull<T>> {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();
        self.alloc_bytes(size, align).map(|ptr| ptr.cast())
    }

    /// Allocates uninitialized storage for `count` elements of `T`.
    ///
    /// # Errors
    /// Fails when the total byte size overflows `usize`, is zero, or does not
    /// fit in the remaining buffer.
    pub fn alloc_slice<T>(&self, count: usize) -> Result<NonNull<[T]>> {
        // checked_mul: a huge `count` must not wrap into a tiny allocation
        // (the unchecked multiply would silently wrap in release builds).
        let size = std::mem::size_of::<T>()
            .checked_mul(count)
            .ok_or_else(|| ZiporaError::invalid_data("slice size overflows usize"))?;
        let align = std::mem::align_of::<T>();
        let ptr = self.alloc_bytes(size, align)?;
        let slice_ptr = std::ptr::slice_from_raw_parts_mut(ptr.as_ptr() as *mut T, count);
        // SAFETY: `ptr` came from a successful allocation, hence is non-null.
        Ok(unsafe { NonNull::new_unchecked(slice_ptr) })
    }

    /// Allocates `size` bytes with alignment `align` using a lock-free CAS
    /// loop.
    ///
    /// Alignment is applied to the absolute address rather than the buffer
    /// offset: the buffer itself is only guaranteed 8-byte aligned, so
    /// rounding the offset alone would return misaligned pointers whenever
    /// `align > 8`.
    ///
    /// # Errors
    /// Fails when `size` is zero, `align` is not a power of two, the offset
    /// arithmetic overflows, or the remaining space cannot satisfy the
    /// request.
    pub fn alloc_bytes(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
        if size == 0 {
            return Err(ZiporaError::invalid_data("allocation size cannot be zero"));
        }
        if !align.is_power_of_two() {
            return Err(ZiporaError::invalid_data(
                "alignment must be a power of two",
            ));
        }
        let base = self.buffer.as_ptr() as usize;
        loop {
            let current = self.current.load(Ordering::Acquire);
            // `base + current` cannot overflow: `current <= capacity` is an
            // invariant and `[base, base + capacity)` is a live allocation.
            let addr = base + current;
            // Round the address up to `align`, then translate back to an
            // offset; checked_add guards against pathological `align` values.
            let aligned_offset = match addr.checked_add(align - 1) {
                Some(v) => (v & !(align - 1)) - base,
                None => return Err(ZiporaError::out_of_memory(size)),
            };
            let new_offset = match aligned_offset.checked_add(size) {
                Some(v) => v,
                None => return Err(ZiporaError::out_of_memory(size)),
            };
            if new_offset > self.capacity {
                return Err(ZiporaError::out_of_memory(size));
            }
            match self.current.compare_exchange_weak(
                current,
                new_offset,
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // Statistics only; alignment padding is deliberately not
                    // counted.
                    self.allocated_bytes
                        .fetch_add(size as u64, Ordering::Relaxed);
                    // SAFETY: `aligned_offset + size <= capacity`, so the
                    // resulting pointer stays inside the buffer.
                    let ptr = unsafe { self.buffer.as_ptr().add(aligned_offset) };
                    // SAFETY: base is non-null, so base + offset is too.
                    return Ok(unsafe { NonNull::new_unchecked(ptr) });
                }
                Err(_) => {
                    // Another thread won the race; back off briefly and retry.
                    std::hint::spin_loop();
                    continue;
                }
            }
        }
    }

    /// Rewinds the allocator to empty so the buffer can be reused.
    ///
    /// # Safety
    /// The caller must guarantee that no pointer previously handed out by
    /// this allocator is dereferenced afterwards, since the memory will be
    /// handed out again.
    pub unsafe fn reset(&self) {
        self.current.store(0, Ordering::Release);
        self.allocated_bytes.store(0, Ordering::Relaxed);
    }

    /// Total bytes handed out so far (excluding alignment padding).
    pub fn allocated_bytes(&self) -> u64 {
        self.allocated_bytes.load(Ordering::Relaxed)
    }

    /// Total size of the backing buffer in bytes.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Bytes not yet consumed (ignores alignment padding that future
    /// requests may still need).
    pub fn remaining_bytes(&self) -> usize {
        self.capacity - self.current.load(Ordering::Relaxed)
    }

    /// Advisory check of whether a `size`/`align` request could succeed right
    /// now; a concurrent allocation may invalidate the answer immediately.
    /// Mirrors `alloc_bytes`: zero sizes and non-power-of-two alignments
    /// report `false`.
    pub fn can_allocate(&self, size: usize, align: usize) -> bool {
        if size == 0 || !align.is_power_of_two() {
            return false;
        }
        let base = self.buffer.as_ptr() as usize;
        let addr = base + self.current.load(Ordering::Relaxed);
        match addr.checked_add(align - 1) {
            Some(v) => {
                let aligned_offset = (v & !(align - 1)) - base;
                aligned_offset
                    .checked_add(size)
                    .map_or(false, |end| end <= self.capacity)
            }
            None => false,
        }
    }
}
// SAFETY: the buffer pointer is owned exclusively by this allocator, and all
// mutation of shared state (`current`, `allocated_bytes`) goes through
// atomics, so moving or sharing the allocator across threads is sound.
unsafe impl Send for BumpAllocator {}
unsafe impl Sync for BumpAllocator {}
impl Drop for BumpAllocator {
    /// Releases the backing buffer using the same layout as `new`.
    fn drop(&mut self) {
        if self.capacity == 0 {
            return;
        }
        // Rebuild the allocation layout; this mirrors `new`, so it cannot
        // actually fail for a successfully constructed allocator.
        if let Ok(layout) = Layout::from_size_align(self.capacity, 8) {
            // SAFETY: `buffer` was obtained from `alloc` with this exact
            // layout and has not been deallocated before.
            unsafe { dealloc(self.buffer.as_ptr(), layout) };
        }
    }
}
/// Owning wrapper around a [`BumpAllocator`] that supports rollback scopes.
pub struct BumpArena {
// Underlying allocator; its buffer is freed when the arena drops.
allocator: BumpAllocator,
// Offset captured at construction; always 0 for arenas built via `new`.
initial_offset: usize,
}
impl BumpArena {
    /// Creates an arena with `capacity` bytes of backing storage.
    pub fn new(capacity: usize) -> Result<Self> {
        Ok(Self {
            allocator: BumpAllocator::new(capacity)?,
            initial_offset: 0,
        })
    }

    /// Opens a scope that rolls the arena back to its current position when
    /// the scope is dropped.
    pub fn scope(&self) -> BumpScope<'_> {
        let inner = &self.allocator;
        BumpScope {
            allocator: inner,
            initial_offset: inner.current.load(Ordering::Relaxed),
            initial_allocated_bytes: inner.allocated_bytes(),
        }
    }

    /// Allocates storage for a single `T` from the arena.
    pub fn alloc<T>(&self) -> Result<NonNull<T>> {
        self.allocator.alloc::<T>()
    }

    /// Allocates storage for `count` elements of `T` from the arena.
    pub fn alloc_slice<T>(&self, count: usize) -> Result<NonNull<[T]>> {
        self.allocator.alloc_slice::<T>(count)
    }

    /// Allocates `size` raw bytes aligned to `align` from the arena.
    pub fn alloc_bytes(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
        self.allocator.alloc_bytes(size, align)
    }

    /// Snapshot of the arena's usage counters.
    pub fn stats(&self) -> BumpStats {
        let inner = &self.allocator;
        BumpStats {
            allocated_bytes: inner.allocated_bytes(),
            capacity: inner.capacity(),
            remaining_bytes: inner.remaining_bytes(),
        }
    }
}
impl Drop for BumpArena {
    /// Rewinds the allocator to the offset captured at construction.
    ///
    /// The original code called `reset()` (which stores 0) and then
    /// immediately overwrote `current` with `initial_offset` — a redundant
    /// double store, since `initial_offset` is always 0 for arenas built via
    /// `new`. A single pair of stores has the same net effect. This is
    /// mostly defensive anyway: the arena owns its allocator, which is
    /// dropped (freeing the buffer) right after this runs.
    fn drop(&mut self) {
        self.allocator.current.store(self.initial_offset, Ordering::Release);
        self.allocator.allocated_bytes.store(0, Ordering::Relaxed);
    }
}
/// RAII scope over a [`BumpAllocator`]: on drop, rewinds the allocator to
/// the position it had when the scope was created, discarding everything
/// allocated in between.
pub struct BumpScope<'a> {
// Allocator the scope borrows from (and rewinds on drop).
allocator: &'a BumpAllocator,
// `current` offset captured at scope creation; restored on drop.
initial_offset: usize,
// `allocated_bytes` counter captured at scope creation; restored on drop.
initial_allocated_bytes: u64,
}
impl<'a> BumpScope<'a> {
    /// Allocates storage for a single `T` from the underlying allocator.
    pub fn alloc<T>(&self) -> Result<NonNull<T>> {
        self.allocator.alloc::<T>()
    }

    /// Allocates storage for `count` elements of `T`.
    pub fn alloc_slice<T>(&self, count: usize) -> Result<NonNull<[T]>> {
        self.allocator.alloc_slice::<T>(count)
    }

    /// Allocates `size` raw bytes aligned to `align`.
    pub fn alloc_bytes(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
        self.allocator.alloc_bytes(size, align)
    }

    /// Snapshot of the underlying allocator's usage counters.
    pub fn stats(&self) -> BumpStats {
        let inner = self.allocator;
        BumpStats {
            allocated_bytes: inner.allocated_bytes(),
            capacity: inner.capacity(),
            remaining_bytes: inner.remaining_bytes(),
        }
    }
}
impl<'a> Drop for BumpScope<'a> {
// Roll the allocator back to the position captured at scope creation,
// discarding every allocation made while the scope was alive.
// NOTE(review): the rewind is unconditional — if other threads allocate, or
// nested scopes are dropped out of creation order, while this scope is
// alive, their memory is reclaimed too and may be handed out again. Confirm
// callers uphold strictly nested, single-context scope usage.
fn drop(&mut self) {
self.allocator.current.store(self.initial_offset, Ordering::Relaxed);
self.allocator
.allocated_bytes
.store(self.initial_allocated_bytes, Ordering::Relaxed);
}
}
/// Point-in-time usage statistics for a bump allocator or arena.
#[derive(Debug, Clone)]
pub struct BumpStats {
    /// Bytes handed out so far (alignment padding not counted).
    pub allocated_bytes: u64,
    /// Total size of the backing buffer in bytes.
    pub capacity: usize,
    /// Bytes still unconsumed at the time of the snapshot.
    pub remaining_bytes: usize,
}

impl BumpStats {
    /// Fraction of the buffer consumed.
    ///
    /// Returns `0.0` for a zero capacity instead of producing NaN/inf from a
    /// division by zero — the allocators reject zero capacity, but the
    /// fields are public, so callers can construct such a value directly.
    pub fn utilization(&self) -> f64 {
        if self.capacity == 0 {
            0.0
        } else {
            self.allocated_bytes as f64 / self.capacity as f64
        }
    }

    /// True when more than 90% of the buffer has been consumed.
    pub fn is_nearly_full(&self) -> bool {
        self.utilization() > 0.9
    }
}
/// Fixed-capacity vector whose element storage lives inside a
/// [`BumpAllocator`].
///
/// The backing memory is never returned to the allocator (bump allocators do
/// not free individual allocations); dropping the vector only drops the
/// initialized elements.
pub struct BumpVec<'a, T> {
// Start of the element storage; points into the allocator's buffer.
ptr: NonNull<T>,
// Number of initialized elements (invariant: len <= capacity).
len: usize,
// Fixed number of element slots reserved at construction.
capacity: usize,
// Held only to tie the storage lifetime to the allocator borrow.
#[allow(dead_code)]
allocator: &'a BumpAllocator,
}
impl<'a, T> BumpVec<'a, T> {
    /// Creates a vector with fixed `capacity` element slots allocated from
    /// `allocator`. The capacity can never grow afterwards.
    pub fn new_in(allocator: &'a BumpAllocator, capacity: usize) -> Result<Self> {
        if capacity == 0 {
            return Err(ZiporaError::invalid_data("capacity cannot be zero"));
        }
        let storage = allocator.alloc_slice::<T>(capacity)?;
        Ok(Self {
            ptr: storage.cast(),
            len: 0,
            capacity,
            allocator,
        })
    }

    /// Appends `item`; fails when all slots are in use.
    #[inline]
    pub fn push(&mut self, item: T) -> Result<()> {
        if self.len >= self.capacity {
            return Err(ZiporaError::invalid_data("bump vector capacity exceeded"));
        }
        // SAFETY: `len < capacity`, so this slot lies inside the allocation
        // and is not yet initialized.
        unsafe { std::ptr::write(self.ptr.as_ptr().add(self.len), item) };
        self.len += 1;
        Ok(())
    }

    /// Removes and returns the last element, or `None` when empty.
    pub fn pop(&mut self) -> Option<T> {
        match self.len {
            0 => None,
            n => {
                self.len = n - 1;
                // SAFETY: slot `n - 1` was initialized by a prior `push` and
                // is no longer tracked by `len`, so reading it out is sound.
                Some(unsafe { std::ptr::read(self.ptr.as_ptr().add(n - 1)) })
            }
        }
    }

    /// Number of initialized elements.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// True when the vector holds no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Fixed number of element slots.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Immutable view of the initialized elements.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: the first `len` slots are initialized and exclusively
        // owned by this vector for the borrow's duration.
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    /// Mutable view of the initialized elements.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: same invariants as `as_slice`, plus `&mut self` guarantees
        // unique access.
        unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }
}
impl<'a, T> Drop for BumpVec<'a, T> {
    /// Drops the initialized elements in index order. The storage itself
    /// belongs to the bump allocator and is reclaimed only on reset/drop of
    /// the allocator.
    fn drop(&mut self) {
        // SAFETY: exactly the first `len` slots are initialized, and `&mut
        // self` gives unique access; dropping the slice drops each element
        // in place, front to back.
        unsafe {
            std::ptr::drop_in_place(std::slice::from_raw_parts_mut(
                self.ptr.as_ptr(),
                self.len,
            ));
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Constructor invariants: fresh allocator reports full capacity and no usage.
#[test]
fn test_bump_allocator_creation() {
let allocator = BumpAllocator::new(4096).unwrap();
assert_eq!(allocator.capacity(), 4096);
assert_eq!(allocator.allocated_bytes(), 0);
assert_eq!(allocator.remaining_bytes(), 4096);
}
// Two typed allocations must not alias and must advance the counters.
#[test]
fn test_bump_allocation() {
let allocator = BumpAllocator::new(4096).unwrap();
let ptr1 = allocator.alloc::<u64>().unwrap();
let ptr2 = allocator.alloc::<u64>().unwrap();
assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
assert!(allocator.allocated_bytes() >= 16); assert!(allocator.remaining_bytes() < 4096);
}
// Slice allocation returns writable, correctly sized storage.
#[test]
fn test_bump_slice_allocation() {
let allocator = BumpAllocator::new(4096).unwrap();
let mut slice_ptr = allocator.alloc_slice::<u32>(10).unwrap();
// SAFETY (test): memory was just allocated for exactly 10 u32s.
let slice = unsafe { slice_ptr.as_mut() };
assert_eq!(slice.len(), 10);
for (i, item) in slice.iter_mut().enumerate() {
*item = i as u32;
}
for (i, item) in slice.iter().enumerate() {
assert_eq!(*item, i as u32);
}
}
// A u64 allocated after a u8 must still come back 8-byte aligned.
#[test]
fn test_bump_alignment() {
let allocator = BumpAllocator::new(4096).unwrap();
let _ptr1 = allocator.alloc::<u8>().unwrap();
let ptr2 = allocator.alloc::<u64>().unwrap();
assert_eq!(ptr2.as_ptr() as usize % 8, 0);
}
// A 24-byte buffer holds exactly three u64s; the fourth must fail.
#[test]
fn test_bump_exhaustion() {
let allocator = BumpAllocator::new(24).unwrap();
let _ptr1 = allocator.alloc::<u64>().unwrap(); let _ptr2 = allocator.alloc::<u64>().unwrap(); let _ptr3 = allocator.alloc::<u64>().unwrap();
let result = allocator.alloc::<u64>();
assert!(result.is_err(), "Should fail to allocate when exhausted");
}
// reset() rewinds both the offset and the statistics counter.
#[test]
fn test_bump_reset() {
let allocator = BumpAllocator::new(4096).unwrap();
let _ptr1 = allocator.alloc::<u64>().unwrap();
let _ptr2 = allocator.alloc::<u64>().unwrap();
assert!(allocator.allocated_bytes() > 0);
assert!(allocator.remaining_bytes() < 4096);
// SAFETY (test): no pointer handed out above is used after this point.
unsafe {
allocator.reset();
}
assert_eq!(allocator.allocated_bytes(), 0);
assert_eq!(allocator.remaining_bytes(), 4096);
}
// Arena delegates allocation and reports sane statistics.
#[test]
fn test_bump_arena() {
let arena = BumpArena::new(4096).unwrap();
let ptr1 = arena.alloc::<u64>().unwrap();
let ptr2 = arena.alloc::<u64>().unwrap();
assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
let stats = arena.stats();
assert!(stats.allocated_bytes >= 16);
assert!(stats.utilization() > 0.0);
assert!(!stats.is_nearly_full());
}
// Dropping a scope rewinds the allocator to the captured offset (0 here).
// The scope is built field-by-field since this module can access the
// parent module's private fields.
#[test]
fn test_bump_scope() {
let allocator = BumpAllocator::new(4096).unwrap();
let initial_allocated = allocator.allocated_bytes();
{
let scope = BumpScope {
allocator: &allocator,
initial_offset: allocator.current.load(Ordering::Relaxed),
initial_allocated_bytes: allocator.allocated_bytes(),
};
let _ptr1 = scope.alloc::<u64>().unwrap();
let _ptr2 = scope.alloc::<u64>().unwrap();
assert!(allocator.allocated_bytes() > initial_allocated);
}
assert_eq!(allocator.current.load(Ordering::Relaxed), 0);
}
// Basic push/pop/slice behavior of the fixed-capacity vector.
#[test]
fn test_bump_vec() {
let allocator = BumpAllocator::new(4096).unwrap();
let mut vec = BumpVec::new_in(&allocator, 10).unwrap();
assert_eq!(vec.len(), 0);
assert!(vec.is_empty());
assert_eq!(vec.capacity(), 10);
vec.push(42).unwrap();
vec.push(84).unwrap();
assert_eq!(vec.len(), 2);
assert!(!vec.is_empty());
let slice = vec.as_slice();
assert_eq!(slice[0], 42);
assert_eq!(slice[1], 84);
let popped = vec.pop().unwrap();
assert_eq!(popped, 84);
assert_eq!(vec.len(), 1);
}
// can_allocate reflects remaining space before and after an allocation.
#[test]
fn test_can_allocate() {
let allocator = BumpAllocator::new(64).unwrap();
assert!(allocator.can_allocate(8, 8));
assert!(allocator.can_allocate(64, 1));
assert!(!allocator.can_allocate(65, 1));
let _ptr = allocator.alloc::<u64>().unwrap();
assert!(allocator.can_allocate(8, 8));
assert!(!allocator.can_allocate(64, 1));
}
// Degenerate parameters are rejected: zero capacity, zero size,
// non-power-of-two alignment.
#[test]
fn test_invalid_parameters() {
assert!(BumpAllocator::new(0).is_err());
let allocator = BumpAllocator::new(1024).unwrap();
assert!(allocator.alloc_bytes(0, 8).is_err());
assert!(allocator.alloc_bytes(8, 3).is_err()); }
}