memkit 0.1.1-beta.1

Deterministic, intent-driven memory allocation for systems that require predictable performance.
Documentation
//! Fast arena - zero-overhead bump allocator for maximum performance.
//!
//! This is the fastest allocation path in memkit, designed to compete with
//! bumpalo by eliminating TLS and RefCell overhead.

use std::alloc::{alloc, dealloc, Layout};
use std::cell::Cell;
use std::marker::PhantomData;
use std::ptr::NonNull;

/// A fast bump allocator with zero overhead.
///
/// Unlike `MkAllocator`, this does not use thread-local storage and can be
/// used directly for maximum performance. The tradeoff is that it's not
/// thread-safe and must be managed manually.
///
/// # Example
///
/// ```rust
/// use memkit::MkFastArena;
///
/// let arena = MkFastArena::new(1024 * 1024); // 1 MB
///
/// unsafe {
///     // Ultra-fast allocation - just bumps a pointer
///     let x = arena.alloc(42u64);
///     let y = arena.alloc([1.0f32, 2.0, 3.0]);
/// }
///
/// // O(1) reset - just resets the pointer
/// arena.reset();
/// ```
/// A fast bump allocator with zero overhead.
///
/// Unlike `MkAllocator`, this does not use thread-local storage and can be
/// used directly for maximum performance. The tradeoff is that it's not
/// thread-safe and must be managed manually.
///
/// # Example
///
/// ```rust
/// use memkit::MkFastArena;
///
/// let arena = MkFastArena::new(1024 * 1024); // 1 MB
///
/// unsafe {
///     // Ultra-fast allocation - just bumps a pointer
///     let x = arena.alloc(42u64);
///     let y = arena.alloc([1.0f32, 2.0, 3.0]);
/// }
///
/// // O(1) reset - just resets the pointer
/// arena.reset();
/// ```
pub struct MkFastArena {
    /// Start of the arena memory (page-aligned; owned by this struct).
    base: NonNull<u8>,
    /// One-past-the-end of the arena memory (for bounds checking).
    end: *const u8,
    /// Current bump pointer; always stays within `base..=end`.
    ptr: Cell<*mut u8>,
    /// Raw-pointer marker makes this type !Send + !Sync (single-threaded only).
    _marker: PhantomData<*mut u8>,
}

impl MkFastArena {
    /// Alignment of the backing allocation. Shared by `new` and `Drop` so the
    /// allocation and deallocation layouts can never drift apart.
    const PAGE_ALIGN: usize = 4096;

    /// Create a new fast arena with the given size in bytes.
    ///
    /// # Panics
    ///
    /// Panics if `size` is 0 or does not form a valid `Layout`. If the system
    /// allocator fails, this calls `std::alloc::handle_alloc_error` (the
    /// documented way to report allocation failure) rather than unwinding.
    #[inline]
    pub fn new(size: usize) -> Self {
        assert!(size > 0, "Arena size must be > 0");

        // Allocate with page alignment for best performance
        let layout = Layout::from_size_align(size, Self::PAGE_ALIGN)
            .expect("Invalid arena size");

        let base = unsafe {
            let ptr = alloc(layout);
            // A null return means the global allocator failed; report it via
            // the standard handler instead of a plain panic.
            NonNull::new(ptr).unwrap_or_else(|| std::alloc::handle_alloc_error(layout))
        };

        // SAFETY: `base` points to an allocation of exactly `size` bytes, so
        // `base + size` is a valid one-past-the-end pointer.
        let end = unsafe { base.as_ptr().add(size) };

        Self {
            base,
            end,
            ptr: Cell::new(base.as_ptr()),
            _marker: PhantomData,
        }
    }

    /// Create a new arena with the given size in megabytes.
    ///
    /// # Panics
    ///
    /// Panics if `mb * 1 MiB` overflows `usize` (instead of silently wrapping
    /// to a tiny arena in release builds).
    #[inline]
    pub fn with_capacity_mb(mb: usize) -> Self {
        let bytes = mb
            .checked_mul(1024 * 1024)
            .expect("arena size in MB overflows usize");
        Self::new(bytes)
    }

    /// Allocate and initialize a value.
    ///
    /// Returns a reference to the allocated value.
    /// Returns `None` if the arena is full.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the returned reference does not outlive the
    /// arena and is not used after `reset` / `reset_to` invalidates it.
    #[allow(clippy::mut_from_ref)]
    #[inline(always)]
    pub unsafe fn alloc<T>(&self, value: T) -> Option<&mut T> {
        let ptr = self.alloc_raw::<T>()?;
        // SAFETY: `alloc_raw` returned a pointer that is properly aligned for
        // `T` and points at `size_of::<T>()` bytes of exclusively-owned memory.
        unsafe {
            ptr.write(value);
            Some(&mut *ptr)
        }
    }

    /// Allocate memory for a type without initializing it.
    ///
    /// Returns `None` if the arena is full.
    #[inline(always)]
    pub fn alloc_raw<T>(&self) -> Option<*mut T> {
        let layout = Layout::new::<T>();
        self.alloc_layout(layout).map(|p| p as *mut T)
    }

    /// Allocate a slice of uninitialized memory.
    ///
    /// A zero-length request succeeds with a dangling (well-aligned,
    /// never-dereferenced) pointer, matching `Vec`'s convention.
    #[inline(always)]
    pub fn alloc_slice_raw<T>(&self, len: usize) -> Option<*mut T> {
        if len == 0 {
            return Some(std::ptr::NonNull::dangling().as_ptr());
        }
        let layout = Layout::array::<T>(len).ok()?;
        self.alloc_layout(layout).map(|p| p as *mut T)
    }

    /// Allocate a slice and fill with a clone of `value`.
    ///
    /// Returns `None` if the arena is full.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the returned slice does not outlive the
    /// arena and is not used after `reset` / `reset_to` invalidates it.
    #[allow(clippy::mut_from_ref)]
    #[inline(always)]
    pub unsafe fn alloc_slice_fill<T: Clone>(&self, len: usize, value: T) -> Option<&mut [T]> {
        let ptr = self.alloc_slice_raw::<T>(len)?;
        // SAFETY: `ptr` addresses `len` properly aligned, exclusively-owned
        // slots; each is initialized before the slice is formed.
        unsafe {
            for i in 0..len {
                ptr.add(i).write(value.clone());
            }
            Some(std::slice::from_raw_parts_mut(ptr, len))
        }
    }

    /// Allocate a slice and fill with default values.
    ///
    /// Returns `None` if the arena is full.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the returned slice does not outlive the
    /// arena and is not used after `reset` / `reset_to` invalidates it.
    #[allow(clippy::mut_from_ref)]
    #[inline(always)]
    pub unsafe fn alloc_slice_default<T: Default>(&self, len: usize) -> Option<&mut [T]> {
        let ptr = self.alloc_slice_raw::<T>(len)?;
        // SAFETY: same argument as `alloc_slice_fill`.
        unsafe {
            for i in 0..len {
                ptr.add(i).write(T::default());
            }
            Some(std::slice::from_raw_parts_mut(ptr, len))
        }
    }

    /// Core allocation function - bumps the pointer.
    ///
    /// Returns `None` (leaving the bump pointer untouched) when the request
    /// does not fit in the remaining space.
    #[inline(always)]
    fn alloc_layout(&self, layout: Layout) -> Option<*mut u8> {
        let current = self.ptr.get();
        let addr = current as usize;
        let align = layout.align();

        // Align up, with a checked add so a pathological address/alignment
        // combination fails cleanly instead of wrapping.
        let aligned_addr = addr.checked_add(align - 1)? & !(align - 1);

        // Bounds-check on integer addresses BEFORE forming any pointer, so an
        // oversized request never creates a pointer past `end` (which would be
        // undefined behavior under strict pointer-provenance rules).
        let end_addr = self.end as usize;
        if aligned_addr > end_addr || end_addr - aligned_addr < layout.size() {
            return None;
        }

        // SAFETY: the checks above prove `aligned_addr` and
        // `aligned_addr + size` both lie within `base..=end`, so both `add`s
        // stay in-bounds of the arena allocation.
        let aligned = unsafe { current.add(aligned_addr - addr) };
        self.ptr.set(unsafe { aligned.add(layout.size()) });

        Some(aligned)
    }

    /// Reset the arena, invalidating all allocations.
    ///
    /// This is O(1) - just resets the pointer to the start. Any reference or
    /// pointer previously handed out becomes dangling; using one afterwards is
    /// undefined behavior (see the `# Safety` notes on the alloc methods).
    #[inline(always)]
    pub fn reset(&self) {
        self.ptr.set(self.base.as_ptr());
    }

    /// Get the current allocation position in bytes (for checkpointing).
    #[inline(always)]
    pub fn checkpoint(&self) -> usize {
        self.ptr.get() as usize - self.base.as_ptr() as usize
    }

    /// Reset to a previous checkpoint, invalidating allocations made after it.
    ///
    /// `checkpoint` must come from `Self::checkpoint` on this arena; this is
    /// only verified in debug builds to keep the release path branch-free.
    #[inline(always)]
    pub fn reset_to(&self, checkpoint: usize) {
        let new_ptr = unsafe { self.base.as_ptr().add(checkpoint) };
        debug_assert!(new_ptr <= self.end as *mut u8);
        self.ptr.set(new_ptr);
    }

    /// Get number of bytes allocated.
    #[inline]
    pub fn allocated(&self) -> usize {
        self.ptr.get() as usize - self.base.as_ptr() as usize
    }

    /// Get number of bytes remaining.
    #[inline]
    pub fn remaining(&self) -> usize {
        self.end as usize - self.ptr.get() as usize
    }

    /// Get total capacity in bytes.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.end as usize - self.base.as_ptr() as usize
    }
}

impl Drop for MkFastArena {
    fn drop(&mut self) {
        // SAFETY: `base` was returned by `alloc` in `new` with exactly this
        // layout (size = end - base, align = PAGE_ALIGN) and is freed once.
        unsafe {
            let size = self.end as usize - self.base.as_ptr() as usize;
            let layout = Layout::from_size_align_unchecked(size, Self::PAGE_ALIGN);
            dealloc(self.base.as_ptr(), layout);
        }
    }
}

// MkFastArena is !Send and !Sync - it's meant for single-threaded use
// The PhantomData<*mut u8> ensures this

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_basic_alloc() {
        let arena = MkFastArena::new(4096);

        unsafe {
            let first = arena.alloc(42u64).unwrap();
            assert_eq!(*first, 42);

            let second = arena.alloc(123u32).unwrap();
            assert_eq!(*second, 123);

            // Earlier allocations remain writable after later ones.
            *first = 100;
            assert_eq!(*first, 100);
        }
    }

    #[test]
    fn test_slice_alloc() {
        let arena = MkFastArena::new(4096);

        unsafe {
            let filled = arena.alloc_slice_fill(100, 42u64).unwrap();

            assert_eq!(filled.len(), 100);
            // Spot-check both ends of the slice.
            assert_eq!(filled[0], 42);
            assert_eq!(filled[99], 42);
        }
    }

    #[test]
    fn test_reset() {
        let arena = MkFastArena::new(4096);

        unsafe {
            arena.alloc(42u64).unwrap();
            arena.alloc(123u64).unwrap();
        }

        let used = arena.allocated();
        assert!(used > 0);

        // Reset is O(1) and drops the usage counter back to zero.
        arena.reset();
        assert_eq!(arena.allocated(), 0);
    }

    #[test]
    fn test_checkpoint() {
        let arena = MkFastArena::new(4096);

        unsafe {
            arena.alloc(42u64).unwrap();
            let saved = arena.checkpoint();

            arena.alloc(123u64).unwrap();
            arena.alloc(456u64).unwrap();
            assert!(arena.allocated() > saved);

            // Rolling back restores the exact saved position.
            arena.reset_to(saved);
            assert_eq!(arena.checkpoint(), saved);
        }
    }

    #[test]
    fn test_full() {
        // A tiny arena that one 64-byte slice fills completely.
        let arena = MkFastArena::new(64);
        let _ = arena.alloc_slice_raw::<u8>(64);

        // Any further allocation must be refused.
        unsafe {
            assert!(arena.alloc(42u64).is_none());
        }
    }
}