//! memkit 0.2.0-beta.1
//!
//! Deterministic, intent-driven memory allocation for systems requiring
//! predictable performance.
//!
//! Frame arena - a fast bump allocator with reset capability.

use std::alloc::{alloc, dealloc, Layout};
use std::cell::Cell;
use std::ptr::NonNull;

use super::hints::{likely, unlikely};
use super::sizeclass;

/// A bump allocator arena for frame-temporary allocations.
///
/// Allocations are O(1) - just bump a pointer.
/// Reset is O(1) - just reset the pointer to the start.
/// 
/// Cache line aligned for optimal performance.
#[repr(C, align(64))]  // Cache line alignment: `base` is handed out with at most this alignment
pub struct FrameArena {
    /// Start of the arena memory.
    /// Owned allocation; created in `new` and released in `Drop` with a
    /// matching 64-byte-aligned layout.
    base: NonNull<u8>,
    /// Total size of the arena in bytes (already rounded up to a whole
    /// cache line by `new`).
    size: usize,
    /// Current allocation head (offset from base).
    /// `Cell` gives single-threaded interior mutability so `alloc`/`reset`
    /// can take `&self`; the type is deliberately not Send/Sync.
    /// Hot field - placed at end for better cache utilization
    head: Cell<usize>,
}

impl FrameArena {
    /// Create a new arena with the given size.
    ///
    /// The requested size is rounded up to a whole cache line (64 bytes).
    /// Returns `None` if `size` is zero, if rounding would overflow
    /// `usize`, or if the backing allocation fails.
    pub fn new(size: usize) -> Option<Self> {
        if unlikely(size == 0) {
            return None;
        }

        // Round up to a cache line. checked_add guards against wrap for
        // absurdly large requests: the old unchecked `size + 63` could
        // wrap to a tiny (even zero-sized) layout and under-allocate —
        // and calling the global `alloc` with a zero-size layout is UB.
        let aligned_size = size.checked_add(63)? & !63;
        let layout = Layout::from_size_align(aligned_size, 64).ok()?;
        // SAFETY: layout has non-zero size (size >= 1 rounds up to >= 64).
        let ptr = unsafe { alloc(layout) };
        let base = NonNull::new(ptr)?;

        Some(Self {
            base,
            size: aligned_size,
            head: Cell::new(0),
        })
    }

    /// Allocate memory from the arena.
    ///
    /// Returns a pointer aligned to `layout.align()`, or null if the
    /// arena is full, the offset arithmetic would overflow, or the
    /// requested alignment exceeds the arena's 64-byte base alignment.
    #[inline(always)]
    pub fn alloc(&self, layout: Layout) -> *mut u8 {
        let align = layout.align();
        let size = layout.size();

        // `base` is only 64-byte aligned, so aligning the *offset* cannot
        // satisfy a stricter alignment; refuse rather than hand out a
        // possibly misaligned pointer (the old code returned one).
        if unlikely(align > 64) {
            return std::ptr::null_mut();
        }

        // Align the head. Layout guarantees `align` is a power of two,
        // so the mask trick is valid. checked_add guards against wrap
        // near usize::MAX, which previously could slip past the
        // capacity test below.
        let current = self.head.get();
        let align_mask = align - 1;
        let aligned = match current.checked_add(align_mask) {
            Some(v) => v & !align_mask,
            None => return std::ptr::null_mut(),
        };

        // Check if we have enough space.
        let new_head = match aligned.checked_add(size) {
            Some(v) => v,
            None => return std::ptr::null_mut(),
        };
        if unlikely(new_head > self.size) {
            return std::ptr::null_mut();
        }

        // Bump the head.
        self.head.set(new_head);

        // SAFETY: aligned <= new_head <= self.size, so the resulting
        // pointer stays inside the allocation owned by `base`.
        unsafe { self.base.as_ptr().add(aligned) }
    }

    /// Allocate and initialize a value.
    ///
    /// Returns null (without writing `value`) if the arena is full.
    #[inline(always)]
    pub fn alloc_value<T>(&self, value: T) -> *mut T {
        let layout = Layout::new::<T>();
        let ptr = self.alloc(layout) as *mut T;
        if likely(!ptr.is_null()) {
            // SAFETY: non-null result from `alloc` with T's exact layout,
            // so it is properly sized and aligned for T.
            unsafe { ptr.write(value) };
        }
        ptr
    }

    /// Allocate a slice of `len` uninitialized `T`s.
    ///
    /// Returns null if `len` is zero, the total byte size overflows, or
    /// the arena is full.
    #[inline(always)]
    pub fn alloc_slice<T>(&self, len: usize) -> *mut T {
        if unlikely(len == 0) {
            return std::ptr::null_mut();
        }
        // The old `unwrap_unchecked` here was UB whenever
        // `len * size_of::<T>()` overflows isize; fail soft instead.
        let layout = match Layout::array::<T>(len) {
            Ok(l) => l,
            Err(_) => return std::ptr::null_mut(),
        };
        self.alloc(layout) as *mut T
    }

    /// Allocate using size class for common sizes.
    ///
    /// This is a fast path for frequently allocated sizes. The fallback
    /// path hands out 8-byte-aligned raw bytes.
    #[inline(always)]
    pub fn alloc_size_class(&self, size: usize) -> *mut u8 {
        if let Some(class) = sizeclass::SizeClass::for_size(size) {
            // Use pre-computed layout for the size class.
            self.alloc(class.layout())
        } else {
            // `from_size_align` only fails when the align-rounded size
            // overflows; the old unchecked constructor was UB then.
            match Layout::from_size_align(size, 8) {
                Ok(layout) => self.alloc(layout),
                Err(_) => std::ptr::null_mut(),
            }
        }
    }

    /// Allocate a value using size class optimization.
    #[inline(always)]
    pub fn alloc_value_fast<T>(&self, value: T) -> *mut T {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();
        // The size-class fallback only guarantees 8-byte alignment, so
        // over-aligned types must take the exact-layout path — writing
        // through an under-aligned pointer is UB (old code skipped this
        // check). NOTE(review): assumes size-class layouts themselves
        // are at least 8-byte aligned — confirm against `sizeclass`.
        let ptr = if likely(size <= 2048 && align <= 8) {
            self.alloc_size_class(size) as *mut T
        } else {
            // Fallback for large or over-aligned types.
            self.alloc(Layout::new::<T>()) as *mut T
        };

        if likely(!ptr.is_null()) {
            // SAFETY: pointer is non-null and sized/aligned for T
            // (guarded by the align check above).
            unsafe { ptr.write(value) };
        }
        ptr
    }

    /// Reset the arena, invalidating all allocations.
    ///
    /// This is O(1) - we just reset the head pointer. Callers must not
    /// use pointers obtained before the reset.
    #[inline(always)]
    pub fn reset(&self) {
        self.head.set(0);
    }

    /// Get the current head position (for checkpointing).
    #[inline(always)]
    pub fn head(&self) -> usize {
        self.head.get()
    }

    /// Reset to a specific head position (for checkpoint restore).
    ///
    /// `pos` must be a value previously returned by [`head`](Self::head);
    /// moving the head forward is a logic error (debug-asserted).
    pub fn reset_to(&self, pos: usize) {
        debug_assert!(pos <= self.head.get(), "Cannot reset forward");
        self.head.set(pos);
    }

    /// Get the number of bytes allocated (including alignment padding).
    pub fn allocated(&self) -> usize {
        self.head.get()
    }

    /// Get the number of bytes remaining.
    pub fn remaining(&self) -> usize {
        self.size - self.head.get()
    }

    /// Get the total arena size (rounded up from the requested size).
    pub fn capacity(&self) -> usize {
        self.size
    }

    /// Check if the arena is empty (nothing allocated since last reset).
    pub fn is_empty(&self) -> bool {
        self.head.get() == 0
    }
}

impl Drop for FrameArena {
    /// Release the arena's backing memory.
    ///
    /// Outstanding pointers into the arena are dangling after this runs;
    /// callers are responsible for not using them.
    fn drop(&mut self) {
        unsafe {
            // SAFETY: `base` was produced by `alloc` in `new` with exactly
            // this layout (`self.size` is the rounded-up size `new` stored,
            // and the alignment there is 64), so size/align match and the
            // unchecked constructor is sound here.
            let layout = Layout::from_size_align_unchecked(self.size, 64);
            dealloc(self.base.as_ptr(), layout);
        }
    }
}

// FrameArena is not Send/Sync - it's meant to be thread-local
// The MkAllocator handles thread-safety at a higher level

#[cfg(test)]
mod tests {
    use super::*;

    /// Values written through arena pointers survive and read back intact.
    #[test]
    fn test_arena_alloc() {
        let arena = FrameArena::new(4096).unwrap();

        let first = arena.alloc_value(42u32);
        assert!(!first.is_null());
        assert_eq!(unsafe { *first }, 42);

        let second = arena.alloc_value(123u64);
        assert!(!second.is_null());
        assert_eq!(unsafe { *second }, 123);

        assert!(arena.allocated() > 0);
    }

    /// `reset` returns the head to zero in O(1).
    #[test]
    fn test_arena_reset() {
        let arena = FrameArena::new(4096).unwrap();

        arena.alloc_value(42u32);
        arena.alloc_value(123u64);
        assert!(arena.allocated() > 0);

        arena.reset();
        assert_eq!(arena.allocated(), 0);
    }

    /// `head`/`reset_to` implement checkpoint-and-rewind.
    #[test]
    fn test_arena_checkpoint() {
        let arena = FrameArena::new(4096).unwrap();

        arena.alloc_value(42u32);
        let mark = arena.head();

        arena.alloc_value(123u64);
        arena.alloc_value(456u64);
        assert!(arena.allocated() > mark);

        arena.reset_to(mark);
        assert_eq!(arena.head(), mark);
    }

    /// A slice allocation is writable and readable over its full length.
    #[test]
    fn test_arena_slice() {
        let arena = FrameArena::new(4096).unwrap();

        let data = arena.alloc_slice::<u32>(100);
        assert!(!data.is_null());

        unsafe {
            // Fill the slice, then verify every element round-trips.
            for i in 0..100 {
                data.add(i).write(i as u32);
            }
            for i in 0..100 {
                assert_eq!(*data.add(i), i as u32);
            }
        }
    }

    /// Allocation fails (returns null) once capacity is exhausted.
    #[test]
    fn test_arena_full() {
        let arena = FrameArena::new(64).unwrap();

        // Consume the entire arena in one allocation.
        let fill = arena.alloc_slice::<u8>(64);
        assert!(!fill.is_null());

        // Any further allocation must be refused.
        let overflow = arena.alloc_value(42u32);
        assert!(overflow.is_null());
    }
}