//! memkit 0.1.0-beta.1
//!
//! Deterministic, intent-driven memory allocation for systems requiring
//! predictable performance.
//!
//! The main allocator type.

use std::alloc::Layout;
use std::sync::Arc;

use crate::config::MkConfig;
use crate::container::{MkFrameBox, MkFrameSlice, MkFrameVec, MkPoolBox, MkHeapBox};
use crate::core::global::GlobalState;
use crate::core::tls;
use crate::lifecycle::MkScope;
pub mod handle;

pub use handle::{MkHandle, MkHandleAllocator};

/// The main memory allocator.
///
/// This is the primary entry point for all allocation operations.
/// It is cheap to clone (internally uses `Arc`) and thread-safe.
///
/// # Example
///
/// ```rust,no_run
/// use memkit::{MkAllocator, MkConfig};
///
/// let alloc = MkAllocator::new(MkConfig::default());
///
/// alloc.begin_frame();
/// let data = alloc.frame_alloc::<[f32; 256]>();
/// alloc.end_frame();
/// ```
#[derive(Clone)]
pub struct MkAllocator {
    // Shared global state (configuration, frame counter, deferred-free
    // queue, handle allocator). The `Arc` is what makes `Clone` a cheap
    // refcount bump, as promised in the doc comment above.
    global: Arc<GlobalState>,
}

impl MkAllocator {
    /// Create a new allocator with the given configuration.
    ///
    /// The configuration is moved into a shared `GlobalState`; all clones
    /// of the returned allocator observe that same state.
    pub fn new(config: MkConfig) -> Self {
        let global = Arc::new(GlobalState::new(config));
        Self { global }
    }

    /// Create an allocator with default configuration.
    pub fn with_defaults() -> Self {
        Self::new(MkConfig::default())
    }

    /// Get the configuration.
    pub fn config(&self) -> &MkConfig {
        self.global.config()
    }

    /// Ensure thread-local state is initialized.
    ///
    /// Lazily initializes the calling thread's TLS with the configured
    /// frame-arena size and a handle to the shared global state. Called
    /// at the top of every method that touches TLS.
    fn ensure_tls(&self) {
        if !tls::is_tls_initialized() {
            tls::init_tls(self.global.config().frame_arena_size, Arc::clone(&self.global));
        }
    }

    /// Begin a new frame. Must be paired with `end_frame()`.
    ///
    /// Advances the global frame counter, then notifies this thread's TLS.
    #[inline]
    pub fn begin_frame(&self) {
        self.ensure_tls();
        self.global.next_frame();
        tls::with_tls_mut(|tls| tls.begin_frame());
    }

    /// End the current frame. Resets frame arena.
    ///
    /// NOTE(review): if this is the very first call on a thread,
    /// `ensure_tls` initializes TLS and then `end_frame` runs on a frame
    /// that was never begun — presumably a harmless no-op; confirm in
    /// the TLS implementation.
    #[inline]
    pub fn end_frame(&self) {
        self.ensure_tls();
        tls::with_tls_mut(|tls| tls.end_frame());
    }

    /// Get the current frame number.
    pub fn frame(&self) -> u64 {
        self.global.frame()
    }

    /// Allocate from the frame arena.
    ///
    /// Returns a raw, uninitialized pointer; the TLS arena presumably
    /// returns null on exhaustion (see the null checks in the typed
    /// wrappers below) — caller must check before writing.
    #[inline(always)]
    pub fn frame_alloc<T>(&self) -> *mut T {
        self.ensure_tls();
        tls::with_tls_mut(|tls| tls.frame_alloc::<T>())
    }

    /// Allocate a value in the frame arena and return a box.
    ///
    /// Returns `None` if the arena is exhausted.
    ///
    /// NOTE(review): the `else` arm returns the `from_raw` expression
    /// bare, implying `MkFrameBox::from_raw` itself returns an `Option`,
    /// whereas `frame_vec` below wraps its `from_raw_parts` in `Some(..)`.
    /// One of the two conventions is likely inconsistent — verify against
    /// the container constructors' signatures.
    #[inline(always)]
    pub fn frame_box<T>(&self, value: T) -> Option<MkFrameBox<'_, T>> {
        self.ensure_tls();
        let ptr = tls::with_tls_mut(|tls| tls.frame_alloc_value(value));
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `ptr` is non-null and was just written by
            // `frame_alloc_value`; the `'_` lifetime ties the box to this
            // allocator borrow.
            unsafe { MkFrameBox::from_raw(ptr) }
        }
    }

    /// Allocate a slice in the frame arena.
    ///
    /// Returns `None` if the arena is exhausted. Same bare-`from_raw`
    /// convention as `frame_box` — see the review note there.
    #[inline(always)]
    pub fn frame_slice<T>(&self, len: usize) -> Option<MkFrameSlice<'_, T>> {
        self.ensure_tls();
        let ptr = tls::with_tls_mut(|tls| tls.frame_alloc_slice::<T>(len));
        if ptr.is_null() {
            None
        } else {
            // SAFETY: non-null arena pointer with room for `len` elements
            // (per `frame_alloc_slice`'s contract — TODO confirm it
            // guarantees this when it returns non-null).
            unsafe { MkFrameSlice::from_raw_parts(ptr, len) }
        }
    }

    /// Allocate a vector in the frame arena with the given capacity.
    ///
    /// Returns `None` if the arena is exhausted. The vector starts empty
    /// (capacity reserved, presumably length 0 — confirm in `MkFrameVec`).
    pub fn frame_vec<T>(&self, capacity: usize) -> Option<MkFrameVec<'_, T>> {
        self.ensure_tls();
        let ptr = tls::with_tls_mut(|tls| tls.frame_alloc_slice::<T>(capacity));
        if ptr.is_null() {
            None
        } else {
            // SAFETY: non-null arena pointer sized for `capacity` elements.
            Some(unsafe { MkFrameVec::from_raw_parts(ptr, capacity) })
        }
    }

    /// Allocate from the pool.
    ///
    /// Note: Currently uses system heap backing for safety until concurrent slabs are ready.
    ///
    /// Returns `None` if the backing allocation fails.
    pub fn pool_box<T>(&self, value: T) -> Option<MkPoolBox<T>> {
        // For Beta.1, we bypass the TLS slab allocator because MkPoolBox::drop
        // uses the global system deallocator (to be safe across threads).
        // Mixing slab pointers with system dealloc causes heap corruption.
        // TODO: Re-enable slab allocation when thread-safe return is implemented.
        let layout = Layout::new::<T>();
        // NOTE(review): for zero-sized `T` this layout has size 0 —
        // verify `GlobalState::heap_alloc` handles (or rejects) that case.
        let ptr = self.global.heap_alloc(layout) as *mut T;
        
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `ptr` is a valid, non-null allocation for `T`;
            // writing `value` before constructing the box ensures the box
            // never observes uninitialized memory. The cloned Arc lets
            // MkPoolBox::drop reach the matching deallocator.
            unsafe {
                ptr.write(value);
                MkPoolBox::from_raw(ptr, self.global.clone())
            }
        }
    }

    /// Allocate from the heap.
    ///
    /// Returns `None` if the system allocator fails.
    ///
    /// NOTE(review): `std::alloc::alloc` is documented as UB for
    /// zero-sized layouts, and `Layout::new::<T>()` has size 0 when `T`
    /// is a ZST — confirm `heap_box::<()>` (or any ZST) is either
    /// rejected or special-cased.
    pub fn heap_box<T>(&self, value: T) -> Option<MkHeapBox<T>> {
        let layout = Layout::new::<T>();
        // Use system allocator directly to match MkHeapBox::drop behavior
        // logic: MkHeapBox doesn't carry GlobalState, so it cannot call heap_free.
        let ptr = unsafe { std::alloc::alloc(layout) as *mut T };
        if ptr.is_null() {
            None
        } else {
            // SAFETY: allocation succeeded (non-null) with `T`'s layout;
            // `write` initializes it before the box takes ownership.
            unsafe {
                ptr.write(value);
                MkHeapBox::from_raw(ptr)
            }
        }
    }

    /// Create a scoped frame guard.
    ///
    /// Presumably the guard begins a frame now and ends it on drop —
    /// confirm against `MkScope::new`.
    pub fn scope(&self) -> MkScope<'_> {
        self.ensure_tls();
        MkScope::new(self)
    }

    /// Get the current frame head position (for checkpointing).
    pub fn frame_head(&self) -> usize {
        self.ensure_tls();
        tls::with_tls(|tls| tls.frame_head())
    }

    /// Reset frame to a checkpoint position.
    ///
    /// NOTE(review): this uses `with_tls` (shared access) while the other
    /// mutating paths use `with_tls_mut` — presumably `reset_frame_to`
    /// relies on interior mutability; verify this is intentional.
    pub fn reset_frame_to(&self, pos: usize) {
        self.ensure_tls();
        tls::with_tls(|tls| tls.reset_frame_to(pos));
    }

    /// Get global statistics.
    pub fn stats(&self) -> crate::core::global::GlobalStats {
        self.global.stats()
    }

    /// Begin a named phase.
    ///
    /// `name` must be `'static` because the phase object stores it for
    /// the phase's whole lifetime.
    pub fn begin_phase(&self, name: &'static str) -> crate::lifecycle::MkPhase {
        self.ensure_tls();
        crate::lifecycle::MkPhase::begin(name)
    }

    /// Defer the deallocation of memory.
    ///
    /// The memory will be reclaimed later by calling `reclaim()`.
    ///
    /// # Safety
    ///
    /// The pointer must be valid and its layout must be correct.
    pub unsafe fn deferred_free<T>(&self, ptr: *mut T, drop_fn: Option<unsafe fn(*mut T)>) {
        let layout = std::alloc::Layout::new::<T>();
        // SAFETY: We transmute the drop_fn to match the internal storage type (*mut u8)
        // NOTE(review): fn-pointer transmute relies on `unsafe fn(*mut T)`
        // and `unsafe fn(*mut u8)` sharing an ABI (both take one thin
        // pointer) — true for sized `T`; would be unsound if `T: ?Sized`
        // were ever allowed here.
        let drop_fn_u8 = std::mem::transmute::<Option<unsafe fn(*mut T)>, Option<unsafe fn(*mut u8)>>(drop_fn);
        self.global.deferred_queue().push(ptr as *mut u8, layout, drop_fn_u8);
    }

    /// Reclaim all deferred memory.
    ///
    /// Returns the number of items reclaimed.
    pub fn reclaim(&self) -> usize {
        // Clone the Arc so the closure owns its handle to GlobalState
        // while `self.global` is simultaneously borrowed for the
        // `deferred_queue()` call.
        let global = Arc::clone(&self.global);
        self.global.deferred_queue().reclaim(|ptr, layout| {
            global.heap_free(ptr, layout);
        })
    }

    /// Allocate a handle for a pointer.
    pub fn handle_alloc(&self, ptr: *mut u8) -> MkHandle {
        self.global.handle_allocator().allocate(ptr)
    }

    /// Resolve a handle to its current pointer.
    ///
    /// Returns `None` if the handle is stale or was freed.
    pub fn handle_resolve(&self, handle: MkHandle) -> Option<*mut u8> {
        self.global.handle_allocator().resolve(handle)
    }

    /// Update the pointer associated with a handle.
    ///
    /// Returns `false` if the handle is no longer valid.
    pub fn handle_update(&self, handle: MkHandle, new_ptr: *mut u8) -> bool {
        self.global.handle_allocator().update(handle, new_ptr)
    }

    /// Free a handle.
    ///
    /// Returns `false` if the handle was already freed or invalid.
    pub fn handle_free(&self, handle: MkHandle) -> bool {
        self.global.handle_allocator().deallocate(handle)
    }
}