memkit 0.2.0-beta.1

Deterministic, intent-driven memory allocation for systems requiring predictable performance
//! Global allocator state shared across threads.
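//!
//! A minimal usage sketch (marked `ignore` since the module path is
//! crate-internal); `MkConfig::default()` is assumed, as in the tests below:
//!
//! ```ignore
//! let global = GlobalState::new(MkConfig::default());
//! let frame = global.next_frame();   // advance the global frame counter
//! let ptr = global.pool_alloc(7u32); // currently heap-backed
//! global.pool_free(ptr);
//! assert_eq!(global.stats().frame, frame);
//! ```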

use std::alloc::{alloc, dealloc, Layout};
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::Mutex;

use crate::config::MkConfig;

/// Global state shared across all threads.
pub struct GlobalState {
    /// Configuration.
    config: MkConfig,
    /// Global frame counter.
    frame_counter: AtomicU64,
    /// Total bytes allocated (across all threads).
    total_allocated: AtomicUsize,
    /// Peak bytes allocated.
    peak_allocated: AtomicUsize,
    /// Total allocation count.
    allocation_count: AtomicU64,
    /// Total deallocation count.
    deallocation_count: AtomicU64,
    /// Deferred free queue.
    deferred_queue: crate::sync::deferred::MkDeferredQueue,
    /// Handle allocator.
    handle_allocator: crate::allocator::handle::MkHandleAllocator,
    /// Global pool allocator for cross-thread pool returns.
    global_pool: Mutex<crate::core::slab::SlabAllocator>,
    /// Track whether pool allocation is enabled.
    pool_enabled: AtomicBool,
}

// SAFETY: All fields in GlobalState are either plain data (MkConfig),
// atomic types (AtomicU64, AtomicUsize, AtomicBool), explicitly thread-safe
// types (MkDeferredQueue uses SegQueue, MkHandleAllocator uses RwLock), or
// protected by a Mutex (the global SlabAllocator).
unsafe impl Send for GlobalState {}
unsafe impl Sync for GlobalState {}

impl GlobalState {
    /// Create new global state.
    pub fn new(config: MkConfig) -> Self {
        Self {
            config,
            frame_counter: AtomicU64::new(0),
            total_allocated: AtomicUsize::new(0),
            peak_allocated: AtomicUsize::new(0),
            allocation_count: AtomicU64::new(0),
            deallocation_count: AtomicU64::new(0),
            deferred_queue: crate::sync::deferred::MkDeferredQueue::new(),
            handle_allocator: crate::allocator::handle::MkHandleAllocator::new(),
            global_pool: Mutex::new(crate::core::slab::SlabAllocator::new()),
            pool_enabled: AtomicBool::new(false), // Disabled for now
        }
    }

    /// Get the configuration.
    pub fn config(&self) -> &MkConfig {
        &self.config
    }

    /// Increment frame counter and return new value.
    pub fn next_frame(&self) -> u64 {
        self.frame_counter.fetch_add(1, Ordering::SeqCst) + 1
    }

    /// Get current frame number.
    pub fn frame(&self) -> u64 {
        self.frame_counter.load(Ordering::SeqCst)
    }

    /// Record an allocation.
    pub fn record_alloc(&self, size: usize) {
        self.allocation_count.fetch_add(1, Ordering::Relaxed);
        let new_total = self.total_allocated.fetch_add(size, Ordering::Relaxed) + size;

        // Update peak if necessary
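        // compare_exchange_weak may fail spuriously or because another thread
        // raced ahead; on failure it returns the current peak and the loop
        // retries only while our new total still exceeds it.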
        let mut peak = self.peak_allocated.load(Ordering::Relaxed);
        while new_total > peak {
            match self.peak_allocated.compare_exchange_weak(
                peak,
                new_total,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(current) => peak = current,
            }
        }
    }

    /// Record a deallocation.
    pub fn record_dealloc(&self, size: usize) {
        self.deallocation_count.fetch_add(1, Ordering::Relaxed);
        self.total_allocated.fetch_sub(size, Ordering::Relaxed);
    }

    /// Allocate from the system heap.
    ///
    /// `layout` must have a non-zero size; `std::alloc::alloc` does not
    /// support zero-sized allocations.
    pub fn heap_alloc(&self, layout: Layout) -> *mut u8 {
        debug_assert!(layout.size() > 0, "heap_alloc requires a non-zero-sized layout");
        let ptr = unsafe { alloc(layout) };
        if !ptr.is_null() {
            self.record_alloc(layout.size());
            unsafe { crate::core::sentinel::MkSentinel::poison_alloc(ptr, layout.size()) };
        }
        ptr
    }

    /// Free to the system heap.
    ///
    /// `layout` must match the layout passed to the corresponding
    /// `heap_alloc` call.
    pub fn heap_free(&self, ptr: *mut u8, layout: Layout) {
        if !ptr.is_null() {
            unsafe { crate::core::sentinel::MkSentinel::poison_free(ptr, layout.size()) };
            self.record_dealloc(layout.size());
            unsafe { dealloc(ptr, layout) };
        }
    }

    /// Get the deferred queue.
    pub fn deferred_queue(&self) -> &crate::sync::deferred::MkDeferredQueue {
        &self.deferred_queue
    }

    /// Get the handle allocator.
    pub fn handle_allocator(&self) -> &crate::allocator::handle::MkHandleAllocator {
        &self.handle_allocator
    }

    /// Allocate from the global pool.
    /// 
    /// This is used for pool allocations that need to be thread-safe.
    /// Falls back to heap allocation if pool allocation fails.
    pub fn pool_alloc<T>(&self, value: T) -> *mut T {
        let layout = Layout::new::<T>();

        // Pool allocation is disabled in this beta to prevent heap corruption:
        // without allocation-source tracking, pool_free cannot tell pool
        // pointers apart from heap pointers.
        // TODO: Implement proper allocation source tracking
        if self.pool_enabled.load(Ordering::Relaxed) {
            if let Ok(mut pool) = self.global_pool.lock() {
                let ptr = pool.alloc::<T>();
                if !ptr.is_null() {
                    unsafe { ptr.write(value) };
                    return ptr;
                }
            }
        }

        // Heap allocation: the default path while pooling is disabled, and the
        // fallback when the pool is exhausted or its lock is poisoned.
        let ptr = self.heap_alloc(layout) as *mut T;
        if !ptr.is_null() {
            unsafe {
                ptr.write(value);
            }
        }
        ptr
    }

    /// Free to the global pool.
    /// 
    /// This is used for pool deallocations that need to be thread-safe.
    /// Falls back to heap deallocation if the object is not from a pool.
    pub fn pool_free<T>(&self, ptr: *mut T) {
        if ptr.is_null() {
            return;
        }
        
        unsafe {
            // Drop the value in place first
            std::ptr::drop_in_place(ptr);

            // Pool allocation is disabled in this beta, so every live pointer
            // was produced by heap_alloc; free it on the heap.
            // TODO: Implement proper allocation source tracking
            let layout = Layout::new::<T>();
            self.heap_free(ptr as *mut u8, layout);
        }
    }

    /// Get a snapshot of the global statistics.
    ///
    /// Counters are loaded individually with relaxed ordering, so the
    /// snapshot may not be mutually consistent under concurrent allocation.
    pub fn stats(&self) -> GlobalStats {
        GlobalStats {
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            peak_allocated: self.peak_allocated.load(Ordering::Relaxed),
            allocation_count: self.allocation_count.load(Ordering::Relaxed),
            deallocation_count: self.deallocation_count.load(Ordering::Relaxed),
            frame: self.frame_counter.load(Ordering::Relaxed),
        }
    }
}

/// Global statistics.
#[derive(Debug, Clone, Copy)]
pub struct GlobalStats {
    /// Bytes currently allocated across all threads.
    pub total_allocated: usize,
    /// High-water mark of bytes allocated.
    pub peak_allocated: usize,
    /// Total number of allocations recorded.
    pub allocation_count: u64,
    /// Total number of deallocations recorded.
    pub deallocation_count: u64,
    /// Frame number at the time the snapshot was taken.
    pub frame: u64,
}

impl Drop for GlobalState {
    fn drop(&mut self) {
        crate::core::sentinel::MkSentinel::verify_leaks();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_global_stats() {
        let global = GlobalState::new(MkConfig::default());

        global.record_alloc(100);
        global.record_alloc(200);

        let stats = global.stats();
        assert_eq!(stats.total_allocated, 300);
        assert_eq!(stats.peak_allocated, 300);
        assert_eq!(stats.allocation_count, 2);

        global.record_dealloc(100);
        let stats = global.stats();
        assert_eq!(stats.total_allocated, 200);
        assert_eq!(stats.peak_allocated, 300); // Peak unchanged
    }

    #[test]
    fn test_frame_counter() {
        let global = GlobalState::new(MkConfig::default());

        assert_eq!(global.frame(), 0);
        assert_eq!(global.next_frame(), 1);
        assert_eq!(global.next_frame(), 2);
        assert_eq!(global.frame(), 2);
    }
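
    // A sketch of the heap-backed path taken while pooling is disabled;
    // assumes MkConfig::default(), as in the tests above.
    #[test]
    fn test_pool_alloc_heap_fallback() {
        let global = GlobalState::new(MkConfig::default());

        let ptr = global.pool_alloc(42u64);
        assert!(!ptr.is_null());
        assert_eq!(unsafe { *ptr }, 42);

        global.pool_free(ptr);

        let stats = global.stats();
        assert_eq!(stats.allocation_count, 1);
        assert_eq!(stats.deallocation_count, 1);
        assert_eq!(stats.total_allocated, 0);
    }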
}
