//! memkit 0.1.0-beta.1 — deterministic, intent-driven memory allocation
//! for systems requiring predictable performance.
//!
//! Global allocator state shared across threads.

use std::alloc::{alloc, dealloc, Layout};
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

use crate::config::MkConfig;

/// Global state shared across all threads.
/// Global state shared across all threads.
///
/// Holds the immutable configuration plus the process-wide bookkeeping:
/// frame counter, allocation statistics, the deferred free queue, and the
/// handle allocator. All counters are atomics so they can be updated from
/// any thread without locking.
pub struct GlobalState {
    /// Configuration (set once at construction, read-only afterwards).
    config: MkConfig,
    /// Global frame counter, advanced by `next_frame`.
    frame_counter: AtomicU64,
    /// Total bytes allocated (across all threads).
    total_allocated: AtomicUsize,
    /// Peak bytes allocated (high-water mark of `total_allocated`).
    peak_allocated: AtomicUsize,
    /// Total allocation count.
    allocation_count: AtomicU64,
    /// Total deallocation count.
    deallocation_count: AtomicU64,
    /// Deferred free queue.
    deferred_queue: crate::sync::deferred::MkDeferredQueue,
    /// Handle allocator.
    handle_allocator: crate::allocator::handle::MkHandleAllocator,
}

// SAFETY: All fields in GlobalState are either plain data (MkConfig),
// atomic types (AtomicU64, AtomicUsize), or explicitly thread-safe types
// (MkDeferredQueue uses SegQueue, MkHandleAllocator uses RwLock).
//
// NOTE(review): if the claim above is accurate, these manual impls are
// redundant — the compiler auto-implements Send/Sync when every field is
// Send/Sync — and writing them by hand silences that check: a field later
// changed to a non-thread-safe type would no longer produce a compile
// error. Confirm whether a field (e.g. inside MkConfig) actually requires
// them; if not, prefer deleting both impls.
unsafe impl Send for GlobalState {}
unsafe impl Sync for GlobalState {}

impl GlobalState {
    /// Create new global state with all counters zeroed.
    pub fn new(config: MkConfig) -> Self {
        Self {
            config,
            frame_counter: AtomicU64::new(0),
            total_allocated: AtomicUsize::new(0),
            peak_allocated: AtomicUsize::new(0),
            allocation_count: AtomicU64::new(0),
            deallocation_count: AtomicU64::new(0),
            deferred_queue: crate::sync::deferred::MkDeferredQueue::new(),
            handle_allocator: crate::allocator::handle::MkHandleAllocator::new(),
        }
    }

    /// Get the configuration.
    pub fn config(&self) -> &MkConfig {
        &self.config
    }

    /// Increment the frame counter and return the new frame number.
    ///
    /// The first call returns 1; frame 0 is the pre-first-frame state.
    pub fn next_frame(&self) -> u64 {
        // fetch_add returns the *previous* value, hence the +1.
        self.frame_counter.fetch_add(1, Ordering::SeqCst) + 1
    }

    /// Get the current frame number.
    pub fn frame(&self) -> u64 {
        self.frame_counter.load(Ordering::SeqCst)
    }

    /// Record an allocation of `size` bytes in the global statistics.
    ///
    /// Updates the allocation count, the running byte total, and the peak
    /// high-water mark. All statistics use `Relaxed` ordering: they are
    /// counters only and never synchronize other memory accesses.
    pub fn record_alloc(&self, size: usize) {
        self.allocation_count.fetch_add(1, Ordering::Relaxed);
        let new_total = self.total_allocated.fetch_add(size, Ordering::Relaxed) + size;

        // Monotonic maximum: `fetch_max` is a single atomic RMW and
        // replaces the hand-rolled compare_exchange_weak loop.
        self.peak_allocated.fetch_max(new_total, Ordering::Relaxed);
    }

    /// Record a deallocation of `size` bytes.
    ///
    /// NOTE(review): `fetch_sub` wraps on underflow, so a dealloc recorded
    /// with a size larger than what was allocated silently corrupts the
    /// total — callers must pass the same size they recorded on allocation.
    pub fn record_dealloc(&self, size: usize) {
        self.deallocation_count.fetch_add(1, Ordering::Relaxed);
        self.total_allocated.fetch_sub(size, Ordering::Relaxed);
    }

    /// Allocate from the system heap, recording statistics and poisoning
    /// the new block on success.
    ///
    /// Returns null on allocation failure (statistics are untouched in
    /// that case). `layout` must have non-zero size: `std::alloc::alloc`
    /// is undefined behavior for zero-sized layouts.
    pub fn heap_alloc(&self, layout: Layout) -> *mut u8 {
        // Guard the UB precondition of `alloc` in debug builds.
        debug_assert!(
            layout.size() > 0,
            "heap_alloc requires a non-zero-sized layout"
        );
        // SAFETY: `layout` has non-zero size per the documented precondition
        // (debug-asserted above).
        let ptr = unsafe { alloc(layout) };
        if !ptr.is_null() {
            self.record_alloc(layout.size());
            // SAFETY: `ptr` is a freshly allocated, live block of exactly
            // `layout.size()` bytes.
            unsafe { crate::core::sentinel::MkSentinel::poison_alloc(ptr, layout.size()) };
        }
        ptr
    }

    /// Free a block previously returned by [`Self::heap_alloc`] with the
    /// same layout.
    ///
    /// Null pointers are ignored. The block is poisoned before release so
    /// stale reads are detectable.
    pub fn heap_free(&self, ptr: *mut u8, layout: Layout) {
        if !ptr.is_null() {
            // SAFETY: caller guarantees `ptr`/`layout` match the original
            // allocation and that the block is still live here.
            unsafe { crate::core::sentinel::MkSentinel::poison_free(ptr, layout.size()) };
            self.record_dealloc(layout.size());
            // SAFETY: same contract; `ptr` is not used after this call.
            unsafe { dealloc(ptr, layout) };
        }
    }

    /// Get the deferred free queue.
    pub fn deferred_queue(&self) -> &crate::sync::deferred::MkDeferredQueue {
        &self.deferred_queue
    }

    /// Get the handle allocator.
    pub fn handle_allocator(&self) -> &crate::allocator::handle::MkHandleAllocator {
        &self.handle_allocator
    }

    /// Take a snapshot of the global statistics.
    ///
    /// Each counter is loaded independently with `Relaxed` ordering, so
    /// under concurrent mutation the snapshot is not guaranteed to be
    /// internally consistent — use it for reporting, not for invariants.
    pub fn stats(&self) -> GlobalStats {
        GlobalStats {
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            peak_allocated: self.peak_allocated.load(Ordering::Relaxed),
            allocation_count: self.allocation_count.load(Ordering::Relaxed),
            deallocation_count: self.deallocation_count.load(Ordering::Relaxed),
            frame: self.frame_counter.load(Ordering::Relaxed),
        }
    }
}

/// Global statistics.
/// Global statistics.
///
/// A point-in-time snapshot produced by `GlobalState::stats`. Fields are
/// loaded individually, so they may be mutually inconsistent if taken
/// while other threads are allocating.
#[derive(Debug, Clone, Copy)]
pub struct GlobalStats {
    /// Bytes currently allocated across all threads.
    pub total_allocated: usize,
    /// Highest value `total_allocated` has reached.
    pub peak_allocated: usize,
    /// Number of allocations recorded since startup.
    pub allocation_count: u64,
    /// Number of deallocations recorded since startup.
    pub deallocation_count: u64,
    /// Frame number at the time of the snapshot.
    pub frame: u64,
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Statistics counters track allocs/deallocs, and the peak is monotonic.
    #[test]
    fn test_global_stats() {
        let state = GlobalState::new(MkConfig::default());

        for size in [100usize, 200] {
            state.record_alloc(size);
        }

        let snapshot = state.stats();
        assert_eq!(snapshot.allocation_count, 2);
        assert_eq!(snapshot.total_allocated, 300);
        assert_eq!(snapshot.peak_allocated, 300);

        state.record_dealloc(100);
        let snapshot = state.stats();
        assert_eq!(snapshot.total_allocated, 200);
        // The high-water mark never decreases on dealloc.
        assert_eq!(snapshot.peak_allocated, 300);
    }

    /// The frame counter starts at 0 and advances by one per `next_frame`.
    #[test]
    fn test_frame_counter() {
        let state = GlobalState::new(MkConfig::default());

        assert_eq!(state.frame(), 0);
        for expected in 1..=2u64 {
            assert_eq!(state.next_frame(), expected);
        }
        assert_eq!(state.frame(), 2);
    }
}

impl Drop for GlobalState {
    /// Run the sentinel leak check when the global state is torn down.
    ///
    /// NOTE(review): `verify_leaks` semantics are defined elsewhere — if it
    /// can panic on detected leaks, a panic here during an unwind (e.g. a
    /// failing test dropping this state) escalates to an abort. Confirm it
    /// only reports, or guard with `std::thread::panicking()`.
    fn drop(&mut self) {
        crate::core::sentinel::MkSentinel::verify_leaks();
    }
}