attachable-slab-allocator 0.1.0

A high-performance, $O(1)$, Master-Slave slab allocator designed for `no_std` environments, kernels, and embedded systems. This library provides fixed-size memory management with RAII safety while remaining completely agnostic of the underlying memory provider.
Documentation — example usage (multi-threaded stress test):
use attachable_slab_allocator::Result;
use attachable_slab_allocator::locks::SpinLock;
use attachable_slab_allocator::{SlabCache, define_allocation_hooks};
use bolero::AnySliceMutExt;
use std::alloc::{Layout, alloc_zeroed, dealloc};
use std::ptr::NonNull;
use std::sync::atomic::AtomicU32;
use std::thread;

/// Number of live allocations handed out by `alloc_mem` and not yet
/// returned through `free_mem`; the test asserts this drains to zero.
static ALLOC_COUNT: AtomicU32 = AtomicU32::new(0);

/// Allocates zero-initialized memory for `lay` via the global allocator,
/// recording the allocation in `ALLOC_COUNT`.
///
/// Returns `None` if the underlying allocator fails.
pub fn alloc_mem(lay: Layout) -> Option<NonNull<u8>> {
    // SAFETY: `alloc_zeroed` requires a layout of non-zero size per the
    // GlobalAlloc contract; callers are expected to pass such a layout
    // (the test only allocates `u128` slabs).
    let ptr = unsafe { alloc_zeroed(lay) };
    let n_ptr = NonNull::new(ptr)?;

    // Count the allocation only after it is known to have succeeded.
    // (Bumping the counter before the null check — as the original code
    // did — permanently inflates ALLOC_COUNT on allocation failure,
    // because `free_mem` is never called for a `None` result, and the
    // final leak assertion would then fail spuriously.)
    ALLOC_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

    Some(n_ptr)
}

/// Returns memory previously obtained from `alloc_mem` to the global
/// allocator and records the release in `ALLOC_COUNT`.
///
/// The caller must pass the same layout the block was allocated with.
pub fn free_mem(ptr: NonNull<u8>, lay: Layout) -> Result<()> {
    use std::sync::atomic::Ordering;

    // Undo the bookkeeping performed by `alloc_mem`.
    ALLOC_COUNT.fetch_sub(1, Ordering::Relaxed);

    // SAFETY: the caller promises `ptr` was produced by `alloc_mem` with
    // exactly this layout, so handing it back to `dealloc` is sound.
    unsafe {
        dealloc(ptr.as_ptr(), lay);
    }

    Ok(())
}

// Register `alloc_mem`/`free_mem` with the slab allocator crate.
// NOTE(review): presumably this macro generates the glue through which
// `SlabCache` obtains and releases its backing memory — confirm against
// the crate's macro docs.
define_allocation_hooks!(alloc_mem, free_mem);

// Total number of successful slab allocations across all test threads;
// purely informational (printed at the end of the test).
static TOTAL_ALOC: AtomicU32 = AtomicU32::new(0);

/// Multi-threaded stress test for `SlabCache`: 20 threads perform a
/// randomized mix of allocations (~80%) and frees (~20%), stamping each
/// slot with a value derived from its own address so corruption or
/// cross-thread aliasing is caught on read-back. At the end, the raw
/// allocation counter must be back at zero (no leaked backing memory).
#[test]
fn test_slab_cache() {
    // NOTE(review): 0x200 is presumably the slab capacity (slots of u128,
    // guarded by the crate's SpinLock) — confirm against SlabCache docs.
    let slab_cache =
        SlabCache::<u128, SpinLock, 0x200>::new().expect("SlabCache::<.., .., ..>::new failed");

    let mut handles = Vec::new();

    // Arbitrary tag XOR-mixed (offset by thread id) into every stored
    // value, so each thread writes a distinct, verifiable pattern.
    const KEY: u128 = 0xFFD0_AABB_CBAC_43FA;

    for thread_id in 0..20 {
        // NOTE(review): cloning presumably yields a handle onto the same
        // shared cache (Arc-like), not an independent copy — confirm.
        let mut cache_clone = slab_cache.clone();

        let handle = thread::spawn(move || {
            let mut ptrs = Vec::new();

            // Per-thread xorshift-style PRNG with a distinct seed per thread.
            let mut rng_state: u64 = 0x123456789ABCDEF1 + (thread_id as u64);

            let mut next_random = move || {
                rng_state ^= rng_state << 13;
                rng_state ^= rng_state >> 7;
                rng_state ^= rng_state << 17;
                rng_state
            };

            // Far fewer iterations under Miri, which executes slowly.
            const TOT_OP: usize = if cfg!(miri) { 50 } else { 100000 };

            for _ in 0..TOT_OP {
                let next_op = next_random() % 100;

                if next_op < 80 {
                    // ~80% of ops: allocate a slot and stamp it with an
                    // address-derived value for later verification.
                    let mut ptr = cache_clone
                        .alloc()
                        .expect("SlabCache::<.., .., ..>::alloc failed");
                    *ptr = ptr.as_ptr() as u128 ^ (KEY + thread_id);
                    ptrs.push(ptr);

                    TOTAL_ALOC.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                } else {
                    // ~20% of ops: release the most recently held slot,
                    // first checking its stamp is intact.
                    if let Some(slb) = ptrs.pop() {
                        let key = *slb;
                        let val = slb.as_ptr() as u128 ^ (KEY + thread_id);
                        assert_eq!(key, val, "SlabCache::<.., .., ..> internal problem");
                        drop(slb); // handle drop returns the slot to the cache
                    } else {
                        // Nothing held yet — allocate instead, so every
                        // iteration still performs exactly one operation.
                        let mut ptr = cache_clone
                            .alloc()
                            .expect("SlabCache::<.., .., ..>::alloc failed");
                        *ptr = ptr.as_ptr() as u128 ^ (KEY + thread_id);
                        ptrs.push(ptr);

                        TOTAL_ALOC.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    }
                }
            }

            // Randomize release order so remaining slots go back out of
            // allocation order. NOTE(review): `shuffle` comes from bolero's
            // `AnySliceMutExt` (imported at top of file) — confirm.
            ptrs.shuffle();

            // Verify every surviving slot's stamp before its handle drops.
            for ptr in ptrs.drain(..) {
                let key = *ptr;
                let val = ptr.as_ptr() as u128 ^ (KEY + thread_id);
                assert_eq!(key, val, "SlabCache::<.., .., ..> internal problem")
            }
        });

        handles.push(handle);
    }

    // Propagate any panic (failed assertion) from the worker threads.
    for h in handles.drain(..) {
        h.join().unwrap();
    }

    drop(slab_cache);

    // After the last cache handle is dropped, every raw allocation made
    // through `alloc_mem` must have been returned via `free_mem`.
    let alloc_count = ALLOC_COUNT.load(std::sync::atomic::Ordering::Relaxed);

    assert!(
        alloc_count == 0,
        "SlabCache::<.., .., ..> thread safe failed"
    );

    println!(
        "test successfully with total allocation {}",
        TOTAL_ALOC.load(std::sync::atomic::Ordering::Relaxed)
    );
}