// robinxx_map 0.1.0
//
// High-performance, thread-safe open-addressing hash map using Robin Hood
// displacement and xxHash3.
//
// Documentation: see the module-level docs below.
//! Memory layout calculation, alignment enforcement, and allocation.
//!
//! Handles `SoA` (Structure of Arrays) block allocation with 64-byte alignment and
//! power-of-two capacity invariants. All allocation routines are `#![no_std]` compatible
//! and rely on stable `alloc::alloc` functions.
//!
//! # Memory Layout
//! ```text
//! [ Meta(u8) | Padding... | Keys(K) | Padding... | Values(V) | Padding... ]
//! ^ 64B aligned
//! ```
//! Each section is padded to 64-byte boundaries to guarantee SIMD-friendly stride
//! and cache-line alignment for auto-vectorized probing loops.
//!
//! # Custom Allocator Support
//! This module uses stable `alloc::alloc` functions. For per-instance custom allocators,
//! enable the nightly `allocator_api` feature and replace direct `alloc`/`dealloc` calls
//! with `Allocator::allocate`/`deallocate`.

use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use core::ptr::NonNull;

const ALIGN: usize = 64;

/// Rounds `n` up to the nearest multiple of `ALIGN`.
#[inline]
const fn round_up(n: usize) -> usize {
    (n + ALIGN - 1) & !(ALIGN - 1)
}

/// Computes the total memory layout and section offsets for a `SoA` block.
///
/// # Arguments
/// * `capacity` - Power-of-two slot count. Must be `> 0`.
/// * `key_size` - `core::mem::size_of::<K>()`.
/// * `val_size` - `core::mem::size_of::<V>()`.
///
/// # Returns
/// A tuple `(layout, keys_offset, vals_offset)` representing the full block
/// and byte offsets for the `Keys` and `Values` arrays.
///
/// # Panics
/// Panics if capacity or sizes cause arithmetic overflow (including the
/// round-up-to-`ALIGN` steps), or if alignment requirements cannot be met.
/// These are internal invariants enforced by caller.
///
/// # Safety
/// Caller must ensure `capacity` matches the actual table capacity.
/// `key_size` and `val_size` must exactly match the target `K` and `V` types.
///
/// # See Also
/// - `allocate_block`
/// - `get_array_ptrs`
#[must_use]
pub fn calc_layout(capacity: usize, key_size: usize, val_size: usize) -> (Layout, usize, usize) {
    // FIX: the unchecked `round_up` wraps silently in release builds when its
    // argument is within `ALIGN - 1` of `usize::MAX`, which would produce an
    // undersized — and therefore unsound — allocation even though the
    // multiplications themselves were checked. Every rounding step here is
    // overflow-checked instead.
    const fn checked_round_up(n: usize) -> Option<usize> {
        match n.checked_add(ALIGN - 1) {
            Some(v) => Some(v & !(ALIGN - 1)),
            None => None,
        }
    }

    let meta_sz = checked_round_up(capacity).expect("meta section size overflow");
    let keys_sz = key_size
        .checked_mul(capacity)
        .and_then(checked_round_up)
        .expect("key_size * capacity overflow");
    let vals_sz = val_size
        .checked_mul(capacity)
        .and_then(checked_round_up)
        .expect("val_size * capacity overflow");
    let total_sz = meta_sz
        .checked_add(keys_sz)
        .and_then(|s| s.checked_add(vals_sz))
        .expect("total SoA block size overflow");

    // Section order is [meta | keys | vals]; each section starts on a 64-byte
    // boundary because every section size above is a multiple of `ALIGN`.
    let keys_offset = meta_sz;
    let vals_offset = meta_sz + keys_sz;
    let layout = Layout::from_size_align(total_sz, ALIGN).expect("invalid layout");
    (layout, keys_offset, vals_offset)
}

/// Allocates a zero-initialized `SoA` memory block for the hash table.
///
/// # Arguments
/// * `capacity` - Power-of-two slot count.
/// * `key_size` - `size_of::<K>()`.
/// * `val_size` - `size_of::<V>()`.
///
/// # Returns
/// `NonNull<u8>` pointing to the start of the allocated block.
///
/// # Panics
/// Panics if allocation fails (OOM) via `handle_alloc_error`, or if layout
/// calculation overflows.
///
/// # Safety
/// Caller must ensure `capacity` is a valid power of two and `> 0`.
/// `key_size` and `val_size` must match the actual `K` and `V` types.
/// The metadata region (first `capacity` bytes) is zero-initialized, denoting
/// all slots as empty. Keys/Values regions are uninitialized and must be
/// properly initialized before use.
///
/// # See Also
/// - `deallocate_block`
#[inline]
#[must_use]
pub unsafe fn allocate_block(capacity: usize, key_size: usize, val_size: usize) -> NonNull<u8> {
    // `keys_offset` doubles as the padded size of the metadata section,
    // because metadata starts at offset 0.
    let (layout, keys_offset, _vals_offset) = calc_layout(capacity, key_size, val_size);

    // SAFETY: `capacity > 0` (caller contract) means `layout` is non-zero-sized,
    // satisfying `alloc`'s requirement.
    let raw = unsafe { alloc(layout) };
    let Some(block) = NonNull::new(raw) else {
        handle_alloc_error(layout)
    };

    // SAFETY: the allocation owns at least `keys_offset` bytes. Zeroing the
    // ENTIRE padded metadata section — not merely `capacity` bytes — marks
    // every slot, including the padding lanes that vectorized probes may read,
    // as empty.
    unsafe {
        core::ptr::write_bytes(block.as_ptr(), 0, keys_offset);
    }

    block
}

/// Deallocates a previously allocated `SoA` memory block.
///
/// # Arguments
/// * `ptr` - Pointer returned by `allocate_block`.
/// * `capacity`, `key_size`, `val_size` - Original allocation parameters.
///
/// # Safety
/// `ptr` must have been returned by `allocate_block` with identical parameters.
/// Must not be called on already deallocated, null, or invalid pointers.
/// All initialized keys/values must be dropped by the caller prior to this call.
///
/// # See Also
/// - `allocate_block`
pub unsafe fn deallocate_block(
    ptr: NonNull<u8>,
    capacity: usize,
    key_size: usize,
    val_size: usize,
) {
    // Recompute the exact layout used at allocation time; `dealloc` requires
    // the same size/alignment the block was allocated with.
    let layout = calc_layout(capacity, key_size, val_size).0;
    // SAFETY: caller guarantees `ptr` came from `allocate_block` with these
    // same parameters, so `layout` matches the original allocation.
    unsafe { dealloc(ptr.as_ptr(), layout) }
}

/// Returns typed raw pointers to the meta, keys, and values arrays.
///
/// # Arguments
/// * `base` - Base pointer to the allocated `SoA` block.
/// * `keys_offset`, `vals_offset` - Byte offsets from `calc_layout`.
///
/// # Safety
/// `base` must point to a valid, allocated `SoA` block. Offsets must match `calc_layout`.
/// Returned pointers are guaranteed to be aligned to `align_of::<K>()` and `align_of::<V>()`
/// due to 64-byte `SoA` padding. Caller must ensure proper initialization before dereferencing.
#[inline]
#[must_use]
pub unsafe fn get_array_ptrs<K, V>(
    base: NonNull<u8>,
    keys_offset: usize,
    vals_offset: usize,
) -> (*mut u8, *mut K, *mut V) {
    let meta_ptr: *mut u8 = base.as_ptr();
    // SAFETY: both offsets lie inside the single allocation described by
    // `calc_layout` (caller contract), so the byte arithmetic stays in bounds.
    unsafe {
        let keys_ptr = meta_ptr.add(keys_offset) as *mut K;
        let vals_ptr = meta_ptr.add(vals_offset) as *mut V;
        (meta_ptr, keys_ptr, vals_ptr)
    }
}