robinxx_map 0.1.0

High-performance, thread-safe open-addressing hash map using Robin Hood displacement & xxHash3.
//! Concurrency primitives for `no_std` thread safety.
//!
//! # Summary
//! Implements `SpinMutex` and `MutexGuard` using atomic operations for synchronized access.
//!
//! # Description
//! This module provides a minimal, `no_std`-compatible spinlock mutex backed by
//! `core::sync::atomic::AtomicBool`. The mutex is `Send + Sync` for any `T: Send`
//! on all targets, without relying on `std` or OS primitives. The lock uses a
//! compare-and-swap (CAS) loop with `core::hint::spin_loop()` to reduce CPU
//! contention during busy-waiting.
//!
//! Intended for wrapping internal `RawTable` state in `RobinHoodMap` to provide
//! thread-safe `&self` and `&mut self` access patterns. Critical sections are kept
//! minimal to reduce lock overhead.
//!
//! # Examples
//! ```
//! use robinxx_map::sync::SpinMutex;
//!
//! let mutex = SpinMutex::new(42);
//! let mut guard = mutex.lock();
//! *guard = 100;
//! drop(guard);
//! assert_eq!(*mutex.lock(), 100);
//! ```
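//!
//! Wrapping shared state so that methods taking `&self` can still mutate it is the
//! pattern described above; a minimal sketch (the `Counter` type is illustrative
//! only, not part of the crate):
//! ```
//! use robinxx_map::sync::SpinMutex;
//!
//! // Hypothetical example type: mutation through `&self`, serialized by the lock.
//! struct Counter {
//!     inner: SpinMutex<u64>,
//! }
//!
//! impl Counter {
//!     fn bump(&self) -> u64 {
//!         let mut guard = self.inner.lock();
//!         *guard += 1;
//!         *guard
//!     }
//! }
//!
//! let c = Counter { inner: SpinMutex::new(0) };
//! assert_eq!(c.bump(), 1);
//! assert_eq!(c.bump(), 2);
//! ```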
//!
//! # Panics
//! None. All operations are infallible and do not allocate.
//!
//! # Errors
//! Not applicable. Returns `MutexGuard` directly without `Result`.
//!
//! # Safety
//! - `UnsafeCell` is used internally to enable interior mutability. Access is strictly
//!   gated by the atomic lock state, preventing data races.
//! - The `Send`/`Sync` implementations require `T: Send`; the compiler enforces this
//!   bound, so it cannot be violated from safe code.
//! - Caller must not move a `MutexGuard` across thread boundaries in a way that violates
//!   lock ownership semantics; the `'a` lifetime ensures a guard cannot outlive its mutex.
//!
//! # See Also
//! - [`RobinHoodMap`](crate::map::RobinHoodMap) for the thread-safe public API.
//! - [`RawTable`](crate::table::RawTable) for the underlying data structure.

use core::cell::UnsafeCell;
use core::hint::spin_loop;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};

/// A simple spinlock-based mutex for `no_std` environments.
///
/// # Summary
/// Wraps `T` with an atomic `bool` lock for synchronized access.
///
/// # Description
/// `SpinMutex` provides interior mutability with busy-wait locking. It is optimized
/// for short critical sections and `no_std` compatibility. The lock uses `Acquire`
/// ordering on acquisition and `Release` ordering on release to ensure memory
/// visibility guarantees across threads.
///
/// # Examples
/// ```
/// use robinxx_map::sync::SpinMutex;
///
/// let mutex = SpinMutex::new(vec![1, 2, 3]);
/// let guard = mutex.lock();
/// assert_eq!(guard.len(), 3);
/// ```
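///
/// Sharing across threads via `Arc` (a sketch; the doctest uses `std` threads for
/// illustration, while the library itself remains `no_std`):
/// ```
/// use robinxx_map::sync::SpinMutex;
/// use std::sync::Arc;
/// use std::thread;
///
/// let m = Arc::new(SpinMutex::new(0u32));
/// let handles: Vec<_> = (0..4)
///     .map(|_| {
///         let m = Arc::clone(&m);
///         thread::spawn(move || {
///             for _ in 0..100 {
///                 // Each increment happens while the lock is held.
///                 *m.lock() += 1;
///             }
///         })
///     })
///     .collect();
/// for h in handles {
///     h.join().unwrap();
/// }
/// assert_eq!(*m.lock(), 400);
/// ```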
///
/// # Panics
/// Never panics. All operations are infallible.
///
/// # Errors
/// Not applicable. No `Result` return type.
///
/// # Safety
/// `UnsafeCell` enables mutation through `&self`. Thread-safety is guaranteed
/// by atomic lock state and `T: Send` bound. Caller must not create aliasing
/// references that violate exclusive access while lock is held.
///
/// # See Also
/// - [`MutexGuard`] for the RAII guard type.
/// - [`SpinMutex::lock`] for acquiring the lock.
pub struct SpinMutex<T> {
    /// Atomic flag: `false` = unlocked, `true` = locked.
    locked: AtomicBool,
    /// Protected data with interior mutability.
    data: UnsafeCell<T>,
}

// SAFETY: `SpinMutex` synchronizes access to `T`.
// - `Send`: Can be transferred across threads if `T` can.
// - `Sync`: Can be shared across threads if `T` can be sent (access is serialized).
unsafe impl<T: Send> Send for SpinMutex<T> {}
unsafe impl<T: Send> Sync for SpinMutex<T> {}

impl<T> SpinMutex<T> {
    /// Creates a new `SpinMutex` wrapping the given value.
    ///
    /// # Summary
    /// Constructs an unlocked mutex with initial data.
    ///
    /// # Description
    /// Constructs the mutex in an unlocked state (`locked = false`) without
    /// allocating. The inner data is fully initialized and ready for access via `lock()`.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::sync::SpinMutex;
    /// let m = SpinMutex::new(0usize);
    /// ```
    ///
    /// # Panics
    /// Never panics. Pure safe constructor.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable. No `unsafe` operations in this method.
    ///
    /// # See Also
    /// - [`lock`](Self::lock) for acquiring exclusive access.
    #[inline]
    #[must_use]
    pub const fn new(data: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            data: UnsafeCell::new(data),
        }
    }

    /// Acquires the spinlock, blocking until available.
    ///
    /// # Summary
    /// Locks the mutex and returns a guard for safe access.
    ///
    /// # Description
    /// Uses `compare_exchange_weak` in a tight loop with `spin_loop()` to acquire
    /// the lock. Returns a `MutexGuard` that automatically releases the lock on `Drop`.
    /// Suitable for short critical sections where OS context-switch overhead is
    /// undesirable. The lock is not reentrant: calling `lock()` again on the same
    /// thread while a guard is alive will spin forever.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::sync::SpinMutex;
    ///
    /// let mutex = SpinMutex::new("data");
    /// let guard = mutex.lock();
    /// assert_eq!(*guard, "data");
    /// ```
    ///
    /// # Panics
    /// Never panics. Busy-wait loop is infallible.
    ///
    /// # Errors
    /// Not applicable. No `Result` return type.
    ///
    /// # Safety
    /// Guard guarantees exclusive access. Memory ordering: `Acquire` on lock,
    /// `Release` on unlock ensures visibility of prior writes across threads.
    /// Only one guard can exist at a time; a second `lock()` call spins until the
    /// first guard is dropped.
    ///
    /// # See Also
    /// - [`MutexGuard`] for the RAII guard that releases the lock on drop.
    /// - [`SpinMutex::new`] for constructing a new mutex.
    #[inline]
    pub fn lock(&self) -> MutexGuard<'_, T> {
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            spin_loop();
        }
        MutexGuard { mutex: self }
    }
}

/// RAII guard that releases the spinlock when dropped.
///
/// # Summary
/// Holds exclusive access to `T` until dropped.
///
/// # Description
/// Returned by `SpinMutex::lock()`. Implements `Deref` and `DerefMut` for
/// ergonomic access to the inner value. Automatically calls `unlock()` via
/// `Drop`, preventing deadlocks from forgotten releases.
///
/// # Examples
/// ```
/// use robinxx_map::sync::SpinMutex;
///
/// let mutex = SpinMutex::new(10);
/// {
///     let mut g = mutex.lock();
///     *g += 5;
/// } // Lock released here
/// assert_eq!(*mutex.lock(), 15);
/// ```
///
/// # Panics
/// Never panics. Drop implementation is infallible.
///
/// # Errors
/// Not applicable.
///
/// # Safety
/// `UnsafeCell` access is strictly bounded by lock lifetime. No aliasing
/// violations possible while guard is active. Caller must not extend guard
/// lifetime beyond the scope where exclusive access is required.
///
/// # See Also
/// - [`SpinMutex`] for the mutex type this guard protects.
/// - [`SpinMutex::lock`] for acquiring a new guard.
pub struct MutexGuard<'a, T> {
    mutex: &'a SpinMutex<T>,
}

impl<T> Drop for MutexGuard<'_, T> {
    /// Releases the lock with `Release` ordering.
    ///
    /// # Summary
    /// Unlocks the mutex on guard destruction.
    ///
    /// # Description
    /// Stores `false` to the atomic flag using `Ordering::Release`, ensuring
    /// all writes made while holding the lock are visible to subsequent lockers.
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Only one guard exists per lock at any time. Release ordering guarantees
    /// memory visibility. This method is invoked automatically by Rust's drop
    /// glue; it cannot be called directly.
    ///
    /// # See Also
    /// - [`SpinMutex::lock`] for the corresponding acquisition method.
    #[inline]
    fn drop(&mut self) {
        self.mutex.locked.store(false, Ordering::Release);
    }
}

impl<T> Deref for MutexGuard<'_, T> {
    type Target = T;

    /// Returns an immutable reference to the protected data.
    ///
    /// # Summary
    /// Provides read-only access to the inner value.
    ///
    /// # Description
    /// Implemented to enable ergonomic `&T` access via the guard. The reference
    /// is valid only while the guard is held and the lock remains acquired.
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Safe because the guard guarantees exclusive access via the active lock.
    /// `UnsafeCell::get()` is soundly dereferenced only while the mutex is locked.
    /// Caller must not create additional references that violate borrow rules.
    ///
    /// # See Also
    /// - [`DerefMut::deref_mut`] for mutable access.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: Guard guarantees exclusive access via active lock.
        // UnsafeCell is soundly dereferenced only while locked.
        unsafe { &*self.mutex.data.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T> {
    /// Returns a mutable reference to the protected data.
    ///
    /// # Summary
    /// Provides mutable access to the inner value.
    ///
    /// # Description
    /// Implemented to enable ergonomic `&mut T` access via the guard. The reference
    /// is valid only while the guard is held and the lock remains acquired.
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Safe because the guard guarantees exclusive access via the active lock.
    /// `UnsafeCell::get()` is soundly dereferenced only while the mutex is locked.
    /// Caller must not create aliasing mutable references.
    ///
    /// # See Also
    /// - [`Deref::deref`] for immutable access.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: Same as Deref, but yields mutable reference.
        // Exclusive access guaranteed by lock ownership.
        unsafe { &mut *self.mutex.data.get() }
    }
}

#[cfg(test)]
mod tests {
    extern crate std;

    use super::*;
    use std::sync::Arc;
    use std::vec;
    use core::sync::atomic::Ordering;

    #[test]
    fn test_spin_mutex_basic() {
        let m = SpinMutex::new(0usize);
        {
            let mut g = m.lock();
            *g = 42;
        }
        assert_eq!(*m.lock(), 42);
    }

    #[test]
    fn test_spin_mutex_drop_unlocks() {
        let m = SpinMutex::new(10);
        {
            let _g = m.lock();
            assert!(m.locked.load(Ordering::Relaxed));
        }
        assert!(!m.locked.load(Ordering::Relaxed));
    }

    #[test]
    fn test_spin_mutex_concurrent_increment() {
        // Plain (non-atomic) counter: the final value is correct only if the
        // mutex actually provides mutual exclusion between threads.
        let counter = Arc::new(SpinMutex::new(0usize));
        let mut handles = vec![];
        for _ in 0..4 {
            let c = Arc::clone(&counter);
            handles.push(std::thread::spawn(move || {
                for _ in 0..1000 {
                    *c.lock() += 1;
                }
            }));
        }
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(*counter.lock(), 4000);
    }
}