//! concurrent_avl_tree 0.1.0
//!
//! Lock-free readable AVL tree with epoch-based reclamation and background batch rebalancing.
//!
//! # Epoch-Based Memory Reclamation
//!
//! Lock-free deferred memory disposal using global epoch counters.

use crate::config::MAX_ACTIVE_EPOCHS;
use std::sync::Mutex;
use std::sync::atomic::{AtomicUsize, Ordering};

/// # Epoch-Based Reclamation System
///
/// Tracks active reader epochs to defer memory reclamation until safe quiescence.
///
/// ## Description
/// Writers queue disposal callbacks. Readers publish epoch participation via
/// per-slot atomic reader counters. Queued disposals execute only once the
/// tracked reader slots are observed quiescent, deferring reclamation past the
/// lifetime of readers that may still hold references to retired nodes.
///
/// ## Invariant
/// Epoch slots alias modulo `MAX_ACTIVE_EPOCHS`: a guard held across
/// `MAX_ACTIVE_EPOCHS` or more epoch advances lands in the same slot as a
/// newer epoch, so reader guards must be short-lived relative to the rate of
/// epoch advancement.
///
/// ## Thread Safety
/// `enter_epoch` and guard drop are lock-free atomic operations.
/// `queue_disposal` takes an internal mutex. `advance_epoch` requires external
/// serialization among its callers.
///
/// ## Examples
/// ```
/// use concurrent_avl_tree::concurrency::epoch::EpochBasedReclamation;
///
/// let reclamation = EpochBasedReclamation::new();
/// let guard = reclamation.enter_epoch();
/// reclamation.advance_epoch();
/// drop(guard);
/// ```
pub struct EpochBasedReclamation {
    /// Monotonically increasing global epoch counter.
    global_epoch: AtomicUsize,
    /// Active-reader count per slot, indexed by `epoch % MAX_ACTIVE_EPOCHS`.
    reader_counts: [AtomicUsize; MAX_ACTIVE_EPOCHS],
    /// Deferred disposal callbacks awaiting a quiescent state.
    pending_disposals: Mutex<Vec<Box<dyn FnOnce() + Send>>>,
}

impl EpochBasedReclamation {
    /// # Construct Reclamation System
    ///
    /// Initializes the global epoch and every per-slot reader counter to zero
    /// and starts with an empty disposal queue.
    ///
    /// ## Returns
    /// New `EpochBasedReclamation` instance.
    ///
    /// ## Complexity
    /// Time: O(1), Space: O(MAX_ACTIVE_EPOCHS)
    #[must_use]
    pub fn new() -> Self {
        Self {
            global_epoch: AtomicUsize::new(0),
            reader_counts: std::array::from_fn(|_| AtomicUsize::new(0)),
            pending_disposals: Mutex::new(Vec::new()),
        }
    }

    /// # Enter Reader Epoch
    ///
    /// Registers the current thread as an active reader in the current global
    /// epoch and returns an RAII guard that deregisters it on drop.
    ///
    /// The registration is validated against a concurrent epoch advance: if
    /// the global epoch moved between observing it and publishing the reader
    /// count, the stale registration is retracted and the operation retries.
    /// Without this re-check a reader could be counted in a slot that
    /// `advance_epoch` has already inspected, permitting reclamation of
    /// memory the reader is about to traverse.
    ///
    /// ## Returns
    /// `EpochGuard` RAII handle. Drops to leave epoch.
    ///
    /// ## Thread Safety
    /// Lock-free retry loop over atomic operations. Thread-safe.
    ///
    /// ## Complexity
    /// Time: O(1) expected, Space: O(1) per thread
    pub fn enter_epoch(&self) -> EpochGuard<'_> {
        loop {
            let observed = self.global_epoch.load(Ordering::SeqCst);
            let idx = observed % MAX_ACTIVE_EPOCHS;
            // Publish participation BEFORE re-reading the epoch. This is a
            // store-then-load handshake with `advance_epoch`'s
            // increment-then-scan, which is why SeqCst (not Acquire/Release)
            // is required: either the advancer sees this reader's count, or
            // this reader sees the new epoch and retries.
            self.reader_counts[idx].fetch_add(1, Ordering::SeqCst);
            if self.global_epoch.load(Ordering::SeqCst) == observed {
                return EpochGuard { system: self, index: idx };
            }
            // Epoch advanced under us: retract the stale count and retry.
            self.reader_counts[idx].fetch_sub(1, Ordering::SeqCst);
        }
    }

    /// # Advance Global Epoch
    ///
    /// Increments the epoch counter and, when every non-current reader slot
    /// is quiescent, executes all pending disposals.
    ///
    /// The disposal queue is not segregated by epoch, so draining it is only
    /// safe once no reader remains in ANY slot other than the one brand-new
    /// readers are entering; checking a single historical slot could free
    /// memory still visible to readers registered in a neighbouring epoch.
    /// Readers in the new current slot entered after the advance and are
    /// assumed unable to reach nodes retired beforehand (callers are expected
    /// to unlink nodes before queueing their disposal — NOTE(review): confirm
    /// this contract at the call sites).
    ///
    /// ## Thread Safety
    /// Requires external serialization among callers of `advance_epoch`.
    ///
    /// ## Complexity
    /// Time: O(D + MAX_ACTIVE_EPOCHS) where D = pending disposals, Space: O(1)
    ///
    /// ## Panics
    /// Panics if internal `pending_disposals` mutex is poisoned. Poison occurs
    /// when a thread holding the lock panics during disposal callback execution.
    /// Indicates unrecoverable state corruption; caller should abort or restart
    /// reclamation subsystem.
    pub fn advance_epoch(&self) {
        // `fetch_add` returns the prior value; deriving `current` from it
        // avoids a second load that could observe a further advance made by
        // another (mis-serialized) caller.
        let current = self.global_epoch.fetch_add(1, Ordering::SeqCst) + 1;
        let current_idx = current % MAX_ACTIVE_EPOCHS;
        // Quiescence scan pairs with the handshake in `enter_epoch` (SeqCst).
        let quiescent = (0..MAX_ACTIVE_EPOCHS)
            .filter(|&slot| slot != current_idx)
            .all(|slot| self.reader_counts[slot].load(Ordering::SeqCst) == 0);
        if quiescent {
            let mut pending = self.pending_disposals.lock().unwrap();
            for callback in pending.drain(..) {
                callback();
            }
        }
    }

    /// # Queue Disposal Callback
    ///
    /// Appends callable to pending disposal list. Executes on epoch grace completion.
    ///
    /// ## Parameters
    /// - `callback`: Closure consuming captured resources. Must be `Send + 'static`.
    ///
    /// ## Thread Safety
    /// Thread-safe via internal mutex.
    ///
    /// ## Panics
    /// Panics if internal `pending_disposals` mutex is poisoned. Poison occurs
    /// when a thread holding the lock panics during prior `queue_disposal` or
    /// `advance_epoch` invocation. Indicates unrecoverable state corruption.
    pub fn queue_disposal(&self, callback: impl FnOnce() + Send + 'static) {
        self.pending_disposals.lock().unwrap().push(Box::new(callback));
    }
}

impl Default for EpochBasedReclamation {
    /// Delegates to [`EpochBasedReclamation::new`]: zeroed epochs, empty queue.
    fn default() -> Self {
        Self::new()
    }
}

/// # RAII Epoch Guard
///
/// Automatically leaves epoch on drop. Prevents premature memory reclamation.
///
/// ## Invariant
/// `reader_counts[index]` increments on creation, decrements on drop.
///
/// ## Thread Safety
/// Drop executes lock-free atomic decrement.
pub struct EpochGuard<'a> {
    /// Owning reclamation system; the borrow keeps it alive for the guard's lifetime.
    system: &'a EpochBasedReclamation,
    /// Epoch slot (`epoch % MAX_ACTIVE_EPOCHS`) whose reader count this guard holds.
    index: usize,
}

impl Drop for EpochGuard<'_> {
    fn drop(&mut self) {
        // Deregister from the slot captured at `enter_epoch` time. `Release`
        // orders this thread's preceding accesses before the decrement that
        // may allow the reclaimer to observe a zero count.
        self.system.reader_counts[self.index].fetch_sub(1, Ordering::Release);
    }
}