// concurrent_avl_tree 0.1.0
// Lock-free readable AVL tree with epoch-based reclamation and background batch rebalancing.
//! # Integration Test Suite
//!
//! Validates public API workflow, concurrency primitives, and memory arena lifecycle.

use concurrent_avl_tree::concurrency::epoch::EpochBasedReclamation;
use concurrent_avl_tree::error::ConcurrentAVLError;
use concurrent_avl_tree::memory::arena::GenerationalArena;
use concurrent_avl_tree::tree::batch::{BatchCollector, BatchEntry, OperationType};
use concurrent_avl_tree::tree::node::Node;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

#[test]
fn test_epoch_reclamation_lifecycle() {
    // A queued disposal must only run once the pinning guard is dropped
    // and the required grace periods have elapsed.
    let reclamation = EpochBasedReclamation::new();
    let guard = reclamation.enter_epoch();

    // Queue disposal should not execute while guard is active
    let executed = Arc::new(AtomicBool::new(false));
    reclamation.queue_disposal({
        let flag = Arc::clone(&executed);
        move || flag.store(true, Ordering::SeqCst)
    });

    reclamation.advance_epoch(); // Grace period 1
    reclamation.advance_epoch(); // Grace period 2 (should execute if guard dropped)
    // Guard is still live, so the callback must not have fired.
    assert!(!executed.load(Ordering::SeqCst));

    drop(guard);
    reclamation.advance_epoch(); // Now safe
    reclamation.advance_epoch();
    // With the guard released and two epochs advanced, disposal has run.
    assert!(executed.load(Ordering::SeqCst));
}

#[test]
fn test_arena_allocation_and_swap() {
    // Walk the arena through its lifecycle: fresh, allocated,
    // generation-swapped, then reset for rebuild.
    let mut arena = GenerationalArena::<4096>::new();

    // A fresh arena has an empty active buffer.
    assert_eq!(arena.active_buffer_len(), 0);

    // Allocations hand back 64-byte-aligned, strictly increasing offsets.
    let first = arena.allocate(128).unwrap();
    let second = arena.allocate(64).unwrap();
    assert_eq!(first % 64, 0);
    assert_eq!(second % 64, 0);
    assert!(second > first);

    // Swap makes next buffer active
    arena.swap_buffers();
    assert!(arena.active_buffer_len() > 0);

    // Reset clears next buffer for rebuild; the next allocation
    // lands back at the origin.
    arena.reset();
    let after_reset = arena.allocate(256).unwrap();
    assert_eq!(after_reset, 0);
}

#[test]
fn test_batch_collection_sorting_and_invariant() {
    // Out-of-order appended operations come back key-sorted, and
    // extraction drains the internal buffer.
    let collector = BatchCollector::<i32, String>::default();

    // Concurrent append simulation: entries arrive out of key order.
    let pending = vec![
        BatchEntry {
            operation_type: OperationType::Delete,
            target_key: 30,
            target_value: None,
        },
        BatchEntry {
            operation_type: OperationType::Insert,
            target_key: 10,
            target_value: Some("a".into()),
        },
        BatchEntry {
            operation_type: OperationType::Insert,
            target_key: 20,
            target_value: Some("b".into()),
        },
    ];
    for entry in pending {
        collector.append_operation(entry);
    }

    // Extraction yields all three entries ordered by key.
    let sorted = collector.extract_and_sort();
    let keys: Vec<i32> = sorted.iter().map(|e| e.target_key).collect();
    assert_eq!(keys, [10, 20, 30]);

    // Invariant: the internal buffer is empty after extraction.
    assert!(collector.extract_and_sort().is_empty());
}

#[test]
fn test_node_construction_and_atomic_pointers() {
    // A freshly constructed node keeps its key, starts at height 0,
    // and has a null left-child pointer.
    // No heap boxing is needed just to inspect fields (clippy: boxed_local).
    let node = Node::new(42, "value".to_string());
    assert_eq!(node.key, 42);
    assert_eq!(node.height, 0);
    assert!(node.left_child.load(Ordering::SeqCst).is_null());
}

#[test]
fn test_error_handling_propagation() {
    // Requesting more bytes than the arena's capacity must surface
    // an AllocationExhausted error rather than panic.
    let mut small_arena = GenerationalArena::<128>::new();
    let outcome = small_arena.allocate(200);
    assert!(matches!(
        outcome,
        Err(ConcurrentAVLError::AllocationExhausted)
    ));
}