use std::fmt as StdFmt;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicPtr, Ordering as AtomicOrdering};
use crate::shard_counter::ShardedCounter;
use crate::alloc_trait::TreeAllocator;
use crate::alloc15::SeizeAllocator;
use crate::inline::bits::InlineBits;
use crate::leaf15::LeafNode15;
use crate::policy::{BoxPolicy, InlinePolicy, LeafPolicy};
use coalesce::CoalesceQueue;
use seize::Collector;
mod batch_utils;
mod coalesce;
mod generic;
mod range;
pub mod remove;
mod split;
pub use generic::{BatchEntry, BatchInsertResult, Entry, OccupiedEntry, VacantEntry};
pub use range::{KeysIter, RangeBound, RangeIter, ScanEntry, ValuesIter};
pub use remove::RemoveError;
/// Public, stable facade over the private `batch_utils` module: helpers for
/// building batch-insert inputs (`BatchStats`, `from_iter`, sequential key
/// generators, `zip_into_entries`).
pub mod batch {
    pub use super::batch_utils::{
        BatchStats, from_iter, sequential_keys, sequential_u64_keys, zip_into_entries,
    };
}
/// Failure/retry conditions reported by the insertion paths.
///
/// NOTE(review): several variants read like internal control-flow signals
/// ("split required", "propagation required") rather than terminal errors —
/// confirm against the insert/split modules which ones can actually escape to
/// callers.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InsertError {
    /// The target leaf node has no free slot.
    LeafFull,
    /// The leaf is full and must be split before the insert can proceed.
    SplitRequired,
    /// A key conflict requires creating a new layer (see `Display` text).
    LayerCreationRequired,
    /// A split operation failed.
    SplitFailed,
    /// A split must propagate upward because the parent node is full.
    SplitPropagationRequired,
}
impl StdFmt::Display for InsertError {
    /// Writes a fixed, human-readable description of the insertion failure.
    fn fmt(&self, f: &mut StdFmt::Formatter<'_>) -> StdFmt::Result {
        // Every variant maps to a static message, so resolve the text first
        // and emit it with a single formatter call.
        let msg = match self {
            Self::LeafFull => "leaf node is full",
            Self::SplitRequired => "split required (leaf full)",
            Self::LayerCreationRequired => "layer creation required (key conflict)",
            Self::SplitFailed => "split operation failed",
            Self::SplitPropagationRequired => "split propagation required (parent full)",
        };
        f.write_str(msg)
    }
}

// `Display` above plus the derived `Debug` satisfy the `Error` supertraits;
// the default-provided methods are sufficient.
impl std::error::Error for InsertError {}
/// A concurrent Masstree-style ordered map, generic over the leaf storage
/// policy `P` and the node allocator `A`.
///
/// NOTE(review): the invariants for `root_ptr` (ownership of the pointee,
/// whether it may be null) are established by code outside this chunk —
/// confirm against the constructor and insert/split modules.
pub struct MassTreeGeneric<P, A>
where
    P: LeafPolicy,
    A: TreeAllocator<P>,
{
    /// Deferred-reclamation collector; `Drop` calls `reclaim_all` on it.
    collector: Collector,
    /// Allocates tree nodes and frees the whole graph via `teardown_tree`.
    allocator: A,
    /// Type-erased root node pointer; loaded with `Acquire` in `Drop` and
    /// `Relaxed` for `Debug` output.
    root_ptr: AtomicPtr<u8>,
    /// Element counter (sharded, presumably to reduce write contention).
    count: ShardedCounter,
    /// Nodes queued for coalescing; cleared first during `Drop`.
    coalesce_queue: CoalesceQueue,
    /// Ties the otherwise-unused policy parameter `P` to the struct.
    _marker: PhantomData<P>,
}
// SAFETY: NOTE(review): this asserts the tree may be moved to another thread
// for *any* `P`/`A` satisfying the bounds. Nothing visible here requires the
// stored value type (or the allocator) to be `Send`; confirm that the
// `LeafPolicy`/`TreeAllocator` supertraits or the public constructors enforce
// it, otherwise this blanket impl is a soundness hole.
unsafe impl<P, A> Send for MassTreeGeneric<P, A>
where
    P: LeafPolicy,
    A: TreeAllocator<P>,
{
}
// SAFETY: NOTE(review): asserts `&MassTreeGeneric` may be shared across
// threads unconditionally. As with the `Send` impl, no `Sync` bound on the
// value/allocator types is visible in this chunk — verify the trait
// definitions or constructors guarantee it.
unsafe impl<P, A> Sync for MassTreeGeneric<P, A>
where
    P: LeafPolicy,
    A: TreeAllocator<P>,
{
}
impl<P, A> StdFmt::Debug for MassTreeGeneric<P, A>
where
    P: LeafPolicy,
    A: TreeAllocator<P>,
{
    /// Debug summary: raw root pointer, element count, leaf width, and the
    /// number of pending coalesce entries. Collector and allocator internals
    /// are omitted (hence `finish_non_exhaustive`).
    fn fmt(&self, f: &mut StdFmt::Formatter<'_>) -> StdFmt::Result {
        let mut out = f.debug_struct("MassTreeGeneric");
        // Relaxed is fine here: this is a diagnostic snapshot, not a
        // synchronization point.
        out.field("root_ptr", &self.root_ptr.load(AtomicOrdering::Relaxed));
        out.field("count", &self.count.load());
        out.field("width", &LeafNode15::<P>::WIDTH);
        out.field("pending_coalesce", &self.coalesce_queue.len());
        out.finish_non_exhaustive()
    }
}
/// Outcome of probing a node for an insertion position.
#[derive(Debug)]
pub enum InsertSearchResultGeneric {
    /// The key already exists at `slot`.
    Found { slot: usize },
    /// The key is absent; `logical_pos` is the position it would occupy.
    NotFound { logical_pos: usize },
    /// `slot` holds a conflicting key — presumably a shared-prefix collision
    /// that forces layer creation (cf. `InsertError::LayerCreationRequired`);
    /// confirm against the insert path.
    Conflict { slot: usize },
    /// `slot` points to a deeper layer the search must descend into.
    Layer { slot: usize },
}
impl<P, A> Drop for MassTreeGeneric<P, A>
where
    P: LeafPolicy,
    A: TreeAllocator<P>,
{
    /// Tears the tree down in a fixed order: pending coalesce work is
    /// discarded first, then deferred retirements are reclaimed, and finally
    /// the node graph rooted at `root_ptr` is freed by the allocator.
    fn drop(&mut self) {
        // Empty the coalesce queue before reclaiming/tearing down —
        // presumably so it no longer holds entries referring to nodes that
        // are about to be freed (confirm against `CoalesceQueue`).
        self.coalesce_queue.clear();
        // SAFETY: `drop` takes `&mut self`, so no other thread can still be
        // operating on this tree or hold a guard from this collector; eagerly
        // reclaiming all retired allocations cannot race with readers.
        unsafe { self.collector.reclaim_all() };
        let root: *mut u8 = self.root_ptr.load(AtomicOrdering::Acquire);
        // NOTE(review): assumes `teardown_tree` tolerates whatever root
        // states are reachable here (including null, if possible) — confirm.
        self.allocator.teardown_tree(root);
    }
}
/// Default public tree type: the inline-value variant.
pub type MassTree<V> = MassTree15Inline<V>;
/// Tree storing boxed values, backed by the seize-based allocator. (The `15`
/// presumably refers to the node width — cf. `LeafNode15::WIDTH`; confirm.)
pub type MassTree15<V> = MassTreeGeneric<BoxPolicy<V>, SeizeAllocator<BoxPolicy<V>>>;
/// Tree storing values inline in the nodes (values must implement
/// `InlineBits`; see the `impl` block below).
pub type MassTree15Inline<V> = MassTreeGeneric<InlinePolicy<V>, SeizeAllocator<InlinePolicy<V>>>;
impl<V: Send + Sync + 'static> MassTree15<V> {
    /// Creates an empty boxed-value tree with a fresh `SeizeAllocator`.
    #[must_use]
    #[inline(always)]
    pub fn new() -> Self {
        Self::with_allocator(SeizeAllocator::new())
    }

    /// Creates an empty tree with an explicit allocation batch size.
    /// NOTE(review): `batch_size` semantics come from
    /// `with_allocator_batch_size`, defined elsewhere — confirm there.
    #[must_use]
    #[inline(always)]
    pub fn with_batch_size(batch_size: usize) -> Self {
        Self::with_allocator_batch_size(SeizeAllocator::new(), Some(batch_size))
    }
}
impl<V: Send + Sync + 'static> Default for MassTree15<V> {
    /// Equivalent to [`MassTree15::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<V: InlineBits> MassTree15Inline<V> {
    /// Creates an empty inline-value tree with a fresh `SeizeAllocator`.
    #[must_use]
    #[inline(always)]
    pub fn new() -> Self {
        Self::with_allocator(SeizeAllocator::new())
    }

    /// Creates an empty inline-value tree with an explicit allocation batch
    /// size (forwarded to `with_allocator_batch_size`, defined elsewhere).
    #[must_use]
    #[inline(always)]
    pub fn with_batch_size(batch_size: usize) -> Self {
        Self::with_allocator_batch_size(SeizeAllocator::new(), Some(batch_size))
    }
}
impl<V: InlineBits> Default for MassTree15Inline<V> {
    /// Equivalent to [`MassTree15Inline::new`].
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests live in the sibling `unit_tests` module; the `expect` lints
// acknowledge test-only shortcuts (unwraps, narrowing casts, late items)
// that the crate's lint level would otherwise reject.
#[cfg(test)]
#[expect(clippy::unwrap_used, reason = "Fail fast in tests")]
#[expect(clippy::cast_possible_truncation, reason = "reasonable in tests")]
#[expect(clippy::cast_sign_loss, reason = "reasonable in tests")]
#[expect(clippy::items_after_statements, reason = "doesn't matter in tests")]
mod unit_tests;