concurrent_map/lib.rs

1#![cfg_attr(
2    test,
3    deny(
4        missing_docs,
5        future_incompatible,
6        nonstandard_style,
7        rust_2018_idioms,
8        missing_copy_implementations,
9        trivial_casts,
10        trivial_numeric_casts,
11        unused_qualifications,
12    )
13)]
14#![cfg_attr(test, deny(
15    clippy::cast_lossless,
16    clippy::cast_possible_truncation,
17    clippy::cast_possible_wrap,
18    clippy::cast_precision_loss,
19    clippy::cast_sign_loss,
20    clippy::decimal_literal_representation,
21    clippy::doc_markdown,
22    // clippy::else_if_without_else,
23    clippy::empty_enum,
24    clippy::explicit_into_iter_loop,
25    clippy::explicit_iter_loop,
26    clippy::expl_impl_clone_on_copy,
27    clippy::fallible_impl_from,
28    clippy::filter_map_next,
29    clippy::float_arithmetic,
30    clippy::get_unwrap,
31    clippy::if_not_else,
32    clippy::indexing_slicing,
33    clippy::inline_always,
34    clippy::integer_arithmetic,
35    clippy::invalid_upcast_comparisons,
36    clippy::items_after_statements,
37    clippy::manual_find_map,
38    clippy::map_entry,
39    clippy::map_flatten,
40    clippy::match_like_matches_macro,
41    clippy::match_same_arms,
42    clippy::maybe_infinite_iter,
43    clippy::mem_forget,
44    // clippy::missing_docs_in_private_items,
45    clippy::module_name_repetitions,
46    clippy::multiple_inherent_impl,
47    clippy::mut_mut,
48    clippy::needless_borrow,
49    clippy::needless_continue,
50    clippy::needless_pass_by_value,
51    clippy::non_ascii_literal,
52    clippy::path_buf_push_overwrite,
53    // clippy::print_stdout,
54    clippy::redundant_closure_for_method_calls,
55    clippy::shadow_reuse,
56    clippy::shadow_same,
57    clippy::shadow_unrelated,
58    clippy::single_match_else,
59    clippy::string_add,
60    clippy::string_add_assign,
61    clippy::type_repetition_in_bounds,
62    clippy::unicode_not_nfc,
63    clippy::unimplemented,
64    clippy::unseparated_literal_suffix,
65    clippy::used_underscore_binding,
66    clippy::wildcard_dependencies,
67))]
68#![cfg_attr(
69    test,
70    warn(
71        clippy::missing_const_for_fn,
72        clippy::multiple_crate_versions,
73        clippy::wildcard_enum_match_arm,
74    )
75)]
76
77//! A lock-free B+ tree based on [sled](https://github.com/spacejam/sled)'s internal
78//! index structure, but supporting richer Rust types as keys and values than raw bytes.
79//!
80//! This structure supports atomic compare and swap operations with the
81//! [`ConcurrentMap::cas`] method.
82//!
83//! The [`ConcurrentMap`] allows users to tune the tree fan-out (`FANOUT`)
84//! and the underlying memory reclamation granularity (`LOCAL_GC_BUFFER_SIZE`)
85//! for achieving desired performance properties. The defaults are reasonable
86//! for most use cases, but if you want to squeeze out every last bit of performance
87//! for your particular workload, tuning them based on realistic measurements
88//! may be beneficial. See the [`ConcurrentMap`] docs for more details.
89//!
90//! If you want to use a custom key type, you must
91//! implement the [`Minimum`] trait,
92//! allowing the left-most side of the tree to be
93//! created before inserting any data. If you wish
94//! to perform scans in reverse lexicographical order,
95//! you may instead implement [`Maximum`] for your key
96//! type and use [`std::cmp::Reverse`].
97//!
98//! This is an ordered data structure, and supports very high throughput iteration over
99//! lexicographically sorted ranges of values. If you are looking for simple point operation
100//! performance, you may find a better option among one of the many concurrent
101//! hashmap implementations that are floating around.
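//!
//! As a quick sketch, a custom key type only needs `Ord` plus a `MIN`
//! constant. The `Celsius` type below is purely illustrative:
//!
//! ```
//! use concurrent_map::{ConcurrentMap, Minimum};
//!
//! // A hypothetical key type, used here only for illustration.
//! #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
//! struct Celsius(i16);
//!
//! impl Minimum for Celsius {
//!     // Must compare less than or equal to every other possible value.
//!     const MIN: Self = Celsius(i16::MIN);
//! }
//!
//! let map = ConcurrentMap::<Celsius, &'static str>::default();
//! map.insert(Celsius(-40), "chilly");
//! assert_eq!(map.get(&Celsius(-40)), Some("chilly"));
//! ```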
102
103#[cfg(feature = "serde")]
104mod serde;
105
106#[cfg(not(feature = "fault_injection"))]
107#[inline]
108const fn debug_delay() -> bool {
109    false
110}
111
112/// This function is useful for inducing random jitter into
113/// our atomic operations, shaking out more possible
114/// interleavings quickly. When the `fault_injection` feature is
115/// disabled, the constant version above compiles away entirely.
116#[cfg(feature = "fault_injection")]
117fn debug_delay() -> bool {
118    use std::thread;
119    use std::time::Duration;
120
121    use rand::{thread_rng, Rng};
122
123    let mut rng = thread_rng();
124
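    // Yield on roughly 1 in 100 calls; otherwise return immediately.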
125    match rng.gen_range(0..100) {
126        0..=98 => false,
127        _ => {
128            thread::yield_now();
129            true
130        }
131    }
132}
133
134use stack_map::StackMap;
135
136use std::borrow::Borrow;
137use std::fmt;
138use std::mem::size_of;
139use std::num::{
140    NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
141    NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
142};
143use std::ops::{Bound, Deref};
144use std::ptr::NonNull;
145use std::sync::{
146    atomic::{AtomicPtr, AtomicUsize, Ordering},
147    Arc,
148};
149
150#[cfg(feature = "timing")]
151use std::time::{Duration, Instant};
152
153use ebr::{Ebr, Guard};
154
155// NB this must always be 1
156const MERGE_SIZE: usize = 1;
157
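/// Garbage whose reclamation is deferred through EBR until no reader can
/// still observe it: replaced `Node` boxes, and the heap-allocated
/// `AtomicPtr` cells behind `BoxedAtomicPtr` once they become unreachable.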
158#[derive(Debug)]
159enum Deferred<
160    K: 'static + Clone + Minimum + Send + Sync + Ord,
161    V: 'static + Clone + Send + Sync,
162    const FANOUT: usize,
163> {
164    #[allow(unused)]
165    Node(Box<Node<K, V, FANOUT>>),
166    BoxedAtomicPtr(BoxedAtomicPtr<K, V, FANOUT>),
167}
168
169impl<
170        K: 'static + Clone + Minimum + Send + Sync + Ord,
171        V: 'static + Clone + Send + Sync,
172        const FANOUT: usize,
173    > Drop for Deferred<K, V, FANOUT>
174{
175    fn drop(&mut self) {
176        if let Deferred::BoxedAtomicPtr(id) = self {
177            assert!(!id.0.is_null());
178            let reclaimed: Box<AtomicPtr<Node<K, V, FANOUT>>> =
179                unsafe { Box::from_raw(id.0 as *mut _) };
180            drop(reclaimed);
181        }
182    }
183}
184
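/// A copyable identity for a node: a raw pointer to a heap-allocated
/// `AtomicPtr` cell that always holds the current version of that node.
/// Replacing a node is a compare-exchange on this cell (see `NodeView::cas`),
/// and the cell itself is reclaimed through `Deferred::BoxedAtomicPtr` or
/// when the tree is dropped.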
185#[derive(Debug, Clone, Eq)]
186struct BoxedAtomicPtr<
187    K: 'static + Clone + Minimum + Send + Sync + Ord,
188    V: 'static + Clone + Send + Sync,
189    const FANOUT: usize,
190>(*const AtomicPtr<Node<K, V, FANOUT>>);
191
192impl<
193        K: 'static + Clone + Minimum + Send + Sync + Ord,
194        V: 'static + Clone + Send + Sync,
195        const FANOUT: usize,
196    > Copy for BoxedAtomicPtr<K, V, FANOUT>
197{
198}
199
200impl<
201        K: 'static + Clone + Minimum + Send + Sync + Ord,
202        V: 'static + Clone + Send + Sync,
203        const FANOUT: usize,
204    > PartialEq for BoxedAtomicPtr<K, V, FANOUT>
205{
206    fn eq(&self, other: &Self) -> bool {
207        self.0 == other.0
208    }
209}
210
211unsafe impl<
212        K: 'static + Clone + Minimum + Send + Sync + Ord,
213        V: 'static + Clone + Send + Sync,
214        const FANOUT: usize,
215    > Send for BoxedAtomicPtr<K, V, FANOUT>
216{
217}
218
219unsafe impl<
220        K: 'static + Clone + Minimum + Send + Sync + Ord,
221        V: 'static + Clone + Send + Sync,
222        const FANOUT: usize,
223    > Sync for BoxedAtomicPtr<K, V, FANOUT>
224{
225}
226
227impl<
228        K: 'static + Clone + Minimum + Send + Sync + Ord,
229        V: 'static + Clone + Send + Sync,
230        const FANOUT: usize,
231    > Deref for BoxedAtomicPtr<K, V, FANOUT>
232{
233    type Target = AtomicPtr<Node<K, V, FANOUT>>;
234
235    fn deref(&self) -> &AtomicPtr<Node<K, V, FANOUT>> {
236        unsafe { &*self.0 }
237    }
238}
239
240impl<
241        K: 'static + Clone + Minimum + Send + Sync + Ord,
242        V: 'static + Clone + Send + Sync,
243        const FANOUT: usize,
244    > BoxedAtomicPtr<K, V, FANOUT>
245{
246    fn new(node: Box<Node<K, V, FANOUT>>) -> BoxedAtomicPtr<K, V, FANOUT> {
247        let pointee_ptr = Box::into_raw(node);
248        let pointer_ptr = Box::into_raw(Box::new(AtomicPtr::new(pointee_ptr)));
249        BoxedAtomicPtr(pointer_ptr)
250    }
251
252    fn node_view<const LOCAL_GC_BUFFER_SIZE: usize>(
253        &self,
254        _guard: &mut Guard<'_, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
255    ) -> Option<NodeView<K, V, FANOUT>> {
256        let ptr = NonNull::new(self.load(Ordering::Acquire))?;
257
258        Some(NodeView { ptr, id: *self })
259    }
260}
261
262/// Error type for the [`ConcurrentMap::cas`] operation.
263#[derive(Debug, PartialEq, Eq)]
264pub struct CasFailure<V> {
265    /// The current actual value that failed the comparison
266    pub actual: Option<V>,
267    /// The value that was proposed as a new value, which could
268    /// not be installed due to the comparison failure.
269    pub returned_new_value: Option<V>,
270}
271
272#[derive(Debug)]
273struct NodeView<K, V, const FANOUT: usize>
274where
275    K: 'static + Clone + Minimum + Ord + Send + Sync,
276    V: 'static + Clone + Send + Sync,
277{
278    ptr: NonNull<Node<K, V, FANOUT>>,
279    id: BoxedAtomicPtr<K, V, FANOUT>,
280}
281
282impl<K, V, const FANOUT: usize> NodeView<K, V, FANOUT>
283where
284    K: 'static + Clone + Minimum + Ord + Send + Sync,
285    V: 'static + Clone + Send + Sync,
286{
287    /// Try to replace. If the node has been deleted since we got our view,
288    /// an Err(None) is returned.
289    fn cas<const LOCAL_GC_BUFFER_SIZE: usize>(
290        &self,
291        replacement: Box<Node<K, V, FANOUT>>,
292        guard: &mut Guard<'_, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
293    ) -> Result<NodeView<K, V, FANOUT>, Option<NodeView<K, V, FANOUT>>> {
294        assert!(
295            !(replacement.hi.is_some() ^ replacement.next.is_some()),
296            "hi and next must both either be None or Some"
297        );
298
299        if debug_delay() {
300            return Err(Some(NodeView {
301                ptr: self.ptr,
302                id: self.id,
303            }));
304        }
305
306        let replacement_ptr = Box::into_raw(replacement);
307        let res = self.id.compare_exchange(
308            self.ptr.as_ptr(),
309            replacement_ptr,
310            Ordering::AcqRel,
311            Ordering::Acquire,
312        );
313
314        match res {
315            Ok(_) => {
316                let replaced: Box<Node<K, V, FANOUT>> = unsafe { Box::from_raw(self.ptr.as_ptr()) };
317                guard.defer_drop(Deferred::Node(replaced));
318                Ok(NodeView {
319                    ptr: NonNull::new(replacement_ptr).unwrap(),
320                    id: self.id,
321                })
322            }
323            Err(actual) => {
324                let failed_value: Box<Node<K, V, FANOUT>> =
325                    unsafe { Box::from_raw(replacement_ptr) };
326                drop(failed_value);
327
328                if actual.is_null() {
329                    Err(None)
330                } else {
331                    Err(Some(NodeView {
332                        ptr: NonNull::new(actual).unwrap(),
333                        id: self.id,
334                    }))
335                }
336            }
337        }
338    }
339
340    /// This function is used to get a mutable reference to
341    /// the node. It is intended as an optimization to avoid
342    /// RCU overhead when the overall ConcurrentMap's inner
343    /// Arc only has a single copy, giving us enough runtime
344    /// information to uphold the required invariant that there
345    /// is at most one accessing thread for the overall structure.
346    ///
347    /// Additional care must be taken to ensure that at any time,
348    /// there is only ever a single mutable reference to this
349    /// inner Node, otherwise various optimizations may cause
350    /// memory corruption. When in doubt, don't use this.
351    unsafe fn get_mut(&mut self) -> &mut Node<K, V, FANOUT> {
352        self.ptr.as_mut()
353    }
354}
355
356impl<K, V, const FANOUT: usize> Deref for NodeView<K, V, FANOUT>
357where
358    K: 'static + Clone + Minimum + Ord + Send + Sync,
359    V: 'static + Clone + Send + Sync,
360{
361    type Target = Node<K, V, FANOUT>;
362
363    fn deref(&self) -> &Self::Target {
364        unsafe { self.ptr.as_ref() }
365    }
366}
367
368/// Trait for types for which a minimum possible value exists.
369///
370/// This trait must be implemented for any `K` key type in the [`ConcurrentMap`].
371pub trait Minimum: Ord {
372    /// The returned value must be less than or equal
373    /// to all possible values for this type.
374    const MIN: Self;
375}
376
377/// Trait for types for which a maximum possible value exists.
378///
379/// This exists primarily to play nicely with [`std::cmp::Reverse`] keys
380/// for achieving high performance reverse iteration.
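///
/// A small sketch of descending iteration using `Reverse` keys:
///
/// ```
/// use std::cmp::Reverse;
///
/// let map = concurrent_map::ConcurrentMap::<Reverse<u64>, ()>::default();
/// map.insert(Reverse(1), ());
/// map.insert(Reverse(2), ());
///
/// // Ordering on `Reverse<u64>` puts larger numbers first.
/// let keys: Vec<u64> = map.iter().map(|(Reverse(k), _)| k).collect();
/// assert_eq!(keys, vec![2, 1]);
/// ```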
381pub trait Maximum: Ord {
382    /// The returned value must be greater than or equal
383    /// to all possible values for this type.
384    const MAX: Self;
385}
386
387impl Minimum for () {
388    const MIN: Self = ();
389}
390
391impl Minimum for bool {
392    const MIN: Self = false;
393}
394
395impl<T: Maximum> Minimum for std::cmp::Reverse<T> {
396    const MIN: Self = std::cmp::Reverse(T::MAX);
397}
398
399macro_rules! impl_integer {
400    ($($t:ty),+) => {
401        $(
402            impl Minimum for $t {
403                const MIN: Self = <$t>::MIN;
404            }
405
406            impl Maximum for $t {
407                const MAX: Self = <$t>::MAX;
408            }
409        )*
410    }
411}
412
413impl_integer!(
414    usize,
415    u8,
416    u16,
417    u32,
418    u64,
419    u128,
420    isize,
421    i8,
422    i16,
423    i32,
424    i64,
425    i128,
426    NonZeroI128,
427    NonZeroI16,
428    NonZeroI32,
429    NonZeroI64,
430    NonZeroI8,
431    NonZeroIsize,
432    NonZeroU128,
433    NonZeroU16,
434    NonZeroU32,
435    NonZeroU64,
436    NonZeroU8,
437    NonZeroUsize
438);
439
440impl<T: Ord> Minimum for Vec<T> {
441    const MIN: Self = Vec::new();
442}
443
444impl<T: Ord> Minimum for &[T] {
445    const MIN: Self = &[];
446}
447
448impl<T: Minimum, const LEN: usize> Minimum for [T; LEN] {
449    const MIN: Self = [T::MIN; LEN];
450}
451
452impl Minimum for String {
453    const MIN: Self = String::new();
454}
455
456impl Minimum for &str {
457    const MIN: Self = "";
458}
459
460impl<A: Minimum, B: Minimum> Minimum for (A, B) {
461    const MIN: Self = (A::MIN, B::MIN);
462}
463impl<A: Minimum, B: Minimum, C: Minimum> Minimum for (A, B, C) {
464    const MIN: Self = (A::MIN, B::MIN, C::MIN);
465}
466impl<A: Minimum, B: Minimum, C: Minimum, D: Minimum> Minimum for (A, B, C, D) {
467    const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN);
468}
469impl<A: Minimum, B: Minimum, C: Minimum, D: Minimum, E: Minimum> Minimum for (A, B, C, D, E) {
470    const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN, E::MIN);
471}
472impl<A: Minimum, B: Minimum, C: Minimum, D: Minimum, E: Minimum, F: Minimum> Minimum
473    for (A, B, C, D, E, F)
474{
475    const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN, E::MIN, F::MIN);
476}
477
478/// A lock-free B+ tree.
479///
480/// Note that this structure is `Send` but NOT `Sync`,
481/// despite being a lock-free tree. This is because the
482/// inner reclamation system, provided by the `ebr` crate,
483/// completely avoids atomic operations in its hot path
484/// for efficiency. If you want to share [`ConcurrentMap`]
485/// between threads, simply clone it, and this will set up
486/// a new efficient thread-local memory reclamation state.
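///
/// A minimal sketch of sharing by cloning; each clone gets its own
/// thread-local reclamation state but views the same shared tree:
///
/// ```
/// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
///
/// let map_2 = map.clone();
/// let writer = std::thread::spawn(move || {
///     map_2.insert(1, 1);
/// });
/// writer.join().unwrap();
///
/// assert_eq!(map.get(&1), Some(1));
/// ```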
487///
488/// If you want to use a custom key type, you must
489/// implement the [`Minimum`] trait,
490/// allowing the left-most side of the tree to be
491/// created before inserting any data. If you wish
492/// to perform scans in reverse lexicographical order,
493/// you may instead implement [`Maximum`] for your key
494/// type and use [`std::cmp::Reverse`].
495///
496/// The `FANOUT` const generic must be greater than 3.
497/// This const generic controls how large the fixed-size
498/// array for either child pointers (for index nodes) or
499/// values (for leaf nodes) will be. A higher value may
500/// make reads and scans faster, but writes will be slower
501/// because each modification performs a read-copy-update
502/// of the full node. In some cases, it may be preferable
503/// to wrap complex values in an `Arc` to avoid higher
504/// copy costs.
505///
506/// The `LOCAL_GC_BUFFER_SIZE` const generic must be greater than 0.
507/// This controls the epoch-based reclamation granularity.
508/// Garbage is placed into fixed-size arrays, and garbage collection
509/// only happens after this array fills up and a final timestamp is
510/// assigned to it. Lower values will cause replaced values to be dropped
511/// more quickly, but the efficiency will be lower. Values that are
512/// extremely high may cause undesirable memory usage because it will
513/// take more time to fill up each thread-local garbage segment.
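///
/// As a rough sketch, both knobs can be overridden through the const
/// generics (the values below are illustrative, not recommendations):
///
/// ```
/// // 8-way fan-out, with garbage reclaimed in batches of 16 per thread.
/// let small: concurrent_map::ConcurrentMap<u64, u64, 8, 16> =
///     concurrent_map::ConcurrentMap::default();
///
/// small.insert(1, 10);
/// assert_eq!(small.get(&1), Some(10));
/// ```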
514///
515/// # Examples
516///
517/// ```
518/// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
519///
520/// // insert and remove atomically return the previous value, if one was set,
521/// // similarly to a BTreeMap
522/// assert_eq!(map.insert(1, 10), None);
523/// assert_eq!(map.insert(1, 11), Some(10));
524/// assert_eq!(map.remove(&1), Some(11));
525///
526/// // get also functions similarly to BTreeMap, except it
527/// // returns a cloned version of the value rather than a
528/// // reference to it, so that no locks need to be maintained.
529/// // For this reason, it can be a good idea to use types that
530/// // are cheap to clone for values, which can be easily handled
531/// // with `Arc` etc...
532/// assert_eq!(map.insert(1, 12), None);
533/// assert_eq!(map.get(&1), Some(12));
534///
535/// // compare and swap from value 12 to value 20
536/// map.cas(1, Some(&12_usize), Some(20)).unwrap();
537///
538/// assert_eq!(map.get(&1).unwrap(), 20);
539///
540/// // there are a lot of methods that are not covered
541/// // here - check out the docs!
542/// ```
543#[derive(Clone)]
544pub struct ConcurrentMap<K, V, const FANOUT: usize = 64, const LOCAL_GC_BUFFER_SIZE: usize = 128>
545where
546    K: 'static + Clone + Minimum + Ord + Send + Sync,
547    V: 'static + Clone + Send + Sync,
548{
549    // epoch-based reclamation
550    ebr: Ebr<Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
551    // the tree structure, separate from the other
552    // types so that we can mix mutable references
553    // to ebr with immutable references to other
554    // things.
555    inner: Arc<Inner<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>>,
556    // an eventually consistent, lagging count of the
557    // number of items in this structure.
558    len: Arc<AtomicUsize>,
559}
560
561impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> PartialEq
562    for ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
563where
564    K: 'static + fmt::Debug + Clone + Minimum + Ord + Send + Sync + PartialEq,
565    V: 'static + fmt::Debug + Clone + Send + Sync + PartialEq,
566{
567    fn eq(&self, other: &Self) -> bool {
568        let literally_the_same = Arc::as_ptr(&self.inner) == Arc::as_ptr(&other.inner);
569        if literally_the_same {
570            return true;
571        }
572
573        let self_iter = self.iter();
574        let mut other_iter = other.iter();
575
576        for self_kv in self_iter {
577            let other_kv = other_iter.next();
578            if !Some(self_kv).eq(&other_kv) {
579                return false;
580            }
581        }
582
583        other_iter.next().is_none()
584    }
585}
586
587impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> fmt::Debug
588    for ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
589where
590    K: 'static + fmt::Debug + Clone + Minimum + Ord + Send + Sync,
591    V: 'static + fmt::Debug + Clone + Send + Sync,
592{
593    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
594        f.write_str("ConcurrentMap ")?;
595        f.debug_map().entries(self.iter()).finish()
596    }
597}
598
599impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> Default
600    for ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
601where
602    K: 'static + Clone + Minimum + Ord + Send + Sync,
603    V: 'static + Clone + Send + Sync,
604{
605    fn default() -> ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE> {
606        assert!(FANOUT > 3, "ConcurrentMap FANOUT must be greater than 3");
607        assert!(
608            LOCAL_GC_BUFFER_SIZE > 0,
609            "LOCAL_GC_BUFFER_SIZE must be greater than 0"
610        );
611        let mut root_node = Node::<K, V, FANOUT>::new_root();
612        let root_node_lo = root_node.lo.clone();
613        let leaf_node = Node::<K, V, FANOUT>::new_leaf(root_node.lo.clone());
614        let leaf = BoxedAtomicPtr::new(leaf_node);
615        root_node.index_mut().insert(root_node_lo, leaf);
616
617        let root = BoxedAtomicPtr::new(root_node);
618
619        let inner = Arc::new(Inner {
620            root,
621            #[cfg(feature = "timing")]
622            slowest_op: u64::MIN.into(),
623            #[cfg(feature = "timing")]
624            fastest_op: u64::MAX.into(),
625        });
626
627        ConcurrentMap {
628            ebr: Ebr::default(),
629            inner,
630            len: Arc::new(0.into()),
631        }
632    }
633}
634
635struct Inner<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize>
636where
637    K: 'static + Clone + Minimum + Ord + Send + Sync,
638    V: 'static + Clone + Send + Sync,
639{
640    root: BoxedAtomicPtr<K, V, FANOUT>,
641    #[cfg(feature = "timing")]
642    slowest_op: AtomicU64,
643    #[cfg(feature = "timing")]
644    fastest_op: AtomicU64,
645}
646
647impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> Drop
648    for Inner<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
649where
650    K: 'static + Clone + Minimum + Ord + Send + Sync,
651    V: 'static + Clone + Send + Sync,
652{
653    fn drop(&mut self) {
654        #[cfg(feature = "timing")]
655        self.print_timing();
656
657        let ebr = Ebr::default();
658        let mut guard = ebr.pin();
659
660        let mut cursor: NodeView<K, V, FANOUT> = self.root(&mut guard);
661
662        let mut lhs_chain: Vec<BoxedAtomicPtr<K, V, FANOUT>> = vec![];
663
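        // Descend the left-most edge of the tree, recording the head of each level.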
664        loop {
665            lhs_chain.push(cursor.id);
666            if cursor.is_leaf() {
667                break;
668            }
669            let child_ptr: BoxedAtomicPtr<K, V, FANOUT> = cursor.index().get_index(0).unwrap().1;
670
671            cursor = child_ptr.node_view(&mut guard).unwrap();
672        }
673
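        // Walk each level left to right via `next` pointers, freeing every node and
        // its pointer cell while accumulating fill statistics for optional reporting.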
674        let mut layer = 0;
675        for lhs_ptr in lhs_chain {
676            layer += 1;
677
678            let mut min_fill_physical: f64 = 1.0;
679            let mut max_fill_physical: f64 = 0.0;
680            let mut fill_sum_physical: f64 = 0.0;
681
682            let mut min_fill_logical: f64 = 1.0;
683            let mut max_fill_logical: f64 = 0.0;
684            let mut fill_sum_logical: f64 = 0.0;
685            let mut nodes_counted: usize = 0;
686
687            let mut next_opt: Option<BoxedAtomicPtr<K, V, FANOUT>> = Some(lhs_ptr);
688            while let Some(next) = next_opt {
689                assert!(!next.0.is_null());
690                let sibling_cursor = next.node_view(&mut guard).unwrap();
691
692                let fill_phy = ((size_of::<K>() + size_of::<V>()) * sibling_cursor.len()) as f64
693                    / size_of::<Node<K, V, FANOUT>>() as f64;
694                min_fill_physical = min_fill_physical.min(fill_phy);
695                max_fill_physical = max_fill_physical.max(fill_phy);
696                fill_sum_physical += fill_phy;
697
698                let fill_log = sibling_cursor.len() as f64 / FANOUT as f64;
699                min_fill_logical = min_fill_logical.min(fill_log);
700                max_fill_logical = max_fill_logical.max(fill_log);
701                fill_sum_logical += fill_log;
702                nodes_counted += 1;
703
704                next_opt = sibling_cursor.next;
705                let node_box = unsafe { Box::from_raw(sibling_cursor.ptr.as_ptr()) };
706                drop(node_box);
707
708                let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
709                    unsafe { Box::from_raw(next.0 as *mut _) };
710                drop(reclaimed_ptr);
711            }
712
713            if cfg!(feature = "print_utilization_on_drop") {
714                println!("layer {layer} count {nodes_counted}");
715                println!(
716                    "logical: min: {min_fill_logical} max: {max_fill_logical} avg: {}",
717                    fill_sum_logical / nodes_counted as f64
718                );
719                println!(
720                    "physical: min: {min_fill_physical} max: {max_fill_physical} avg: {}",
721                    fill_sum_physical / nodes_counted as f64
722                );
723            }
724        }
725    }
726}
727
728impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize>
729    ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
730where
731    K: 'static + Clone + Minimum + Ord + Send + Sync,
732    V: 'static + Clone + Send + Sync,
733{
734    /// Creates a new empty `ConcurrentMap<K, V, ...>`.
735    ///
736    /// # Examples
737    /// ```
738    /// use concurrent_map::ConcurrentMap;
739    ///
740    /// let cm: ConcurrentMap<bool, usize> = ConcurrentMap::new();
741    /// ```
742    pub fn new() -> Self {
743        Self::default()
744    }
745
746    /// Atomically get a value out of the map that is associated with this key.
747    ///
748    /// # Examples
749    /// ```
750    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
751    ///
752    /// map.insert(1, 1);
753    ///
754    /// let actual = map.get(&0);
755    /// let expected = None;
756    /// assert_eq!(expected, actual);
757    ///
758    /// let actual = map.get(&1);
759    /// let expected = Some(1);
760    /// assert_eq!(expected, actual);
761    /// ```
762    pub fn get<Q>(&self, key: &Q) -> Option<V>
763    where
764        K: Borrow<Q>,
765        Q: Ord + ?Sized,
766    {
767        let mut guard = self.ebr.pin();
768
769        let leaf = self.inner.leaf_for_key(LeafSearch::Eq(key), &mut guard);
770
771        leaf.get(key)
772    }
773
774    /// Returns `true` if the `ConcurrentMap` contains the specified key.
775    ///
776    /// # Examples
777    /// ```
778    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
779    ///
780    /// map.insert(1, 1);
781    ///
782    /// assert!(map.contains_key(&1));
783    /// assert!(!map.contains_key(&2));
784    /// ```
785    pub fn contains_key<Q>(&self, key: &Q) -> bool
786    where
787        K: Borrow<Q>,
788        Q: Ord + ?Sized,
789    {
790        self.get(key).is_some()
791    }
792
793    /// Atomically get the key and value out of the map associated with the greatest key
794    /// that is lexicographically less than the provided key.
795    ///
796    /// This will always return `None` if the key passed to `get_lt` == `K::MIN`.
797    ///
798    /// # Examples
799    /// ```
800    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
801    ///
802    /// map.insert(1, 1);
803    ///
804    /// let actual = map.get_lt(&0);
805    /// let expected = None;
806    /// assert_eq!(expected, actual);
807    ///
808    /// let actual = map.get_lt(&1);
809    /// let expected = None;
810    /// assert_eq!(expected, actual);
811    ///
812    /// let actual = map.get_lt(&2);
813    /// let expected = Some((1, 1));
814    /// assert_eq!(expected, actual);
815    /// ```
816    pub fn get_lt<Q>(&self, key: &Q) -> Option<(K, V)>
817    where
818        K: Borrow<Q>,
819        Q: ?Sized + Ord + PartialEq,
820    {
821        if key == K::MIN.borrow() {
822            return None;
823        }
824
825        let start = Bound::Unbounded;
826        let end = Bound::Excluded(key);
827        self.range((start, end)).next_back()
828    }
829
830    /// Atomically get the key and value out of the map associated with the greatest key
831    /// that is lexicographically less than or equal to the provided key.
832    ///
833    /// # Examples
834    /// ```
835    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
836    ///
837    /// map.insert(1, 1);
838    ///
839    /// let actual = map.get_lte(&0);
840    /// let expected = None;
841    /// assert_eq!(expected, actual);
842    ///
843    /// let actual = map.get_lte(&1);
844    /// let expected = Some((1, 1));
845    /// assert_eq!(expected, actual);
846    ///
847    /// let actual = map.get_lte(&2);
848    /// let expected = Some((1, 1));
849    /// assert_eq!(expected, actual);
850    /// ```
851    pub fn get_lte<Q>(&self, key: &Q) -> Option<(K, V)>
852    where
853        K: Borrow<Q>,
854        Q: ?Sized + Ord + PartialEq,
855    {
856        let mut guard = self.ebr.pin();
857        let end = LeafSearch::Eq(key);
858        let current_back = self.inner.leaf_for_key(end, &mut guard);
859
860        // fast path
861        if let Some((k, v)) = current_back.leaf().get_less_than_or_equal(key) {
862            return Some((k.clone(), v.clone()));
863        }
864
865        // slow path: fall back to reverse iterator
866        let current = self
867            .inner
868            .leaf_for_key(LeafSearch::Eq(K::MIN.borrow()), &mut guard);
869
870        Iter {
871            guard,
872            inner: &self.inner,
873            range: (Bound::Unbounded, Bound::Included(key)),
874            current,
875            current_back,
876            next_index: 0,
877            next_index_from_back: 0,
878            q: std::marker::PhantomData,
879        }
880        .next_back()
881    }
882
883    /// Atomically get the key and value out of the map associated with the least key
884    /// that is lexicographically greater than the provided key.
885    ///
886    /// # Examples
887    /// ```
888    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
889    ///
890    /// map.insert(1, 1);
891    ///
892    /// let actual = map.get_gt(&0);
893    /// let expected = Some((1, 1));
894    /// assert_eq!(expected, actual);
895    ///
896    /// let actual = map.get_gt(&1);
897    /// let expected = None;
898    /// assert_eq!(expected, actual);
899    ///
900    /// let actual = map.get_gt(&2);
901    /// let expected = None;
902    /// assert_eq!(expected, actual);
903    /// ```
904    pub fn get_gt<Q>(&self, key: &Q) -> Option<(K, V)>
905    where
906        K: Borrow<Q>,
907        Q: ?Sized + Ord + PartialEq,
908    {
909        self.range((Bound::Excluded(key), Bound::Unbounded)).next()
910    }
911
912    /// Atomically get the key and value out of the map associated with the least key
913    /// that is lexicographically greater than or equal to the provided key.
914    ///
915    /// # Examples
916    /// ```
917    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
918    ///
919    /// map.insert(1, 1);
920    ///
921    /// let actual = map.get_gte(&0);
922    /// let expected = Some((1, 1));
923    /// assert_eq!(expected, actual);
924    ///
925    /// let actual = map.get_gte(&1);
926    /// let expected = Some((1, 1));
927    /// assert_eq!(expected, actual);
928    ///
929    /// let actual = map.get_gte(&2);
930    /// let expected = None;
931    /// assert_eq!(expected, actual);
932    /// ```
933    pub fn get_gte<Q>(&self, key: &Q) -> Option<(K, V)>
934    where
935        K: Borrow<Q>,
936        Q: ?Sized + Ord + PartialEq,
937    {
938        self.range((Bound::Included(key), Bound::Unbounded)).next()
939    }
940
941    /// Get the minimum item stored in this structure.
942    ///
943    /// # Examples
944    /// ```
945    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
946    ///
947    /// map.insert(1, 1);
948    /// map.insert(2, 2);
949    /// map.insert(3, 3);
950    ///
951    /// let actual = map.first();
952    /// let expected = Some((1, 1));
953    /// assert_eq!(actual, expected);
954    /// ```
955    pub fn first(&self) -> Option<(K, V)> {
956        self.iter().next()
957    }
958
959    /// Atomically remove the minimum item stored in this structure.
960    ///
961    /// # Examples
962    /// ```
963    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
964    ///
965    /// map.insert(1, 1);
966    /// map.insert(2, 2);
967    /// map.insert(3, 3);
968    ///
969    /// let actual = map.pop_first();
970    /// let expected = Some((1, 1));
971    /// assert_eq!(actual, expected);
972    ///
973    /// assert_eq!(map.get(&1), None);
974    /// ```
975    pub fn pop_first(&self) -> Option<(K, V)>
976    where
977        V: PartialEq,
978    {
979        loop {
980            let (k, v) = self.first()?;
981            if self.cas(k.clone(), Some(&v), None).is_ok() {
982                return Some((k, v));
983            }
984        }
985    }
986
987    /// Pops the first kv pair in the provided range, or returns `None` if nothing
988    /// exists within that range.
989    ///
990    /// # Panics
991    ///
992    /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN).
993    ///
994    /// # Examples
995    ///
996    /// ```
997    /// use concurrent_map::ConcurrentMap;
998    ///
999    /// let data = vec![
1000    ///     ("key 1", 1),
1001    ///     ("key 2", 2),
1002    ///     ("key 3", 3)
1003    /// ];
1004    ///
1005    /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect();
1006    ///
1007    /// let r1 = map.pop_first_in_range("key 1"..="key 3");
1008    /// assert_eq!(Some(("key 1", 1_usize)), r1);
1009    ///
1010    /// let r2 = map.pop_first_in_range("key 1".."key 3");
1011    /// assert_eq!(Some(("key 2", 2_usize)), r2);
1012    ///
1013    /// let r3: Vec<_> = map.range("key 4"..).collect();
1014    /// assert!(r3.is_empty());
1015    ///
1016    /// let r4 = map.pop_first_in_range("key 2"..="key 3");
1017    /// assert_eq!(Some(("key 3", 3_usize)), r4);
1018    /// ```
1019    pub fn pop_first_in_range<Q, R>(&self, range: R) -> Option<(K, V)>
1020    where
1021        R: std::ops::RangeBounds<Q> + Clone,
1022        K: Borrow<Q>,
1023        V: PartialEq,
1024        Q: ?Sized + Ord + PartialEq,
1025    {
1026        loop {
1027            let mut r = self.range(range.clone());
1028            let (k, v) = r.next()?;
1029            if self.cas(k.clone(), Some(&v), None).is_ok() {
1030                return Some((k, v));
1031            }
1032        }
1033    }
1034
1035    /// Get the maximum item stored in this structure.
1036    ///
1037    /// # Examples
1038    /// ```
1039    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1040    ///
1041    /// map.insert(1, 1);
1042    /// map.insert(2, 2);
1043    /// map.insert(3, 3);
1044    ///
1045    /// let actual = map.last();
1046    /// let expected = Some((3, 3));
1047    /// assert_eq!(actual, expected);
1048    /// ```
1049    pub fn last(&self) -> Option<(K, V)> {
1050        self.iter().next_back()
1051    }
1052
1053    /// Atomically remove the maximum item stored in this structure.
1054    ///
1055    /// # Examples
1056    /// ```
1057    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1058    ///
1059    /// map.insert(1, 1);
1060    /// map.insert(2, 2);
1061    /// map.insert(3, 3);
1062    ///
1063    /// let actual = map.pop_last();
1064    /// let expected = Some((3, 3));
1065    /// assert_eq!(actual, expected);
1066    ///
1067    /// assert_eq!(map.get(&3), None);
1068    /// ```
1069    pub fn pop_last(&self) -> Option<(K, V)>
1070    where
1071        V: PartialEq,
1072    {
1073        loop {
1074            let (k, v) = self.last()?;
1075            if self.cas(k.clone(), Some(&v), None).is_ok() {
1076                return Some((k, v));
1077            }
1078        }
1079    }
1080
1081    /// Pops the last kv pair in the provided range, or returns `None` if nothing
1082    /// exists within that range.
1083    ///
1084    /// # Panics
1085    ///
1086    /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN).
1087    ///
1088    /// # Examples
1089    ///
1090    /// ```
1091    /// use concurrent_map::ConcurrentMap;
1092    ///
1093    /// let data = vec![
1094    ///     ("key 1", 1),
1095    ///     ("key 2", 2),
1096    ///     ("key 3", 3)
1097    /// ];
1098    ///
1099    /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect();
1100    ///
1101    /// let r1 = map.pop_last_in_range("key 1"..="key 3");
1102    /// assert_eq!(Some(("key 3", 3_usize)), r1);
1103    ///
1104    /// let r2 = map.pop_last_in_range("key 1".."key 3");
1105    /// assert_eq!(Some(("key 2", 2_usize)), r2);
1106    ///
1107    /// let r3 = map.pop_last_in_range("key 4"..);
1108    /// assert!(r3.is_none());
1109    ///
1110    /// let r4 = map.pop_last_in_range("key 2"..="key 3");
1111    /// assert!(r4.is_none());
1112    ///
1113    /// let r5 = map.pop_last_in_range("key 0"..="key 3");
1114    /// assert_eq!(Some(("key 1", 1_usize)), r5);
1115    ///
1116    /// let r6 = map.pop_last_in_range("key 0"..="key 3");
1117    /// assert!(r6.is_none());
1118    /// ```
1119    pub fn pop_last_in_range<Q, R>(&self, range: R) -> Option<(K, V)>
1120    where
1121        R: std::ops::RangeBounds<Q> + Clone,
1122        K: Borrow<Q>,
1123        V: PartialEq,
1124        Q: ?Sized + Ord + PartialEq,
1125    {
1126        loop {
1127            let mut r = self.range(range.clone());
1128            let (k, v) = r.next_back()?;
1129            if self.cas(k.clone(), Some(&v), None).is_ok() {
1130                return Some((k, v));
1131            }
1132        }
1133    }
1134
1135    /// Atomically insert a key-value pair into the map, returning the previous value associated with this key if one existed.
1136    ///
1137    /// This method has an optimization that skips lock-free RCU when the internal `Arc` has a
1138    /// strong count of `1`, significantly increasing insertion throughput when used from a
1139    /// single thread.
1140    ///
1141    /// # Examples
1142    ///
1143    /// ```
1144    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1145    ///
1146    /// assert_eq!(map.insert(1, 1), None);
1147    /// assert_eq!(map.insert(1, 1), Some(1));
1148    /// ```
1149    pub fn insert(&self, key: K, value: V) -> Option<V> {
1150        let strong_count = Arc::strong_count(&self.inner);
1151        let direct_mutations_safe = strong_count == 1;
1152
1153        // This optimization allows us to completely skip RCU.
1154        // We use the debug_delay here to exercise both paths
1155        // even when testing with a single thread.
1156        if direct_mutations_safe && !debug_delay() {
1157            let mut guard = self.ebr.pin();
1158            let mut leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard);
1159            let node_mut_ref: &mut Node<K, V, FANOUT> = unsafe { leaf.get_mut() };
1160            assert!(!node_mut_ref.should_split(), "bad leaf: should split");
1161
1162            let ret = node_mut_ref.insert(key, value);
1163
1164            if node_mut_ref.should_split() {
1165                // no need to track this split for potential cleanup, because
1166                // installing it through the mutable reference cannot fail.
1167                node_mut_ref.split();
1168            }
1169
1170            if ret.is_none() {
1171                self.len.fetch_add(1, Ordering::Relaxed);
1172            }
1173
1174            return ret;
1175        }
1176
1177        // Concurrent workloads need to do the normal RCU loop.
1178        loop {
1179            let mut guard = self.ebr.pin();
1180            let leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard);
1181
1182            let mut leaf_clone: Box<Node<K, V, FANOUT>> = Box::new((*leaf).clone());
1183            assert!(!leaf_clone.should_split(), "bad leaf: should split");
1184
1185            let ret = leaf_clone.insert(key.clone(), value.clone());
1186
1187            let rhs_ptr_opt = if leaf_clone.should_split() {
1188                Some(leaf_clone.split())
1189            } else {
1190                None
1191            };
1192
1193            let install_attempt = leaf.cas(leaf_clone, &mut guard);
1194
1195            if install_attempt.is_ok() {
1196                if ret.is_none() {
1197                    self.len.fetch_add(1, Ordering::Relaxed);
1198                }
1199                return ret;
1200            } else if let Some(new_ptr) = rhs_ptr_opt {
1201                // `cas` already freed the failed replacement; reclaim the orphaned rhs pointer cell and its node.
1202                let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
1203                    unsafe { Box::from_raw(new_ptr.0 as *mut _) };
1204
1205                let _dropping_reclaimed_rhs: Box<Node<K, V, FANOUT>> =
1206                    unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) };
1207            }
1208        }
1209    }
1210
1211    /// Atomically remove the value associated with this key from the map, returning the previous value if one existed.
1212    ///
1213    /// # Examples
1214    ///
1215    /// ```
1216    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1217    ///
1218    /// assert_eq!(map.remove(&1), None);
1219    /// assert_eq!(map.insert(1, 1), None);
1220    /// assert_eq!(map.remove(&1), Some(1));
1221    /// ```
1222    pub fn remove<Q>(&self, key: &Q) -> Option<V>
1223    where
1224        K: Borrow<Q>,
1225        Q: Ord + ?Sized,
1226    {
1227        loop {
1228            let mut guard = self.ebr.pin();
1229            let leaf = self.inner.leaf_for_key(LeafSearch::Eq(key), &mut guard);
1230            let mut leaf_clone: Box<Node<K, V, FANOUT>> = Box::new((*leaf).clone());
1231            let ret = leaf_clone.remove(key);
1232            let install_attempt = leaf.cas(leaf_clone, &mut guard);
1233            if install_attempt.is_ok() {
1234                if ret.is_some() {
1235                    self.len.fetch_sub(1, Ordering::Relaxed);
1236                }
1237                return ret;
1238            }
1239        }
1240    }
1241
1242    /// Atomically compare and swap the value associated with this key from the old value to the
1243    /// new one. An old value of `None` means "only create this value if it does not already
1244    /// exist". A new value of `None` means "delete this value, if it matches the provided old value".
1245    /// If successful, returns the old value if it existed. If unsuccessful, returns both the proposed
1246    /// new value that failed to be installed as well as the current actual value in a [`CasFailure`]
1247    /// struct.
1248    ///
1249    /// # Examples
1250    ///
1251    /// ```
1252    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1253    ///
1254    /// // key 1 does not yet exist
1255    /// assert_eq!(map.get(&1), None);
1256    ///
1257    /// // uniquely create value 10
1258    /// map.cas(1, None, Some(10)).unwrap();
1259    ///
1260    /// assert_eq!(map.get(&1).unwrap(), 10);
1261    ///
1262    /// // compare and swap from value 10 to value 20
1263    /// map.cas(1, Some(&10_usize), Some(20)).unwrap();
1264    ///
1265    /// assert_eq!(map.get(&1).unwrap(), 20);
1266    ///
1267    /// // if we guess the wrong current value, a CasFailure is returned
1268    /// // which will tell us what the actual current value is (which we
1269    /// // failed to provide) and it will give us back our proposed new
1270    /// // value.
1271    /// let cas_result = map.cas(1, Some(&999999_usize), Some(30));
1272    ///
1273    /// let expected_cas_failure = Err(concurrent_map::CasFailure {
1274    ///     actual: Some(20),
1275    ///     returned_new_value: Some(30),
1276    /// });
1277    ///
1278    /// assert_eq!(cas_result, expected_cas_failure);
1279    ///
1280    /// // conditionally delete
1281    /// map.cas(1, Some(&20_usize), None).unwrap();
1282    ///
1283    /// assert_eq!(map.get(&1), None);
1284    /// ```
1285    pub fn cas<VRef>(
1286        &self,
1287        key: K,
1288        old: Option<&VRef>,
1289        new: Option<V>,
1290    ) -> Result<Option<V>, CasFailure<V>>
1291    where
1292        V: Borrow<VRef>,
1293        VRef: PartialEq + ?Sized,
1294    {
1295        loop {
1296            let mut guard = self.ebr.pin();
1297            let leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard);
1298            let mut leaf_clone: Box<Node<K, V, FANOUT>> = Box::new((*leaf).clone());
1299            let ret = leaf_clone.cas(key.clone(), old, new.clone());
1300
1301            let rhs_ptr_opt = if leaf_clone.should_split() {
1302                Some(leaf_clone.split())
1303            } else {
1304                None
1305            };
1306
1307            let install_attempt = leaf.cas(leaf_clone, &mut guard);
1308
1309            if install_attempt.is_ok() {
1310                if matches!(ret, Ok(Some(_))) && new.is_none() {
1311                    self.len.fetch_sub(1, Ordering::Relaxed);
1312                } else if matches!(ret, Ok(None)) && new.is_some() {
1313                    self.len.fetch_add(1, Ordering::Relaxed);
1314                }
1315                return ret;
1316            } else if let Some(new_ptr) = rhs_ptr_opt {
1317                // `cas` already freed the failed replacement; reclaim the orphaned rhs pointer cell and its node.
1318                let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
1319                    unsafe { Box::from_raw(new_ptr.0 as *mut _) };
1320
1321                let _dropping_reclaimed_rhs: Box<Node<K, V, FANOUT>> =
1322                    unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) };
1323            }
1324        }
1325    }
1326
1327    /// A **lagging**, eventually-consistent length count. This is NOT atomically
1328    /// updated with [`insert`] / [`remove`] / [`cas`], but is updated after those
1329    /// operations complete their atomic modifications to the shared map.
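    ///
    /// A single-threaded sketch (under concurrent use this count is only
    /// eventually consistent):
    ///
    /// ```
    /// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
    ///
    /// map.insert(1, 1);
    /// map.insert(2, 2);
    ///
    /// assert_eq!(map.len(), 2);
    /// assert!(!map.is_empty());
    /// ```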
1330    pub fn len(&self) -> usize {
1331        self.len.load(Ordering::Relaxed)
1332    }
1333
1334    /// A **lagging**, eventually-consistent check for emptiness, based on the
1335    /// similarly lagging `len` method.
1336    pub fn is_empty(&self) -> bool {
1337        self.len() == 0
1338    }
1339
1340    /// Iterate over the map.
1341    ///
1342    /// This is not an atomic snapshot, and it caches B+tree leaf
1343    /// nodes as it iterates through them to achieve high throughput.
1344    /// As a result, the following behaviors are possible:
1345    ///
1346    /// * may (or may not!) return values that were concurrently added to the map after the
1347    ///   iterator was created
1348    /// * may (or may not!) return items that were concurrently deleted from the map after
1349    ///   the iterator was created
1350    /// * If a key's value is changed from one value to another one after this iterator
1351    ///   is created, this iterator might return the old or the new value.
1352    ///
1353    /// But, you can be certain that any key that existed prior to the creation of this
1354    /// iterator, and was not changed during iteration, will be observed as expected.
1355    ///
1356    /// # Examples
1357    ///
1358    /// ```
1359    /// use concurrent_map::ConcurrentMap;
1360    ///
1361    /// let data = vec![
1362    ///     ("key 1", 1),
1363    ///     ("key 2", 2),
1364    ///     ("key 3", 3)
1365    /// ];
1366    ///
1367    /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect();
1368    ///
1369    /// let r: Vec<_> = map.iter().collect();
1370    ///
1371    /// assert_eq!(&data, &r);
1372    /// ```
1373    pub fn iter(&self) -> Iter<'_, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE> {
1374        let mut guard = self.ebr.pin();
1375
1376        let current = self.inner.leaf_for_key(LeafSearch::Eq(&K::MIN), &mut guard);
1377        let current_back = self.inner.leaf_for_key(LeafSearch::Max, &mut guard);
1378        let next_index_from_back = 0;
1379
1380        Iter {
1381            guard,
1382            inner: &self.inner,
1383            current,
1384            range: std::ops::RangeFull,
1385            next_index: 0,
1386            current_back,
1387            next_index_from_back,
1388            q: std::marker::PhantomData,
1389        }
1390    }
1391
1392    /// Iterate over a range of the map.
1393    ///
1394    /// This is not an atomic snapshot, and it caches B+tree leaf
1395    /// nodes as it iterates through them to achieve high throughput.
1396    /// As a result, the following behaviors are possible:
1397    ///
1398    /// * may (or may not!) return values that were concurrently added to the map after the
1399    ///   iterator was created
1400    /// * may (or may not!) return items that were concurrently deleted from the map after
1401    ///   the iterator was created
1402    /// * If a key's value is changed from one value to another one after this iterator
1403    ///   is created, this iterator might return the old or the new value.
1404    ///
1405    /// But, you can be certain that any key that existed prior to the creation of this
1406    /// iterator, and was not changed during iteration, will be observed as expected.
1407    ///
1408    /// # Panics
1409    ///
1410    /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN).
1411    ///
1412    /// # Examples
1413    ///
1414    /// ```
1415    /// use concurrent_map::ConcurrentMap;
1416    ///
1417    /// let data = vec![
1418    ///     ("key 1", 1),
1419    ///     ("key 2", 2),
1420    ///     ("key 3", 3)
1421    /// ];
1422    ///
1423    /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect();
1424    ///
1425    /// let r1: Vec<_> = map.range("key 1"..="key 3").collect();
1426    /// assert_eq!(&data, &r1);
1427    ///
1428    /// let r2: Vec<_> = map.range("key 1".."key 3").collect();
1429    /// assert_eq!(&data[..2], &r2);
1430    ///
1431    /// let r3: Vec<_> = map.range("key 2"..="key 3").collect();
1432    /// assert_eq!(&data[1..], &r3);
1433    ///
1434    /// let r4: Vec<_> = map.range("key 4"..).collect();
1435    /// assert!(r4.is_empty());
1436    /// ```
1437    pub fn range<Q, R>(&self, range: R) -> Iter<'_, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q>
1438    where
1439        R: std::ops::RangeBounds<Q>,
1440        K: Borrow<Q>,
1441        Q: ?Sized + Ord + PartialEq,
1442    {
1443        let mut guard = self.ebr.pin();
1444
1445        let kmin = &K::MIN;
1446        let min = kmin.borrow();
1447        let start = match range.start_bound() {
1448            Bound::Unbounded => min,
1449            Bound::Included(k) | Bound::Excluded(k) => k,
1450        };
1451
1452        let end = match range.end_bound() {
1453            Bound::Unbounded => LeafSearch::Max,
1454            Bound::Included(k) => LeafSearch::Eq(k),
1455            Bound::Excluded(k) => {
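                // Documented panic: an end bound of `Bound::Excluded(K::MIN)` would
                // describe an empty range, so it is rejected here.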
1456                assert!(k != K::MIN.borrow());
1457                LeafSearch::Lt(k)
1458            }
1459        };
1460
1461        let current = self.inner.leaf_for_key(LeafSearch::Eq(start), &mut guard);
1462        let current_back = self.inner.leaf_for_key(end, &mut guard);
1463
1464        Iter {
1465            guard,
1466            inner: &self.inner,
1467            range,
1468            current,
1469            current_back,
1470            next_index: 0,
1471            next_index_from_back: 0,
1472            q: std::marker::PhantomData,
1473        }
1474    }
1475
1476    /// Fetch the value, apply a function to it and return the result.
1477    /// Similar to [`ConcurrentMap::cas`], returning a `None` from the provided
1478    /// closure will cause a deletion of the value.
1479    ///
1480    /// # Note
1481    ///
1482    /// This may call the function multiple times if the value has been
1483    /// changed from other threads in the meantime.
1484    /// This function essentially implements the common CAS loop pattern
1485    /// for atomically pushing a function to some shared data.
1486    ///
1487    /// # Examples
1488    ///
1489    /// ```
1490    /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default();
1491    ///
1492    /// fn increment(old_opt: Option<&usize>) -> Option<usize> {
1493    ///     let incremented = match old_opt {
1494    ///         Some(old) => {
1495    ///             old + 1
1496    ///         }
1497    ///         None => 0,
1498    ///     };
1499    ///
1500    ///     // returning `None` here means "delete this value"
1501    ///     Some(incremented)
1502    /// }
1503    ///
1504    /// assert_eq!(map.update_and_fetch("counter", increment), Some(0));
1505    /// assert_eq!(map.update_and_fetch("counter", increment), Some(1));
1506    /// assert_eq!(map.update_and_fetch("counter", increment), Some(2));
1507    /// assert_eq!(map.update_and_fetch("counter", increment), Some(3));
1508    ///
1509    /// // push a "deletion" that returns None
1510    /// assert_eq!(map.update_and_fetch("counter", |_| None), None);
1511    /// ```
1512    pub fn update_and_fetch<F>(&self, key: K, mut f: F) -> Option<V>
1513    where
1514        F: FnMut(Option<&V>) -> Option<V>,
1515        V: PartialEq,
1516    {
1517        let mut current_opt = self.get(&key);
1518
1519        loop {
1520            let next = f(current_opt.as_ref());
1521            match self.cas(key.clone(), current_opt.as_ref(), next.clone()) {
1522                Ok(_) => return next,
1523                Err(CasFailure { actual: cur, .. }) => {
1524                    current_opt = cur;
1525                }
1526            }
1527        }
1528    }
1529
1530    /// Fetch the value, apply a function to it and return the previous value.
1531    /// Similar to [`ConcurrentMap::cas`], returning a `None` from the provided
1532    /// closure will cause a deletion of the value.
1533    ///
1534    /// # Note
1535    ///
1536    /// This may call the function multiple times if the value has been
1537    /// changed from other threads in the meantime.
1538    /// This function essentially implements the common CAS loop pattern
1539    /// for atomically pushing a function to some shared data.
1540    ///
1541    /// # Examples
1542    ///
1543    /// ```
1544    /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default();
1545    ///
1546    /// fn increment(old_opt: Option<&usize>) -> Option<usize> {
1547    ///     let incremented = match old_opt {
1548    ///         Some(old) => {
1549    ///             old + 1
1550    ///         }
1551    ///         None => 0,
1552    ///     };
1553    ///
1554    ///     // returning `None` here means "delete this value"
1555    ///     Some(incremented)
1556    /// }
1557    ///
1558    /// assert_eq!(map.fetch_and_update("counter", increment), None);
1559    /// assert_eq!(map.fetch_and_update("counter", increment), Some(0));
1560    /// assert_eq!(map.fetch_and_update("counter", increment), Some(1));
1561    /// assert_eq!(map.fetch_and_update("counter", increment), Some(2));
1562    ///
1563    /// // push a "deletion" that returns the previous value, essentially
1564    /// // mimicking the ConcurrentMap::remove method.
1565    /// assert_eq!(map.fetch_and_update("counter", |_| None), Some(3));
1566    ///
1567    /// // verify that it's not present
1568    /// assert_eq!(map.get("counter"), None);
1569    /// ```
1570    pub fn fetch_and_update<F>(&self, key: K, mut f: F) -> Option<V>
1571    where
1572        F: FnMut(Option<&V>) -> Option<V>,
1573        V: PartialEq,
1574    {
1575        let mut current_opt = self.get(&key);
1576
1577        loop {
1578            let next = f(current_opt.as_ref());
1579            match self.cas(key.clone(), current_opt.as_ref(), next) {
1580                Ok(_) => return current_opt,
1581                Err(CasFailure { actual: cur, .. }) => {
1582                    current_opt = cur;
1583                }
1584            }
1585        }
1586    }
1587}
1588
1589// This impl block is for `fetch_max` and `fetch_min` operations on
1590// values that implement `Ord`.
1591impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize>
1592    ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
1593where
1594    K: 'static + Clone + Minimum + Ord + Send + Sync,
1595    V: 'static + Clone + Send + Sync + Ord,
1596{
1597    /// Similar to [`std::sync::atomic::AtomicU64::fetch_min`] in spirit, this
1598    /// atomically sets the value to the minimum of the
1599    /// previous value and the provided value.
1600    ///
1601    /// The previous value is returned. None is returned if
1602    /// there was no previous value, in which case the
1603    /// value is set to the provided value. The value is
1604    /// unchanged if the current value is already lower
1605    /// than the provided value.
1606    ///
1607    /// # Examples
1608    ///
1609    /// ```
1610    /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default();
1611    ///
1612    /// // acts as an insertion if no value is present
1613    /// assert_eq!(map.fetch_min("key 1", 5), None);
1614    ///
1615    /// // sets the value to the new lower value, returns the old value
1616    /// assert_eq!(map.fetch_min("key 1", 2), Some(5));
1617    ///
1618    /// // the provided value is not lower than the stored one, so the
1619    /// // stored value is unchanged and the current value is returned.
1620    /// assert_eq!(map.fetch_min("key 1", 10), Some(2));
1621    ///
1622    /// ```
1623    pub fn fetch_min(&self, key: K, value: V) -> Option<V> {
1624        let f = move |prev_opt: Option<&V>| {
1625            if let Some(prev) = prev_opt {
1626                Some(prev.min(&value).clone())
1627            } else {
1628                Some(value.clone())
1629            }
1630        };
1631
1632        self.fetch_and_update(key, f)
1633    }
1634
1635    /// Similar to [`std::sync::atomic::AtomicU64::fetch_max`] in spirit, this
1636    /// atomically sets the value to the maximum of the
1637    /// previous value and the provided value.
1638    ///
1639    /// The previous value is returned. `None` is returned if
1640    /// there was no previous value, in which case the
1641    /// value is set to the provided value. The value is
1642    /// unchanged if the current value is already higher
1643    /// than the provided value.
1644    ///
1645    /// # Examples
1646    ///
1647    /// ```
1648    /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default();
1649    ///
1650    /// // acts as an insertion if no value is present
1651    /// assert_eq!(map.fetch_max("key 1", 5), None);
1652    ///
1653    /// // sets the value to the new higher value, returns the old value
1654    /// assert_eq!(map.fetch_max("key 1", 10), Some(5));
1655    ///
1656    /// // the provided value is not higher than the stored one, so the
1657    /// // stored value is unchanged and the current value is returned.
1658    /// assert_eq!(map.fetch_max("key 1", 2), Some(10));
1659    ///
1660    /// ```
1661    pub fn fetch_max(&self, key: K, value: V) -> Option<V> {
1662        let f = move |prev_opt: Option<&V>| {
1663            if let Some(prev) = prev_opt {
1664                Some(prev.max(&value).clone())
1665            } else {
1666                Some(value.clone())
1667            }
1668        };
1669
1670        self.fetch_and_update(key, f)
1671    }
1672}
1673
1674/// An iterator over a [`ConcurrentMap`]. Note that this is
1675/// not an atomic snapshot of the overall shared state, but
1676/// it will contain any data that existed before the iterator
1677/// was created.
1678///
1679/// Note that this iterator holds an epoch-based reclamation
1680/// guard, so the overall concurrent structure will be unable
1681/// to free any memory until this iterator is dropped.
1682///
1683/// There are a lot of generics on this struct. Most of them directly
1684/// correspond to the generics of the [`ConcurrentMap`] itself. But
1685/// there are two that don't:
1686///
1687/// * `R` is the type of the range that is stored in the iterator
1688/// * `Q` is the type that the bounds inside `R` refer to
1689///
1690/// So, if an `Iter` is created from:
1691///
1692/// ```
1693/// let map = concurrent_map::ConcurrentMap::<usize, usize>::default();
1694/// let start = std::ops::Bound::Excluded(0_usize);
1695/// let end = std::ops::Bound::Included(5_usize);
1696/// let iter = map.range((start, end));
1697/// ```
1698///
1699/// then the type of `R` is `(std::ops::Bound<usize>, std::ops::Bound<usize>)`
1700/// (a 2-tuple of `std::ops::Bound<usize>`), and the type of `Q` is `usize`.
1701pub struct Iter<
1702    'a,
1703    K,
1704    V,
1705    const FANOUT: usize,
1706    const LOCAL_GC_BUFFER_SIZE: usize,
1707    R = std::ops::RangeFull,
1708    Q = K,
1709> where
1710    K: 'static + Clone + Minimum + Ord + Send + Sync,
1711    V: 'static + Clone + Send + Sync,
1712    R: std::ops::RangeBounds<Q>,
1713    K: Borrow<Q>,
1714    Q: ?Sized,
1715{
1716    inner: &'a Inner<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>,
1717    guard: Guard<'a, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
1718    range: R,
1719    current: NodeView<K, V, FANOUT>,
1720    next_index: usize,
1721    current_back: NodeView<K, V, FANOUT>,
1722    next_index_from_back: usize,
1723    q: std::marker::PhantomData<&'a Q>,
1724}
1725
1726impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize, R, Q> Iterator
1727    for Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q>
1728where
1729    K: 'static + Clone + Minimum + Ord + Send + Sync,
1730    V: 'static + Clone + Send + Sync,
1731    R: std::ops::RangeBounds<Q>,
1732    K: Borrow<Q>,
1733    Q: ?Sized + PartialEq + Ord,
1734{
1735    type Item = (K, V);
1736
1737    fn next(&mut self) -> Option<Self::Item> {
1738        loop {
1739            if let Some((k, v)) = self.current.leaf().get_index(self.next_index) {
1740                // iterate over current cached b+ tree leaf node
1741                self.next_index += 1;
1742                if !self.range.contains(k.borrow()) {
1743                    // we might hit this on the first iteration
1744                    continue;
1745                }
1746                return Some((k.clone(), v.clone()));
1747            } else if let Some(next_ptr) = self.current.next {
1748                if !self
1749                    .range
1750                    .contains(self.current.hi.as_ref().unwrap().borrow())
1751                {
1752                    // we have reached the end of our range
1753                    return None;
1754                }
1755                if let Some(next_current) = next_ptr.node_view(&mut self.guard) {
1756                    // we were able to take the fast path by following the sibling pointer
1757
1758                    // it's possible that nodes were merged etc... so we need to make sure
1759                    // that we make forward progress
1760                    self.next_index = next_current
1761                        .leaf()
1762                        .iter()
1763                        .position(|(k, _v)| k >= self.current.hi.as_ref().unwrap())
1764                        .unwrap_or(0);
1765
1766                    self.current = next_current;
1767                } else if let Some(ref hi) = self.current.hi {
1768                    // we have to take the slow path by traversing the
1769                    // map due to a concurrent merge that deleted the
1770                    // right sibling. we are protected from a use-after-
1771                    // free of the ID itself by the ebr Guard held on the
1772                    // Iter struct, which acts as a barrier against re-use.
1773                    let next_current = self
1774                        .inner
1775                        .leaf_for_key(LeafSearch::Eq(hi.borrow()), &mut self.guard);
1776
1777                    // it's possible that nodes were merged etc... so we need to make sure
1778                    // that we make forward progress
1779                    self.next_index = next_current
1780                        .leaf()
1781                        .iter()
1782                        .position(|(k, _v)| k >= hi)
1783                        .unwrap_or(0);
1784                    self.current = next_current;
1785                } else {
1786                    panic!("somehow hit a node that has a next but not a hi key");
1787                }
1788            } else {
1789                // end of the collection
1790                return None;
1791            }
1792        }
1793    }
1794}
1795
1796impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize, R, Q> DoubleEndedIterator
1797    for Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q>
1798where
1799    K: 'static + Clone + Minimum + Ord + Send + Sync,
1800    V: 'static + Clone + Send + Sync,
1801    R: std::ops::RangeBounds<Q>,
1802    K: Borrow<Q>,
1803    Q: ?Sized + PartialEq + Ord,
1804{
1805    fn next_back(&mut self) -> Option<Self::Item> {
1806        loop {
1807            if self.next_index_from_back >= self.current_back.leaf().len() {
1808                if !self.range.contains(self.current_back.lo.borrow())
1809                    || self.current_back.lo == K::MIN
1810                {
1811                    // finished
1812                    return None;
1813                }
1814
1815                let next_current_back = self.inner.leaf_for_key(
1816                    LeafSearch::Lt(self.current_back.lo.borrow()),
1817                    &mut self.guard,
1818                );
1819                assert!(next_current_back.lo != self.current_back.lo);
1820
1821                self.next_index_from_back = next_current_back
1822                    .leaf()
1823                    .iter()
1824                    .rev()
1825                    .position(|(k, _v)| k < &self.current_back.lo)
1826                    .unwrap_or(0);
1827
1828                self.current_back = next_current_back;
1829
1830                if self.current_back.leaf().is_empty() {
1831                    continue;
1832                }
1833            }
1834
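            // `next_index_from_back` counts how many items have already been
            // consumed from the back of `current_back`, so the next candidate
            // lives at `len - 1 - next_index_from_back`.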
1835            let offset_to_return = self.current_back.leaf().len() - (1 + self.next_index_from_back);
1836            let (k, v) = self
1837                .current_back
1838                .leaf()
1839                .get_index(offset_to_return)
1840                .unwrap();
1841
1842            self.next_index_from_back += 1;
1843            if !self.range.contains(k.borrow()) {
1844                continue;
1845            } else {
1846                return Some((k.clone(), v.clone()));
1847            }
1848        }
1849    }
1850}
1851
1852enum LeafSearch<K> {
1853    // For finding a leaf that would contain this key, if present.
1854    Eq(K),
1855    // For finding the direct left sibling of a node during reverse
1856    // iteration. The actual semantics are to find a leaf whose lo key
1857    // is less than K and whose hi key is >= K (see the example below).
1858    Lt(K),
1859    Max,
1860}
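// For example: suppose two leaves cover [MIN, "c") and ["c", "f"). Eq("c")
// targets the ["c", "f") leaf, because its lo <= "c" < hi, while Lt("c")
// targets the [MIN, "c") leaf, i.e. the direct left sibling, whose lo is
// less than "c" and whose hi is >= "c". The keys here are illustrative only.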
1861
1862impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize>
1863    Inner<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
1864where
1865    K: 'static + Clone + Minimum + Ord + Send + Sync,
1866    V: 'static + Clone + Send + Sync,
1867{
1868    fn root(
1869        &self,
1870        _guard: &mut Guard<'_, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
1871    ) -> NodeView<K, V, FANOUT> {
1872        loop {
1873            if let Some(ptr) = NonNull::new(self.root.load(Ordering::Acquire)) {
1874                return NodeView { ptr, id: self.root };
1875            }
1876        }
1877    }
1878
1879    // lock-free merging:
1880    // 1. try to mark the parent's merging_child
1881    //  a. must not be the left-most child
1882    //  b. if unsuccessful, give up
1883    // 2. mark the child as merging
1884    // 3. find the left sibling
1885    // 4. cas the left sibling to eat the right sibling
1886    //  a. loop until successful
1887    //  b. go right if the left sibling split and no longer points to the merging child
1888    //  c. split the new larger left sibling if it is at the split threshold
1889    // 5. cas the parent to remove the merged child
1890    // 6. remove the child's pointer in the page table
1891    // 7. defer the reclamation of the BoxedAtomicPtr
1892    // 8. defer the reclamation of the child node itself
1893    //
1894    // merge threshold must be >= 1, because otherwise index nodes with 1 empty
1895    // child will never be compactible.
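    //
    // Any thread that observes a parent's `merging_child` marker while
    // traversing in `leaf_for_key` cooperatively calls `merge_child` to
    // complete the merge before restarting its traversal, so the protocol
    // can make progress even if the thread that initiated it stalls.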
1896
1897    fn install_parent_merge<'a>(
1898        &'a self,
1899        parent: &NodeView<K, V, FANOUT>,
1900        child: &NodeView<K, V, FANOUT>,
1901        guard: &mut Guard<'a, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
1902    ) -> Result<NodeView<K, V, FANOUT>, ()> {
1903        // 1. try to mark the parent's merging_child
1904        //  a. must not be the left-most child
1905        //  b. if unsuccessful, give up
1906        let is_leftmost_child = parent.index().get_index(0).unwrap().0 == child.lo;
1907
1908        if is_leftmost_child {
1909            return Err(());
1910        }
1911
1912        if !parent.index().contains_key(&child.lo) {
1913            // can't install a parent merge if the child is unknown to the parent.
1914            return Err(());
1915        }
1916
1917        if parent.merging_child.is_some() {
1918            // there is already a merge in-progress for a different child.
1919            return Err(());
1920        }
1921
1922        let mut parent_clone: Box<Node<K, V, FANOUT>> = Box::new((*parent).clone());
1923        parent_clone.merging_child = Some(child.id);
1924        parent.cas(parent_clone, guard).map_err(|_| ())
1925    }
1926
1927    fn merge_child<'a>(
1928        &'a self,
1929        parent: &mut NodeView<K, V, FANOUT>,
1930        child: &mut NodeView<K, V, FANOUT>,
1931        guard: &mut Guard<'a, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
1932    ) {
1933        // 2. mark child as merging
1934        while !child.is_merging {
1935            let mut child_clone: Box<Node<K, V, FANOUT>> = Box::new((*child).clone());
1936            child_clone.is_merging = true;
1937            *child = match child.cas(child_clone, guard) {
1938                Ok(new_child) | Err(Some(new_child)) => new_child,
1939                Err(None) => {
1940                    // child already removed
1941                    return;
1942                }
1943            };
1944        }
1945
1946        // 3. find the left sibling
1947        let first_left_sibling_guess = parent
1948            .index()
1949            .iter()
1950            .filter(|(k, _v)| (..&child.lo).contains(&k))
1951            .next_back()
1952            .unwrap()
1953            .1;
1954
1955        let mut left_sibling = if let Some(view) = first_left_sibling_guess.node_view(guard) {
1956            view
1957        } else {
1958            // the merge already completed and this left sibling has itself been merged away
1959            return;
1960        };
1961
1962        loop {
1963            if left_sibling.next.is_none() {
1964                // the merge completed and the left sibling became
1965                // the infinity node in the meantime
1966                return;
1967            }
1968
1969            if child.hi.is_some() && left_sibling.hi.is_some() && left_sibling.hi >= child.hi {
1970                // step 4 happened concurrently
1971                break;
1972            }
1973
1974            let next = left_sibling.next.unwrap();
1975            if next != child.id {
1976                left_sibling = if let Some(view) = next.node_view(guard) {
1977                    view
1978                } else {
1979                    // the merge already completed and this left sibling has itself been merged away
1980                    return;
1981                };
1982                continue;
1983            }
1984
1985            // 4. cas the left sibling to eat the right sibling
1986            //  a. loop until successful
1987            //  b. go right if the left sibling split and no longer points to the merging child
1988            //  c. split the new larger left sibling if it is at the split threshold
1989            let mut left_sibling_clone: Box<Node<K, V, FANOUT>> = Box::new((*left_sibling).clone());
1990            left_sibling_clone.merge(child);
1991
1992            let rhs_ptr_opt = if left_sibling_clone.should_split() {
1993                // funnily enough, we may have to split the merged sibling:
1994                // this is a consequence of storing items in fixed-size
1995                // arrays, which offer no flexibility in capacity.
1996
1997                Some(left_sibling_clone.split())
1998            } else {
1999                None
2000            };
2001
2002            let cas_result = left_sibling.cas(left_sibling_clone, guard);
2003            if let (Err(_), Some(rhs_ptr)) = (&cas_result, rhs_ptr_opt) {
2004                // the CAS failed, so the split right sibling we created was never published; free it here
2005                let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
2006                    unsafe { Box::from_raw(rhs_ptr.0 as *mut _) };
2007
2008                let _dropping_reclaimed_rhs: Box<Node<K, V, FANOUT>> =
2009                    unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) };
2010            }
2011
2012            match cas_result {
2013                Ok(_) => {
2014                    break;
2015                }
2016                Err(Some(actual)) => left_sibling = actual,
2017                Err(None) => {
2018                    return;
2019                }
2020            }
2021        }
2022
2023        // 5. cas the parent to remove the merged child
2024        while parent.merging_child == Some(child.id) {
2025            let mut parent_clone: Box<Node<K, V, FANOUT>> = Box::new((*parent).clone());
2026
2027            assert!(parent_clone.merging_child.is_some());
2028            assert!(parent_clone.index().contains_key(&child.lo));
2029
2030            parent_clone.merging_child = None;
2031            parent_clone.index_mut().remove(&child.lo).unwrap();
2032
2033            let cas_result = parent.cas(parent_clone, guard);
2034            match cas_result {
2035                Ok(new_parent) | Err(Some(new_parent)) => *parent = new_parent,
2036                Err(None) => {
2037                    return;
2038                }
2039            }
2040        }
2041
2042        // 6. remove the child's pointer in the page table
2043        if child
2044            .id
2045            .compare_exchange(
2046                child.ptr.as_ptr(),
2047                std::ptr::null_mut(),
2048                Ordering::AcqRel,
2049                Ordering::Acquire,
2050            )
2051            .is_err()
2052        {
2053            // only the thread that uninstalls this pointer gets to
2054            // mark resources for reuse.
2055            return;
2056        }
2057
2058        // 7. defer the reclamation of the BoxedAtomicPtr
2059        guard.defer_drop(Deferred::BoxedAtomicPtr(child.id));
2060
2061        // 8. defer the reclamation of the child node
2062        let replaced: Box<Node<K, V, FANOUT>> = unsafe { Box::from_raw(child.ptr.as_ptr()) };
2063        guard.defer_drop(Deferred::Node(replaced));
2064    }
2065
2066    #[cfg(feature = "timing")]
2067    fn print_timing(&self) {
2068        println!(
2069            "min : {:?}",
2070            Duration::from_nanos(self.fastest_op.load(Ordering::Acquire))
2071        );
2072        println!(
2073            "max : {:?}",
2074            Duration::from_nanos(self.slowest_op.load(Ordering::Acquire))
2075        );
2076    }
2077
2078    #[cfg(feature = "timing")]
2079    fn record_timing(&self, time: Duration) {
2080        let nanos = time.as_nanos() as u64;
2081        let min = self.fastest_op.load(Ordering::Relaxed);
2082        if nanos < min {
2083            self.fastest_op.fetch_min(nanos, Ordering::Relaxed);
2084        }
2085
2086        let max = self.slowest_op.load(Ordering::Relaxed);
2087        if nanos > max {
2088            self.slowest_op.fetch_max(nanos, Ordering::Relaxed);
2089        }
2090    }
2091
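    // Descends from the root to the leaf responsible for `search`. Along the
    // way it follows right-sibling pointers when a concurrent split has moved
    // the target, links freshly split siblings into their parent (hoisting a
    // new root when the split node has no parent), and cooperatively completes
    // in-progress merges. It restarts from the root whenever its view of the
    // tree turns out to be stale.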
2092    fn leaf_for_key<'a, Q>(
2093        &'a self,
2094        search: LeafSearch<&Q>,
2095        guard: &mut Guard<'a, Deferred<K, V, FANOUT>, LOCAL_GC_BUFFER_SIZE>,
2096    ) -> NodeView<K, V, FANOUT>
2097    where
2098        K: Borrow<Q>,
2099        Q: Ord + ?Sized,
2100    {
2101        let mut parent_cursor_opt: Option<NodeView<K, V, FANOUT>> = None;
2102        let mut cursor = self.root(guard);
2103        let mut root_cursor = NodeView {
2104            ptr: cursor.ptr,
2105            id: cursor.id,
2106        };
2107
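        // `reset!` abandons the current traversal and restarts from the root.
        // It is invoked whenever the cursor's view of the tree turns out to be
        // stale, e.g. because a node it points at has since been merged away
        // and freed.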
2108        macro_rules! reset {
2109            ($reason:expr) => {
2110                // println!("resetting because of {:?}", $reason);
2111                parent_cursor_opt = None;
2112                cursor = self.root(guard);
2113                root_cursor = NodeView {
2114                    ptr: cursor.ptr,
2115                    id: cursor.id,
2116                };
2117                continue;
2118            };
2119        }
2120
2121        #[cfg(feature = "timing")]
2122        let before = Instant::now();
2123
2124        loop {
2125            if let Some(merging_child_ptr) = cursor.merging_child {
2126                let mut child = if let Some(view) = merging_child_ptr.node_view(guard) {
2127                    view
2128                } else {
2129                    reset!("merging child of marked parent already freed");
2130                };
2131                self.merge_child(&mut cursor, &mut child, guard);
2132                reset!("cooperatively performed merge_child after detecting parent");
2133            }
2134
2135            if cursor.is_merging {
2136                reset!("detected child merging without a corresponding parent merging_child");
2137            }
2138
2139            if cursor.should_merge() {
2140                if let Some(ref mut parent_cursor) = parent_cursor_opt {
2141                    let is_leftmost_child =
2142                        parent_cursor.index().get_index(0).unwrap().0 == cursor.lo;
2143
2144                    if !is_leftmost_child {
2145                        if let Ok(new_parent) =
2146                            self.install_parent_merge(parent_cursor, &cursor, guard)
2147                        {
2148                            *parent_cursor = new_parent;
2149                        } else {
2150                            reset!("failed to install parent merge");
2151                        }
2152
2153                        self.merge_child(parent_cursor, &mut cursor, guard);
2154                        reset!("completed merge_child");
2155                    }
2156                } else {
2157                    assert!(!cursor.is_leaf());
2158                }
2159            }
2160
2161            match search {
2162                LeafSearch::Eq(k) | LeafSearch::Lt(k) => assert!(k >= cursor.lo.borrow()),
2163                LeafSearch::Max => {}
2164            }
2165
2166            if let Some(hi) = &cursor.hi {
2167                let go_right = match search {
2168                    LeafSearch::Eq(k) => k >= hi.borrow(),
2169                    // Lt looks for a node with lo < K, hi >= K
2170                    LeafSearch::Lt(k) => k > hi.borrow(),
2171                    LeafSearch::Max => true,
2172                };
2173                if go_right {
2174                    // go right to the tree sibling
2175                    let next = cursor.next.unwrap();
2176                    let rhs = if let Some(view) = next.node_view(guard) {
2177                        view
2178                    } else {
2179                        reset!("right child already freed");
2180                    };
2181
2182                    if let Some(ref mut parent_cursor) = parent_cursor_opt {
2183                        if parent_cursor.is_viable_parent_for(&rhs) {
2184                            let mut parent_clone: Box<Node<K, V, FANOUT>> =
2185                                Box::new((*parent_cursor).clone());
2186                            assert!(!parent_clone.is_leaf());
2187                            parent_clone.index_mut().insert(rhs.lo.clone(), next);
2188
2189                            let rhs_ptr_opt = if parent_clone.should_split() {
2190                                Some(parent_clone.split())
2191                            } else {
2192                                None
2193                            };
2194
2195                            if let Ok(new_parent_view) = parent_cursor.cas(parent_clone, guard) {
2196                                parent_cursor_opt = Some(new_parent_view);
2197                            } else if let Some(rhs_ptr) = rhs_ptr_opt {
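                                // the parent CAS failed, so the speculative
                                // split performed on the clone was never
                                // published; reclaim the orphaned right
                                // sibling node and its pointer box here.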
2198                                let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
2199                                    unsafe { Box::from_raw(rhs_ptr.0 as *mut _) };
2200
2201                                let _dropping_reclaimed_rhs: Box<Node<K, V, FANOUT>> =
2202                                    unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) };
2203                            }
2204                        }
2205                    } else {
2206                        // root hoist
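                        // The root itself has a right sibling, which means it
                        // split without having a parent. Build a fresh index
                        // node pointing at both the old root and the sibling,
                        // and try to install it as the new root with a single
                        // compare-and-swap below.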
2207                        let current_root_ptr: AtomicPtr<_> = root_cursor.ptr.as_ptr().into();
2208                        let new_index_ptr =
2209                            BoxedAtomicPtr(Box::into_raw(Box::new(current_root_ptr)));
2210
2211                        let mut new_root_node = Node::<K, V, FANOUT>::new_root();
2212                        new_root_node
2213                            .index_mut()
2214                            .insert(cursor.lo.clone(), new_index_ptr);
2215                        new_root_node.index_mut().insert(rhs.lo.clone(), next);
2216                        let new_root_ptr = Box::into_raw(new_root_node);
2217
2218                        let worked = !debug_delay()
2219                            && self
2220                                .root
2221                                .compare_exchange(
2222                                    root_cursor.ptr.as_ptr(),
2223                                    new_root_ptr,
2224                                    Ordering::AcqRel,
2225                                    Ordering::Acquire,
2226                                )
2227                                .is_ok();
2228
2229                        if worked {
2230                            let parent_view = NodeView {
2231                                id: self.root,
2232                                ptr: NonNull::new(new_root_ptr).unwrap(),
2233                            };
2234                            parent_cursor_opt = Some(parent_view);
2235                        } else {
2236                            let dangling_root = unsafe { Box::from_raw(new_root_ptr) };
2237                            drop(dangling_root);
2238
2239                            let reclaimed_ptr: Box<AtomicPtr<Node<K, V, FANOUT>>> =
2240                                unsafe { Box::from_raw(new_index_ptr.0 as *mut _) };
2241                            drop(reclaimed_ptr);
2242                        }
2243                    }
2244
2245                    cursor = rhs;
2246                    continue;
2247                }
2248            }
2249
2250            if cursor.is_leaf() {
2251                assert!(!cursor.is_merging);
2252                assert!(cursor.merging_child.is_none());
2253                if let Some(ref hi) = cursor.hi {
2254                    match search {
2255                        LeafSearch::Eq(k) => assert!(k < hi.borrow()),
2256                        LeafSearch::Lt(k) => assert!(k <= hi.borrow()),
2257                        LeafSearch::Max => {
2258                            unreachable!("leaf should have no hi key if we're searching for Max")
2259                        }
2260                    }
2261                }
2262                break;
2263            }
2264
2265            // go down the tree
2266            let index = cursor.index();
2267            let child_ptr = match search {
2268                LeafSearch::Eq(k) => index.get_less_than_or_equal(k).unwrap().1,
2269                LeafSearch::Lt(k) => {
2270                    // Lt looks for a node with lo < K and hi >= K
2271                    // so we find the first child with a lo key > K and
2272                    // return its left sibling
2273                    index.get_less_than(k).unwrap().1
2274                }
2275                LeafSearch::Max => {
2276                    index
2277                        .get_index(index.len().checked_sub(1).unwrap())
2278                        .unwrap()
2279                        .1
2280                }
2281            };
2282
2283            parent_cursor_opt = Some(cursor);
2284            cursor = if let Some(view) = child_ptr.node_view(guard) {
2285                view
2286            } else {
2287                reset!("attempt to traverse to child failed because the child has been freed");
2288            };
2289        }
2290
2291        #[cfg(feature = "timing")]
2292        self.record_timing(before.elapsed());
2293
2294        cursor
2295    }
2296}
2297
2298#[derive(Debug, Clone)]
2299#[repr(u8)]
2300enum Data<K, V, const FANOUT: usize>
2301where
2302    K: 'static + Clone + Minimum + Ord + Send + Sync,
2303    V: 'static + Clone + Send + Sync,
2304{
2305    Leaf(StackMap<K, V, FANOUT>),
2306    Index(StackMap<K, BoxedAtomicPtr<K, V, FANOUT>, FANOUT>),
2307}
2308
2309impl<K, V, const FANOUT: usize> Data<K, V, FANOUT>
2310where
2311    K: 'static + Clone + Minimum + Ord + Send + Sync,
2312    V: 'static + Clone + Send + Sync,
2313{
2314    const fn len(&self) -> usize {
2315        match self {
2316            Data::Leaf(ref leaf) => leaf.len(),
2317            Data::Index(ref index) => index.len(),
2318        }
2319    }
2320}
2321
2322#[derive(Debug)]
2323struct Node<K, V, const FANOUT: usize>
2324where
2325    K: 'static + Clone + Minimum + Ord + Send + Sync,
2326    V: 'static + Clone + Send + Sync,
2327{
2328    next: Option<BoxedAtomicPtr<K, V, FANOUT>>,
2329    merging_child: Option<BoxedAtomicPtr<K, V, FANOUT>>,
2330    data: Data<K, V, FANOUT>,
2331    lo: K,
2332    hi: Option<K>,
2333    is_merging: bool,
2334}
2335
2336impl<K, V, const FANOUT: usize> Clone for Node<K, V, FANOUT>
2337where
2338    K: 'static + Clone + Minimum + Ord + Send + Sync,
2339    V: 'static + Clone + Send + Sync,
2340{
2341    fn clone(&self) -> Node<K, V, FANOUT> {
2342        Node {
2343            lo: self.lo.clone(),
2344            hi: self.hi.clone(),
2345            next: self.next,
2346            data: self.data.clone(),
2347            merging_child: self.merging_child,
2348            is_merging: self.is_merging,
2349        }
2350    }
2351}
2352
2353impl<K, V, const FANOUT: usize> Node<K, V, FANOUT>
2354where
2355    K: 'static + Clone + Minimum + Ord + Send + Sync,
2356    V: 'static + Clone + Send + Sync,
2357{
2358    const fn index(&self) -> &StackMap<K, BoxedAtomicPtr<K, V, FANOUT>, FANOUT> {
2359        if let Data::Index(ref index) = self.data {
2360            index
2361        } else {
2362            unreachable!()
2363        }
2364    }
2365
2366    fn index_mut(&mut self) -> &mut StackMap<K, BoxedAtomicPtr<K, V, FANOUT>, FANOUT> {
2367        if let Data::Index(ref mut index) = self.data {
2368            index
2369        } else {
2370            unreachable!()
2371        }
2372    }
2373
2374    const fn leaf(&self) -> &StackMap<K, V, FANOUT> {
2375        if let Data::Leaf(ref leaf) = self.data {
2376            leaf
2377        } else {
2378            unreachable!()
2379        }
2380    }
2381
2382    fn leaf_mut(&mut self) -> &mut StackMap<K, V, FANOUT> {
2383        if let Data::Leaf(ref mut leaf) = self.data {
2384            leaf
2385        } else {
2386            unreachable!()
2387        }
2388    }
2389
2390    const fn is_leaf(&self) -> bool {
2391        matches!(self.data, Data::Leaf(..))
2392    }
2393
2394    fn new_root() -> Box<Node<K, V, FANOUT>> {
2395        let min_key = K::MIN;
2396        Box::new(Node {
2397            lo: min_key,
2398            hi: None,
2399            next: None,
2400            data: Data::Index(StackMap::new()),
2401            merging_child: None,
2402            is_merging: false,
2403        })
2404    }
2405
2406    fn new_leaf(lo: K) -> Box<Node<K, V, FANOUT>> {
2407        Box::new(Node {
2408            lo,
2409            hi: None,
2410            next: None,
2411            data: Data::Leaf(StackMap::new()),
2412            merging_child: None,
2413            is_merging: false,
2414        })
2415    }
2416
2417    fn get<Q>(&self, key: &Q) -> Option<V>
2418    where
2419        K: Borrow<Q>,
2420        Q: Ord + ?Sized,
2421    {
2422        assert!(!self.is_merging);
2423        assert!(self.merging_child.is_none());
2424        assert!(self.is_leaf());
2425
2426        self.leaf().get(key).cloned()
2427    }
2428
2429    fn remove<Q>(&mut self, key: &Q) -> Option<V>
2430    where
2431        K: Borrow<Q>,
2432        Q: Ord + ?Sized,
2433    {
2434        assert!(!self.is_merging);
2435        assert!(self.merging_child.is_none());
2436
2437        self.leaf_mut().remove(key)
2438    }
2439
2440    fn insert(&mut self, key: K, value: V) -> Option<V> {
2441        assert!(!self.is_merging);
2442        assert!(self.merging_child.is_none());
2443        assert!(!self.should_split());
2444
2445        self.leaf_mut().insert(key, value)
2446    }
2447
2448    fn cas<V2>(
2449        &mut self,
2450        key: K,
2451        old: Option<&V2>,
2452        new: Option<V>,
2453    ) -> Result<Option<V>, CasFailure<V>>
2454    where
2455        V: Borrow<V2>,
2456        V2: ?Sized + PartialEq,
2457    {
2458        assert!(!self.is_merging);
2459        assert!(self.merging_child.is_none());
2460
2461        // anything that should be split should have been split
2462        // prior to becoming globally visible via this codepath.
2463        assert!(!self.should_split());
2464
2465        match (old, self.leaf().get(&key)) {
2466            (expected, actual) if expected == actual.map(Borrow::borrow) => {
2467                if let Some(to_insert) = new {
2468                    Ok(self.leaf_mut().insert(key, to_insert))
2469                } else {
2470                    Ok(self.leaf_mut().remove(&key))
2471                }
2472            }
2473            (_, actual) => Err(CasFailure {
2474                actual: actual.cloned(),
2475                returned_new_value: new,
2476            }),
2477        }
2478    }
2479
2480    const fn should_merge(&self) -> bool {
2481        if self.merging_child.is_some() || self.is_merging {
2482            return false;
2483        }
2484        self.len() <= MERGE_SIZE
2485    }
2486
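    // Splitting is triggered once a node holds more than FANOUT - MERGE_SIZE
    // items rather than when it is completely full. Combined with the
    // post-split assertions in `split`, this keeps freshly split nodes
    // strictly between the merge and split thresholds, so they are not
    // immediately eligible for another split or merge.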
2487    const fn should_split(&self) -> bool {
2488        if self.merging_child.is_some() || self.is_merging {
2489            return false;
2490        }
2491        self.len() > FANOUT - MERGE_SIZE
2492    }
2493
2494    const fn len(&self) -> usize {
2495        self.data.len()
2496    }
2497
2498    fn split(&mut self) -> BoxedAtomicPtr<K, V, FANOUT> {
2499        assert!(!self.is_merging);
2500        assert!(self.merging_child.is_none());
2501
2502        let split_idx = if self.hi.is_none() {
2503            // the infinity node should split almost at the end to improve fill ratio
2504            self.len() - 2
2505        } else if self.lo == K::MIN {
2506            // the left-most node should split almost at the beginning to improve fill ratio
2507            2
2508        } else {
2509            FANOUT / 2
2510        };
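        // The asymmetric split points above are presumably tuned for
        // sequential access patterns: ascending inserts land in the
        // right-most (infinity) node and descending inserts land in the
        // left-most node, so splitting near the actively growing end leaves
        // the other side of the split nearly full.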
2511
2512        let (split_point, rhs_data) = match self.data {
2513            Data::Leaf(ref mut leaf) => {
2514                let rhs_leaf = leaf.split_off(split_idx);
2515                let split_point = rhs_leaf.first().unwrap().0.clone();
2516                assert!(leaf.len() > MERGE_SIZE);
2517                (split_point, Data::Leaf(rhs_leaf))
2518            }
2519            Data::Index(ref mut index) => {
2520                let rhs_index = index.split_off(split_idx);
2521                let split_point = rhs_index.first().unwrap().0.clone();
2522                assert!(index.len() > MERGE_SIZE);
2523                (split_point, Data::Index(rhs_index))
2524            }
2525        };
2526
2527        assert!(rhs_data.len() < FANOUT - MERGE_SIZE);
2528        assert!(rhs_data.len() > MERGE_SIZE);
2529
2530        let rhs_hi = std::mem::replace(&mut self.hi, Some(split_point.clone()));
2531
2532        let rhs = BoxedAtomicPtr::new(Box::new(Node {
2533            lo: split_point,
2534            hi: rhs_hi,
2535            next: self.next,
2536            data: rhs_data,
2537            merging_child: None,
2538            is_merging: false,
2539        }));
2540
2541        self.next = Some(rhs);
2542
2543        assert!(!self.should_split());
2544
2545        rhs
2546    }
2547
2548    fn merge(&mut self, rhs: &NodeView<K, V, FANOUT>) {
2549        assert!(rhs.is_merging);
2550        assert!(!self.is_merging);
2551
2552        self.hi = rhs.hi.clone();
2553        self.next = rhs.next;
2554
2555        match self.data {
2556            Data::Leaf(ref mut leaf) => {
2557                for (k, v) in rhs.leaf().iter() {
2558                    let prev = leaf.insert(k.clone(), v.clone());
2559                    assert!(prev.is_none());
2560                }
2561            }
2562            Data::Index(ref mut index) => {
2563                for (k, v) in rhs.index().iter() {
2564                    let prev = index.insert(k.clone(), *v);
2565                    assert!(prev.is_none());
2566                }
2567            }
2568        }
2569    }
2570
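    // A node is a viable parent for `possible_child` only if the child's key
    // range is fully contained within the parent's own range: the parent's lo
    // must not exceed the child's lo, and the parent's hi must not fall below
    // the child's hi.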
2571    fn is_viable_parent_for(&self, possible_child: &NodeView<K, V, FANOUT>) -> bool {
2572        match (&self.hi, &possible_child.hi) {
2573            (Some(_), None) => return false,
2574            (Some(parent_hi), Some(child_hi)) if parent_hi < child_hi => return false,
2575            _ => {}
2576        }
2577        self.lo <= possible_child.lo
2578    }
2579}
2580
2581impl<K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> FromIterator<(K, V)>
2582    for ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
2583where
2584    K: 'static + Clone + Minimum + Ord + Send + Sync,
2585    V: 'static + Clone + Send + Sync,
2586{
2587    fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
2588        let map = ConcurrentMap::default();
2589
2590        for (k, v) in iter {
2591            map.insert(k, v);
2592        }
2593
2594        map
2595    }
2596}
2597
2598impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> IntoIterator
2599    for &'a ConcurrentMap<K, V, FANOUT, LOCAL_GC_BUFFER_SIZE>
2600where
2601    K: 'static + Clone + Minimum + Ord + Send + Sync,
2602    V: 'static + Clone + Send + Sync,
2603{
2604    type Item = (K, V);
2605    type IntoIter = Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, std::ops::RangeFull>;
2606
2607    fn into_iter(self) -> Self::IntoIter {
2608        self.iter()
2609    }
2610}
2611
2612// This ensures that ConcurrentMap is Send and Clone.
2613const fn _test_impls() {
2614    const fn send<T: Send>() {}
2615    const fn clone<T: Clone>() {}
2616    send::<ConcurrentMap<usize, usize>>();
2617    clone::<ConcurrentMap<usize, usize>>();
2618}
2619
2620#[test]
2621fn basic_map() {
2622    let map = ConcurrentMap::<usize, usize>::default();
2623
2624    let n = 64; // SPLIT_SIZE
2625    for i in 0..=n {
2626        assert_eq!(map.get(&i), None);
2627        map.insert(i, i);
2628        assert_eq!(map.get(&i), Some(i), "failed to get key {i}");
2629    }
2630
2631    for (i, (k, _v)) in map.range(..).enumerate() {
2632        assert_eq!(i, k);
2633    }
2634
2635    for (i, (k, _v)) in map.range(..).rev().enumerate() {
2636        assert_eq!(n - i, k);
2637    }
2638
2639    for (i, (k, _v)) in map.iter().enumerate() {
2640        assert_eq!(i, k);
2641    }
2642
2643    for (i, (k, _v)) in map.iter().rev().enumerate() {
2644        assert_eq!(n - i, k);
2645    }
2646
2647    for (i, (k, _v)) in map.range(0..).enumerate() {
2648        assert_eq!(i, k);
2649    }
2650
2651    for (i, (k, _v)) in map.range(0..).rev().enumerate() {
2652        assert_eq!(n - i, k);
2653    }
2654
2655    for (i, (k, _v)) in map.range(0..n).enumerate() {
2656        assert_eq!(i, k);
2657    }
2658
2659    for (i, (k, _v)) in map.range(0..n).rev().enumerate() {
2660        assert_eq!((n - 1) - i, k);
2661    }
2662
2663    for (i, (k, _v)) in map.range(0..=n).enumerate() {
2664        assert_eq!(i, k);
2665    }
2666
2667    for (i, (k, _v)) in map.range(0..=n).rev().enumerate() {
2668        assert_eq!(n - i, k);
2669    }
2670
2671    for i in 0..=n {
2672        assert_eq!(map.get(&i), Some(i), "failed to get key {i}");
2673    }
2674}
2675
2676#[test]
2677fn timing_map() {
2678    use std::time::Instant;
2679
2680    let map = ConcurrentMap::<u64, u64>::default();
2681
2682    let n = 1024 * 1024;
2683
2684    let insert = Instant::now();
2685    for i in 0..n {
2686        map.insert(i, i);
2687    }
2688    let insert_elapsed = insert.elapsed();
2689    println!(
2690        "{} inserts/s, total {:?}",
2691        (n * 1_000_000) / u64::try_from(insert_elapsed.as_micros().max(1)).unwrap_or(u64::MAX),
2692        insert_elapsed
2693    );
2694
2695    let scan = Instant::now();
2696    let count = map.range(..).count();
2697    assert_eq!(count as u64, n);
2698    let scan_elapsed = scan.elapsed();
2699    println!(
2700        "{} scanned items/s, total {:?}",
2701        (n * 1_000_000) / u64::try_from(scan_elapsed.as_micros().max(1)).unwrap_or(u64::MAX),
2702        scan_elapsed
2703    );
2704
2705    let scan_rev = Instant::now();
2706    let count = map.range(..).rev().count();
2707    assert_eq!(count as u64, n);
2708    let scan_rev_elapsed = scan_rev.elapsed();
2709    println!(
2710        "{} reverse-scanned items/s, total {:?}",
2711        (n * 1_000_000) / u64::try_from(scan_rev_elapsed.as_micros().max(1)).unwrap_or(u64::MAX),
2712        scan_rev_elapsed
2713    );
2714
2715    let gets = Instant::now();
2716    for i in 0..n {
2717        map.get(&i);
2718    }
2719    let gets_elapsed = gets.elapsed();
2720    println!(
2721        "{} gets/s, total {:?}",
2722        (n * 1_000_000) / u64::try_from(gets_elapsed.as_micros().max(1)).unwrap_or(u64::MAX),
2723        gets_elapsed
2724    );
2725}