concurrent_slotmap/
lib.rs

1#![allow(unused_unsafe, clippy::inline_always)]
2#![warn(rust_2018_idioms, missing_debug_implementations)]
3#![forbid(unsafe_op_in_unsafe_fn, clippy::undocumented_unsafe_blocks)]
4#![cfg_attr(not(feature = "std"), no_std)]
5
6extern crate alloc;
7
8use alloc::borrow::Cow;
9use core::{
10    cell::UnsafeCell,
11    fmt, hint,
12    iter::{self, FusedIterator},
13    mem::MaybeUninit,
14    num::NonZeroU32,
15    ops::Deref,
16    panic::{RefUnwindSafe, UnwindSafe},
17    slice,
18    sync::atomic::{
19        self, AtomicU32, AtomicU64,
20        Ordering::{Acquire, Relaxed, Release, SeqCst},
21    },
22};
23use virtual_buffer::vec::Vec;
24
25pub mod epoch;
26
/// The slot index used to signify the lack thereof.
///
/// Also serves as the "empty" head/next sentinel in the free-lists.
const NIL: u32 = u32::MAX;

/// The number of low bits that can be used for tagged generations.
const TAG_BITS: u32 = 8;

/// The mask for tagged generations (the low `TAG_BITS` bits).
const TAG_MASK: u32 = (1 << TAG_BITS) - 1;
35
#[cfg_attr(not(doc), repr(C))]
pub struct SlotMap<T, C: Collector<T> = DefaultCollector> {
    /// The slots. This vector only ever grows (up to the maximum capacity fixed at
    /// construction), so indices pushed into the free-lists always remain in bounds.
    slots: Vec<Slot<T>>,
    /// The number of occupied slots.
    len: AtomicU32,
    /// The global epoch handle that every guard passed to this map must belong to.
    global: epoch::GlobalHandle,
    /// Receives each reclaimed value (see `collect_unchecked`).
    collector: C,

    /// Spacer (presumably cache-line sized — see `CacheAligned`) keeping `free_list` away from
    /// the fields above to reduce false sharing; relies on the `repr(C)` field order.
    _alignment1: CacheAligned,

    /// The free-list. This is the list of slots which have already been dropped and are ready to
    /// be claimed by insert operations.
    free_list: AtomicU32,

    /// Spacer separating `free_list` from `free_list_queue` for the same false-sharing reason.
    _alignment2: CacheAligned,

    /// Free-lists queued for inclusion in the `free_list`. Since the global epoch can only be
    /// advanced if all pinned local epochs are pinned in the current global epoch, we only need
    /// two free-lists in the queue: one for the current global epoch and one for the previous
    /// epoch. This way a thread can always push a slot into list `(epoch / 2) % 2`, and whenever
    /// the global epoch is advanced, since we know that the lag can be at most one step, we can be
    /// certain that the list which was lagging behind before the global epoch was advanced is now
    /// safe to drop and prepend to the `free_list`.
    ///
    /// The atomic packs the list's head in the lower 32 bits and the epoch of the last push in the
    /// upper 32 bits. The epoch must not be updated if it would be going backwards; it's only
    /// updated when the last epoch is at least 2 steps behind the local epoch, at which point -
    /// after removing the list from the queue and updating the epoch - we can be certain that no
    /// other threads are accessing any of the slots in the list.
    free_list_queue: [AtomicU64; 2],
}
66
// The `UnsafeCell`s inside `Slot` would otherwise suppress these auto traits. These impls assert
// unwind safety for any `T`/`C`. NOTE(review): this presumes no operation can leave the map in a
// logically inconsistent state observable after a panic — confirm when changing mutation paths.
impl<T, C: Collector<T>> UnwindSafe for SlotMap<T, C> {}
impl<T, C: Collector<T>> RefUnwindSafe for SlotMap<T, C> {}
69
impl<T> SlotMap<T, DefaultCollector> {
    /// Creates a new map with the given maximum capacity and its own [`epoch::GlobalHandle`].
    #[must_use]
    pub fn new(max_capacity: u32) -> Self {
        Self::with_global(max_capacity, epoch::GlobalHandle::new())
    }

    /// Creates a new map whose epochs are managed by the given `global` handle.
    #[must_use]
    pub fn with_global(max_capacity: u32, global: epoch::GlobalHandle) -> Self {
        Self::with_global_and_collector(max_capacity, global, DefaultCollector)
    }
}
81
82impl<T, C: Collector<T>> SlotMap<T, C> {
    /// Creates a new map with its own [`epoch::GlobalHandle`] and the given `collector`.
    #[must_use]
    pub fn with_collector(max_capacity: u32, collector: C) -> Self {
        Self::with_global_and_collector(max_capacity, epoch::GlobalHandle::new(), collector)
    }
87
    /// Creates a new map with the given maximum capacity, epoch `global` and `collector`.
    #[must_use]
    pub fn with_global_and_collector(
        max_capacity: u32,
        global: epoch::GlobalHandle,
        collector: C,
    ) -> Self {
        SlotMap {
            slots: Vec::new(max_capacity as usize),
            len: AtomicU32::new(0),
            global,
            collector,
            _alignment1: CacheAligned,
            free_list: AtomicU32::new(NIL),
            _alignment2: CacheAligned,
            // Both queued lists start out empty (head = `NIL`). Their epochs are staggered (0 and
            // 2) so that each entry corresponds to a distinct `(epoch / 2) % 2` value from the
            // start; see the `free_list_queue` field docs.
            free_list_queue: [
                AtomicU64::new(u64::from(NIL)),
                AtomicU64::new(u64::from(NIL) | 2 << 32),
            ],
        }
    }
108
    /// Returns the capacity of the map's slots vector.
    // Our capacity can never exceed `u32::MAX`.
    #[allow(clippy::cast_possible_truncation)]
    #[inline]
    #[must_use]
    pub fn capacity(&self) -> u32 {
        self.slots.capacity() as u32
    }
116
    /// Returns the number of occupied slots.
    ///
    /// This is a `Relaxed` snapshot: under concurrent inserts/removes the value may already be
    /// stale by the time it is returned.
    #[inline]
    #[must_use]
    pub fn len(&self) -> u32 {
        self.len.load(Relaxed)
    }
122
123    #[inline]
124    #[must_use]
125    pub fn is_empty(&self) -> bool {
126        self.len() == 0
127    }
128
    /// Returns the [`epoch::GlobalHandle`] that guards used with this map must belong to.
    #[inline]
    #[must_use]
    pub fn global(&self) -> &epoch::GlobalHandle {
        &self.global
    }
134
    /// Returns a reference to the map's collector.
    #[inline]
    #[must_use]
    pub fn collector(&self) -> &C {
        &self.collector
    }
140
141    /// # Panics
142    ///
143    /// Panics if `guard.global()` does not equal `self.global()`.
144    #[inline]
145    pub fn insert<'a>(&'a self, value: T, guard: impl Into<Cow<'a, epoch::Guard<'a>>>) -> SlotId {
146        self.insert_inner(value, 0, guard.into())
147    }
148
149    /// # Panics
150    ///
151    /// - Panics if `guard.global()` does not equal `self.global()`.
152    /// - Panics if `tag` has more than the low 8 bits set.
153    #[inline]
154    pub fn insert_with_tag<'a>(
155        &'a self,
156        value: T,
157        tag: u32,
158        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
159    ) -> SlotId {
160        self.insert_inner(value, tag, guard.into())
161    }
162
    /// Common implementation of [`SlotMap::insert`] and [`SlotMap::insert_with_tag`]: pops a slot
    /// off the free-list if one is available, otherwise appends a brand-new slot.
    #[allow(clippy::needless_pass_by_value)]
    fn insert_inner<'a>(&'a self, value: T, tag: u32, guard: Cow<'a, epoch::Guard<'a>>) -> SlotId {
        assert_eq!(guard.global(), &self.global);
        assert_eq!(tag & !TAG_MASK, 0);

        let mut free_list_head = self.free_list.load(Acquire);
        let mut backoff = Backoff::new();

        // First, try to reuse a slot from the free-list.
        loop {
            if free_list_head == NIL {
                break;
            }

            // SAFETY: We always push indices of existing slots into the free-lists and the slots
            // vector never shrinks, therefore the index must have staid in bounds.
            let slot = unsafe { self.slots.get_unchecked(free_list_head as usize) };

            let next_free = slot.next_free.load(Relaxed);

            // Try to pop the head by swinging `free_list` to the head's successor.
            match self
                .free_list
                .compare_exchange_weak(free_list_head, next_free, Release, Acquire)
            {
                Ok(_) => {
                    // SAFETY: `SlotMap::remove[_mut]` guarantees that the free-list only contains
                    // slots that are no longer read by any threads, and we have removed the slot
                    // from the free-list such that no other threads can be writing the same slot.
                    unsafe { slot.value.get().cast::<T>().write(value) };

                    // Freed slots have their occupied bit and tag bits cleared, so this add both
                    // sets the occupied bit and stores the new tag.
                    let new_generation = slot
                        .generation
                        .fetch_add(OCCUPIED_BIT | tag, Release)
                        .wrapping_add(OCCUPIED_BIT | tag);

                    self.len.fetch_add(1, Relaxed);

                    // SAFETY: `SlotMap::remove[_mut]` guarantees that a freed slot has its
                    // generation's `OCCUPIED_BIT` unset, and since we incremented the generation,
                    // the bit must have been flipped again.
                    return unsafe { SlotId::new_unchecked(free_list_head, new_generation) };
                }
                Err(new_head) => {
                    free_list_head = new_head;
                    backoff.spin();
                }
            }
        }

        // The free-list is empty: append a fresh slot instead.
        // Our capacity can never exceed `u32::MAX`.
        #[allow(clippy::cast_possible_truncation)]
        let index = self.slots.push(Slot::new(value, tag)) as u32;

        self.len.fetch_add(1, Relaxed);

        // SAFETY: The `OCCUPIED_BIT` is set.
        unsafe { SlotId::new_unchecked(index, OCCUPIED_BIT | tag) }
    }
220
    /// Inserts `value` with a zero tag, using the mutable borrow for synchronization instead of
    /// epoch protection.
    pub fn insert_mut(&mut self, value: T) -> SlotId {
        self.insert_with_tag_mut(value, 0)
    }
224
    /// Like [`SlotMap::insert_with_tag`], but uses the mutable borrow for synchronization instead
    /// of epoch protection.
    ///
    /// # Panics
    ///
    /// Panics if `tag` has more than the low 8 bits set.
    pub fn insert_with_tag_mut(&mut self, value: T, tag: u32) -> SlotId {
        assert_eq!(tag & !TAG_MASK, 0);

        let free_list_head = *self.free_list.get_mut();

        if free_list_head != NIL {
            // SAFETY: We always push indices of existing slots into the free-lists and the slots
            // vector never shrinks, therefore the index must have staid in bounds.
            let slot = unsafe { self.slots.get_unchecked_mut(free_list_head as usize) };

            // Freed slots have their occupied bit and tag bits cleared, so this add both sets the
            // occupied bit and stores the new tag.
            let new_generation = slot.generation.get_mut().wrapping_add(OCCUPIED_BIT | tag);

            *slot.generation.get_mut() = new_generation;

            // Pop the slot off the free-list.
            *self.free_list.get_mut() = *slot.next_free.get_mut();

            *slot.value.get_mut() = MaybeUninit::new(value);

            *self.len.get_mut() += 1;

            // SAFETY: `SlotMap::remove[_mut]` guarantees that a freed slot has its generation's
            // `OCCUPIED_BIT` unset, and since we incremented the generation, the bit must have been
            // flipped again.
            return unsafe { SlotId::new_unchecked(free_list_head, new_generation) };
        }

        // Our capacity can never exceed `u32::MAX`.
        #[allow(clippy::cast_possible_truncation)]
        let index = self.slots.push_mut(Slot::new(value, tag)) as u32;

        *self.len.get_mut() += 1;

        // SAFETY: The `OCCUPIED_BIT` is set.
        unsafe { SlotId::new_unchecked(index, OCCUPIED_BIT | tag) }
    }
263
264    /// # Panics
265    ///
266    /// Panics if `guard.global()` does not equal `self.global()`.
267    #[inline]
268    pub fn remove<'a>(
269        &'a self,
270        id: SlotId,
271        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
272    ) -> Option<Ref<'a, T>> {
273        self.remove_inner(id, guard.into())
274    }
275
    /// Common implementation of [`SlotMap::remove`]: atomically vacates the slot if and only if
    /// its generation still matches `id`, then queues it for reclamation.
    fn remove_inner<'a>(
        &'a self,
        id: SlotId,
        guard: Cow<'a, epoch::Guard<'a>>,
    ) -> Option<Ref<'a, T>> {
        assert_eq!(guard.global(), &self.global);

        let slot = self.slots.get(id.index as usize)?;
        // Clearing the tag bits and adding `OCCUPIED_BIT` to a generation whose occupied bit is
        // set carries past that bit: the result has the occupied bit unset and the counter
        // advanced.
        let new_generation = (id.generation() & !TAG_MASK).wrapping_add(OCCUPIED_BIT);

        // This works thanks to the invariant of `SlotId` that the `OCCUPIED_BIT` of its generation
        // must be set. That means that the only outcome possible in case of success here is that
        // the `OCCUPIED_BIT` is unset and the generation is advanced.
        if slot
            .generation
            .compare_exchange(id.generation(), new_generation, Acquire, Relaxed)
            .is_err()
        {
            return None;
        }

        // SAFETY:
        // * The `Acquire` ordering when loading the slot's generation synchronizes with the
        //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value is
        //   visible here.
        // * The `compare_exchange` above succeeded, which means that the previous generation of the
        //   slot must have matched `id.generation`. By `SlotId`'s invariant, its generation's
        //   occupied bit must be set. Since the generation matched, the slot's occupied bit must
        //   have been set, which makes reading the value safe as the only way the occupied bit can
        //   be set is in `SlotMap::insert[_mut]` after initialization of the slot.
        // * We unset the slot's `OCCUPIED_BIT` such that no other threads can be attempting to push
        //   it into the free-lists.
        Some(unsafe { self.remove_inner_inner(id.index, guard) })
    }
310
    // Inner indeed.
    //
    // Pushes the (already vacated) slot at `index` into the free-list queue entry for the current
    // global epoch. If that entry was last pushed to at least 2 epoch steps ago, the stale list
    // is unlinked and handed to `collect_unchecked` first, since no thread can still be reading
    // its slots.
    //
    // # Safety
    //
    // The caller must ensure that:
    // * `index` is in bounds of the slots vector;
    // * the slot's value was initialized and that write is synchronized with and visible here;
    // * the slot's `OCCUPIED_BIT` has been unset by the caller, so that no other thread can be
    //   attempting to push the same slot into the free-lists.
    unsafe fn remove_inner_inner<'a>(
        &'a self,
        index: u32,
        guard: Cow<'a, epoch::Guard<'a>>,
    ) -> Ref<'a, T> {
        // SAFETY: The caller must ensure that `index` is in bounds.
        let slot = unsafe { self.slots.get_unchecked(index as usize) };

        // NOTE(review): presumably orders the preceding generation update before the epoch load
        // below, pairing with fences in the `epoch` module — confirm against `epoch.rs`.
        atomic::fence(SeqCst);

        let epoch = guard.global().epoch();
        // The queue entries are seeded with epochs 0 and 2, and the interval assertions below
        // imply the epoch advances in steps of 2, so `(epoch >> 1) & 1` selects the entry for
        // `(epoch / 2) % 2` as described on `free_list_queue`.
        let queued_list = &self.free_list_queue[((epoch >> 1) & 1) as usize];
        let mut queued_state = queued_list.load(Acquire);
        let mut backoff = Backoff::new();

        loop {
            // The state packs the list head in the low 32 bits and the epoch of the last push in
            // the high 32 bits.
            let queued_head = (queued_state & 0xFFFF_FFFF) as u32;
            let queued_epoch = (queued_state >> 32) as u32;
            let epoch_interval = epoch.wrapping_sub(queued_epoch);

            if epoch_interval == 0 {
                // The list was last pushed to in the current epoch: prepend our slot to it.
                slot.next_free.store(queued_head, Relaxed);

                let new_state = u64::from(index) | u64::from(queued_epoch) << 32;

                match queued_list.compare_exchange_weak(queued_state, new_state, Release, Acquire) {
                    Ok(_) => {
                        self.len.fetch_sub(1, Relaxed);

                        // SAFETY: The caller must ensure that the inner value was initialized and
                        // that said write was synchronized with and made visible here.
                        break unsafe { Ref { slot, guard } };
                    }
                    Err(new_state) => {
                        queued_state = new_state;
                        backoff.spin();
                    }
                }
            } else {
                // The list is stale: replace it with a single-element list containing our slot,
                // and reclaim the old list on success.
                let global_epoch_is_behind_queue = epoch_interval & (1 << 31) != 0;

                debug_assert!(!global_epoch_is_behind_queue);
                debug_assert!(epoch_interval >= 4);

                slot.next_free.store(NIL, Relaxed);

                let new_state = u64::from(index) | u64::from(epoch) << 32;

                match queued_list.compare_exchange_weak(queued_state, new_state, Release, Acquire) {
                    Ok(_) => {
                        self.len.fetch_sub(1, Relaxed);

                        // SAFETY: Having ended up here, the global epoch must have been advanced
                        // at least 2 steps from the last push into the queued list and we removed
                        // the list from the queue, which means that no other threads can be
                        // accessing any of the slots in the list.
                        unsafe { self.collect_unchecked(queued_head) };

                        // SAFETY: The caller must ensure that the inner value was initialized and
                        // that said write was synchronized with and made visible here.
                        break unsafe { Ref { slot, guard } };
                    }
                    Err(new_state) => {
                        queued_state = new_state;
                        backoff.spin();
                    }
                }
            }
        }
    }
382
    /// Attempts to unlink and reclaim the queued free-list associated with the current global
    /// epoch, making its slots claimable by insert operations again. Does nothing if that list
    /// was last pushed to in the current epoch.
    ///
    /// # Panics
    ///
    /// Panics if `guard.global()` does not equal `self.global()`.
    pub fn try_collect(&self, guard: &epoch::Guard<'_>) {
        assert_eq!(guard.global(), &self.global);

        let epoch = guard.global().epoch();
        let queued_list = &self.free_list_queue[((epoch >> 1) & 1) as usize];
        let mut queued_state = queued_list.load(Acquire);
        let mut backoff = Backoff::new();

        loop {
            // The state packs the list head in the low 32 bits and the epoch of the last push in
            // the high 32 bits.
            let queued_head = (queued_state & 0xFFFF_FFFF) as u32;
            let queued_epoch = (queued_state >> 32) as u32;
            let epoch_interval = epoch.wrapping_sub(queued_epoch);

            if epoch_interval == 0 {
                // The list was pushed to in the current epoch; other threads may still be reading
                // its slots, so nothing can be reclaimed yet.
                break;
            }

            let global_epoch_is_behind_queue = epoch_interval & (1 << 31) != 0;

            debug_assert!(!global_epoch_is_behind_queue);

            // Empty the list while preserving its (stale) epoch; the next push into this entry
            // will take the stale-epoch path and refresh it.
            let new_state = u64::from(NIL) | u64::from(queued_epoch) << 32;

            match queued_list.compare_exchange_weak(queued_state, new_state, Relaxed, Acquire) {
                Ok(_) => {
                    // SAFETY: Having ended up here, the global epoch must have been advanced at
                    // least 2 steps from the last push into the queued list and we removed the
                    // list from the queue, which means that no other threads can be accessing any
                    // of the slots in the list.
                    unsafe { self.collect_unchecked(queued_head) };

                    break;
                }
                Err(new_state) => {
                    queued_state = new_state;
                    backoff.spin();
                }
            }
        }
    }
426
    /// Hands every value in the free-list headed by `queued_head` to the collector and then
    /// prepends the whole list to `free_list`, making the slots claimable by inserts again.
    ///
    /// # Safety
    ///
    /// The caller must ensure that this thread has exclusive access to the list headed by
    /// `queued_head`: no other thread may be reading any of its slots' values or traversing the
    /// list.
    #[cold]
    unsafe fn collect_unchecked(&self, queued_head: u32) {
        if queued_head == NIL {
            // There is no garbage.
            return;
        }

        let mut queued_tail = queued_head;
        let mut queued_tail_slot;

        // Drop the queued free-list and find the tail slot.
        loop {
            // SAFETY: We always push indices of existing slots into the free-lists and the slots
            // vector never shrinks, therefore the index must have staid in bounds.
            queued_tail_slot = unsafe { self.slots.get_unchecked(queued_tail as usize) };

            let ptr = queued_tail_slot.value.get().cast::<T>();

            // SAFETY: The caller must ensure that we have exclusive access to this list.
            unsafe { self.collector.collect(ptr) };

            let next_free = queued_tail_slot.next_free.load(Acquire);

            if next_free == NIL {
                break;
            }

            queued_tail = next_free;
        }

        let mut free_list_head = self.free_list.load(Acquire);
        let mut backoff = Backoff::new();

        // Free the queued free-list by prepending it to the free-list.
        loop {
            // Link the collected list's tail to the current free-list head, then try to swing
            // `free_list` to the collected list's head.
            queued_tail_slot.next_free.store(free_list_head, Relaxed);

            match self.free_list.compare_exchange_weak(
                free_list_head,
                queued_head,
                Release,
                Acquire,
            ) {
                Ok(_) => break,
                Err(new_head) => {
                    free_list_head = new_head;
                    backoff.spin();
                }
            }
        }
    }
478
    /// Removes and returns the value with the given `id`, or `None` if the ID is stale or out of
    /// bounds. The mutable borrow provides the synchronization, so the slot is pushed directly
    /// onto the free-list instead of being queued.
    pub fn remove_mut(&mut self, id: SlotId) -> Option<T> {
        let slot = self.slots.get_mut(id.index as usize)?;
        let generation = *slot.generation.get_mut();

        if generation == id.generation() {
            // Clear the tag bits and add `OCCUPIED_BIT`; since the occupied bit is set in
            // `id.generation()`, the add carries past it, unsetting it and advancing the counter.
            *slot.generation.get_mut() = (id.generation() & !TAG_MASK).wrapping_add(OCCUPIED_BIT);

            // Push the slot onto the free-list.
            *slot.next_free.get_mut() = *self.free_list.get_mut();
            *self.free_list.get_mut() = id.index;

            *self.len.get_mut() -= 1;

            // SAFETY:
            // * The mutable reference makes sure that access to the slot is synchronized.
            // * We checked that `id.generation` matches the slot's generation, which includes the
            //   occupied bit. By `SlotId`'s invariant, its generation's occupied bit must be set.
            //   Since the generation matched, the slot's occupied bit must be set, which makes
            //   reading the value safe as the only way the occupied bit can be set is in
            //   `SlotMap::insert[_mut]` after initialization of the slot.
            // * We incremented the slot's generation such that its `OCCUPIED_BIT` is unset and its
            //   generation is advanced, such that future attempts to access the slot will fail.
            Some(unsafe { slot.value.get().cast::<T>().read() })
        } else {
            None
        }
    }
505
506    #[cfg(test)]
507    fn remove_index<'a>(
508        &'a self,
509        index: u32,
510        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
511    ) -> Option<Ref<'a, T>> {
512        self.remove_index_inner(index, guard.into())
513    }
514
    /// Test-only common implementation of `remove_index`: vacates the slot at `index` regardless
    /// of its current generation or tag, retrying until the slot is observed vacant or the CAS
    /// succeeds.
    #[cfg(test)]
    fn remove_index_inner<'a>(
        &'a self,
        index: u32,
        guard: Cow<'a, epoch::Guard<'a>>,
    ) -> Option<Ref<'a, T>> {
        assert_eq!(guard.global(), &self.global);

        let slot = self.slots.get(index as usize)?;
        let mut generation = slot.generation.load(Relaxed);
        let mut backoff = Backoff::new();

        loop {
            if generation & OCCUPIED_BIT == 0 {
                // The slot is already vacant.
                return None;
            }

            // Clear the tag bits and advance the generation past the occupied bit.
            let new_generation = (generation & !TAG_MASK).wrapping_add(OCCUPIED_BIT);

            match slot.generation.compare_exchange_weak(
                generation,
                new_generation,
                Acquire,
                Relaxed,
            ) {
                Ok(_) => break,
                Err(new_generation) => {
                    generation = new_generation;
                    backoff.spin();
                }
            }
        }

        // SAFETY:
        // * The `Acquire` ordering when loading the slot's generation synchronizes with the
        //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value is
        //   visible here.
        // * The `compare_exchange_weak` above succeeded, which means that the previous generation
        //   of the slot must have had its `OCCUPIED_BIT` set, which makes reading the value safe as
        //   the only way the occupied bit can be set is in `SlotMap::insert[_mut]` after
        //   initialization of the slot.
        // * We unset the slot's `OCCUPIED_BIT` such that no other threads can be attempting to push
        //   it into the free-lists.
        Some(unsafe { self.remove_inner_inner(index, guard) })
    }
560
561    /// # Panics
562    ///
563    /// Panics if `guard.global()` does not equal `self.global()`.
564    #[inline(always)]
565    #[must_use]
566    pub fn get<'a>(
567        &'a self,
568        id: SlotId,
569        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
570    ) -> Option<Ref<'a, T>> {
571        self.get_inner(id, guard.into())
572    }
573
    /// Common implementation of [`SlotMap::get`]: succeeds only if the slot's generation exactly
    /// matches `id` (including the occupied bit and tag).
    #[inline(always)]
    fn get_inner<'a>(&'a self, id: SlotId, guard: Cow<'a, epoch::Guard<'a>>) -> Option<Ref<'a, T>> {
        assert_eq!(guard.global(), &self.global);

        let slot = self.slots.get(id.index as usize)?;
        let generation = slot.generation.load(Acquire);

        if generation == id.generation() {
            // SAFETY:
            // * The `Acquire` ordering when loading the slot's generation synchronizes with the
            //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value
            //   is visible here.
            // * We checked that `id.generation` matches the slot's generation, which includes the
            //   occupied bit. By `SlotId`'s invariant, its generation's occupied bit must be set.
            //   Since the generation matched, the slot's occupied bit must be set, which makes
            //   reading the value safe as the only way the occupied bit can be set is in
            //   `SlotMap::insert[_mut]` after initialization of the slot.
            Some(unsafe { Ref { slot, guard } })
        } else {
            None
        }
    }
596
    /// Returns a reference to the value with the given `id` without tying its lifetime to an
    /// [`epoch::Guard`].
    ///
    /// # Safety
    ///
    /// You must ensure that the epoch is [pinned] before you call this method and that the
    /// returned reference doesn't outlive all [`epoch::Guard`]s active on the thread, or that all
    /// accesses to `self` are externally synchronized (for example through the use of a `Mutex` or
    /// by being single-threaded).
    ///
    /// [pinned]: epoch::pin
    #[inline(always)]
    pub unsafe fn get_unprotected(&self, id: SlotId) -> Option<&T> {
        let slot = self.slots.get(id.index as usize)?;
        let generation = slot.generation.load(Acquire);

        if generation == id.generation() {
            // SAFETY:
            // * The `Acquire` ordering when loading the slot's generation synchronizes with the
            //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value
            //   is visible here.
            // * We checked that `id.generation` matches the slot's generation, which includes the
            //   occupied bit. By `SlotId`'s invariant, its generation's occupied bit must be set.
            //   Since the generation matched, the slot's occupied bit must be set, which makes
            //   reading the value safe as the only way the occupied bit can be set is in
            //   `SlotMap::insert[_mut]` after initialization of the slot.
            // * The caller must ensure that the returned reference is protected by a guard before
            //   the call and that the returned reference doesn't outlive said guard, or that
            //   synchronization is ensured externally.
            Some(unsafe { slot.value_unchecked() })
        } else {
            None
        }
    }
628
    /// Returns a mutable reference to the value with the given `id`, or `None` if the ID is stale
    /// or out of bounds.
    #[inline(always)]
    pub fn get_mut(&mut self, id: SlotId) -> Option<&mut T> {
        let slot = self.slots.get_mut(id.index as usize)?;
        let generation = *slot.generation.get_mut();

        if generation == id.generation() {
            // SAFETY:
            // * The mutable reference makes sure that access to the slot is synchronized.
            // * We checked that `id.generation` matches the slot's generation, which includes the
            //   occupied bit. By `SlotId`'s invariant, its generation's occupied bit must be set.
            //   Since the generation matched, the slot's occupied bit must be set, which makes
            //   reading the value safe as the only way the occupied bit can be set is in
            //   `SlotMap::insert[_mut]` after initialization of the slot.
            Some(unsafe { slot.value_unchecked_mut() })
        } else {
            None
        }
    }
647
    /// Returns mutable references to the values of all `ids` at once, or `None` if any ID is out
    /// of bounds or stale, or if any two IDs share the same index.
    #[inline]
    pub fn get_many_mut<const N: usize>(&mut self, ids: [SlotId; N]) -> Option<[&mut T; N]> {
        // Checks that every index is in bounds of `len` and that all indices are pairwise
        // disjunct. Branchless accumulation keeps this cheap for small `N`.
        fn get_many_check_valid<const N: usize>(ids: &[SlotId; N], len: u32) -> bool {
            let mut valid = true;

            for (i, id) in ids.iter().enumerate() {
                valid &= id.index() < len;

                for id2 in &ids[..i] {
                    valid &= id.index() != id2.index();
                }
            }

            valid
        }

        // Our capacity can never exceed `u32::MAX`, so the length of the slots can't either.
        #[allow(clippy::cast_possible_truncation)]
        let len = self.slots.len() as u32;

        if get_many_check_valid(&ids, len) {
            // SAFETY: We checked that all indices are disjunct and in bounds of the slots vector.
            unsafe { self.get_many_unchecked_mut(ids) }
        } else {
            None
        }
    }
675
    /// Returns mutable references to the values of all `ids`, or `None` if any generation doesn't
    /// match.
    ///
    /// # Safety
    ///
    /// The caller must ensure that every ID's index is in bounds of the slots vector and that all
    /// indices are pairwise disjunct.
    #[inline]
    unsafe fn get_many_unchecked_mut<const N: usize>(
        &mut self,
        ids: [SlotId; N],
    ) -> Option<[&mut T; N]> {
        let slots_ptr = self.slots.as_mut_ptr();
        let mut refs = MaybeUninit::<[&mut T; N]>::uninit();
        let refs_ptr = refs.as_mut_ptr().cast::<&mut T>();

        for i in 0..N {
            // SAFETY: `i` is in bounds of the array.
            let id = unsafe { ids.get_unchecked(i) };

            // SAFETY: The caller must ensure that `ids` contains only IDs whose indices are in
            // bounds of the slots vector.
            let slot = unsafe { slots_ptr.add(id.index() as usize) };

            // SAFETY: The caller must ensure that `ids` contains only IDs with disjunct indices.
            let slot = unsafe { &mut *slot };

            let generation = *slot.generation.get_mut();

            if generation != id.generation() {
                return None;
            }

            // SAFETY:
            // * The mutable reference makes sure that access to the slot is synchronized.
            // * We checked that `id.generation` matches the slot's generation, which includes the
            //   occupied bit. By `SlotId`'s invariant, its generation's occupied bit must be set.
            //   Since the generation matched, the slot's occupied bit must be set, which makes
            //   reading the value safe as the only way the occupied bit can be set is in
            //   `SlotMap::insert[_mut]` after initialization of the slot.
            let value = unsafe { slot.value_unchecked_mut() };

            // SAFETY: `i` is in bounds of the array.
            unsafe { *refs_ptr.add(i) = value };
        }

        // SAFETY: We initialized all the elements.
        Some(unsafe { refs.assume_init() })
    }
718
719    /// # Panics
720    ///
721    /// Panics if `guard.global()` does not equal `self.global()`.
722    #[inline(always)]
723    pub fn index<'a>(
724        &'a self,
725        index: u32,
726        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
727    ) -> Option<Ref<'a, T>> {
728        self.index_inner(index, guard.into())
729    }
730
    /// Common implementation of [`SlotMap::index`]: only checks the occupied bit, not the full
    /// generation.
    #[inline(always)]
    fn index_inner<'a>(
        &'a self,
        index: u32,
        guard: Cow<'a, epoch::Guard<'a>>,
    ) -> Option<Ref<'a, T>> {
        assert_eq!(guard.global(), &self.global);

        let slot = self.slots.get(index as usize)?;
        let generation = slot.generation.load(Acquire);

        if generation & OCCUPIED_BIT != 0 {
            // SAFETY:
            // * The `Acquire` ordering when loading the slot's generation synchronizes with the
            //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value
            //   is visible here.
            // * We checked that the slot is occupied, which means that it must have been
            //   initialized in `SlotMap::insert[_mut]`.
            Some(unsafe { Ref { slot, guard } })
        } else {
            None
        }
    }
754
    /// Returns a reference to the value at `index` (if occupied) without tying its lifetime to an
    /// [`epoch::Guard`].
    ///
    /// # Safety
    ///
    /// You must ensure that the epoch is [pinned] before you call this method and that the
    /// returned reference doesn't outlive all [`epoch::Guard`]s active on the thread, or that all
    /// accesses to `self` are externally synchronized (for example through the use of a `Mutex` or
    /// by being single-threaded).
    ///
    /// [pinned]: epoch::pin
    #[inline(always)]
    pub unsafe fn index_unprotected(&self, index: u32) -> Option<&T> {
        let slot = self.slots.get(index as usize)?;
        let generation = slot.generation.load(Acquire);

        if generation & OCCUPIED_BIT != 0 {
            // SAFETY:
            // * The `Acquire` ordering when loading the slot's generation synchronizes with the
            //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value
            //   is visible here.
            // * We checked that the slot is occupied, which means that it must have been
            //   initialized in `SlotMap::insert[_mut]`.
            // * The caller must ensure that the returned reference is protected by a guard before
            //   the call and that the returned reference doesn't outlive said guard, or that
            //   synchronization is ensured externally.
            Some(unsafe { slot.value_unchecked() })
        } else {
            None
        }
    }
783
784    #[inline(always)]
785    pub fn index_mut(&mut self, index: u32) -> Option<&mut T> {
786        let slot = self.slots.get_mut(index as usize)?;
787        let generation = *slot.generation.get_mut();
788
789        if generation & OCCUPIED_BIT != 0 {
790            // SAFETY:
791            // * The mutable reference makes sure that access to the slot is synchronized.
792            // * We checked that the slot is occupied, which means that it must have been
793            //   initialized in `SlotMap::insert[_mut]`.
794            Some(unsafe { slot.value_unchecked_mut() })
795        } else {
796            None
797        }
798    }
799
800    /// # Safety
801    ///
802    /// `index` must be in bounds of the slots vector and the slot must have been initialized and
803    /// must not be free.
804    ///
805    /// # Panics
806    ///
807    /// Panics if `guard.global()` does not equal `self.global()`.
808    #[inline(always)]
809    pub unsafe fn index_unchecked<'a>(
810        &'a self,
811        index: u32,
812        guard: impl Into<Cow<'a, epoch::Guard<'a>>>,
813    ) -> Ref<'a, T> {
814        // SAFETY: Ensured by the caller.
815        unsafe { self.index_unchecked_inner(index, guard.into()) }
816    }
817
    #[inline(always)]
    unsafe fn index_unchecked_inner<'a>(
        &'a self,
        index: u32,
        guard: Cow<'a, epoch::Guard<'a>>,
    ) -> Ref<'a, T> {
        // The guard must belong to the same collector as this map.
        assert_eq!(guard.global(), &self.global);

        // SAFETY: The caller must ensure that the index is in bounds.
        let slot = unsafe { self.slots.get_unchecked(index as usize) };

        // The loaded value is discarded on purpose: the `Acquire` load itself is what
        // establishes the synchronization described in the safety comment below.
        let _generation = slot.generation.load(Acquire);

        // SAFETY:
        // * The `Acquire` ordering when loading the slot's generation synchronizes with the
        //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value is
        //   visible here.
        // * The caller must ensure that the slot is initialized.
        unsafe { Ref { slot, guard } }
    }
838
    /// # Safety
    ///
    /// * `index` must be in bounds of the slots vector and the slot must have been initialized and
    ///   must not be free.
    /// * You must ensure that the epoch is [pinned] before you call this method and that the
    ///   returned reference doesn't outlive all [`epoch::Guard`]s active on the thread, or that
    ///   all accesses to `self` are externally synchronized (for example through the use of a
    ///   `Mutex` or by being single-threaded).
    ///
    /// [pinned]: epoch::pin
    #[inline(always)]
    pub unsafe fn index_unchecked_unprotected(&self, index: u32) -> &T {
        // SAFETY: The caller must ensure that the index is in bounds.
        let slot = unsafe { self.slots.get_unchecked(index as usize) };

        // The loaded value is discarded on purpose: the `Acquire` load itself is what
        // establishes the synchronization described in the safety comment below.
        let _generation = slot.generation.load(Acquire);

        // SAFETY:
        // * The `Acquire` ordering when loading the slot's generation synchronizes with the
        //   `Release` ordering in `SlotMap::insert`, making sure that the newly written value is
        //   visible here.
        // * The caller must ensure that the slot is initialized.
        // * The caller must ensure that the returned reference is protected by a guard before the
        //   call and that the returned reference doesn't outlive said guard, or that
        //   synchronization is ensured externally.
        unsafe { slot.value_unchecked() }
    }
866
    /// # Safety
    ///
    /// `index` must be in bounds of the slots vector and the slot must have been initialized and
    /// must not be free.
    #[inline(always)]
    pub unsafe fn index_unchecked_mut(&mut self, index: u32) -> &mut T {
        // SAFETY: The caller must ensure that the index is in bounds.
        let slot = unsafe { self.slots.get_unchecked_mut(index as usize) };

        // No generation load is needed here: `&mut self` already guarantees exclusive access.
        // SAFETY:
        // * The mutable reference makes sure that access to the slot is synchronized.
        // * The caller must ensure that the slot is initialized.
        unsafe { slot.value_unchecked_mut() }
    }
881
882    /// # Panics
883    ///
884    /// Panics if `guard.global()` does not equal `self.global()`.
885    #[inline]
886    pub fn iter<'a>(&'a self, guard: impl Into<Cow<'a, epoch::Guard<'a>>>) -> Iter<'a, T> {
887        self.iter_inner(guard.into())
888    }
889
890    #[inline]
891    fn iter_inner<'a>(&'a self, guard: Cow<'a, epoch::Guard<'a>>) -> Iter<'a, T> {
892        assert_eq!(guard.global(), &self.global);
893
894        Iter {
895            slots: self.slots.iter().enumerate(),
896            guard,
897        }
898    }
899
900    /// # Safety
901    ///
902    /// You must ensure that the epoch is [pinned] before you call this method and that the
903    /// returned reference doesn't outlive all [`epoch::Guard`]s active on the thread, or that all
904    /// accesses to `self` are externally synchronized (for example through the use of a `Mutex` or
905    /// by being single-threaded).
906    #[inline]
907    pub unsafe fn iter_unprotected(&self) -> IterUnprotected<'_, T> {
908        IterUnprotected {
909            slots: self.slots.iter().enumerate(),
910        }
911    }
912
913    #[inline]
914    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
915        IterMut {
916            slots: self.slots.iter_mut().enumerate(),
917        }
918    }
919}
920
// We don't want to print the `_alignment` fields.
#[allow(clippy::missing_fields_in_debug)]
impl<T: fmt::Debug, C: Collector<T>> fmt::Debug for SlotMap<T, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Placeholder that stands in for the slots vector, which may be huge and is mostly
        // uninteresting to print in full.
        struct Slots;

        impl fmt::Debug for Slots {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.pad("[..]")
            }
        }

        // Formats a free-list as the list of slot indices it links together, starting at the
        // given head index.
        struct List<'a, T, C: Collector<T>>(&'a SlotMap<T, C>, u32);

        impl<T, C: Collector<T>> fmt::Debug for List<'_, T, C> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let mut head = self.1;
                let mut debug = f.debug_list();

                // NOTE(review): This traversal is not atomic with respect to concurrent inserts
                // and removes, so the printed list is only a best-effort snapshot.
                while head != NIL {
                    debug.entry(&head);

                    // SAFETY: We always push indices of existing slots into the free-lists and the
                    // slots vector never shrinks, therefore the index must have staid in bounds.
                    let slot = unsafe { self.0.slots.get_unchecked(head as usize) };

                    head = slot.next_free.load(Acquire);
                }

                debug.finish()
            }
        }

        // Formats one entry of the free-list queue from its packed state word: the low 32 bits
        // hold the list head, the high 32 bits the epoch.
        struct QueuedList<'a, T, C: Collector<T>>(&'a SlotMap<T, C>, u64);

        impl<T, C: Collector<T>> fmt::Debug for QueuedList<'_, T, C> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let state = self.1;
                let head = (state & 0xFFFF_FFFF) as u32;
                let epoch = (state >> 32) as u32;
                let mut debug = f.debug_struct("QueuedList");
                debug
                    .field("entries", &List(self.0, head))
                    .field("epoch", &epoch);

                debug.finish()
            }
        }

        let mut debug = f.debug_struct("SlotMap");
        debug
            .field("slots", &Slots)
            .field("len", &self.len)
            .field("global", &self.global)
            .field("free_list", &List(self, self.free_list.load(Acquire)))
            .field(
                "free_list_queue",
                &[
                    QueuedList(self, self.free_list_queue[0].load(Acquire)),
                    QueuedList(self, self.free_list_queue[1].load(Acquire)),
                ],
            );

        debug.finish()
    }
}
987
988impl<T, C: Collector<T>> Drop for SlotMap<T, C> {
989    fn drop(&mut self) {
990        if !core::mem::needs_drop::<T>() {
991            return;
992        }
993
994        for list in &mut self.free_list_queue {
995            let mut head = (*list.get_mut() & 0xFFFF_FFFF) as u32;
996
997            while head != NIL {
998                // SAFETY: We always push indices of existing slots into the free-lists and the
999                // slots vector never shrinks, therefore the index must have staid in bounds.
1000                let slot = unsafe { self.slots.get_unchecked_mut(head as usize) };
1001
1002                let ptr = slot.value.get_mut().as_mut_ptr();
1003
1004                // SAFETY: We can be certain that this slot has been initialized, since the only
1005                // way in which it could have been queued for freeing is in `SlotMap::remove` if
1006                // the slot was inserted before.
1007                unsafe { self.collector.collect(ptr) };
1008
1009                head = *slot.next_free.get_mut();
1010            }
1011        }
1012
1013        for slot in &mut self.slots {
1014            if *slot.generation.get_mut() & OCCUPIED_BIT != 0 {
1015                let ptr = slot.value.get_mut().as_mut_ptr();
1016
1017                // SAFETY:
1018                // * The mutable reference makes sure that access to the slot is synchronized.
1019                // * We checked that the slot is occupied, which means that it must have been
1020                //   initialized in `SlotMap::insert[_mut]`.
1021                unsafe { self.collector.collect(ptr) };
1022            }
1023        }
1024    }
1025}
1026
1027impl<'a, T, C: Collector<T>> IntoIterator for &'a mut SlotMap<T, C> {
1028    type Item = (SlotId, &'a mut T);
1029
1030    type IntoIter = IterMut<'a, T>;
1031
1032    #[inline]
1033    fn into_iter(self) -> Self::IntoIter {
1034        self.iter_mut()
1035    }
1036}
1037
/// The generation bit that is set while a slot is occupied, that is, holds an initialized value.
const OCCUPIED_BIT: u32 = 1 << TAG_BITS;
1039
/// A single storage cell of the [`SlotMap`].
struct Slot<T> {
    /// The slot's generation. The `OCCUPIED_BIT` is set while the slot holds an initialized
    /// value; the low `TAG_BITS` bits hold the user-provided tag.
    generation: AtomicU32,
    /// The index of the next slot in a free-list, or `NIL` if there is none.
    next_free: AtomicU32,
    /// The stored value; only initialized while the slot is occupied.
    value: UnsafeCell<MaybeUninit<T>>,
}
1045
// SAFETY: The user of `Slot` must ensure that access to `Slot::value` is synchronized. The
// `T: Sync` bound is required because shared `Slot`s hand out `&T` via `value_unchecked`.
unsafe impl<T: Sync> Sync for Slot<T> {}
1048
impl<T> Slot<T> {
    /// Creates a new occupied slot holding `value`, with the given `tag` in the low bits of the
    /// generation.
    fn new(value: T, tag: u32) -> Self {
        Slot {
            generation: AtomicU32::new(OCCUPIED_BIT | tag),
            next_free: AtomicU32::new(NIL),
            value: UnsafeCell::new(MaybeUninit::new(value)),
        }
    }

    /// # Safety
    ///
    /// The slot must have been initialized, and the caller must ensure that access to the slot's
    /// value is synchronized.
    unsafe fn value_unchecked(&self) -> &T {
        // SAFETY: The caller must ensure that access to the cell's inner value is synchronized.
        let value = unsafe { &*self.value.get() };

        // SAFETY: The caller must ensure that the slot has been initialized.
        unsafe { value.assume_init_ref() }
    }

    /// # Safety
    ///
    /// The slot must have been initialized. No further synchronization is required because
    /// `&mut self` guarantees exclusive access.
    unsafe fn value_unchecked_mut(&mut self) -> &mut T {
        // SAFETY: The caller must ensure that the slot has been initialized.
        unsafe { self.value.get_mut().assume_init_mut() }
    }
}
1071
/// An identifier for a value stored in a [`SlotMap`], consisting of the slot's index together
/// with the slot's generation at the time of insertion.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct SlotId {
    // The index of the slot in the map.
    index: u32,
    // The slot's generation; always has the `OCCUPIED_BIT` set and is therefore non-zero.
    generation: NonZeroU32,
}
1077
impl SlotId {
    /// A `SlotId` with the maximum possible index and generation, usable as a sentinel or
    /// placeholder value.
    pub const INVALID: Self = SlotId {
        index: u32::MAX,
        generation: NonZeroU32::MAX,
    };

    /// Creates a new `SlotId` from the given `index` and `generation`.
    ///
    /// # Panics
    ///
    /// Panics if the `OCCUPIED_BIT` of `generation` is not set.
    #[inline(always)]
    #[must_use]
    pub const fn new(index: u32, generation: u32) -> Self {
        assert!(generation & OCCUPIED_BIT != 0);

        // SAFETY: We checked that the `OCCUPIED_BIT` is set.
        unsafe { SlotId::new_unchecked(index, generation) }
    }

    /// # Safety
    ///
    /// The `OCCUPIED_BIT` of `generation` must be set.
    const unsafe fn new_unchecked(index: u32, generation: u32) -> Self {
        // SAFETY: The caller must ensure that the `OCCUPIED_BIT` of `generation` is set, which
        // means it must be non-zero.
        let generation = unsafe { NonZeroU32::new_unchecked(generation) };

        SlotId { index, generation }
    }

    /// Returns the index of the slot.
    #[inline(always)]
    #[must_use]
    pub const fn index(self) -> u32 {
        self.index
    }

    /// Returns the slot's generation, including the occupied bit and the tag bits.
    #[inline(always)]
    #[must_use]
    pub const fn generation(self) -> u32 {
        self.generation.get()
    }

    /// Returns the tag stored in the low bits of the generation.
    #[inline(always)]
    #[must_use]
    pub const fn tag(self) -> u32 {
        self.generation.get() & TAG_MASK
    }
}
1119
/// A shared reference to a value in a [`SlotMap`].
pub struct Ref<'a, T> {
    slot: &'a Slot<T>,
    // Never read, but held so that the epoch stays pinned for as long as the reference is alive.
    #[allow(dead_code)]
    guard: Cow<'a, epoch::Guard<'a>>,
}
1125
// Dereferences to the slot's value; valid for as long as the `Ref` (and thus its guard) lives.
impl<T> Deref for Ref<'_, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: The constructor of `Ref` must ensure that the inner value was initialized and
        // that said write was synchronized with and made visible here. As for future writes to the
        // inner value, we know that none can happen as long as our `Guard` wasn't dropped.
        unsafe { self.slot.value_unchecked() }
    }
}
1137
impl<T: fmt::Debug> fmt::Debug for Ref<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the referenced value, preserving all formatter flags.
        fmt::Debug::fmt(&**self, f)
    }
}
1143
impl<T: fmt::Display> fmt::Display for Ref<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the referenced value, preserving all formatter flags.
        fmt::Display::fmt(&**self, f)
    }
}
1149
/// An iterator over the occupied slots of a [`SlotMap`], created by [`SlotMap::iter`].
pub struct Iter<'a, T> {
    slots: iter::Enumerate<slice::Iter<'a, Slot<T>>>,
    // Cloned into each yielded `Ref` so that every reference carries its own guard.
    guard: Cow<'a, epoch::Guard<'a>>,
}
1154
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    // The internal state isn't useful to show, so we only print an opaque struct.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Iter").finish_non_exhaustive()
    }
}
1160
1161impl<'a, T> Iterator for Iter<'a, T> {
1162    type Item = (SlotId, Ref<'a, T>);
1163
1164    #[inline]
1165    fn next(&mut self) -> Option<Self::Item> {
1166        loop {
1167            let (index, slot) = self.slots.next()?;
1168            let generation = slot.generation.load(Acquire);
1169
1170            if generation & OCCUPIED_BIT != 0 {
1171                // Our capacity can never exceed `u32::MAX`.
1172                #[allow(clippy::cast_possible_truncation)]
1173                let index = index as u32;
1174
1175                // SAFETY: We checked that the occupied bit is set.
1176                let id = unsafe { SlotId::new_unchecked(index, generation) };
1177
1178                let guard = self.guard.clone();
1179
1180                // SAFETY:
1181                // * The `Acquire` ordering when loading the slot's generation synchronizes with the
1182                //   `Release` ordering in `SlotMap::insert`, making sure that the newly written
1183                //   value is visible here.
1184                // * We checked that the slot is occupied, which means that it must have been
1185                //   initialized in `SlotMap::insert[_mut]`.
1186                let r = unsafe { Ref { slot, guard } };
1187
1188                break Some((id, r));
1189            }
1190        }
1191    }
1192
1193    #[inline]
1194    fn size_hint(&self) -> (usize, Option<usize>) {
1195        (0, Some(self.slots.len()))
1196    }
1197}
1198
1199impl<T> DoubleEndedIterator for Iter<'_, T> {
1200    #[inline]
1201    fn next_back(&mut self) -> Option<Self::Item> {
1202        loop {
1203            let (index, slot) = self.slots.next_back()?;
1204            let generation = slot.generation.load(Acquire);
1205
1206            if generation & OCCUPIED_BIT != 0 {
1207                // Our capacity can never exceed `u32::MAX`.
1208                #[allow(clippy::cast_possible_truncation)]
1209                let index = index as u32;
1210
1211                // SAFETY: We checked that the occupied bit is set.
1212                let id = unsafe { SlotId::new_unchecked(index, generation) };
1213
1214                let guard = self.guard.clone();
1215
1216                // SAFETY:
1217                // * The `Acquire` ordering when loading the slot's generation synchronizes with the
1218                //   `Release` ordering in `SlotMap::insert`, making sure that the newly written
1219                //   value is visible here.
1220                // * We checked that the slot is occupied, which means that it must have been
1221                //   initialized in `SlotMap::insert[_mut]`.
1222                let r = unsafe { Ref { slot, guard } };
1223
1224                break Some((id, r));
1225            }
1226        }
1227    }
1228}
1229
// Once the underlying slice iterator is exhausted, `next` keeps returning `None`, so this
// iterator is fused.
impl<T> FusedIterator for Iter<'_, T> {}
1231
/// An iterator over the occupied slots of a [`SlotMap`], created by
/// [`SlotMap::iter_unprotected`]. Unlike [`Iter`], it holds no guard of its own.
pub struct IterUnprotected<'a, T> {
    slots: iter::Enumerate<slice::Iter<'a, Slot<T>>>,
}
1235
impl<T: fmt::Debug> fmt::Debug for IterUnprotected<'_, T> {
    // The internal state isn't useful to show, so we only print an opaque struct.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IterUnprotected").finish_non_exhaustive()
    }
}
1241
1242impl<'a, T> Iterator for IterUnprotected<'a, T> {
1243    type Item = (SlotId, &'a T);
1244
1245    #[inline]
1246    fn next(&mut self) -> Option<Self::Item> {
1247        loop {
1248            let (index, slot) = self.slots.next()?;
1249            let generation = slot.generation.load(Acquire);
1250
1251            if generation & OCCUPIED_BIT != 0 {
1252                // Our capacity can never exceed `u32::MAX`.
1253                #[allow(clippy::cast_possible_truncation)]
1254                let index = index as u32;
1255
1256                // SAFETY: We checked that the occupied bit is set.
1257                let id = unsafe { SlotId::new_unchecked(index, generation) };
1258
1259                // SAFETY:
1260                // * The `Acquire` ordering when loading the slot's generation synchronizes with the
1261                //   `Release` ordering in `SlotMap::insert`, making sure that the newly written
1262                //   value is visible here.
1263                // * We checked that the slot is occupied, which means that it must have been
1264                //   initialized in `SlotMap::insert[_mut]`.
1265                // * The caller of `SlotMap::iter_unprotected` must ensure that the returned
1266                //   iterator is protected by a guard before the call and that the returned iterator
1267                //   doesn't outlive said guard, or that synchronization is ensured externally.
1268                let r = unsafe { slot.value_unchecked() };
1269
1270                break Some((id, r));
1271            }
1272        }
1273    }
1274
1275    #[inline]
1276    fn size_hint(&self) -> (usize, Option<usize>) {
1277        (0, Some(self.slots.len()))
1278    }
1279}
1280
1281impl<T> DoubleEndedIterator for IterUnprotected<'_, T> {
1282    #[inline]
1283    fn next_back(&mut self) -> Option<Self::Item> {
1284        loop {
1285            let (index, slot) = self.slots.next_back()?;
1286            let generation = slot.generation.load(Acquire);
1287
1288            if generation & OCCUPIED_BIT != 0 {
1289                // Our capacity can never exceed `u32::MAX`.
1290                #[allow(clippy::cast_possible_truncation)]
1291                let index = index as u32;
1292
1293                // SAFETY: We checked that the occupied bit is set.
1294                let id = unsafe { SlotId::new_unchecked(index, generation) };
1295
1296                // SAFETY:
1297                // * The `Acquire` ordering when loading the slot's generation synchronizes with the
1298                //   `Release` ordering in `SlotMap::insert`, making sure that the newly written
1299                //   value is visible here.
1300                // * We checked that the slot is occupied, which means that it must have been
1301                //   initialized in `SlotMap::insert[_mut]`.
1302                // * The caller of `SlotMap::iter_unprotected` must ensure that the returned
1303                //   iterator is protected by a guard before the call and that the returned iterator
1304                //   doesn't outlive said guard, or that synchronization is ensured externally.
1305                let r = unsafe { slot.value_unchecked() };
1306
1307                break Some((id, r));
1308            }
1309        }
1310    }
1311}
1312
// Once the underlying slice iterator is exhausted, `next` keeps returning `None`, so this
// iterator is fused.
impl<T> FusedIterator for IterUnprotected<'_, T> {}
1314
/// An iterator over the occupied slots of a [`SlotMap`], created by [`SlotMap::iter_mut`],
/// yielding mutable references. No guard is needed since the map is borrowed mutably.
pub struct IterMut<'a, T> {
    slots: iter::Enumerate<slice::IterMut<'a, Slot<T>>>,
}
1318
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
    // The internal state isn't useful to show, so we only print an opaque struct.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IterMut").finish_non_exhaustive()
    }
}
1324
1325impl<'a, T> Iterator for IterMut<'a, T> {
1326    type Item = (SlotId, &'a mut T);
1327
1328    #[inline]
1329    fn next(&mut self) -> Option<Self::Item> {
1330        loop {
1331            let (index, slot) = self.slots.next()?;
1332            let generation = *slot.generation.get_mut();
1333
1334            if generation & OCCUPIED_BIT != 0 {
1335                // Our capacity can never exceed `u32::MAX`.
1336                #[allow(clippy::cast_possible_truncation)]
1337                let index = index as u32;
1338
1339                // SAFETY: We checked that the `OCCUPIED_BIT` is set.
1340                let id = unsafe { SlotId::new_unchecked(index, generation) };
1341
1342                // SAFETY: We checked that the slot is occupied, which means that it must have been
1343                // initialized in `SlotMap::insert[_mut]`.
1344                let r = unsafe { slot.value_unchecked_mut() };
1345
1346                break Some((id, r));
1347            }
1348        }
1349    }
1350
1351    #[inline]
1352    fn size_hint(&self) -> (usize, Option<usize>) {
1353        (0, Some(self.slots.len()))
1354    }
1355}
1356
1357impl<T> DoubleEndedIterator for IterMut<'_, T> {
1358    #[inline]
1359    fn next_back(&mut self) -> Option<Self::Item> {
1360        loop {
1361            let (index, slot) = self.slots.next_back()?;
1362            let generation = *slot.generation.get_mut();
1363
1364            if generation & OCCUPIED_BIT != 0 {
1365                // Our capacity can never exceed `u32::MAX`.
1366                #[allow(clippy::cast_possible_truncation)]
1367                let index = index as u32;
1368
1369                // SAFETY: We checked that the `OCCUPIED_BIT` is set.
1370                let id = unsafe { SlotId::new_unchecked(index, generation) };
1371
1372                // SAFETY: We checked that the slot is occupied, which means that it must have been
1373                // initialized in `SlotMap::insert[_mut]`.
1374                let r = unsafe { slot.value_unchecked_mut() };
1375
1376                break Some((id, r));
1377            }
1378        }
1379    }
1380}
1381
// Once the underlying slice iterator is exhausted, `next` keeps returning `None`, so this
// iterator is fused.
impl<T> FusedIterator for IterMut<'_, T> {}
1383
/// A reclamation hook: determines what happens to a value once the [`SlotMap`] is ready to free
/// it.
pub trait Collector<T> {
    /// # Safety
    ///
    /// This function has the same safety preconditions and semantics as [`ptr::drop_in_place`]. It
    /// must be safe to drop the value pointed to by `ptr`.
    ///
    /// [`ptr::drop_in_place`]: core::ptr::drop_in_place
    unsafe fn collect(&self, ptr: *mut T);
}

/// The default [`Collector`], which simply drops collected values in place.
#[derive(Debug)]
pub struct DefaultCollector;

impl<T> Collector<T> for DefaultCollector {
    #[inline(always)]
    unsafe fn collect(&self, ptr: *mut T) {
        // SAFETY: The caller must ensure that it is safe to drop the value.
        unsafe { core::ptr::drop_in_place(ptr) }
    }
}
1404
/// A zero-sized type with an alignment of 128 bytes. Interspersing these between the fields of
/// [`SlotMap`] pushes the surrounding fields onto different cache lines, presumably to prevent
/// false sharing between the hot atomics — 128 bytes covers the cache-line (and prefetch-pair)
/// size of common architectures.
#[repr(align(128))]
struct CacheAligned;
1407
/// The maximum backoff exponent: we never spin more than `1 << SPIN_LIMIT` iterations at a time.
const SPIN_LIMIT: u32 = 6;

/// Exponential backoff, used to reduce contention when spinning on atomic operations.
struct Backoff {
    // The current backoff step; the spin count grows exponentially with each step.
    step: u32,
}

impl Backoff {
    /// Creates a new `Backoff` starting at step 0 (a single spin iteration).
    fn new() -> Self {
        Backoff { step: 0 }
    }

    /// Spins for `1 << step` iterations (clamped to `1 << SPIN_LIMIT`) and advances the step.
    fn spin(&mut self) {
        // Clamp the exponent so that we never spin more than `1 << SPIN_LIMIT` iterations, even
        // though `step` itself is allowed to advance one past the limit; without the clamp the
        // final steps would spin `1 << (SPIN_LIMIT + 1)` iterations.
        for _ in 0..1 << self.step.min(SPIN_LIMIT) {
            hint::spin_loop();
        }

        if self.step <= SPIN_LIMIT {
            self.step += 1;
        }
    }
}
1429
1430#[cfg(test)]
1431mod tests {
1432    use self::epoch::PINNINGS_BETWEEN_ADVANCE;
1433    use super::*;
1434    use std::thread;
1435
    // Checks insertion, lookup, removal, and slot reuse through the shared (guarded) API.
    #[test]
    fn basic_usage1() {
        let map = SlotMap::new(10);
        let guard = &map.global().register_local().into_inner().pin();

        let x = map.insert(69, guard);
        let y = map.insert(42, guard);

        assert_eq!(map.get(x, guard).as_deref(), Some(&69));
        assert_eq!(map.get(y, guard).as_deref(), Some(&42));

        map.remove(x, guard);

        let x2 = map.insert(12, guard);

        assert_eq!(map.get(x2, guard).as_deref(), Some(&12));
        // The stale ID must not observe the value now stored in the reused slot.
        assert_eq!(map.get(x, guard).as_deref(), None);

        map.remove(y, guard);
        map.remove(x2, guard);

        assert_eq!(map.get(y, guard).as_deref(), None);
        assert_eq!(map.get(x2, guard).as_deref(), None);
    }
1460
    // Exercises interleaved insertions and removals so that several slots get reused.
    #[test]
    fn basic_usage2() {
        let map = SlotMap::new(10);
        let guard = &map.global().register_local().into_inner().pin();

        let x = map.insert(1, guard);
        let y = map.insert(2, guard);
        let z = map.insert(3, guard);

        assert_eq!(map.get(x, guard).as_deref(), Some(&1));
        assert_eq!(map.get(y, guard).as_deref(), Some(&2));
        assert_eq!(map.get(z, guard).as_deref(), Some(&3));

        map.remove(y, guard);

        let y2 = map.insert(20, guard);

        assert_eq!(map.get(y2, guard).as_deref(), Some(&20));
        assert_eq!(map.get(y, guard).as_deref(), None);

        map.remove(x, guard);
        map.remove(z, guard);

        let x2 = map.insert(10, guard);

        assert_eq!(map.get(x2, guard).as_deref(), Some(&10));
        assert_eq!(map.get(x, guard).as_deref(), None);

        let z2 = map.insert(30, guard);

        assert_eq!(map.get(z2, guard).as_deref(), Some(&30));
        assert_eq!(map.get(x, guard).as_deref(), None);

        map.remove(x2, guard);

        assert_eq!(map.get(x2, guard).as_deref(), None);

        map.remove(y2, guard);
        map.remove(z2, guard);

        assert_eq!(map.get(y2, guard).as_deref(), None);
        assert_eq!(map.get(z2, guard).as_deref(), None);
    }
1504
    // Like basic_usage2, but removes and reinserts multiple slots before any lookups, exercising
    // a longer free-list.
    #[test]
    fn basic_usage3() {
        let map = SlotMap::new(10);
        let guard = &map.global().register_local().into_inner().pin();

        let x = map.insert(1, guard);
        let y = map.insert(2, guard);

        assert_eq!(map.get(x, guard).as_deref(), Some(&1));
        assert_eq!(map.get(y, guard).as_deref(), Some(&2));

        let z = map.insert(3, guard);

        assert_eq!(map.get(z, guard).as_deref(), Some(&3));

        map.remove(x, guard);
        map.remove(z, guard);

        let z2 = map.insert(30, guard);
        let x2 = map.insert(10, guard);

        assert_eq!(map.get(x2, guard).as_deref(), Some(&10));
        assert_eq!(map.get(z2, guard).as_deref(), Some(&30));
        assert_eq!(map.get(x, guard).as_deref(), None);
        assert_eq!(map.get(z, guard).as_deref(), None);

        map.remove(x2, guard);
        map.remove(y, guard);
        map.remove(z2, guard);

        assert_eq!(map.get(x2, guard).as_deref(), None);
        assert_eq!(map.get(y, guard).as_deref(), None);
        assert_eq!(map.get(z2, guard).as_deref(), None);
    }
1539
    // Same scenario as basic_usage1, but through the `&mut self` API, which needs no guards.
    #[test]
    fn basic_usage_mut1() {
        let mut map = SlotMap::new(10);

        let x = map.insert_mut(69);
        let y = map.insert_mut(42);

        assert_eq!(map.get_mut(x), Some(&mut 69));
        assert_eq!(map.get_mut(y), Some(&mut 42));

        map.remove_mut(x);

        let x2 = map.insert_mut(12);

        assert_eq!(map.get_mut(x2), Some(&mut 12));
        assert_eq!(map.get_mut(x), None);

        map.remove_mut(y);
        map.remove_mut(x2);

        assert_eq!(map.get_mut(y), None);
        assert_eq!(map.get_mut(x2), None);
    }
1563
    // Same scenario as basic_usage2, but through the `&mut self` API, which needs no guards.
    #[test]
    fn basic_usage_mut2() {
        let mut map = SlotMap::new(10);

        let x = map.insert_mut(1);
        let y = map.insert_mut(2);
        let z = map.insert_mut(3);

        assert_eq!(map.get_mut(x), Some(&mut 1));
        assert_eq!(map.get_mut(y), Some(&mut 2));
        assert_eq!(map.get_mut(z), Some(&mut 3));

        map.remove_mut(y);

        let y2 = map.insert_mut(20);

        assert_eq!(map.get_mut(y2), Some(&mut 20));
        assert_eq!(map.get_mut(y), None);

        map.remove_mut(x);
        map.remove_mut(z);

        let x2 = map.insert_mut(10);

        assert_eq!(map.get_mut(x2), Some(&mut 10));
        assert_eq!(map.get_mut(x), None);

        let z2 = map.insert_mut(30);

        assert_eq!(map.get_mut(z2), Some(&mut 30));
        assert_eq!(map.get_mut(x), None);

        map.remove_mut(x2);

        assert_eq!(map.get_mut(x2), None);

        map.remove_mut(y2);
        map.remove_mut(z2);

        assert_eq!(map.get_mut(y2), None);
        assert_eq!(map.get_mut(z2), None);
    }
1606
    // Same scenario as basic_usage3, but through the `&mut self` API, which needs no guards.
    #[test]
    fn basic_usage_mut3() {
        let mut map = SlotMap::new(10);

        let x = map.insert_mut(1);
        let y = map.insert_mut(2);

        assert_eq!(map.get_mut(x), Some(&mut 1));
        assert_eq!(map.get_mut(y), Some(&mut 2));

        let z = map.insert_mut(3);

        assert_eq!(map.get_mut(z), Some(&mut 3));

        map.remove_mut(x);
        map.remove_mut(z);

        let z2 = map.insert_mut(30);
        let x2 = map.insert_mut(10);

        assert_eq!(map.get_mut(x2), Some(&mut 10));
        assert_eq!(map.get_mut(z2), Some(&mut 30));
        assert_eq!(map.get_mut(x), None);
        assert_eq!(map.get_mut(z), None);

        map.remove_mut(x2);
        map.remove_mut(y);
        map.remove_mut(z2);

        assert_eq!(map.get_mut(x2), None);
        assert_eq!(map.get_mut(y), None);
        assert_eq!(map.get_mut(z2), None);
    }
1640
    // Checks that `iter` yields only occupied slots, in index order, across removals and reuse.
    #[test]
    fn iter1() {
        let map = SlotMap::new(10);
        let guard = &map.global().register_local().into_inner().pin();

        let x = map.insert(1, guard);
        let _ = map.insert(2, guard);
        let y = map.insert(3, guard);

        let mut iter = map.iter(guard);

        assert_eq!(*iter.next().unwrap().1, 1);
        assert_eq!(*iter.next().unwrap().1, 2);
        assert_eq!(*iter.next().unwrap().1, 3);
        assert!(iter.next().is_none());

        map.remove(x, guard);
        map.remove(y, guard);

        let mut iter = map.iter(guard);

        assert_eq!(*iter.next().unwrap().1, 2);
        assert!(iter.next().is_none());

        map.insert(3, guard);
        map.insert(1, guard);

        let mut iter = map.iter(guard);

        assert_eq!(*iter.next().unwrap().1, 2);
        assert_eq!(*iter.next().unwrap().1, 3);
        assert_eq!(*iter.next().unwrap().1, 1);
        assert!(iter.next().is_none());
    }
1675
1676    #[test]
1677    fn iter2() {
1678        let map = SlotMap::new(10);
1679        let guard = &map.global().register_local().into_inner().pin();
1680
1681        let x = map.insert(1, guard);
1682        let y = map.insert(2, guard);
1683        let z = map.insert(3, guard);
1684
1685        map.remove(x, guard);
1686
1687        let mut iter = map.iter(guard);
1688
1689        assert_eq!(*iter.next().unwrap().1, 2);
1690        assert_eq!(*iter.next().unwrap().1, 3);
1691        assert!(iter.next().is_none());
1692
1693        map.remove(y, guard);
1694
1695        let mut iter = map.iter(guard);
1696
1697        assert_eq!(*iter.next().unwrap().1, 3);
1698        assert!(iter.next().is_none());
1699
1700        map.remove(z, guard);
1701
1702        let mut iter = map.iter(guard);
1703
1704        assert!(iter.next().is_none());
1705    }
1706
1707    #[test]
1708    fn iter3() {
1709        let map = SlotMap::new(10);
1710        let guard = &map.global().register_local().into_inner().pin();
1711
1712        let _ = map.insert(1, guard);
1713        let x = map.insert(2, guard);
1714
1715        let mut iter = map.iter(guard);
1716
1717        assert_eq!(*iter.next().unwrap().1, 1);
1718        assert_eq!(*iter.next().unwrap().1, 2);
1719        assert!(iter.next().is_none());
1720
1721        map.remove(x, guard);
1722
1723        let x = map.insert(2, guard);
1724        let _ = map.insert(3, guard);
1725        let y = map.insert(4, guard);
1726
1727        map.remove(y, guard);
1728
1729        let mut iter = map.iter(guard);
1730
1731        assert_eq!(*iter.next().unwrap().1, 1);
1732        assert_eq!(*iter.next().unwrap().1, 2);
1733        assert_eq!(*iter.next().unwrap().1, 3);
1734        assert!(iter.next().is_none());
1735
1736        map.remove(x, guard);
1737
1738        let mut iter = map.iter(guard);
1739
1740        assert_eq!(*iter.next().unwrap().1, 1);
1741        assert_eq!(*iter.next().unwrap().1, 3);
1742        assert!(iter.next().is_none());
1743    }
1744
1745    #[test]
1746    fn iter_mut1() {
1747        let mut map = SlotMap::new(10);
1748
1749        let x = map.insert_mut(1);
1750        let _ = map.insert_mut(2);
1751        let y = map.insert_mut(3);
1752
1753        let mut iter = map.iter_mut();
1754
1755        assert_eq!(*iter.next().unwrap().1, 1);
1756        assert_eq!(*iter.next().unwrap().1, 2);
1757        assert_eq!(*iter.next().unwrap().1, 3);
1758        assert!(iter.next().is_none());
1759
1760        map.remove_mut(x);
1761        map.remove_mut(y);
1762
1763        let mut iter = map.iter_mut();
1764
1765        assert_eq!(*iter.next().unwrap().1, 2);
1766        assert!(iter.next().is_none());
1767
1768        map.insert_mut(3);
1769        map.insert_mut(1);
1770
1771        let mut iter = map.iter_mut();
1772
1773        assert_eq!(*iter.next().unwrap().1, 1);
1774        assert_eq!(*iter.next().unwrap().1, 2);
1775        assert_eq!(*iter.next().unwrap().1, 3);
1776        assert!(iter.next().is_none());
1777    }
1778
1779    #[test]
1780    fn iter_mut2() {
1781        let mut map = SlotMap::new(10);
1782
1783        let x = map.insert_mut(1);
1784        let y = map.insert_mut(2);
1785        let z = map.insert_mut(3);
1786
1787        map.remove_mut(x);
1788
1789        let mut iter = map.iter_mut();
1790
1791        assert_eq!(*iter.next().unwrap().1, 2);
1792        assert_eq!(*iter.next().unwrap().1, 3);
1793        assert!(iter.next().is_none());
1794
1795        map.remove_mut(y);
1796
1797        let mut iter = map.iter_mut();
1798
1799        assert_eq!(*iter.next().unwrap().1, 3);
1800        assert!(iter.next().is_none());
1801
1802        map.remove_mut(z);
1803
1804        let mut iter = map.iter_mut();
1805
1806        assert!(iter.next().is_none());
1807    }
1808
1809    #[test]
1810    fn iter_mut3() {
1811        let mut map = SlotMap::new(10);
1812
1813        let _ = map.insert_mut(1);
1814        let x = map.insert_mut(2);
1815
1816        let mut iter = map.iter_mut();
1817
1818        assert_eq!(*iter.next().unwrap().1, 1);
1819        assert_eq!(*iter.next().unwrap().1, 2);
1820        assert!(iter.next().is_none());
1821
1822        map.remove_mut(x);
1823
1824        let x = map.insert_mut(2);
1825        let _ = map.insert_mut(3);
1826        let y = map.insert_mut(4);
1827
1828        map.remove_mut(y);
1829
1830        let mut iter = map.iter_mut();
1831
1832        assert_eq!(*iter.next().unwrap().1, 1);
1833        assert_eq!(*iter.next().unwrap().1, 2);
1834        assert_eq!(*iter.next().unwrap().1, 3);
1835        assert!(iter.next().is_none());
1836
1837        map.remove_mut(x);
1838
1839        let mut iter = map.iter_mut();
1840
1841        assert_eq!(*iter.next().unwrap().1, 1);
1842        assert_eq!(*iter.next().unwrap().1, 3);
1843        assert!(iter.next().is_none());
1844    }
1845
1846    #[test]
1847    fn reusing_slots_mut1() {
1848        let mut map = SlotMap::new(10);
1849
1850        let x = map.insert_mut(0);
1851        let y = map.insert_mut(0);
1852
1853        map.remove_mut(y);
1854
1855        let y2 = map.insert_mut(0);
1856        assert_eq!(y2.index, y.index);
1857        assert_ne!(y2.generation, y.generation);
1858
1859        map.remove_mut(x);
1860
1861        let x2 = map.insert_mut(0);
1862        assert_eq!(x2.index, x.index);
1863        assert_ne!(x2.generation, x.generation);
1864
1865        map.remove_mut(y2);
1866        map.remove_mut(x2);
1867    }
1868
1869    #[test]
1870    fn reusing_slots_mut2() {
1871        let mut map = SlotMap::new(10);
1872
1873        let x = map.insert_mut(0);
1874
1875        map.remove_mut(x);
1876
1877        let x2 = map.insert_mut(0);
1878        assert_eq!(x.index, x2.index);
1879        assert_ne!(x.generation, x2.generation);
1880
1881        let y = map.insert_mut(0);
1882        let z = map.insert_mut(0);
1883
1884        map.remove_mut(y);
1885        map.remove_mut(x2);
1886
1887        let x3 = map.insert_mut(0);
1888        let y2 = map.insert_mut(0);
1889        assert_eq!(x3.index, x2.index);
1890        assert_ne!(x3.generation, x2.generation);
1891        assert_eq!(y2.index, y.index);
1892        assert_ne!(y2.generation, y.generation);
1893
1894        map.remove_mut(x3);
1895        map.remove_mut(y2);
1896        map.remove_mut(z);
1897    }
1898
1899    #[test]
1900    fn reusing_slots_mut3() {
1901        let mut map = SlotMap::new(10);
1902
1903        let x = map.insert_mut(0);
1904        let y = map.insert_mut(0);
1905
1906        map.remove_mut(x);
1907        map.remove_mut(y);
1908
1909        let y2 = map.insert_mut(0);
1910        let x2 = map.insert_mut(0);
1911        let z = map.insert_mut(0);
1912        assert_eq!(x2.index, x.index);
1913        assert_ne!(x2.generation, x.generation);
1914        assert_eq!(y2.index, y.index);
1915        assert_ne!(y2.generation, y.generation);
1916
1917        map.remove_mut(x2);
1918        map.remove_mut(z);
1919        map.remove_mut(y2);
1920
1921        let y3 = map.insert_mut(0);
1922        let z2 = map.insert_mut(0);
1923        let x3 = map.insert_mut(0);
1924        assert_eq!(y3.index, y2.index);
1925        assert_ne!(y3.generation, y2.generation);
1926        assert_eq!(z2.index, z.index);
1927        assert_ne!(z2.generation, z.generation);
1928        assert_eq!(x3.index, x2.index);
1929        assert_ne!(x3.generation, x2.generation);
1930
1931        map.remove_mut(x3);
1932        map.remove_mut(y3);
1933        map.remove_mut(z2);
1934    }
1935
1936    #[test]
1937    fn get_many_mut() {
1938        let mut map = SlotMap::new(3);
1939
1940        let x = map.insert_mut(1);
1941        let y = map.insert_mut(2);
1942        let z = map.insert_mut(3);
1943
1944        assert_eq!(map.get_many_mut([x, y]), Some([&mut 1, &mut 2]));
1945        assert_eq!(map.get_many_mut([y, z]), Some([&mut 2, &mut 3]));
1946        assert_eq!(map.get_many_mut([z, x]), Some([&mut 3, &mut 1]));
1947
1948        assert_eq!(map.get_many_mut([x, y, z]), Some([&mut 1, &mut 2, &mut 3]));
1949        assert_eq!(map.get_many_mut([z, y, x]), Some([&mut 3, &mut 2, &mut 1]));
1950
1951        assert_eq!(map.get_many_mut([x, x]), None);
1952        assert_eq!(map.get_many_mut([x, SlotId::new(3, OCCUPIED_BIT)]), None);
1953
1954        map.remove_mut(y);
1955
1956        assert_eq!(map.get_many_mut([x, z]), Some([&mut 1, &mut 3]));
1957
1958        assert_eq!(map.get_many_mut([y]), None);
1959        assert_eq!(map.get_many_mut([x, y]), None);
1960        assert_eq!(map.get_many_mut([y, z]), None);
1961
1962        let y = map.insert_mut(2);
1963
1964        assert_eq!(map.get_many_mut([x, y, z]), Some([&mut 1, &mut 2, &mut 3]));
1965
1966        map.remove_mut(x);
1967        map.remove_mut(z);
1968
1969        assert_eq!(map.get_many_mut([y]), Some([&mut 2]));
1970
1971        assert_eq!(map.get_many_mut([x]), None);
1972        assert_eq!(map.get_many_mut([z]), None);
1973
1974        map.remove_mut(y);
1975
1976        assert_eq!(map.get_many_mut([]), Some([]));
1977    }
1978
1979    #[test]
1980    fn tagged() {
1981        let map = SlotMap::new(1);
1982        let guard = &map.global().register_local().into_inner().pin();
1983
1984        let x = map.insert_with_tag(42, 1, guard);
1985        assert_eq!(x.generation() & TAG_MASK, 1);
1986        assert_eq!(map.get(x, guard).as_deref(), Some(&42));
1987    }
1988
1989    #[test]
1990    fn tagged_mut() {
1991        let mut map = SlotMap::new(1);
1992
1993        let x = map.insert_with_tag_mut(42, 1);
1994        assert_eq!(x.generation() & TAG_MASK, 1);
1995        assert_eq!(map.get_mut(x), Some(&mut 42));
1996    }
1997
    // TODO: Testing concurrent generational collections is the most massive pain in the ass. We
    // aren't testing the actual implementations but rather ones that don't take the generation into
    // account because of that.

    /// Number of operations each stress test performs. Heavily reduced under Miri, which
    /// interprets the code and would otherwise make these tests take far too long.
    #[cfg(not(miri))]
    const ITERATIONS: u32 = 1_000_000;
    #[cfg(miri)]
    const ITERATIONS: u32 = 1_000;
2006
2007    #[test]
2008    fn multi_threaded1() {
2009        const THREADS: u32 = 2;
2010
2011        let map = SlotMap::new(ITERATIONS);
2012
2013        thread::scope(|s| {
2014            let inserter = || {
2015                let local = map.global().register_local();
2016
2017                for _ in 0..ITERATIONS / THREADS {
2018                    map.insert(0, local.pin());
2019                }
2020            };
2021
2022            for _ in 0..THREADS {
2023                s.spawn(inserter);
2024            }
2025        });
2026
2027        assert_eq!(map.len(), ITERATIONS);
2028
2029        thread::scope(|s| {
2030            let remover = || {
2031                let local = map.global().register_local();
2032
2033                for index in 0..ITERATIONS {
2034                    let _ = map.remove(SlotId::new(index, OCCUPIED_BIT), local.pin());
2035                }
2036            };
2037
2038            for _ in 0..THREADS {
2039                s.spawn(remover);
2040            }
2041        });
2042
2043        assert_eq!(map.len(), 0);
2044    }
2045
2046    #[test]
2047    fn multi_threaded2() {
2048        const CAPACITY: u32 = PINNINGS_BETWEEN_ADVANCE as u32 * 3;
2049
2050        // TODO: Why does ThreadSanitizer need more than `CAPACITY` slots, but only in CI???
2051        let map = SlotMap::new(ITERATIONS / 2);
2052
2053        thread::scope(|s| {
2054            let insert_remover = || {
2055                let local = map.global().register_local();
2056
2057                for _ in 0..ITERATIONS / 6 {
2058                    let x = map.insert(0, local.pin());
2059                    let y = map.insert(0, local.pin());
2060                    map.remove(y, local.pin());
2061                    let z = map.insert(0, local.pin());
2062                    map.remove(x, local.pin());
2063                    map.remove(z, local.pin());
2064                }
2065            };
2066            let iterator = || {
2067                let local = map.global().register_local();
2068
2069                for _ in 0..ITERATIONS / CAPACITY * 2 {
2070                    for index in 0..CAPACITY {
2071                        if let Some(value) = map.index(index, local.pin()) {
2072                            let _ = *value;
2073                        }
2074                    }
2075                }
2076            };
2077
2078            s.spawn(iterator);
2079            s.spawn(iterator);
2080            s.spawn(iterator);
2081            s.spawn(insert_remover);
2082        });
2083    }
2084
2085    #[test]
2086    fn multi_threaded3() {
2087        let map = SlotMap::new(ITERATIONS / 10);
2088
2089        thread::scope(|s| {
2090            let inserter = || {
2091                let local = map.global().register_local();
2092
2093                for i in 0..ITERATIONS {
2094                    if i % 10 == 0 {
2095                        map.insert(0, local.pin());
2096                    } else {
2097                        thread::yield_now();
2098                    }
2099                }
2100            };
2101            let remover = || {
2102                let local = map.global().register_local();
2103
2104                for _ in 0..ITERATIONS {
2105                    map.remove_index(0, local.pin());
2106                }
2107            };
2108            let getter = || {
2109                let local = map.global().register_local();
2110
2111                for _ in 0..ITERATIONS {
2112                    if let Some(value) = map.index(0, local.pin()) {
2113                        let _ = *value;
2114                    }
2115                }
2116            };
2117
2118            s.spawn(getter);
2119            s.spawn(getter);
2120            s.spawn(getter);
2121            s.spawn(getter);
2122            s.spawn(remover);
2123            s.spawn(remover);
2124            s.spawn(remover);
2125            s.spawn(inserter);
2126        });
2127    }
2128}