rust_lockless_slotmap/lib.rs
//! # Lockless Slotmap
//!
//! Lockless Slotmap is a lockless implementation of a slotmap: a data structure that
//! provides stable references to its elements together with O(1) insertion and O(1)
//! removal.
//!
//! This implementation is (mostly) lockless, meaning that it can be used in high-performance
//! environments where locks are not desired. The only place where a lock is taken is when the
//! slotmap becomes saturated and a new block needs to be allocated. Because block sizes grow
//! geometrically, this should be a rare occurrence.
//!
//! # Limitations
//!
//! Each slot in the slotmap can only be reused so many times. 16-bit generation numbers are kept
//! as a guard against ABA problems, which means that each slot can only be reused 65536 times
//! (after which the slot is considered "dead" and will not be reused, which can lead to slowly
//! increasing memory usage). Therefore, for very long-running applications with high insertion
//! and removal rates, this implementation may not be suitable.
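//!
//! You can observe how many slots have "died" this way through
//! [`LocklessSlotmap::generation_limit_reached`]; a minimal sketch:
//!
//! ```
//! use rust_lockless_slotmap::LocklessSlotmap;
//! use parking_lot::RawRwLock;
//!
//! let slotmap: LocklessSlotmap<u8, RawRwLock> = LocklessSlotmap::new();
//! // No slot has exhausted its generations on a fresh slotmap.
//! assert_eq!(slotmap.generation_limit_reached(), 0);
//! ```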
//!
//! # Example
//!
//! ```
//! use rust_lockless_slotmap::LocklessSlotmap;
//! use std::sync::Arc;
//! use parking_lot::RawRwLock;
//!
//! let slotmap: Arc<LocklessSlotmap<usize, RawRwLock>> = Arc::new(LocklessSlotmap::new());
//! let ticket = slotmap.insert(42);
//!
//! let slotmap_clone = Arc::clone(&slotmap);
//! let handle = std::thread::spawn(move || {
//!     let entry = slotmap_clone.get(ticket).unwrap();
//!     assert_eq!(*entry, 42);
//!     drop(entry); // Deadlock if this is not dropped
//!
//!     slotmap_clone.insert(45);
//!     slotmap_clone.erase(ticket);
//! });
//!
//! handle.join().unwrap();
//!
//! assert_eq!(slotmap.len(), 1);
//! assert!(slotmap.get(ticket).is_none());
//! ```

use std::{cell::UnsafeCell, mem::MaybeUninit, sync::atomic::AtomicUsize};

use utils::{AtomicOptionU32, AtomicOptionU64, AtomicState, State};

#[cfg(test)]
mod tests;
mod utils;

/// Maximum number of elements per allocation block.
///
/// [`SlotmapTicket`] provides stable references to elements in the slotmap, so
/// dynamic resizing of the slotmap is not possible. To achieve this, the
/// slotmap is divided into blocks, each block having a fixed number of elements.
///
/// The slotmap can only grow by adding new blocks. The number of elements per block
/// starts at 64 (default) and can grow up to the limit defined by [`MAX_ELEMENTS_PER_BLOCK`].
///
/// The maximum theoretical number of elements per block is 2^32 (due to constraints on
/// the [`SlotmapTicket`] structure).
pub const MAX_ELEMENTS_PER_BLOCK: usize = 32768;
const _: () = assert!(MAX_ELEMENTS_PER_BLOCK < u32::MAX as usize, "MAX_ELEMENTS_PER_BLOCK must be less than u32::MAX (constraint due to AtomicState)");

/// Holds a ticket (think of it as a reference) to an element stored in the slotmap.
///
/// The ticket is used to access the element stored in the slotmap. The ticket is
/// created when the element is inserted into the slotmap and is used to access
/// the element until the element is removed from the slotmap.
///
/// Notice that the slotmap implementation ensures that each ticket is unique. If
/// an element is removed from the slotmap, the ticket is invalidated and cannot
/// be used, even if the slot is reused.
///
/// You can retrieve the element corresponding to the ticket using
/// [`LocklessSlotmap::get`], which also guarantees that the element is not
/// removed while it is being accessed. The ticket itself makes no such guarantees.
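///
/// # Example
///
/// A minimal sketch of ticket invalidation (tickets are `Copy`, so a stale
/// ticket can still be presented after `erase`; it simply no longer resolves):
///
/// ```
/// use rust_lockless_slotmap::LocklessSlotmap;
/// use parking_lot::RawRwLock;
///
/// let slotmap: LocklessSlotmap<u32, RawRwLock> = LocklessSlotmap::new();
/// let ticket = slotmap.insert(7);
/// assert!(slotmap.get(ticket).is_some());
/// slotmap.erase(ticket);
/// // The ticket is now invalid, even if the slot is later reused.
/// assert!(slotmap.get(ticket).is_none());
/// ```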
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SlotmapTicket {
    block_index: u16,
    generation: u16,
    slot_index: u32,
}

impl SlotmapTicket {
    pub(crate) fn new(block_index: u16, slot_index: u32, generation: u16) -> Self
    {
        Self {
            block_index,
            generation,
            slot_index,
        }
    }

    pub(crate) fn block_index(&self) -> u16
    {
        self.block_index
    }

    pub(crate) fn generation(&self) -> u16
    {
        self.generation
    }

    pub(crate) fn slot_index(&self) -> u32
    {
        self.slot_index
    }
}

/// SlotmapEntry is a reference to an element stored in the slotmap.
///
/// The SlotmapEntry is created when the element is accessed in the slotmap. It
/// ensures that the element cannot be removed while there is a thread actively
/// accessing it.
///
/// # Note
///
/// It is the responsibility of the user to ensure that the SlotmapEntry is dropped
/// prior to removing the element from the slotmap. Failure to do so will result in
/// a deadlock, where the erasing method will wait indefinitely for the
/// SlotmapEntry to be dropped.
///
/// # Example
/// ```
/// use rust_lockless_slotmap::LocklessSlotmap;
/// use parking_lot::RawRwLock;
///
/// let slotmap: LocklessSlotmap<usize, RawRwLock> = LocklessSlotmap::new();
/// let ticket = slotmap.insert(42);
///
/// {
///     let entry = slotmap.get(ticket).unwrap();
///     assert_eq!(*entry, 42);
/// }
/// ```
pub struct SlotmapEntry<'a, T> {
    atomic_ref: &'a AtomicUsize,
    data: &'a T,
}

impl <'a, T> SlotmapEntry<'a, T> {
    /// Get a reference to the element stored in the slotmap.
    ///
    /// This reference cannot outlive the protection of the SlotmapEntry.
    /// Therefore all accesses to this element are guaranteed to be safe.
    ///
    /// # Returns
    /// A reference to the element stored in the slotmap.
    pub fn get(&self) -> &T
    {
        self.data
    }
}

impl <'a, T> Drop for SlotmapEntry<'a, T> {
    fn drop(&mut self)
    {
        self.atomic_ref.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
    }
}

impl <'a, T> std::ops::Deref for SlotmapEntry<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target
    {
        self.get()
    }
}

struct LocklessStateEntry
{
    /// Number of live [`SlotmapEntry`] references to this slot.
    refcount: AtomicUsize,
    /// Occupancy state machine (Free / Reserved / Occupied) plus generation.
    state: AtomicState,
}

struct LocklessSlotmapBlock<T>
{
    /// Storage for the elements; a slot is only initialized while it is occupied.
    elements: Box<[UnsafeCell<MaybeUninit<T>>]>,
    /// Per-slot state, parallel to `elements`.
    states: Box<[LocklessStateEntry]>,
    /// Head of this block's intrusive free list.
    next_free_slot: AtomicOptionU32,
    /// Next block that still has free slots, if any.
    next_non_saturated_block: AtomicOptionU64,
}

impl <T> LocklessSlotmapBlock<T> {
    fn new(size: usize) -> Self
    {
        debug_assert!(size <= MAX_ELEMENTS_PER_BLOCK, "The size of the block must be less than or equal to MAX_ELEMENTS_PER_BLOCK");
        debug_assert!(size < u32::MAX as usize, "The size of the block must be less than u32::MAX (constraint due to AtomicState)");
        debug_assert!(size > 0, "The size of the block must be greater than 0");

        let mut elements = Vec::with_capacity(size);
        let mut states = Vec::with_capacity(size);

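        // Build an intrusive free list: each free slot stores the index of the
        // next free slot, and the block's `next_free_slot` head starts at slot 0.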
        for i in 0..size {
            elements.push(UnsafeCell::new(MaybeUninit::uninit()));
            states.push(LocklessStateEntry {
                refcount: AtomicUsize::new(0),
                state: State::Free {
                    next_generation: 0,
                    next_free_slot: if i < size - 1 {
                        Some(i as u32 + 1)
                    } else {
                        None
                    }
                }.into(),
            });
        }

        Self {
            elements: elements.into_boxed_slice(),
            states: states.into_boxed_slice(),
            next_free_slot: AtomicOptionU32::new(Some(0)),
            next_non_saturated_block: AtomicOptionU64::new(None),
        }
    }
}

/// LocklessSlotmap is a lockless implementation of a slotmap.
///
/// A slotmap is a data structure that provides stable references to its elements
/// together with O(1) insertion and O(1) removal.
///
/// This implementation is (mostly) lockless, meaning that it can be used in
/// high-performance environments where locks are not desired. The only place where
/// a lock is taken is when the slotmap becomes saturated and a new block needs to
/// be allocated. Because block sizes grow geometrically, this should be a rare
/// occurrence.
///
/// # Limitations
///
/// Each slot in the slotmap can only be reused so many times. 16-bit generation
/// numbers are kept as a guard against ABA problems, which means that each slot can
/// only be reused 65536 times (after which the slot is considered "dead" and will
/// not be reused, which can lead to slowly increasing memory usage). Therefore, for
/// very long-running applications with high insertion and removal rates, this
/// implementation may not be suitable.
///
/// # Implementation
///
/// Internally, the slotmap is divided into blocks, each block containing a fixed
/// number of elements. When the slotmap is saturated, a new block is allocated
/// without invalidating any of the existing blocks. This allows for fast
/// insertion and removal of elements.
///
/// Blocks grow geometrically in size, starting at 64 elements (default) and
/// growing up to a maximum of [`MAX_ELEMENTS_PER_BLOCK`] elements.
///
/// # Note
///
/// In the current implementation, a newly inserted element takes the place of
/// the most recently removed element. At high loads this behavior can lead to
/// excessive memory fragmentation. This behavior may be changed in the future.
pub struct LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{
    /// All allocated blocks; the lock is only taken for writing when a new
    /// block is allocated.
    blocks: lock_api::RwLock<R, Vec<LocklessSlotmapBlock<T>>>,
    /// Head of the list of blocks that still have free slots.
    next_non_saturated_block: AtomicOptionU64,
    /// Size of the next block to allocate.
    next_block_size: AtomicUsize,
    /// Total number of slots across all blocks.
    capacity: AtomicUsize,
    /// Number of occupied slots.
    len: AtomicUsize,
    /// Number of slots that have exhausted their generation counter.
    generation_limit_reached: AtomicUsize,
}

impl <T, R> LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{
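    /// Computes the size of the next block: geometric growth by a factor of
    /// roughly 1.5x, clamped to [`MAX_ELEMENTS_PER_BLOCK`]
    /// (e.g. 64 -> 96 -> 144 -> 216 -> ...).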
    fn grow(current_size: usize) -> usize
    {
        std::cmp::min(MAX_ELEMENTS_PER_BLOCK, current_size + (current_size >> 1))
    }

    fn alloc_block(&self)
    {
        // First we need to acquire the write lock (exclusive access)
        // to the blocks.
        // NOTE: This locks the entire slotmap, so it should happen
        //       rarely. Already retrieved references are not
        //       affected by this lock.
        let mut blocks = self.blocks.write();

        // Check the current next_non_saturated_block: if another thread
        // already allocated a block, there is nothing to do.
        let next_non_saturated_block = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);
        if next_non_saturated_block.is_some() {
            return;
        }

        // Determine the size of the next block
        let next_block_size = self.next_block_size.load(std::sync::atomic::Ordering::Relaxed);
        self.next_block_size.store(Self::grow(next_block_size), std::sync::atomic::Ordering::Relaxed);

        // Allocate the elements and states for the new block
        let new_block = LocklessSlotmapBlock::new(next_block_size);
        new_block.next_non_saturated_block.store(None, std::sync::atomic::Ordering::Relaxed);

        // Add the block to the blocks
        let block_index = blocks.len();
        assert!(block_index < u16::MAX as usize, "The number of blocks has exceeded the maximum value of u16");
        blocks.push(new_block);

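        // Publish the new block as the next non-saturated block. While the write
        // lock is held, insert/erase (which hold the read lock) should not be able
        // to modify next_non_saturated_block, so this CAS is defensive: if it
        // somehow fails, the allocation is rolled back.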
        if self.next_non_saturated_block.compare_exchange(
            None,
            Some(block_index as u64),
            std::sync::atomic::Ordering::SeqCst,
            std::sync::atomic::Ordering::Relaxed
        ).is_err() {
            blocks.pop();
            return;
        }

        // Increment the capacity
        self.capacity.fetch_add(next_block_size, std::sync::atomic::Ordering::Relaxed);
    }

    /// Creates a new slotmap with the default capacity of 64 elements.
    ///
    /// # Returns
    /// A new slotmap with the default capacity of 64 elements.
    ///
    /// # Panics
    /// Panics if the allocation of the first block fails.
    pub fn new() -> Self
    {
        Self::with_capacity(64)
    }

    /// Creates a new slotmap with the specified capacity.
    ///
    /// # Arguments
    /// * `capacity` - The initial capacity (number of elements) of the slotmap. This capacity
    ///                is limited to [`MAX_ELEMENTS_PER_BLOCK`] elements; the slotmap can still
    ///                grow beyond it by allocating additional blocks.
    ///
    /// # Returns
    /// A new slotmap with the specified capacity.
    ///
    /// # Panics
    /// Panics if the capacity is zero or greater than [`MAX_ELEMENTS_PER_BLOCK`].
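    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rust_lockless_slotmap::LocklessSlotmap;
    /// use parking_lot::RawRwLock;
    ///
    /// let slotmap: LocklessSlotmap<String, RawRwLock> = LocklessSlotmap::with_capacity(128);
    /// assert!(slotmap.capacity() >= 128);
    /// ```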
    pub fn with_capacity(capacity: usize) -> Self
    {
        assert!(capacity <= MAX_ELEMENTS_PER_BLOCK, "The capacity of the slotmap must be less than or equal to MAX_ELEMENTS_PER_BLOCK");
        assert!(capacity > 0, "The capacity of the slotmap must be greater than 0");

        let object = Self {
            blocks: lock_api::RwLock::new(Vec::new()),
            next_non_saturated_block: AtomicOptionU64::new(None),
            next_block_size: AtomicUsize::new(capacity),
            capacity: AtomicUsize::new(0),
            len: AtomicUsize::new(0),
            generation_limit_reached: AtomicUsize::new(0),
        };
        object.alloc_block(); // Pre-allocate the first block
        object
    }

    /// Inserts a new element into the slotmap.
    ///
    /// Atomically inserts a new element into the slotmap. The element is stored in the slotmap
    /// and a ticket is returned that can be used to access the element.
    ///
    /// # Arguments
    /// * `value` - The value to insert into the slotmap.
    ///
    /// # Returns
    /// A ticket that can be used to access the element in the slotmap. The element can then
    /// be accessed using the [`LocklessSlotmap::get`] method.
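    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rust_lockless_slotmap::LocklessSlotmap;
    /// use parking_lot::RawRwLock;
    ///
    /// let slotmap: LocklessSlotmap<&str, RawRwLock> = LocklessSlotmap::new();
    /// let ticket = slotmap.insert("hello");
    /// assert_eq!(slotmap.len(), 1);
    /// assert_eq!(*slotmap.get(ticket).unwrap(), "hello");
    /// ```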
    pub fn insert(&self, value: T) -> SlotmapTicket
    {
        let backoff = crossbeam::utils::Backoff::new();
        loop {
            // Attempt to find a free slot in the slotmap
            let block_index = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);

            // If there is no block available, allocate a new block
            let block_index = if let Some(block_index) = block_index {
                usize::try_from(block_index).unwrap()
            }
            else {
                self.alloc_block();
                continue; // Retry
            };

            // Acquire the read lock (shared access) to the blocks
            let blocks = self.blocks.read();
            let block = &blocks[block_index];

            // Attempt to find a free slot in the block
            let slot_index = block.next_free_slot.load(std::sync::atomic::Ordering::SeqCst);
            let slot_index = if let Some(slot_index) = slot_index {
                slot_index
            }
            else {
                // Another thread made progress, retry
                backoff.spin();
                continue;
            };

            let slot_state = &block.states[slot_index as usize];

            // Attempt to acquire the slot at slot_index
            let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
            let (next_generation, next_free_slot) = match state {
                State::Free { next_generation, next_free_slot } => {
                    (next_generation, next_free_slot)
                },
                _ => {
                    // Another thread made progress, retry
                    backoff.spin();
                    continue;
                }
            };

            // We then need to attempt to transition the state of the slot to Reserved
            if slot_state.state.compare_exchange(
                state,
                State::Reserved,
                std::sync::atomic::Ordering::SeqCst,
                std::sync::atomic::Ordering::SeqCst
            ).is_err() {
                // Another thread made progress, retry
                backoff.spin();
                continue;
            }

            // We then update the next_free_slot of the block
            if block.next_free_slot.compare_exchange(
                Some(slot_index),
                next_free_slot,
                std::sync::atomic::Ordering::SeqCst,
                std::sync::atomic::Ordering::SeqCst
            ).is_err() {
                // Another thread made progress; roll back the reservation and retry
                backoff.spin();
                slot_state.state.store(state, std::sync::atomic::Ordering::SeqCst);
                continue;
            }

            // If there is no next_free_slot, we need to update the next_non_saturated_block
            if next_free_slot.is_none() {
                if self.next_non_saturated_block.compare_exchange(
                    Some(block_index as u64),
                    block.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst),
                    std::sync::atomic::Ordering::SeqCst,
                    std::sync::atomic::Ordering::SeqCst
                ).is_err() {
                    // Another thread made progress; roll back and retry
                    backoff.spin();
                    slot_state.state.store(state, std::sync::atomic::Ordering::SeqCst);
                    block.next_free_slot.store(Some(slot_index), std::sync::atomic::Ordering::SeqCst);
                    continue;
                }
            }

            // We then need to initialize the element at slot_index
            unsafe {
                let element = block.elements[slot_index as usize].get().as_mut().unwrap();
                element.write(value);
            }

            // We then need to transition the state of the slot to Occupied
            if slot_state.state.compare_exchange(
                State::Reserved,
                State::Occupied { generation: next_generation },
                std::sync::atomic::Ordering::SeqCst,
                std::sync::atomic::Ordering::SeqCst
            ).is_err() {
                panic!("Race condition detected, this is a bug, please report it.");
            }

            // Finally create the ticket
            self.len.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            return SlotmapTicket::new(block_index as u16, slot_index, next_generation);
        }
    }

    /// Get an element from the slotmap at the specified ticket.
    ///
    /// Retrieves the element stored in the slotmap at the specified ticket. The ticket
    /// is invalidated after the element is removed from the slotmap. For more information
    /// you can refer to the [`SlotmapEntry`] structure.
    ///
    /// # Arguments
    /// * `ticket` - The ticket of the element in the slotmap. Tickets are invalidated
    ///              after the element is removed from the slotmap. See [`SlotmapTicket`]
    ///              for more details.
    ///
    /// # Returns
    /// An [`Option`] containing a [`SlotmapEntry`] for the element stored in the slotmap
    /// at the specified ticket, or [`Option::None`] if the ticket is invalid or the
    /// element has been removed.
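    ///
    /// # Example
    ///
    /// A minimal sketch (note the explicit `drop` before `erase`):
    ///
    /// ```
    /// use rust_lockless_slotmap::LocklessSlotmap;
    /// use parking_lot::RawRwLock;
    ///
    /// let slotmap: LocklessSlotmap<i32, RawRwLock> = LocklessSlotmap::new();
    /// let ticket = slotmap.insert(-3);
    /// let entry = slotmap.get(ticket).unwrap();
    /// assert_eq!(*entry, -3);
    /// drop(entry); // Release the entry before erasing to avoid a deadlock.
    /// slotmap.erase(ticket);
    /// ```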
    pub fn get(&self, ticket: SlotmapTicket) -> Option<SlotmapEntry<'_, T>> {
        let block_index = ticket.block_index();
        let slot_index = ticket.slot_index();
        let ticket_generation = ticket.generation();

        // Acquire the read lock (shared access) to the blocks
        let blocks = self.blocks.read();
        let block = &blocks[usize::from(block_index)];

        // Acquire the state of the slot
        let slot_state = &block.states[slot_index as usize];
        let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);

        // Check if the slot is occupied and the generation matches
        match state {
            State::Occupied { generation } if generation == ticket_generation => (),
            _ => return None,
        }

        // Increment the refcount of the slot
        slot_state.refcount.fetch_add(1, std::sync::atomic::Ordering::SeqCst);

        // Recheck the state of the slot
        let new_state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
        if new_state != state {
            // Another thread made progress, decrement the refcount
            slot_state.refcount.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
            return None;
        }

        // Get the reference to the element

        // SAFETY: The slot is occupied and the generation matches, we have incremented the refcount
        // and rechecked the state of the slot **AFTERWARDS**, therefore the reference must be valid.
        let element = unsafe {
            block.elements[slot_index as usize].get().as_ref().unwrap().assume_init_ref()
        };

        // SAFETY: The boxed slices are never deallocated or moved unless the LocklessSlotmap is
        // dropped, therefore the reference lifetime can be extended to the lifetime of the
        // LocklessSlotmap.
        let refcount: &'_ AtomicUsize = unsafe {
            std::mem::transmute(&slot_state.refcount)
        };

        // Return the slotmap entry
        Some(SlotmapEntry {
            atomic_ref: refcount,
            data: element,
        })
    }

    /// Erase an element from the slotmap at the specified ticket.
    ///
    /// Removes the element stored in the slotmap at the specified ticket. The ticket is
    /// invalidated after the element is removed from the slotmap. For more information
    /// you can refer to the [`SlotmapEntry`] structure.
    ///
    /// # Deadlocks
    ///
    /// This method will wait for all [`SlotmapEntry`] values corresponding to the ticket to
    /// be dropped before removing the element from the slotmap. Special care should be taken
    /// to ensure that the thread calling this method is not holding a [`SlotmapEntry`]
    /// corresponding to the ticket, or a deadlock will occur.
    ///
    /// # Arguments
    /// * `ticket` - The ticket of the element in the slotmap. Tickets are invalidated
    ///              after the element is removed from the slotmap.
    ///
    /// # Returns
    /// An [`Option<T>`] containing the element stored in the slotmap at the specified ticket or
    /// [`Option::None`] if the ticket is invalid or the element has already been removed.
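    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rust_lockless_slotmap::LocklessSlotmap;
    /// use parking_lot::RawRwLock;
    ///
    /// let slotmap: LocklessSlotmap<usize, RawRwLock> = LocklessSlotmap::new();
    /// let ticket = slotmap.insert(42);
    /// assert_eq!(slotmap.erase(ticket), Some(42));
    /// assert_eq!(slotmap.erase(ticket), None); // already removed
    /// ```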
    pub fn erase(&self, ticket: SlotmapTicket) -> Option<T> {
        let block_index = ticket.block_index();
        let slot_index = ticket.slot_index();
        let ticket_generation = ticket.generation();

        // Acquire the read lock (shared access) to the blocks
        let blocks = self.blocks.read();
        let block = &blocks[usize::from(block_index)];

        // Acquire the state of the slot
        let slot_state = &block.states[slot_index as usize];

        // Begin of the critical section
        let backoff = crossbeam::utils::Backoff::new();
        'critical: loop {
            // Check that the slot is occupied and the generation matches
            let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
            match state {
                State::Occupied { generation } if generation == ticket_generation => (),
                _ => break 'critical None, // The slot is not occupied or the generation does not match
            }

            // Attempt to transition the state of the slot to Reserved
            if slot_state.state.compare_exchange(
                state,
                State::Reserved,
                std::sync::atomic::Ordering::SeqCst,
                std::sync::atomic::Ordering::SeqCst
            ).is_err() {
                // Another thread made progress, retry
                backoff.spin();
                continue;
            }

            // Second loop: wait for the refcount to hit 0
            'zeroref: loop {
                let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                if refcount == 0 {
                    break 'zeroref;
                }

                // A SlotmapEntry is still alive; wait for it to be dropped
                backoff.snooze();
            }

            // Retrieve the element
            let element = unsafe {
                block.elements[slot_index as usize].get().as_mut().unwrap().assume_init_read()
            };

            // We then attempt to transition the state of the slot to Free
            if let Some(next_generation) = ticket_generation.checked_add(1) {
                // We update the next_free_slot of the block so that it points to this slot (currently reserved)
                let next_free_slot = 'update_slot: loop {
                    let next_free_slot = block.next_free_slot.load(std::sync::atomic::Ordering::SeqCst);

                    if block.next_free_slot.compare_exchange(
                        next_free_slot,
                        Some(slot_index),
                        std::sync::atomic::Ordering::SeqCst,
                        std::sync::atomic::Ordering::SeqCst
                    ).is_err() {
                        // Another thread made progress, retry
                        backoff.spin();
                        continue 'update_slot;
                    }

                    break 'update_slot next_free_slot;
                };

                // If next_free_slot was None, this block was saturated and now has
                // a free slot again, so we need to publish it through
                // next_non_saturated_block
                if next_free_slot.is_none() {
                    let next_non_saturated_block = 'update_block: loop {
                        let next_non_saturated_block = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);

                        // Attempt to update the next_non_saturated_block
                        if self.next_non_saturated_block.compare_exchange(
                            next_non_saturated_block,
                            Some(block_index as u64),
                            std::sync::atomic::Ordering::SeqCst,
                            std::sync::atomic::Ordering::SeqCst
                        ).is_err() {
                            // Another thread made progress, retry
                            backoff.spin();
                            continue 'update_block;
                        }

                        // Hand the previous head over so this block can link to it
                        break 'update_block next_non_saturated_block;
                    };

                    // We then update the next_non_saturated_block of the block
                    block.next_non_saturated_block.store(next_non_saturated_block, std::sync::atomic::Ordering::SeqCst);
                }

                // The generation has not reached the maximum value, we can reuse this slot
                if slot_state.state.compare_exchange(
                    State::Reserved,
                    State::Free {
                        next_generation,
                        next_free_slot,
                    },
                    std::sync::atomic::Ordering::SeqCst,
                    std::sync::atomic::Ordering::SeqCst
                ).is_err() {
                    panic!("refcount is 0 and the slot is reserved, so no overwriting should occur; this is a bug, please report it.");
                }
            }
            else {
                // The generation has reached the maximum value, we won't be reusing
                // this slot, therefore we leave it as Reserved
                self.generation_limit_reached.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            }

            self.len.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
            break 'critical Some(element);
        }
    }

    /// Get the number of elements that can be stored in the slotmap without
    /// allocating a new block.
    ///
    /// # Returns
    /// The current capacity of the slotmap, in elements.
    pub fn capacity(&self) -> usize
    {
        self.capacity.load(std::sync::atomic::Ordering::SeqCst)
    }

    /// Get the number of elements stored in the slotmap.
    ///
    /// Notice that in a multithreaded environment, the number of elements stored in the
    /// slotmap can change between the time this method is called and the time the result
    /// is used; therefore the result should be treated as an approximation.
    ///
    /// # Returns
    /// The number of elements stored in the slotmap.
    pub fn len(&self) -> usize
    {
        self.len.load(std::sync::atomic::Ordering::SeqCst)
    }

    /// Number of times the generation limit has been reached.
    ///
    /// As discussed in the limitations of the [`LocklessSlotmap`] structure, each slot can only
    /// be reused so many times (2^16 times). When the generation limit is reached, the slot is
    /// considered "dead" and will not be reused. This method returns the number of slots that
    /// have died this way.
    ///
    /// Notice that in a multithreaded environment, this counter can change between the time
    /// this method is called and the time the result is used; therefore the result should be
    /// treated as an approximation.
    ///
    /// # Returns
    /// The number of times the generation limit has been reached.
    pub fn generation_limit_reached(&self) -> usize
    {
        self.generation_limit_reached.load(std::sync::atomic::Ordering::SeqCst)
    }
}

impl <T, R> Drop for LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{
    fn drop(&mut self)
    {
        // Acquire the write lock (exclusive access) to the blocks
        let blocks = self.blocks.write();

        // Drop all the remaining elements
        for block in blocks.iter() {
            for (slot_state, slot_data) in block.states.iter().zip(block.elements.iter()) {
                // First we need to acquire the state of the slot
                let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);

                // If the slot is occupied, drop the element
                match state {
                    State::Reserved => {
                        // A slot can only still be Reserved here if it reached the
                        // maximum generation, therefore its refcount must be 0
                        let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                        assert_eq!(refcount, 0, "The refcount of the slot is not 0, this is a bug, please report it.");
                    }
                    State::Free { .. } => (),
                    State::Occupied { .. } => {
                        // Check that the refcount is 0
                        let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                        assert_eq!(refcount, 0, "The refcount of the slot is not 0, this is a bug, please report it.");

                        // Drop the element
                        // SAFETY: The slot is occupied, the refcount is 0 and the slotmap is
                        // being dropped, therefore the element can be safely dropped.
                        unsafe {
                            slot_data.get().as_mut().unwrap().assume_init_drop();
                        }
                    }
                }
            }
        }
    }
}

// SAFETY: Elements are only reached through the per-slot state/refcount
// protocol, all bookkeeping is done with atomics or behind the RwLock, and
// T itself is required to be Send + Sync.
unsafe impl <T, R> Send for LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{}

unsafe impl <T, R> Sync for LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{}