use std::{cell::UnsafeCell, mem::MaybeUninit, sync::atomic::AtomicUsize};
use utils::{AtomicOptionU32, AtomicOptionU64, AtomicState, State};
#[cfg(test)]
mod tests;
mod utils;
pub const MAX_ELEMENTS_PER_BLOCK: usize = 32768;
const _: () = assert!(
    MAX_ELEMENTS_PER_BLOCK < u32::MAX as usize,
    "MAX_ELEMENTS_PER_BLOCK must be less than u32::MAX (constraint due to AtomicState)"
);
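/// A handle to a slot in a [`LocklessSlotmap`]: `block_index` and
/// `slot_index` locate the storage, while `generation` detects whether the
/// slot has been erased and reused since this ticket was issued.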
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SlotmapTicket {
block_index: u16,
generation: u16,
slot_index: u32,
}
impl SlotmapTicket {
    pub(crate) fn new(block_index: u16, slot_index: u32, generation: u16) -> Self
    {
        // `block_index` and `generation` are already `u16`, so no range
        // checks are needed here; the type system enforces the bounds.
        Self {
            block_index,
            generation,
            slot_index,
        }
    }
pub(crate) fn block_index(&self) -> u16
{
self.block_index
}
pub(crate) fn generation(&self) -> u16
{
self.generation
}
pub(crate) fn slot_index(&self) -> u32
{
self.slot_index
}
}
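/// A read guard over a slot's value. While a `SlotmapEntry` is alive it
/// holds a non-zero refcount on the slot, which prevents `erase` from
/// freeing the value underneath it.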
pub struct SlotmapEntry<'a, T> {
atomic_ref: &'a AtomicUsize,
data: &'a T,
}
impl <'a, T> SlotmapEntry<'a, T> {
    // The returned reference is tied to the borrow of `self`, which cannot
    // outlive `'a`, so no explicit lifetime bound is needed.
    pub fn get(&self) -> &T
{
self.data
}
}
impl <'a, T> Drop for SlotmapEntry<'a, T> {
    fn drop(&mut self)
    {
        // Release this entry's hold on the slot so a pending `erase` can
        // proceed.
        self.atomic_ref.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
    }
}
impl <'a, T> std::ops::Deref for SlotmapEntry<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target
{
self.get()
}
}
struct LocklessStateEntry
{
    /// Number of live `SlotmapEntry` guards currently reading this slot.
    refcount: AtomicUsize,
    /// The slot's Free / Reserved / Occupied state, packed into one atomic.
    state: AtomicState,
}
struct LocklessSlotmapBlock<T>
{
    /// Slot storage; a slot is only initialised while it is occupied.
    elements: Box<[UnsafeCell<MaybeUninit<T>>]>,
    /// Per-slot state, parallel to `elements`.
    states: Box<[LocklessStateEntry]>,
    /// Head of this block's intrusive free-slot list.
    next_free_slot: AtomicOptionU32,
    /// Next block in the global list of non-saturated blocks.
    next_non_saturated_block: AtomicOptionU64,
}
impl <T> LocklessSlotmapBlock<T> {
fn new(size: usize) -> Self
{
debug_assert!(size <= MAX_ELEMENTS_PER_BLOCK, "The size of the block must be less than or equal to MAX_ELEMENTS_PER_BLOCK");
        debug_assert!(size < u32::MAX as usize, "The size of the block must be less than u32::MAX (constraint due to AtomicState)");
debug_assert!(size > 0, "The size of the block must be greater than 0");
let mut elements = Vec::with_capacity(size);
let mut states = Vec::with_capacity(size);
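        // Chain the slots into an intrusive free list: slot `i` points to
        // slot `i + 1`, and the last slot terminates the list.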
for i in 0..size {
elements.push(UnsafeCell::new(MaybeUninit::uninit()));
states.push(LocklessStateEntry {
refcount: AtomicUsize::new(0),
state: State::Free {
next_generation: 0,
next_free_slot: if i < size - 1 {
Some(i as u32 + 1)
} else {
None
}
}.into(),
});
}
Self {
elements: elements.into_boxed_slice(),
states: states.into_boxed_slice(),
next_free_slot: AtomicOptionU32::new(Some(0)),
next_non_saturated_block: AtomicOptionU64::new(None),
}
}
}
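/// A concurrent slotmap. Insertion, lookup, and erasure spin on atomics
/// rather than blocking; the `lock_api` read/write lock is only taken for
/// writing when a fresh block has to be allocated.
///
/// A minimal usage sketch (assuming `parking_lot` is available to supply
/// the `lock_api::RawRwLock` implementation; any raw lock works):
///
/// ```ignore
/// let map: LocklessSlotmap<u32, parking_lot::RawRwLock> = LocklessSlotmap::new();
/// let ticket = map.insert(42);
/// assert_eq!(map.get(ticket).map(|entry| *entry), Some(42));
/// assert_eq!(map.erase(ticket), Some(42));
/// assert!(map.get(ticket).is_none());
/// ```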
pub struct LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{
    /// Every allocated block; write-locked only to push a new block.
    blocks: lock_api::RwLock<R, Vec<LocklessSlotmapBlock<T>>>,
    /// Head of the global list of blocks that still have free slots.
    next_non_saturated_block: AtomicOptionU64,
    /// Size of the next block to allocate.
    next_block_size: AtomicUsize,
    /// Total number of slots across all blocks.
    capacity: AtomicUsize,
    /// Number of occupied slots.
    len: AtomicUsize,
    /// Number of slots permanently retired after generation overflow.
    generation_limit_reached: AtomicUsize,
}
impl <T, R> LocklessSlotmap<T, R>
where
T: Sized + Send + Sync,
R: lock_api::RawRwLock,
{
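    // Growth policy: each new block is 1.5x the size of the previous one,
    // capped at MAX_ELEMENTS_PER_BLOCK.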
fn grow(current_size: usize) -> usize
{
std::cmp::min(MAX_ELEMENTS_PER_BLOCK, current_size + (current_size >> 1))
}
    fn alloc_block(&self)
    {
        let mut blocks = self.blocks.write();
        // Another thread may have freed a slot or allocated a block while we
        // were waiting on the write lock; if a non-saturated block exists
        // again there is nothing left to do.
        let next_non_saturated_block = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);
        if next_non_saturated_block.is_some() {
            return;
        }
        let next_block_size = self.next_block_size.load(std::sync::atomic::Ordering::Relaxed);
        let new_block = LocklessSlotmapBlock::new(next_block_size);
        let block_index = blocks.len();
        assert!(block_index < u16::MAX as usize, "The number of blocks has exceeded the maximum value of u16");
        blocks.push(new_block);
        // A concurrent `erase` may have republished a block between the load
        // above and this CAS; in that case the freshly allocated block is
        // discarded again.
        if self.next_non_saturated_block.compare_exchange(
            None,
            Some(block_index as u64),
            std::sync::atomic::Ordering::SeqCst,
            std::sync::atomic::Ordering::Relaxed
        ).is_err() {
            blocks.pop();
            return;
        }
        // Only commit the grown size and capacity once the block is kept.
        self.next_block_size.store(Self::grow(next_block_size), std::sync::atomic::Ordering::Relaxed);
        self.capacity.fetch_add(next_block_size, std::sync::atomic::Ordering::Relaxed);
    }
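    /// Creates a slotmap whose first block holds 64 elements.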
pub fn new() -> Self
{
Self::with_capacity(64)
}
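    /// Creates a slotmap whose first block holds `capacity` elements
    /// (at most `MAX_ELEMENTS_PER_BLOCK`).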
pub fn with_capacity(capacity: usize) -> Self
{
assert!(capacity <= MAX_ELEMENTS_PER_BLOCK, "The capacity of the slotmap must be less than or equal to MAX_ELEMENTS_PER_BLOCK");
assert!(capacity > 0, "The capacity of the slotmap must be greater than 0");
let object = Self {
blocks: lock_api::RwLock::new(Vec::new()),
next_non_saturated_block: AtomicOptionU64::new(None),
next_block_size: AtomicUsize::new(capacity),
capacity: AtomicUsize::new(0),
len: AtomicUsize::new(0),
generation_limit_reached: AtomicUsize::new(0),
};
        object.alloc_block();
        object
}
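    /// Inserts `value` and returns a ticket for later lookup or erasure.
    /// Spins until a free slot can be claimed, allocating a new block if
    /// every existing block is saturated.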
pub fn insert(&self, value: T) -> SlotmapTicket
{
let backoff = crossbeam::utils::Backoff::new();
loop {
let block_index = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);
            let block_index = if let Some(block_index) = block_index {
                usize::try_from(block_index).unwrap()
            }
            else {
                // Every block is saturated: allocate a new one and retry.
                self.alloc_block();
                continue;
            };
            let blocks = self.blocks.read();
            let block = &blocks[block_index];
let slot_index = block.next_free_slot.load(std::sync::atomic::Ordering::SeqCst);
let slot_index = if let Some(slot_index) = slot_index {
slot_index
}
else {
backoff.spin();
continue;
};
let slot_state = &block.states[slot_index as usize];
let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
let (next_generation, next_free_slot) = match state {
State::Free { next_generation, next_free_slot } => {
(next_generation, next_free_slot)
},
_ => {
backoff.spin();
continue;
}
};
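            // Reserve the slot so no other inserter or eraser can touch it.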
if slot_state.state.compare_exchange(
state,
State::Reserved,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
continue;
}
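            // Pop the slot off the block's free list; on failure, roll the
            // reservation back and retry.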
if block.next_free_slot.compare_exchange(
Some(slot_index),
next_free_slot,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
slot_state.state.store(state, std::sync::atomic::Ordering::SeqCst);
continue;
}
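            // The free list is now empty: unlink this block from the global
            // non-saturated list so other inserters skip it. On failure,
            // roll everything back and retry.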
if next_free_slot.is_none() {
if self.next_non_saturated_block.compare_exchange(
Some(block_index as u64),
block.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst),
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
slot_state.state.store(state, std::sync::atomic::Ordering::SeqCst);
block.next_free_slot.store(Some(slot_index), std::sync::atomic::Ordering::SeqCst);
continue;
}
}
            unsafe {
                // SAFETY: the slot state was successfully CASed to `Reserved`
                // above, so this thread has exclusive access to the storage.
                let element = block.elements[slot_index as usize].get().as_mut().unwrap();
                element.write(value);
            }
if slot_state.state.compare_exchange(
State::Reserved,
State::Occupied { generation: next_generation },
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
panic!("Race condition detected, this is a bug, please report it.");
}
self.len.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
return SlotmapTicket::new(block_index as u16, slot_index, next_generation);
}
}
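    /// Returns a read guard for the value behind `ticket`, or `None` if the
    /// slot has been erased or reused since the ticket was issued. The slot
    /// cannot be erased while the returned `SlotmapEntry` is alive.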
pub fn get(&self, ticket: SlotmapTicket) -> Option<SlotmapEntry<'_, T>> {
let block_index = ticket.block_index();
let slot_index = ticket.slot_index();
let ticket_generation = ticket.generation();
let blocks = self.blocks.read();
let block = &blocks[usize::from(block_index)];
let slot_state = &block.states[slot_index as usize];
let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
match state {
State::Occupied { generation } if generation == ticket_generation => (),
_ => return None,
}
        // Raise the refcount first, then re-check the state: if an `erase`
        // raced with us in between, back out and report the slot as gone.
        slot_state.refcount.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let new_state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
        if new_state != state {
            slot_state.refcount.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
            return None;
        }
        let element = unsafe {
            // SAFETY: the state was re-checked as `Occupied` after the
            // refcount was raised, so the slot is initialised and cannot be
            // freed while this entry is alive.
            block.elements[slot_index as usize].get().as_ref().unwrap().assume_init_ref()
        };
        // SAFETY: the reference points into a `Box` owned by the block, so
        // its address is stable even if the block `Vec` reallocates, and
        // blocks are never dropped before the slotmap itself; detaching the
        // lifetime from the read-guard borrow and tying it to `&self` is
        // therefore sound.
        let refcount: &'_ AtomicUsize = unsafe {
            std::mem::transmute(&slot_state.refcount)
        };
Some(SlotmapEntry {
atomic_ref: refcount,
data: element,
})
}
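    /// Removes the value behind `ticket` and returns it, or `None` if the
    /// slot has already been erased or reused. Spins until every live
    /// `SlotmapEntry` for the slot has been dropped.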
pub fn erase(&self, ticket: SlotmapTicket) -> Option<T> {
let block_index = ticket.block_index();
let slot_index = ticket.slot_index();
let ticket_generation = ticket.generation();
let blocks = self.blocks.read();
let block = &blocks[usize::from(block_index)];
let slot_state = &block.states[slot_index as usize];
let backoff = crossbeam::utils::Backoff::new();
        'critical: loop {
            let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
            match state {
                State::Occupied { generation } if generation == ticket_generation => (),
                _ => break 'critical None,
            }
if slot_state.state.compare_exchange(
state,
State::Reserved,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
continue;
}
            // Wait for all outstanding readers to drop their `SlotmapEntry`
            // guards before moving the value out.
            'zeroref: loop {
                let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                if refcount == 0 {
                    break 'zeroref;
                }
                backoff.snooze();
            }
            let element = unsafe {
                // SAFETY: the slot was `Occupied`, is now `Reserved` by this
                // thread, and its refcount has reached zero, so the value can
                // be moved out.
                block.elements[slot_index as usize].get().as_mut().unwrap().assume_init_read()
            };
            // If the generation counter would overflow, the slot is retired
            // instead of being returned to the free list (see the `else`
            // branch below).
            if let Some(next_generation) = ticket_generation.checked_add(1) {
                // Push the slot back onto the head of the block's free list.
                let next_free_slot = 'update_slot: loop {
let next_free_slot = block.next_free_slot.load(std::sync::atomic::Ordering::SeqCst);
if block.next_free_slot.compare_exchange(
next_free_slot,
Some(slot_index),
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
continue;
}
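                    // The block was saturated before this erase: republish it
                    // at the head of the global non-saturated list and link it
                    // to the previous head.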
if next_free_slot.is_none() {
let next_non_saturated_block = 'update_block: loop {
let next_non_saturated_block = self.next_non_saturated_block.load(std::sync::atomic::Ordering::SeqCst);
if self.next_non_saturated_block.compare_exchange(
next_non_saturated_block,
Some(block_index as u64),
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::SeqCst
).is_err() {
backoff.spin();
continue;
}
break 'update_block next_non_saturated_block;
};
block.next_non_saturated_block.store(next_non_saturated_block, std::sync::atomic::Ordering::SeqCst);
};
break 'update_slot next_free_slot;
};
                if slot_state.state.compare_exchange(
                    State::Reserved,
                    State::Free {
                        next_generation,
                        next_free_slot,
                    },
                    std::sync::atomic::Ordering::SeqCst,
                    std::sync::atomic::Ordering::SeqCst
                ).is_err() {
                    panic!("refcount is 0, the slot is reserved, no overwriting should occur, this is a bug, please report it.");
                }
}
            else {
                // Generation counter exhausted: leak the slot (it stays
                // `Reserved` forever) and record the loss so callers can
                // monitor slot attrition via `generation_limit_reached`.
                self.generation_limit_reached.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            }
self.len.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
break 'critical Some(element);
}
}
pub fn capacity(&self) -> usize
{
self.capacity.load(std::sync::atomic::Ordering::SeqCst)
}
pub fn len(&self) -> usize
{
self.len.load(std::sync::atomic::Ordering::SeqCst)
}
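    /// Number of slots permanently retired because their generation counter
    /// overflowed; retired slots are never reused.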
pub fn generation_limit_reached(&self) -> usize
{
self.generation_limit_reached.load(std::sync::atomic::Ordering::SeqCst)
}
}
impl <T, R> Drop for LocklessSlotmap<T, R>
where
T: Sized + Send + Sync,
R: lock_api::RawRwLock,
{
fn drop(&mut self)
{
let blocks = self.blocks.write();
for block in blocks.iter() {
for (slot_state, slot_data) in block.states.iter().zip(block.elements.iter()) {
let state = slot_state.state.load(std::sync::atomic::Ordering::SeqCst);
            match state {
                State::Reserved => {
                    // A slot can only still be `Reserved` at drop time if it
                    // was retired after generation overflow; its value has
                    // already been moved out by `erase`.
                    let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                    assert_eq!(refcount, 0, "The refcount of the slot is not 0, this is a bug, please report it.");
                }
                State::Free { .. } => (),
                State::Occupied { .. } => {
                    let refcount = slot_state.refcount.load(std::sync::atomic::Ordering::SeqCst);
                    assert_eq!(refcount, 0, "The refcount of the slot is not 0, this is a bug, please report it.");
                    // SAFETY: the slot is occupied and has no readers, so the
                    // value is initialised and can be dropped in place.
                    unsafe {
                        slot_data.get().as_mut().unwrap().assume_init_drop();
                    }
                }
            }
}
}
}
}
// SAFETY: all shared mutation goes through atomics, the `lock_api` lock,
// and the per-slot state machine, and `T: Send + Sync` is required, so the
// slotmap can be shared across and sent between threads.
unsafe impl <T, R> Send for LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{}
unsafe impl <T, R> Sync for LocklessSlotmap<T, R>
where
    T: Sized + Send + Sync,
    R: lock_api::RawRwLock,
{}