use crate::loom::sync::atomic::{AtomicUsize, Ordering};
use crate::util::CachePadded;
use std::process::abort;
use std::ptr;
use super::MapIndex;
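
/// A reader reference count with the current read-map index packed into the
/// highest bit, so a map swap can atomically redirect new readers while
/// observing how many readers were registered at the moment of the swap.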
pub struct RefCount {
    value: CachePadded<AtomicUsize>,
}

impl RefCount {
    /// The highest bit records which of the two maps readers should currently use.
    const MAP_INDEX_FLAG: usize = 1usize << (usize::BITS - 1);

    /// Once the count grows into this bit it is approaching the map-index
    /// flag, so `increment` aborts the process rather than risk corrupting it.
    const COUNT_OVERFLOW_BIT: usize = Self::MAP_INDEX_FLAG >> 1;

    /// Masks off the map-index flag, leaving only the count bits.
    const COUNT_MASK: usize = Self::MAP_INDEX_FLAG - 1;

    pub(super) fn new(read_index: MapIndex) -> Self {
        // Start with a count of zero and `read_index` in the top bit.
        Self {
            value: CachePadded::new(AtomicUsize::new((read_index as usize) << (usize::BITS - 1))),
        }
    }

    #[inline]
    fn to_map_index(value: usize) -> MapIndex {
        // SAFETY: shifting away everything below the top bit leaves 0 or 1,
        // both of which are valid map indices.
        unsafe { MapIndex::from_usize_unchecked(value >> (usize::BITS - 1)) }
    }

    #[inline]
    pub fn increment(&self) -> MapIndex {
        let old_value = self.value.fetch_add(1, Ordering::Acquire);

        // Calling through a function pointer loaded with `read_volatile` hides
        // from the optimizer that the callee never returns, keeping the cold
        // path a plain call and the hot path lean.
        static ABORT_WRAPPER_FN: fn() -> MapIndex = || abort();

        #[cold]
        #[inline(never)]
        fn abort_helper() -> MapIndex {
            // SAFETY: `ABORT_WRAPPER_FN` is a valid, initialized function pointer.
            let fn_ptr = unsafe { ptr::read_volatile(&ABORT_WRAPPER_FN) };
            (fn_ptr)()
        }

        // If the count has grown into the overflow bit, further increments
        // could corrupt the map-index flag, so abort the process.
        if old_value & Self::COUNT_OVERFLOW_BIT != 0 {
            return abort_helper();
        }

        Self::to_map_index(old_value)
    }

    #[inline]
    pub fn decrement(&self) -> MapIndex {
        // Release ordering makes this reader's map accesses visible before the
        // count drop can be observed by a writer waiting on it.
        let old_value = self.value.fetch_sub(1, Ordering::Release);
        Self::to_map_index(old_value)
    }

    #[inline]
    pub(super) fn swap_maps(&self) -> usize {
        // Adding the flag toggles the top bit (wrapping on overflow), atomically
        // redirecting new readers to the other map. The masked old value is the
        // number of readers registered at the moment of the swap.
        let old_value = self
            .value
            .fetch_add(Self::MAP_INDEX_FLAG, Ordering::Relaxed);
        old_value & Self::COUNT_MASK
    }
}
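
// A minimal sketch of the packing invariants above, assuming `MapIndex` is the
// two-variant index type implied by `from_usize_unchecked` and the `as usize`
// cast, and assuming a plain (non-loom) build; the `tests` module and its
// contents are illustrative, not part of the original crate.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn count_and_index_stay_separate() {
        // SAFETY: `to_map_index` only ever produces 0 or 1, so 0 must be a
        // valid map index.
        let refcount = RefCount::new(unsafe { MapIndex::from_usize_unchecked(0) });

        // A reader entering and leaving reports the current read index (0)
        // without disturbing it.
        assert_eq!(refcount.increment() as usize, 0);
        assert_eq!(refcount.decrement() as usize, 0);

        // Swapping reports the count at the instant of the swap (no readers)
        // and flips the index observed by subsequent readers.
        assert_eq!(refcount.swap_maps(), 0);
        assert_eq!(refcount.increment() as usize, 1);
    }
}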