use std::cell::Cell;
use std::ptr;
use std::cell::UnsafeCell;
#[cfg(feature = "loom")]
use loom::sync::atomic::{AtomicPtr, AtomicU64, AtomicUsize, Ordering, fence};
#[cfg(not(feature = "loom"))]
use std::sync::atomic::{AtomicPtr, AtomicU64, AtomicUsize, Ordering, fence};
use crate::Key;
use crate::disk_loc::DiskLoc;
use crate::key::Location;
/// Maximum number of levels in any node's tower; head nodes always use it.
pub const MAX_HEIGHT: usize = 16;

/// Common interface over the skip-list node flavors in this file.
///
/// Implementors are manually heap-allocated with an inline "tower" of
/// `height()` atomic successor pointers stored right after the node header
/// in the same allocation.
pub trait SkipNode: Sized + Send + Sync {
    /// Raw key bytes used for ordering comparisons.
    fn key_bytes(&self) -> &[u8];
    /// Number of tower levels this node owns.
    fn height(&self) -> u8;
    /// True if the node is logically deleted (level-0 successor tagged).
    fn is_marked(&self) -> bool;
    /// Sets the deletion mark; returns `true` iff this call set it first.
    fn mark(&self) -> bool;
    /// Successor pointer at `level`; callers must keep `level < height()`.
    fn tower(&self, level: usize) -> &AtomicPtr<Self>;
    /// Allocates the sentinel head node (zeroed key, `MAX_HEIGHT` levels).
    fn alloc_head() -> *mut Self;
    /// Frees a node produced by this type's allocator.
    ///
    /// # Safety
    /// `ptr` must be valid, allocated by this type, and unreachable by any
    /// other thread.
    unsafe fn dealloc_node(ptr: *mut Self);
    /// Reclamation hook invoked once `ptr` is provably unreachable.
    ///
    /// # Safety
    /// Same contract as `dealloc_node`.
    unsafe fn reclaim(ptr: *mut Self, _collector: &seize::Collector);
}
/// Copies the current contents out of `cell`.
///
/// # Safety
/// The caller must guarantee no thread is concurrently writing the cell
/// (here: the seqlock validation re-check makes torn reads harmless).
#[inline(always)]
unsafe fn read_cell<T: Copy>(cell: &UnsafeCell<T>) -> T {
    unsafe { *cell.get() }
}
/// Overwrites `cell` with `val` WITHOUT dropping the previous contents
/// (`ptr::write`, not assignment) — matches the seqlock protocol where the
/// old bytes may be mid-read elsewhere.
///
/// # Safety
/// The caller must guarantee exclusive write access for the duration of
/// the call (here: the odd seqlock sequence number).
#[inline(always)]
unsafe fn write_cell<T>(cell: &UnsafeCell<T>, val: T) {
    unsafe { ptr::write(cell.get(), val) }
}
/// Reader backoff while a seqlock write is in flight: busy-spin hint in
/// normal builds; under loom, yield so the model checker can schedule the
/// writer forward.
#[inline(always)]
fn spin_or_yield() {
    #[cfg(feature = "loom")]
    loom::thread::yield_now();
    #[cfg(not(feature = "loom"))]
    std::hint::spin_loop();
}
/// Skip-list node with a fixed-size (`V`-byte) inline value.
///
/// `#[repr(C)]` so the header layout is stable: the tower of
/// `AtomicPtr<Self>` lives in the SAME allocation, immediately after the
/// (alignment-padded) header, and `tower_ptr` points at it. `loc` and
/// `value` are guarded by the `seq` seqlock (even = stable, odd = write
/// in progress).
#[repr(C)]
pub struct ConstNode<K: Key, const V: usize, L: Location = DiskLoc> {
    pub key: K,
    // Seqlock sequence counter protecting `loc` and `value`.
    seq: AtomicU64,
    // Storage location; accessed only through the seqlock protocol.
    loc: UnsafeCell<L>,
    // Inline value bytes; accessed only through the seqlock protocol.
    value: UnsafeCell<[u8; V]>,
    // Number of tower levels allocated after this header.
    height: u8,
    // Points into this node's own allocation, just past the header.
    tower_ptr: *const AtomicPtr<Self>,
}
impl<K: Key, const V: usize, L: Location> ConstNode<K, V, L> {
    /// Header size rounded up to the alignment of `AtomicPtr<Self>`, so the
    /// tower stored inline after the header is properly aligned.
    const fn base_size() -> usize {
        let raw = std::mem::size_of::<Self>();
        let align = std::mem::align_of::<AtomicPtr<Self>>();
        (raw + align - 1) & !(align - 1)
    }

    /// Allocation layout for a node carrying `height` inline tower levels.
    fn layout_for(height: u8) -> std::alloc::Layout {
        let tower_bytes = height as usize * std::mem::size_of::<AtomicPtr<Self>>();
        let total = Self::base_size() + tower_bytes;
        let align = std::mem::align_of::<Self>().max(std::mem::align_of::<AtomicPtr<Self>>());
        std::alloc::Layout::from_size_align(total, align).expect("invalid layout")
    }

    /// Heap-allocates and fully initializes a node whose tower of `height`
    /// null successor pointers shares the allocation with the header.
    ///
    /// The returned pointer must eventually be released through
    /// `SkipNode::dealloc_node` so the matching layout is used.
    pub fn alloc(key: K, value: [u8; V], loc: L, height: u8) -> *mut Self {
        let layout = Self::layout_for(height);
        unsafe {
            let ptr = std::alloc::alloc(layout) as *mut Self;
            if ptr.is_null() {
                // std's GlobalAlloc contract: a null return is reported via
                // handle_alloc_error (diverges/aborts) rather than a panic
                // that would unwind out of low-level allocation code.
                std::alloc::handle_alloc_error(layout);
            }
            let tower = (ptr as *mut u8).add(Self::base_size()) as *mut AtomicPtr<Self>;
            // The memory is uninitialized: every field must be ptr::write'n,
            // never assigned (assignment would drop garbage).
            std::ptr::write(&raw mut (*ptr).key, key);
            std::ptr::write(&raw mut (*ptr).seq, AtomicU64::new(0));
            std::ptr::write(&raw mut (*ptr).loc, UnsafeCell::new(loc));
            std::ptr::write(&raw mut (*ptr).value, UnsafeCell::new(value));
            std::ptr::write(&raw mut (*ptr).height, height);
            std::ptr::write(&raw mut (*ptr).tower_ptr, tower as *const AtomicPtr<Self>);
            for i in 0..height as usize {
                std::ptr::write(tower.add(i), AtomicPtr::new(ptr::null_mut()));
            }
            ptr
        }
    }

    /// Seqlock read of `(loc, value)` as one consistent snapshot.
    ///
    /// Retries while a writer holds the lock (odd `seq`) or bumped `seq`
    /// between the two loads (data may have been torn mid-read).
    #[inline]
    #[allow(dead_code)]
    pub fn read_data(&self) -> (L, [u8; V]) {
        loop {
            let s1 = self.seq.load(Ordering::Acquire);
            if s1 & 1 != 0 {
                // Odd sequence number: a write is in progress.
                spin_or_yield();
                continue;
            }
            let loc = unsafe { read_cell(&self.loc) };
            let value = unsafe { read_cell(&self.value) };
            // Keep the data reads ordered before the validating re-load.
            fence(Ordering::Acquire);
            let s2 = self.seq.load(Ordering::Relaxed);
            if s1 == s2 {
                return (loc, value);
            }
            spin_or_yield();
        }
    }

    /// Seqlock read of `loc` only (same protocol as `read_data`).
    #[inline]
    pub fn read_loc(&self) -> L {
        loop {
            let s1 = self.seq.load(Ordering::Acquire);
            if s1 & 1 != 0 {
                spin_or_yield();
                continue;
            }
            let loc = unsafe { read_cell(&self.loc) };
            fence(Ordering::Acquire);
            let s2 = self.seq.load(Ordering::Relaxed);
            if s1 == s2 {
                return loc;
            }
            spin_or_yield();
        }
    }

    /// Seqlock read of `value` only (same protocol as `read_data`).
    #[inline]
    pub fn read_value(&self) -> [u8; V] {
        loop {
            let s1 = self.seq.load(Ordering::Acquire);
            if s1 & 1 != 0 {
                spin_or_yield();
                continue;
            }
            let value = unsafe { read_cell(&self.value) };
            fence(Ordering::Acquire);
            let s2 = self.seq.load(Ordering::Relaxed);
            if s1 == s2 {
                return value;
            }
            spin_or_yield();
        }
    }

    /// Seqlock write of `loc` and `value`: seq goes odd, data is written,
    /// seq goes even again with Release so readers see completed data.
    ///
    /// NOTE(review): writers are not mutually excluded here — concurrent
    /// calls would corrupt the protocol. Presumably the caller guarantees a
    /// single writer per node; confirm at call sites.
    #[inline]
    pub fn write_data(&self, loc: L, value: &[u8; V]) {
        self.seq.fetch_add(1, Ordering::Relaxed);
        // Order the seq bump (now odd) before the data writes.
        fence(Ordering::Release);
        unsafe {
            write_cell(&self.loc, loc);
            write_cell(&self.value, *value);
        }
        self.seq.fetch_add(1, Ordering::Release);
    }

    /// Seqlock write of `loc` only; same single-writer caveat as
    /// `write_data`.
    #[inline]
    pub fn write_loc(&self, loc: L) {
        self.seq.fetch_add(1, Ordering::Relaxed);
        fence(Ordering::Release);
        unsafe {
            write_cell(&self.loc, loc);
        }
        self.seq.fetch_add(1, Ordering::Release);
    }
}
impl<K: Key, const V: usize, L: Location> SkipNode for ConstNode<K, V, L> {
    #[inline]
    fn key_bytes(&self) -> &[u8] {
        self.key.as_bytes()
    }

    #[inline]
    fn height(&self) -> u8 {
        self.height
    }

    /// Logical deletion is encoded as bit 0 of the level-0 successor
    /// pointer (real node pointers are aligned, so bit 0 is otherwise 0).
    #[inline]
    fn is_marked(&self) -> bool {
        self.tower(0).load(Ordering::Acquire) as usize & 1 != 0
    }

    /// Atomically sets the deletion mark; returns `true` iff this call was
    /// the one that set it (the bit was previously clear).
    fn mark(&self) -> bool {
        // SAFETY(review): reinterprets AtomicPtr<Self> as AtomicUsize to get
        // fetch_or; both are word-sized atomics with the same layout.
        let atomic_usize: &AtomicUsize =
            unsafe { &*(self.tower(0) as *const AtomicPtr<Self> as *const AtomicUsize) };
        let old = atomic_usize.fetch_or(1, Ordering::AcqRel);
        old & 1 == 0
    }

    #[inline]
    fn tower(&self, level: usize) -> &AtomicPtr<Self> {
        debug_assert!(level < self.height as usize);
        // SAFETY: tower_ptr addresses `height` AtomicPtr slots allocated
        // inline after the header; `level` is range-checked in debug builds.
        unsafe { &*self.tower_ptr.add(level) }
    }

    fn alloc_head() -> *mut Self {
        // Sentinel head: zeroed key, always the maximum height.
        Self::alloc(K::zeroed(), [0u8; V], L::zeroed(), MAX_HEIGHT as u8)
    }

    unsafe fn dealloc_node(ptr: *mut Self) {
        unsafe {
            let height = (*ptr).height;
            // Only `key` needs an explicit drop; `seq` and `value` are plain
            // data. NOTE(review): the `loc` cell is NOT dropped — fine if `L`
            // is plain-old-data, a leak otherwise; confirm `Location` types
            // have no Drop impl.
            std::ptr::drop_in_place(&mut (*ptr).key);
            let layout = Self::layout_for(height);
            std::alloc::dealloc(ptr as *mut u8, layout);
        }
    }

    unsafe fn reclaim(ptr: *mut Self, _collector: &seize::Collector) {
        unsafe { Self::dealloc_node(ptr) }
    }
}
// SAFETY(review): the raw pointer and UnsafeCell fields suppress the auto
// traits. Cross-thread access is mediated by the `seq` seqlock and the
// atomic tower pointers; `tower_ptr` points into the node's own allocation,
// so it moves with the node.
unsafe impl<K: Key, const V: usize, L: Location> Send for ConstNode<K, V, L> {}
unsafe impl<K: Key, const V: usize, L: Location> Sync for ConstNode<K, V, L> {}
/// Skip-list node whose payload is a separately boxed `DiskLoc`, swapped
/// atomically via the `disk` pointer (no inline value).
///
/// `#[repr(C)]`; the tower of `AtomicPtr<Self>` follows the header in the
/// same allocation, addressed by `tower_ptr`.
#[cfg(feature = "var-collections")]
#[repr(C)]
pub struct VarNode<K: Key> {
    pub key: K,
    // Current disk location; replaced wholesale via swap/CAS.
    pub disk: AtomicPtr<DiskLoc>,
    // Number of tower levels allocated after this header.
    height: u8,
    // Points into this node's own allocation, just past the header.
    tower_ptr: *const AtomicPtr<Self>,
}
#[cfg(feature = "var-collections")]
impl<K: Key> VarNode<K> {
    /// Header size rounded up to the alignment of `AtomicPtr<Self>`, so the
    /// inline tower that follows it is properly aligned.
    const fn base_size() -> usize {
        let raw = std::mem::size_of::<Self>();
        let align = std::mem::align_of::<AtomicPtr<Self>>();
        (raw + align - 1) & !(align - 1)
    }

    /// Allocation layout for a node carrying `height` inline tower levels.
    fn layout_for(height: u8) -> std::alloc::Layout {
        let tower_bytes = height as usize * std::mem::size_of::<AtomicPtr<Self>>();
        let total = Self::base_size() + tower_bytes;
        let align = std::mem::align_of::<Self>().max(std::mem::align_of::<AtomicPtr<Self>>());
        std::alloc::Layout::from_size_align(total, align).expect("invalid layout")
    }

    /// Heap-allocates a node; `disk` is boxed separately so it can later be
    /// replaced atomically through the `disk` pointer.
    pub fn alloc(key: K, disk: DiskLoc, height: u8) -> *mut Self {
        let layout = Self::layout_for(height);
        unsafe {
            let ptr = std::alloc::alloc(layout) as *mut Self;
            if ptr.is_null() {
                // std's GlobalAlloc contract: report a null return through
                // handle_alloc_error (diverges/aborts), not a panic.
                std::alloc::handle_alloc_error(layout);
            }
            let tower = (ptr as *mut u8).add(Self::base_size()) as *mut AtomicPtr<Self>;
            // Uninitialized memory: initialize every field with ptr::write.
            std::ptr::write(&raw mut (*ptr).key, key);
            std::ptr::write(
                &raw mut (*ptr).disk,
                AtomicPtr::new(Box::into_raw(Box::new(disk))),
            );
            std::ptr::write(&raw mut (*ptr).height, height);
            std::ptr::write(&raw mut (*ptr).tower_ptr, tower as *const AtomicPtr<Self>);
            for i in 0..height as usize {
                std::ptr::write(tower.add(i), AtomicPtr::new(ptr::null_mut()));
            }
            ptr
        }
    }

    /// Borrows the current `DiskLoc`.
    ///
    /// NOTE(review): the pointee can be swapped out (and later reclaimed) by
    /// a concurrent writer while the borrow is alive; callers presumably
    /// hold a reclamation guard — confirm at call sites.
    #[inline]
    pub fn load_disk(&self) -> &DiskLoc {
        let ptr = self.disk.load(Ordering::Acquire);
        unsafe { &*ptr }
    }

    /// Returns the current raw disk pointer without dereferencing it.
    #[inline]
    pub fn load_disk_ptr(&self) -> *mut DiskLoc {
        self.disk.load(Ordering::Acquire)
    }

    /// Atomically installs `new_disk`, returning the previous pointer
    /// (which the caller becomes responsible for freeing/retiring).
    #[inline]
    pub fn swap_disk(&self, new_disk: *mut DiskLoc) -> *mut DiskLoc {
        self.disk.swap(new_disk, Ordering::AcqRel)
    }

    /// CAS on the disk pointer; `Err` carries the pointer actually observed.
    #[inline]
    pub fn compare_exchange_disk(
        &self,
        expected: *mut DiskLoc,
        new_disk: *mut DiskLoc,
    ) -> Result<*mut DiskLoc, *mut DiskLoc> {
        self.disk
            .compare_exchange(expected, new_disk, Ordering::AcqRel, Ordering::Acquire)
    }
}
#[cfg(feature = "var-collections")]
impl<K: Key> SkipNode for VarNode<K> {
    #[inline]
    fn key_bytes(&self) -> &[u8] {
        self.key.as_bytes()
    }

    #[inline]
    fn height(&self) -> u8 {
        self.height
    }

    /// Logical deletion is encoded as bit 0 of the level-0 successor
    /// pointer (real node pointers are aligned, so bit 0 is otherwise 0).
    #[inline]
    fn is_marked(&self) -> bool {
        self.tower(0).load(Ordering::Acquire) as usize & 1 != 0
    }

    /// Atomically sets the deletion mark; returns `true` iff this call was
    /// the one that set it (the bit was previously clear).
    fn mark(&self) -> bool {
        // SAFETY(review): reinterprets AtomicPtr<Self> as AtomicUsize to get
        // fetch_or; both are word-sized atomics with the same layout.
        let atomic_usize: &AtomicUsize =
            unsafe { &*(self.tower(0) as *const AtomicPtr<Self> as *const AtomicUsize) };
        let old = atomic_usize.fetch_or(1, Ordering::AcqRel);
        old & 1 == 0
    }

    #[inline]
    fn tower(&self, level: usize) -> &AtomicPtr<Self> {
        debug_assert!(level < self.height as usize);
        // SAFETY: tower_ptr addresses `height` AtomicPtr slots allocated
        // inline after the header; `level` is range-checked in debug builds.
        unsafe { &*self.tower_ptr.add(level) }
    }

    fn alloc_head() -> *mut Self {
        // Sentinel head: zeroed key, placeholder DiskLoc, maximum height.
        Self::alloc(K::zeroed(), DiskLoc::new(0, 0, 0, 0), MAX_HEIGHT as u8)
    }

    unsafe fn dealloc_node(ptr: *mut Self) {
        unsafe {
            let height = (*ptr).height;
            // Caller guarantees exclusive access, so the disk pointer can be
            // read without synchronization (get_mut; loom needs unsync_load).
            #[cfg(not(feature = "loom"))]
            let disk = *(*ptr).disk.get_mut();
            #[cfg(feature = "loom")]
            let disk = (*ptr).disk.unsync_load();
            if !disk.is_null() {
                drop(Box::from_raw(disk));
            }
            std::ptr::drop_in_place(&mut (*ptr).key);
            let layout = Self::layout_for(height);
            std::alloc::dealloc(ptr as *mut u8, layout);
        }
    }

    unsafe fn reclaim(ptr: *mut Self, _collector: &seize::Collector) {
        unsafe { Self::dealloc_node(ptr) }
    }
}
// SAFETY(review): the raw `tower_ptr` suppresses the auto traits; it points
// into the node's own allocation and all cross-thread access goes through
// the atomic `disk` and tower pointers.
#[cfg(feature = "var-collections")]
unsafe impl<K: Key> Send for VarNode<K> {}
#[cfg(feature = "var-collections")]
unsafe impl<K: Key> Sync for VarNode<K> {}
/// Payload of a `TypedNode`: a disk location paired with an in-memory value,
/// boxed and swapped atomically as one unit.
#[cfg(feature = "typed-tree")]
pub struct TypedData<T> {
    pub disk: DiskLoc,
    pub value: T,
}
/// Skip-list node carrying a typed payload behind an atomically swappable
/// `TypedData<T>` pointer (null for the sentinel head).
///
/// `#[repr(C)]`; the tower of `AtomicPtr<Self>` follows the header in the
/// same allocation, addressed by `tower_ptr`.
#[cfg(feature = "typed-tree")]
#[repr(C)]
pub struct TypedNode<K: Key, T> {
    pub key: K,
    // Current payload; replaced wholesale via swap. Null only on the head.
    pub data: AtomicPtr<TypedData<T>>,
    // Number of tower levels allocated after this header.
    height: u8,
    // Points into this node's own allocation, just past the header.
    tower_ptr: *const AtomicPtr<Self>,
}
#[cfg(feature = "typed-tree")]
impl<K: Key, T> TypedNode<K, T> {
    /// Header size rounded up to the alignment of `AtomicPtr<Self>`, so the
    /// inline tower that follows it is properly aligned.
    const fn base_size() -> usize {
        let raw = std::mem::size_of::<Self>();
        let align = std::mem::align_of::<AtomicPtr<Self>>();
        (raw + align - 1) & !(align - 1)
    }

    /// Allocation layout for a node carrying `height` inline tower levels.
    fn layout_for(height: u8) -> std::alloc::Layout {
        let tower_bytes = height as usize * std::mem::size_of::<AtomicPtr<Self>>();
        let total = Self::base_size() + tower_bytes;
        let align = std::mem::align_of::<Self>().max(std::mem::align_of::<AtomicPtr<Self>>());
        std::alloc::Layout::from_size_align(total, align).expect("invalid layout")
    }

    /// Heap-allocates a node; `(disk, value)` are boxed together as one
    /// `TypedData` so they can be replaced atomically later.
    pub fn alloc(key: K, value: T, disk: DiskLoc, height: u8) -> *mut Self {
        let layout = Self::layout_for(height);
        unsafe {
            let ptr = std::alloc::alloc(layout) as *mut Self;
            if ptr.is_null() {
                // std's GlobalAlloc contract: report a null return through
                // handle_alloc_error (diverges/aborts), not a panic.
                std::alloc::handle_alloc_error(layout);
            }
            let tower = (ptr as *mut u8).add(Self::base_size()) as *mut AtomicPtr<Self>;
            // Uninitialized memory: initialize every field with ptr::write.
            std::ptr::write(&raw mut (*ptr).key, key);
            std::ptr::write(
                &raw mut (*ptr).data,
                AtomicPtr::new(Box::into_raw(Box::new(TypedData { disk, value }))),
            );
            std::ptr::write(&raw mut (*ptr).height, height);
            std::ptr::write(&raw mut (*ptr).tower_ptr, tower as *const AtomicPtr<Self>);
            for i in 0..height as usize {
                std::ptr::write(tower.add(i), AtomicPtr::new(ptr::null_mut()));
            }
            ptr
        }
    }

    /// Borrows the current payload.
    ///
    /// NOTE(review): dereferences the pointer unconditionally — on a head
    /// node (`data` is null) this is UB, and a concurrent `swap_data` may
    /// retire the pointee while the borrow lives; callers presumably avoid
    /// heads and hold a reclamation guard — confirm at call sites.
    #[inline]
    pub fn load_data(&self) -> &TypedData<T> {
        let ptr = self.data.load(Ordering::Acquire);
        unsafe { &*ptr }
    }

    /// Atomically installs `new_data`, returning the previous pointer
    /// (which the caller becomes responsible for freeing/retiring).
    #[inline]
    pub fn swap_data(&self, new_data: *mut TypedData<T>) -> *mut TypedData<T> {
        self.data.swap(new_data, Ordering::AcqRel)
    }
}
#[cfg(feature = "typed-tree")]
impl<K: Key, T: Send + Sync> SkipNode for TypedNode<K, T> {
    #[inline]
    fn key_bytes(&self) -> &[u8] {
        self.key.as_bytes()
    }

    #[inline]
    fn height(&self) -> u8 {
        self.height
    }

    /// Logical deletion is encoded as bit 0 of the level-0 successor
    /// pointer (real node pointers are aligned, so bit 0 is otherwise 0).
    #[inline]
    fn is_marked(&self) -> bool {
        self.tower(0).load(Ordering::Acquire) as usize & 1 != 0
    }

    /// Atomically sets the deletion mark; returns `true` iff this call was
    /// the one that set it (the bit was previously clear).
    fn mark(&self) -> bool {
        // SAFETY(review): reinterprets AtomicPtr<Self> as AtomicUsize to get
        // fetch_or; both are word-sized atomics with the same layout.
        let atomic_usize: &AtomicUsize =
            unsafe { &*(self.tower(0) as *const AtomicPtr<Self> as *const AtomicUsize) };
        let old = atomic_usize.fetch_or(1, Ordering::AcqRel);
        old & 1 == 0
    }

    #[inline]
    fn tower(&self, level: usize) -> &AtomicPtr<Self> {
        debug_assert!(level < self.height as usize);
        // SAFETY: tower_ptr addresses `height` AtomicPtr slots allocated
        // inline after the header; `level` is range-checked in debug builds.
        unsafe { &*self.tower_ptr.add(level) }
    }

    /// Builds the sentinel head inline: `Self::alloc` cannot be reused here
    /// because no `T` value exists for the head — its `data` stays null.
    fn alloc_head() -> *mut Self {
        let layout = Self::layout_for(MAX_HEIGHT as u8);
        unsafe {
            let ptr = std::alloc::alloc(layout) as *mut Self;
            if ptr.is_null() {
                // std's GlobalAlloc contract: report a null return through
                // handle_alloc_error (diverges/aborts), not a panic.
                std::alloc::handle_alloc_error(layout);
            }
            let tower = (ptr as *mut u8).add(Self::base_size()) as *mut AtomicPtr<Self>;
            std::ptr::write(&raw mut (*ptr).key, K::zeroed());
            std::ptr::write(&raw mut (*ptr).data, AtomicPtr::new(ptr::null_mut()));
            std::ptr::write(&raw mut (*ptr).height, MAX_HEIGHT as u8);
            std::ptr::write(&raw mut (*ptr).tower_ptr, tower as *const AtomicPtr<Self>);
            for i in 0..MAX_HEIGHT {
                std::ptr::write(tower.add(i), AtomicPtr::new(ptr::null_mut()));
            }
            ptr
        }
    }

    unsafe fn dealloc_node(ptr: *mut Self) {
        unsafe {
            let height = (*ptr).height;
            // Caller guarantees exclusive access, so the data pointer can be
            // read without synchronization; it is null for head nodes.
            #[cfg(not(feature = "loom"))]
            let data = *(*ptr).data.get_mut();
            #[cfg(feature = "loom")]
            let data = (*ptr).data.unsync_load();
            if !data.is_null() {
                drop(Box::from_raw(data));
            }
            std::ptr::drop_in_place(&mut (*ptr).key);
            let layout = Self::layout_for(height);
            std::alloc::dealloc(ptr as *mut u8, layout);
        }
    }

    unsafe fn reclaim(ptr: *mut Self, _collector: &seize::Collector) {
        unsafe { Self::dealloc_node(ptr) }
    }
}
// SAFETY(review): the raw `tower_ptr` suppresses the auto traits; it points
// into the node's own allocation, cross-thread access goes through the
// atomic `data` and tower pointers, and the payload requires T: Send + Sync.
#[cfg(feature = "typed-tree")]
unsafe impl<K: Key, T: Send + Sync> Send for TypedNode<K, T> {}
#[cfg(feature = "typed-tree")]
unsafe impl<K: Key, T: Send + Sync> Sync for TypedNode<K, T> {}
thread_local! {
    // Per-thread xorshift64 state; 0 means "not yet seeded".
    static RNG_STATE: Cell<u64> = const { Cell::new(0) };
}
/// Derives a per-thread xorshift seed from the address of a fresh heap
/// allocation (cheap per-thread/per-run entropy: ASLR and allocator state
/// vary the address), mixed through an LCG step.
///
/// The result is forced to be non-zero: zero is an absorbing state for
/// xorshift64, and a zero seed would make `random_height` degenerate
/// (every call would return `MAX_HEIGHT`).
fn xorshift_seed() -> u64 {
    let boxed = Box::new(0u8);
    let addr = &*boxed as *const u8 as u64;
    drop(boxed);
    let seed = addr
        .wrapping_mul(6364136223846793005)
        .wrapping_add(1442695040888963407);
    // Fall back to a fixed odd constant (2^64 / golden ratio) if the mix
    // happens to land exactly on zero.
    if seed == 0 { 0x9E37_79B9_7F4A_7C15 } else { seed }
}
/// Returns a random tower height in `1..=MAX_HEIGHT`, geometrically
/// distributed: each additional level is taken with probability 1/4,
/// the classic skip-list promotion scheme.
///
/// Uses a per-thread xorshift64 generator, lazily seeded on first use.
pub fn random_height() -> u8 {
    let mut height = 1u8;
    RNG_STATE.with(|state| {
        let mut s = state.get();
        if s == 0 {
            s = xorshift_seed();
            // Zero is an absorbing state for xorshift64: if the seed were 0
            // the state would stay 0 and `r == 0` below would force
            // MAX_HEIGHT on every call. Guard with a fixed odd constant.
            if s == 0 {
                s = 0x9E37_79B9_7F4A_7C15;
            }
        }
        // xorshift64 step (Marsaglia).
        s ^= s << 13;
        s ^= s >> 7;
        s ^= s << 17;
        state.set(s);
        // Consume two random bits per level: P(promote) = 1/4.
        let mut r = s as u32;
        while height < MAX_HEIGHT as u8 && (r & 3) == 0 {
            height += 1;
            r >>= 2;
        }
    });
    height
}