use alloc::alloc::{alloc_zeroed, handle_alloc_error, Layout};
use alloc::boxed::Box;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr;
use core::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crossbeam_utils::{Backoff, CachePadded};
// Bits indicating the state of a slot:
// * If a value has been written into the slot, `WRITE` is set.
// * If a value has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;
// Each block covers one "lap" of indices and holds up to `BLOCK_CAP` values.
const LAP: usize = 32;
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits of an index are reserved for metadata.
const SHIFT: usize = 1;
// Indicates that the block is not the last one.
const HAS_NEXT: usize = 1;
/// A slot in a block.
struct Slot<T> {
    /// The value.
    value: UnsafeCell<MaybeUninit<T>>,
    /// The state of the slot.
    state: AtomicUsize,
}
impl<T> Slot<T> {
    /// Waits until a value is written into the slot.
    fn wait_write(&self) {
        let backoff = Backoff::new();
        while self.state.load(Ordering::Acquire) & WRITE == 0 {
            backoff.snooze();
        }
    }
}
/// A block in a linked list; each block can hold up to `BLOCK_CAP` values.
struct Block<T> {
    /// The next block in the linked list.
    next: AtomicPtr<Block<T>>,
    /// Slots for values.
    slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
    const LAYOUT: Layout = {
        let layout = Layout::new::<Self>();
        assert!(
            layout.size() != 0,
            "Block should never be zero-sized, as it has an AtomicPtr field"
        );
        layout
    };

    /// Creates an empty block.
    fn new() -> Box<Self> {
        // SAFETY: the layout is not zero-sized (checked by `LAYOUT` above).
        let ptr = unsafe { alloc_zeroed(Self::LAYOUT) };
        if ptr.is_null() {
            handle_alloc_error(Self::LAYOUT)
        }
        // SAFETY: an all-zero bit pattern is valid for every field: `next` is
        // an `AtomicPtr` (null), and each `Slot` consists of an
        // `UnsafeCell<MaybeUninit<T>>` and an `AtomicUsize` (state 0).
        unsafe { Box::from_raw(ptr.cast()) }
    }
    /// Waits until the next pointer is set.
    fn wait_next(&self) -> *mut Block<T> {
        let backoff = Backoff::new();
        loop {
            let next = self.next.load(Ordering::Acquire);
            if !next.is_null() {
                return next;
            }
            backoff.snooze();
        }
    }
    /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
    unsafe fn destroy(this: *mut Block<T>, start: usize) {
        // It is not necessary to set the DESTROY bit in the last slot because
        // that slot has begun destruction of the block.
        for i in start..BLOCK_CAP - 1 {
            let slot = (*this).slots.get_unchecked(i);

            // Mark the slot as being destroyed, or stop right away if a thread
            // is still using it; in that case it will continue the destruction.
            if slot.state.load(Ordering::Acquire) & READ == 0
                && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
            {
                return;
            }
        }

        // No thread is using the block; it is now safe to destroy it.
        drop(Box::from_raw(this));
    }
}
/// A position in a queue.
struct Position<T> {
    /// The index in the queue.
    index: AtomicUsize,
    /// The block in the linked list.
    block: AtomicPtr<Block<T>>,
}
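/// An unbounded multi-producer multi-consumer queue implemented as a linked
/// list of blocks, each holding up to `BLOCK_CAP` values.
///
/// A minimal usage sketch; it assumes the type is re-exported at the crate
/// root (as `crossbeam_queue::SegQueue` in the crossbeam crate):
///
/// ```
/// use crossbeam_queue::SegQueue;
///
/// let q = SegQueue::new();
/// q.push('a');
/// q.push('b');
///
/// assert_eq!(q.pop(), Some('a'));
/// assert_eq!(q.pop(), Some('b'));
/// assert_eq!(q.pop(), None);
/// ```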
pub struct SegQueue<T> {
    /// The head of the queue.
    head: CachePadded<Position<T>>,
    /// The tail of the queue.
    tail: CachePadded<Position<T>>,
    /// Indicates that dropping a `SegQueue<T>` may drop values of type `T`.
    _marker: PhantomData<T>,
}
// SAFETY: values pushed by one thread may be popped by another, so `T: Send`
// is required; no shared references to `T` are ever handed out, so `T: Sync`
// is not needed.
unsafe impl<T: Send> Send for SegQueue<T> {}
unsafe impl<T: Send> Sync for SegQueue<T> {}

impl<T> UnwindSafe for SegQueue<T> {}
impl<T> RefUnwindSafe for SegQueue<T> {}
impl<T> SegQueue<T> {
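    /// Creates a new, empty queue. A sketch of use, under the same crate-path
    /// assumption as the type-level docs:
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q: SegQueue<i32> = SegQueue::new();
    /// ```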
pub const fn new() -> SegQueue<T> {
SegQueue {
head: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
tail: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
_marker: PhantomData,
}
}
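    /// Pushes an element to the back of the queue. For example (same path
    /// assumption as above):
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    /// q.push(10);
    /// q.push(20);
    /// assert_eq!(q.len(), 2);
    /// ```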
    pub fn push(&self, value: T) {
        let backoff = Backoff::new();
        let mut tail = self.tail.index.load(Ordering::Acquire);
        let mut block = self.tail.block.load(Ordering::Acquire);
        let mut next_block = None;

        loop {
            // Calculate the offset of the slot into the block.
            let offset = (tail >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.snooze();
                tail = self.tail.index.load(Ordering::Acquire);
                block = self.tail.block.load(Ordering::Acquire);
                continue;
            }

            // If we're about to fill the block, allocate the next one in
            // advance to keep the wait for other threads as short as possible.
            if offset + 1 == BLOCK_CAP && next_block.is_none() {
                next_block = Some(Block::<T>::new());
            }

            // If this is the first push operation, allocate the first block.
            if block.is_null() {
                let new = Box::into_raw(Block::<T>::new());

                if self
                    .tail
                    .block
                    .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    self.head.block.store(new, Ordering::Release);
                    block = new;
                } else {
                    // Another thread installed the first block; reuse ours later.
                    next_block = unsafe { Some(Box::from_raw(new)) };
                    tail = self.tail.index.load(Ordering::Acquire);
                    block = self.tail.block.load(Ordering::Acquire);
                    continue;
                }
            }

            let new_tail = tail + (1 << SHIFT);

            // Try advancing the tail forward.
            match self.tail.index.compare_exchange_weak(
                tail,
                new_tail,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, install the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next_block = Box::into_raw(next_block.unwrap());
                        let next_index = new_tail.wrapping_add(1 << SHIFT);

                        self.tail.block.store(next_block, Ordering::Release);
                        self.tail.index.store(next_index, Ordering::Release);
                        (*block).next.store(next_block, Ordering::Release);
                    }

                    // Write the value into the slot and mark it as written.
                    let slot = (*block).slots.get_unchecked(offset);
                    slot.value.get().write(MaybeUninit::new(value));
                    slot.state.fetch_or(WRITE, Ordering::Release);

                    return;
                },
                Err(t) => {
                    tail = t;
                    block = self.tail.block.load(Ordering::Acquire);
                    backoff.spin();
                }
            }
        }
    }
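    /// Pops an element from the front of the queue, returning `None` if it is
    /// empty. For example (same path assumption as above):
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    /// q.push(10);
    /// assert_eq!(q.pop(), Some(10));
    /// assert_eq!(q.pop(), None);
    /// ```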
    pub fn pop(&self) -> Option<T> {
        let backoff = Backoff::new();
        let mut head = self.head.index.load(Ordering::Acquire);
        let mut block = self.head.block.load(Ordering::Acquire);

        loop {
            // Calculate the offset of the slot into the block.
            let offset = (head >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.snooze();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            let mut new_head = head + (1 << SHIFT);

            if new_head & HAS_NEXT == 0 {
                atomic::fence(Ordering::SeqCst);
                let tail = self.tail.index.load(Ordering::Relaxed);

                // If the tail equals the head, that means the queue is empty.
                if head >> SHIFT == tail >> SHIFT {
                    return None;
                }

                // If head and tail are not in the same block, set `HAS_NEXT` in head.
                if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
                    new_head |= HAS_NEXT;
                }
            }

            // The block can be null here only if the first push operation is
            // still in progress; in that case, wait until it gets initialized.
            if block.is_null() {
                backoff.snooze();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            // Try moving the head index forward.
            match self.head.index.compare_exchange_weak(
                head,
                new_head,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, move to the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next = (*block).wait_next();
                        let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT);
                        if !(*next).next.load(Ordering::Relaxed).is_null() {
                            next_index |= HAS_NEXT;
                        }

                        self.head.block.store(next, Ordering::Release);
                        self.head.index.store(next_index, Ordering::Release);
                    }

                    // Read the value once the writer has marked the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    slot.wait_write();
                    let value = slot.value.get().read().assume_init();

                    // Destroy the block if we've reached its end, or if another
                    // thread wanted to destroy it but couldn't while we were
                    // still reading from the slot.
                    if offset + 1 == BLOCK_CAP {
                        Block::destroy(block, 0);
                    } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
                        Block::destroy(block, offset + 1);
                    }

                    return Some(value);
                },
                Err(h) => {
                    head = h;
                    block = self.head.block.load(Ordering::Acquire);
                    backoff.spin();
                }
            }
        }
    }
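    /// Returns `true` if the queue is empty. For example (same path assumption
    /// as above):
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    /// assert!(q.is_empty());
    /// q.push(1);
    /// assert!(!q.is_empty());
    /// ```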
pub fn is_empty(&self) -> bool {
let head = self.head.index.load(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::SeqCst);
head >> SHIFT == tail >> SHIFT
}
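    /// Returns the number of elements in the queue. For example (same path
    /// assumption as above):
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    /// assert_eq!(q.len(), 0);
    /// q.push(10);
    /// q.push(20);
    /// assert_eq!(q.len(), 2);
    /// ```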
    pub fn len(&self) -> usize {
        loop {
            // Load the tail index, then load the head index.
            let mut tail = self.tail.index.load(Ordering::SeqCst);
            let mut head = self.head.index.load(Ordering::SeqCst);

            // If the tail index didn't change, we've got consistent indices to work with.
            if self.tail.index.load(Ordering::SeqCst) == tail {
                // Erase the metadata bits.
                tail &= !((1 << SHIFT) - 1);
                head &= !((1 << SHIFT) - 1);

                // Fix up indices if they fall onto block ends.
                if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
                    tail = tail.wrapping_add(1 << SHIFT);
                }
                if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
                    head = head.wrapping_add(1 << SHIFT);
                }

                // Rotate indices so that head falls into the first block.
                let lap = (head >> SHIFT) / LAP;
                tail = tail.wrapping_sub((lap * LAP) << SHIFT);
                head = head.wrapping_sub((lap * LAP) << SHIFT);

                // Remove the lower bits.
                tail >>= SHIFT;
                head >>= SHIFT;

                // Return the difference minus the number of blocks between tail and head.
                return tail - head - tail / LAP;
            }
        }
    }
}
impl<T> Drop for SegQueue<T> {
    fn drop(&mut self) {
        let mut head = *self.head.index.get_mut();
        let mut tail = *self.tail.index.get_mut();
        let mut block = *self.head.block.get_mut();

        // Erase the metadata bits.
        head &= !((1 << SHIFT) - 1);
        tail &= !((1 << SHIFT) - 1);

        unsafe {
            // Drop all values between `head` and `tail`, deallocating the
            // heap-allocated blocks along the way.
            while head != tail {
                let offset = (head >> SHIFT) % LAP;

                if offset < BLOCK_CAP {
                    // Drop the value in the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    (*slot.value.get()).assume_init_drop();
                } else {
                    // Deallocate the block and move to the next one.
                    let next = *(*block).next.get_mut();
                    drop(Box::from_raw(block));
                    block = next;
                }

                head = head.wrapping_add(1 << SHIFT);
            }

            // Deallocate the last remaining block.
            if !block.is_null() {
                drop(Box::from_raw(block));
            }
        }
    }
}
impl<T> fmt::Debug for SegQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("SegQueue { .. }")
}
}
impl<T> Default for SegQueue<T> {
fn default() -> SegQueue<T> {
SegQueue::new()
}
}
impl<T> IntoIterator for SegQueue<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
IntoIter { value: self }
}
}
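/// An owning iterator over the elements of a `SegQueue`, created by its
/// `IntoIterator` impl. Since the iterator has exclusive access to the queue,
/// it drains it without atomic synchronization. For example (same path
/// assumption as above):
///
/// ```
/// use crossbeam_queue::SegQueue;
///
/// let q = SegQueue::new();
/// q.push(1);
/// q.push(2);
/// let v: Vec<i32> = q.into_iter().collect();
/// assert_eq!(v, [1, 2]);
/// ```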
#[derive(Debug)]
pub struct IntoIter<T> {
value: SegQueue<T>,
}
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        let value = &mut self.value;
        let head = *value.head.index.get_mut();
        let tail = *value.tail.index.get_mut();
        if head >> SHIFT == tail >> SHIFT {
            None
        } else {
            let block = *value.head.block.get_mut();
            let offset = (head >> SHIFT) % LAP;

            // SAFETY: We have mutable access to the queue, so we can read the
            // slot without worrying about concurrency. Furthermore, we know it
            // is initialized because it lies between `head` and `tail` in a
            // non-empty queue.
            let item = unsafe {
                let slot = (*block).slots.get_unchecked(offset);
                slot.value.get().read().assume_init()
            };
            if offset + 1 == BLOCK_CAP {
                // Deallocate the block and move to the next one.
                unsafe {
                    let next = *(*block).next.get_mut();
                    drop(Box::from_raw(block));
                    *value.head.block.get_mut() = next;
                }
                // The last index position in each lap has no slot, so advance
                // two steps to land on the first slot of the next block.
                *value.head.index.get_mut() = head.wrapping_add(2 << SHIFT);
                // Double-check that we're pointing to the first slot in a block.
                debug_assert_eq!((*value.head.index.get_mut() >> SHIFT) % LAP, 0);
            } else {
                *value.head.index.get_mut() = head.wrapping_add(1 << SHIFT);
            }
            Some(item)
        }
    }
}