//! A fixed-capacity, lock-free, write-once stack built on `AtomicOnceCellArray`.
use crate::atomic_once_cell_array::AtomicOnceCellArray;
use std::ops::Range;
use std::sync::atomic::{AtomicUsize, Ordering};
/// A fixed-size stack with `capacity` determined at run-time. The elements of the stack are uninitialized.
/// Elements may be initialized with `push` and then retrieved as a reference with `get`. `push` returns the
/// index of the element in the stack. The caller may reserve space for multiple elements with `reserve_uninit`.
/// Reserved elements must be initialized with `set`. Calling `push`, `reserve_uninit`, or `set` is thread-safe.
/// The stack will panic if the `capacity` is exceeded, or the `index` is out of range, or the `set` function is
/// called more than once on an index when `T` is not zero-sized, or if the `set` function is called on an index
/// that was not reserved by a prior call to `reserve_uninit`. The stack will only drop initialized elements.
///
/// # Guarantees
///
/// - The stack will not allocate if `capacity` is 0 or if `T` is zero-sized.
/// - The allocated memory will not be `default` initialized.
/// - Elements initialized by `push` or `set` are immutable.
/// - The synchronization is `lock-free`.
///
/// # Zero-sized Types
///
/// When `T` is zero-sized, the stack does not track individual indices and instead only maintains a count of
/// how many instances of `T` have been `set` in the stack. On `drop`, the stack will `drop` that many instances
/// of `T`. The stack will panic if the number of calls to `set` exceeds the capacity.
pub struct AtomicOnceCellStack<T> {
    // Write-once backing storage; enforces bounds and single-initialization per slot.
    data: AtomicOnceCellArray<T>,
    // Number of slots handed out so far via `push`/`reserve_uninit` (the logical
    // length). Reserved-but-not-yet-`set` slots are included in this count.
    last_index: AtomicUsize,
}
impl<T> AtomicOnceCellStack<T> {
    /// Creates a stack that can hold exactly `capacity` elements.
    ///
    /// Does not allocate when `capacity` is 0 or when `T` is zero-sized.
    pub fn with_capacity(capacity: usize) -> Self {
        // SAFETY: `data` will catch any issues with <T> or capacity.
        Self {
            data: AtomicOnceCellArray::with_capacity(capacity),
            last_index: AtomicUsize::new(0),
        }
    }

    /// Appends `val` to the stack and returns the index it was stored at.
    ///
    /// # Panics
    ///
    /// Panics (via `data.set`) if the stack is already at capacity.
    pub fn push(
        &self,
        val: T,
    ) -> usize {
        // SAFETY: `data` will catch index out of bounds or multiple attempted writes.
        // NOTE(review): the counter is bumped with `Relaxed` while `len()` loads with
        // `Acquire`; publication of the element itself is presumably synchronized
        // inside `data.set` -- confirm against AtomicOnceCellArray.
        let last_len = self.last_index.fetch_add(1, Ordering::Relaxed);
        self.data.set(last_len, val);
        last_len
    }

    /// Reserves `num_to_reserve` consecutive uninitialized slots and returns the
    /// index of the first one. Each reserved slot must be initialized with `set`
    /// before it may be read.
    ///
    /// # Panics
    ///
    /// Panics if the reservation would exceed `capacity`, or if the internal
    /// length counter would overflow `usize`.
    pub fn reserve_uninit(
        &self,
        num_to_reserve: usize,
    ) -> usize {
        let last_len = self.last_index.fetch_add(num_to_reserve, Ordering::Relaxed);
        // `checked_add`: a huge `num_to_reserve` must not wrap in release builds
        // and slip past the capacity check below.
        let new_len = last_len.checked_add(num_to_reserve).unwrap_or_else(|| {
            panic!(
                "len {} + num_to_reserve {} overflows usize",
                last_len, num_to_reserve
            )
        });
        if new_len > self.capacity() {
            // SAFETY: `push` and `set` will catch any incorrect indexing,
            // but there's no sense in waiting to blow up some indeterminate time
            // in the future if we know that this is a problem right now.
            panic!(
                "len {} + num_to_reserve {} must be <= capacity {}",
                last_len,
                num_to_reserve,
                self.capacity()
            );
        }
        last_len
    }

    /// Initializes the slot at `index`, which must have been reserved by a prior
    /// call to `reserve_uninit`.
    ///
    /// # Panics
    ///
    /// Panics if `index` has not been reserved, or (via `data.set`) if the slot
    /// was already initialized and `T` is not zero-sized.
    pub fn set(
        &self,
        index: usize,
        val: T,
    ) {
        let len = self.len();
        if index < len {
            // SAFETY: `data` will catch multiple attempted writes.
            self.data.set(index, val);
        } else {
            // Bug fix: this message previously printed `capacity` while
            // labelling the value `len`.
            panic!(
                "index {} must be < len {} (did you forget to `reserve_uninit` first?)",
                index, len
            );
        }
    }

    /// Returns a reference to the initialized element at `index`.
    ///
    /// # Panics
    ///
    /// Panics (via `data.get`) if `index` is out of bounds or the slot is
    /// uninitialized.
    pub fn get(
        &self,
        index: usize,
    ) -> &T {
        // SAFETY: `data` will catch index out of bounds or attempts to read uninitialized memory.
        self.data.get(index)
    }

    /// Returns the elements in `range` as a slice without verifying that each
    /// one is initialized.
    ///
    /// # Safety
    ///
    /// Every slot in `range` must already have been initialized via `push` or
    /// `set`; reading an uninitialized slot is undefined behavior.
    ///
    /// # Panics
    ///
    /// Panics if `range.end` exceeds the current length.
    pub unsafe fn get_range_unchecked(
        &self,
        range: Range<usize>,
    ) -> &[T] {
        let len = self.len();
        if range.end > len {
            // Bug fix: the condition admits `range.end == len`, so the
            // message says "<=" (it previously claimed "<").
            panic!("end of range {} must be <= len {}", range.end, len);
        }
        &self.data.get_all_unchecked()[range]
    }

    /// Returns all elements as a slice without verifying that each one is
    /// initialized.
    ///
    /// # Safety
    ///
    /// Every slot must already have been initialized; in particular, all slots
    /// reserved with `reserve_uninit` must have been `set`.
    ///
    /// # Panics
    ///
    /// Panics unless the stack is full (`len == capacity`).
    pub unsafe fn get_all_unchecked(&self) -> &[T] {
        let len = self.len();
        let capacity = self.capacity();
        if len < capacity {
            panic!("length {} must be == capacity {}", len, capacity);
        }
        self.data.get_all_unchecked()
    }

    /// Total number of slots, initialized or not.
    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }

    /// Number of slots handed out so far by `push`/`reserve_uninit`.
    /// Slots that were reserved but not yet `set` are counted.
    pub fn len(&self) -> usize {
        self.last_index.load(Ordering::Acquire)
    }

    /// Borrowing iterator over indices `0..len()`.
    pub fn iter(&self) -> Iter<T> {
        self.into_iter()
    }
}
impl<'a, T> IntoIterator for &'a AtomicOnceCellStack<T> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
Iter::new(self)
}
}
/// Borrowing iterator over the elements of an `AtomicOnceCellStack`.
pub struct Iter<'a, T> {
    // The stack being iterated.
    source: &'a AtomicOnceCellStack<T>,
    // Index of the next element to yield; iteration stops at `source.len()`.
    next_index: usize,
}
impl<'a, T> Iter<'a, T> {
    /// Creates an iterator positioned at the start of `source`.
    #[inline]
    pub fn new(source: &'a AtomicOnceCellStack<T>) -> Self {
        let next_index = 0;
        Self { source, next_index }
    }
}
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    /// Yields a reference to each element in index order, stopping once the
    /// cursor reaches the stack's current length.
    fn next(&mut self) -> Option<Self::Item> {
        let index = self.next_index;
        // Guard clause: past the end of the initialized prefix, we are done.
        if index >= self.source.len() {
            return None;
        }
        self.next_index = index + 1;
        Some(self.source.get(index))
    }
}