use std::cell::UnsafeCell;
use std::ops::{Index, IndexMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
/// A vector that can be appended to through a shared reference.
///
/// Storage is a fixed set of lazily allocated, geometrically growing
/// "bins" (bin `a` holds `8 << a` elements), so existing elements never
/// move and references handed out earlier stay valid across pushes.
pub struct AppendOnlyVec<T> {
    /// Number of elements fully written and published to readers.
    count: AtomicUsize,
    /// Number of slots handed out to writers; may run ahead of `count`
    /// while pushes are still in flight.
    reserved: AtomicUsize,
    /// Base pointers of the bins; null means the bin is not yet allocated.
    /// The bin claiming a slot's first offset allocates it lazily.
    data: [UnsafeCell<*mut T>; BITS_USED - 1 - 3],
}
// SAFETY: the vector owns its elements outright, so moving the whole
// container to another thread is safe whenever `T: Send`.
unsafe impl<T: Send> Send for AppendOnlyVec<T> {}
// SAFETY: shared access hands out `&T` (requires `T: Sync`), and pushing
// through `&self` moves `T` values in from other threads (requires `T: Send`).
unsafe impl<T: Sync + Send> Sync for AppendOnlyVec<T> {}
// Number of bits in a `usize` on this target.
const BITS: usize = std::mem::size_of::<usize>() * 8;
// Number of index bits that can actually occur, which bounds how many
// bins we ever need. x86_64 only uses 48 bits of virtual address space,
// so indices beyond 2^48 are unreachable there.
#[cfg(target_arch = "x86_64")]
const BITS_USED: usize = 48;
#[cfg(all(not(target_arch = "x86_64"), target_pointer_width = "64"))]
const BITS_USED: usize = 64;
#[cfg(target_pointer_width = "32")]
const BITS_USED: usize = 32;
/// Maps a logical element index to `(bin, offset-within-bin)`.
///
/// Shifting the index up by 8 makes every bin span a power-of-two range,
/// so the bin number falls straight out of the position of the highest
/// set bit; subtracting 3 accounts for bin 0 starting at `2^3 = 8`.
const fn indices(i: usize) -> (u32, usize) {
    // Pretend the first 8 slots of a "virtual" smaller bin exist, so that
    // bin 0 holds exactly 8 real elements.
    let shifted = i + 8;
    // floor(log2(shifted)) - 3 picks the bin.
    let bin = (BITS as u32 - 1 - shifted.leading_zeros()) - 3;
    // Distance past the first slot of that bin.
    let offset = shifted - bin_size(bin);
    (bin, offset)
}
/// Capacity of bin number `array`: 8, 16, 32, … (doubling per bin).
const fn bin_size(array: u32) -> usize {
    8usize << array
}
/// Backs off after a failed attempt: brief exponential spinning for the
/// first few failures, then yields the CPU to let other threads progress.
fn spin_wait(failures: &mut usize) {
    *failures += 1;
    match *failures {
        // Spin 2, 4, then 8 iterations before giving up the time slice.
        n @ 1..=3 => {
            for _ in 0..(1_usize << n) {
                std::hint::spin_loop();
            }
        }
        _ => std::thread::yield_now(),
    }
}
#[test]
fn test_indices() {
    // Print the first few mappings for debugging test failures.
    for i in 0..32 {
        let (bin, off) = indices(i);
        println!("{:3}: {} {}", i, bin, off);
    }
    // Simulate walking the bins one slot at a time and check that
    // `indices` agrees with the simulation for the first 1000 slots.
    let mut array = 0;
    let mut offset = 0;
    for index in 1..=1000 {
        offset += 1;
        if offset >= bin_size(array) {
            offset = 0;
            array += 1;
        }
        assert_eq!(indices(index), (array, offset));
    }
}
impl<T> Default for AppendOnlyVec<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> AppendOnlyVec<T> {
    /// Iterates over the elements in insertion order.
    ///
    /// Only elements fully pushed before the call are yielded; pushes that
    /// race with the iteration are not observed.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &T> + ExactSizeIterator {
        // SAFETY: every index below `len()` refers to a slot whose write was
        // published before `count` advanced past it (Release/Acquire pair).
        (0..self.len()).map(|i| unsafe { self.get_unchecked(i) })
    }
    /// Returns the number of fully pushed, readable elements.
    #[inline]
    pub fn len(&self) -> usize {
        // Acquire pairs with the Release in `push`, making the element
        // writes visible to any thread that observes the new length.
        self.count.load(Ordering::Acquire)
    }
    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// The allocation layout of bin number `array`.
    fn layout(&self, array: u32) -> std::alloc::Layout {
        std::alloc::Layout::array::<T>(bin_size(array)).unwrap()
    }
    /// Reserves a slot, writes `val` into it, and returns its index.
    ///
    /// The element is NOT yet visible to readers; the caller must advance
    /// `count` past the returned index to publish it.
    fn pre_push(&self, val: T) -> usize {
        let idx = self.reserved.fetch_add(1, Ordering::Relaxed);
        let (array, offset) = indices(idx);
        // `idx - offset` is the first index of our bin; if fewer elements
        // than that are published, the bin may not be allocated yet.
        let ptr = if self.len() < 1 + idx - offset {
            if offset == 0 {
                // We reserved the first slot of a new bin, so allocating it
                // is our job; no other thread reads data[array] until our
                // element is published.
                let layout = self.layout(array);
                let ptr = unsafe { std::alloc::alloc(layout) } as *mut T;
                if ptr.is_null() {
                    // Allocation failed: abort via the global handler rather
                    // than writing through a null pointer below (UB).
                    std::alloc::handle_alloc_error(layout);
                }
                unsafe {
                    *self.data[array as usize].get() = ptr;
                }
                ptr
            } else {
                // Another thread owns offset 0 of this bin; spin until it
                // has allocated the bin and published its element.
                let mut failures = 0;
                while self.len() < 1 + idx - offset {
                    spin_wait(&mut failures);
                }
                unsafe { *self.data[array as usize].get() }
            }
        } else {
            // The bin already exists; just read its base pointer.
            unsafe { *self.data[array as usize].get() }
        };
        // SAFETY: `offset` is in bounds for this bin, and the fetch_add
        // above made slot `idx` exclusively ours.
        unsafe { (ptr.add(offset)).write(val) };
        idx
    }
    /// Appends an element through a shared reference, returning its index.
    pub fn push(&self, val: T) -> usize {
        let idx = self.pre_push(val);
        // Publish in reservation order: the CAS only succeeds once every
        // earlier push is published, then bumps `count` past our slot.
        let mut failures = 0;
        while self
            .count
            .compare_exchange(idx, idx + 1, Ordering::Release, Ordering::Relaxed)
            .is_err()
        {
            spin_wait(&mut failures);
        }
        idx
    }
    /// Appends every element of `iter` through a shared reference.
    pub fn extend(&self, iter: impl IntoIterator<Item = T>) {
        for val in iter {
            self.push(val);
        }
    }
    /// Appends an element through an exclusive reference, returning its index.
    ///
    /// Cheaper than [`Self::push`]: exclusive access rules out racing
    /// publishers, so a plain store replaces the CAS loop.
    pub fn push_mut(&mut self, val: T) -> usize {
        let idx = self.pre_push(val);
        self.count.store(idx + 1, Ordering::Relaxed);
        idx
    }
    // Workaround for array initialization in a `const fn`: repeating a
    // const item is allowed where repeating a non-Copy value is not.
    const EMPTY: UnsafeCell<*mut T> = UnsafeCell::new(std::ptr::null_mut());
    /// Creates a new, empty vector. Allocates nothing until the first push.
    pub const fn new() -> Self {
        AppendOnlyVec {
            count: AtomicUsize::new(0),
            reserved: AtomicUsize::new(0),
            data: [Self::EMPTY; BITS_USED - 1 - 3],
        }
    }
    /// Returns a reference to element `idx` without bounds checking.
    ///
    /// # Safety
    /// `idx` must be less than `self.len()`.
    unsafe fn get_unchecked(&self, idx: usize) -> &T {
        let (array, offset) = indices(idx);
        let ptr = *self.data[array as usize].get();
        &*ptr.add(offset)
    }
    /// Moves all elements into a `Vec`, consuming this vector's contents.
    pub fn into_vec(self) -> Vec<T> {
        let mut vec = Vec::with_capacity(self.len());
        for idx in 0..self.len() {
            let (array, offset) = indices(idx);
            // SAFETY: idx < len, so the slot is initialized. Each slot is
            // read exactly once, and `count` is zeroed below so `drop` will
            // not drop these elements a second time.
            let ptr = unsafe { *self.data[array as usize].get() };
            let value = unsafe { ptr.add(offset).read() };
            vec.push(value);
        }
        // Mark every slot as moved-out; `drop` still frees the bins.
        self.count.store(0, Ordering::Relaxed);
        vec
    }
}
impl<T> std::fmt::Debug for AppendOnlyVec<T>
where
    T: std::fmt::Debug,
{
    /// Formats like a slice: `[a, b, c]`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut list = f.debug_list();
        for item in self.iter() {
            list.entry(item);
        }
        list.finish()
    }
}
impl<T> Index<usize> for AppendOnlyVec<T> {
    type Output = T;
    /// Shared access to element `idx`.
    ///
    /// Panics if `idx` is out of bounds.
    fn index(&self, idx: usize) -> &Self::Output {
        assert!(idx < self.len());
        let (bin, slot) = indices(idx);
        // The assert above guarantees the bin was allocated and the slot
        // fully written, so the raw dereference below is in bounds.
        let base = unsafe { *self.data[bin as usize].get() };
        unsafe { &*base.add(slot) }
    }
}
impl<T> IndexMut<usize> for AppendOnlyVec<T> {
    /// Exclusive access to element `idx`.
    ///
    /// Panics if `idx` is out of bounds.
    fn index_mut(&mut self, idx: usize) -> &mut Self::Output {
        assert!(idx < self.len());
        let (bin, slot) = indices(idx);
        // Bounds-checked above, and `&mut self` rules out aliasing readers.
        let base = unsafe { *self.data[bin as usize].get() };
        unsafe { &mut *base.add(slot) }
    }
}
impl<T> Drop for AppendOnlyVec<T> {
    // Drops every published element, then frees the bin allocations.
    fn drop(&mut self) {
        // Drop each element below `count`; anything at or above it was
        // never published (or was moved out by `into_vec`, which zeroes
        // `count` precisely so this loop skips those slots).
        for idx in 0..self.len() {
            let (array, offset) = indices(idx);
            // SAFETY: idx < len, so this bin was allocated and the slot
            // was written by a completed push.
            let ptr = unsafe { *self.data[array as usize].get() };
            unsafe {
                std::ptr::drop_in_place(ptr.add(offset));
            }
        }
        // Free the bins. They are allocated strictly in order, so the
        // first null base pointer means no later bin exists either.
        for array in 0..self.data.len() as u32 {
            let ptr = unsafe { *self.data[array as usize].get() };
            if !ptr.is_null() {
                let layout = self.layout(array);
                // SAFETY: this pointer was allocated with exactly this layout.
                unsafe { std::alloc::dealloc(ptr as *mut u8, layout) };
            } else {
                break;
            }
        }
    }
}
impl<T> Clone for AppendOnlyVec<T>
where
    T: Clone,
{
    /// Deep-copies every element into a fresh vector, preserving order.
    fn clone(&self) -> Self {
        let mut copy = Self::new();
        for item in self.iter() {
            // Exclusive ownership of `copy` lets us take the cheap path.
            copy.push_mut(item.clone());
        }
        copy
    }
}
/// An owning iterator over the elements of an `AppendOnlyVec`.
///
/// Wraps a `Vec` iterator: `into_iter` first drains the elements into a
/// `Vec`, then this type simply delegates to its iterator.
#[derive(Debug)]
pub struct IntoIter<T>(std::vec::IntoIter<T>);
impl<T> Iterator for IntoIter<T> {
    type Item = T;
    // Delegate to the inner Vec iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
    // The inner iterator knows its exact length.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
impl<T> DoubleEndedIterator for IntoIter<T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.0.next_back()
    }
}
impl<T> ExactSizeIterator for IntoIter<T> {
    fn len(&self) -> usize {
        self.0.len()
    }
}
impl<T> IntoIterator for AppendOnlyVec<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    /// Consumes the vector, yielding owned elements in insertion order.
    fn into_iter(self) -> Self::IntoIter {
        let drained = self.into_vec();
        IntoIter(drained.into_iter())
    }
}
impl<T> FromIterator<T> for AppendOnlyVec<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let out = Self::new();
for x in iter {
let idx = out.pre_push(x);
out.count.store(idx + 1, Ordering::Relaxed);
}
out
}
}
impl<T> From<Vec<T>> for AppendOnlyVec<T> {
    /// Moves every element of `value` into a fresh `AppendOnlyVec`,
    /// preserving order.
    fn from(value: Vec<T>) -> Self {
        let out = Self::new();
        out.extend(value);
        out
    }
}
#[test]
fn test_pushing_and_indexing() {
    let vec = AppendOnlyVec::<usize>::new();
    for n in 0..50 {
        vec.push(n);
        // Length grows by one per push, and every earlier element is
        // still readable at its original index.
        assert_eq!(vec.len(), n + 1);
        for (expected, i) in (0..=n).enumerate() {
            assert_eq!(vec[i], expected);
        }
    }
    let collected: Vec<usize> = vec.iter().copied().collect();
    let reference: Vec<usize> = (0..50).collect();
    assert_eq!(collected, reference);
}
#[test]
fn test_parallel_pushing() {
    use std::sync::Arc;
    let v = Arc::new(AppendOnlyVec::<u64>::new());
    let mut threads = Vec::new();
    const N: u64 = 100;
    for thread_num in 0..N {
        let v = v.clone();
        threads.push(std::thread::spawn(move || {
            // `push` already returns usize, so no cast is needed below.
            let which1 = v.push(thread_num);
            let which2 = v.push(thread_num);
            assert_eq!(v[which1], thread_num);
            assert_eq!(v[which2], thread_num);
        }));
    }
    for t in threads {
        // Propagate panics from worker threads: `.ok()` would silently
        // swallow assertion failures that happened inside the closures.
        t.join().expect("worker thread panicked");
    }
    // Every thread pushed its number exactly twice.
    for thread_num in 0..N {
        assert_eq!(2, v.iter().copied().filter(|&x| x == thread_num).count());
    }
}
#[test]
fn test_into_vec() {
    // A guard type whose Drop asserts it was explicitly marked safe,
    // proving `into_vec` moves elements out instead of dropping them.
    struct SafeToDrop(bool);
    impl Drop for SafeToDrop {
        fn drop(&mut self) {
            assert!(self.0);
        }
    }
    let v = AppendOnlyVec::new();
    for _ in 0..50 {
        v.push(SafeToDrop(false));
    }
    let mut moved_out = v.into_vec();
    for guard in moved_out.iter_mut() {
        guard.0 = true;
    }
}
#[test]
fn test_push_then_index_mut() {
    let mut v = AppendOnlyVec::<usize>::new();
    for i in 0..1024 {
        v.push(i);
    }
    // Double each element in place through IndexMut.
    for i in 0..1024 {
        let slot = &mut v[i];
        *slot += i;
    }
    for (i, &x) in v.iter().enumerate() {
        assert_eq!(x, 2 * i);
    }
}
#[test]
fn test_from_vec() {
    // Round-trip Vec -> AppendOnlyVec -> Vec must preserve contents,
    // including the empty case.
    let cases = [vec![5_i32, 4, 3, 2, 1], Vec::new(), vec![1]];
    for v in cases {
        let aov = AppendOnlyVec::<i32>::from(v.clone());
        assert_eq!(v, aov.into_vec());
    }
}
#[test]
fn test_clone() {
    let original = AppendOnlyVec::<String>::new();
    for i in 0..1024 {
        original.push(format!("{}", i));
    }
    // A clone must have the same length and equal elements at every index.
    let copy = original.clone();
    assert_eq!(original.len(), copy.len());
    for i in 0..1024 {
        assert_eq!(original[i], copy[i]);
    }
}
#[test]
fn test_push_mut() {
    // The exclusive-access push must grow the length just like `push`.
    let mut v = AppendOnlyVec::new();
    for i in 0..1024 {
        v.push_mut(format!("{}", i));
    }
    assert_eq!(v.len(), 1024);
}