use compressed_intvec::atomic_fixed_vec;
use compressed_intvec::fixed::atomic::{SAtomicFixedVec, UAtomicFixedVec};
use compressed_intvec::fixed::{BitWidth, Error, FixedVec, UFixedVec};
use rand::{RngExt, rngs::SmallRng, SeedableRng};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
/// Builds a deterministic pseudo-random `Vec<u64>` of `size` elements
/// (fixed seed 42, so every test run sees identical data).
///
/// `max_val_exclusive == 0` is a sentinel meaning "full `u64` range";
/// otherwise values are drawn from `0..max_val_exclusive`.
fn generate_random_vec(size: usize, max_val_exclusive: u64) -> Vec<u64> {
    let mut rng = SmallRng::seed_from_u64(42);
    match max_val_exclusive {
        0 => std::iter::repeat_with(|| rng.random::<u64>())
            .take(size)
            .collect(),
        bound => std::iter::repeat_with(|| rng.random_range(0..bound))
            .take(size)
            .collect(),
    }
}
/// Builds a deterministic pseudo-random `Vec<i64>` of `size` elements
/// drawn from `-max_abs_val..max_abs_val` (fixed seed 42).
///
/// Mirrors `generate_random_vec`: `max_abs_val == 0` means "full `i64`
/// range" instead of panicking on the empty range `0..0`. A negative
/// `max_abs_val` still panics (empty range), as before — that is a
/// caller bug.
fn generate_random_signed_vec(size: usize, max_abs_val: i64) -> Vec<i64> {
    let mut rng = SmallRng::seed_from_u64(42);
    if max_abs_val == 0 {
        // Consistent with the unsigned generator's 0-as-full-range sentinel.
        return (0..size).map(|_| rng.random::<i64>()).collect();
    }
    (0..size)
        .map(|_| rng.random_range(-max_abs_val..max_abs_val))
        .collect()
}
/// Generates a `#[test]` that exercises the core atomic API of
/// `UAtomicFixedVec<$T>` — construction (builder and `TryFrom`), `load`,
/// `store`, `swap`, and `compare_exchange` — for one element type.
///
/// `$max_val` is forwarded to the data generators (0 means full range for
/// the unsigned generator).
macro_rules! test_atomic_api_for_type {
    ($test_name:ident, $T:ty, $is_signed:ident, $max_val:expr) => {
        #[test]
        fn $test_name() {
            // 256 deterministic pseudo-random values of the target type.
            let data: Vec<$T> = if $is_signed {
                generate_random_signed_vec(256, $max_val as i64)
                    .into_iter()
                    .map(|x| x as $T)
                    .collect()
            } else {
                generate_random_vec(256, $max_val)
                    .into_iter()
                    .map(|x| x as $T)
                    .collect()
            };

            // Construction via the builder.
            let vec_builder = UAtomicFixedVec::<$T>::builder().build(&data).unwrap();
            assert_eq!(vec_builder.len(), data.len());
            // Guard with the index actually read (was `!data.is_empty()`,
            // which would panic on inputs of length 1..=10).
            if data.len() > 10 {
                assert_eq!(
                    vec_builder.load(10, Ordering::Relaxed),
                    data[10],
                    "Builder load failed for {}",
                    stringify!($T)
                );
            }

            // Construction via `TryFrom` must agree with the builder.
            let vec_tryfrom = UAtomicFixedVec::<$T>::try_from(data.as_slice()).unwrap();
            assert_eq!(vec_builder.bit_width(), vec_tryfrom.bit_width());
            assert_eq!(vec_tryfrom.len(), data.len());
            // Same fix as above: guard the index actually read.
            if data.len() > 20 {
                assert_eq!(
                    vec_tryfrom.load(20, Ordering::Relaxed),
                    data[20],
                    "TryFrom load failed for {}",
                    stringify!($T)
                );
            }

            let vec = vec_tryfrom;
            if data.len() < 3 {
                return;
            }

            // store / load / swap / compare_exchange round-trip on slot 0.
            // NOTE(review): the failed-CAS check below assumes val0 != val2
            // for the seeded data; holds for seed 42 but worth keeping in
            // mind if the seed or generators change.
            let val0 = data[0];
            let val1 = data[1];
            let val2 = data[2];
            vec.store(0, val0, Ordering::SeqCst);
            assert_eq!(vec.load(0, Ordering::SeqCst), val0);

            let old = vec.swap(0, val1, Ordering::SeqCst);
            assert_eq!(old, val0);
            assert_eq!(vec.load(0, Ordering::SeqCst), val1);

            // Successful CAS returns Ok(previous value).
            let result = vec.compare_exchange(0, val1, val2, Ordering::SeqCst, Ordering::Relaxed);
            assert_eq!(result, Ok(val1));
            assert_eq!(vec.load(0, Ordering::SeqCst), val2);

            // Failed CAS (stale expected value) returns Err(current) and
            // leaves the slot untouched.
            let result_fail =
                vec.compare_exchange(0, val0, val1, Ordering::SeqCst, Ordering::Relaxed);
            assert_eq!(result_fail, Err(val2));
            assert_eq!(vec.load(0, Ordering::SeqCst), val2);
        }
    };
}
/// Generates a `#[test]` covering every read-modify-write operation of
/// `UAtomicFixedVec<$T>` (`fetch_add/sub/and/or/xor/max/min`) on a fresh
/// single-element vector per operation, so results never compound.
macro_rules! test_atomic_rmw_for_type {
    ($test_name:ident, $T:ty, $v1_lit:expr, $v2_lit:expr, $v3_lit:expr) => {
        #[test]
        fn $test_name() {
            let v1 = $v1_lit as $T;
            let v2 = $v2_lit as $T;
            let v3 = $v3_lit as $T;
            let builder = UAtomicFixedVec::<$T>::builder().bit_width(BitWidth::Explicit(32));
            // Every op starts from a fresh one-element vector holding v1.
            let fresh = || builder.clone().build(&[v1]).unwrap();

            let vec = fresh();
            assert_eq!(vec.fetch_add(0, v2, Ordering::SeqCst), v1, "fetch_add: wrong return value");
            assert_eq!(
                vec.load(0, Ordering::Relaxed),
                v1.wrapping_add(v2),
                "fetch_add: wrong final value"
            );

            let vec = fresh();
            assert_eq!(vec.fetch_sub(0, v2, Ordering::SeqCst), v1, "fetch_sub: wrong return value");
            assert_eq!(
                vec.load(0, Ordering::Relaxed),
                v1.wrapping_sub(v2),
                "fetch_sub: wrong final value"
            );

            let vec = fresh();
            assert_eq!(vec.fetch_and(0, v3, Ordering::SeqCst), v1, "fetch_and: wrong return value");
            assert_eq!(vec.load(0, Ordering::Relaxed), v1 & v3, "fetch_and: wrong final value");

            let vec = fresh();
            assert_eq!(vec.fetch_or(0, v3, Ordering::SeqCst), v1, "fetch_or: wrong return value");
            assert_eq!(vec.load(0, Ordering::Relaxed), v1 | v3, "fetch_or: wrong final value");

            let vec = fresh();
            assert_eq!(vec.fetch_xor(0, v3, Ordering::SeqCst), v1, "fetch_xor: wrong return value");
            assert_eq!(vec.load(0, Ordering::Relaxed), v1 ^ v3, "fetch_xor: wrong final value");

            let vec = fresh();
            assert_eq!(vec.fetch_max(0, v2, Ordering::SeqCst), v1, "fetch_max: wrong return value");
            assert_eq!(vec.load(0, Ordering::Relaxed), v1.max(v2), "fetch_max: wrong final value");

            let vec = fresh();
            assert_eq!(vec.fetch_min(0, v2, Ordering::SeqCst), v1, "fetch_min: wrong return value");
            assert_eq!(vec.load(0, Ordering::Relaxed), v1.min(v2), "fetch_min: wrong final value");
        }
    };
}
// Core atomic API coverage for the unsigned element widths.
test_atomic_api_for_type!(test_api_u8, u8, false, u8::MAX as u64);
test_atomic_api_for_type!(test_api_u16, u16, false, u16::MAX as u64);
test_atomic_api_for_type!(test_api_u32, u32, false, u32::MAX as u64);
test_atomic_api_for_type!(test_api_u64, u64, false, 0); // 0 == full u64 range
test_atomic_rmw_for_type!(test_rmw_u32, u32, 100, 50, 0xF0);
// Core atomic API coverage for the signed element widths.
test_atomic_api_for_type!(test_api_i8, i8, true, i8::MAX as u64);
test_atomic_api_for_type!(test_api_i16, i16, true, i16::MAX as u64);
test_atomic_api_for_type!(test_api_i32, i32, true, i32::MAX as u64);
test_atomic_api_for_type!(test_api_i64, i64, true, i64::MAX as u64);
test_atomic_rmw_for_type!(test_rmw_i32, i32, -100, 50, 0xF0);
/// The `atomic_fixed_vec!` macro: list form, `[value; count]` repeat form,
/// and empty invocation. The binding annotations double as type assertions.
#[test]
fn test_atomic_fixed_vec_macro() {
    // List form: signed literals produce a signed atomic vector.
    let vec: SAtomicFixedVec<i32> = atomic_fixed_vec![-10i32, 20, -30];
    assert_eq!(vec.len(), 3);
    assert_eq!(vec.load(0, Ordering::Relaxed), -10);
    assert_eq!(vec.load(2, Ordering::Relaxed), -30);

    // Repeat form: 100 copies of 42.
    let vec_rep: UAtomicFixedVec<u64> = atomic_fixed_vec![42u64; 100];
    assert_eq!(vec_rep.len(), 100);
    assert_eq!(vec_rep.load(99, Ordering::Relaxed), 42);

    // Empty invocation yields an empty vector.
    let empty_vec: UAtomicFixedVec<u8> = atomic_fixed_vec![];
    assert!(empty_vec.is_empty());
}
/// Builder error paths: a value too large for an explicit width, and a
/// width beyond the 64-bit maximum.
#[test]
fn test_builder_failures() {
    // 50 does not fit in 4 bits.
    let data: &[u32] = &[10, 20, 50];
    let too_narrow = UAtomicFixedVec::<u32>::builder()
        .bit_width(BitWidth::Explicit(4))
        .build(data);
    assert!(matches!(too_narrow, Err(Error::ValueTooLarge { .. })));

    // 65 bits is invalid even when there is no data to encode.
    let too_wide = UAtomicFixedVec::<u64>::builder()
        .bit_width(BitWidth::Explicit(65))
        .build(&[]);
    assert!(matches!(too_wide, Err(Error::InvalidParameters(_))));
}
/// A zero bit width is rejected for non-empty input but accepted for an
/// empty one (nothing needs encoding).
#[test]
fn test_edge_case_zero_bit_width() {
    // Non-empty data with width 0 must be rejected.
    let zeros = vec![0u32; 100];
    let rejected = UAtomicFixedVec::<u32>::builder()
        .bit_width(BitWidth::Explicit(0))
        .build(&zeros);
    assert!(matches!(rejected, Err(Error::InvalidParameters(_))));

    // Empty data with width 0 succeeds and yields an empty vector.
    let no_data: Vec<u32> = Vec::new();
    let empty = UAtomicFixedVec::<u32>::builder()
        .bit_width(BitWidth::Explicit(0))
        .build(&no_data)
        .unwrap();
    assert!(empty.is_empty());
}
/// Round-trip conversion: `UFixedVec` -> `UAtomicFixedVec` -> `FixedVec`
/// preserves bit width, length, and element values.
#[test]
fn test_from_conversions() {
    let data: Vec<u32> = (0..100).collect();
    let fixed_vec: UFixedVec<u32> = FixedVec::builder().build(&data).unwrap();
    // Capture the properties that must survive both conversions.
    let bit_width = fixed_vec.bit_width();
    let len = fixed_vec.len();

    // Non-atomic -> atomic.
    let atomic_vec = UAtomicFixedVec::<u32>::from(fixed_vec);
    assert_eq!(atomic_vec.bit_width(), bit_width);
    assert_eq!(atomic_vec.len(), len);
    assert_eq!(atomic_vec.load(50, Ordering::Relaxed), 50);

    // Atomic -> non-atomic.
    let round_tripped = FixedVec::from(atomic_vec);
    assert_eq!(round_tripped.bit_width(), bit_width);
    assert_eq!(round_tripped.len(), len);
    assert_eq!(round_tripped.get(50), Some(50));
}
/// Non-atomic read conveniences: `get`, `iter`, and iterating `&vec`
/// directly, plus their behavior on an empty vector.
#[test]
fn test_ergonomic_read_apis() {
    let data: Vec<u32> = (0..100).collect();
    let vec = UAtomicFixedVec::<u32>::builder().build(&data).unwrap();

    // Checked indexing.
    assert_eq!(vec.get(50), Some(50));

    // `iter()` yields every element in order.
    let via_iter: Vec<u32> = vec.iter().collect();
    assert_eq!(via_iter, data);

    // `&vec` is iterable too (IntoIterator on the reference).
    let mut via_ref = Vec::new();
    for item in &vec {
        via_ref.push(item);
    }
    assert_eq!(via_ref, data);

    // Read APIs on an empty vector are well-behaved.
    let empty_vec: UAtomicFixedVec<u8> = atomic_fixed_vec![];
    assert!(empty_vec.get(0).is_none());
    assert_eq!(empty_vec.iter().next(), None);
}
/// Each thread stores into its own disjoint chunk; afterwards every slot
/// must hold exactly the value its owning thread wrote. This stresses that
/// stores to distinct packed elements do not corrupt neighbors sharing a word.
#[test]
fn test_concurrent_disjoint_stores() {
    const NUM_THREADS: usize = 4;
    const LEN: usize = 1000;
    // 12 bits suffice: the largest stored value is 3 * 1000 + 999 = 3999 < 4096.
    let vec = UAtomicFixedVec::<u16>::builder()
        .bit_width(BitWidth::Explicit(12))
        .build(&vec![0; LEN])
        .unwrap();
    // Loop-invariant; NUM_THREADS divides LEN evenly.
    let chunk_size = LEN / NUM_THREADS;
    // Scoped threads may borrow `vec` directly — no Arc/clone-per-thread needed.
    thread::scope(|s| {
        for thread_id in 0..NUM_THREADS {
            let vec = &vec;
            s.spawn(move || {
                let start = thread_id * chunk_size;
                for i in start..start + chunk_size {
                    vec.store(i, (thread_id * 1000 + i) as u16, Ordering::SeqCst);
                }
            });
        }
    });
    // Verify every slot against the value its owner wrote.
    for thread_id in 0..NUM_THREADS {
        let start = thread_id * chunk_size;
        for i in start..start + chunk_size {
            assert_eq!(vec.load(i, Ordering::SeqCst), (thread_id * 1000 + i) as u16);
        }
    }
}
/// Hammers a single element with CAS-retry increments from many threads;
/// the final count must equal the total number of increments, proving no
/// update is lost under contention.
#[test]
fn test_concurrent_cas_contention() {
    const NUM_THREADS: usize = 10;
    const INCREMENTS_PER_THREAD: u32 = 1000;
    // 16 bits hold the final count: 10 * 1000 = 10_000 < 65_536.
    let vec = UAtomicFixedVec::<u32>::builder()
        .bit_width(BitWidth::Explicit(16))
        .build(&[0; 1])
        .unwrap();
    // Scoped threads may borrow `vec` directly — no Arc/clone-per-thread needed.
    thread::scope(|s| {
        for _ in 0..NUM_THREADS {
            let vec = &vec;
            s.spawn(move || {
                for _ in 0..INCREMENTS_PER_THREAD {
                    // Standard CAS retry loop: reload the observed value on failure.
                    let mut current = vec.load(0, Ordering::Relaxed);
                    while let Err(actual) = vec.compare_exchange(
                        0,
                        current,
                        current.wrapping_add(1),
                        Ordering::SeqCst,
                        Ordering::Relaxed,
                    ) {
                        current = actual;
                    }
                }
            });
        }
    });
    assert_eq!(
        vec.load(0, Ordering::SeqCst),
        NUM_THREADS as u32 * INCREMENTS_PER_THREAD
    );
}
/// Same lost-update check as the CAS test, but through `fetch_add`: the
/// final count must equal the total number of increments.
#[test]
fn test_concurrent_fetch_add_contention() {
    const NUM_THREADS: usize = 10;
    const INCREMENTS_PER_THREAD: u32 = 1000;
    // 16 bits hold the final count: 10 * 1000 = 10_000 < 65_536.
    let vec = UAtomicFixedVec::<u32>::builder()
        .bit_width(BitWidth::Explicit(16))
        .build(&[0; 1])
        .unwrap();
    // Scoped threads may borrow `vec` directly — no Arc/clone-per-thread needed.
    thread::scope(|s| {
        for _ in 0..NUM_THREADS {
            let vec = &vec;
            s.spawn(move || {
                for _ in 0..INCREMENTS_PER_THREAD {
                    vec.fetch_add(0, 1, Ordering::SeqCst);
                }
            });
        }
    });
    assert_eq!(
        vec.load(0, Ordering::SeqCst),
        NUM_THREADS as u32 * INCREMENTS_PER_THREAD
    );
}
/// Doubles every element through rayon's `par_iter_mut` proxy and verifies
/// each slot afterwards. Only built with the `parallel` feature.
#[test]
#[cfg(feature = "parallel")]
fn test_par_iter_mut_contention() {
    let data: Vec<u32> = (0..10_000).collect();
    // 15 bits hold the doubled maximum: 2 * 9_999 = 19_998 < 32_768.
    let vec: UAtomicFixedVec<u32> = UAtomicFixedVec::builder()
        .bit_width(BitWidth::Explicit(15))
        .build(&data)
        .unwrap();

    // Double every element in parallel via the mutable proxy.
    vec.par_iter_mut().for_each(|mut proxy| {
        *proxy *= 2;
    });

    // Check each slot against its expected doubled value.
    for (i, &original) in data.iter().enumerate().take(vec.len()) {
        assert_eq!(
            vec.load(i, Ordering::Relaxed),
            original * 2,
            "Mismatch at index {}",
            i
        );
    }
}