use crate::error::{Result, ZiporaError};
use crate::memory::simd_ops::{fast_copy, fast_fill, fast_compare};
use crate::simd::{AdaptiveSimdSelector, Operation};
use crate::zipora_verify;
use std::alloc::{self, Layout};
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut, Index, IndexMut};
use std::ptr::{self, NonNull};
use std::slice;
use std::time::Instant;
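/// Debug-mode check that `ptr` is non-null and satisfies `T`'s alignment requirement.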
#[inline]
fn check_alignment<T>(ptr: *mut u8) {
crate::zipora_verify_not_null!(ptr);
crate::zipora_verify_aligned!(ptr, mem::align_of::<T>());
}
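/// Casts a freshly allocated byte pointer to `*mut T`, verifying alignment in debug builds.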
#[inline]
fn cast_aligned_ptr<T>(ptr: *mut u8) -> *mut T {
check_alignment::<T>(ptr);
ptr as *mut T
}
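/// A type can be manipulated byte-wise by the SIMD paths only if it has no
/// `Drop` glue, so duplicating or overwriting its bytes cannot leak or
/// double-free resources.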
#[inline]
const fn is_simd_safe<T>() -> bool {
!mem::needs_drop::<T>()
}
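/// SIMD only pays off once the touched region is large enough; below the
/// 64-byte threshold the dispatch and setup cost outweighs the gain.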
#[inline]
const fn is_simd_beneficial<T>(element_count: usize) -> bool {
const SIMD_THRESHOLD: usize = 64;
element_count * mem::size_of::<T>() >= SIMD_THRESHOLD
}
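/// How far ahead (in elements) prefetch hints are issued relative to the
/// position currently being processed.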
const PREFETCH_DISTANCE: usize = 8;
struct PrefetchOps;
impl PrefetchOps {
#[inline]
fn prefetch_read<T>(ptr: *const T) {
#[cfg(target_arch = "x86_64")]
unsafe {
std::arch::x86_64::_mm_prefetch(
ptr as *const i8,
std::arch::x86_64::_MM_HINT_T0
);
}
#[cfg(target_arch = "aarch64")]
unsafe {
std::arch::asm!(
"prfm pldl1keep, [{0}]",
in(reg) ptr,
options(nostack, preserves_flags, readonly)
);
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
std::hint::black_box(ptr);
}
}
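/// Issues a read prefetch roughly once per cache line over `count` elements,
/// staying `distance` elements ahead; ranges no larger than `distance` are
/// skipped. Prefetching is only a hint and never affects correctness.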
#[inline]
fn prefetch_range<T>(start: *const T, count: usize, distance: usize) {
if count <= distance {
return;
}
const CACHE_LINE_SIZE: usize = 64;
let element_size = mem::size_of::<T>();
let elements_per_line = (CACHE_LINE_SIZE / element_size).max(1);
for i in (0..count).step_by(elements_per_line) {
if i + distance < count {
unsafe {
Self::prefetch_read(start.add(i + distance));
}
}
}
}
}
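/// Reinterprets a slice as raw bytes.
///
/// # Safety
/// `T` must be plain old data for the byte view to be meaningful; call sites
/// in this module gate on `is_simd_safe::<T>()`.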
#[inline]
unsafe fn slice_as_bytes<T>(slice: &[T]) -> &[u8] {
if slice.is_empty() {
&[]
} else {
unsafe {
slice::from_raw_parts(
slice.as_ptr() as *const u8,
slice.len() * mem::size_of::<T>(),
)
}
}
}
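/// Mutable counterpart of [`slice_as_bytes`].
///
/// # Safety
/// Same requirements as [`slice_as_bytes`]; additionally, any bytes written
/// must leave every element a valid `T`.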
#[inline]
unsafe fn slice_as_bytes_mut<T>(slice: &mut [T]) -> &mut [u8] {
if slice.is_empty() {
&mut []
} else {
unsafe {
slice::from_raw_parts_mut(
slice.as_mut_ptr() as *mut u8,
slice.len() * mem::size_of::<T>(),
)
}
}
}
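/// A growable, heap-allocated vector with fallible allocation and SIMD fast
/// paths for bulk fill, copy, and comparison of no-drop element types.
///
/// Unlike `Vec<T>`, operations that may allocate return `Result`, so
/// out-of-memory conditions surface as errors instead of aborting.
///
/// # Example
///
/// A minimal usage sketch, assuming `FastVec` is re-exported at the crate
/// root (path is an assumption, adjust to the actual export):
///
/// ```ignore
/// use zipora::FastVec;
///
/// let mut v: FastVec<u32> = FastVec::with_capacity(4).unwrap();
/// v.push(1).unwrap();
/// v.push(2).unwrap();
/// assert_eq!(v.as_slice(), &[1, 2]);
/// assert_eq!(v.pop(), Some(2));
/// ```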
pub struct FastVec<T> {
ptr: Option<NonNull<T>>,
len: usize,
cap: usize,
}
impl<T> FastVec<T> {
#[inline]
pub fn new() -> Self {
Self {
ptr: None,
len: 0,
cap: 0,
}
}
pub fn with_capacity(cap: usize) -> Result<Self> {
if cap == 0 {
return Ok(Self::new());
}
crate::zipora_verify!(cap <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"capacity {} too large for element size {}", cap, mem::size_of::<T>());
let layout = Layout::array::<T>(cap)
.map_err(|_| ZiporaError::out_of_memory(cap * mem::size_of::<T>()))?;
let ptr = unsafe {
let raw_ptr = alloc::alloc(layout);
crate::zipora_verify_alloc!(raw_ptr, layout.size());
cast_aligned_ptr::<T>(raw_ptr)
};
Ok(Self {
ptr: Some(unsafe { NonNull::new_unchecked(ptr) }),
len: 0,
cap,
})
}
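/// Creates a vector of `cap` zero-initialized elements with `len == cap`.
///
/// Note (an assumption documented here, not enforced by the type system):
/// this is only sound for element types where the all-zero bit pattern is a
/// valid value, such as integers; it must not be used with types like
/// `NonNull`.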
pub fn with_capacity_zeroed(cap: usize) -> Result<Self> {
if cap == 0 {
return Ok(Self::new());
}
crate::zipora_verify!(cap <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"capacity {} too large for element size {}", cap, mem::size_of::<T>());
let layout = Layout::array::<T>(cap)
.map_err(|_| ZiporaError::out_of_memory(cap * mem::size_of::<T>()))?;
let ptr = unsafe {
let raw_ptr = alloc::alloc_zeroed(layout);
crate::zipora_verify_alloc!(raw_ptr, layout.size());
cast_aligned_ptr::<T>(raw_ptr)
};
Ok(Self {
ptr: Some(unsafe { NonNull::new_unchecked(ptr) }),
len: cap,
cap,
})
}
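/// Takes ownership of a `Vec<T>`'s buffer without copying. An unallocated
/// `Vec` hands over a dangling (non-null) pointer with `cap == 0`, which is
/// why `realloc` and `Drop` below special-case `cap == 0`.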
pub fn from_vec(vec: Vec<T>) -> Self {
let mut vec = std::mem::ManuallyDrop::new(vec);
let ptr = vec.as_mut_ptr();
let len = vec.len();
let cap = vec.capacity();
Self {
ptr: NonNull::new(ptr),
len,
cap,
}
}
pub fn with_size(size: usize, value: T) -> Result<Self>
where
T: Clone,
{
let mut vec = Self::with_capacity(size)?;
vec.resize(size, value)?;
Ok(vec)
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
#[inline]
pub fn capacity(&self) -> usize {
self.cap
}
#[inline]
pub fn as_ptr(&self) -> *const T {
match self.ptr {
Some(ptr) => ptr.as_ptr(),
None => ptr::null(),
}
}
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
match self.ptr {
Some(ptr) => ptr.as_ptr(),
None => ptr::null_mut(),
}
}
#[inline]
pub fn as_slice(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
self.ptr.unwrap_or(NonNull::dangling()).as_ptr(),
self.len,
)
}
}
#[inline(always)]
pub fn as_mut_slice(&mut self) -> &mut [T] {
unsafe {
slice::from_raw_parts_mut(
self.ptr.unwrap_or(NonNull::dangling()).as_ptr(),
self.len,
)
}
}
pub fn reserve(&mut self, additional: usize) -> Result<()> {
crate::zipora_verify!(additional <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"additional capacity {} too large for element size {}", additional, mem::size_of::<T>());
let required = self
.len
.checked_add(additional)
.ok_or_else(|| ZiporaError::out_of_memory(usize::MAX))?;
if required <= self.cap {
return Ok(());
}
self.realloc(required)
}
pub fn ensure_capacity(&mut self, min_cap: usize) -> Result<()> {
crate::zipora_verify!(min_cap <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"minimum capacity {} too large for element size {}", min_cap, mem::size_of::<T>());
crate::zipora_verify_ge!(min_cap, self.len);
if min_cap <= self.cap {
return Ok(());
}
self.realloc(min_cap)
}
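/// Grows the backing buffer to hold at least `new_cap` elements, doubling
/// the current capacity when that is larger so `push` stays amortized O(1).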
fn realloc(&mut self, new_cap: usize) -> Result<()> {
crate::zipora_verify_ge!(new_cap, self.len);
crate::zipora_verify!(new_cap <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"new capacity {} too large for element size {}", new_cap, mem::size_of::<T>());
let target_cap = new_cap.max(self.cap.saturating_mul(2));
let new_layout = Layout::array::<T>(target_cap)
.map_err(|_| ZiporaError::out_of_memory(target_cap * mem::size_of::<T>()))?;
let new_ptr = match self.ptr {
Some(ptr) => {
if self.cap == 0 {
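// `ptr` can be a dangling, never-allocated pointer inherited from an
// empty `Vec` via `from_vec`; allocate fresh rather than calling
// `realloc` on memory that was never ours.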
unsafe {
let raw_ptr = alloc::alloc(new_layout);
if raw_ptr.is_null() {
std::ptr::null_mut()
} else {
cast_aligned_ptr::<T>(raw_ptr)
}
}
} else {
let old_layout = Layout::array::<T>(self.cap)
.expect("array layout overflow: capacity exceeds Layout limits");
unsafe {
let raw_ptr =
alloc::realloc(ptr.as_ptr() as *mut u8, old_layout, new_layout.size());
if raw_ptr.is_null() {
std::ptr::null_mut()
} else {
cast_aligned_ptr::<T>(raw_ptr)
}
}
}
}
None => unsafe {
let raw_ptr = alloc::alloc(new_layout);
if raw_ptr.is_null() {
std::ptr::null_mut()
} else {
cast_aligned_ptr::<T>(raw_ptr)
}
},
};
if new_ptr.is_null() {
return Err(ZiporaError::out_of_memory(new_layout.size()));
}
self.ptr = Some(unsafe { NonNull::new_unchecked(new_ptr) });
self.cap = target_cap;
Ok(())
}
pub fn push(&mut self, value: T) -> Result<()> {
crate::zipora_verify_le!(self.len, self.cap);
crate::zipora_verify!(self.len < (isize::MAX as usize),
"vector length {} would exceed maximum", self.len);
if self.len >= self.cap {
self.ensure_capacity(self.len + 1)?;
}
crate::zipora_verify_le!(self.len, self.cap);
crate::zipora_verify!(self.ptr.is_some() || self.len == 0,
"invalid state: non-null pointer required for len > 0");
unsafe {
ptr::write(self.as_mut_ptr().add(self.len), value);
}
self.len += 1;
Ok(())
}
pub fn pop(&mut self) -> Option<T> {
crate::zipora_verify_le!(self.len, self.cap);
if self.len == 0 {
None
} else {
crate::zipora_verify!(self.ptr.is_some(), "invalid state: null pointer with len > 0");
self.len -= 1;
Some(unsafe { ptr::read(self.as_ptr().add(self.len)) })
}
}
pub fn insert(&mut self, index: usize, value: T) -> Result<()> {
if index > self.len {
return Err(ZiporaError::out_of_bounds(index, self.len));
}
crate::zipora_verify_le!(index, self.len);
if self.len >= self.cap {
self.ensure_capacity(self.len + 1)?;
}
let move_count = self.len - index;
unsafe {
let ptr = self.as_mut_ptr().add(index);
if move_count > 0 {
ptr::copy(ptr, ptr.add(1), move_count);
}
ptr::write(ptr, value);
}
self.len += 1;
Ok(())
}
pub fn remove(&mut self, index: usize) -> Result<T> {
if index >= self.len {
return Err(ZiporaError::out_of_bounds(index, self.len));
}
let move_count = self.len - index - 1;
unsafe {
let ptr = self.as_mut_ptr().add(index);
let value = ptr::read(ptr);
if move_count > 0 {
ptr::copy(ptr.add(1), ptr, move_count);
}
self.len -= 1;
Ok(value)
}
}
pub fn resize(&mut self, new_len: usize, value: T) -> Result<()>
where
T: Clone,
{
crate::zipora_verify!(new_len <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"new length {} too large for element size {}", new_len, mem::size_of::<T>());
crate::zipora_verify_le!(self.len, self.cap);
if new_len > self.len {
self.ensure_capacity(new_len)?;
crate::zipora_verify_ge!(self.cap, new_len);
crate::zipora_verify!(self.ptr.is_some(), "invalid state: null pointer after capacity adjustment");
let fill_count = new_len - self.len;
if is_simd_safe::<T>() && is_simd_beneficial::<T>(fill_count) && mem::size_of::<T>() == 1 {
unsafe {
let fill_slice = slice::from_raw_parts_mut(
self.as_mut_ptr().add(self.len) as *mut u8,
fill_count,
);
fast_fill(fill_slice, *((&value) as *const T as *const u8));
}
} else {
for i in self.len..new_len {
unsafe {
ptr::write(self.as_mut_ptr().add(i), value.clone());
}
}
}
} else if new_len < self.len {
crate::zipora_verify!(self.ptr.is_some() || self.len == 0,
"invalid state: null pointer with elements to drop");
for i in new_len..self.len {
unsafe {
ptr::drop_in_place(self.as_mut_ptr().add(i));
}
}
}
self.len = new_len;
crate::zipora_verify_le!(self.len, self.cap);
Ok(())
}
pub fn resize_with<F>(&mut self, new_len: usize, f: F) -> Result<()>
where
F: FnMut() -> T,
{
crate::zipora_verify!(new_len <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"new length {} too large for element size {}", new_len, mem::size_of::<T>());
crate::zipora_verify_le!(self.len, self.cap);
if new_len > self.len {
self.ensure_capacity(new_len)?;
crate::zipora_verify_ge!(self.cap, new_len);
crate::zipora_verify!(self.ptr.is_some(), "invalid state: null pointer after capacity adjustment");
let mut closure = f;
for i in self.len..new_len {
unsafe {
ptr::write(self.as_mut_ptr().add(i), closure());
}
}
} else if new_len < self.len {
for i in new_len..self.len {
unsafe {
ptr::drop_in_place(self.as_mut_ptr().add(i));
}
}
}
self.len = new_len;
crate::zipora_verify_le!(self.len, self.cap);
Ok(())
}
pub fn clear(&mut self) {
crate::zipora_verify_le!(self.len, self.cap);
crate::zipora_verify!(self.ptr.is_some() || self.len == 0,
"invalid state: null pointer with elements to clear");
for i in 0..self.len {
unsafe {
ptr::drop_in_place(self.as_mut_ptr().add(i));
}
}
self.len = 0;
crate::zipora_verify_le!(self.len, self.cap);
}
pub fn shrink_to_fit(&mut self) -> Result<()> {
if self.len == self.cap {
return Ok(());
}
if self.len == 0 {
if let Some(ptr) = self.ptr {
unsafe {
let layout = Layout::array::<T>(self.cap)
.expect("array layout overflow: capacity exceeds Layout limits");
alloc::dealloc(ptr.as_ptr() as *mut u8, layout);
}
}
self.ptr = None;
self.cap = 0;
return Ok(());
}
let new_layout = Layout::array::<T>(self.len)
.map_err(|_| ZiporaError::out_of_memory(self.len * mem::size_of::<T>()))?;
let new_ptr = if let Some(ptr) = self.ptr {
let old_layout = Layout::array::<T>(self.cap)
.expect("array layout overflow: capacity exceeds Layout limits");
unsafe {
let raw_ptr =
alloc::realloc(ptr.as_ptr() as *mut u8, old_layout, new_layout.size());
if raw_ptr.is_null() {
std::ptr::null_mut()
} else {
cast_aligned_ptr::<T>(raw_ptr)
}
}
} else {
return Ok(());
};
if new_ptr.is_null() {
return Err(ZiporaError::out_of_memory(new_layout.size()));
}
self.ptr = Some(unsafe { NonNull::new_unchecked(new_ptr) });
self.cap = self.len;
Ok(())
}
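/// Returns a reference to the element at `index` without bounds checking.
///
/// # Safety
/// `index` must be less than `self.len()`.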
#[inline(always)]
pub unsafe fn get_unchecked(&self, index: usize) -> &T {
debug_assert!(index < self.len);
unsafe { &*self.ptr.unwrap_unchecked().as_ptr().add(index) }
}
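/// Returns a mutable reference to the element at `index` without bounds
/// checking.
///
/// # Safety
/// `index` must be less than `self.len()`.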
#[inline(always)]
pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
debug_assert!(index < self.len);
unsafe { &mut *self.ptr.unwrap_unchecked().as_ptr().add(index) }
}
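/// Appends every item from an exact-size iterator. For no-drop element
/// types and large batches, items are buffered into a `Vec` and appended
/// with a single SIMD bulk copy; otherwise they are written one by one.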
pub fn extend<I>(&mut self, iter: I) -> Result<()>
where
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
{
let mut iter = iter.into_iter();
let additional = iter.len();
self.reserve(additional)?;
if is_simd_safe::<T>() && is_simd_beneficial::<T>(additional) {
let items: Vec<T> = iter.collect();
if items.len() == additional {
unsafe {
let src_bytes = slice_as_bytes(&items);
let dst_bytes = slice_as_bytes_mut(slice::from_raw_parts_mut(
self.as_mut_ptr().add(self.len),
additional,
));
fast_copy(src_bytes, dst_bytes)?;
}
self.len += additional;
return Ok(());
}
// Defensive fallback: a misbehaving `ExactSizeIterator` reported the
// wrong length. Re-reserve for the actual count and append item by
// item so nothing is silently lost.
self.reserve(items.len())?;
for item in items {
unsafe {
ptr::write(self.as_mut_ptr().add(self.len), item);
self.len += 1;
}
}
} else {
for item in iter {
unsafe {
ptr::write(self.as_mut_ptr().add(self.len), item);
self.len += 1;
}
}
}
Ok(())
}
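/// Fills `self[start..end]` with `value`. Byte-sized element types above
/// the SIMD threshold take a vectorized fill with prefetching; other types
/// fall back to a scalar loop (with prefetch hints for longer ranges).
///
/// A minimal usage sketch, assuming `FastVec` is re-exported at the crate
/// root (path is an assumption):
///
/// ```ignore
/// use zipora::FastVec;
///
/// let mut v = FastVec::with_size(1024, 0u8).unwrap();
/// v.fill_range_fast(16, 1008, 0xAB).unwrap();
/// assert_eq!(v[15], 0);
/// assert_eq!(v[16], 0xAB);
/// assert_eq!(v[1008], 0);
/// ```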
pub fn fill_range_fast(&mut self, start: usize, end: usize, value: T) -> Result<()>
where
T: Copy,
{
if start > end || end > self.len {
return Err(ZiporaError::out_of_bounds(end, self.len));
}
crate::zipora_verify_le!(start, end);
crate::zipora_verify_le!(end, self.len);
crate::zipora_verify_le!(self.len, self.cap);
crate::zipora_verify!(self.ptr.is_some() || self.len == 0,
"invalid state: null pointer with data to fill");
if start == end {
return Ok(());
}
let range_len = end - start;
if is_simd_safe::<T>() && is_simd_beneficial::<T>(range_len) {
let selector = AdaptiveSimdSelector::global();
let _simd_impl = selector.select_optimal_impl(
Operation::MemZero,
range_len * mem::size_of::<T>(),
None,
);
let start_time = Instant::now();
if mem::size_of::<T>() == 1 {
unsafe {
let range_slice = slice::from_raw_parts_mut(
self.as_mut_ptr().add(start) as *mut u8,
range_len,
);
if range_len >= PREFETCH_DISTANCE * 8 {
PrefetchOps::prefetch_range(
range_slice.as_ptr(),
range_len,
PREFETCH_DISTANCE
);
}
fast_fill(range_slice, *((&value) as *const T as *const u8));
}
} else {
let range_slice = unsafe {
slice::from_raw_parts_mut(
self.as_mut_ptr().add(start),
range_len,
)
};
if range_len >= PREFETCH_DISTANCE * 2 {
for i in 0..range_len {
if i + PREFETCH_DISTANCE < range_len {
PrefetchOps::prefetch_read(&range_slice[i + PREFETCH_DISTANCE] as *const T);
}
range_slice[i] = value;
}
} else {
for item in range_slice.iter_mut() {
*item = value;
}
}
}
selector.monitor_performance(
Operation::MemZero,
start_time.elapsed(),
range_len as u64
);
} else {
let range_slice = &mut self.as_mut_slice()[start..end];
for item in range_slice.iter_mut() {
*item = value;
}
}
Ok(())
}
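/// Replaces the vector's contents with a copy of `src`, growing the buffer
/// if needed. Large `Copy` payloads take the SIMD copy path with read
/// prefetching on the source.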
pub fn copy_from_slice_fast(&mut self, src: &[T]) -> Result<()>
where
T: Copy,
{
crate::zipora_verify!(src.len() <= (isize::MAX as usize) / mem::size_of::<T>().max(1),
"source slice length {} too large for element size {}", src.len(), mem::size_of::<T>());
crate::zipora_verify_le!(self.len, self.cap);
if src.is_empty() {
return Ok(());
}
// `ensure_capacity` asserts `min_cap >= self.len`, so account for the
// current length when `src` is shorter than the existing contents
// (elements beyond `src.len()` are `Copy` and need no drop).
self.ensure_capacity(self.len.max(src.len()))?;
crate::zipora_verify_ge!(self.cap, src.len());
crate::zipora_verify!(self.ptr.is_some(), "invalid state: null pointer after capacity adjustment");
if is_simd_safe::<T>() && is_simd_beneficial::<T>(src.len()) {
let selector = AdaptiveSimdSelector::global();
let _simd_impl = selector.select_optimal_impl(
Operation::Copy,
src.len() * mem::size_of::<T>(),
None,
);
let start_time = Instant::now();
unsafe {
if src.len() >= PREFETCH_DISTANCE * 8 {
PrefetchOps::prefetch_range(
src.as_ptr(),
src.len(),
PREFETCH_DISTANCE
);
}
let src_bytes = slice_as_bytes(src);
let dst_bytes = slice_as_bytes_mut(slice::from_raw_parts_mut(
self.as_mut_ptr(),
src.len(),
));
fast_copy(src_bytes, dst_bytes)?;
}
selector.monitor_performance(
Operation::Copy,
start_time.elapsed(),
src.len() as u64
);
} else {
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), src.len());
}
}
self.len = src.len();
Ok(())
}
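/// Appends a copy of `src`, preferring the SIMD bulk-copy path for large
/// `Copy` payloads.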
pub fn extend_from_slice_fast(&mut self, src: &[T]) -> Result<()>
where
T: Copy,
{
if src.is_empty() {
return Ok(());
}
let old_len = self.len;
self.reserve(src.len())?;
if is_simd_safe::<T>() && is_simd_beneficial::<T>(src.len()) {
let selector = AdaptiveSimdSelector::global();
let _simd_impl = selector.select_optimal_impl(
Operation::Copy,
src.len() * mem::size_of::<T>(),
None,
);
let start_time = Instant::now();
unsafe {
if src.len() >= PREFETCH_DISTANCE * 8 {
PrefetchOps::prefetch_range(
src.as_ptr(),
src.len(),
PREFETCH_DISTANCE
);
}
let src_bytes = slice_as_bytes(src);
let dst_bytes = slice_as_bytes_mut(slice::from_raw_parts_mut(
self.as_mut_ptr().add(old_len),
src.len(),
));
fast_copy(src_bytes, dst_bytes)?;
}
selector.monitor_performance(
Operation::Copy,
start_time.elapsed(),
src.len() as u64
);
} else {
unsafe {
ptr::copy_nonoverlapping(
src.as_ptr(),
self.as_mut_ptr().add(old_len),
src.len(),
);
}
}
self.len += src.len();
Ok(())
}
}
impl<T> Default for FastVec<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Drop for FastVec<T> {
fn drop(&mut self) {
self.clear();
if let Some(ptr) = self.ptr {
if self.cap > 0 {
unsafe {
let layout = Layout::array::<T>(self.cap)
.expect("array layout overflow: capacity exceeds Layout limits");
alloc::dealloc(ptr.as_ptr() as *mut u8, layout);
}
}
}
}
}
impl<T> Deref for FastVec<T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl<T> DerefMut for FastVec<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut_slice()
}
}
impl<T> Index<usize> for FastVec<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
crate::zipora_verify_bounds!(index, self.len);
&self.as_slice()[index]
}
}
impl<T> IndexMut<usize> for FastVec<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
crate::zipora_verify_bounds!(index, self.len);
&mut self.as_mut_slice()[index]
}
}
impl<T: fmt::Debug> fmt::Debug for FastVec<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.as_slice()).finish()
}
}
impl<T: PartialEq> PartialEq for FastVec<T> {
fn eq(&self, other: &Self) -> bool {
if self.len != other.len {
return false;
}
if self.len == 0 {
return true;
}
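// Byte-wise comparison assumes bitwise equality matches `PartialEq` for
// `T`: no padding bytes and no distinct-but-equal encodings (floats, for
// example, have `0.0 == -0.0` with different bit patterns). Element types
// routed through this path should be plain integer-like POD.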
if is_simd_safe::<T>() && is_simd_beneficial::<T>(self.len) {
unsafe {
let self_bytes = slice_as_bytes(self.as_slice());
let other_bytes = slice_as_bytes(other.as_slice());
fast_compare(self_bytes, other_bytes) == 0
}
} else {
self.as_slice() == other.as_slice()
}
}
}
impl<T: Eq> Eq for FastVec<T> {}
impl<T: Clone> Clone for FastVec<T> {
fn clone(&self) -> Self {
// `Clone` cannot return a `Result`, so allocation failure must panic;
// silently returning a truncated clone would violate callers'
// expectations (e.g. that a clone compares equal to the original).
let mut new_vec = Self::with_capacity(self.len)
.expect("FastVec::clone: allocation failed");
for item in self.as_slice() {
new_vec
.push(item.clone())
.expect("FastVec::clone: push failed despite reserved capacity");
}
new_vec
}
}
unsafe impl<T: Send> Send for FastVec<T> {}
unsafe impl<T: Sync> Sync for FastVec<T> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new() {
let vec: FastVec<i32> = FastVec::new();
assert_eq!(vec.len(), 0);
assert_eq!(vec.capacity(), 0);
assert!(vec.is_empty());
}
#[test]
fn test_with_capacity() {
let vec: FastVec<i32> = FastVec::with_capacity(10).unwrap();
assert_eq!(vec.len(), 0);
assert_eq!(vec.capacity(), 10);
assert!(vec.is_empty());
}
#[test]
fn test_push_pop() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
vec.push(3).unwrap();
assert_eq!(vec.len(), 3);
assert_eq!(vec.pop(), Some(3));
assert_eq!(vec.pop(), Some(2));
assert_eq!(vec.len(), 1);
}
#[test]
fn test_index() {
let mut vec = FastVec::new();
vec.push(42).unwrap();
vec.push(84).unwrap();
assert_eq!(vec[0], 42);
assert_eq!(vec[1], 84);
vec[0] = 100;
assert_eq!(vec[0], 100);
}
#[test]
fn test_insert_remove() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(3).unwrap();
vec.insert(1, 2).unwrap();
assert_eq!(vec.as_slice(), &[1, 2, 3]);
let removed = vec.remove(1).unwrap();
assert_eq!(removed, 2);
assert_eq!(vec.as_slice(), &[1, 3]);
}
#[test]
fn test_resize() {
let mut vec = FastVec::new();
vec.resize(5, 42).unwrap();
assert_eq!(vec.len(), 5);
assert_eq!(vec.as_slice(), &[42, 42, 42, 42, 42]);
vec.resize(3, 0).unwrap();
assert_eq!(vec.len(), 3);
assert_eq!(vec.as_slice(), &[42, 42, 42]);
}
#[test]
fn test_clone() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
let cloned = vec.clone();
assert_eq!(vec.as_slice(), cloned.as_slice());
}
#[test]
fn test_clear() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
vec.clear();
assert_eq!(vec.len(), 0);
assert!(vec.is_empty());
}
#[test]
#[should_panic]
fn test_index_bounds() {
let vec: FastVec<i32> = FastVec::new();
let _ = vec[0];
}
#[test]
fn test_with_size() {
let vec = FastVec::with_size(5, 42).unwrap();
assert_eq!(vec.len(), 5);
assert_eq!(vec.capacity(), 5);
for i in 0..5 {
assert_eq!(vec[i], 42);
}
}
#[test]
fn test_pointers() {
let mut vec: FastVec<i32> = FastVec::new();
assert!(vec.as_ptr().is_null());
assert!(vec.as_mut_ptr().is_null());
vec.push(42).unwrap();
vec.push(84).unwrap();
assert!(!vec.as_ptr().is_null());
assert!(!vec.as_mut_ptr().is_null());
unsafe {
assert_eq!(*vec.as_ptr(), 42);
assert_eq!(*vec.as_ptr().add(1), 84);
}
}
#[test]
fn test_slices() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
vec.push(3).unwrap();
let slice = vec.as_slice();
assert_eq!(slice, &[1, 2, 3]);
let mut_slice = vec.as_mut_slice();
mut_slice[1] = 20;
assert_eq!(vec[1], 20);
}
#[test]
fn test_unsafe_access() {
let mut vec = FastVec::new();
vec.push(10).unwrap();
vec.push(20).unwrap();
vec.push(30).unwrap();
unsafe {
assert_eq!(*vec.get_unchecked(0), 10);
assert_eq!(*vec.get_unchecked(2), 30);
*vec.get_unchecked_mut(1) = 200;
assert_eq!(vec[1], 200);
}
}
#[test]
fn test_extend() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
let data = vec![3, 4, 5];
vec.extend(data).unwrap();
assert_eq!(vec.as_slice(), &[1, 2, 3, 4, 5]);
}
#[test]
fn test_reserve() {
let mut vec: FastVec<i32> = FastVec::new();
assert_eq!(vec.capacity(), 0);
vec.reserve(10).unwrap();
assert!(vec.capacity() >= 10);
let old_cap = vec.capacity();
vec.reserve(5).unwrap();
assert_eq!(vec.capacity(), old_cap);
}
#[test]
fn test_ensure_capacity() {
let mut vec: FastVec<i32> = FastVec::new();
vec.ensure_capacity(15).unwrap();
assert!(vec.capacity() >= 15);
let old_cap = vec.capacity();
vec.ensure_capacity(10).unwrap();
assert_eq!(vec.capacity(), old_cap);
}
#[test]
fn test_shrink_to_fit() {
let mut vec = FastVec::with_capacity(100).unwrap();
vec.push(1).unwrap();
vec.push(2).unwrap();
vec.push(3).unwrap();
assert!(vec.capacity() >= 100);
vec.shrink_to_fit().unwrap();
assert_eq!(vec.capacity(), 3);
assert_eq!(vec.as_slice(), &[1, 2, 3]);
let mut empty_vec: FastVec<i32> = FastVec::with_capacity(50).unwrap();
empty_vec.shrink_to_fit().unwrap();
assert_eq!(empty_vec.capacity(), 0);
assert!(empty_vec.as_ptr().is_null());
}
#[test]
fn test_out_of_bounds_errors() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
assert!(vec.insert(5, 100).is_err());
assert!(vec.remove(5).is_err());
assert!(vec.remove(2).is_err());
}
#[test]
fn test_memory_management() {
let mut vec = FastVec::new();
for i in 0..1000 {
vec.push(i).unwrap();
}
assert_eq!(vec.len(), 1000);
assert!(vec.capacity() >= 1000);
assert!(vec.capacity() < 2000);
}
#[test]
fn test_drop_elements() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
let counter = Arc::new(AtomicUsize::new(0));
#[derive(Clone)]
struct DropCounter {
counter: Arc<AtomicUsize>,
}
impl Drop for DropCounter {
fn drop(&mut self) {
self.counter.fetch_add(1, Ordering::SeqCst);
}
}
{
let mut vec = FastVec::new();
for _ in 0..5 {
vec.push(DropCounter {
counter: counter.clone(),
})
.unwrap();
}
vec.remove(2).unwrap();
assert_eq!(counter.load(Ordering::SeqCst), 1);
let resize_value = DropCounter {
counter: counter.clone(),
};
vec.resize(2, resize_value).unwrap();
assert_eq!(counter.load(Ordering::SeqCst), 4);
vec.clear();
assert_eq!(counter.load(Ordering::SeqCst), 6);
}
assert_eq!(counter.load(Ordering::SeqCst), 6);
}
#[test]
fn test_zero_capacity() {
let vec: FastVec<i32> = FastVec::with_capacity(0).unwrap();
assert_eq!(vec.capacity(), 0);
assert_eq!(vec.len(), 0);
assert!(vec.as_ptr().is_null());
}
#[test]
fn test_equality_and_debug() {
let mut vec1 = FastVec::new();
let mut vec2 = FastVec::new();
vec1.push(1).unwrap();
vec1.push(2).unwrap();
vec1.push(3).unwrap();
vec2.push(1).unwrap();
vec2.push(2).unwrap();
vec2.push(3).unwrap();
assert_eq!(vec1, vec2);
vec2.push(4).unwrap();
assert_ne!(vec1, vec2);
let debug_str = format!("{:?}", vec1);
assert!(debug_str.contains("1"));
assert!(debug_str.contains("2"));
assert!(debug_str.contains("3"));
}
#[test]
fn test_deref() {
let mut vec = FastVec::new();
vec.push(1).unwrap();
vec.push(2).unwrap();
vec.push(3).unwrap();
let slice: &[i32] = &vec;
assert_eq!(slice, &[1, 2, 3]);
let mut_slice: &mut [i32] = &mut vec;
mut_slice[1] = 20;
assert_eq!(vec[1], 20);
}
#[test]
fn test_send_sync() {
fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}
assert_send::<FastVec<i32>>();
assert_sync::<FastVec<i32>>();
}
#[test]
fn test_edge_cases() {
let mut vec = FastVec::new();
vec.push(2).unwrap();
vec.push(3).unwrap();
vec.insert(0, 1).unwrap();
assert_eq!(vec.as_slice(), &[1, 2, 3]);
vec.insert(3, 4).unwrap();
assert_eq!(vec.as_slice(), &[1, 2, 3, 4]);
assert_eq!(vec.remove(0).unwrap(), 1);
assert_eq!(vec.as_slice(), &[2, 3, 4]);
assert_eq!(vec.remove(2).unwrap(), 4);
assert_eq!(vec.as_slice(), &[2, 3]);
}
#[test]
fn test_large_allocation() {
let mut vec = FastVec::with_capacity(10000).unwrap();
for i in 0..10000 {
vec.push(i).unwrap();
}
assert_eq!(vec.len(), 10000);
assert_eq!(vec[9999], 9999);
}
mod alignment_tests {
use super::*;
use std::mem;
#[repr(align(1))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align1(u8);
#[repr(align(2))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align2(u16);
#[repr(align(4))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align4(u32);
#[repr(align(8))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align8(u64);
#[repr(align(16))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align16([u64; 2]);
#[repr(align(32))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Align32([u64; 4]);
fn verify_alignment<T>(ptr: *const T) {
let align = mem::align_of::<T>();
let addr = ptr as usize;
assert_eq!(
addr % align,
0,
"Pointer {:#x} is not aligned for type {} (requires {}-byte alignment)",
addr,
std::any::type_name::<T>(),
align
);
}
#[test]
fn test_alignment_1_byte() {
let mut vec = FastVec::<Align1>::new();
vec.push(Align1(42)).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..1000 {
vec.push(Align1(i as u8)).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 1001);
assert_eq!(vec[0], Align1(42));
assert_eq!(vec[1000], Align1(231));
}
#[test]
fn test_alignment_2_byte() {
let mut vec = FastVec::<Align2>::new();
vec.push(Align2(42)).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..1000 {
vec.push(Align2(i as u16)).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 1001);
assert_eq!(vec[0], Align2(42));
assert_eq!(vec[1000], Align2(999));
}
#[test]
fn test_alignment_4_byte() {
let mut vec = FastVec::<Align4>::new();
vec.push(Align4(42)).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..1000 {
vec.push(Align4(i as u32)).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 1001);
assert_eq!(vec[0], Align4(42));
assert_eq!(vec[1000], Align4(999));
}
#[test]
fn test_alignment_8_byte() {
let mut vec = FastVec::<Align8>::new();
vec.push(Align8(42)).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..1000 {
vec.push(Align8(i as u64)).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 1001);
assert_eq!(vec[0], Align8(42));
assert_eq!(vec[1000], Align8(999));
}
#[test]
fn test_alignment_16_byte() {
let mut vec = FastVec::<Align16>::new();
vec.push(Align16([42, 84])).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..500 {
vec.push(Align16([i as u64, (i * 2) as u64])).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 501);
assert_eq!(vec[0], Align16([42, 84]));
assert_eq!(vec[500], Align16([499, 998]));
}
#[test]
fn test_alignment_32_byte() {
let mut vec = FastVec::<Align32>::new();
vec.push(Align32([1, 2, 3, 4])).unwrap();
verify_alignment(vec.as_ptr());
for i in 0..200 {
vec.push(Align32([
i as u64,
i as u64 + 1,
i as u64 + 2,
i as u64 + 3,
]))
.unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 201);
assert_eq!(vec[0], Align32([1, 2, 3, 4]));
assert_eq!(vec[200], Align32([199, 200, 201, 202]));
}
#[test]
fn test_large_allocations_with_realloc() {
let mut vec8 = FastVec::<Align8>::new();
let mut vec16 = FastVec::<Align16>::new();
let mut vec32 = FastVec::<Align32>::new();
for i in 0..10000 {
vec8.push(Align8(i as u64)).unwrap();
verify_alignment(vec8.as_ptr());
if i % 2 == 0 {
vec16.push(Align16([i as u64, i as u64 + 1])).unwrap();
verify_alignment(vec16.as_ptr());
}
if i % 4 == 0 {
vec32
.push(Align32([
i as u64,
i as u64 + 1,
i as u64 + 2,
i as u64 + 3,
]))
.unwrap();
verify_alignment(vec32.as_ptr());
}
}
assert_eq!(vec8.len(), 10000);
assert_eq!(vec16.len(), 5000);
assert_eq!(vec32.len(), 2500);
assert_eq!(vec8[9999], Align8(9999));
assert_eq!(vec16[4999], Align16([9998, 9999]));
assert_eq!(vec32[2499], Align32([9996, 9997, 9998, 9999]));
verify_alignment(vec8.as_ptr());
verify_alignment(vec16.as_ptr());
verify_alignment(vec32.as_ptr());
}
#[test]
fn test_stress_allocation_cycles() {
let mut vec = FastVec::<Align16>::new();
for cycle in 0..100 {
for i in 0..100 {
vec.push(Align16([cycle as u64, i as u64])).unwrap();
verify_alignment(vec.as_ptr());
}
for _ in 0..50 {
vec.pop();
if !vec.is_empty() {
verify_alignment(vec.as_ptr());
}
}
assert_eq!(vec.len(), (cycle + 1) * 50);
if !vec.is_empty() {
verify_alignment(vec.as_ptr());
}
}
assert_eq!(vec.len(), 5000);
verify_alignment(vec.as_ptr());
}
#[test]
fn test_zero_to_nonzero_capacity_transitions() {
let mut vec1 = FastVec::<Align1>::new();
let mut vec8 = FastVec::<Align8>::new();
let mut vec16 = FastVec::<Align16>::new();
let mut vec32 = FastVec::<Align32>::new();
assert_eq!(vec1.capacity(), 0);
assert_eq!(vec8.capacity(), 0);
assert_eq!(vec16.capacity(), 0);
assert_eq!(vec32.capacity(), 0);
vec1.push(Align1(1)).unwrap();
vec8.push(Align8(8)).unwrap();
vec16.push(Align16([16, 17])).unwrap();
vec32.push(Align32([32, 33, 34, 35])).unwrap();
verify_alignment(vec1.as_ptr());
verify_alignment(vec8.as_ptr());
verify_alignment(vec16.as_ptr());
verify_alignment(vec32.as_ptr());
assert_eq!(vec1[0], Align1(1));
assert_eq!(vec8[0], Align8(8));
assert_eq!(vec16[0], Align16([16, 17]));
assert_eq!(vec32[0], Align32([32, 33, 34, 35]));
}
#[test]
fn test_shrink_to_fit_preserves_alignment() {
let mut vec = FastVec::<Align32>::with_capacity(1000).unwrap();
for i in 0..10 {
vec.push(Align32([i, i + 1, i + 2, i + 3])).unwrap();
}
verify_alignment(vec.as_ptr());
assert!(vec.capacity() >= 1000);
vec.shrink_to_fit().unwrap();
verify_alignment(vec.as_ptr());
assert_eq!(vec.capacity(), 10);
assert_eq!(vec.len(), 10);
for i in 0..10 {
assert_eq!(
vec[i],
Align32([i as u64, i as u64 + 1, i as u64 + 2, i as u64 + 3])
);
}
}
#[test]
fn test_reserve_preserves_alignment() {
let mut vec = FastVec::<Align16>::new();
vec.push(Align16([1, 2])).unwrap();
verify_alignment(vec.as_ptr());
vec.reserve(1000).unwrap();
verify_alignment(vec.as_ptr());
for i in 2..100 {
vec.push(Align16([i, i + 1])).unwrap();
verify_alignment(vec.as_ptr());
}
assert_eq!(vec.len(), 99);
assert_eq!(vec[0], Align16([1, 2]));
assert_eq!(vec[98], Align16([99, 100]));
}
#[test]
fn test_insert_remove_preserves_alignment() {
let mut vec = FastVec::<Align8>::new();
for i in 0..10 {
vec.push(Align8(i)).unwrap();
}
verify_alignment(vec.as_ptr());
vec.insert(5, Align8(999)).unwrap();
verify_alignment(vec.as_ptr());
assert_eq!(vec[4], Align8(4));
assert_eq!(vec[5], Align8(999));
assert_eq!(vec[6], Align8(5));
let removed = vec.remove(5).unwrap();
assert_eq!(removed, Align8(999));
verify_alignment(vec.as_ptr());
assert_eq!(vec[4], Align8(4));
assert_eq!(vec[5], Align8(5));
}
#[test]
fn test_resize_preserves_alignment() {
let mut vec = FastVec::<Align32>::new();
for i in 0..5 {
vec.push(Align32([i, i + 1, i + 2, i + 3])).unwrap();
}
verify_alignment(vec.as_ptr());
vec.resize(100, Align32([99, 100, 101, 102])).unwrap();
verify_alignment(vec.as_ptr());
assert_eq!(vec.len(), 100);
for i in 0..5 {
assert_eq!(
vec[i],
Align32([i as u64, i as u64 + 1, i as u64 + 2, i as u64 + 3])
);
}
for i in 5..100 {
assert_eq!(vec[i], Align32([99, 100, 101, 102]));
}
vec.resize(10, Align32([0, 0, 0, 0])).unwrap();
verify_alignment(vec.as_ptr());
assert_eq!(vec.len(), 10);
}
#[test]
fn test_mixed_alignment_stress() {
let mut vec = FastVec::<Align32>::new();
for round in 0..50 {
for i in 0..20 {
vec.push(Align32([
round as u64,
i as u64,
round as u64 + i as u64,
0,
]))
.unwrap();
verify_alignment(vec.as_ptr());
}
if vec.len() > 10 {
vec.insert(vec.len() / 2, Align32([888, 888, 888, 888]))
.unwrap();
verify_alignment(vec.as_ptr());
}
if vec.len() > 5 {
vec.remove(vec.len() - 1).unwrap();
verify_alignment(vec.as_ptr());
}
if round % 10 == 0 && vec.len() > 0 {
let new_size = vec.len() + 10;
vec.resize(new_size, Align32([777, 777, 777, 777])).unwrap();
verify_alignment(vec.as_ptr());
}
if round % 15 == 0 && vec.capacity() > vec.len() * 2 {
vec.shrink_to_fit().unwrap();
if !vec.is_empty() {
verify_alignment(vec.as_ptr());
}
}
}
if !vec.is_empty() {
verify_alignment(vec.as_ptr());
}
}
#[test]
fn test_debug_assertions_in_debug_mode() {
let mut vec = FastVec::<Align16>::new();
vec.push(Align16([1, 2])).unwrap();
let ptr = vec.as_ptr();
verify_alignment(ptr);
for i in 0..1000 {
vec.push(Align16([i as u64, i as u64 + 1])).unwrap();
}
verify_alignment(vec.as_ptr());
}
#[test]
fn test_pointer_cast_safety() {
let mut vec = FastVec::<Align32>::new();
assert!(vec.as_ptr().is_null());
assert!(vec.as_mut_ptr().is_null());
vec.push(Align32([1, 2, 3, 4])).unwrap();
let ptr = vec.as_ptr();
assert!(!ptr.is_null());
verify_alignment(ptr);
let mut_ptr = vec.as_mut_ptr();
assert!(!mut_ptr.is_null());
verify_alignment(mut_ptr);
for i in 0..100 {
vec.push(Align32([i, i + 1, i + 2, i + 3])).unwrap();
verify_alignment(vec.as_ptr());
verify_alignment(vec.as_mut_ptr());
}
}
#[test]
fn test_edge_case_alignment_boundary() {
let mut vec = FastVec::<Align32>::new();
vec.reserve(1).unwrap();
verify_alignment(vec.as_ptr());
vec.push(Align32([1, 2, 3, 4])).unwrap();
verify_alignment(vec.as_ptr());
vec.reserve(1000).unwrap();
verify_alignment(vec.as_ptr());
vec.shrink_to_fit().unwrap();
verify_alignment(vec.as_ptr());
assert_eq!(vec[0], Align32([1, 2, 3, 4]));
}
}
mod simd_tests {
use super::*;
#[test]
fn test_fill_range_fast_u8() {
let mut vec = FastVec::with_capacity(1000).unwrap();
vec.resize(1000, 0u8).unwrap();
vec.fill_range_fast(100, 900, 0xAA).unwrap();
for i in 0..100 {
assert_eq!(vec[i], 0u8);
}
for i in 100..900 {
assert_eq!(vec[i], 0xAA);
}
for i in 900..1000 {
assert_eq!(vec[i], 0u8);
}
}
#[test]
fn test_fill_range_fast_small() {
let mut vec = FastVec::with_capacity(10).unwrap();
vec.resize(10, 0u8).unwrap();
vec.fill_range_fast(2, 8, 0xFF).unwrap();
assert_eq!(vec[1], 0u8);
assert_eq!(vec[2], 0xFF);
assert_eq!(vec[7], 0xFF);
assert_eq!(vec[8], 0u8);
}
#[test]
fn test_fill_range_fast_bounds() {
let mut vec = FastVec::with_size(5, 42u8).unwrap();
assert!(vec.fill_range_fast(0, 10, 0xFF).is_err());
assert!(vec.fill_range_fast(3, 2, 0xFF).is_err());
assert!(vec.fill_range_fast(1, 4, 0xFF).is_ok());
assert_eq!(vec[0], 42);
assert_eq!(vec[1], 0xFF);
assert_eq!(vec[3], 0xFF);
assert_eq!(vec[4], 42);
}
#[test]
fn test_copy_from_slice_fast_large() {
let src: Vec<u8> = (0..1000).map(|i| (i % 256) as u8).collect();
let mut vec = FastVec::new();
vec.copy_from_slice_fast(&src).unwrap();
assert_eq!(vec.len(), 1000);
for i in 0..1000 {
assert_eq!(vec[i], (i % 256) as u8);
}
}
#[test]
fn test_copy_from_slice_fast_small() {
let src = vec![1u8, 2, 3, 4, 5];
let mut vec = FastVec::new();
vec.copy_from_slice_fast(&src).unwrap();
assert_eq!(vec.len(), 5);
assert_eq!(vec.as_slice(), &[1, 2, 3, 4, 5]);
}
#[test]
fn test_copy_from_slice_fast_empty() {
let src: Vec<u8> = vec![];
let mut vec = FastVec::new();
vec.copy_from_slice_fast(&src).unwrap();
assert_eq!(vec.len(), 0);
}
#[test]
fn test_extend_from_slice_fast_large() {
let mut vec = FastVec::new();
vec.push(255u8).unwrap();
let src: Vec<u8> = (0..1000).map(|i| (i % 256) as u8).collect();
vec.extend_from_slice_fast(&src).unwrap();
assert_eq!(vec.len(), 1001);
assert_eq!(vec[0], 255);
for i in 1..1001 {
assert_eq!(vec[i], ((i - 1) % 256) as u8);
}
}
#[test]
fn test_extend_from_slice_fast_small() {
let mut vec = FastVec::new();
vec.push(100u8).unwrap();
let src = vec![1u8, 2, 3, 4, 5];
vec.extend_from_slice_fast(&src).unwrap();
assert_eq!(vec.len(), 6);
assert_eq!(vec.as_slice(), &[100, 1, 2, 3, 4, 5]);
}
#[test]
fn test_simd_optimized_insert_large() {
let mut vec = FastVec::new();
for i in 0..2000u16 {
vec.push(i).unwrap();
}
vec.insert(1000, 9999u16).unwrap();
assert_eq!(vec.len(), 2001);
assert_eq!(vec[999], 999);
assert_eq!(vec[1000], 9999);
assert_eq!(vec[1001], 1000);
}
#[test]
fn test_simd_optimized_remove_large() {
let mut vec = FastVec::new();
for i in 0..2000u16 {
vec.push(i).unwrap();
}
let removed = vec.remove(1000).unwrap();
assert_eq!(removed, 1000);
assert_eq!(vec.len(), 1999);
assert_eq!(vec[999], 999);
assert_eq!(vec[1000], 1001);
}
#[test]
fn test_simd_optimized_resize_large() {
let mut vec: FastVec<u8> = FastVec::new();
vec.resize(2000, 0x42).unwrap();
assert_eq!(vec.len(), 2000);
for i in 0..2000 {
assert_eq!(vec[i], 0x42);
}
}
#[test]
fn test_simd_optimized_extend_large() {
let mut vec = FastVec::new();
vec.push(0u8).unwrap();
let data: Vec<u8> = (1..=2000).map(|i| (i % 256) as u8).collect();
vec.extend(data.into_iter()).unwrap();
assert_eq!(vec.len(), 2001);
assert_eq!(vec[0], 0);
for i in 1..=2000 {
assert_eq!(vec[i], ((i % 256) as u8));
}
}
#[test]
fn test_simd_optimized_partial_eq() {
let vec1: FastVec<u8> = {
let mut v = FastVec::new();
for i in 0..2000 {
v.push((i % 256) as u8).unwrap();
}
v
};
let vec2: FastVec<u8> = {
let mut v = FastVec::new();
for i in 0..2000 {
v.push((i % 256) as u8).unwrap();
}
v
};
let vec3: FastVec<u8> = {
let mut v = FastVec::new();
for i in 0..2000 {
v.push(((i + 1) % 256) as u8).unwrap();
}
v
};
assert_eq!(vec1, vec2);
assert_ne!(vec1, vec3);
}
#[test]
fn test_simd_with_different_types() {
let mut vec_u16 = FastVec::new();
let data_u16: Vec<u16> = (0..1000).map(|i| i as u16).collect();
vec_u16.extend_from_slice_fast(&data_u16).unwrap();
assert_eq!(vec_u16.len(), 1000);
let mut vec_u32 = FastVec::new();
let data_u32: Vec<u32> = (0..1000).map(|i| i as u32).collect();
vec_u32.extend_from_slice_fast(&data_u32).unwrap();
assert_eq!(vec_u32.len(), 1000);
let mut vec_u64 = FastVec::new();
let data_u64: Vec<u64> = (0..1000).map(|i| i as u64).collect();
vec_u64.extend_from_slice_fast(&data_u64).unwrap();
assert_eq!(vec_u64.len(), 1000);
}
#[test]
fn test_simd_thresholds() {
let mut small_vec: FastVec<u8> = FastVec::new();
small_vec.resize(10, 0).unwrap();
small_vec.fill_range_fast(0, 10, 0xFF).unwrap();
for i in 0..10 {
assert_eq!(small_vec[i], 0xFF);
}
let mut large_vec: FastVec<u8> = FastVec::new();
large_vec.resize(1000, 0).unwrap();
large_vec.fill_range_fast(0, 1000, 0xAA).unwrap();
for i in 0..1000 {
assert_eq!(large_vec[i], 0xAA);
}
}
#[test]
fn test_simd_safety_with_drop_types() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
let counter = Arc::new(AtomicUsize::new(0));
#[derive(Clone)]
struct DropCounter {
counter: Arc<AtomicUsize>,
}
impl Drop for DropCounter {
fn drop(&mut self) {
self.counter.fetch_add(1, Ordering::SeqCst);
}
}
let mut vec = FastVec::new();
for _ in 0..100 {
vec.push(DropCounter {
counter: counter.clone(),
})
.unwrap();
}
vec.insert(50, DropCounter {
counter: counter.clone(),
})
.unwrap();
vec.remove(25).unwrap();
assert_eq!(vec.len(), 100);
assert_eq!(counter.load(Ordering::SeqCst), 1);
}
#[test]
fn test_simd_memory_safety() {
let mut vec: FastVec<u8> = FastVec::new();
for size in [1, 16, 32, 63, 64, 65, 100, 256, 1000] {
vec.clear();
vec.resize(size, 0).unwrap();
for i in 0..size {
vec[i] = (i % 256) as u8;
}
if size > 10 {
vec.fill_range_fast(1, size - 1, 0xAA).unwrap();
assert_eq!(vec[0], 0);
if size > 1 {
assert_eq!(vec[size - 1], ((size - 1) % 256) as u8);
}
}
let copy_data: Vec<u8> = (0..size).map(|i| (i % 256) as u8).collect();
vec.copy_from_slice_fast(&copy_data).unwrap();
assert_eq!(vec.len(), size);
for i in 0..size {
assert_eq!(vec[i], (i % 256) as u8);
}
}
}
#[test]
fn test_simd_performance_characteristics() {
let mut vec: FastVec<u8> = FastVec::new();
let large_size = 10000;
vec.resize(large_size, 0x55).unwrap();
assert_eq!(vec.len(), large_size);
for i in 0..large_size {
assert_eq!(vec[i], 0x55);
}
vec.fill_range_fast(1000, 9000, 0xAA).unwrap();
for i in 1000..9000 {
assert_eq!(vec[i], 0xAA);
}
let source_data: Vec<u8> = (0..large_size).map(|i| (i % 256) as u8).collect();
vec.copy_from_slice_fast(&source_data).unwrap();
for i in 0..large_size {
assert_eq!(vec[i], (i % 256) as u8);
}
}
#[test]
fn test_adaptive_simd_integration() {
use crate::simd::AdaptiveSimdSelector;
let selector = AdaptiveSimdSelector::global();
println!("Hardware tier: {:?}", selector.hardware_tier());
let mut vec: FastVec<u8> = FastVec::with_capacity(5000).unwrap();
vec.resize(5000, 0).unwrap();
vec.fill_range_fast(0, 5000, 0x42).unwrap();
for i in 0..5000 {
assert_eq!(vec[i], 0x42);
}
}
#[test]
fn test_advanced_prefetching_patterns() {
let mut vec: FastVec<u64> = FastVec::new();
let src_data: Vec<u64> = (0..5000).collect();
vec.copy_from_slice_fast(&src_data).unwrap();
assert_eq!(vec.len(), 5000);
for i in 0..5000 {
assert_eq!(vec[i], i as u64);
}
}
#[test]
fn test_prefetch_distance_threshold() {
let mut vec: FastVec<u32> = FastVec::new();
let small_data: Vec<u32> = vec![1, 2, 3, 4, 5];
vec.extend_from_slice_fast(&small_data).unwrap();
assert_eq!(vec.as_slice(), &[1, 2, 3, 4, 5]);
vec.clear();
let large_data: Vec<u32> = (0..1000).collect();
vec.extend_from_slice_fast(&large_data).unwrap();
for i in 0..1000 {
assert_eq!(vec[i], i as u32);
}
}
#[test]
fn test_performance_monitoring() {
use crate::simd::AdaptiveSimdSelector;
let _selector = AdaptiveSimdSelector::global();
let mut vec: FastVec<u8> = FastVec::new();
let large_data: Vec<u8> = (0..10000).map(|i| (i % 256) as u8).collect();
vec.copy_from_slice_fast(&large_data).unwrap();
vec.fill_range_fast(0, 10000, 0xFF).unwrap();
for i in 0..10000 {
assert_eq!(vec[i], 0xFF);
}
}
#[test]
fn test_cross_platform_prefetch() {
let mut vec: FastVec<u16> = FastVec::new();
let data: Vec<u16> = (0..2000).collect();
vec.extend_from_slice_fast(&data).unwrap();
assert_eq!(vec.len(), 2000);
for i in 0..2000 {
assert_eq!(vec[i], i as u16);
}
}
}
}