use std::fmt::{Display, Formatter};
use std::mem::ManuallyDrop;
use std::ops::{Deref, DerefMut, RangeBounds};
use std::{fmt, mem};
use crate::Vec64;
use crate::structs::shared_buffer::SharedBuffer;
use crate::traits::print::MAX_PREVIEW;
/// A contiguous run of `T` backed either by uniquely owned storage or by a
/// zero-copy view into a reference-counted [`SharedBuffer`].
///
/// Mutating operations (`push`, `clear`, `as_mut_slice`, …) first convert
/// shared storage into an owned `Vec64<T>` copy — see `make_owned_mut` —
/// so shared views behave as copy-on-write.
pub struct Buffer<T> {
    // Invariant: for `Storage::Shared`, `offset`/`len` describe a valid
    // `[T]` region inside the owner's bytes.
    storage: Storage<T>,
}
/// Backing storage for [`Buffer`].
enum Storage<T> {
    /// Uniquely owned, growable storage.
    Owned(Vec64<T>),
    /// Read-only view into a shared byte buffer.
    Shared {
        // Reference-counted owner of the underlying bytes.
        owner: SharedBuffer,
        // Start of the view and its length, both counted in `T` elements
        // (not bytes) — `as_slice` multiplies by `size_of::<T>()`.
        offset: usize,
        len: usize,
    },
}
impl<T: Clone> Buffer<T> {
#[inline]
pub fn from_slice(slice: &[T]) -> Self {
let mut v = Vec64::with_capacity(slice.len());
v.extend_from_slice(slice);
Buffer::from_vec64(v)
}
}
impl<T> Buffer<T> {
    /// Wraps an owned `Vec64<T>` without copying.
    #[inline]
    pub fn from_vec64(v: Vec64<T>) -> Self {
        Self {
            storage: Storage::Owned(v),
        }
    }

    /// Reinterprets the whole of `owner`'s byte contents as a `[T]` view.
    ///
    /// If the bytes are not 64-byte aligned, the data is copied into an
    /// owned `Vec64<T>` instead (a warning is printed to stderr).
    ///
    /// # Panics
    /// Panics if the byte pointer is not aligned for `T`, or if the byte
    /// length is not a multiple of `size_of::<T>()`.
    #[inline]
    pub fn from_shared(owner: SharedBuffer) -> Self {
        let bytes = owner.as_slice();
        let size_of_t = std::mem::size_of::<T>();
        let ptr_usize = bytes.as_ptr() as usize;
        let align = std::mem::align_of::<T>();
        // The shared fast path requires 64-byte alignment (the same
        // guarantee Vec64 provides); plain `T` alignment is the hard minimum.
        let needs_alignment = ptr_usize % 64 != 0;
        let correct_type_align = ptr_usize % align == 0;
        if needs_alignment {
            eprintln!(
                "Buffer::from_shared: underlying SharedBuffer {:p} not 64-byte aligned, cloning to owned Vec64<T>.",
                bytes.as_ptr()
            );
            assert_eq!(
                ptr_usize % align,
                0,
                "Underlying SharedBuffer is not properly aligned for T"
            );
            assert_eq!(
                bytes.len() % size_of_t,
                0,
                "Underlying SharedBuffer is not a valid T slice"
            );
            let len = bytes.len() / size_of_t;
            let mut v = Vec64::with_capacity(len);
            // SAFETY: alignment and length were asserted above; `v` has
            // capacity for `len` elements and cannot overlap `bytes`.
            unsafe {
                std::ptr::copy_nonoverlapping(bytes.as_ptr() as *const T, v.as_mut_ptr(), len);
                v.set_len(len);
            }
            return Buffer::from_vec64(v);
        }
        assert!(
            correct_type_align,
            "Underlying SharedBuffer is not properly aligned for T"
        );
        assert_eq!(
            bytes.len() % size_of_t,
            0,
            "Underlying SharedBuffer is not a valid T slice"
        );
        let len = bytes.len() / size_of_t;
        Self {
            storage: Storage::Shared {
                owner,
                offset: 0,
                len,
            },
        }
    }

    /// Creates a zero-copy view of `len` elements starting `offset` `T`
    /// elements into `owner`'s bytes.
    ///
    /// NOTE(review): unlike `from_shared`, no alignment or bounds checks are
    /// performed here — callers are presumably expected to pass validated
    /// values; confirm at call sites.
    #[inline]
    pub fn from_shared_column(owner: SharedBuffer, offset: usize, len: usize) -> Self {
        Self {
            storage: Storage::Shared { owner, offset, len },
        }
    }

    /// Builds a shared view from a raw pointer into `arc`'s data.
    ///
    /// Falls back to an owned copy (with a stderr warning) when `ptr` is not
    /// 64-byte aligned.
    ///
    /// # Safety
    /// `ptr` must point into the memory of `arc` and the `len * size_of::<T>()`
    /// bytes starting there must be valid, initialized `T` values. A pointer
    /// outside the `arc` region is caught by the `checked_sub` below and
    /// panics; validity of the data itself cannot be checked here.
    ///
    /// # Panics
    /// Panics if `ptr` is null, not aligned for `T`, or not inside `arc`.
    #[inline]
    pub unsafe fn from_shared_raw(arc: std::sync::Arc<[u8]>, ptr: *const T, len: usize) -> Self {
        assert!(!ptr.is_null());
        let align = std::mem::align_of::<T>();
        let ptr_usize = ptr as usize;
        let needs_alignment = ptr_usize % 64 != 0;
        let correct_type_align = ptr_usize % align == 0;
        if !correct_type_align {
            panic!(
                "Buffer::from_shared_raw: pointer {ptr:p} is not aligned to {} bytes",
                align
            );
        }
        if needs_alignment {
            eprintln!(
                "Buffer::from_shared_raw: pointer {ptr:p} is not 64-byte aligned, cloning to owned Vec64<T>."
            );
            let mut v = Vec64::with_capacity(len);
            // SAFETY: caller guarantees `ptr` addresses `len` valid `T`s;
            // `v` has capacity for `len` and does not overlap them.
            unsafe { std::ptr::copy_nonoverlapping(ptr, v.as_mut_ptr(), len) };
            unsafe { v.set_len(len) };
            return Buffer::from_vec64(v);
        }
        let shared = SharedBuffer::from_owner(arc);
        let base = shared.as_slice().as_ptr() as usize;
        let p = ptr_usize;
        // Translate the raw pointer into a byte offset within the owner;
        // a pointer before the region start panics here.
        let byte_offset = p
            .checked_sub(base)
            .expect("Buffer::from_shared_raw: pointer not in Arc<[u8]> region");
        let byte_len = len * std::mem::size_of::<T>();
        let owner_slice = shared.slice(byte_offset..byte_offset + byte_len);
        Self {
            storage: Storage::Shared {
                owner: owner_slice,
                offset: 0,
                len,
            },
        }
    }

    /// Creates a shared buffer of `len` elements backed by a fresh
    /// anonymous memfd (Linux only, `memfd` feature).
    #[cfg(all(target_os = "linux", feature = "memfd"))]
    pub fn from_memfd(name: &str, len: usize) -> std::io::Result<Self> {
        use crate::structs::shared_buffer::MemfdBuffer;
        let byte_size = len * std::mem::size_of::<T>();
        let memfd = MemfdBuffer::new(name, byte_size)?;
        let shared = SharedBuffer::from_memfd_owner(memfd);
        Ok(Self {
            storage: Storage::Shared {
                owner: shared,
                offset: 0,
                len,
            },
        })
    }

    /// Re-opens another process's memfd (via `creator_pid`/`fd`) as a shared
    /// buffer of `len` elements (Linux only, `memfd` feature).
    #[cfg(all(target_os = "linux", feature = "memfd"))]
    pub fn from_memfd_remote(
        creator_pid: u32,
        fd: std::os::unix::io::RawFd,
        len: usize,
    ) -> std::io::Result<Self> {
        use crate::structs::shared_buffer::MemfdBuffer;
        let byte_size = len * std::mem::size_of::<T>();
        let memfd = MemfdBuffer::reopen(creator_pid, fd, byte_size)?;
        let shared = SharedBuffer::from_memfd_owner(memfd);
        Ok(Self {
            storage: Storage::Shared {
                owner: shared,
                offset: 0,
                len,
            },
        })
    }

    /// Returns the backing memfd file descriptor, if this buffer is shared
    /// and memfd-backed; `None` for owned buffers.
    #[cfg(all(target_os = "linux", feature = "memfd"))]
    pub fn memfd_fd(&self) -> Option<i32> {
        match &self.storage {
            Storage::Shared { owner, .. } => owner.memfd_fd(),
            Storage::Owned(_) => None,
        }
    }

    /// Borrows the contents as a slice, regardless of storage kind.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        match &self.storage {
            Storage::Owned(vec) => vec.as_slice(),
            Storage::Shared { owner, offset, len } => {
                let bytes = owner.as_slice();
                let size_of_t = std::mem::size_of::<T>();
                // SAFETY: the Shared invariant guarantees `offset`/`len`
                // describe a valid, aligned `[T]` region of `bytes`.
                let ptr = unsafe { bytes.as_ptr().add(offset * size_of_t) };
                unsafe { std::slice::from_raw_parts(ptr as *const T, *len) }
            }
        }
    }

    /// Mutable slice access; a shared buffer is first copied into owned
    /// storage (copy-on-write).
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self.make_owned_mut().as_mut_slice()
    }

    /// Number of `T` elements in the buffer.
    #[inline]
    pub fn len(&self) -> usize {
        self.as_slice().len()
    }

    /// `true` when the buffer holds no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Appends an element (copy-on-write for shared storage).
    #[inline]
    pub fn push(&mut self, v: T) {
        self.make_owned_mut().push(v);
    }

    /// Removes all elements (copy-on-write for shared storage).
    #[inline]
    pub fn clear(&mut self) {
        self.make_owned_mut().clear();
    }

    /// Reserves space for at least `addl` more elements
    /// (copy-on-write for shared storage).
    #[inline]
    pub fn reserve(&mut self, addl: usize) {
        self.make_owned_mut().reserve(addl);
    }

    /// Capacity of the underlying storage. For a shared view there is no
    /// spare capacity, so this is simply the view length.
    #[inline]
    pub fn capacity(&self) -> usize {
        match &self.storage {
            Storage::Owned(vec) => vec.capacity(),
            Storage::Shared {
                owner: _,
                offset: _,
                len,
            } => {
                *len
            }
        }
    }

    /// Core of the copy-on-write behavior: returns the owned `Vec64`,
    /// materializing a copy of a shared view first if necessary.
    #[inline]
    fn make_owned_mut(&mut self) -> &mut Vec64<T> {
        if let Storage::Owned(ref mut vec) = self.storage {
            return vec;
        }
        // Swap in an empty placeholder so the shared parts can be moved out.
        let (owner, offset, len) =
            match mem::replace(&mut self.storage, Storage::Owned(Vec64::with_capacity(0))) {
                Storage::Shared { owner, offset, len } => (owner, offset, len),
                _ => unreachable!(),
            };
        let bytes = owner.as_slice();
        let size_of_t = std::mem::size_of::<T>();
        // SAFETY: same Shared invariant as in `as_slice`; the copy target
        // has capacity for `len` elements and cannot overlap the source.
        let ptr = unsafe { bytes.as_ptr().add(offset * size_of_t) };
        let mut new_vec = Vec64::with_capacity(len);
        unsafe {
            std::ptr::copy_nonoverlapping(ptr as *const T, new_vec.as_mut_ptr(), len);
            new_vec.set_len(len);
        }
        self.storage = Storage::Owned(new_vec);
        if let Storage::Owned(ref mut vec) = self.storage {
            vec
        } else {
            unreachable!()
        }
    }

    /// Replaces `range` with `replace_with`, returning the removed elements.
    /// Shared storage is first copied into owned storage.
    #[inline]
    pub fn splice<'a, R, I>(&'a mut self, range: R, replace_with: I) -> impl Iterator<Item = T> + 'a
    where
        R: RangeBounds<usize>,
        I: IntoIterator<Item = T> + 'a,
        I::IntoIter: 'a,
    {
        let vec = self.make_owned_mut();
        vec.splice(range, replace_with)
    }

    /// `true` when this buffer is a view into shared storage.
    #[inline]
    pub fn is_shared(&self) -> bool {
        matches!(self.storage, Storage::Shared { .. })
    }

    /// Returns an owned deep copy regardless of storage kind; `self` is
    /// left untouched.
    #[inline]
    pub fn to_owned_copy(&self) -> Self
    where
        T: Clone,
    {
        let vec: Vec64<T> = self.as_ref().iter().cloned().collect();
        Buffer::from_vec64(vec)
    }
}
impl<T: Clone> Buffer<T> {
    /// Resizes to `new_len`, filling new slots with clones of `value`.
    /// Shared storage is first copied into owned storage.
    #[inline]
    pub fn resize(&mut self, new_len: usize, value: T) {
        self.make_owned_mut().resize(new_len, value);
    }

    /// Appends clones of every element of `s`
    /// (copy-on-write for shared storage).
    #[inline]
    pub fn extend_from_slice(&mut self, s: &[T]) {
        self.make_owned_mut().extend_from_slice(s);
    }

    /// Splits the buffer at `at`: `self` keeps `[0, at)` and the returned
    /// buffer holds `[at, len)`.
    ///
    /// Owned buffers delegate to `Vec64::split_off`. Shared buffers now
    /// produce a second zero-copy view into the same owner (this previously
    /// panicked, even though the shared representation supports offset
    /// views — cf. `from_shared_column`).
    ///
    /// # Panics
    /// Panics if `at > self.len()`.
    #[inline]
    pub fn split_off(&mut self, at: usize) -> Self {
        match &mut self.storage {
            Storage::Owned(vec) => Self {
                storage: Storage::Owned(vec.split_off(at)),
            },
            Storage::Shared { owner, offset, len } => {
                assert!(
                    at <= *len,
                    "split_off index (is {at}) should be <= len (is {len})"
                );
                let tail = Self {
                    storage: Storage::Shared {
                        owner: owner.clone(),
                        offset: *offset + at,
                        len: *len - at,
                    },
                };
                *len = at;
                tail
            }
        }
    }
}
/// Computes how to split a byte region at the first 64-byte-aligned address.
///
/// Returns `None` when `ptr` is already 64-byte aligned, or when the aligned
/// boundary falls at or beyond the end of the region. Otherwise returns
/// `(head_bytes, tail_bytes)`: the length of the unaligned prefix and of the
/// aligned remainder (`head_bytes + tail_bytes == len_bytes`).
#[inline]
pub fn split_at_first_align64(ptr: *const u8, len_bytes: usize) -> Option<(usize, usize)> {
    let misalign = (ptr as usize) & 63;
    if misalign == 0 {
        return None;
    }
    let head = 64 - misalign;
    (head < len_bytes).then(|| (head, len_bytes - head))
}
impl<T: Copy> Buffer<T> {
    /// Shortens the buffer to `new_len` elements; a no-op when `new_len`
    /// already covers the whole buffer. Shared views are truncated in place
    /// without copying (only the view length shrinks).
    #[inline]
    pub fn truncate(&mut self, new_len: usize) {
        if new_len < self.len() {
            match &mut self.storage {
                Storage::Owned(vec) => vec.truncate(new_len),
                Storage::Shared { len, .. } => *len = new_len,
            }
        }
    }
}
impl<T: Clone> Clone for Buffer<T> {
    /// Cloning an owned buffer deep-copies the elements; cloning a shared
    /// buffer only clones the owner handle and copies the view coordinates.
    fn clone(&self) -> Self {
        let storage = match &self.storage {
            Storage::Owned(vec) => Storage::Owned(vec.clone()),
            Storage::Shared { owner, offset, len } => Storage::Shared {
                owner: owner.clone(),
                offset: *offset,
                len: *len,
            },
        };
        Self { storage }
    }
}
impl<T: PartialEq> PartialEq for Buffer<T> {
    /// Element-wise comparison of contents, regardless of storage kind.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.as_slice() == other.as_slice()
    }
}
impl<T: PartialEq> PartialEq<Vec64<T>> for Buffer<T> {
    /// Compares the buffer's contents against a `Vec64` element-wise.
    #[inline]
    fn eq(&self, other: &Vec64<T>) -> bool {
        self.as_slice() == other.deref()
    }
}
impl<T: fmt::Debug> fmt::Debug for Buffer<T> {
    /// Formats as `Buffer([..])`, hiding the owned/shared distinction.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("Buffer");
        tuple.field(&self.as_slice());
        tuple.finish()
    }
}
impl<T> Default for Buffer<T> {
#[inline]
fn default() -> Self {
Buffer::from_vec64(Vec64::default())
}
}
impl<T> Deref for Buffer<T> {
    type Target = [T];

    /// Lets a `Buffer<T>` be used anywhere a `&[T]` is expected
    /// (indexing, slicing, iterator adaptors, …).
    #[inline]
    fn deref(&self) -> &[T] {
        self.as_slice()
    }
}
impl<T> DerefMut for Buffer<T> {
    /// Mutable slice access with copy-on-write semantics.
    ///
    /// A shared buffer is first materialized into owned storage — matching
    /// `as_mut_slice`, `push`, `clear`, and the `&mut` iterators, which all
    /// go through `make_owned_mut` — instead of panicking as before. This
    /// makes `buf[i] = x` and `AsMut` work uniformly on shared buffers.
    #[inline]
    fn deref_mut(&mut self) -> &mut [T] {
        self.make_owned_mut().as_mut_slice()
    }
}
impl<T> From<Vec64<T>> for Buffer<T> {
#[inline]
fn from(v: Vec64<T>) -> Self {
Buffer::from_vec64(v)
}
}
impl<'a, T> IntoIterator for &'a Buffer<T> {
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;

    /// Iterates over shared references to the elements.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}
impl<'a, T> IntoIterator for &'a mut Buffer<T> {
    type Item = &'a mut T;
    type IntoIter = std::slice::IterMut<'a, T>;

    /// Iterates over mutable references; shared storage is first copied
    /// into an owned `Vec64` (copy-on-write).
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.make_owned_mut().iter_mut()
    }
}
impl<T> FromIterator<T> for Buffer<T> {
#[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
Self::from(Vec64::from_iter(iter))
}
}
impl<T> IntoIterator for Buffer<T> {
    type Item = T;
    type IntoIter = <Vec64<T> as IntoIterator>::IntoIter;

    /// Consumes the buffer, yielding elements by value.
    ///
    /// An owned buffer hands its `Vec64` straight to the iterator. A shared
    /// buffer copies its elements into a fresh `Vec64` first. The previous
    /// `ManuallyDrop`-based implementation never dropped the `SharedBuffer`
    /// owner in the shared arm, leaking one reference count per call; moving
    /// `storage` out by value (Buffer has no `Drop` impl) lets the owner
    /// drop normally after the copy.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        match self.storage {
            Storage::Owned(vec) => vec.into_iter(),
            Storage::Shared { owner, offset, len } => {
                let bytes = owner.as_slice();
                let size_of_t = std::mem::size_of::<T>();
                // SAFETY: the Shared invariant guarantees `offset`/`len`
                // describe a valid `[T]` region of `bytes`; `v` has capacity
                // for `len` elements and does not overlap the source.
                let ptr = unsafe { bytes.as_ptr().add(offset * size_of_t) };
                let mut v = Vec64::with_capacity(len);
                unsafe {
                    std::ptr::copy_nonoverlapping(ptr as *const T, v.as_mut_ptr(), len);
                    v.set_len(len);
                }
                v.into_iter()
                // `owner` drops here, releasing its reference.
            }
        }
    }
}
impl<T> AsRef<[T]> for Buffer<T> {
    /// Cheap borrowed view of the contents as a slice.
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}
impl<T> AsMut<[T]> for Buffer<T> {
    /// Mutable view of the contents; routed through `DerefMut`.
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        &mut **self
    }
}
#[cfg(feature = "parallel_proc")]
impl<T: Send + Sync> Buffer<T> {
    /// Rayon parallel iterator over shared references to the elements.
    #[inline]
    pub fn par_iter(&self) -> rayon::slice::Iter<'_, T> {
        use rayon::iter::IntoParallelRefIterator;
        self.as_slice().par_iter()
    }

    /// Rayon parallel iterator over mutable references; shared storage is
    /// first copied into an owned `Vec64` (copy-on-write).
    #[inline]
    pub fn par_iter_mut(&mut self) -> rayon::slice::IterMut<'_, T> {
        use rayon::iter::IntoParallelRefMutIterator;
        self.make_owned_mut().par_iter_mut()
    }
}
impl<T: Display> Display for Buffer<T> {
    /// Human-readable preview: a header line with the element count and
    /// storage kind, followed by up to `MAX_PREVIEW` elements.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let total = self.len();
        let label = if self.is_shared() { "shared" } else { "owned" };
        writeln!(f, "Buffer [{} elements] ({})", total, label)?;
        write!(f, "[")?;
        for (i, item) in self.as_slice().iter().take(MAX_PREVIEW).enumerate() {
            if i > 0 {
                write!(f, ", ")?;
            }
            write!(f, "{}", item)?;
        }
        if total > MAX_PREVIEW {
            write!(f, ", … ({} total)", total)?;
        }
        write!(f, "]")
    }
}
// SAFETY: NOTE(review) — these manual impls assert that `Buffer<T>` may be
// shared/sent across threads whenever `T` itself can be. They are presumably
// required because `SharedBuffer` internals suppress the auto impls; confirm
// that `SharedBuffer` is itself thread-safe before relying on these.
unsafe impl<T: Sync> Sync for Buffer<T> {}
unsafe impl<T: Send> Send for Buffer<T> {}
// Unit tests. Note: shared-storage tests build an `Arc<[u8]>` whose
// allocation may or may not be 64-byte aligned, so `from_shared_raw` may
// take its copy-to-owned fallback — the assertions hold either way.
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;
    use crate::vec64;

    #[test]
    fn test_owned_buffer() {
        let mut buf = Buffer::from(Vec64::from(vec![1, 2, 3]));
        assert_eq!(buf.len(), 3);
        assert_eq!(&buf[..], &[1, 2, 3]);
        buf.push(4);
        assert_eq!(&buf[..], &[1, 2, 3, 4]);
    }

    #[test]
    fn test_shared_buffer_read() {
        let data = vec64![1u8, 2, 3, 4];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        assert_eq!(buf.len(), 4);
        assert_eq!(&buf[..], &[1, 2, 3, 4]);
    }

    #[test]
    fn test_shared_buffer_cow() {
        // Mutation of a shared buffer must copy first, then apply.
        let data = vec64![1u8, 2, 3];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        buf.push(4);
        assert_eq!(&buf[..], &[1, 2, 3, 4]);
    }

    #[test]
    fn test_clone_owned() {
        let buf1 = Buffer::from(vec64![1, 2, 3, 4]);
        let buf2 = buf1.clone();
        assert_eq!(buf1, buf2);
        assert_eq!(buf1.len(), buf2.len());
        assert_eq!(&buf1[..], &buf2[..]);
    }

    #[test]
    fn test_clone_shared() {
        let data = vec64![5u8, 6, 7, 8];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let buf1 = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        let buf2 = buf1.clone();
        assert_eq!(buf1, buf2);
        assert_eq!(&buf1[..], &[5, 6, 7, 8]);
        assert_eq!(&buf2[..], &[5, 6, 7, 8]);
    }

    #[test]
    fn test_truncate_owned() {
        let mut buf = Buffer::from(vec64![1, 2, 3, 4, 5]);
        buf.truncate(3);
        assert_eq!(buf.len(), 3);
        assert_eq!(&buf[..], &[1, 2, 3]);
        // Truncating past the end is a no-op.
        buf.truncate(10);
        assert_eq!(buf.len(), 3);
    }

    #[test]
    fn test_truncate_shared() {
        let data = vec64![1u8, 2, 3, 4, 5];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        buf.truncate(3);
        assert_eq!(buf.len(), 3);
        assert_eq!(&buf[..], &[1, 2, 3]);
    }

    #[test]
    fn test_clear() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        buf.clear();
        assert_eq!(buf.len(), 0);
        assert!(buf.is_empty());
    }

    #[test]
    fn test_clear_shared_makes_owned() {
        let data = vec64![1u8, 2, 3];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        buf.clear();
        assert_eq!(buf.len(), 0);
        assert!(buf.is_empty());
        buf.push(10);
        assert_eq!(&buf[..], &[10]);
    }

    #[test]
    fn test_reserve() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        let initial_cap = buf.capacity();
        buf.reserve(100);
        assert!(buf.capacity() >= initial_cap + 100);
    }

    #[test]
    fn test_capacity_shared() {
        // Shared views report their length as capacity.
        let data = vec64![1u8, 2, 3, 4];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        assert_eq!(buf.capacity(), 4);
    }

    #[test]
    fn test_resize() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        buf.resize(5, 99);
        assert_eq!(&buf[..], &[1, 2, 3, 99, 99]);
        buf.resize(2, 0);
        assert_eq!(&buf[..], &[1, 2]);
    }

    #[test]
    fn test_extend_from_slice() {
        let mut buf = Buffer::from(vec64![1, 2]);
        buf.extend_from_slice(&[3, 4, 5]);
        assert_eq!(&buf[..], &[1, 2, 3, 4, 5]);
    }

    #[test]
    fn test_splice() {
        let mut buf = Buffer::from(vec64![1, 2, 3, 4, 5]);
        let removed: Vec<_> = buf.splice(1..4, vec![10, 20]).collect();
        assert_eq!(removed, vec![2, 3, 4]);
        assert_eq!(&buf[..], &[1, 10, 20, 5]);
    }

    #[test]
    fn test_splice_shared_makes_owned() {
        let data = vec64![1u8, 2, 3, 4, 5];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        let removed: Vec<_> = buf.splice(2..4, vec![10]).collect();
        assert_eq!(removed, vec![3, 4]);
        assert_eq!(&buf[..], &[1, 2, 10, 5]);
    }

    #[test]
    fn test_as_slice_and_as_mut_slice() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
        buf.as_mut_slice()[1] = 20;
        assert_eq!(buf.as_slice(), &[1, 20, 3]);
    }

    #[test]
    fn test_as_mut_slice_shared_makes_owned() {
        let data = vec64![1u8, 2, 3];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        let slice = buf.as_mut_slice();
        slice[0] = 10;
        assert_eq!(&buf[..], &[10, 2, 3]);
    }

    #[test]
    fn test_equality() {
        let buf1 = Buffer::from(vec64![1, 2, 3]);
        let buf2 = Buffer::from(vec64![1, 2, 3]);
        let buf3 = Buffer::from(vec64![1, 2, 4]);
        assert_eq!(buf1, buf2);
        assert_ne!(buf1, buf3);
    }

    #[test]
    fn test_equality_with_vec64() {
        let buf = Buffer::from(vec64![1, 2, 3]);
        let vec = vec64![1, 2, 3];
        assert_eq!(buf, vec);
    }

    #[test]
    fn test_default() {
        let buf: Buffer<i32> = Buffer::default();
        assert_eq!(buf.len(), 0);
        assert!(buf.is_empty());
    }

    #[test]
    fn test_from_iter() {
        let buf: Buffer<i32> = (1..=5).collect();
        assert_eq!(&buf[..], &[1, 2, 3, 4, 5]);
    }

    #[test]
    fn test_into_iter_owned() {
        let buf = Buffer::from(vec64![1, 2, 3, 4]);
        let vec: Vec<_> = buf.into_iter().collect();
        assert_eq!(vec, vec![1, 2, 3, 4]);
    }

    #[test]
    fn test_into_iter_shared() {
        let data = vec64![1u8, 2, 3, 4];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        let vec: Vec<_> = buf.into_iter().collect();
        assert_eq!(vec, vec![1, 2, 3, 4]);
    }

    #[test]
    fn test_ref_iter() {
        let buf = Buffer::from(vec64![1, 2, 3]);
        let sum: i32 = buf.iter().sum();
        assert_eq!(sum, 6);
    }

    #[test]
    fn test_mut_iter() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        for x in &mut buf {
            *x *= 2;
        }
        assert_eq!(&buf[..], &[2, 4, 6]);
    }

    #[test]
    fn test_mut_iter_shared_makes_owned() {
        let data = vec64![1u8, 2, 3];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        for x in &mut buf {
            *x *= 2;
        }
        assert_eq!(&buf[..], &[2, 4, 6]);
    }

    #[test]
    fn test_as_ref_as_mut() {
        let mut buf = Buffer::from(vec64![1, 2, 3]);
        let slice_ref: &[i32] = buf.as_ref();
        assert_eq!(slice_ref, &[1, 2, 3]);
        let slice_mut: &mut [i32] = buf.as_mut();
        slice_mut[0] = 10;
        assert_eq!(&buf[..], &[10, 2, 3]);
    }

    #[test]
    fn test_empty_buffer() {
        let buf: Buffer<i32> = Buffer::from(vec64![]);
        assert!(buf.is_empty());
        assert_eq!(buf.len(), 0);
    }

    #[test]
    fn test_multiple_shared_views() {
        // Two non-overlapping views into the same 128-byte allocation.
        let mut data = vec64![0u8; 128];
        for i in 0..128 {
            data[i] = i as u8;
        }
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let ptr1 = arc.as_ptr();
        let ptr2 = unsafe { arc.as_ptr().add(64) };
        let buf1 = unsafe { Buffer::from_shared_raw(arc.clone(), ptr1, 64) };
        let buf2 = unsafe { Buffer::from_shared_raw(arc.clone(), ptr2, 64) };
        assert_eq!(&buf1[0..4], &[0, 1, 2, 3]);
        assert_eq!(&buf2[0..4], &[64, 65, 66, 67]);
        assert_eq!(buf1.len(), 64);
        assert_eq!(buf2.len(), 64);
        assert_eq!(buf1[63], 63);
        assert_eq!(buf2[63], 127);
    }

    #[test]
    fn test_debug_format() {
        let owned = Buffer::from(vec64![1, 2, 3]);
        assert!(format!("{:?}", owned).contains("Buffer"));
        let data = vec64![1u8, 2, 3];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let shared = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        assert!(format!("{:?}", shared).contains("Buffer"));
    }

    #[cfg(feature = "parallel_proc")]
    #[test]
    fn test_par_iter() {
        use rayon::prelude::*;
        let buf = Buffer::from(vec64![1, 2, 3, 4, 5]);
        let sum: i32 = buf.par_iter().sum();
        assert_eq!(sum, 15);
    }

    #[cfg(feature = "parallel_proc")]
    #[test]
    fn test_par_iter_mut() {
        use rayon::prelude::*;
        let mut buf = Buffer::from(vec64![1, 2, 3, 4, 5]);
        buf.par_iter_mut().for_each(|x| *x *= 2);
        assert_eq!(&buf[..], &[2, 4, 6, 8, 10]);
    }

    #[cfg(feature = "parallel_proc")]
    #[test]
    fn test_par_iter_mut_shared_makes_owned() {
        use rayon::prelude::*;
        let data = vec64![1u8, 2, 3, 4, 5];
        let arc: Arc<[u8]> = Arc::from(&data[..]);
        let arc_ptr = arc.as_ptr();
        let mut buf = unsafe { Buffer::from_shared_raw(arc.clone(), arc_ptr, arc.len()) };
        buf.par_iter_mut().for_each(|x| *x *= 2);
        assert_eq!(&buf[..], &[2, 4, 6, 8, 10]);
    }
}