use super::{ParallelVec, ParallelVecConversionError};
use alloc::{
alloc::{alloc, dealloc, handle_alloc_error, realloc, Layout},
vec::Vec,
};
use core::ptr::NonNull;
/// Describes a tuple of element types stored by a [`ParallelVec`] in
/// structure-of-arrays form: one allocation/pointer/slice/iterator per
/// tuple field, all kept in lockstep.
///
/// # Safety
/// Implementations perform raw-pointer arithmetic, allocation, and
/// deallocation on behalf of [`ParallelVec`]; every associated type and
/// method must stay mutually consistent (one entry per field, matching
/// layouts between `alloc`/`realloc`/`dealloc`, identical element counts).
/// The trait is sealed via `private::Sealed`, so it is only implemented in
/// this file, for tuples of arity 2 through 12.
pub unsafe trait ParallelParam: Sized + private::Sealed {
/// Owned allocation handles: one `NonNull` pointer per tuple field.
type Storage: Copy + Eq;
/// Raw-pointer form of `Storage`: one `*mut` pointer per field.
type Ptr: Copy;
/// One `usize` per field.
/// NOTE(review): not used anywhere in this file — presumably per-field
/// offsets for external callers; confirm at use sites.
type Offsets;
/// Tuple of shared references, one per field.
type Ref<'a>;
/// Tuple of mutable references, one per field.
type RefMut<'a>;
/// Tuple of `Vec`s, one per field (consumed by the `TryFrom` conversion).
type Vecs;
/// Tuple of shared slices, one per field.
type Slices<'a>;
/// Tuple of mutable slices, one per field.
type SlicesMut<'a>;
/// Tuple of slice iterators, one per field.
type Iters<'a>;
/// Tuple of mutable slice iterators, one per field.
type ItersMut<'a>;
/// Returns dangling (non-null but unallocated) pointers, used as the
/// storage for capacity 0 and for zero-sized element types.
fn dangling() -> Self::Storage;
/// Converts owned storage handles into raw pointers.
fn as_ptr(storage: Self::Storage) -> Self::Ptr;
/// Allocates one array of `capacity` elements per field.
///
/// # Safety
/// `capacity` must be non-zero, and the returned storage must eventually
/// be released with [`Self::dealloc`] using the same capacity.
unsafe fn alloc(capacity: usize) -> Self::Storage;
/// Grows or shrinks every field's array from `current_capacity` to
/// `new_capacity` elements.
///
/// # Safety
/// `current_capacity` must be the capacity the storage was created with
/// (0 for dangling storage).
unsafe fn realloc(storage: Self::Storage, current_capacity: usize, new_capacity: usize) -> Self::Storage;
/// Frees every field's array; a no-op when `capacity` is 0.
///
/// # Safety
/// `capacity` must match the value passed to `alloc`/`realloc`.
unsafe fn dealloc(storage: Self::Storage, capacity: usize);
/// Returns pointers to the element at `idx` in every field's array.
///
/// # Safety
/// `idx` must lie within the allocated capacity.
unsafe fn ptr_at(storage: Self::Storage, idx: usize) -> Self::Ptr {
Self::add(Self::as_ptr(storage), idx)
}
/// Returns the common length of the `Vec`s, or `None` if they differ.
fn get_vec_len(vecs: &Self::Vecs) -> Option<usize>;
/// Returns the data pointers of the `Vec`s.
///
/// # Safety
/// The pointers are only valid while the `Vec`s are not reallocated.
unsafe fn get_vec_ptrs(vecs: &mut Self::Vecs) -> Self::Ptr;
/// Offsets every field's pointer by `offset` elements.
///
/// # Safety
/// Same contract as [`pointer::add`] for each field's pointer.
unsafe fn add(base: Self::Ptr, offset: usize) -> Self::Ptr;
/// Copies `size` elements per field; ranges may overlap (memmove-like).
unsafe fn copy_to(src: Self::Ptr, dst: Self::Ptr, size: usize);
/// Copies `size` elements per field; ranges must not overlap (memcpy-like).
unsafe fn copy_to_nonoverlapping(src: Self::Ptr, dst: Self::Ptr, size: usize);
/// Reinterprets the pointers as shared slices of `len` elements each.
unsafe fn as_slices<'a>(ptr: Self::Ptr, len: usize) -> Self::Slices<'a>;
/// Reinterprets the pointers as mutable slices of `len` elements each.
unsafe fn as_slices_mut<'a>(ptr: Self::Ptr, len: usize) -> Self::SlicesMut<'a>;
/// Creates per-field iterators over the slices.
#[allow(clippy::needless_lifetimes)]
fn iters<'a>(slices: Self::Slices<'a>) -> Self::Iters<'a>;
/// Creates per-field mutable iterators over the slices.
#[allow(clippy::needless_lifetimes)]
fn iters_mut<'a>(slices: Self::SlicesMut<'a>) -> Self::ItersMut<'a>;
/// Reverses every field's slice in place.
fn reverse(ptr: Self::SlicesMut<'_>);
/// Dereferences the pointers into shared references.
unsafe fn as_ref<'a>(ptr: Self::Ptr) -> Self::Ref<'a>;
/// Converts raw pointers back into storage handles.
///
/// # Safety
/// Every pointer must be non-null.
unsafe fn as_storage(ptr: Self::Ptr) -> Self::Storage;
/// Dereferences the pointers into mutable references.
unsafe fn as_mut<'a>(ptr: Self::Ptr) -> Self::RefMut<'a>;
/// Reads one element per field out of the arrays (like `ptr::read`).
unsafe fn read(ptr: Self::Ptr) -> Self;
/// Writes one element per field into the arrays (like `ptr::write`).
unsafe fn write(ptr: Self::Ptr, value: Self);
/// Swaps the pointed-to element of every field between `a` and `other`.
unsafe fn swap(a: Self::Ptr, other: Self::Ptr);
/// Runs the destructor of the pointed-to element in every field.
unsafe fn drop(ptr: Self::Ptr);
}
/// Houses the [`Sealed`] marker so `ParallelParam` cannot be implemented
/// outside this crate.
mod private {
    /// Marker trait restricting `ParallelParam` implementations to the
    /// tuple arities listed below.
    pub trait Sealed {}

    /// Implements [`Sealed`] for the tuple built from the given type
    /// parameters.
    macro_rules! seal_tuple {
        ($($param:ident),+) => {
            impl<$($param,)+> Sealed for ($($param,)+) {}
        };
    }

    seal_tuple!(T1, T2);
    seal_tuple!(T1, T2, T3);
    seal_tuple!(T1, T2, T3, T4);
    seal_tuple!(T1, T2, T3, T4, T5);
    seal_tuple!(T1, T2, T3, T4, T5, T6);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7, T8);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
    seal_tuple!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12);
}
/// Expands to its second argument, discarding the first. Used below to map
/// every tuple field after the first onto a fixed token (e.g. `usize` in
/// `Offsets`), one copy per repetition.
macro_rules! skip_first {
    ($_discarded:ident, $kept:ident) => {
        $kept
    };
}
// Implements `ParallelParam` for one tuple arity, plus a
// `TryFrom<(Vec<T1>, ...)>` conversion into `ParallelVec`.
//
// Every tuple field contributes a `$ts`/`$vs` identifier pair: `$ts` names
// the generic type parameter and doubles as a destructuring binding, while
// `$vs` is a second, non-colliding binding name for the places where two
// tuples must be destructured in the same scope (`copy_to`, `write`,
// `swap`, ...).
macro_rules! impl_parallel_vec_param {
    ($t1: ident, $v1: ident, $($ts:ident, $vs:ident),*) => {
        unsafe impl<$t1: 'static $(, $ts: 'static)*> ParallelParam for ($t1 $(, $ts)*) {
            // One separately-allocated, tightly-packed array per field.
            type Storage = (NonNull<$t1> $(, NonNull<$ts>)*);
            type Ref<'a> = (&'a $t1, $(&'a $ts,)*);
            type RefMut<'a> = (&'a mut $t1, $(&'a mut $ts,)*);
            type Slices<'a> = (&'a [$t1] $(, &'a [$ts])*);
            type SlicesMut<'a> = (&'a mut [$t1] $(, &'a mut [$ts])*);
            type Vecs = (Vec<$t1> $(, Vec<$ts>)*);
            type Ptr = (*mut $t1 $(, *mut $ts)*);
            // `skip_first!` maps every remaining field onto `usize`.
            type Offsets = (usize $(, skip_first!($ts, usize))*);
            type Iters<'a> = (core::slice::Iter<'a, $t1> $(, core::slice::Iter<'a, $ts>)*);
            type ItersMut<'a> = (core::slice::IterMut<'a, $t1> $(, core::slice::IterMut<'a, $ts>)*);

            #[inline(always)]
            fn dangling() -> Self::Storage {
                (NonNull::dangling(), $(NonNull::<$ts>::dangling()),*)
            }

            #[inline(always)]
            fn as_ptr(storage: Self::Storage) -> Self::Ptr {
                let ($t1$(, $ts)*) = storage;
                ($t1.as_ptr() $(, $ts.as_ptr())*)
            }

            unsafe fn alloc(capacity: usize) -> Self::Storage {
                debug_assert!(capacity != 0);
                // Zero-sized types never allocate; they keep a dangling
                // pointer, matching `Vec`'s behavior for ZSTs.
                let $t1 = if core::mem::size_of::<$t1>() != 0 {
                    let layout = Layout::array::<$t1>(capacity).unwrap();
                    let ptr = alloc(layout).cast::<$t1>();
                    NonNull::new(ptr).unwrap_or_else(|| handle_alloc_error(layout))
                } else {
                    NonNull::dangling()
                };
                $(
                    let $ts = if core::mem::size_of::<$ts>() != 0 {
                        let layout = Layout::array::<$ts>(capacity).unwrap();
                        let ptr = alloc(layout).cast::<$ts>();
                        NonNull::new(ptr).unwrap_or_else(|| handle_alloc_error(layout))
                    } else {
                        NonNull::dangling()
                    };
                )*
                ($t1 $(, $ts)*)
            }

            unsafe fn realloc(storage: Self::Storage, current_capacity: usize, new_capacity: usize) -> Self::Storage {
                // Shrinking to zero is a plain deallocation...
                if new_capacity == 0 {
                    Self::dealloc(storage, current_capacity);
                    return Self::dangling();
                }
                // ...and growing from zero is a plain allocation, since the
                // global `realloc` must not be fed a dangling pointer.
                if current_capacity == 0 {
                    return Self::alloc(new_capacity);
                }
                let ($t1 $(, $ts)*) = storage;
                let $t1 = if core::mem::size_of::<$t1>() != 0 {
                    let layout = Layout::array::<$t1>(current_capacity).unwrap();
                    let new_size = core::mem::size_of::<$t1>().checked_mul(new_capacity).unwrap();
                    let ptr = realloc($t1.as_ptr().cast::<u8>(), layout, new_size).cast::<$t1>();
                    NonNull::new(ptr).unwrap_or_else(|| handle_alloc_error(layout))
                } else {
                    // ZST: keep the dangling pointer unchanged.
                    $t1
                };
                $(
                    let $ts = if core::mem::size_of::<$ts>() != 0 {
                        let layout = Layout::array::<$ts>(current_capacity).unwrap();
                        let new_size = core::mem::size_of::<$ts>().checked_mul(new_capacity).unwrap();
                        let ptr = realloc($ts.as_ptr().cast::<u8>(), layout, new_size).cast::<$ts>();
                        NonNull::new(ptr).unwrap_or_else(|| handle_alloc_error(layout))
                    } else {
                        $ts
                    };
                )*
                ($t1 $(, $ts)*)
            }

            unsafe fn dealloc(storage: Self::Storage, capacity: usize) {
                // Capacity 0 means the storage is dangling: nothing to free.
                if capacity == 0 {
                    return;
                }
                let ($t1 $(, $ts)*) = storage;
                // SAFETY: a non-zero capacity implies these arrays were
                // allocated with exactly this layout, so constructing it
                // again cannot fail.
                if core::mem::size_of::<$t1>() != 0 {
                    dealloc($t1.as_ptr().cast::<u8>(), Layout::array::<$t1>(capacity).unwrap_unchecked());
                }
                $(
                    if core::mem::size_of::<$ts>() != 0 {
                        dealloc($ts.as_ptr().cast::<u8>(), Layout::array::<$ts>(capacity).unwrap_unchecked());
                    }
                )*
            }

            #[inline(always)]
            unsafe fn add(base: Self::Ptr, offset: usize) -> Self::Ptr {
                let ($t1, $($ts),*) = base;
                ($t1.add(offset), $($ts.add(offset)),*)
            }

            #[inline(always)]
            unsafe fn copy_to(src: Self::Ptr, dst: Self::Ptr, len: usize) {
                let ($t1, $($ts),*) = src;
                let ($v1, $($vs),*) = dst;
                $t1.copy_to($v1, len);
                $($ts.copy_to($vs, len);)*
            }

            #[inline(always)]
            unsafe fn copy_to_nonoverlapping(src: Self::Ptr, dst: Self::Ptr, len: usize) {
                let ($t1, $($ts),*) = src;
                let ($v1, $($vs),*) = dst;
                $t1.copy_to_nonoverlapping($v1, len);
                $(
                    $ts.copy_to_nonoverlapping($vs, len);
                )*
            }

            #[inline(always)]
            unsafe fn as_slices<'a>(ptr: Self::Ptr, len: usize) -> Self::Slices<'a> {
                let ($t1, $($ts),*) = ptr;
                (
                    core::slice::from_raw_parts($t1, len)
                    $(
                        , core::slice::from_raw_parts($ts, len)
                    )*
                )
            }

            #[inline(always)]
            unsafe fn as_slices_mut<'a>(ptr: Self::Ptr, len: usize) -> Self::SlicesMut<'a> {
                let ($t1, $($ts),*) = ptr;
                (
                    core::slice::from_raw_parts_mut($t1, len)
                    $(
                        , core::slice::from_raw_parts_mut($ts, len)
                    )*
                )
            }

            #[inline(always)]
            fn iters<'a>(slices: Self::Slices<'a>) -> Self::Iters<'a> {
                let ($t1, $($ts),*) = slices;
                ($t1.iter() $(, $ts.iter())*)
            }

            #[inline(always)]
            fn iters_mut<'a>(slices: Self::SlicesMut<'a>) -> Self::ItersMut<'a> {
                let ($t1, $($ts),*) = slices;
                ($t1.iter_mut() $(, $ts.iter_mut())*)
            }

            #[inline(always)]
            fn reverse<'a>(slices: Self::SlicesMut<'a>) {
                let ($t1, $($ts),*) = slices;
                $t1.reverse();
                $($ts.reverse();)*
            }

            // FIX: the trait declares `as_storage` with no lifetime
            // parameters; the previous `<'a>` here was unused and made the
            // impl's generics differ from the trait declaration (E0195).
            #[inline(always)]
            unsafe fn as_storage(ptr: Self::Ptr) -> Self::Storage {
                let ($t1 $(, $ts)*) = ptr;
                (
                    NonNull::new_unchecked($t1)
                    $(, NonNull::new_unchecked($ts))*
                )
            }

            #[inline(always)]
            unsafe fn as_ref<'a>(ptr: Self::Ptr) -> Self::Ref<'a> {
                let ($t1, $($ts),*) = ptr;
                (&*$t1 $(, &*$ts)*)
            }

            #[inline(always)]
            unsafe fn as_mut<'a>(ptr: Self::Ptr) -> Self::RefMut<'a> {
                let ($t1, $($ts),*) = ptr;
                (&mut *$t1 $(, &mut *$ts)*)
            }

            #[inline(always)]
            unsafe fn read(ptr: Self::Ptr) -> Self {
                let ($t1, $($ts),*) = ptr;
                ($t1.read() $(, $ts.read())*)
            }

            #[inline(always)]
            unsafe fn write(ptr: Self::Ptr, value: Self) {
                let ($t1, $($ts),*) = ptr;
                let ($v1, $($vs),*) = value;
                $t1.write($v1);
                $($ts.write($vs);)*
            }

            #[inline(always)]
            unsafe fn swap(a: Self::Ptr, b: Self::Ptr) {
                let ($v1, $($vs),*) = a;
                let ($t1, $($ts),*) = b;
                core::ptr::swap($t1, $v1);
                $(core::ptr::swap($ts, $vs);)*
            }

            #[inline(always)]
            unsafe fn drop(ptr: Self::Ptr) {
                let ($t1, $($ts),*) = ptr;
                core::ptr::drop_in_place($t1);
                $(core::ptr::drop_in_place($ts);)*
            }

            fn get_vec_len(vecs: &Self::Vecs) -> Option<usize> {
                // All Vecs must agree on a single common length.
                let ($t1, $($ts),*) = vecs;
                let len = $t1.len();
                $(
                    if $ts.len() != len {
                        return None;
                    }
                )*
                Some(len)
            }

            unsafe fn get_vec_ptrs(vecs: &mut Self::Vecs) -> Self::Ptr {
                let ($t1, $($ts),*) = vecs;
                ($t1.as_mut_ptr() $(, $ts.as_mut_ptr())*)
            }
        }

        impl<$t1: 'static $(, $ts: 'static)*> TryFrom<(Vec<$t1> $(, Vec<$ts>)*)> for ParallelVec<($t1 $(, $ts)*)> {
            type Error = ParallelVecConversionError;

            /// Moves the contents of the per-field `Vec`s into a single
            /// `ParallelVec`; fails if the `Vec`s have differing lengths.
            fn try_from(mut vecs: (Vec<$t1> $(, Vec<$ts>)*)) -> Result<Self, Self::Error> {
                let len = <($t1 $(, $ts)*) as ParallelParam>::get_vec_len(&vecs);
                if let Some(len) = len {
                    let mut parallel_vec = Self::with_capacity(len);
                    unsafe {
                        let src = <($t1 $(, $ts)*) as ParallelParam>::get_vec_ptrs(&mut vecs);
                        let dst = <($t1 $(, $ts)*) as ParallelParam>::as_ptr(parallel_vec.storage);
                        <($t1 $(, $ts)*) as ParallelParam>::copy_to_nonoverlapping(src, dst, len);
                        // FIX: record the copied length. Previously it was
                        // never set, so the conversion returned an
                        // apparently-empty ParallelVec and leaked every
                        // copied element. NOTE(review): assumes ParallelVec
                        // has a `len` field next to the `storage` field
                        // accessed above — confirm.
                        parallel_vec.len = len;
                        // FIX: the elements were moved out by the copy, so
                        // clear each source Vec and let it drop normally,
                        // freeing its buffer without double-dropping the
                        // elements. The previous `core::mem::forget(vecs)`
                        // leaked the Vecs' heap buffers.
                        let ($t1 $(, $ts)*) = &mut vecs;
                        $t1.set_len(0);
                        $($ts.set_len(0);)*
                    }
                    Ok(parallel_vec)
                } else {
                    Err(ParallelVecConversionError::UnevenLengths)
                }
            }
        }
    }
}
// Implement `ParallelParam` for tuple arities 2 through 12. Arguments are
// strict `Tn, Vn` pairs: `Tn` becomes the generic type parameter for field
// n, and `Vn` its auxiliary binding name inside the generated methods.
// (The arity-9..12 invocations previously interleaved the pairs as
// `T3, T4, V3, V4`, silently swapping type-parameter and binding-name
// roles; that only worked because the identifiers happened not to collide.
// Normalized here — the generated impls are identical up to generic
// parameter names.)
impl_parallel_vec_param!(T1, V1, T2, V2);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4, T5, V5);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7, T8, V8);
impl_parallel_vec_param!(T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7, T8, V8, T9, V9);
impl_parallel_vec_param!(
    T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7, T8, V8, T9, V9, T10, V10
);
impl_parallel_vec_param!(
    T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7, T8, V8, T9, V9, T10, V10, T11, V11
);
impl_parallel_vec_param!(
    T1, V1, T2, V2, T3, V3, T4, V4, T5, V5, T6, V6, T7, V7, T8, V8, T9, V9, T10, V10, T11, V11,
    T12, V12
);