pub struct Emulated<T, const N: usize, A = Scalar>(/* private fields */);Expand description
An emulated SIMD vector.
The emulated implementation behaves just like an intrinsic, but the APIs are implemented using loops over arrays rather than dispatching to platform specific instructions.
The idea behind this type is that it can be used on architectures where explicit backend support has not been added, or when an architecture does not support a given type/length pair well.
Furthermore, it can be used when developing new back-ends to provide fallback implementations. This allows new back-ends to be developed one piece at a time instead of all at once.
NOTE: The alignment requirements of an emulated vector will be different from the alignment requirements of an actual intrinsic.
Higher level code must not rely on alignments being compatible across architectures!
Implementations§
Trait Implementations§
Source§impl<A> SIMDDotProduct<Emulated<i16, 16, A>> for Emulated<i32, 8, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
impl<A> SIMDDotProduct<Emulated<i16, 16, A>> for Emulated<i32, 8, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
Source§impl<A> SIMDDotProduct<Emulated<i16, 32, A>> for Emulated<i32, 16, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
impl<A> SIMDDotProduct<Emulated<i16, 32, A>> for Emulated<i32, 16, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
Source§impl<A> SIMDDotProduct<Emulated<i8, 32, A>, Emulated<u8, 32, A>> for Emulated<i32, 8, A>where
A: Sealed,
impl<A> SIMDDotProduct<Emulated<i8, 32, A>, Emulated<u8, 32, A>> for Emulated<i32, 8, A>where
A: Sealed,
Source§impl<A> SIMDDotProduct<Emulated<i8, 64, A>, Emulated<u8, 64, A>> for Emulated<i32, 16, A>where
A: Sealed,
impl<A> SIMDDotProduct<Emulated<i8, 64, A>, Emulated<u8, 64, A>> for Emulated<i32, 16, A>where
A: Sealed,
Source§impl<A> SIMDDotProduct<Emulated<u8, 32, A>, Emulated<i8, 32, A>> for Emulated<i32, 8, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
impl<A> SIMDDotProduct<Emulated<u8, 32, A>, Emulated<i8, 32, A>> for Emulated<i32, 8, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
Source§impl<A> SIMDDotProduct<Emulated<u8, 64, A>, Emulated<i8, 64, A>> for Emulated<i32, 16, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
impl<A> SIMDDotProduct<Emulated<u8, 64, A>, Emulated<i8, 64, A>> for Emulated<i32, 16, A>where
A: Sealed,
Promote intermediate values to i32 and then perform accumulation.
Source§impl<T, const N: usize, A> SIMDMinMax for Emulated<T, N, A>where
T: ReferenceScalarOps,
MinMax
impl<T, const N: usize, A> SIMDMinMax for Emulated<T, N, A>where
T: ReferenceScalarOps,
MinMax
Source§fn min_simd(self, rhs: Self) -> Self
fn min_simd(self, rhs: Self) -> Self
self and rhs, subject to looser NaN handling.Source§fn max_simd(self, rhs: Self) -> Self
fn max_simd(self, rhs: Self) -> Self
self and rhs, subject to looser NaN handling.Source§fn min_simd_standard(self, rhs: Self) -> Self
fn min_simd_standard(self, rhs: Self) -> Self
self and rhs as if by applying the standard
library’s min method for the scalar type.Source§fn max_simd_standard(self, rhs: Self) -> Self
fn max_simd_standard(self, rhs: Self) -> Self
self and rhs as if by applying the standard
library’s max method for the scalar type.Source§impl<T, const N: usize, A> SIMDMulAdd for Emulated<T, N, A>where
T: ReferenceScalarOps,
MulAdd
impl<T, const N: usize, A> SIMDMulAdd for Emulated<T, N, A>where
T: ReferenceScalarOps,
MulAdd
fn mul_add_simd(self, rhs: Self, accumulator: Self) -> Self
Source§impl<T, const N: usize, A> SIMDPartialEq for Emulated<T, N, A>where
T: PartialEq,
Self: SIMDVector,
SIMDPartialEq
impl<T, const N: usize, A> SIMDPartialEq for Emulated<T, N, A>where
T: PartialEq,
Self: SIMDVector,
SIMDPartialEq
Source§impl<T, const N: usize, A> SIMDPartialOrd for Emulated<T, N, A>where
T: PartialOrd,
Self: SIMDVector,
SIMDPartialOrd
impl<T, const N: usize, A> SIMDPartialOrd for Emulated<T, N, A>where
T: PartialOrd,
Self: SIMDVector,
SIMDPartialOrd
Source§impl<A> SIMDReinterpret<Emulated<i16, 16, A>> for Emulated<u32, 8, A>where
A: Sealed,
Available on little-endian only.
impl<A> SIMDReinterpret<Emulated<i16, 16, A>> for Emulated<u32, 8, A>where
A: Sealed,
fn reinterpret_simd(self) -> Emulated<i16, 16, A>
Source§impl<A> SIMDReinterpret<Emulated<i8, 64, A>> for Emulated<u32, 16, A>where
A: Sealed,
Available on little-endian only.
impl<A> SIMDReinterpret<Emulated<i8, 64, A>> for Emulated<u32, 16, A>where
A: Sealed,
fn reinterpret_simd(self) -> Emulated<i8, 64, A>
Source§impl<A> SIMDReinterpret<Emulated<u32, 16, A>> for Emulated<i8, 64, A>where
A: Sealed,
Available on little-endian only.
impl<A> SIMDReinterpret<Emulated<u32, 16, A>> for Emulated<i8, 64, A>where
A: Sealed,
fn reinterpret_simd(self) -> Emulated<u32, 16, A>
Source§impl<A> SIMDReinterpret<Emulated<u32, 16, A>> for Emulated<u8, 64, A>where
A: Sealed,
Available on little-endian only.
impl<A> SIMDReinterpret<Emulated<u32, 16, A>> for Emulated<u8, 64, A>where
A: Sealed,
fn reinterpret_simd(self) -> Emulated<u32, 16, A>
Source§impl<A> SIMDReinterpret<Emulated<u8, 64, A>> for Emulated<u32, 16, A>where
A: Sealed,
Available on little-endian only.
impl<A> SIMDReinterpret<Emulated<u8, 64, A>> for Emulated<u32, 16, A>where
A: Sealed,
fn reinterpret_simd(self) -> Emulated<u8, 64, A>
Source§impl<T, const N: usize, A> SIMDSelect<Emulated<T, N, A>> for BitMask<N, A>where
T: Copy,
A: Sealed,
Const<N>: SupportedLaneCount,
BitMask<N, A>: SIMDMask<Arch = A>,
Emulated<T, N, A>: SIMDVector<Mask = BitMask<N, A>>,
impl<T, const N: usize, A> SIMDSelect<Emulated<T, N, A>> for BitMask<N, A>where
T: Copy,
A: Sealed,
Const<N>: SupportedLaneCount,
BitMask<N, A>: SIMDMask<Arch = A>,
Emulated<T, N, A>: SIMDVector<Mask = BitMask<N, A>>,
Source§impl<T, const N: usize, A> SIMDVector for Emulated<T, N, A>
impl<T, const N: usize, A> SIMDVector for Emulated<T, N, A>
Source§const EMULATED: bool = true
const EMULATED: bool = true
The underlying behavior is emulated using loops and is not accelerated by back-end intrinsics.
Source§fn to_underlying(self) -> Self::Underlying
fn to_underlying(self) -> Self::Underlying
Return the underlying array.
Source§fn from_underlying(arch: A, repr: [T; N]) -> Self
fn from_underlying(arch: A, repr: [T; N]) -> Self
Construct from the underlying array.
Source§fn from_array(arch: A, x: [T; N]) -> Self
fn from_array(arch: A, x: [T; N]) -> Self
Construct from the underlying array.
Source§fn splat(arch: A, value: Self::Scalar) -> Self
fn splat(arch: A, value: Self::Scalar) -> Self
Broadcast the provided scalar across all lanes.
Source§unsafe fn load_simd_masked_logical(
arch: A,
ptr: *const T,
mask: Self::Mask,
) -> Self
unsafe fn load_simd_masked_logical( arch: A, ptr: *const T, mask: Self::Mask, ) -> Self
Only load values when the corresponding mask lane is set.
Source§unsafe fn load_simd_first(arch: A, ptr: *const T, first: usize) -> Self
unsafe fn load_simd_first(arch: A, ptr: *const T, first: usize) -> Self
Only load the first `first` items. Set the rest to zero.
Source§unsafe fn store_simd(self, ptr: *mut T)
unsafe fn store_simd(self, ptr: *mut T)
Store all the things.
Source§unsafe fn store_simd_masked_logical(self, ptr: *mut T, mask: Self::Mask)
unsafe fn store_simd_masked_logical(self, ptr: *mut T, mask: Self::Mask)
Only store values when the corresponding mask lane is set.
Source§unsafe fn store_simd_first(self, ptr: *mut T, first: usize)
unsafe fn store_simd_first(self, ptr: *mut T, first: usize)
Only store the first `first` items; lanes beyond `first` are not stored.
Source§type Underlying = [T; N]
type Underlying = [T; N]
Source§type ConstLanes = Const<N>
type ConstLanes = Const<N>
LANES but in the type domain so we can use it to constrain other
aspects of this trait. Read moreSource§type Mask = BitMask<N, A>
type Mask = BitMask<N, A>
Source§fn default(arch: A) -> Self
fn default(arch: A) -> Self
Source§unsafe fn load_simd_masked(
arch: Self::Arch,
ptr: *const <Self as SIMDVector>::Scalar,
mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
) -> Self
unsafe fn load_simd_masked( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, ) -> Self
load_simd_masked_logical but taking a BitMask instead. Read moreSource§unsafe fn store_simd_masked(
self,
ptr: *mut <Self as SIMDVector>::Scalar,
mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
)
unsafe fn store_simd_masked( self, ptr: *mut <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, )
load_simd_masked_logical but taking a BitMask instead. Read more