pub trait SIMDVector: Copy + Debug {
    type Arch: Sealed;
    type Scalar: Copy + Debug;
    type Underlying: Copy;
    type ConstLanes: ArrayType<Self::Scalar> + BitMaskType<Self::Arch>;
    type Mask: SIMDMask<Arch = Self::Arch>
        + From<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type>
        + Into<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type>;

    const LANES: usize;
    const EMULATED: bool;
    // Required methods
    fn arch(self) -> Self::Arch;
    fn default(arch: Self::Arch) -> Self;
    fn to_underlying(self) -> Self::Underlying;
    fn from_underlying(arch: Self::Arch, repr: Self::Underlying) -> Self;
    fn to_array(self) -> <Self::ConstLanes as ArrayType<Self::Scalar>>::Type;
    fn from_array(
        arch: Self::Arch,
        x: <Self::ConstLanes as ArrayType<Self::Scalar>>::Type,
    ) -> Self;
    fn splat(arch: Self::Arch, value: Self::Scalar) -> Self;
    unsafe fn load_simd(
        arch: Self::Arch,
        ptr: *const <Self as SIMDVector>::Scalar,
    ) -> Self;
    unsafe fn load_simd_masked_logical(
        arch: Self::Arch,
        ptr: *const <Self as SIMDVector>::Scalar,
        mask: <Self as SIMDVector>::Mask,
    ) -> Self;
    unsafe fn store_simd(self, ptr: *mut <Self as SIMDVector>::Scalar);
    unsafe fn store_simd_masked_logical(
        self,
        ptr: *mut <Self as SIMDVector>::Scalar,
        mask: <Self as SIMDVector>::Mask,
    );

    // Provided methods
    fn num_lanes() -> usize { ... }
    unsafe fn load_simd_masked(
        arch: Self::Arch,
        ptr: *const <Self as SIMDVector>::Scalar,
        mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
    ) -> Self { ... }
    unsafe fn load_simd_first(
        arch: Self::Arch,
        ptr: *const <Self as SIMDVector>::Scalar,
        first: usize,
    ) -> Self { ... }
    unsafe fn store_simd_masked(
        self,
        ptr: *mut <Self as SIMDVector>::Scalar,
        mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
    ) { ... }
    unsafe fn store_simd_first(
        self,
        ptr: *mut <Self as SIMDVector>::Scalar,
        first: usize,
    ) { ... }
    fn cast<T>(self) -> <Self as SIMDCast<T>>::Cast
    where
        Self: SIMDCast<T>,
    { ... }
}
A trait representing minimal behavior for a SIMD-like vector.

A SIMDVector can be thought of as a homogeneous array [T; N] (with potentially stricter alignment requirements) that, for arithmetic purposes, generally behaves like a scalar, in the sense that

fn add<V>(a: V, b: V) -> V
where
    V: SIMDVector + Add<Output = V>,
{
    a + b
}

broadcasts the + operation across all lanes of the vector. (The Add bound is illustrative; SIMDVector itself does not include arithmetic operators among its supertraits.)
Required Associated Constants

const LANES: usize

The number of lanes in the vector.

const EMULATED: bool

Whether operations on this vector are emulated (e.g. lane-by-lane in scalar code) rather than backed by native SIMD instructions.
Required Associated Types

type Arch: Sealed

The architecture token associated with this vector. Constructing a value of this type serves as a "proof of compatibility" that the architecture's requirements are met.

type Scalar: Copy + Debug

The scalar type held in each lane.

type Underlying: Copy

The underlying representation.

type ConstLanes: ArrayType<Self::Scalar> + BitMaskType<Self::Arch>

The value of LANES, but in the type domain, so it can be used to constrain other aspects of this trait.

Should be the type Const<Self::LANES>.

type Mask: SIMDMask<Arch = Self::Arch> + From<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type> + Into<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type>

The expanded logical mask representation. This may or may not actually be a bitmask, but it should be easily convertible to and from one.
Required Methods

fn arch(self) -> Self::Arch

Return the architecture object associated with this vector.

NOTE
This is safe because construction of self serves as the witness that we are on a compatible architecture.
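Because arch() recovers the witness from an existing vector, it can be used to construct further vectors without re-checking CPU features. A minimal sketch (the helper name is illustrative, and the crate's traits are assumed to be in scope):

// Build a zeroed vector "like" an existing one, reusing its
// architecture witness instead of re-detecting features.
fn zero_like<V: SIMDVector>(v: V) -> V {
    V::default(v.arch())
}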
fn default(arch: Self::Arch) -> Self

Return the default value for the type. This is always the numeric 0 for the associated scalar type.
fn to_underlying(self) -> Self::Underlying

Return the underlying representation.

fn from_underlying(arch: Self::Arch, repr: Self::Underlying) -> Self

Construct from the underlying representation.
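These two methods should round-trip; a hedged sketch (assuming an identity round-trip, which the descriptions above imply but do not state explicitly):

// Pass a vector through its raw representation (e.g. an intrinsic type
// such as __m256 on x86) and back, using arch() as the witness.
fn roundtrip<V: SIMDVector>(v: V) -> V {
    V::from_underlying(v.arch(), v.to_underlying())
}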
fn to_array(self) -> <Self::ConstLanes as ArrayType<Self::Scalar>>::Type

Retrieve the contents as an array.

fn from_array(
    arch: Self::Arch,
    x: <Self::ConstLanes as ArrayType<Self::Scalar>>::Type,
) -> Self

Construct from the associated array.

The argument arch provides a "proof of compatibility", as Self::Arch can only be safely instantiated when all the requirements for the architecture are met.
fn splat(arch: Self::Arch, value: Self::Scalar) -> Self

Broadcast the provided scalar across all lanes.

The argument arch provides a "proof of compatibility", as Self::Arch can only be safely instantiated when all the requirements for the architecture are met.
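The splat/to_array relationship can be checked generically; a sketch (the IntoIterator bound is an assumption that the associated array type is an ordinary [T; N]):

// Every lane of splat(x) should compare equal to x.
fn all_lanes_equal<V>(arch: V::Arch, x: V::Scalar) -> bool
where
    V: SIMDVector,
    V::Scalar: PartialEq,
    <V::ConstLanes as ArrayType<V::Scalar>>::Type: IntoIterator<Item = V::Scalar>,
{
    V::splat(arch, x).to_array().into_iter().all(|lane| lane == x)
}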
unsafe fn load_simd(
    arch: Self::Arch,
    ptr: *const <Self as SIMDVector>::Scalar,
) -> Self

Load <Self as SIMDVector>::LANES elements starting at the provided pointer.

ptr must be aligned for <Self as SIMDVector>::Scalar; no stricter alignment is required.

Safety
A contiguous read of <Self as SIMDVector>::LANES elements must touch valid memory.
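A safe wrapper typically discharges this obligation with a length check; a minimal sketch (the helper name is illustrative):

// Load a full vector from the front of a slice.
fn load_from_slice<V: SIMDVector>(arch: V::Arch, data: &[V::Scalar]) -> V {
    assert!(data.len() >= V::LANES, "slice too short for a full SIMD load");
    // SAFETY: the assert guarantees a contiguous read of LANES elements
    // stays within `data`, and slices are always element-aligned.
    unsafe { V::load_simd(arch, data.as_ptr()) }
}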
unsafe fn load_simd_masked_logical(
    arch: Self::Arch,
    ptr: *const <Self as SIMDVector>::Scalar,
    mask: <Self as SIMDVector>::Mask,
) -> Self

Load <Self as SIMDVector>::LANES elements starting at the provided pointer.

ptr must be aligned for <Self as SIMDVector>::Scalar; no stricter alignment is required.

Lanes whose mask entry evaluates to false will not be accessed. This makes it safe to use this function with lanes masked out that would otherwise cross a page boundary or cause an out-of-bounds read.

Safety
Offsets from ptr where the mask evaluates to true must be dereferenceable to the underlying scalar type.
unsafe fn store_simd(self, ptr: *mut <Self as SIMDVector>::Scalar)

Store <Self as SIMDVector>::LANES elements contiguously starting at the provided pointer.

ptr must be aligned for <Self as SIMDVector>::Scalar; no stricter alignment is required.

Safety
The pointed-to memory must adhere to Rust's exclusive reference rules.
A contiguous store of <Self as SIMDVector>::LANES elements must touch valid memory.
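The symmetric safe wrapper gets exclusivity for free from a &mut borrow; a sketch:

// Store a full vector to the front of a slice.
fn store_to_slice<V: SIMDVector>(v: V, out: &mut [V::Scalar]) {
    assert!(out.len() >= V::LANES, "slice too short for a full SIMD store");
    // SAFETY: the assert bounds the write; `out` is exclusively borrowed
    // and element-aligned.
    unsafe { v.store_simd(out.as_mut_ptr()) }
}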
unsafe fn store_simd_masked_logical(
    self,
    ptr: *mut <Self as SIMDVector>::Scalar,
    mask: <Self as SIMDVector>::Mask,
)

Store <Self as SIMDVector>::LANES elements starting at the provided pointer.

ptr must be aligned for <Self as SIMDVector>::Scalar; no stricter alignment is required.

Lanes whose mask entry evaluates to false will not be accessed. This makes it safe to use this function with lanes masked out that would otherwise cross a page boundary or cause an out-of-bounds write.

Safety
The pointed-to memory must adhere to Rust's exclusive reference rules.
Offsets from ptr where the mask evaluates to true must be mutably dereferenceable to the underlying scalar type.
Provided Methods

fn num_lanes() -> usize

Return Self::LANES as a runtime value.
unsafe fn load_simd_masked(
    arch: Self::Arch,
    ptr: *const <Self as SIMDVector>::Scalar,
    mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
) -> Self

The same as load_simd_masked_logical, but taking a bitmask instead.

No load attempt will be made for lanes that are masked out.

Safety
Offsets from ptr where the mask evaluates to true must be dereferenceable to the underlying scalar type. For implementations using the provided default, the conversion from the bitmask to the logical mask must be correct.
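Given the From bound on Self::Mask, the provided default plausibly expands the bitmask into the logical mask and forwards; a sketch of that shape (not necessarily the actual implementation):

unsafe fn load_simd_masked(
    arch: Self::Arch,
    ptr: *const Self::Scalar,
    mask: <Self::ConstLanes as BitMaskType<Self::Arch>>::Type,
) -> Self {
    // Expand the bitmask into the logical representation, then delegate
    // to the required method.
    Self::load_simd_masked_logical(arch, ptr, Self::Mask::from(mask))
}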
unsafe fn load_simd_first(
    arch: Self::Arch,
    ptr: *const <Self as SIMDVector>::Scalar,
    first: usize,
) -> Self

The same as load_simd_masked_logical, but potentially specialized for situations where the number of leading elements to access (first) is known.

If first is greater than or equal to the number of lanes, then all lanes will be loaded.

Safety
A contiguous read of first.min(<Self as SIMDVector>::LANES) elements must be valid.
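This makes slice tails easy to handle safely; a minimal sketch (helper name illustrative):

// Load up to a full vector from a (possibly short) slice tail.
fn load_tail<V: SIMDVector>(arch: V::Arch, tail: &[V::Scalar]) -> V {
    // SAFETY: at most tail.len().min(V::LANES) elements are read, which
    // is in bounds by construction.
    unsafe { V::load_simd_first(arch, tail.as_ptr(), tail.len()) }
}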
unsafe fn store_simd_masked(
    self,
    ptr: *mut <Self as SIMDVector>::Scalar,
    mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type,
)

The same as store_simd_masked_logical, but taking a bitmask instead.

No store attempt will be made for lanes that are masked out.

Safety
The pointed-to memory must adhere to Rust's exclusive reference rules.
Offsets from ptr where the mask evaluates to true must be mutably dereferenceable to the underlying scalar type.
For implementations using the provided default, the conversion from the bitmask to the logical mask must be correct.
unsafe fn store_simd_first(
    self,
    ptr: *mut <Self as SIMDVector>::Scalar,
    first: usize,
)

The same as store_simd_masked_logical, but potentially specialized for situations where the number of leading elements to write (first) is known.

If first is greater than or equal to the number of lanes, then all lanes will be written.

Safety
The pointed-to memory must adhere to Rust's exclusive reference rules.
A contiguous write of first.min(<Self as SIMDVector>::LANES) elements must be valid.
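Together, the _first variants support strip-mined loops with a safe partial tail; a hedged sketch of a SIMD copy loop (the Copy bound on V::Arch is an assumption that architecture tokens are cheap, copyable witnesses):

// Copy src into dst in vector-sized chunks; the final (possibly partial)
// chunk uses the `_first` variants, so no access goes out of bounds.
fn copy_simd<V>(arch: V::Arch, src: &[V::Scalar], dst: &mut [V::Scalar])
where
    V: SIMDVector,
    V::Arch: Copy,
{
    assert_eq!(src.len(), dst.len());
    let mut i = 0;
    while i < src.len() {
        let remaining = src.len() - i;
        // SAFETY: each iteration reads and writes remaining.min(V::LANES)
        // elements starting at offset i, kept in bounds by the checks
        // above; dst is exclusively borrowed.
        unsafe {
            let v: V = V::load_simd_first(arch, src.as_ptr().add(i), remaining);
            v.store_simd_first(dst.as_mut_ptr().add(i), remaining);
        }
        i += V::LANES;
    }
}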
Dyn Compatibility
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.