Skip to main content

SIMDVector

Trait SIMDVector 

Source
pub trait SIMDVector: Copy + Debug {
    type Arch: Sealed;
    type Scalar: Copy + Debug;
    type Underlying: Copy;
    type ConstLanes: ArrayType<Self::Scalar> + BitMaskType<Self::Arch>;
    type Mask: SIMDMask<Arch = Self::Arch> + From<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type> + Into<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type>;

    const LANES: usize;
    const EMULATED: bool;
Show 17 methods // Required methods fn arch(self) -> Self::Arch; fn default(arch: Self::Arch) -> Self; fn to_underlying(self) -> Self::Underlying; fn from_underlying(arch: Self::Arch, repr: Self::Underlying) -> Self; fn to_array(self) -> <Self::ConstLanes as ArrayType<Self::Scalar>>::Type; fn from_array( arch: Self::Arch, x: <Self::ConstLanes as ArrayType<Self::Scalar>>::Type, ) -> Self; fn splat(arch: Self::Arch, value: Self::Scalar) -> Self; unsafe fn load_simd( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, ) -> Self; unsafe fn load_simd_masked_logical( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, mask: <Self as SIMDVector>::Mask, ) -> Self; unsafe fn store_simd(self, ptr: *mut <Self as SIMDVector>::Scalar); unsafe fn store_simd_masked_logical( self, ptr: *mut <Self as SIMDVector>::Scalar, mask: <Self as SIMDVector>::Mask, ); // Provided methods fn num_lanes() -> usize { ... } unsafe fn load_simd_masked( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, ) -> Self { ... } unsafe fn load_simd_first( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, first: usize, ) -> Self { ... } unsafe fn store_simd_masked( self, ptr: *mut <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, ) { ... } unsafe fn store_simd_first( self, ptr: *mut <Self as SIMDVector>::Scalar, first: usize, ) { ... } fn cast<T>(self) -> <Self as SIMDCast<T>>::Cast where Self: SIMDCast<T> { ... }
}
Expand description

A trait representing minimal behavior for a SIMD-like vector.

A SIMDVector can be thought of as a homogeneous array [T; N] (with potentially stricter alignment requirements) that generally behaves for arithmetic purposes like a scalar, in the sense that a function such as

fn add(a: V, b: V) -> V
where V: SIMDVector {
    a + b
}

has the semantics of broadcasting the + operation across all lanes in the vector.

Required Associated Constants§

Source

const LANES: usize

The number of lanes in the vector.

Source

const EMULATED: bool

Whether or not this is an emulated vector.

Emulated vectors are backed by Rust arrays and use scalar loops to implement arithmetic operations.

Required Associated Types§

Source

type Arch: Sealed

The architecture this vector belongs to.

Source

type Scalar: Copy + Debug

The type of each element in the vector.

Source

type Underlying: Copy

The underlying representation.

Source

type ConstLanes: ArrayType<Self::Scalar> + BitMaskType<Self::Arch>

The value of LANES but in the type domain so we can use it to constrain other aspects of this trait.

Should be the type Const<Self::LANES>.

Source

type Mask: SIMDMask<Arch = Self::Arch> + From<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type> + Into<<Self::ConstLanes as BitMaskType<Self::Arch>>::Type>

The expanded logical mask representation. This may or may not actually be a bitmask, but should be easily convertible to and from a bitmask.

Required Methods§

Source

fn arch(self) -> Self::Arch

Return the architecture object associated with this vector.

§NOTE

This is safe because construction of self serves as the witness that we are on a compatible architecture.

Source

fn default(arch: Self::Arch) -> Self

Return the default value for the type. This is always the numeric 0 for the associated scalar type.

Source

fn to_underlying(self) -> Self::Underlying

Return the underlying type.

Source

fn from_underlying(arch: Self::Arch, repr: Self::Underlying) -> Self

Construct from the underlying type.

Source

fn to_array(self) -> <Self::ConstLanes as ArrayType<Self::Scalar>>::Type

Retrieve the contents as an array.

Source

fn from_array( arch: Self::Arch, x: <Self::ConstLanes as ArrayType<Self::Scalar>>::Type, ) -> Self

Construct from the associated array.

The argument arch provides a “proof of compatibility”, as Self::Arch can only be safely instantiated when all the requirements for the architecture are met.

Source

fn splat(arch: Self::Arch, value: Self::Scalar) -> Self

Broadcast the provided scalar across all lanes.

The argument arch provides a “proof of compatibility”, as Self::Arch can only be safely instantiated when all the requirements for the architecture are met.

Source

unsafe fn load_simd( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, ) -> Self

Load <Self as SIMDVector>::LANES number of elements starting at the provided pointer.

The alignment of ptr must be the same as <Self as SIMDVector>::Scalar, but does not need to be stricter.

§Safety

A contiguous read of <Self as SIMDVector>::LANES must touch valid memory.

Source

unsafe fn load_simd_masked_logical( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, mask: <Self as SIMDVector>::Mask, ) -> Self

Load <Self as SIMDVector>::LANES number of elements starting at the provided pointer.

The alignment of ptr must be the same as <Self as SIMDVector>::Scalar, but does not need to be stricter.

Entries in the mask that evaluate to false will not be accessed. This makes it safe to use this function with lanes masked out that would otherwise cross a page boundary or otherwise cause an out-of-bounds read.

§Safety

Offsets from the ptr where the mask evaluates to true must be dereferenceable to the underlying scalar type.

Source

unsafe fn store_simd(self, ptr: *mut <Self as SIMDVector>::Scalar)

Store <Self as SIMDVector>::LANES number of elements contiguously starting at the provided pointer.

The alignment of ptr must be the same as <Self as SIMDVector>::Scalar, but does not need to be stricter.

§Safety

The pointed-to memory must adhere to Rust’s exclusive reference rules.

A contiguous store of <Self as SIMDVector>::LANES must touch valid memory.

Source

unsafe fn store_simd_masked_logical( self, ptr: *mut <Self as SIMDVector>::Scalar, mask: <Self as SIMDVector>::Mask, )

Store <Self as SIMDVector>::LANES number of elements starting at the provided pointer.

The alignment of ptr must be the same as <Self as SIMDVector>::Scalar, but does not need to be stricter.

Entries in the mask that evaluate to false will not be accessed. This makes it safe to use this function with lanes masked out that would otherwise cross a page boundary or otherwise cause an out-of-bounds write.

§Safety

The pointed-to memory must adhere to Rust’s exclusive reference rules.

Offsets from the ptr where the mask evaluates to true must be mutably dereferenceable to the underlying scalar type.

Provided Methods§

Source

fn num_lanes() -> usize

Return the number of lanes in this vector.

Source

unsafe fn load_simd_masked( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, ) -> Self

The same as load_simd_masked_logical but taking a BitMask instead.

No load attempt will be made for lanes that are masked out.

§Safety

Offsets from the ptr where the mask evaluates to true must be dereferenceable to the underlying scalar type. For implementations using the provided default, the conversion from the bitmask to the actual mask must be correct.

Source

unsafe fn load_simd_first( arch: Self::Arch, ptr: *const <Self as SIMDVector>::Scalar, first: usize, ) -> Self

The same as load_simd_masked_logical, but potentially specialized for situations where it is known that only the first first elements will be accessed.

If first is greater than or equal to the number of lanes, then all lanes will be loaded.

§Safety

A contiguous read of first.min(<Self as SIMDVector>::LANES) must be valid.

Source

unsafe fn store_simd_masked( self, ptr: *mut <Self as SIMDVector>::Scalar, mask: <<Self as SIMDVector>::ConstLanes as BitMaskType<Self::Arch>>::Type, )

The same as store_simd_masked_logical but taking a BitMask instead.

No store attempt will be made for lanes that are masked out.

§Safety

The pointed-to memory must adhere to Rust’s exclusive reference rules.

Offsets from the ptr where the mask evaluates to true must be mutably dereferenceable to the underlying scalar type.

For implementations using the provided default, the conversion from the bitmask to the actual mask must be correct.

Source

unsafe fn store_simd_first( self, ptr: *mut <Self as SIMDVector>::Scalar, first: usize, )

The same as store_simd_masked_logical, but potentially specialized for situations where it is known that only the first first elements will be accessed.

If first is greater than or equal to the number of lanes, then all lanes will be written.

§Safety

The pointed-to memory must adhere to Rust’s exclusive reference rules.

A contiguous write of first.min(<Self as SIMDVector>::LANES) must be valid.

Source

fn cast<T>(self) -> <Self as SIMDCast<T>>::Cast
where Self: SIMDCast<T>,

Perform a numeric cast on each element, returning a new SIMD vector.

See also: SIMDCast.

Dyn Compatibility§

This trait is not dyn compatible.

In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.

Implementors§

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::f16x8_::f16x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::f16x16_::f16x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::f32x4_::f32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::f32x8_::f32x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i8x16_::i8x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i8x32_::i8x32

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i16x8_::i16x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i16x16_::i16x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i32x4_::i32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::i32x8_::i32x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u8x16_::u8x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u8x32_::u8x32

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u32x4_::u32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u32x8_::u32x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u64x2_::u64x2

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v3::u64x4_::u64x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::f16x8_::f16x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::f16x16_::f16x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::f32x4_::f32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::f32x8_::f32x8

Source§

impl SIMDVector for f32x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i8x16_::i8x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i8x32_::i8x32

Source§

impl SIMDVector for i8x64

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i16x8_::i16x8

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i16x16_::i16x16

Source§

impl SIMDVector for i16x32

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i32x4_::i32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::i32x8_::i32x8

Source§

impl SIMDVector for i32x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u8x16_::u8x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u8x32_::u8x32

Source§

impl SIMDVector for u8x64

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u32x4_::u32x4

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u32x8_::u32x8

Source§

impl SIMDVector for u32x16

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u64x2_::u64x2

Source§

impl SIMDVector for diskann_wide::arch::x86_64::v4::u64x4_::u64x4

Source§

impl<T, const N: usize, A> SIMDVector for Emulated<T, N, A>
where T: Copy + Debug + Default, Const<N>: ArrayType<T, Type = [T; N]>, BitMask<N, A>: SIMDMask<Arch = A>, A: Sealed,