Struct packed_simd_2::Simd

#[repr(transparent)]
pub struct Simd<A: SimdArray>(_);

Packed SIMD vector type.

Examples

let v = Simd::<[i32; 4]>::new(0, 1, 2, 3);
assert_eq!(v.extract(2), 2);

Implementations

impl Simd<[i8; 2]>[src]

pub const fn new(x0: i8, x1: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.
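For example, combining the constructors and accessors above (replace returns a new vector and leaves the original untouched):

let v = Simd::<[i8; 2]>::splat(7);
assert_eq!(Simd::<[i8; 2]>::lanes(), 2);
let w = v.replace(1, -3);
assert_eq!(w.extract(0), 7);
assert_eq!(w.extract(1), -3);
assert_eq!(v.extract(1), 7); // `v` is unchanged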

impl Simd<[i8; 2]>[src]

pub fn rotate_left(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
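A small sketch of the difference from a plain shift, using the i8x2 alias for Simd<[i8; 2]>: the bits truncated on one end wrap around to the other.

let x = i8x2::new(i8::MIN, 1); // bit patterns 0b1000_0000 and 0b0000_0001
let n = i8x2::new(1, 1);
assert_eq!(x.rotate_left(n), i8x2::new(1, 2));
assert_eq!(x.rotate_right(n), i8x2::new(0b0100_0000, i8::MIN));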

impl Simd<[i8; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
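For example:

let a = i8x2::new(1, -5);
let b = i8x2::new(-2, 7);
assert_eq!(a.min(b), i8x2::new(-2, -5));
assert_eq!(a.max(b), i8x2::new(1, 7));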

impl Simd<[i8; 2]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.
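For example, with 8-bit lanes both reductions wrap modulo 2^8:

let v = i8x2::new(100, 100);
assert_eq!(v.wrapping_sum(), -56);    // 200 wraps to 200 - 256
assert_eq!(v.wrapping_product(), 16); // 10_000 mod 256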

impl Simd<[i8; 2]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.
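For example:

let v = i8x2::new(-3, 7);
assert_eq!(v.max_element(), 7);
assert_eq!(v.min_element(), -3);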

impl Simd<[i8; 2]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
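For example, the reductions fold every lane into a single scalar:

let v = i8x2::new(0b1100, 0b1010);
assert_eq!(v.and(), 0b1000);
assert_eq!(v.or(), 0b1110);
assert_eq!(v.xor(), 0b0110);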

impl Simd<[i8; 2]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
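A minimal sketch using the unaligned variant, which only requires the slice to be at least Self::lanes() elements long:

let data = [1i8, 2, 3];
let v = i8x2::from_slice_unaligned(&data);
assert_eq!(v, i8x2::new(1, 2));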

impl Simd<[i8; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
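For example:

let v = i8x2::new(4, 5);
let mut out = [0i8; 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [4, 5]);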

impl Simd<[i8; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
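These conversions are their own inverses, so round-tripping through either endianness restores the original vector:

let v = i8x2::new(1, 2);
assert_eq!(i8x2::from_le(v.to_le()), v);
assert_eq!(i8x2::from_be(v.to_be()), v);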

impl Simd<[i8; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
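For example, the counts are returned per lane:

let v = i8x2::new(0b0000_1111, -1);
assert_eq!(v.count_ones(), i8x2::new(4, 8));
assert_eq!(v.count_zeros(), i8x2::new(4, 0));
assert_eq!(v.leading_zeros(), i8x2::new(4, 0));
assert_eq!(v.trailing_zeros(), i8x2::new(0, 0));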

impl Simd<[i8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.
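For example, each comparison yields a mask vector (m8x2 for 8-bit lanes) with one boolean per lane:

let a = i8x2::new(1, 2);
let b = i8x2::new(1, 3);
assert_eq!(a.eq(b), m8x2::new(true, false));
assert_eq!(a.lt(b), m8x2::new(false, true));
assert_eq!(a.ge(b), m8x2::new(true, false));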

impl Simd<[i8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements Ord.
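For example, the wrappers compare vectors lexicographically:

let a = i8x2::new(1, 2);
let b = i8x2::new(1, 3);
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert!(a.lex_ord() < b.lex_ord());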

impl Simd<[i8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.
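A small sketch, assuming the usual convention that lane i maps to bit i of the result:

let v = i8x2::new(-1, 0); // MSBs: 1, 0
assert_eq!(v.bitmask(), 0b01);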

impl Simd<[u8; 2]>[src]

pub const fn new(x0: u8, x1: u8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 2]>[src]

pub fn rotate_left(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 2]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u8; 2]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 2]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 2]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
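For example:

let m = m8x2::new(true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m8x2::splat(false).none());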

impl Simd<[m8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
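For example, selecting between two i8x2 vectors (any vector type with the same lane layout works):

let mask = m8x2::new(true, false);
let a = i8x2::new(1, 2);
let b = i8x2::new(10, 20);
assert_eq!(mask.select(a, b), i8x2::new(1, 20));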

impl Simd<[m8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i8; 4]>[src]

pub const fn new(x0: i8, x1: i8, x2: i8, x3: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 4]>[src]

pub fn rotate_left(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 4]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i8; 4]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.

impl Simd<[i8; 4]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 4]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u8; 4]>[src]

pub const fn new(x0: u8, x1: u8, x2: u8, x3: u8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 4]>[src]

pub fn rotate_left(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 4]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u8; 4]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 4]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 4]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i16; 2]>[src]

pub const fn new(x0: i16, x1: i16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn rotate_left(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 2]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i16; 2]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 2]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 2]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u16; 2]>[src]

pub const fn new(x0: u16, x1: u16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn rotate_left(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 2]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u16; 2]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 2]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 2]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m16; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i8; 8]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 8]>[src]

pub fn rotate_left(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
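A short example of the lane-wise behaviour:

use packed_simd_2::i8x8;

let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b = i8x8::new(7, 6, 5, 4, 3, 2, 1, 0);
// Each output lane holds the smaller (or larger) of the two input lanes.
assert_eq!(a.min(b), i8x8::new(0, 1, 2, 3, 3, 2, 1, 0));
assert_eq!(a.max(b), i8x8::new(7, 6, 5, 4, 4, 5, 6, 7));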

impl Simd<[i8; 8]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.
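A minimal sketch of the wrapping behaviour for i8 lanes (8-bit wrap-around):

use packed_simd_2::i8x8;

let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
assert_eq!(v.wrapping_sum(), 36);
// 8 * 100 = 800 does not fit in an i8; 800 mod 256 = 32.
assert_eq!(i8x8::splat(100).wrapping_sum(), 32);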

impl Simd<[i8; 8]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.
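For example:

use packed_simd_2::i8x8;

let v = i8x8::new(3, -7, 0, 5, 2, -1, 8, 4);
assert_eq!(v.max_element(), 8);
assert_eq!(v.min_element(), -7);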

impl Simd<[i8; 8]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 8]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
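A small sketch using the checked, unaligned constructor (the aligned variants additionally require the slice to start on an align_of::<Self>() boundary):

use packed_simd_2::i8x8;

let data: [i8; 10] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
// Only the first Self::lanes() == 8 elements are read.
let v = i8x8::from_slice_unaligned(&data);
assert_eq!(v.extract(7), 7);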

impl Simd<[i8; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
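For example, writing back through the unaligned variant:

use packed_simd_2::i8x8;

let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let mut out = [0i8; 8];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1, 2, 3, 4, 5, 6, 7, 8]);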

impl Simd<[i8; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
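A sketch of properties that hold on any target, without assuming a particular endianness:

use packed_simd_2::i8x8;

let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
// Swapping the byte order twice restores the original vector.
assert_eq!(v.swap_bytes().swap_bytes(), v);
// Converting to and back from a fixed endianness is the identity.
assert_eq!(i8x8::from_le(v.to_le()), v);
assert_eq!(i8x8::from_be(v.to_be()), v);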

impl Simd<[i8; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
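For example, the counts are computed per lane and returned in a vector of the same type:

use packed_simd_2::i8x8;

let v = i8x8::new(0, 1, 2, 3, -1, 0x55, 64, 127);
// -1 is 0xff (eight ones); 0x55 is 0b0101_0101 (four ones).
assert_eq!(v.count_ones(), i8x8::new(0, 1, 1, 2, 8, 4, 1, 7));
assert_eq!(i8x8::splat(1).leading_zeros(), i8x8::splat(7));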

impl Simd<[i8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.
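For example, each lane comparison produces the corresponding lane of a boolean mask vector:

use packed_simd_2::{i8x8, m8x8};

let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b = i8x8::splat(4);
assert_eq!(
    a.gt(b),
    m8x8::new(false, false, false, false, true, true, true, true)
);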

impl Simd<[i8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements Ord.
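A sketch of how the wrapper is used; vectors are compared lexicographically, lane 0 first:

use packed_simd_2::i8x8;

let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b = i8x8::new(1, 2, 3, 4, 5, 6, 7, 9);
// Equal up to the last lane, where 8 < 9.
assert!(a.lex_ord() < b.lex_ord());
assert!(a.partial_lex_ord() <= b.partial_lex_ord());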

impl Simd<[i8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u8; 8]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn rotate_left(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
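A small sketch of the per-lane rotation (each lane is rotated by the amount in the matching lane of n):

use packed_simd_2::u8x8;

let x = u8x8::splat(0b1000_0001);
let n = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r = x.rotate_left(n);
assert_eq!(r.extract(0), 0b1000_0001); // rotated by 0
assert_eq!(r.extract(1), 0b0000_0011); // the high bit wrapped around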

impl Simd<[u8; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 8]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[u8; 8]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 8]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 8]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.
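A sketch assuming the dynamic index vector for u8x8 is itself a u8x8 (per the Shuffle1Dyn bound) and that every index is in range:

use packed_simd_2::u8x8;

let v = u8x8::new(10, 11, 12, 13, 14, 15, 16, 17);
let idx = u8x8::new(7, 6, 5, 4, 3, 2, 1, 0);
// Lane i of the result is v.extract(idx.extract(i)).
assert_eq!(v.shuffle1_dyn(idx), u8x8::new(17, 16, 15, 14, 13, 12, 11, 10));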

impl Simd<[u8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i16; 4]>[src]

pub const fn new(x0: i16, x1: i16, x2: i16, x3: i16) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn rotate_left(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 4]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[i16; 4]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 4]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 4]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 4]>[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u16; 4]>[src]

pub const fn new(x0: u16, x1: u16, x2: u16, x3: u16) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 4]>[src]

pub fn rotate_left(self, n: u16x4) -> u16x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x4) -> u16x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 4]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[u16; 4]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 4]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 4]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 4]>[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m16; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 4]>[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i32; 2]>[src]

pub const fn new(x0: i32, x1: i32) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>[src]

pub fn rotate_left(self, n: i32x2) -> i32x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x2) -> i32x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 2]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[i32; 2]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

pub fn min_element(self) -> i32[src]

Smallest vector element value.

impl Simd<[i32; 2]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 2]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i32; 2]>[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i32; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u32; 2]>[src]

pub const fn new(x0: u32, x1: u32) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>[src]

pub fn rotate_left(self, n: u32x2) -> u32x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x2) -> u32x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u32; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u32; 2]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[u32; 2]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

pub fn min_element(self) -> u32[src]

Smallest vector element value.

impl Simd<[u32; 2]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u32; 2]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u32; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u32; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u32; 2]>[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u32; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u32; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m32; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m32; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m32; 2]>[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
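
A minimal sketch of select (assuming f32x2 satisfies the same-lane-width bound on T):

let mask = m32x2::new(true, false);
let a = f32x2::new(1.0, 2.0);
let b = f32x2::new(10.0, 20.0);
assert_eq!(mask.select(a, b), f32x2::new(1.0, 20.0));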

impl Simd<[m32; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m32; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m32; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m32; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[f32; 2]>[src]

pub const fn new(x0: f32, x1: f32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
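
A minimal sketch of the lane-wise min/max (assuming the f32x2 alias):

let a = f32x2::new(1.0, 5.0);
let b = f32x2::new(2.0, 3.0);
assert_eq!(a.min(b), f32x2::new(1.0, 3.0));
assert_eq!(a.max(b), f32x2::new(2.0, 5.0));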

impl Simd<[f32; 2]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be bit-identical to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be bit-identical to any of the NaNs in the vector.
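
A minimal sketch of the horizontal reductions, using values that are exact in f32:

let v = f32x2::new(1.5, 2.5);
assert_eq!(v.sum(), 4.0);
assert_eq!(v.product(), 3.75);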

impl Simd<[f32; 2]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

pub fn min_element(self) -> f32[src]

Smallest vector element value.

impl Simd<[f32; 2]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
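
A minimal sketch combining the checked unaligned slice constructor and writer:

let data = [1.0_f32, 2.0, 3.0];
let v = f32x2::from_slice_unaligned(&data);
assert_eq!(v, f32x2::new(1.0, 2.0));

let mut out = [0.0_f32; 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1.0_f32, 2.0]);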

impl Simd<[f32; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f32; 2]>[src]

pub const EPSILON: f32x2[src]

Machine epsilon value.

pub const MIN: f32x2[src]

Smallest finite value.

pub const MIN_POSITIVE: f32x2[src]

Smallest positive normal value.

pub const MAX: f32x2[src]

Largest finite value.

pub const NAN: f32x2[src]

Not a Number (NaN).

pub const INFINITY: f32x2[src]

Infinity (∞).

pub const NEG_INFINITY: f32x2[src]

Negative infinity (-∞).

pub const PI: f32x2[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f32x2[src]

π/2

pub const FRAC_PI_3: f32x2[src]

π/3

pub const FRAC_PI_4: f32x2[src]

π/4

pub const FRAC_PI_6: f32x2[src]

π/6

pub const FRAC_PI_8: f32x2[src]

π/8

pub const FRAC_1_PI: f32x2[src]

1/π

pub const FRAC_2_PI: f32x2[src]

2/π

pub const FRAC_2_SQRT_PI: f32x2[src]

2/sqrt(π)

pub const SQRT_2: f32x2[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f32x2[src]

1/sqrt(2)

pub const E: f32x2[src]

Euler's number (e)

pub const LOG2_E: f32x2[src]

log2(e)

pub const LOG10_E: f32x2[src]

log10(e)

pub const LN_2: f32x2[src]

ln(2)

pub const LN_10: f32x2[src]

ln(10)

impl Simd<[f32; 2]>[src]

pub fn is_nan(self) -> m32x2[src]

Lane-wise check for NaN values.

pub fn is_infinite(self) -> m32x2[src]

Lane-wise check for infinite values.

pub fn is_finite(self) -> m32x2[src]

Lane-wise check for finite values.

impl Simd<[f32; 2]>[src]

pub fn abs(self) -> Self[src]

Absolute value.
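
A short sketch:

let v = f32x2::new(-1.5, 2.0);
assert_eq!(v.abs(), f32x2::new(1.5, 2.0));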

impl Simd<[f32; 2]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f32; 2]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f32; 2]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f32; 2]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z
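
A minimal sketch of mul_add, using values that are exact in f32:

let x = f32x2::new(2.0, 3.0);
let y = f32x2::splat(4.0);
let z = f32x2::splat(1.0);
assert_eq!(x.mul_add(y, z), f32x2::new(9.0, 13.0));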

impl Simd<[f32; 2]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde is only guaranteed to be at least as precise as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f32; 2]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

impl Simd<[f32; 2]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f32; 2]>[src]

pub fn sqrt(self) -> Self[src]

Lane-wise square root.

impl Simd<[f32; 2]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

impl Simd<[f32; 2]>[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 16]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>[src]

pub fn rotate_left(self, n: i8x16) -> i8x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x16) -> i8x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
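
A minimal sketch of the lane-wise rotates (assuming the i8x16 alias):

let x = i8x16::splat(0b0001_0000);
let n = i8x16::splat(1);
assert_eq!(x.rotate_left(n), i8x16::splat(0b0010_0000));
assert_eq!(x.rotate_right(n), i8x16::splat(0b0000_1000));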

impl Simd<[i8; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 16]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.
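
A minimal sketch of the wrapping reductions:

let v = i8x16::splat(1);
assert_eq!(v.wrapping_sum(), 16);
assert_eq!(v.wrapping_product(), 1);
// 16 * 100 = 1600, which wraps modulo 2^8 to 64.
assert_eq!(i8x16::splat(100).wrapping_sum(), 64);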

impl Simd<[i8; 16]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.
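
A short sketch of the element reductions:

let v = i8x16::new(3, -7, 0, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
assert_eq!(v.max_element(), 5);
assert_eq!(v.min_element(), -7);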

impl Simd<[i8; 16]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 16]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
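
A minimal sketch of the per-lane bit counts:

let v = i8x16::splat(0b0101_0101);
assert_eq!(v.count_ones(), i8x16::splat(4));
assert_eq!(v.count_zeros(), i8x16::splat(4));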

impl Simd<[i8; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 16]>[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u8; 16]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>[src]

pub fn rotate_left(self, n: u8x16) -> u8x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x16) -> u8x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 16]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

impl Simd<[u8; 16]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 16]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 16]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.
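
A minimal sketch of a dynamic shuffle (assuming the index vector for u8x16 is itself a u8x16, that output lane i takes the input lane at indices[i], and that all indices are in bounds):

let v = u8x16::new(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150);
let idx = u8x16::new(1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
assert_eq!(v.shuffle1_dyn(idx).extract(0), 10);
assert_eq!(v.shuffle1_dyn(idx).extract(1), 0);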

impl Simd<[u8; 16]>[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 16]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 16]>[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i16; 8]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 8]>[src]

pub fn rotate_left(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 8]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

impl Simd<[i16; 8]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 8]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 8]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u16; 8]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 8]>[src]

pub fn rotate_left(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 8]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

impl Simd<[u16; 8]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 8]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 8]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
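
A minimal sketch of the byte-order helpers (the round trips hold on any target):

let x = u16x8::splat(0x1234);
assert_eq!(x.swap_bytes(), u16x8::splat(0x3412));
assert_eq!(u16x8::from_le(x.to_le()), x);
assert_eq!(u16x8::from_be(x.to_be()), x);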

impl Simd<[u16; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m16; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i32; 4]>[src]

pub const fn new(x0: i32, x1: i32, x2: i32, x3: i32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 4]>[src]

pub fn rotate_left(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 4]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the lane type.

impl Simd<[i32; 4]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

pub fn min_element(self) -> i32[src]

Smallest vector element value.

impl Simd<[i32; 4]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 4]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u32; 4]>[src]

pub const fn new(x0: u32, x1: u32, x2: u32, x3: u32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u32; 4]>[src]

pub fn rotate_left(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
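
For example, a minimal sketch of the per-lane rotation:

use packed_simd_2::u32x4;

let x = u32x4::splat(0x8000_0001);
// Rotating each lane left by one bit wraps the top bit around to bit 0.
assert_eq!(x.rotate_left(u32x4::splat(1)), u32x4::splat(0x0000_0003));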

impl Simd<[u32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u32; 4]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.
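
For example, a small sketch of both reductions, including the wrapping behavior on overflow:

use packed_simd_2::u32x4;

let x = u32x4::new(1, 2, 3, 4);
assert_eq!(x.wrapping_sum(), 10);
assert_eq!(x.wrapping_product(), 24);
// The sum wraps modulo 2^32 when it overflows.
assert_eq!(u32x4::splat(u32::MAX).wrapping_sum(), u32::MAX.wrapping_mul(4));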

impl Simd<[u32; 4]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

pub fn min_element(self) -> u32[src]

Smallest vector element value.

impl Simd<[u32; 4]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
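
For instance, a short sketch of the three bitwise reductions:

use packed_simd_2::u32x4;

let x = u32x4::new(0b1100, 0b1010, 0b1001, 0b1111);
assert_eq!(x.and(), 0b1000);
assert_eq!(x.or(), 0b1111);
assert_eq!(x.xor(), 0b1100 ^ 0b1010 ^ 0b1001 ^ 0b1111);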

impl Simd<[u32; 4]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
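
For example, a sketch of the checked unaligned constructor (slice elements beyond Self::lanes() are ignored):

use packed_simd_2::u32x4;

let data = [1u32, 2, 3, 4, 5];
let v = u32x4::from_slice_unaligned(&data);
assert_eq!(v, u32x4::new(1, 2, 3, 4));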

impl Simd<[u32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
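
For example, a sketch of the checked unaligned store:

use packed_simd_2::u32x4;

let v = u32x4::new(1, 2, 3, 4);
let mut out = [0u32; 4];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1, 2, 3, 4]);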

impl Simd<[u32; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u32; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.
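
For example, a minimal sketch of a lane-wise comparison producing a mask vector:

use packed_simd_2::{m32x4, u32x4};

let a = u32x4::new(1, 2, 3, 4);
let b = u32x4::new(4, 3, 2, 1);
// Each lane is compared independently and yields one boolean mask lane.
assert_eq!(a.lt(b), m32x4::new(true, true, false, false));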

impl Simd<[u32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements Ord.
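
For example, a sketch that assumes the wrapper orders vectors lexicographically, lane by lane starting at lane 0:

use packed_simd_2::u32x4;

let a = u32x4::new(1, 2, 3, 4);
let b = u32x4::new(1, 2, 3, 5);
// The comparisons go through the wrapper's PartialOrd/Ord impls.
assert!(a.partial_lex_ord() <= b.partial_lex_ord());
assert!(a.lex_ord() < b.lex_ord());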

impl Simd<[u32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f32; 4]>[src]

pub const fn new(x0: f32, x1: f32, x2: f32, x3: f32) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 4]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
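
For example, a short sketch of both floating-point reductions, including NaN propagation:

use packed_simd_2::f32x4;

let x = f32x4::new(1.0, 2.0, 3.0, 4.0);
assert_eq!(x.sum(), 10.0);
assert_eq!(x.product(), 24.0);
// A NaN in any lane propagates to the reduction result.
assert!(f32x4::new(1.0, f32::NAN, 3.0, 4.0).sum().is_nan());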

impl Simd<[f32; 4]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

pub fn min_element(self) -> f32[src]

Smallest vector element value.

impl Simd<[f32; 4]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f32; 4]>[src]

pub const EPSILON: f32x4[src]

Machine epsilon value.

pub const MIN: f32x4[src]

Smallest finite value.

pub const MIN_POSITIVE: f32x4[src]

Smallest positive normal value.

pub const MAX: f32x4[src]

Largest finite value.

pub const NAN: f32x4[src]

Not a Number (NaN).

pub const INFINITY: f32x4[src]

Infinity (∞).

pub const NEG_INFINITY: f32x4[src]

Negative infinity (-∞).

pub const PI: f32x4[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f32x4[src]

π/2

pub const FRAC_PI_3: f32x4[src]

π/3

pub const FRAC_PI_4: f32x4[src]

π/4

pub const FRAC_PI_6: f32x4[src]

π/6

pub const FRAC_PI_8: f32x4[src]

π/8

pub const FRAC_1_PI: f32x4[src]

1/π

pub const FRAC_2_PI: f32x4[src]

2/π

pub const FRAC_2_SQRT_PI: f32x4[src]

2/sqrt(π)

pub const SQRT_2: f32x4[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f32x4[src]

1/sqrt(2)

pub const E: f32x4[src]

Euler's number (e)

pub const LOG2_E: f32x4[src]

log2(e)

pub const LOG10_E: f32x4[src]

log10(e)

pub const LN_2: f32x4[src]

ln(2)

pub const LN_10: f32x4[src]

ln(10)
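
Each of these constants holds the corresponding scalar constant splatted across all lanes; for example (a minimal sketch):

use packed_simd_2::f32x4;

assert_eq!(f32x4::PI.extract(0), std::f32::consts::PI);
assert!(f32x4::NAN.extract(0).is_nan());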

impl Simd<[f32; 4]>[src]

pub fn is_nan(self) -> m32x4[src]

Lane-wise check for NaN: each lane of the result is true if the corresponding lane of self is NaN.

pub fn is_infinite(self) -> m32x4[src]

Lane-wise check for positive or negative infinity.

pub fn is_finite(self) -> m32x4[src]

Lane-wise check for finite values (neither infinite nor NaN).
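
For example, a short sketch of the lane-wise classification predicates:

use packed_simd_2::{f32x4, m32x4};

let x = f32x4::new(1.0, f32::NAN, f32::INFINITY, -0.0);
assert_eq!(x.is_nan(), m32x4::new(false, true, false, false));
assert_eq!(x.is_finite(), m32x4::new(true, false, false, true));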

impl Simd<[f32; 4]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f32; 4]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f32; 4]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f32; 4]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f32; 4]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z
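
For example, a minimal sketch (the operands here are exactly representable, so the result is exact):

use packed_simd_2::f32x4;

let x = f32x4::splat(2.0);
let y = f32x4::splat(3.0);
let z = f32x4::splat(1.0);
// Computes x * y + z lane-wise.
assert_eq!(x.mul_add(y, z), f32x4::splat(7.0));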

impl Simd<[f32; 4]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision (a single rounding), mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f32; 4]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises each lane of self to the floating-point power of the corresponding lane of x.
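
For example, a sketch using a small tolerance, since powf is a floating-point approximation:

use packed_simd_2::f32x4;

let x = f32x4::splat(2.0);
let err = (x.powf(f32x4::splat(3.0)) - f32x4::splat(8.0)).abs();
assert!(err.max_element() < 1e-4);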

impl Simd<[f32; 4]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 4]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 4]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.
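
For example, a sketch of sin_pi checked against the exact value sin(π/2) = 1 with a small tolerance:

use packed_simd_2::f32x4;

let s = f32x4::splat(0.5).sin_pi();
let err = (s - f32x4::splat(1.0)).abs();
assert!(err.max_element() < 1e-6);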

impl Simd<[f32; 4]>[src]

pub fn sqrt(self) -> Self[src]

Lane-wise square root.

impl Simd<[f32; 4]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 4]>[src]

pub fn tanh(self) -> Self[src]

Hyperbolic tangent (tanh).

impl Simd<[f32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m32; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
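
For example, a minimal sketch of the three boolean reductions:

use packed_simd_2::m32x4;

let m = m32x4::new(true, false, true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m32x4::splat(false).none());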

impl Simd<[m32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
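
For example, a sketch selecting between two f32x4 vectors with an m32x4 mask (any vector type with the same number and width of lanes could be used):

use packed_simd_2::{f32x4, m32x4};

let mask = m32x4::new(true, false, true, false);
let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
let b = f32x4::new(10.0, 20.0, 30.0, 40.0);
// True lanes take values from a, false lanes from b.
assert_eq!(mask.select(a, b), f32x4::new(1.0, 20.0, 3.0, 40.0));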

impl Simd<[m32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i64; 2]>[src]

pub const fn new(x0: i64, x1: i64) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i64; 2]>[src]

pub fn rotate_left(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i64; 2]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[i64; 2]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

pub fn min_element(self) -> i64[src]

Smallest vector element value.

impl Simd<[i64; 2]>[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i64; 2]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i64; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u64; 2]>[src]

pub const fn new(x0: u64, x1: u64) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u64; 2]>[src]

pub fn rotate_left(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u64; 2]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[u64; 2]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

pub fn min_element(self) -> u64[src]

Smallest vector element value.

impl Simd<[u64; 2]>[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u64; 2]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u64; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f64; 2]>[src]

pub const fn new(x0: f64, x1: f64) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f64; 2]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

impl Simd<[f64; 2]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

pub fn min_element(self) -> f64[src]

Smallest vector element value.

impl Simd<[f64; 2]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f64; 2]>[src]

pub const EPSILON: f64x2[src]

Machine epsilon value.

pub const MIN: f64x2[src]

Smallest finite value.

pub const MIN_POSITIVE: f64x2[src]

Smallest positive normal value.

pub const MAX: f64x2[src]

Largest finite value.

pub const NAN: f64x2[src]

Not a Number (NaN).

pub const INFINITY: f64x2[src]

Infinity (∞).

pub const NEG_INFINITY: f64x2[src]

Negative infinity (-∞).

pub const PI: f64x2[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f64x2[src]

π/2

pub const FRAC_PI_3: f64x2[src]

π/3

pub const FRAC_PI_4: f64x2[src]

π/4

pub const FRAC_PI_6: f64x2[src]

π/6

pub const FRAC_PI_8: f64x2[src]

π/8

pub const FRAC_1_PI: f64x2[src]

1/π

pub const FRAC_2_PI: f64x2[src]

2/π

pub const FRAC_2_SQRT_PI: f64x2[src]

2/sqrt(π)

pub const SQRT_2: f64x2[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f64x2[src]

1/sqrt(2)

pub const E: f64x2[src]

Euler's number (e)

pub const LOG2_E: f64x2[src]

log2(e)

pub const LOG10_E: f64x2[src]

log10(e)

pub const LN_2: f64x2[src]

ln(2)

pub const LN_10: f64x2[src]

ln(10)

impl Simd<[f64; 2]>[src]

pub fn is_nan(self) -> m64x2[src]

Lane-wise check for NaN: each lane of the result is true if the corresponding lane of self is NaN.

pub fn is_infinite(self) -> m64x2[src]

Lane-wise check for positive or negative infinity.

pub fn is_finite(self) -> m64x2[src]

Lane-wise check for finite values (neither infinite nor NaN).

impl Simd<[f64; 2]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f64; 2]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f64; 2]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f64; 2]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f64; 2]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f64; 2]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision (a single rounding), mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f64; 2]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises each lane of self to the floating-point power of the corresponding lane of x.

impl Simd<[f64; 2]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 2]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 2]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f64; 2]>[src]

pub fn sqrt(self) -> Self[src]

Lane-wise square root.

impl Simd<[f64; 2]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 2]>[src]

pub fn tanh(self) -> Self[src]

Hyperbolic tangent (tanh).

impl Simd<[f64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m64; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m64; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i128; 1]>[src]

pub const fn new(x0: i128) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i128; 1]>[src]

pub fn rotate_left(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i128; 1]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i128; 1]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[i128; 1]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

pub fn min_element(self) -> i128[src]

Smallest vector element value.

impl Simd<[i128; 1]>[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i128; 1]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 1]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 1]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i128; 1]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements Ord.

impl Simd<[i128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u128; 1]>[src]

pub const fn new(x0: u128) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u128; 1]>[src]

pub fn rotate_left(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u128; 1]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u128; 1]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[u128; 1]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl Simd<[u128; 1]>[src]

pub fn and(self) -> u128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u128; 1]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
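
A small illustrative sketch of the slice constructors; the unaligned variant is shown because an arbitrary array is not guaranteed to meet the align_of::<Self>() requirement of the aligned one:

use packed_simd_2::*;

let data: [u128; 2] = [10, 20];
// Only the first Self::lanes() == 1 element is read.
let v = u128x1::from_slice_unaligned(&data);
assert_eq!(v.extract(0), 10);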

impl Simd<[u128; 1]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u128; 1]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u128; 1]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements Ord.

impl Simd<[u128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m128; 1]>[src]

pub const fn new(x0: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m128; 1]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m128; 1]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
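
A brief illustrative sketch of the boolean reductions on a one-lane mask:

use packed_simd_2::*;

let t = m128x1::new(true);
let f = m128x1::new(false);
assert!(t.all() && t.any() && !t.none());
assert!(!f.all() && !f.any() && f.none());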

impl Simd<[m128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m128; 1]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 1] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements Ord.

impl Simd<[m128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i8; 32]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 32]>[src]

pub fn rotate_left(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 32]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.
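
A short illustrative sketch of the wrap-around behaviour for i8 lanes; the reductions are computed modulo 2^8:

use packed_simd_2::*;

// 32 lanes of 1 sum to 32 with no overflow.
assert_eq!(i8x32::splat(1).wrapping_sum(), 32);
// 32 lanes of 8 sum to 256, which reduces to 0 modulo 2^8.
assert_eq!(i8x32::splat(8).wrapping_sum(), 0);
// 2^32 also reduces to 0 modulo 2^8.
assert_eq!(i8x32::splat(2).wrapping_product(), 0);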

impl Simd<[i8; 32]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.

impl Simd<[i8; 32]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 32]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u8; 32]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 32]>[src]

pub fn rotate_left(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 32]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u8; 32]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 32]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
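
An illustrative sketch of the horizontal bit reductions (and, or, xor) across all 32 lanes:

use packed_simd_2::*;

let v = u8x32::splat(0b1010).replace(0, 0b0110);
assert_eq!(v.and(), 0b0010); // bits set in every lane
assert_eq!(v.or(), 0b1110);  // bits set in any lane
// xor of 31 copies of 0b1010 is 0b1010 (odd count), then xor 0b0110.
assert_eq!(v.xor(), 0b1100);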

impl Simd<[u8; 32]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
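
A minimal illustrative sketch of writing a vector back to memory through the unaligned store:

use packed_simd_2::*;

let v = u8x32::splat(9);
let mut buf = [0u8; 32];
// The unaligned variant only requires buf.len() >= Self::lanes().
v.write_to_slice_unaligned(&mut buf);
assert!(buf.iter().all(|&b| b == 9));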

impl Simd<[u8; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements Ord.
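
A short illustrative sketch of the lexicographic wrappers; the comparison proceeds lane by lane starting at lane 0:

use packed_simd_2::*;

let a = u8x32::splat(1).replace(31, 0);
let b = u8x32::splat(1);
// All lanes up to 30 are equal; lane 31 makes a compare less than b.
assert!(a.lex_ord() < b.lex_ord());
assert!(a.partial_lex_ord() < b.partial_lex_ord());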

impl Simd<[u8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 32]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 32]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 32]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 32]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 32] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i16; 16]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 16]>[src]

pub fn rotate_left(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 16]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i16; 16]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.
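
A small illustrative sketch of the horizontal element-wise extrema:

use packed_simd_2::*;

let v = i16x16::splat(3).replace(5, -7).replace(9, 11);
assert_eq!(v.min_element(), -7);
assert_eq!(v.max_element(), 11);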

impl Simd<[i16; 16]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 16]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.
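
A brief illustrative sketch of the lane-wise comparisons; each call returns an m16x16 mask that can then be reduced with all, any or none:

use packed_simd_2::*;

let a = i16x16::splat(1);
let b = i16x16::splat(2);
assert!(a.lt(b).all());
assert!(a.eq(b).none());
assert!(a.ge(b).none());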

impl Simd<[i16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u16; 16]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 16]>[src]

pub fn rotate_left(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 16]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u16; 16]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 16]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 16]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
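
An illustrative sketch of the byte-order helpers; comparing whole vectors with assert_eq! relies on the vector types' PartialEq implementation:

use packed_simd_2::*;

let v = u16x16::splat(0x1234);
assert_eq!(v.swap_bytes(), u16x16::splat(0x3412));
// to_le/to_be round-trip through from_le/from_be on any target.
assert_eq!(u16x16::from_le(v.to_le()), v);
assert_eq!(u16x16::from_be(v.to_be()), v);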

impl Simd<[u16; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m16; 16]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
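
A small illustrative sketch of select; it assumes (as holds for vectors with the same lane width and count, such as u16x16) that the element type satisfies the NT bound:

use packed_simd_2::*;

let mask = m16x16::splat(false).replace(0, true);
let a = u16x16::splat(1);
let b = u16x16::splat(2);
let r = mask.select(a, b);
assert_eq!(r.extract(0), 1); // mask lane 0 is true  -> value from a
assert_eq!(r.extract(1), 2); // mask lane 1 is false -> value from b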

impl Simd<[m16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i32; 8]>[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 8]>[src]

pub fn rotate_left(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 8]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i32; 8]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

pub fn min_element(self) -> i32[src]

Smallest vector element value.

impl Simd<[i32; 8]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 8]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
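
A short illustrative sketch of the per-lane bit counts; each method returns a vector of the same type:

use packed_simd_2::*;

let v = i32x8::splat(0b1011);
assert_eq!(v.count_ones(), i32x8::splat(3));
assert_eq!(v.count_zeros(), i32x8::splat(29));
assert_eq!(v.leading_zeros(), i32x8::splat(28));
assert_eq!(v.trailing_zeros(), i32x8::splat(0));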

impl Simd<[i32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[i32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.
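
An illustrative sketch of bitmask, assuming the usual mapping in which lane i sets bit i of the returned integer:

use packed_simd_2::*;

// Lanes 0, 2 and 7 are negative, so their sign (most significant) bit is set.
let v = i32x8::new(-1, 0, -1, 0, 0, 0, 0, -1);
assert_eq!(v.bitmask(), 0b1000_0101);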

impl Simd<[u32; 8]>[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u32; 8]>[src]

pub fn rotate_left(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
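
As a sketch of the difference from a plain shift: bits shifted out on one side re-enter on the other, so rotating left and then right by the same per-lane amounts is the identity (values chosen purely for illustration):

let x = u32x8::splat(0x8000_0001);
let n = u32x8::splat(4);
assert_eq!(x.rotate_left(n), u32x8::splat(0x0000_0018));
assert_eq!(x.rotate_left(n).rotate_right(n), x);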

impl Simd<[u32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
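
A minimal sketch of the lane-wise behaviour (values are arbitrary):

let a = u32x8::new(0, 9, 2, 7, 4, 5, 6, 3);
let b = u32x8::new(8, 1, 2, 3, 4, 5, 6, 7);
assert_eq!(a.min(b), u32x8::new(0, 1, 2, 3, 4, 5, 6, 3));
assert_eq!(a.max(b), u32x8::new(8, 9, 2, 7, 4, 5, 6, 7));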

impl Simd<[u32; 8]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.
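
For instance, summing the lanes 1 through 8 and taking their product; the last line sketches the wrapping case and uses u32::MAX purely for illustration:

let v = u32x8::new(1, 2, 3, 4, 5, 6, 7, 8);
assert_eq!(v.wrapping_sum(), 36);
assert_eq!(v.wrapping_product(), 40320);
// the sum of eight u32::MAX lanes wraps modulo 2^32
assert_eq!(u32x8::splat(u32::MAX).wrapping_sum(), u32::MAX.wrapping_mul(8));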

impl Simd<[u32; 8]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

pub fn min_element(self) -> u32[src]

Smallest vector element value.
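
For example (arbitrary values):

let v = u32x8::new(3, 1, 4, 1, 5, 9, 2, 6);
assert_eq!(v.max_element(), 9);
assert_eq!(v.min_element(), 1);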

impl Simd<[u32; 8]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
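
A small sketch of the three horizontal bit reductions (values are arbitrary):

let v = u32x8::new(0b1111, 0b1010, 0b1111, 0b1111, 0b1111, 0b1111, 0b1111, 0b1111);
assert_eq!(v.and(), 0b1010);
assert_eq!(v.or(), 0b1111);
assert_eq!(v.xor(), 0b0101); // seven 0b1111 lanes xor one 0b1010 lane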

impl Simd<[u32; 8]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
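
The unaligned variants only require the slice to be long enough, which makes a load/store round-trip straightforward. A sketch with a hypothetical data array:

let data = [1u32, 2, 3, 4, 5, 6, 7, 8];
let v = u32x8::from_slice_unaligned(&data);
let mut out = [0u32; 8];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, data);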

impl Simd<[u32; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
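
A sketch of how these compose: converting to a fixed byte order and back is always the identity, and on a little-endian target to_be is the same as swapping bytes:

let x = u32x8::splat(0x0102_0304);
assert_eq!(u32x8::from_le(x.to_le()), x);
assert_eq!(u32x8::from_be(x.to_be()), x);
#[cfg(target_endian = "little")]
assert_eq!(x.to_be(), x.swap_bytes());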

impl Simd<[u32; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
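
These are the lane-wise counterparts of the scalar u32 methods of the same name; the result is a vector of per-lane counts. A minimal sketch:

let v = u32x8::splat(0b1010);
assert_eq!(v.count_ones(), u32x8::splat(2));
assert_eq!(v.count_zeros(), u32x8::splat(30));
assert_eq!(v.leading_zeros(), u32x8::splat(28));
assert_eq!(v.trailing_zeros(), u32x8::splat(1));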

impl Simd<[u32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.
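
Unlike a static shuffle, the indices here are a runtime value: lane i of the result is the lane of self selected by lane i of indices. A sketch, assuming (this is not stated above) that the index vector type for u32x8 is u32x8:

let v = u32x8::new(10, 11, 12, 13, 14, 15, 16, 17);
let reversed = v.shuffle1_dyn(u32x8::new(7, 6, 5, 4, 3, 2, 1, 0));
assert_eq!(reversed, u32x8::new(17, 16, 15, 14, 13, 12, 11, 10));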

impl Simd<[u32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.
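
Each comparison returns a mask vector (m32x8) with one boolean lane per input lane, which can then be reduced or used with select. A minimal sketch:

let a = u32x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b = u32x8::splat(3);
assert_eq!(a.lt(b), m32x8::new(true, true, true, false, false, false, false, false));
assert_eq!(a.ge(b), m32x8::new(false, false, false, true, true, true, true, true));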

impl Simd<[u32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements Ord.
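
Because the comparison methods on the vector itself are lane-wise, ordering two whole vectors goes through these wrappers, which compare the lanes lexicographically. A sketch:

let a = u32x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b = u32x8::new(0, 1, 2, 3, 4, 5, 6, 8);
assert!(a.lex_ord() < b.lex_ord());
assert!(a.partial_lex_ord() <= b.partial_lex_ord());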

impl Simd<[u32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[f32; 8]>[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 8]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
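
For example, with values chosen so no rounding occurs in the reductions (the NaN line uses replace from above):

let v = f32x8::new(1., 2., 3., 4., 5., 6., 7., 8.);
assert_eq!(v.sum(), 36.);
assert_eq!(v.product(), 40320.);
assert!(v.replace(0, f32::NAN).sum().is_nan());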

impl Simd<[f32; 8]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

pub fn min_element(self) -> f32[src]

Smallest vector element value.

impl Simd<[f32; 8]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f32; 8]>[src]

pub const EPSILON: f32x8[src]

Machine epsilon value.

pub const MIN: f32x8[src]

Smallest finite value.

pub const MIN_POSITIVE: f32x8[src]

Smallest positive normal value.

pub const MAX: f32x8[src]

Largest finite value.

pub const NAN: f32x8[src]

Not a Number (NaN).

pub const INFINITY: f32x8[src]

Infinity (∞).

pub const NEG_INFINITY: f32x8[src]

Negative infinity (-∞).

pub const PI: f32x8[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f32x8[src]

π/2

pub const FRAC_PI_3: f32x8[src]

π/3

pub const FRAC_PI_4: f32x8[src]

π/4

pub const FRAC_PI_6: f32x8[src]

π/6

pub const FRAC_PI_8: f32x8[src]

π/8

pub const FRAC_1_PI: f32x8[src]

1/π

pub const FRAC_2_PI: f32x8[src]

2/π

pub const FRAC_2_SQRT_PI: f32x8[src]

2/sqrt(π)

pub const SQRT_2: f32x8[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f32x8[src]

1/sqrt(2)

pub const E: f32x8[src]

Euler's number (e)

pub const LOG2_E: f32x8[src]

log2(e)

pub const LOG10_E: f32x8[src]

log10(e)

pub const LN_2: f32x8[src]

ln(2)

pub const LN_10: f32x8[src]

ln(10)

impl Simd<[f32; 8]>[src]

pub fn is_nan(self) -> m32x8[src]

pub fn is_infinite(self) -> m32x8[src]

pub fn is_finite(self) -> m32x8[src]
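
These three return lane-wise masks: a result lane is true when the corresponding lane of self is NaN, infinite, or finite, respectively. A minimal sketch (values are arbitrary):

let v = f32x8::new(0., f32::NAN, f32::INFINITY, f32::NEG_INFINITY, -1., 2.5, 3., 4.);
assert_eq!(v.is_nan(), m32x8::new(false, true, false, false, false, false, false, false));
assert_eq!(v.is_infinite(), m32x8::new(false, false, true, true, false, false, false, false));
assert_eq!(v.is_finite(), m32x8::new(true, false, false, false, true, true, true, true));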

impl Simd<[f32; 8]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f32; 8]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f32; 8]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f32; 8]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.
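
These are vectorized elementary functions, so exact results should not be assumed; comparisons are best done with a tolerance. A sketch (the tolerance 1e-6 is an arbitrary choice for illustration):

let x = f32x8::splat(1.0);
let err = (x.exp().ln() - x).abs();
assert!(err.le(f32x8::splat(1e-6)).all());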

impl Simd<[f32; 8]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f32; 8]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde has, at worst, the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.
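
A sketch with exactly representable values, where the fused version and the estimate necessarily agree:

let x = f32x8::splat(2.);
let y = f32x8::splat(3.);
let z = f32x8::splat(1.);
assert_eq!(x.mul_add(y, z), f32x8::splat(7.));
assert_eq!(x.mul_adde(y, z), f32x8::splat(7.));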

impl Simd<[f32; 8]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating point power of x.

impl Simd<[f32; 8]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 8]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 8]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f32; 8]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f32; 8]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 8]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

impl Simd<[f32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m32; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
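
These reduce a mask to a single bool; none() is the negation of any(). For example:

let m = m32x8::new(true, true, false, true, true, true, true, true);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m32x8::splat(false).none());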

impl Simd<[m32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
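
The mask can select between any two vectors with the same lane geometry as itself, for example i32x8 or f32x8 for an m32x8 mask, which gives a branch-free lane-wise choice. A sketch:

let m = m32x8::new(true, false, true, false, true, false, true, false);
let a = f32x8::splat(1.0);
let b = f32x8::splat(-1.0);
assert_eq!(m.select(a, b), f32x8::new(1., -1., 1., -1., 1., -1., 1., -1.));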

impl Simd<[m32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[m32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i64; 4]>[src]

pub const fn new(x0: i64, x1: i64, x2: i64, x3: i64) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i64; 4]>[src]

pub fn rotate_left(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i64; 4]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i64; 4]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

pub fn min_element(self) -> i64[src]

Smallest vector element value.

impl Simd<[i64; 4]>[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i64; 4]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i64; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u64; 4]>[src]

pub const fn new(x0: u64, x1: u64, x2: u64, x3: u64) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn rotate_left(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u64; 4]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u64; 4]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

pub fn min_element(self) -> u64[src]

Smallest vector element value.

impl Simd<[u64; 4]>[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u64; 4]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u64; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[u64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[f64; 4]>[src]

pub const fn new(x0: f64, x1: f64, x2: f64, x3: f64) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f64; 4]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

impl Simd<[f64; 4]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

pub fn min_element(self) -> f64[src]

Smallest vector element value.

impl Simd<[f64; 4]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f64; 4]>[src]

pub const EPSILON: f64x4[src]

Machine epsilon value.

pub const MIN: f64x4[src]

Smallest finite value.

pub const MIN_POSITIVE: f64x4[src]

Smallest positive normal value.

pub const MAX: f64x4[src]

Largest finite value.

pub const NAN: f64x4[src]

Not a Number (NaN).

pub const INFINITY: f64x4[src]

Infinity (∞).

pub const NEG_INFINITY: f64x4[src]

Negative infinity (-∞).

pub const PI: f64x4[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f64x4[src]

π/2

pub const FRAC_PI_3: f64x4[src]

π/3

pub const FRAC_PI_4: f64x4[src]

π/4

pub const FRAC_PI_6: f64x4[src]

π/6

pub const FRAC_PI_8: f64x4[src]

π/8

pub const FRAC_1_PI: f64x4[src]

1/π

pub const FRAC_2_PI: f64x4[src]

2/π

pub const FRAC_2_SQRT_PI: f64x4[src]

2/sqrt(π)

pub const SQRT_2: f64x4[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f64x4[src]

1/sqrt(2)

pub const E: f64x4[src]

Euler's number (e)

pub const LOG2_E: f64x4[src]

log2(e)

pub const LOG10_E: f64x4[src]

log10(e)

pub const LN_2: f64x4[src]

ln(2)

pub const LN_10: f64x4[src]

ln(10)

impl Simd<[f64; 4]>[src]

pub fn is_nan(self) -> m64x4[src]

pub fn is_infinite(self) -> m64x4[src]

pub fn is_finite(self) -> m64x4[src]

impl Simd<[f64; 4]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f64; 4]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f64; 4]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f64; 4]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f64; 4]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f64; 4]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde has, at worst, the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f64; 4]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating point power of x.

impl Simd<[f64; 4]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f64; 4]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f64; 4]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

impl Simd<[f64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m64; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m64; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i128; 2]>[src]

pub const fn new(x0: i128, x1: i128) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn rotate_left(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i128; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i128; 2]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[i128; 2]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

pub fn min_element(self) -> i128[src]

Smallest vector element value.

impl Simd<[i128; 2]>[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i128; 2]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i128; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u128; 2]>[src]

pub const fn new(x0: u128, x1: u128) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u128; 2]>[src]

pub fn rotate_left(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u128; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u128; 2]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^n, where n is the number of bits of the element type.

impl Simd<[u128; 2]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl Simd<[u128; 2]>[src]

pub fn and(self) -> u128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
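
A short sketch of the three bitwise reductions (not taken from the crate's examples), assuming the u128x2 alias:

use packed_simd_2::u128x2;

let v = u128x2::new(0b1100, 0b1010);
assert_eq!(v.and(), 0b1000);
assert_eq!(v.or(),  0b1110);
assert_eq!(v.xor(), 0b0110);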

impl Simd<[u128; 2]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
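
A minimal sketch of loading from a slice (not from the crate's docs); only the first Self::lanes() elements are read, assuming the u128x2 alias:

use packed_simd_2::u128x2;

let data = [1u128, 2, 3, 4];
let v = u128x2::from_slice_unaligned(&data);
assert_eq!(v, u128x2::new(1, 2));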

impl Simd<[u128; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
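
A minimal sketch of storing to a slice (not from the crate's docs), assuming the u128x2 alias:

use packed_simd_2::u128x2;

let v = u128x2::new(40, 41);
let mut out = [0u128; 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [40, 41]);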

impl Simd<[u128; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
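
A short sketch of the endianness conversions (not from the crate's docs); round-tripping through a fixed endianness is the identity on any target:

use packed_simd_2::u128x2;

let v = u128x2::new(1, 2);
assert_eq!(u128x2::from_le(v.to_le()), v);
assert_eq!(u128x2::from_be(v.to_be()), v);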

impl Simd<[u128; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
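
For example (a sketch, assuming the u128x2 alias); each count is computed per lane and returned as a vector:

use packed_simd_2::u128x2;

let v = u128x2::new(0b1011, 0);
assert_eq!(v.count_ones(), u128x2::new(3, 0));
assert_eq!(v.trailing_zeros(), u128x2::new(0, 128));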

impl Simd<[u128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.
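
A minimal sketch of the lane-wise comparisons (not from the crate's docs); each comparison yields an m128x2 mask:

use packed_simd_2::u128x2;

let a = u128x2::new(1, 2);
let b = u128x2::new(1, 3);
let m = a.eq(b);
assert!(m.extract(0));
assert!(!m.extract(1));
assert!(a.lt(b).extract(1));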

impl Simd<[u128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements Ord.
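
A short sketch (not from the crate's docs): the vectors themselves do not implement the ordering traits, but the wrappers compare lexicographically:

use packed_simd_2::u128x2;

let a = u128x2::new(1, 2);
let b = u128x2::new(1, 3);
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert!(a.lex_ord() < b.lex_ord());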

impl Simd<[u128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
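
For example, a minimal sketch (not from the crate's docs), assuming bit i of the result corresponds to lane i:

use packed_simd_2::u128x2;

let v = u128x2::new(u128::MAX, 0);
// Lane 0 has its MSB set, lane 1 does not; the unused high bits of the u8 stay cleared.
assert_eq!(v.bitmask(), 0b01);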

impl Simd<[m128; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m128; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m128; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
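
A minimal sketch of the boolean reductions on a mask vector (not from the crate's docs), assuming the m128x2 alias:

use packed_simd_2::m128x2;

let m = m128x2::new(true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());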

impl Simd<[m128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m128; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
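
For example, a short sketch (not from the crate's docs) selecting between two u128x2 vectors with an m128x2 mask:

use packed_simd_2::{m128x2, u128x2};

let mask = m128x2::new(true, false);
let a = u128x2::new(1, 2);
let b = u128x2::new(10, 20);
// true lanes come from `a`, false lanes from `b`
assert_eq!(mask.select(a, b), u128x2::new(1, 20));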

impl Simd<[m128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i8; 64]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8,
    x32: i8,
    x33: i8,
    x34: i8,
    x35: i8,
    x36: i8,
    x37: i8,
    x38: i8,
    x39: i8,
    x40: i8,
    x41: i8,
    x42: i8,
    x43: i8,
    x44: i8,
    x45: i8,
    x46: i8,
    x47: i8,
    x48: i8,
    x49: i8,
    x50: i8,
    x51: i8,
    x52: i8,
    x53: i8,
    x54: i8,
    x55: i8,
    x56: i8,
    x57: i8,
    x58: i8,
    x59: i8,
    x60: i8,
    x61: i8,
    x62: i8,
    x63: i8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn rotate_left(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 64]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 64]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

impl Simd<[i8; 64]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.

impl Simd<[i8; 64]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 64]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 64]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
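
A short sketch for a 64-lane vector (not from the crate's docs), assuming bit i of the u64 corresponds to lane i:

use packed_simd_2::i8x64;

// Every lane of -1 has its MSB set; clear lane 0 so only bit 0 of the mask is zero.
let v = i8x64::splat(-1).replace(0, 0);
assert_eq!(v.bitmask(), !1u64);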

impl Simd<[u8; 64]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8,
    x32: u8,
    x33: u8,
    x34: u8,
    x35: u8,
    x36: u8,
    x37: u8,
    x38: u8,
    x39: u8,
    x40: u8,
    x41: u8,
    x42: u8,
    x43: u8,
    x44: u8,
    x45: u8,
    x46: u8,
    x47: u8,
    x48: u8,
    x49: u8,
    x50: u8,
    x51: u8,
    x52: u8,
    x53: u8,
    x54: u8,
    x55: u8,
    x56: u8,
    x57: u8,
    x58: u8,
    x59: u8,
    x60: u8,
    x61: u8,
    x62: u8,
    x63: u8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 64]>[src]

pub fn rotate_left(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 64]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 64]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

impl Simd<[u8; 64]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 64]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 64]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 64]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 64]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 64]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m8; 64]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool,
    x32: bool,
    x33: bool,
    x34: bool,
    x35: bool,
    x36: bool,
    x37: bool,
    x38: bool,
    x39: bool,
    x40: bool,
    x41: bool,
    x42: bool,
    x43: bool,
    x44: bool,
    x45: bool,
    x46: bool,
    x47: bool,
    x48: bool,
    x49: bool,
    x50: bool,
    x51: bool,
    x52: bool,
    x53: bool,
    x54: bool,
    x55: bool,
    x56: bool,
    x57: bool,
    x58: bool,
    x59: bool,
    x60: bool,
    x61: bool,
    x62: bool,
    x63: bool
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 64]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 64]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 64]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 64] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i16; 32]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16,
    x16: i16,
    x17: i16,
    x18: i16,
    x19: i16,
    x20: i16,
    x21: i16,
    x22: i16,
    x23: i16,
    x24: i16,
    x25: i16,
    x26: i16,
    x27: i16,
    x28: i16,
    x29: i16,
    x30: i16,
    x31: i16
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 32]>[src]

pub fn rotate_left(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 32]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

impl Simd<[i16; 32]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 32]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 32]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u16; 32]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16,
    x16: u16,
    x17: u16,
    x18: u16,
    x19: u16,
    x20: u16,
    x21: u16,
    x22: u16,
    x23: u16,
    x24: u16,
    x25: u16,
    x26: u16,
    x27: u16,
    x28: u16,
    x29: u16,
    x30: u16,
    x31: u16
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn rotate_left(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 32]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

impl Simd<[u16; 32]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 32]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 32]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m16; 32]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 32]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 32]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 32]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 32] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i32; 16]>[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32,
    x8: i32,
    x9: i32,
    x10: i32,
    x11: i32,
    x12: i32,
    x13: i32,
    x14: i32,
    x15: i32
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn rotate_left(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 16]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

impl Simd<[i32; 16]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

pub fn min_element(self) -> i32[src]

Smallest vector element value.

impl Simd<[i32; 16]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 16]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[i32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
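
For illustration, a minimal sketch (not part of the crate's own docs) of bitmask on an i32x16, using only lane patterns whose result does not depend on the bit ordering:

use packed_simd_2::*;

// The MSB of a signed lane is its sign bit, so negative lanes set their mask bit.
let neg = i32x16::splat(-1);
let pos = i32x16::splat(1);
assert_eq!(neg.bitmask(), 0xFFFF); // every lane negative -> every bit set
assert_eq!(pos.bitmask(), 0);      // no lane negative -> no bit set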

impl Simd<[u32; 16]>[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32,
    x8: u32,
    x9: u32,
    x10: u32,
    x11: u32,
    x12: u32,
    x13: u32,
    x14: u32,
    x15: u32
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.
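
As a quick sketch (assuming the crate is imported as packed_simd_2), splat, replace and extract compose like this; note that replace returns a new vector rather than mutating in place:

use packed_simd_2::*;

let v = u32x16::splat(7);      // all 16 lanes are 7
let w = v.replace(3, 42);      // new vector with lane 3 changed
assert_eq!(v.extract(3), 7);   // the original is untouched
assert_eq!(w.extract(3), 42);
assert_eq!(u32x16::lanes(), 16);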

impl Simd<[u32; 16]>[src]

pub fn rotate_left(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
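
A small sketch (not from the crate docs) of the per-lane rotations; each lane of n supplies the rotation count for the corresponding lane of self:

use packed_simd_2::*;

let x = u32x16::splat(0x0000_00FF);
let n = u32x16::splat(8);
assert_eq!(x.rotate_left(n), u32x16::splat(0x0000_FF00));
// Rotating back by the same per-lane counts restores the original value.
assert_eq!(x.rotate_left(n).rotate_right(n), x);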

impl Simd<[u32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
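
For example (an illustrative sketch, assuming packed_simd_2 is in scope), min and max pick values independently in every lane:

use packed_simd_2::*;

let a = u32x16::splat(1).replace(0, 9); // lane 0 is 9, the rest are 1
let b = u32x16::splat(5);
assert_eq!(a.min(b), u32x16::splat(1).replace(0, 5));
assert_eq!(a.max(b), u32x16::splat(5).replace(0, 9));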

impl Simd<[u32; 16]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.
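
A minimal sketch (not from the crate docs) of both reductions, including the wrapping behaviour on overflow:

use packed_simd_2::*;

let v = u32x16::splat(2);
assert_eq!(v.wrapping_sum(), 32);          // 16 lanes of 2
assert_eq!(v.wrapping_product(), 65_536);  // 2 to the 16th power
// Summing 16 lanes of u32::MAX overflows and wraps modulo 2^32.
assert_eq!(u32x16::splat(u32::MAX).wrapping_sum(), u32::MAX.wrapping_mul(16));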

impl Simd<[u32; 16]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

pub fn min_element(self) -> u32[src]

Smallest vector element value.

impl Simd<[u32; 16]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
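
As a sketch (assuming packed_simd_2 is in scope), the three reductions fold every lane into a single scalar:

use packed_simd_2::*;

let v = u32x16::splat(0b1100).replace(0, 0b1010);
assert_eq!(v.and(), 0b1000); // 0b1100 & 0b1010
assert_eq!(v.or(),  0b1110); // 0b1100 | 0b1010
// 14 of the 15 identical lanes cancel pairwise under xor, leaving 0b1100 ^ 0b1010.
assert_eq!(v.xor(), 0b0110);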

impl Simd<[u32; 16]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
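
A round-trip sketch (not from the crate docs) using the unaligned variants, which only require the slice to be long enough:

use packed_simd_2::*;

let data: Vec<u32> = (0..16).collect();
let v = u32x16::from_slice_unaligned(&data);
assert_eq!(v.extract(5), 5);

let mut out = vec![0u32; 16];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, data);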

impl Simd<[u32; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
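
For illustration (a sketch, assuming packed_simd_2 is in scope), swap_bytes reverses the byte order of each lane, analogous to u32::swap_bytes, while the to_le/from_le pair is the identity on every target:

use packed_simd_2::*;

let v = u32x16::splat(0x1122_3344);
assert_eq!(v.swap_bytes(), u32x16::splat(0x4433_2211));
// to_le is a no-op on little-endian targets and a byte swap on big-endian ones,
// so converting there and back always yields the original vector.
assert_eq!(u32x16::from_le(v.to_le()), v);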

impl Simd<[u32; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
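
A small sketch (not from the crate docs); each method returns a vector holding the per-lane count:

use packed_simd_2::*;

let v = u32x16::splat(0b0001_0110);
assert_eq!(v.count_ones(), u32x16::splat(3));
assert_eq!(v.count_zeros(), u32x16::splat(29));    // 32 - 3
assert_eq!(v.leading_zeros(), u32x16::splat(27));  // highest set bit is bit 4
assert_eq!(v.trailing_zeros(), u32x16::splat(1));  // lowest set bit is bit 1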

impl Simd<[u32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.
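
The comparisons return a mask vector (m32x16 here), which can then be reduced or used for selection; a sketch, assuming packed_simd_2 is in scope:

use packed_simd_2::*;

let a = u32x16::splat(3).replace(0, 10);
let b = u32x16::splat(5);
let m = a.lt(b);                 // true in every lane except lane 0
assert!(m.any() && !m.all());
assert_eq!(m, m32x16::splat(true).replace(0, false));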

impl Simd<[u32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements Ord.
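
A sketch (not from the crate docs) of how the wrappers are compared; the two vectors differ in only one lane, so the ordering does not depend on which lane is compared first:

use packed_simd_2::*;

let a = u32x16::splat(1);
let b = u32x16::splat(1).replace(15, 2);
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert_eq!(a.lex_ord().cmp(&b.lex_ord()), std::cmp::Ordering::Less);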

impl Simd<[u32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f32; 16]>[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32,
    x8: f32,
    x9: f32,
    x10: f32,
    x11: f32,
    x12: f32,
    x13: f32,
    x14: f32,
    x15: f32
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 16]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
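
A minimal sketch (not from the crate docs) using values that are exact in f32, plus the NaN-propagation case:

use packed_simd_2::*;

let v = f32x16::splat(0.5);
assert_eq!(v.sum(), 8.0);                  // 16 * 0.5 is exact
assert_eq!(v.product(), 1.0 / 65_536.0);   // 0.5^16 is exact
// A single NaN lane makes the whole reduction NaN.
assert!(v.replace(7, f32::NAN).sum().is_nan());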

impl Simd<[f32; 16]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

pub fn min_element(self) -> f32[src]

Smallest vector element value.

impl Simd<[f32; 16]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f32; 16]>[src]

pub const EPSILON: f32x16[src]

Machine epsilon value.

pub const MIN: f32x16[src]

Smallest finite value.

pub const MIN_POSITIVE: f32x16[src]

Smallest positive normal value.

pub const MAX: f32x16[src]

Largest finite value.

pub const NAN: f32x16[src]

Not a Number (NaN).

pub const INFINITY: f32x16[src]

Infinity (∞).

pub const NEG_INFINITY: f32x16[src]

Negative infinity (-∞).

pub const PI: f32x16[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f32x16[src]

π/2

pub const FRAC_PI_3: f32x16[src]

π/3

pub const FRAC_PI_4: f32x16[src]

π/4

pub const FRAC_PI_6: f32x16[src]

π/6

pub const FRAC_PI_8: f32x16[src]

π/8

pub const FRAC_1_PI: f32x16[src]

1/π

pub const FRAC_2_PI: f32x16[src]

2/π

pub const FRAC_2_SQRT_PI: f32x16[src]

2/sqrt(π)

pub const SQRT_2: f32x16[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f32x16[src]

1/sqrt(2)

pub const E: f32x16[src]

Euler's number (e)

pub const LOG2_E: f32x16[src]

log2(e)

pub const LOG10_E: f32x16[src]

log10(e)

pub const LN_2: f32x16[src]

ln(2)

pub const LN_10: f32x16[src]

ln(10)

impl Simd<[f32; 16]>[src]

pub fn is_nan(self) -> m32x16[src]

pub fn is_infinite(self) -> m32x16[src]

pub fn is_finite(self) -> m32x16[src]
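
These classify each lane and return a mask; a quick sketch using the constants defined above:

use packed_simd_2::*;

assert!(f32x16::NAN.is_nan().all());
assert!(f32x16::INFINITY.is_infinite().all());
assert!(f32x16::splat(1.0).is_finite().all());
assert!(!f32x16::NAN.is_finite().any());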

impl Simd<[f32; 16]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f32; 16]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f32; 16]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f32; 16]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f32; 16]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f32; 16]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde is at worst as precise as a separate multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.
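
A sketch (not from the crate docs) contrasting the two; with small integer-valued inputs both are exact, so any difference would only show up in the last bits of less convenient inputs:

use packed_simd_2::*;

let x = f32x16::splat(2.0);
let y = f32x16::splat(3.0);
let z = f32x16::splat(1.0);
assert_eq!(x.mul_add(y, z), f32x16::splat(7.0));   // fused: 2 * 3 + 1
assert_eq!(x.mul_adde(y, z), f32x16::splat(7.0));  // estimate: exact for these inputs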

impl Simd<[f32; 16]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

impl Simd<[f32; 16]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 16]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 16]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.
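
A sketch (not from the crate docs); the results are approximations, so they are compared against a tolerance rather than for exact equality:

use packed_simd_2::*;

let x = f32x16::splat(0.5);
let (s, c) = x.sin_cos_pi();          // sin(0.5 * PI) and cos(0.5 * PI)
let eps = f32x16::splat(1e-6);
assert!((s - f32x16::splat(1.0)).abs().le(eps).all());
assert!(c.abs().le(eps).all());
assert!((s - x.sin_pi()).abs().le(eps).all());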

impl Simd<[f32; 16]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f32; 16]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 16]>[src]

pub fn tanh(self) -> Self[src]

Hyperbolic tangent (tanh).

impl Simd<[f32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 16]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m32; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
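
For example (a sketch, assuming packed_simd_2 is in scope):

use packed_simd_2::*;

let m = m32x16::splat(false).replace(3, true);
assert!(m.any());    // at least one lane is true
assert!(!m.all());   // but not all of them
assert!(!m.none());
assert!(m32x16::splat(false).none());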

impl Simd<[m32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
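
A sketch (not from the crate docs); the mask only has to match the lane count and lane width of the selected vectors, so an m32x16 can select between i32x16 values, f32x16 values, and so on:

use packed_simd_2::*;

let mask = m32x16::splat(false).replace(0, true);
let a = i32x16::splat(1);
let b = i32x16::splat(-1);
// Lane 0 comes from `a`, every other lane from `b`.
assert_eq!(mask.select(a, b), i32x16::splat(-1).replace(0, 1));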

impl Simd<[m32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[m32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i64; 8]>[src]

pub const fn new(
    x0: i64,
    x1: i64,
    x2: i64,
    x3: i64,
    x4: i64,
    x5: i64,
    x6: i64,
    x7: i64
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i64; 8]>[src]

pub fn rotate_left(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i64; 8]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

impl Simd<[i64; 8]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

pub fn min_element(self) -> i64[src]

Smallest vector element value.

impl Simd<[i64; 8]>[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i64; 8]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i64; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[i64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u64; 8]>[src]

pub const fn new(
    x0: u64,
    x1: u64,
    x2: u64,
    x3: u64,
    x4: u64,
    x5: u64,
    x6: u64,
    x7: u64
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u64; 8]>[src]

pub fn rotate_left(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u64; 8]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

impl Simd<[u64; 8]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

pub fn min_element(self) -> u64[src]

Smallest vector element value.

impl Simd<[u64; 8]>[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u64; 8]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u64; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[u64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f64; 8]>[src]

pub const fn new(
    x0: f64,
    x1: f64,
    x2: f64,
    x3: f64,
    x4: f64,
    x5: f64,
    x6: f64,
    x7: f64
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f64; 8]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

impl Simd<[f64; 8]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

pub fn min_element(self) -> f64[src]

Smallest vector element value.

impl Simd<[f64; 8]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[f64; 8]>[src]

pub const EPSILON: f64x8[src]

Machine epsilon value.

pub const MIN: f64x8[src]

Smallest finite value.

pub const MIN_POSITIVE: f64x8[src]

Smallest positive normal value.

pub const MAX: f64x8[src]

Largest finite value.

pub const NAN: f64x8[src]

Not a Number (NaN).

pub const INFINITY: f64x8[src]

Infinity (∞).

pub const NEG_INFINITY: f64x8[src]

Negative infinity (-∞).

pub const PI: f64x8[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f64x8[src]

π/2

pub const FRAC_PI_3: f64x8[src]

π/3

pub const FRAC_PI_4: f64x8[src]

π/4

pub const FRAC_PI_6: f64x8[src]

π/6

pub const FRAC_PI_8: f64x8[src]

π/8

pub const FRAC_1_PI: f64x8[src]

1/π

pub const FRAC_2_PI: f64x8[src]

2/π

pub const FRAC_2_SQRT_PI: f64x8[src]

2/sqrt(π)

pub const SQRT_2: f64x8[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f64x8[src]

1/sqrt(2)

pub const E: f64x8[src]

Euler's number (e)

pub const LOG2_E: f64x8[src]

log2(e)

pub const LOG10_E: f64x8[src]

log10(e)

pub const LN_2: f64x8[src]

ln(2)

pub const LN_10: f64x8[src]

ln(10)

impl Simd<[f64; 8]>[src]

pub fn is_nan(self) -> m64x8[src]

pub fn is_infinite(self) -> m64x8[src]

pub fn is_finite(self) -> m64x8[src]

impl Simd<[f64; 8]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f64; 8]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f64; 8]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f64; 8]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f64; 8]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f64; 8]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde is at worst as precise as a separate multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f64; 8]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

impl Simd<[f64; 8]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 8]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 8]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f64; 8]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f64; 8]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 8]>[src]

pub fn tanh(self) -> Self[src]

Hyperbolic tangent (tanh).

impl Simd<[f64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m64; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m64; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[m64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i128; 4]>[src]

pub const fn new(x0: i128, x1: i128, x2: i128, x3: i128) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i128; 4]>[src]

pub fn rotate_left(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i128; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i128; 4]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it wraps: the result is the mathematical value modulo 2^b, where b is the bit width of the element type.

impl Simd<[i128; 4]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

pub fn min_element(self) -> i128[src]

Smallest vector element value.

impl Simd<[i128; 4]>[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i128; 4]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i128; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[i128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
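
A sketch (not from the crate docs) of the four-lane case, where only part of the u8 is used; this assumes, as on the usual backends, that lane i maps to bit i:

use packed_simd_2::*;

let v = i128x4::splat(-1);              // every lane negative -> MSB set
assert_eq!(v.bitmask(), 0b0000_1111);   // only the low 4 bits can be set
assert_eq!(i128x4::splat(1).bitmask(), 0);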

impl Simd<[u128; 4]>[src]

pub const fn new(x0: u128, x1: u128, x2: u128, x3: u128) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u128; 4]>[src]

pub fn rotate_left(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
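
For example (a minimal sketch, assuming the `u128x4` alias exported by this crate):

use packed_simd_2::u128x4;

let v = u128x4::new(1, 2, 3, 4);
// Rotating each lane left by 1 bit doubles these small values
// (no bits wrap around yet).
assert_eq!(v.rotate_left(u128x4::splat(1)), u128x4::new(2, 4, 6, 8));
// Rotating right by the same amount undoes it.
assert_eq!(v.rotate_left(u128x4::splat(1)).rotate_right(u128x4::splat(1)), v);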

impl Simd<[u128; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u128; 4]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.
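
A small sketch of both reductions (assuming the `u128x4` alias):

use packed_simd_2::u128x4;

let v = u128x4::new(1, 2, 3, 4);
// Tree reduction: (1 + 2) + (3 + 4) = 10
assert_eq!(v.wrapping_sum(), 10);
// Tree reduction: (1 * 2) * (3 * 4) = 24
assert_eq!(v.wrapping_product(), 24);
// On overflow the result wraps around the 128-bit lane width.
assert_eq!(u128x4::splat(u128::MAX).wrapping_sum(), u128::MAX.wrapping_mul(4));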

impl Simd<[u128; 4]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl Simd<[u128; 4]>[src]

pub fn and(self) -> u128[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
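
For instance (a hedged sketch with the `u128x4` alias):

use packed_simd_2::u128x4;

let v = u128x4::new(0b1100, 0b1010, 0b1111, 0b0110);
// Horizontal bitwise reductions over all four lanes.
assert_eq!(v.and(), 0b1100 & 0b1010 & 0b1111 & 0b0110);
assert_eq!(v.or(), 0b1100 | 0b1010 | 0b1111 | 0b0110);
assert_eq!(v.xor(), 0b1100 ^ 0b1010 ^ 0b1111 ^ 0b0110);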

impl Simd<[u128; 4]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u128; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.
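
A round-trip sketch using the unaligned variants (which only require slice.len() >= Self::lanes()):

use packed_simd_2::u128x4;

let data = [10u128, 20, 30, 40, 50];
// Reads the first four elements; extra elements are ignored.
let v = u128x4::from_slice_unaligned(&data);

let mut out = [0u128; 4];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [10, 20, 30, 40]);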

impl Simd<[u128; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
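
For example (a minimal sketch; these assertions hold on any target because converting to and from the same endianness cancels out):

use packed_simd_2::u128x4;

let v = u128x4::new(1, 2, 3, 4);
// Converting to an endianness and back is the identity.
assert_eq!(u128x4::from_le(v.to_le()), v);
assert_eq!(u128x4::from_be(v.to_be()), v);
// swap_bytes reverses the byte order of every lane, so applying it twice
// restores the original vector.
assert_eq!(v.swap_bytes().swap_bytes(), v);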

impl Simd<[u128; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
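
A quick sketch of the per-lane bit counts (assuming the `u128x4` alias):

use packed_simd_2::u128x4;

let v = u128x4::new(0, 1, 0b1010, u128::MAX);
assert_eq!(v.count_ones(), u128x4::new(0, 1, 2, 128));
assert_eq!(v.count_zeros(), u128x4::new(128, 127, 126, 0));
assert_eq!(v.leading_zeros(), u128x4::new(128, 127, 124, 0));
assert_eq!(v.trailing_zeros(), u128x4::new(128, 0, 1, 0));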

impl Simd<[u128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[u128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.
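
For example, comparing two `u128x4` vectors yields an `m128x4` mask (a sketch):

use packed_simd_2::{m128x4, u128x4};

let a = u128x4::new(1, 2, 3, 4);
let b = u128x4::new(4, 3, 2, 1);
assert_eq!(a.lt(b), m128x4::new(true, true, false, false));
assert_eq!(a.eq(b), m128x4::splat(false));
assert_eq!(a.ge(b), m128x4::new(false, false, true, true));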

impl Simd<[u128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements Ord.
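
A short sketch of the lexicographic wrappers:

use packed_simd_2::u128x4;

let a = u128x4::new(1, 2, 3, 4);
let b = u128x4::new(1, 2, 4, 0);
// The first differing lane (3 < 4) decides the ordering.
assert!(a.lex_ord() < b.lex_ord());
assert!(a.partial_lex_ord() <= b.partial_lex_ord());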

impl Simd<[u128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m128; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m128; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m128; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
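
For instance (a minimal sketch with the `m128x4` alias):

use packed_simd_2::m128x4;

let m = m128x4::new(true, false, true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m128x4::splat(true).all());
assert!(m128x4::splat(false).none());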

impl Simd<[m128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m128; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
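
A sketch of mask-driven selection between two `i128x4` vectors:

use packed_simd_2::{i128x4, m128x4};

let mask = m128x4::new(true, false, false, true);
let a = i128x4::new(1, 2, 3, 4);
let b = i128x4::new(-1, -2, -3, -4);
// True lanes come from `a`, false lanes from `b`.
assert_eq!(mask.select(a, b), i128x4::new(1, -2, -3, 4));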

impl Simd<[m128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[m128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.
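
For example (a sketch, assuming lane `i` maps to bit `i` of the result):

use packed_simd_2::m128x4;

let m = m128x4::new(true, false, true, true);
// Lanes 0, 2 and 3 are true; the four unused high bits of the u8 stay cleared.
assert_eq!(m.bitmask(), 0b1101);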

impl Simd<[isize; 2]>[src]

pub const fn new(x0: isize, x1: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[isize; 2]>[src]

pub fn rotate_left(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[isize; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[isize; 2]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[isize; 2]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

pub fn min_element(self) -> isize[src]

Smallest vector element value.
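
A small sketch combining the lane-wise min/max with the horizontal reductions (assuming the `isizex2` alias):

use packed_simd_2::isizex2;

let a = isizex2::new(-3, 7);
let b = isizex2::new(5, -1);
// Lane-wise minimum and maximum.
assert_eq!(a.min(b), isizex2::new(-3, -1));
assert_eq!(a.max(b), isizex2::new(5, 7));
// Horizontal reductions over a single vector.
assert_eq!(a.max_element(), 7);
assert_eq!(a.min_element(), -3);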

impl Simd<[isize; 2]>[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[isize; 2]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[isize; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[isize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[isize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[isize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[isize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements Ord.

impl Simd<[isize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[usize; 2]>[src]

pub const fn new(x0: usize, x1: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[usize; 2]>[src]

pub fn rotate_left(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[usize; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[usize; 2]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[usize; 2]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

pub fn min_element(self) -> usize[src]

Smallest vector element value.

impl Simd<[usize; 2]>[src]

pub fn and(self) -> usize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> usize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> usize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[usize; 2]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[usize; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[usize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[usize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[usize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[usize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements Ord.

impl Simd<[usize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[msize; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[msize; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[msize; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[msize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[msize; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[msize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[msize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements Ord.

impl Simd<[msize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[msize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[isize; 4]>[src]

pub const fn new(x0: isize, x1: isize, x2: isize, x3: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[isize; 4]>[src]

pub fn rotate_left(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[isize; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[isize; 4]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[isize; 4]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

pub fn min_element(self) -> isize[src]

Smallest vector element value.

impl Simd<[isize; 4]>[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[isize; 4]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[isize; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[isize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[isize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[isize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[isize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements Ord.

impl Simd<[isize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[usize; 4]>[src]

pub const fn new(x0: usize, x1: usize, x2: usize, x3: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[usize; 4]>[src]

pub fn rotate_left(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[usize; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[usize; 4]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[usize; 4]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

pub fn min_element(self) -> usize[src]

Smallest vector element value.

impl Simd<[usize; 4]>[src]

pub fn and(self) -> usize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> usize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> usize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[usize; 4]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[usize; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[usize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.
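
An illustrative sketch only; it assumes Shuffle1Dyn is implemented for `usizex4` with `Indices = usizex4`, which should be checked against the trait's implementations:

use packed_simd_2::usizex4;

let v = usizex4::new(10, 20, 30, 40);
// Output lane i takes the value of input lane indices[i].
let indices = usizex4::new(3, 2, 1, 0);
assert_eq!(v.shuffle1_dyn(indices), usizex4::new(40, 30, 20, 10));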

impl Simd<[usize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[usize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[usize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements Ord.

impl Simd<[usize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[msize; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[msize; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[msize; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[msize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[msize; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[msize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[msize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements Ord.

impl Simd<[msize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[msize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[isize; 8]>[src]

pub const fn new(
    x0: isize,
    x1: isize,
    x2: isize,
    x3: isize,
    x4: isize,
    x5: isize,
    x6: isize,
    x7: isize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.
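
A short sketch of the basic constructors and element accessors (assuming the `isizex8` alias for `Simd<[isize; 8]>`):

use packed_simd_2::isizex8;

assert_eq!(isizex8::lanes(), 8);
// `splat` is equivalent to passing the same value to every lane of `new`.
assert_eq!(isizex8::splat(7), isizex8::new(7, 7, 7, 7, 7, 7, 7, 7));

let v = isizex8::splat(0).replace(3, 42);
// `replace` returns a new vector; the original is unchanged.
assert_eq!(v.extract(3), 42);
assert_eq!(v.extract(0), 0);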

impl Simd<[isize; 8]>[src]

pub fn rotate_left(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[isize; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[isize; 8]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[isize; 8]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

pub fn min_element(self) -> isize[src]

Smallest vector element value.

impl Simd<[isize; 8]>[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[isize; 8]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[isize; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[isize; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[isize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[isize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[isize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[isize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements Ord.

impl Simd<[isize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.
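Example (illustrative sketch; not from the upstream docs, and it assumes lane 0 maps to the least-significant bit of the returned mask):

use packed_simd_2::*;

let v = Simd::<[isize; 8]>::new(-1, 2, -3, 4, 5, 6, 7, 8);
// Lanes 0 and 2 are negative, so their MSB (sign) bits are set.
assert_eq!(v.bitmask(), 0b0000_0101);
// The same pattern falls out of a lane-wise comparison mask.
assert_eq!(v.lt(Simd::<[isize; 8]>::splat(0)).bitmask(), 0b0000_0101);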

impl Simd<[usize; 8]>[src]

pub const fn new(
    x0: usize,
    x1: usize,
    x2: usize,
    x3: usize,
    x4: usize,
    x5: usize,
    x6: usize,
    x7: usize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.
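Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let v = Simd::<[usize; 8]>::splat(1).replace(3, 42);
assert_eq!(v.extract(3), 42);
assert_eq!(v.extract(0), 1);
assert_eq!(Simd::<[usize; 8]>::lanes(), 8);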

impl Simd<[usize; 8]>[src]

pub fn rotate_left(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
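Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let x = Simd::<[usize; 8]>::splat(0b1000);
let n = Simd::<[usize; 8]>::splat(3);
// Each lane is rotated by the amount in the corresponding lane of `n`.
assert_eq!(x.rotate_left(n).extract(0), 0b100_0000);
assert_eq!(x.rotate_right(n).extract(0), 0b1);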

impl Simd<[usize; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
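Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let a = Simd::<[usize; 8]>::new(0, 9, 2, 9, 4, 9, 6, 9);
let b = Simd::<[usize; 8]>::splat(5);
assert_eq!(a.min(b).extract(0), 0); // lane-wise minimum
assert_eq!(a.min(b).extract(1), 5);
assert_eq!(a.max(b).extract(1), 9); // lane-wise maximum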

impl Simd<[usize; 8]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.
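Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let v = Simd::<[usize; 8]>::new(1, 2, 3, 4, 5, 6, 7, 8);
assert_eq!(v.wrapping_sum(), 36);        // 1 + 2 + ... + 8
assert_eq!(v.wrapping_product(), 40320); // 8!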

impl Simd<[usize; 8]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

pub fn min_element(self) -> usize[src]

Smallest vector element value.

impl Simd<[usize; 8]>[src]

pub fn and(self) -> usize[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> usize[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> usize[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[usize; 8]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[usize; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[usize; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[usize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[usize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[usize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[usize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements Ord.

impl Simd<[usize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[msize; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl Simd<[msize; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[msize; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
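Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let m = Simd::<[msize; 8]>::new(true, false, true, false, true, false, true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());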

impl Simd<[msize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[msize; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
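Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let m = Simd::<[msize; 8]>::new(true, false, true, false, true, false, true, false);
let a = Simd::<[usize; 8]>::splat(1);
let b = Simd::<[usize; 8]>::splat(2);
let r = m.select(a, b);
assert_eq!(r.extract(0), 1); // mask lane is true: taken from a
assert_eq!(r.extract(1), 2); // mask lane is false: taken from b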

impl Simd<[msize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[msize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements Ord.

impl Simd<[msize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl Simd<[msize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl<T> Simd<[*const T; 2]>[src]

pub const fn new(x0: *const T, x1: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null pointers.
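Example (illustrative sketch; not from the upstream docs):

use packed_simd_2::*;

let x = 42_i32;
let p = Simd::<[*const i32; 2]>::new(&x, core::ptr::null());
let m = p.is_null();
assert_eq!(m.extract(0), false);
assert_eq!(m.extract(1), true);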

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*const T; 2]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 2]>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset(count as isize))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset((count as isize).wrapping_neg()))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.
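Example (an illustrative sketch of the safe, wrapping pointer arithmetic; not from the upstream docs):

use packed_simd_2::*;

let data = [10_i32, 20, 30, 40];
let base = data.as_ptr();
let p = Simd::<[*const i32; 2]>::new(base, base.wrapping_add(2));
// Advance every lane by one element (in units of T, i.e. 4 bytes here).
let q = p.wrapping_add(Simd::<[usize; 2]>::splat(1));
assert_eq!(q.extract(0), base.wrapping_add(1));
assert_eq!(q.extract(1), base.wrapping_add(3));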

impl<T> Simd<[*const T; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*const T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*mut T; 2]>[src]

pub const fn new(x0: *mut T, x1: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null pointers.

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*mut T; 2]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 2]>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset(count as isize))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset((count as isize).wrapping_neg()))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

impl<T> Simd<[*mut T; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*mut T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*mut T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 2]>, value: Simd<[T; 2]>) where
    M: Mask,
    [M; 2]: SimdArray
[src]

Writes selected vector elements to memory.

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

No memory is accessed for those lanes of self whose mask is false.

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*const T; 4]>[src]

pub const fn new(x0: *const T, x1: *const T, x2: *const T, x3: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null pointers.

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*const T; 4]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 4]>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset(count as isize))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. (convenience for .wrapping_offset((count as isize).wrapping_neg()))

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

impl<T> Simd<[*const T; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*const T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*mut T; 4]>[src]

pub const fn new(x0: *mut T, x1: *mut T, x2: *mut T, x3: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null pointers.

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*mut T; 4]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 4]>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.
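
For example, an illustrative sketch where each lane is offset by a different count:

use packed_simd_2::Simd;

let data = [0i32; 4];
let base = Simd::<[*mut i32; 4]>::splat(data.as_ptr() as *mut i32);
let idx = Simd::<[isize; 4]>::new(0, 1, 2, 3);
// Each lane is offset independently: lane i now points at data[i].
let ptrs = base.wrapping_offset(idx);
assert_eq!(ptrs.extract(2), data.as_ptr().wrapping_add(2) as *mut i32);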

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.
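
An illustrative sketch of the element-wise distance computation (the pointers are built here with wrapping_add on a common base):

use packed_simd_2::Simd;

let data = [0i32; 8];
let base = data.as_ptr() as *mut i32;
let ptrs = Simd::<[*mut i32; 4]>::new(base, base.wrapping_add(1), base.wrapping_add(2), base.wrapping_add(3));
let origin = Simd::<[*mut i32; 4]>::splat(base);
// Element-wise distance from `origin` to `ptrs`, in units of i32.
let d = ptrs.wrapping_offset_from(origin);
assert_eq!(d.extract(3), 3);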

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.
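
A brief sketch showing add and sub with a uniform, splatted count (illustrative only):

use packed_simd_2::Simd;

let data = [0u8; 16];
let base = Simd::<[*mut u8; 4]>::splat(data.as_ptr() as *mut u8);
// Move every lane forward by 4 elements, then back again.
let fwd = base.wrapping_add(Simd::<[usize; 4]>::splat(4));
let back = fwd.wrapping_sub(Simd::<[usize; 4]>::splat(4));
assert_eq!(back.extract(0), base.extract(0));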

impl<T> Simd<[*mut T; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*mut T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values pointed to by self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().
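
A sketch of a masked gather, assuming the pointer-sized mask alias msizex4 exported by the crate (illustrative, not from the upstream docs):

use packed_simd_2::{msizex4, Simd};

let mut data = [10i32, 20, 30, 40];
let base = data.as_mut_ptr();
let ptrs = Simd::<[*mut i32; 4]>::new(base, base.wrapping_add(1), base.wrapping_add(2), base.wrapping_add(3));
let mask = msizex4::new(true, false, true, false);
let fallback = Simd::<[i32; 4]>::splat(-1);
// Gather only the lanes whose mask is true; the rest come from `fallback`.
let v = unsafe { ptrs.read(mask, fallback) };
assert_eq!(v.extract(0), 10);
assert_eq!(v.extract(1), -1);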

impl<T> Simd<[*mut T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 4]>, value: Simd<[T; 4]>) where
    M: Mask,
    [M; 4]: SimdArray
[src]

Writes selected vector elements to memory.

Writes the lanes of value for which the mask is true to the corresponding memory addresses in self.

No memory is accessed for those lanes of self whose mask is false.

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().
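
A sketch of a masked scatter under the same assumptions (illustrative only):

use packed_simd_2::{msizex4, Simd};

let mut data = [0i32; 4];
let base = data.as_mut_ptr();
let ptrs = Simd::<[*mut i32; 4]>::new(base, base.wrapping_add(1), base.wrapping_add(2), base.wrapping_add(3));
// Scatter: only lanes 0 and 1 are stored through their pointers.
unsafe { ptrs.write(msizex4::new(true, true, false, false), Simd::<[i32; 4]>::new(7, 8, 9, 10)) };
assert_eq!(data, [7, 8, 0, 0]);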

impl<T> Simd<[*const T; 8]>[src]

pub const fn new(
    x0: *const T,
    x1: *const T,
    x2: *const T,
    x3: *const T,
    x4: *const T,
    x5: *const T,
    x6: *const T,
    x7: *const T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null pointers.
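
For illustration (a sketch, not an upstream example), null and is_null compose with replace:

use packed_simd_2::Simd;

let x = 1i32;
// Start with all-null lanes, then point lane 3 at `x`.
let v = Simd::<[*const i32; 8]>::null().replace(3, &x);
let m = v.is_null(); // msizex8
assert!(m.extract(0));
assert!(!m.extract(3));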

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*const T; 8]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*const T; 8]>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

impl<T> Simd<[*const T; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*const T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values pointed to by self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*mut T; 8]>[src]

pub const fn new(
    x0: *mut T,
    x1: *mut T,
    x2: *mut T,
    x3: *mut T,
    x4: *mut T,
    x5: *mut T,
    x6: *mut T,
    x7: *mut T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null pointers.

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

Safety

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \ it returns a new vector with the value at `index` \ replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Safety

If index >= Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

impl<T> Simd<[*mut T; 8]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

Safety

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> Simd<[*mut T; 8]>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

This function is the inverse of offset.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

Safety

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

impl<T> Simd<[*mut T; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl<T> Simd<[*mut T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

Instantiates a new vector by reading the values pointed to by self for those lanes whose mask is true, and using the elements of value otherwise.

No memory is accessed for those lanes of self whose mask is false.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

impl<T> Simd<[*mut T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 8]>, value: Simd<[T; 8]>) where
    M: Mask,
    [M; 8]: SimdArray
[src]

Writes selected vector elements to memory.

Writes the lanes of value for which the mask is true to the corresponding memory addresses in self.

No memory is accessed for those lanes of self whose mask is false.

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

Safety

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

Trait Implementations

impl Add<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the + operator.

impl Add<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the + operator.

impl Add<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the + operator.
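
As an illustrative sketch of these two impls, both vector + vector and scalar + vector addition are available:

use packed_simd_2::f32x4;

let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
// Scalar + vector broadcasts the scalar across the lanes.
let w = 1.0f32 + v;
assert_eq!(w.extract(0), 2.0);
assert_eq!((v + v).extract(3), 8.0);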

impl Add<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the + operator.

impl Add<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the + operator.

impl Add<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the + operator.

impl Add<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the + operator.

impl Add<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the + operator.

impl Add<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the + operator.

impl Add<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the + operator.

impl Add<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the + operator.

impl Add<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the + operator.

impl Add<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the + operator.

impl Add<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the + operator.

impl Add<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the + operator.

impl Add<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the + operator.

impl Add<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the + operator.

impl Add<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the + operator.

impl Add<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the + operator.

impl Add<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the + operator.

impl Add<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the + operator.

impl Add<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the + operator.

impl Add<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the + operator.

impl Add<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the + operator.

impl Add<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the + operator.

impl Add<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the + operator.

impl Add<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the + operator.

impl Add<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the + operator.

impl Add<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the + operator.

impl Add<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the + operator.

impl Add<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the + operator.

impl Add<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the + operator.

impl Add<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the + operator.

impl Add<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the + operator.

impl Add<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the + operator.

impl Add<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the + operator.

impl Add<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the + operator.

impl Add<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the + operator.

impl Add<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the + operator.

impl Add<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the + operator.

impl Add<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the + operator.

impl Add<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the + operator.

impl Add<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the + operator.

impl Add<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the + operator.

impl Add<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the + operator.

impl Add<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the + operator.

impl Add<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the + operator.

impl Add<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the + operator.

impl Add<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the + operator.

impl Add<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the + operator.

impl Add<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the + operator.

impl Add<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the + operator.

impl Add<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the + operator.

impl Add<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the + operator.

impl Add<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the + operator.

impl AddAssign<Simd<[f32; 16]>> for f32x16[src]

impl AddAssign<Simd<[f32; 2]>> for f32x2[src]

impl AddAssign<Simd<[f32; 4]>> for f32x4[src]

impl AddAssign<Simd<[f32; 8]>> for f32x8[src]

impl AddAssign<Simd<[f64; 2]>> for f64x2[src]

impl AddAssign<Simd<[f64; 4]>> for f64x4[src]

impl AddAssign<Simd<[f64; 8]>> for f64x8[src]

impl AddAssign<Simd<[i128; 1]>> for i128x1[src]

impl AddAssign<Simd<[i128; 2]>> for i128x2[src]

impl AddAssign<Simd<[i128; 4]>> for i128x4[src]

impl AddAssign<Simd<[i16; 16]>> for i16x16[src]

impl AddAssign<Simd<[i16; 2]>> for i16x2[src]

impl AddAssign<Simd<[i16; 32]>> for i16x32[src]

impl AddAssign<Simd<[i16; 4]>> for i16x4[src]

impl AddAssign<Simd<[i16; 8]>> for i16x8[src]

impl AddAssign<Simd<[i32; 16]>> for i32x16[src]

impl AddAssign<Simd<[i32; 2]>> for i32x2[src]

impl AddAssign<Simd<[i32; 4]>> for i32x4[src]

impl AddAssign<Simd<[i32; 8]>> for i32x8[src]

impl AddAssign<Simd<[i64; 2]>> for i64x2[src]

impl AddAssign<Simd<[i64; 4]>> for i64x4[src]

impl AddAssign<Simd<[i64; 8]>> for i64x8[src]

impl AddAssign<Simd<[i8; 16]>> for i8x16[src]

impl AddAssign<Simd<[i8; 2]>> for i8x2[src]

impl AddAssign<Simd<[i8; 32]>> for i8x32[src]

impl AddAssign<Simd<[i8; 4]>> for i8x4[src]

impl AddAssign<Simd<[i8; 64]>> for i8x64[src]

impl AddAssign<Simd<[i8; 8]>> for i8x8[src]

impl AddAssign<Simd<[isize; 2]>> for isizex2[src]

impl AddAssign<Simd<[isize; 4]>> for isizex4[src]

impl AddAssign<Simd<[isize; 8]>> for isizex8[src]

impl AddAssign<Simd<[u128; 1]>> for u128x1[src]

impl AddAssign<Simd<[u128; 2]>> for u128x2[src]

impl AddAssign<Simd<[u128; 4]>> for u128x4[src]

impl AddAssign<Simd<[u16; 16]>> for u16x16[src]

impl AddAssign<Simd<[u16; 2]>> for u16x2[src]

impl AddAssign<Simd<[u16; 32]>> for u16x32[src]

impl AddAssign<Simd<[u16; 4]>> for u16x4[src]

impl AddAssign<Simd<[u16; 8]>> for u16x8[src]

impl AddAssign<Simd<[u32; 16]>> for u32x16[src]

impl AddAssign<Simd<[u32; 2]>> for u32x2[src]

impl AddAssign<Simd<[u32; 4]>> for u32x4[src]

impl AddAssign<Simd<[u32; 8]>> for u32x8[src]

impl AddAssign<Simd<[u64; 2]>> for u64x2[src]

impl AddAssign<Simd<[u64; 4]>> for u64x4[src]

impl AddAssign<Simd<[u64; 8]>> for u64x8[src]

impl AddAssign<Simd<[u8; 16]>> for u8x16[src]

impl AddAssign<Simd<[u8; 2]>> for u8x2[src]

impl AddAssign<Simd<[u8; 32]>> for u8x32[src]

impl AddAssign<Simd<[u8; 4]>> for u8x4[src]

impl AddAssign<Simd<[u8; 64]>> for u8x64[src]

impl AddAssign<Simd<[u8; 8]>> for u8x8[src]

impl AddAssign<Simd<[usize; 2]>> for usizex2[src]

impl AddAssign<Simd<[usize; 4]>> for usizex4[src]

impl AddAssign<Simd<[usize; 8]>> for usizex8[src]
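
For illustration, the compound-assignment impls mirror the binary operators (a sketch, not an upstream example):

use packed_simd_2::u32x4;

let mut v = u32x4::splat(1);
// Lane-wise in-place addition.
v += u32x4::new(10, 20, 30, 40);
assert_eq!(v.extract(2), 31);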

impl BitAnd<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the & operator.

impl BitAnd<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 1]>> for bool[src]

type Output = m128x1

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 2]>> for bool[src]

type Output = m128x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m128; 4]>> for bool[src]

type Output = m128x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 16]>> for bool[src]

type Output = m16x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 2]>> for bool[src]

type Output = m16x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 32]>> for bool[src]

type Output = m16x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 4]>> for bool[src]

type Output = m16x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m16; 8]>> for bool[src]

type Output = m16x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 16]>> for bool[src]

type Output = m32x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 2]>> for bool[src]

type Output = m32x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 4]>> for bool[src]

type Output = m32x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m32; 8]>> for bool[src]

type Output = m32x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 2]>> for bool[src]

type Output = m64x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 4]>> for bool[src]

type Output = m64x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m64; 8]>> for bool[src]

type Output = m64x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 16]>> for bool[src]

type Output = m8x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 2]>> for bool[src]

type Output = m8x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 32]>> for bool[src]

type Output = m8x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 4]>> for bool[src]

type Output = m8x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 64]>> for bool[src]

type Output = m8x64

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[m8; 8]>> for bool[src]

type Output = m8x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 2]>> for bool[src]

type Output = msizex2

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 4]>> for bool[src]

type Output = msizex4

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[msize; 8]>> for bool[src]

type Output = msizex8

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the & operator.
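
The impls above cover both operand orders: vector & vector applies the AND lane-wise, while scalar & vector (and bool & mask-vector) first splats the left-hand side into every lane. A minimal sketch of both forms, assuming the i32x4 alias exported by this crate:

use packed_simd_2::i32x4;

let a = i32x4::new(0b1100, 0b1010, 0xFF, 0);
let b = i32x4::new(0b1010, 0b1010, 0x0F, -1);

// Vector & vector: lane-wise bitwise AND.
assert_eq!(a & b, i32x4::new(0b1000, 0b1010, 0x0F, 0));

// Scalar & vector: the scalar is splatted to every lane first.
assert_eq!(0x0F_i32 & a, i32x4::new(0b1100, 0b1010, 0x0F, 0));

Both operands must have the same element type and lane count; mixing, say, an i32x4 with an i32x8 does not compile.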

impl BitAndAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitAndAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitAndAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitAndAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitAndAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitAndAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitAndAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitAndAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitAndAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitAndAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitAndAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitAndAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitAndAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitAndAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitAndAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitAndAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitAndAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitAndAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitAndAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitAndAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitAndAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitAndAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitAndAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitAndAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitAndAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitAndAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitAndAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitAndAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitAndAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitAndAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitAndAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitAndAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitAndAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitAndAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitAndAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitAndAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitAndAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitAndAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitAndAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitAndAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitAndAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitAndAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitAndAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitAndAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitAndAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitAndAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitAndAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitAndAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitAndAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitAndAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitAndAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitAndAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitAndAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitAndAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitAndAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitAndAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitAndAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitAndAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitAndAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitAndAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitAndAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitAndAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitAndAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitAndAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitAndAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitAndAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitAndAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitAndAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitAndAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitAndAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitAndAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitAndAssign<Simd<[usize; 8]>> for usizex8[src]
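
The &= form mutates the left-hand vector in place, lane by lane, with a vector of the same shape on the right. A minimal sketch, assuming the u8x8 alias:

use packed_simd_2::u8x8;

let mut v = u8x8::splat(0xFF);
// Lane-wise in-place AND with a per-lane bit mask.
v &= u8x8::new(1, 2, 4, 8, 16, 32, 64, 128);
assert_eq!(v, u8x8::new(1, 2, 4, 8, 16, 32, 64, 128));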

impl BitOr<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the | operator.

impl BitOr<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the | operator.

impl BitOr<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 1]>> for bool[src]

type Output = m128x1

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 2]>> for bool[src]

type Output = m128x2

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m128; 4]>> for bool[src]

type Output = m128x4

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 16]>> for bool[src]

type Output = m16x16

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 2]>> for bool[src]

type Output = m16x2

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 32]>> for bool[src]

type Output = m16x32

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 4]>> for bool[src]

type Output = m16x4

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m16; 8]>> for bool[src]

type Output = m16x8

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 16]>> for bool[src]

type Output = m32x16

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 2]>> for bool[src]

type Output = m32x2

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 4]>> for bool[src]

type Output = m32x4

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m32; 8]>> for bool[src]

type Output = m32x8

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 2]>> for bool[src]

type Output = m64x2

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 4]>> for bool[src]

type Output = m64x4

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m64; 8]>> for bool[src]

type Output = m64x8

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 16]>> for bool[src]

type Output = m8x16

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 2]>> for bool[src]

type Output = m8x2

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 32]>> for bool[src]

type Output = m8x32

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 4]>> for bool[src]

type Output = m8x4

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 64]>> for bool[src]

type Output = m8x64

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[m8; 8]>> for bool[src]

type Output = m8x8

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 2]>> for bool[src]

type Output = msizex2

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 4]>> for bool[src]

type Output = msizex4

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[msize; 8]>> for bool[src]

type Output = msizex8

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the | operator.
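
As with BitAnd, BitOr comes in a vector | vector form and a splatted scalar | vector form; for the mask vectors the scalar side is a bool. A short sketch of both, assuming the i16x4 and m16x4 aliases exported by this crate:

use packed_simd_2::{i16x4, m16x4};

// Vector | vector on integer lanes.
let x = i16x4::new(0b01, 0b10, 0, 0);
let y = i16x4::new(0b10, 0b10, 0, 1);
assert_eq!(x | y, i16x4::new(0b11, 0b10, 0, 1));

// bool | mask-vector: the bool acts as a splatted mask.
let m = m16x4::new(true, false, true, false);
assert!((true | m).all());   // every lane is forced to true
assert!(!(false | m).all()); // false is the identity; lanes keep their values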

impl BitOrAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitOrAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitOrAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitOrAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitOrAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitOrAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitOrAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitOrAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitOrAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitOrAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitOrAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitOrAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitOrAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitOrAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitOrAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitOrAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitOrAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitOrAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitOrAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitOrAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitOrAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitOrAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitOrAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitOrAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitOrAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitOrAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitOrAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitOrAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitOrAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitOrAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitOrAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitOrAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitOrAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitOrAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitOrAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitOrAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitOrAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitOrAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitOrAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitOrAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitOrAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitOrAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitOrAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitOrAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitOrAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitOrAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitOrAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitOrAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitOrAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitOrAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitOrAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitOrAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitOrAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitOrAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitOrAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitOrAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitOrAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitOrAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitOrAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitOrAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitOrAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitOrAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitOrAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitOrAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitOrAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitOrAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitOrAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitOrAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitOrAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitOrAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitOrAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitOrAssign<Simd<[usize; 8]>> for usizex8[src]
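
The |= form is handy for accumulating per-lane flags in place. A minimal sketch, assuming the u32x4 alias:

use packed_simd_2::u32x4;

let mut flags = u32x4::splat(0);
flags |= u32x4::new(0x1, 0x2, 0x4, 0x8); // a different flag in each lane
flags |= u32x4::splat(0x100);            // one flag set in every lane
assert_eq!(flags, u32x4::new(0x101, 0x102, 0x104, 0x108));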

impl BitXor<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 1]>> for bool[src]

type Output = m128x1

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 2]>> for bool[src]

type Output = m128x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m128; 4]>> for bool[src]

type Output = m128x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 16]>> for bool[src]

type Output = m16x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 2]>> for bool[src]

type Output = m16x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 32]>> for bool[src]

type Output = m16x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 4]>> for bool[src]

type Output = m16x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m16; 8]>> for bool[src]

type Output = m16x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 16]>> for bool[src]

type Output = m32x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 2]>> for bool[src]

type Output = m32x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 4]>> for bool[src]

type Output = m32x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m32; 8]>> for bool[src]

type Output = m32x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 2]>> for bool[src]

type Output = m64x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 4]>> for bool[src]

type Output = m64x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m64; 8]>> for bool[src]

type Output = m64x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 16]>> for bool[src]

type Output = m8x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 2]>> for bool[src]

type Output = m8x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 32]>> for bool[src]

type Output = m8x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 4]>> for bool[src]

type Output = m8x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 64]>> for bool[src]

type Output = m8x64

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[m8; 8]>> for bool[src]

type Output = m8x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 2]>> for bool[src]

type Output = msizex2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 4]>> for bool[src]

type Output = msizex4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[msize; 8]>> for bool[src]

type Output = msizex8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the ^ operator.
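
BitXor follows the same pattern: XOR with a splatted scalar toggles the selected bits in every lane, and XOR-ing a vector with itself clears it. A minimal sketch, assuming the u8x4 alias:

use packed_simd_2::u8x4;

let v = u8x4::new(0b0001, 0b0011, 0b0111, 0b1111);

// Scalar ^ vector: the scalar is splatted, toggling the low bit of each lane.
assert_eq!(1u8 ^ v, u8x4::new(0b0000, 0b0010, 0b0110, 0b1110));

// Vector ^ vector: XOR-ing a vector with itself zeroes every lane.
assert_eq!(v ^ v, u8x4::splat(0));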

impl BitXorAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitXorAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitXorAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitXorAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitXorAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitXorAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitXorAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitXorAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitXorAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitXorAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitXorAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitXorAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitXorAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitXorAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitXorAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitXorAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitXorAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitXorAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitXorAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitXorAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitXorAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitXorAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitXorAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitXorAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitXorAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitXorAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitXorAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitXorAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitXorAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitXorAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitXorAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitXorAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitXorAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitXorAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitXorAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitXorAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitXorAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitXorAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitXorAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitXorAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitXorAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitXorAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitXorAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitXorAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitXorAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitXorAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitXorAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitXorAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitXorAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitXorAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitXorAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitXorAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitXorAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitXorAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitXorAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitXorAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitXorAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitXorAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitXorAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitXorAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitXorAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitXorAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitXorAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitXorAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitXorAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitXorAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitXorAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitXorAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitXorAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitXorAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitXorAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitXorAssign<Simd<[usize; 8]>> for usizex8[src]
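
The ^= form applies the XOR in place; because XOR is its own inverse, applying the same key twice restores the original lanes. A minimal sketch, assuming the u16x4 alias:

use packed_simd_2::u16x4;

let original = u16x4::new(1, 2, 3, 4);
let key = u16x4::splat(0xFFFF);

let mut v = original;
v ^= key; // flip every bit in every lane
v ^= key; // flip them back
assert_eq!(v, original);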

impl<A: Clone + SimdArray> Clone for Simd<A>[src]

impl<A: Copy + SimdArray> Copy for Simd<A>[src]
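
Because Simd is Copy (and hence Clone), vectors can be passed by value without being moved out. A minimal sketch, assuming the i32x4 alias and the extract method documented above:

use packed_simd_2::i32x4;

fn lowest_lane(v: i32x4) -> i32 {
    v.extract(0)
}

let v = i32x4::new(1, 2, 3, 4);
let a = lowest_lane(v); // passes a copy
let b = lowest_lane(v); // v is still usable here
assert_eq!(a + b, 2);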

impl Div<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the / operator.

impl Div<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the / operator.

impl Div<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the / operator.

impl Div<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the / operator.

impl Div<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the / operator.

impl Div<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the / operator.

impl Div<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the / operator.

impl Div<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the / operator.

impl Div<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the / operator.

impl Div<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the / operator.

impl Div<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the / operator.

impl Div<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the / operator.

impl Div<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the / operator.

impl Div<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the / operator.

impl Div<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the / operator.

impl Div<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the / operator.

impl Div<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the / operator.

impl Div<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the / operator.

impl Div<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the / operator.

impl Div<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the / operator.

impl Div<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the / operator.

impl Div<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the / operator.

impl Div<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the / operator.

impl Div<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the / operator.

impl Div<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the / operator.

impl Div<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the / operator.

impl Div<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the / operator.

impl Div<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the / operator.

impl Div<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the / operator.

impl Div<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the / operator.

impl Div<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the / operator.

impl Div<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the / operator.

impl Div<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the / operator.

impl Div<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the / operator.

impl Div<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the / operator.

impl Div<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the / operator.

impl Div<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the / operator.

impl Div<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the / operator.

impl Div<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the / operator.

impl Div<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the / operator.

impl Div<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the / operator.

impl Div<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the / operator.

impl Div<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the / operator.

impl Div<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the / operator.

impl Div<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the / operator.

impl Div<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the / operator.

impl Div<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the / operator.

impl Div<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the / operator.

impl Div<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the / operator.

impl Div<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the / operator.

impl Div<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the / operator.

impl Div<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the / operator.

impl Div<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the / operator.

impl Div<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the / operator.

impl Div<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the / operator.
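
Two flavors of Div are listed above: vector / vector (Output = Self, lane-wise division) and scalar / vector (the scalar is splatted to every lane first; Output is the vector type). A minimal sketch, assuming the f32x4 alias; the values are illustrative:

use packed_simd_2::f32x4;

// vector / vector: lane-wise division (Output = Self).
let a = f32x4::new(8.0, 9.0, 10.0, 12.0);
let b = f32x4::new(2.0, 3.0, 5.0, 4.0);
let q = a / b;
assert_eq!(q.extract(0), 4.0);
assert_eq!(q.extract(3), 3.0);

// scalar / vector: the scalar is splatted, then divided lane-wise (Output = f32x4).
let r = 12.0f32 / b;
assert_eq!(r.extract(1), 4.0);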

impl DivAssign<Simd<[f32; 16]>> for f32x16[src]

impl DivAssign<Simd<[f32; 2]>> for f32x2[src]

impl DivAssign<Simd<[f32; 4]>> for f32x4[src]

impl DivAssign<Simd<[f32; 8]>> for f32x8[src]

impl DivAssign<Simd<[f64; 2]>> for f64x2[src]

impl DivAssign<Simd<[f64; 4]>> for f64x4[src]

impl DivAssign<Simd<[f64; 8]>> for f64x8[src]

impl DivAssign<Simd<[i128; 1]>> for i128x1[src]

impl DivAssign<Simd<[i128; 2]>> for i128x2[src]

impl DivAssign<Simd<[i128; 4]>> for i128x4[src]

impl DivAssign<Simd<[i16; 16]>> for i16x16[src]

impl DivAssign<Simd<[i16; 2]>> for i16x2[src]

impl DivAssign<Simd<[i16; 32]>> for i16x32[src]

impl DivAssign<Simd<[i16; 4]>> for i16x4[src]

impl DivAssign<Simd<[i16; 8]>> for i16x8[src]

impl DivAssign<Simd<[i32; 16]>> for i32x16[src]

impl DivAssign<Simd<[i32; 2]>> for i32x2[src]

impl DivAssign<Simd<[i32; 4]>> for i32x4[src]

impl DivAssign<Simd<[i32; 8]>> for i32x8[src]

impl DivAssign<Simd<[i64; 2]>> for i64x2[src]

impl DivAssign<Simd<[i64; 4]>> for i64x4[src]

impl DivAssign<Simd<[i64; 8]>> for i64x8[src]

impl DivAssign<Simd<[i8; 16]>> for i8x16[src]

impl DivAssign<Simd<[i8; 2]>> for i8x2[src]

impl DivAssign<Simd<[i8; 32]>> for i8x32[src]

impl DivAssign<Simd<[i8; 4]>> for i8x4[src]

impl DivAssign<Simd<[i8; 64]>> for i8x64[src]

impl DivAssign<Simd<[i8; 8]>> for i8x8[src]

impl DivAssign<Simd<[isize; 2]>> for isizex2[src]

impl DivAssign<Simd<[isize; 4]>> for isizex4[src]

impl DivAssign<Simd<[isize; 8]>> for isizex8[src]

impl DivAssign<Simd<[u128; 1]>> for u128x1[src]

impl DivAssign<Simd<[u128; 2]>> for u128x2[src]

impl DivAssign<Simd<[u128; 4]>> for u128x4[src]

impl DivAssign<Simd<[u16; 16]>> for u16x16[src]

impl DivAssign<Simd<[u16; 2]>> for u16x2[src]

impl DivAssign<Simd<[u16; 32]>> for u16x32[src]

impl DivAssign<Simd<[u16; 4]>> for u16x4[src]

impl DivAssign<Simd<[u16; 8]>> for u16x8[src]

impl DivAssign<Simd<[u32; 16]>> for u32x16[src]

impl DivAssign<Simd<[u32; 2]>> for u32x2[src]

impl DivAssign<Simd<[u32; 4]>> for u32x4[src]

impl DivAssign<Simd<[u32; 8]>> for u32x8[src]

impl DivAssign<Simd<[u64; 2]>> for u64x2[src]

impl DivAssign<Simd<[u64; 4]>> for u64x4[src]

impl DivAssign<Simd<[u64; 8]>> for u64x8[src]

impl DivAssign<Simd<[u8; 16]>> for u8x16[src]

impl DivAssign<Simd<[u8; 2]>> for u8x2[src]

impl DivAssign<Simd<[u8; 32]>> for u8x32[src]

impl DivAssign<Simd<[u8; 4]>> for u8x4[src]

impl DivAssign<Simd<[u8; 64]>> for u8x64[src]

impl DivAssign<Simd<[u8; 8]>> for u8x8[src]

impl DivAssign<Simd<[usize; 2]>> for usizex2[src]

impl DivAssign<Simd<[usize; 4]>> for usizex4[src]

impl DivAssign<Simd<[usize; 8]>> for usizex8[src]
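
The DivAssign impls above provide the in-place form, /=. A minimal sketch, assuming the i32x4 alias; note that integer lanes truncate toward zero like scalar Rust division:

use packed_simd_2::i32x4;

// In-place lane-wise division with `/=`.
let mut v = i32x4::new(10, 9, -7, 8);
v /= i32x4::new(2, 3, 2, 4);
assert_eq!(v.extract(2), -3); // -7 / 2 truncates toward zero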

impl From<Simd<[f32; 16]>> for [f32; 16][src]

impl From<Simd<[f32; 2]>> for [f32; 2][src]

impl From<Simd<[f32; 2]>> for f64x2[src]

impl From<Simd<[f32; 4]>> for [f32; 4][src]

impl From<Simd<[f32; 4]>> for f64x4[src]

impl From<Simd<[f32; 8]>> for [f32; 8][src]

impl From<Simd<[f32; 8]>> for f64x8[src]

impl From<Simd<[f64; 2]>> for [f64; 2][src]

impl From<Simd<[f64; 4]>> for [f64; 4][src]

impl From<Simd<[f64; 8]>> for [f64; 8][src]

impl From<Simd<[i128; 1]>> for [i128; 1][src]

impl From<Simd<[i128; 2]>> for [i128; 2][src]

impl From<Simd<[i128; 4]>> for [i128; 4][src]

impl From<Simd<[i16; 16]>> for [i16; 16][src]

impl From<Simd<[i16; 16]>> for i32x16[src]

impl From<Simd<[i16; 16]>> for f32x16[src]

impl From<Simd<[i16; 2]>> for [i16; 2][src]

impl From<Simd<[i16; 2]>> for i32x2[src]

impl From<Simd<[i16; 2]>> for f32x2[src]

impl From<Simd<[i16; 2]>> for i64x2[src]

impl From<Simd<[i16; 2]>> for f64x2[src]

impl From<Simd<[i16; 2]>> for i128x2[src]

impl From<Simd<[i16; 32]>> for [i16; 32][src]

impl From<Simd<[i16; 4]>> for [i16; 4][src]

impl From<Simd<[i16; 4]>> for i32x4[src]

impl From<Simd<[i16; 4]>> for f32x4[src]

impl From<Simd<[i16; 4]>> for i64x4[src]

impl From<Simd<[i16; 4]>> for f64x4[src]

impl From<Simd<[i16; 4]>> for i128x4[src]

impl From<Simd<[i16; 8]>> for [i16; 8][src]

impl From<Simd<[i16; 8]>> for i32x8[src]

impl From<Simd<[i16; 8]>> for f32x8[src]

impl From<Simd<[i16; 8]>> for i64x8[src]

impl From<Simd<[i16; 8]>> for f64x8[src]

impl From<Simd<[i32; 16]>> for [i32; 16][src]

impl From<Simd<[i32; 2]>> for [i32; 2][src]

impl From<Simd<[i32; 2]>> for i64x2[src]

impl From<Simd<[i32; 2]>> for f64x2[src]

impl From<Simd<[i32; 2]>> for i128x2[src]

impl From<Simd<[i32; 4]>> for [i32; 4][src]

impl From<Simd<[i32; 4]>> for i64x4[src]

impl From<Simd<[i32; 4]>> for f64x4[src]

impl From<Simd<[i32; 4]>> for i128x4[src]

impl From<Simd<[i32; 8]>> for [i32; 8][src]

impl From<Simd<[i32; 8]>> for i64x8[src]

impl From<Simd<[i32; 8]>> for f64x8[src]

impl From<Simd<[i64; 2]>> for [i64; 2][src]

impl From<Simd<[i64; 2]>> for i128x2[src]

impl From<Simd<[i64; 4]>> for [i64; 4][src]

impl From<Simd<[i64; 4]>> for i128x4[src]

impl From<Simd<[i64; 8]>> for [i64; 8][src]

impl From<Simd<[i8; 16]>> for [i8; 16][src]

impl From<Simd<[i8; 16]>> for i16x16[src]

impl From<Simd<[i8; 16]>> for i32x16[src]

impl From<Simd<[i8; 16]>> for f32x16[src]

impl From<Simd<[i8; 2]>> for [i8; 2][src]

impl From<Simd<[i8; 2]>> for i16x2[src]

impl From<Simd<[i8; 2]>> for i32x2[src]

impl From<Simd<[i8; 2]>> for f32x2[src]

impl From<Simd<[i8; 2]>> for i64x2[src]

impl From<Simd<[i8; 2]>> for f64x2[src]

impl From<Simd<[i8; 2]>> for i128x2[src]

impl From<Simd<[i8; 32]>> for [i8; 32][src]

impl From<Simd<[i8; 32]>> for i16x32[src]

impl From<Simd<[i8; 4]>> for [i8; 4][src]

impl From<Simd<[i8; 4]>> for i16x4[src]

impl From<Simd<[i8; 4]>> for i32x4[src]

impl From<Simd<[i8; 4]>> for f32x4[src]

impl From<Simd<[i8; 4]>> for i64x4[src]

impl From<Simd<[i8; 4]>> for f64x4[src]

impl From<Simd<[i8; 4]>> for i128x4[src]

impl From<Simd<[i8; 64]>> for [i8; 64][src]

impl From<Simd<[i8; 8]>> for [i8; 8][src]

impl From<Simd<[i8; 8]>> for i16x8[src]

impl From<Simd<[i8; 8]>> for i32x8[src]

impl From<Simd<[i8; 8]>> for f32x8[src]

impl From<Simd<[i8; 8]>> for i64x8[src]

impl From<Simd<[i8; 8]>> for f64x8[src]

impl From<Simd<[isize; 2]>> for [isize; 2][src]

impl From<Simd<[isize; 4]>> for [isize; 4][src]

impl From<Simd<[isize; 8]>> for [isize; 8][src]

impl From<Simd<[m128; 1]>> for [m128; 1][src]

impl From<Simd<[m128; 2]>> for m8x2[src]

impl From<Simd<[m128; 2]>> for m16x2[src]

impl From<Simd<[m128; 2]>> for m32x2[src]

impl From<Simd<[m128; 2]>> for m64x2[src]

impl From<Simd<[m128; 2]>> for [m128; 2][src]

impl From<Simd<[m128; 4]>> for [m128; 4][src]

impl From<Simd<[m16; 16]>> for m8x16[src]

impl From<Simd<[m16; 16]>> for [m16; 16][src]

impl From<Simd<[m16; 16]>> for m32x16[src]

impl From<Simd<[m16; 2]>> for m8x2[src]

impl From<Simd<[m16; 2]>> for [m16; 2][src]

impl From<Simd<[m16; 2]>> for m32x2[src]

impl From<Simd<[m16; 2]>> for m64x2[src]

impl From<Simd<[m16; 2]>> for m128x2[src]

impl From<Simd<[m16; 32]>> for [m16; 32][src]

impl From<Simd<[m16; 4]>> for m8x4[src]

impl From<Simd<[m16; 4]>> for [m16; 4][src]

impl From<Simd<[m16; 4]>> for m32x4[src]

impl From<Simd<[m16; 4]>> for m64x4[src]

impl From<Simd<[m16; 4]>> for m128x4[src]

impl From<Simd<[m16; 8]>> for m8x8[src]

impl From<Simd<[m16; 8]>> for [m16; 8][src]

impl From<Simd<[m16; 8]>> for m32x8[src]

impl From<Simd<[m16; 8]>> for m64x8[src]

impl From<Simd<[m32; 16]>> for [m32; 16][src]

impl From<Simd<[m32; 2]>> for m8x2[src]

impl From<Simd<[m32; 2]>> for m16x2[src]

impl From<Simd<[m32; 2]>> for [m32; 2][src]

impl From<Simd<[m32; 2]>> for m64x2[src]

impl From<Simd<[m32; 2]>> for m128x2[src]

impl From<Simd<[m32; 4]>> for m8x4[src]

impl From<Simd<[m32; 4]>> for m16x4[src]

impl From<Simd<[m32; 4]>> for [m32; 4][src]

impl From<Simd<[m32; 4]>> for m64x4[src]

impl From<Simd<[m32; 4]>> for m128x4[src]

impl From<Simd<[m32; 8]>> for m8x8[src]

impl From<Simd<[m32; 8]>> for m16x8[src]

impl From<Simd<[m32; 8]>> for [m32; 8][src]

impl From<Simd<[m32; 8]>> for m64x8[src]

impl From<Simd<[m64; 2]>> for m8x2[src]

impl From<Simd<[m64; 2]>> for m16x2[src]

impl From<Simd<[m64; 2]>> for m32x2[src]

impl From<Simd<[m64; 2]>> for [m64; 2][src]

impl From<Simd<[m64; 2]>> for m128x2[src]

impl From<Simd<[m64; 4]>> for m8x4[src]

impl From<Simd<[m64; 4]>> for m16x4[src]

impl From<Simd<[m64; 4]>> for m32x4[src]

impl From<Simd<[m64; 4]>> for [m64; 4][src]

impl From<Simd<[m64; 4]>> for m128x4[src]

impl From<Simd<[m64; 8]>> for [m64; 8][src]

impl From<Simd<[m8; 16]>> for [m8; 16][src]

impl From<Simd<[m8; 16]>> for m16x16[src]

impl From<Simd<[m8; 16]>> for m32x16[src]

impl From<Simd<[m8; 2]>> for [m8; 2][src]

impl From<Simd<[m8; 2]>> for m16x2[src]

impl From<Simd<[m8; 2]>> for m32x2[src]

impl From<Simd<[m8; 2]>> for m64x2[src]

impl From<Simd<[m8; 2]>> for m128x2[src]

impl From<Simd<[m8; 32]>> for [m8; 32][src]

impl From<Simd<[m8; 32]>> for m16x32[src]

impl From<Simd<[m8; 4]>> for [m8; 4][src]

impl From<Simd<[m8; 4]>> for m16x4[src]

impl From<Simd<[m8; 4]>> for m32x4[src]

impl From<Simd<[m8; 4]>> for m64x4[src]

impl From<Simd<[m8; 4]>> for m128x4[src]

impl From<Simd<[m8; 64]>> for [m8; 64][src]

impl From<Simd<[m8; 8]>> for [m8; 8][src]

impl From<Simd<[m8; 8]>> for m16x8[src]

impl From<Simd<[m8; 8]>> for m32x8[src]

impl From<Simd<[m8; 8]>> for m64x8[src]

impl From<Simd<[msize; 2]>> for [msize; 2][src]

impl From<Simd<[msize; 4]>> for [msize; 4][src]

impl From<Simd<[msize; 8]>> for [msize; 8][src]

impl From<Simd<[u128; 1]>> for [u128; 1][src]

impl From<Simd<[u128; 2]>> for [u128; 2][src]

impl From<Simd<[u128; 4]>> for [u128; 4][src]

impl From<Simd<[u16; 16]>> for [u16; 16][src]

impl From<Simd<[u16; 16]>> for i32x16[src]

impl From<Simd<[u16; 16]>> for u32x16[src]

impl From<Simd<[u16; 16]>> for f32x16[src]

impl From<Simd<[u16; 2]>> for [u16; 2][src]

impl From<Simd<[u16; 2]>> for i32x2[src]

impl From<Simd<[u16; 2]>> for u32x2[src]

impl From<Simd<[u16; 2]>> for f32x2[src]

impl From<Simd<[u16; 2]>> for i64x2[src]

impl From<Simd<[u16; 2]>> for u64x2[src]

impl From<Simd<[u16; 2]>> for f64x2[src]

impl From<Simd<[u16; 2]>> for i128x2[src]

impl From<Simd<[u16; 2]>> for u128x2[src]

impl From<Simd<[u16; 32]>> for [u16; 32][src]

impl From<Simd<[u16; 4]>> for [u16; 4][src]

impl From<Simd<[u16; 4]>> for i32x4[src]

impl From<Simd<[u16; 4]>> for u32x4[src]

impl From<Simd<[u16; 4]>> for f32x4[src]

impl From<Simd<[u16; 4]>> for i64x4[src]

impl From<Simd<[u16; 4]>> for u64x4[src]

impl From<Simd<[u16; 4]>> for f64x4[src]

impl From<Simd<[u16; 4]>> for i128x4[src]

impl From<Simd<[u16; 4]>> for u128x4[src]

impl From<Simd<[u16; 8]>> for [u16; 8][src]

impl From<Simd<[u16; 8]>> for i32x8[src]

impl From<Simd<[u16; 8]>> for u32x8[src]

impl From<Simd<[u16; 8]>> for f32x8[src]

impl From<Simd<[u16; 8]>> for i64x8[src]

impl From<Simd<[u16; 8]>> for u64x8[src]

impl From<Simd<[u16; 8]>> for f64x8[src]

impl From<Simd<[u32; 16]>> for [u32; 16][src]

impl From<Simd<[u32; 2]>> for [u32; 2][src]

impl From<Simd<[u32; 2]>> for i64x2[src]

impl From<Simd<[u32; 2]>> for u64x2[src]

impl From<Simd<[u32; 2]>> for f64x2[src]

impl From<Simd<[u32; 2]>> for i128x2[src]

impl From<Simd<[u32; 2]>> for u128x2[src]

impl From<Simd<[u32; 4]>> for [u32; 4][src]

impl From<Simd<[u32; 4]>> for i64x4[src]

impl From<Simd<[u32; 4]>> for u64x4[src]

impl From<Simd<[u32; 4]>> for f64x4[src]

impl From<Simd<[u32; 4]>> for i128x4[src]

impl From<Simd<[u32; 4]>> for u128x4[src]

impl From<Simd<[u32; 8]>> for [u32; 8][src]

impl From<Simd<[u32; 8]>> for i64x8[src]

impl From<Simd<[u32; 8]>> for u64x8[src]

impl From<Simd<[u32; 8]>> for f64x8[src]

impl From<Simd<[u64; 2]>> for [u64; 2][src]

impl From<Simd<[u64; 2]>> for i128x2[src]

impl From<Simd<[u64; 2]>> for u128x2[src]

impl From<Simd<[u64; 4]>> for [u64; 4][src]

impl From<Simd<[u64; 4]>> for i128x4[src]

impl From<Simd<[u64; 4]>> for u128x4[src]

impl From<Simd<[u64; 8]>> for [u64; 8][src]

impl From<Simd<[u8; 16]>> for [u8; 16][src]

impl From<Simd<[u8; 16]>> for i16x16[src]

impl From<Simd<[u8; 16]>> for u16x16[src]

impl From<Simd<[u8; 16]>> for i32x16[src]

impl From<Simd<[u8; 16]>> for u32x16[src]

impl From<Simd<[u8; 16]>> for f32x16[src]

impl From<Simd<[u8; 2]>> for [u8; 2][src]

impl From<Simd<[u8; 2]>> for i16x2[src]

impl From<Simd<[u8; 2]>> for u128x2[src]

impl From<Simd<[u8; 2]>> for u16x2[src]

impl From<Simd<[u8; 2]>> for i32x2[src]

impl From<Simd<[u8; 2]>> for u32x2[src]

impl From<Simd<[u8; 2]>> for f32x2[src]

impl From<Simd<[u8; 2]>> for i64x2[src]

impl From<Simd<[u8; 2]>> for u64x2[src]

impl From<Simd<[u8; 2]>> for f64x2[src]

impl From<Simd<[u8; 2]>> for i128x2[src]

impl From<Simd<[u8; 32]>> for [u8; 32][src]

impl From<Simd<[u8; 32]>> for i16x32[src]

impl From<Simd<[u8; 32]>> for u16x32[src]

impl From<Simd<[u8; 4]>> for [u8; 4][src]

impl From<Simd<[u8; 4]>> for i16x4[src]

impl From<Simd<[u8; 4]>> for u128x4[src]

impl From<Simd<[u8; 4]>> for u16x4[src]

impl From<Simd<[u8; 4]>> for i32x4[src]

impl From<Simd<[u8; 4]>> for u32x4[src]

impl From<Simd<[u8; 4]>> for f32x4[src]

impl From<Simd<[u8; 4]>> for i64x4[src]

impl From<Simd<[u8; 4]>> for u64x4[src]

impl From<Simd<[u8; 4]>> for f64x4[src]

impl From<Simd<[u8; 4]>> for i128x4[src]

impl From<Simd<[u8; 64]>> for [u8; 64][src]

impl From<Simd<[u8; 8]>> for [u8; 8][src]

impl From<Simd<[u8; 8]>> for i16x8[src]

impl From<Simd<[u8; 8]>> for u16x8[src]

impl From<Simd<[u8; 8]>> for i32x8[src]

impl From<Simd<[u8; 8]>> for u32x8[src]

impl From<Simd<[u8; 8]>> for f32x8[src]

impl From<Simd<[u8; 8]>> for i64x8[src]

impl From<Simd<[u8; 8]>> for u64x8[src]

impl From<Simd<[u8; 8]>> for f64x8[src]

impl From<Simd<[usize; 2]>> for [usize; 2][src]

impl From<Simd<[usize; 4]>> for [usize; 4][src]

impl From<Simd<[usize; 8]>> for [usize; 8][src]
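
The From impls above cover lossless conversions: a vector into its backing array, and lane-by-lane widenings such as i16 lanes into f32 lanes. The FromCast impls listed next provide explicit lane-wise numeric casts for the remaining combinations. A minimal sketch, assuming the crate's type aliases and the FromCast trait (with its from_cast constructor) are in scope; the values are illustrative:

use packed_simd_2::{f32x2, f32x4, i16x4, i64x2, FromCast};

// `From`: vector -> backing array, and lossless lane widening
// (see `impl From<Simd<[i16; 4]>> for f32x4` above).
let v = i16x4::new(1, -2, 3, -4);
let arr: [i16; 4] = v.into();
assert_eq!(arr, [1, -2, 3, -4]);
let wide = f32x4::from(v);
assert_eq!(wide.extract(1), -2.0);

// `FromCast`: explicit lane-wise numeric cast, available even when the
// conversion may lose precision (see `impl FromCast<Simd<[i64; 2]>> for f32x2` below).
let w = i64x2::new(3, -5);
let f = f32x2::from_cast(w);
assert_eq!(f.extract(1), -5.0);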

impl FromCast<Simd<[f32; 16]>> for i8x16[src]

impl FromCast<Simd<[f32; 16]>> for u8x16[src]

impl FromCast<Simd<[f32; 16]>> for m8x16[src]

impl FromCast<Simd<[f32; 16]>> for i16x16[src]

impl FromCast<Simd<[f32; 16]>> for u16x16[src]

impl FromCast<Simd<[f32; 16]>> for m16x16[src]

impl FromCast<Simd<[f32; 16]>> for i32x16[src]

impl FromCast<Simd<[f32; 16]>> for u32x16[src]

impl FromCast<Simd<[f32; 16]>> for m32x16[src]

impl FromCast<Simd<[f32; 2]>> for i8x2[src]

impl FromCast<Simd<[f32; 2]>> for u8x2[src]

impl FromCast<Simd<[f32; 2]>> for u64x2[src]

impl FromCast<Simd<[f32; 2]>> for f64x2[src]

impl FromCast<Simd<[f32; 2]>> for m64x2[src]

impl FromCast<Simd<[f32; 2]>> for isizex2[src]

impl FromCast<Simd<[f32; 2]>> for usizex2[src]

impl FromCast<Simd<[f32; 2]>> for msizex2[src]

impl FromCast<Simd<[f32; 2]>> for i128x2[src]

impl FromCast<Simd<[f32; 2]>> for u128x2[src]

impl FromCast<Simd<[f32; 2]>> for m128x2[src]

impl FromCast<Simd<[f32; 2]>> for m8x2[src]

impl FromCast<Simd<[f32; 2]>> for i16x2[src]

impl FromCast<Simd<[f32; 2]>> for u16x2[src]

impl FromCast<Simd<[f32; 2]>> for m16x2[src]

impl FromCast<Simd<[f32; 2]>> for i32x2[src]

impl FromCast<Simd<[f32; 2]>> for u32x2[src]

impl FromCast<Simd<[f32; 2]>> for m32x2[src]

impl FromCast<Simd<[f32; 2]>> for i64x2[src]

impl FromCast<Simd<[f32; 4]>> for i8x4[src]

impl FromCast<Simd<[f32; 4]>> for u8x4[src]

impl FromCast<Simd<[f32; 4]>> for u64x4[src]

impl FromCast<Simd<[f32; 4]>> for f64x4[src]

impl FromCast<Simd<[f32; 4]>> for m64x4[src]

impl FromCast<Simd<[f32; 4]>> for isizex4[src]

impl FromCast<Simd<[f32; 4]>> for usizex4[src]

impl FromCast<Simd<[f32; 4]>> for msizex4[src]

impl FromCast<Simd<[f32; 4]>> for i128x4[src]

impl FromCast<Simd<[f32; 4]>> for u128x4[src]

impl FromCast<Simd<[f32; 4]>> for m128x4[src]

impl FromCast<Simd<[f32; 4]>> for m8x4[src]

impl FromCast<Simd<[f32; 4]>> for i16x4[src]

impl FromCast<Simd<[f32; 4]>> for u16x4[src]

impl FromCast<Simd<[f32; 4]>> for m16x4[src]

impl FromCast<Simd<[f32; 4]>> for i32x4[src]

impl FromCast<Simd<[f32; 4]>> for u32x4[src]

impl FromCast<Simd<[f32; 4]>> for m32x4[src]

impl FromCast<Simd<[f32; 4]>> for i64x4[src]

impl FromCast<Simd<[f32; 8]>> for i8x8[src]

impl FromCast<Simd<[f32; 8]>> for u8x8[src]

impl FromCast<Simd<[f32; 8]>> for u64x8[src]

impl FromCast<Simd<[f32; 8]>> for f64x8[src]

impl FromCast<Simd<[f32; 8]>> for m64x8[src]

impl FromCast<Simd<[f32; 8]>> for isizex8[src]

impl FromCast<Simd<[f32; 8]>> for usizex8[src]

impl FromCast<Simd<[f32; 8]>> for msizex8[src]

impl FromCast<Simd<[f32; 8]>> for m8x8[src]

impl FromCast<Simd<[f32; 8]>> for i16x8[src]

impl FromCast<Simd<[f32; 8]>> for u16x8[src]

impl FromCast<Simd<[f32; 8]>> for m16x8[src]

impl FromCast<Simd<[f32; 8]>> for i32x8[src]

impl FromCast<Simd<[f32; 8]>> for u32x8[src]

impl FromCast<Simd<[f32; 8]>> for m32x8[src]

impl FromCast<Simd<[f32; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 2]>> for i8x2[src]

impl FromCast<Simd<[f64; 2]>> for u8x2[src]

impl FromCast<Simd<[f64; 2]>> for i64x2[src]

impl FromCast<Simd<[f64; 2]>> for u64x2[src]

impl FromCast<Simd<[f64; 2]>> for m64x2[src]

impl FromCast<Simd<[f64; 2]>> for isizex2[src]

impl FromCast<Simd<[f64; 2]>> for usizex2[src]

impl FromCast<Simd<[f64; 2]>> for msizex2[src]

impl FromCast<Simd<[f64; 2]>> for i128x2[src]

impl FromCast<Simd<[f64; 2]>> for u128x2[src]

impl FromCast<Simd<[f64; 2]>> for m128x2[src]

impl FromCast<Simd<[f64; 2]>> for m8x2[src]

impl FromCast<Simd<[f64; 2]>> for i16x2[src]

impl FromCast<Simd<[f64; 2]>> for u16x2[src]

impl FromCast<Simd<[f64; 2]>> for m16x2[src]

impl FromCast<Simd<[f64; 2]>> for i32x2[src]

impl FromCast<Simd<[f64; 2]>> for u32x2[src]

impl FromCast<Simd<[f64; 2]>> for f32x2[src]

impl FromCast<Simd<[f64; 2]>> for m32x2[src]

impl FromCast<Simd<[f64; 4]>> for i8x4[src]

impl FromCast<Simd<[f64; 4]>> for u8x4[src]

impl FromCast<Simd<[f64; 4]>> for i64x4[src]

impl FromCast<Simd<[f64; 4]>> for u64x4[src]

impl FromCast<Simd<[f64; 4]>> for m64x4[src]

impl FromCast<Simd<[f64; 4]>> for isizex4[src]

impl FromCast<Simd<[f64; 4]>> for usizex4[src]

impl FromCast<Simd<[f64; 4]>> for msizex4[src]

impl FromCast<Simd<[f64; 4]>> for i128x4[src]

impl FromCast<Simd<[f64; 4]>> for u128x4[src]

impl FromCast<Simd<[f64; 4]>> for m128x4[src]

impl FromCast<Simd<[f64; 4]>> for m8x4[src]

impl FromCast<Simd<[f64; 4]>> for i16x4[src]

impl FromCast<Simd<[f64; 4]>> for u16x4[src]

impl FromCast<Simd<[f64; 4]>> for m16x4[src]

impl FromCast<Simd<[f64; 4]>> for i32x4[src]

impl FromCast<Simd<[f64; 4]>> for u32x4[src]

impl FromCast<Simd<[f64; 4]>> for f32x4[src]

impl FromCast<Simd<[f64; 4]>> for m32x4[src]

impl FromCast<Simd<[f64; 8]>> for i8x8[src]

impl FromCast<Simd<[f64; 8]>> for u8x8[src]

impl FromCast<Simd<[f64; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 8]>> for u64x8[src]

impl FromCast<Simd<[f64; 8]>> for m64x8[src]

impl FromCast<Simd<[f64; 8]>> for isizex8[src]

impl FromCast<Simd<[f64; 8]>> for usizex8[src]

impl FromCast<Simd<[f64; 8]>> for msizex8[src]

impl FromCast<Simd<[f64; 8]>> for m8x8[src]

impl FromCast<Simd<[f64; 8]>> for i16x8[src]

impl FromCast<Simd<[f64; 8]>> for u16x8[src]

impl FromCast<Simd<[f64; 8]>> for m16x8[src]

impl FromCast<Simd<[f64; 8]>> for i32x8[src]

impl FromCast<Simd<[f64; 8]>> for u32x8[src]

impl FromCast<Simd<[f64; 8]>> for f32x8[src]

impl FromCast<Simd<[f64; 8]>> for m32x8[src]

impl FromCast<Simd<[i128; 1]>> for u128x1[src]

impl FromCast<Simd<[i128; 1]>> for m128x1[src]

impl FromCast<Simd<[i128; 2]>> for i8x2[src]

impl FromCast<Simd<[i128; 2]>> for u8x2[src]

impl FromCast<Simd<[i128; 2]>> for i64x2[src]

impl FromCast<Simd<[i128; 2]>> for u64x2[src]

impl FromCast<Simd<[i128; 2]>> for f64x2[src]

impl FromCast<Simd<[i128; 2]>> for m64x2[src]

impl FromCast<Simd<[i128; 2]>> for isizex2[src]

impl FromCast<Simd<[i128; 2]>> for usizex2[src]

impl FromCast<Simd<[i128; 2]>> for msizex2[src]

impl FromCast<Simd<[i128; 2]>> for u128x2[src]

impl FromCast<Simd<[i128; 2]>> for m128x2[src]

impl FromCast<Simd<[i128; 2]>> for m8x2[src]

impl FromCast<Simd<[i128; 2]>> for i16x2[src]

impl FromCast<Simd<[i128; 2]>> for u16x2[src]

impl FromCast<Simd<[i128; 2]>> for m16x2[src]

impl FromCast<Simd<[i128; 2]>> for i32x2[src]

impl FromCast<Simd<[i128; 2]>> for u32x2[src]

impl FromCast<Simd<[i128; 2]>> for f32x2[src]

impl FromCast<Simd<[i128; 2]>> for m32x2[src]

impl FromCast<Simd<[i128; 4]>> for i8x4[src]

impl FromCast<Simd<[i128; 4]>> for u8x4[src]

impl FromCast<Simd<[i128; 4]>> for i64x4[src]

impl FromCast<Simd<[i128; 4]>> for u64x4[src]

impl FromCast<Simd<[i128; 4]>> for f64x4[src]

impl FromCast<Simd<[i128; 4]>> for m64x4[src]

impl FromCast<Simd<[i128; 4]>> for isizex4[src]

impl FromCast<Simd<[i128; 4]>> for usizex4[src]

impl FromCast<Simd<[i128; 4]>> for msizex4[src]

impl FromCast<Simd<[i128; 4]>> for u128x4[src]

impl FromCast<Simd<[i128; 4]>> for m128x4[src]

impl FromCast<Simd<[i128; 4]>> for m8x4[src]

impl FromCast<Simd<[i128; 4]>> for i16x4[src]

impl FromCast<Simd<[i128; 4]>> for u16x4[src]

impl FromCast<Simd<[i128; 4]>> for m16x4[src]

impl FromCast<Simd<[i128; 4]>> for i32x4[src]

impl FromCast<Simd<[i128; 4]>> for u32x4[src]

impl FromCast<Simd<[i128; 4]>> for f32x4[src]

impl FromCast<Simd<[i128; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 16]>> for i8x16[src]

impl FromCast<Simd<[i16; 16]>> for u8x16[src]

impl FromCast<Simd<[i16; 16]>> for m8x16[src]

impl FromCast<Simd<[i16; 16]>> for u16x16[src]

impl FromCast<Simd<[i16; 16]>> for m16x16[src]

impl FromCast<Simd<[i16; 16]>> for i32x16[src]

impl FromCast<Simd<[i16; 16]>> for u32x16[src]

impl FromCast<Simd<[i16; 16]>> for f32x16[src]

impl FromCast<Simd<[i16; 16]>> for m32x16[src]

impl FromCast<Simd<[i16; 2]>> for i8x2[src]

impl FromCast<Simd<[i16; 2]>> for u8x2[src]

impl FromCast<Simd<[i16; 2]>> for u64x2[src]

impl FromCast<Simd<[i16; 2]>> for f64x2[src]

impl FromCast<Simd<[i16; 2]>> for m64x2[src]

impl FromCast<Simd<[i16; 2]>> for isizex2[src]

impl FromCast<Simd<[i16; 2]>> for usizex2[src]

impl FromCast<Simd<[i16; 2]>> for msizex2[src]

impl FromCast<Simd<[i16; 2]>> for i128x2[src]

impl FromCast<Simd<[i16; 2]>> for u128x2[src]

impl FromCast<Simd<[i16; 2]>> for m128x2[src]

impl FromCast<Simd<[i16; 2]>> for m8x2[src]

impl FromCast<Simd<[i16; 2]>> for u16x2[src]

impl FromCast<Simd<[i16; 2]>> for m16x2[src]

impl FromCast<Simd<[i16; 2]>> for i32x2[src]

impl FromCast<Simd<[i16; 2]>> for u32x2[src]

impl FromCast<Simd<[i16; 2]>> for f32x2[src]

impl FromCast<Simd<[i16; 2]>> for m32x2[src]

impl FromCast<Simd<[i16; 2]>> for i64x2[src]

impl FromCast<Simd<[i16; 32]>> for i8x32[src]

impl FromCast<Simd<[i16; 32]>> for u8x32[src]

impl FromCast<Simd<[i16; 32]>> for m8x32[src]

impl FromCast<Simd<[i16; 32]>> for u16x32[src]

impl FromCast<Simd<[i16; 32]>> for m16x32[src]

impl FromCast<Simd<[i16; 4]>> for i8x4[src]

impl FromCast<Simd<[i16; 4]>> for u8x4[src]

impl FromCast<Simd<[i16; 4]>> for u64x4[src]

impl FromCast<Simd<[i16; 4]>> for f64x4[src]

impl FromCast<Simd<[i16; 4]>> for m64x4[src]

impl FromCast<Simd<[i16; 4]>> for isizex4[src]

impl FromCast<Simd<[i16; 4]>> for usizex4[src]

impl FromCast<Simd<[i16; 4]>> for msizex4[src]

impl FromCast<Simd<[i16; 4]>> for i128x4[src]

impl FromCast<Simd<[i16; 4]>> for u128x4[src]

impl FromCast<Simd<[i16; 4]>> for m128x4[src]

impl FromCast<Simd<[i16; 4]>> for m8x4[src]

impl FromCast<Simd<[i16; 4]>> for u16x4[src]

impl FromCast<Simd<[i16; 4]>> for m16x4[src]

impl FromCast<Simd<[i16; 4]>> for i32x4[src]

impl FromCast<Simd<[i16; 4]>> for u32x4[src]

impl FromCast<Simd<[i16; 4]>> for f32x4[src]

impl FromCast<Simd<[i16; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 4]>> for i64x4[src]

impl FromCast<Simd<[i16; 8]>> for i8x8[src]

impl FromCast<Simd<[i16; 8]>> for u8x8[src]

impl FromCast<Simd<[i16; 8]>> for u64x8[src]

impl FromCast<Simd<[i16; 8]>> for f64x8[src]

impl FromCast<Simd<[i16; 8]>> for m64x8[src]

impl FromCast<Simd<[i16; 8]>> for isizex8[src]

impl FromCast<Simd<[i16; 8]>> for usizex8[src]

impl FromCast<Simd<[i16; 8]>> for msizex8[src]

impl FromCast<Simd<[i16; 8]>> for m8x8[src]

impl FromCast<Simd<[i16; 8]>> for u16x8[src]

impl FromCast<Simd<[i16; 8]>> for m16x8[src]

impl FromCast<Simd<[i16; 8]>> for i32x8[src]

impl FromCast<Simd<[i16; 8]>> for u32x8[src]

impl FromCast<Simd<[i16; 8]>> for f32x8[src]

impl FromCast<Simd<[i16; 8]>> for m32x8[src]

impl FromCast<Simd<[i16; 8]>> for i64x8[src]

impl FromCast<Simd<[i32; 16]>> for i8x16[src]

impl FromCast<Simd<[i32; 16]>> for u8x16[src]

impl FromCast<Simd<[i32; 16]>> for m8x16[src]

impl FromCast<Simd<[i32; 16]>> for i16x16[src]

impl FromCast<Simd<[i32; 16]>> for u16x16[src]

impl FromCast<Simd<[i32; 16]>> for m16x16[src]

impl FromCast<Simd<[i32; 16]>> for u32x16[src]

impl FromCast<Simd<[i32; 16]>> for f32x16[src]

impl FromCast<Simd<[i32; 16]>> for m32x16[src]

impl FromCast<Simd<[i32; 2]>> for i8x2[src]

impl FromCast<Simd<[i32; 2]>> for u8x2[src]

impl FromCast<Simd<[i32; 2]>> for u64x2[src]

impl FromCast<Simd<[i32; 2]>> for f64x2[src]

impl FromCast<Simd<[i32; 2]>> for m64x2[src]

impl FromCast<Simd<[i32; 2]>> for isizex2[src]

impl FromCast<Simd<[i32; 2]>> for usizex2[src]

impl FromCast<Simd<[i32; 2]>> for msizex2[src]

impl FromCast<Simd<[i32; 2]>> for i128x2[src]

impl FromCast<Simd<[i32; 2]>> for u128x2[src]

impl FromCast<Simd<[i32; 2]>> for m128x2[src]

impl FromCast<Simd<[i32; 2]>> for m8x2[src]

impl FromCast<Simd<[i32; 2]>> for i16x2[src]

impl FromCast<Simd<[i32; 2]>> for u16x2[src]

impl FromCast<Simd<[i32; 2]>> for m16x2[src]

impl FromCast<Simd<[i32; 2]>> for u32x2[src]

impl FromCast<Simd<[i32; 2]>> for f32x2[src]

impl FromCast<Simd<[i32; 2]>> for m32x2[src]

impl FromCast<Simd<[i32; 2]>> for i64x2[src]

impl FromCast<Simd<[i32; 4]>> for i8x4[src]

impl FromCast<Simd<[i32; 4]>> for u8x4[src]

impl FromCast<Simd<[i32; 4]>> for u64x4[src]

impl FromCast<Simd<[i32; 4]>> for f64x4[src]

impl FromCast<Simd<[i32; 4]>> for m64x4[src]

impl FromCast<Simd<[i32; 4]>> for isizex4[src]

impl FromCast<Simd<[i32; 4]>> for usizex4[src]

impl FromCast<Simd<[i32; 4]>> for msizex4[src]

impl FromCast<Simd<[i32; 4]>> for i128x4[src]

impl FromCast<Simd<[i32; 4]>> for u128x4[src]

impl FromCast<Simd<[i32; 4]>> for m128x4[src]

impl FromCast<Simd<[i32; 4]>> for m8x4[src]

impl FromCast<Simd<[i32; 4]>> for i16x4[src]

impl FromCast<Simd<[i32; 4]>> for u16x4[src]

impl FromCast<Simd<[i32; 4]>> for m16x4[src]

impl FromCast<Simd<[i32; 4]>> for u32x4[src]

impl FromCast<Simd<[i32; 4]>> for f32x4[src]

impl FromCast<Simd<[i32; 4]>> for m32x4[src]

impl FromCast<Simd<[i32; 4]>> for i64x4[src]

impl FromCast<Simd<[i32; 8]>> for i8x8[src]

impl FromCast<Simd<[i32; 8]>> for u8x8[src]

impl FromCast<Simd<[i32; 8]>> for u64x8[src]

impl FromCast<Simd<[i32; 8]>> for f64x8[src]

impl FromCast<Simd<[i32; 8]>> for m64x8[src]

impl FromCast<Simd<[i32; 8]>> for isizex8[src]

impl FromCast<Simd<[i32; 8]>> for usizex8[src]

impl FromCast<Simd<[i32; 8]>> for msizex8[src]

impl FromCast<Simd<[i32; 8]>> for m8x8[src]

impl FromCast<Simd<[i32; 8]>> for i16x8[src]

impl FromCast<Simd<[i32; 8]>> for u16x8[src]

impl FromCast<Simd<[i32; 8]>> for m16x8[src]

impl FromCast<Simd<[i32; 8]>> for u32x8[src]

impl FromCast<Simd<[i32; 8]>> for f32x8[src]

impl FromCast<Simd<[i32; 8]>> for m32x8[src]

impl FromCast<Simd<[i32; 8]>> for i64x8[src]

impl FromCast<Simd<[i64; 2]>> for i8x2[src]

impl FromCast<Simd<[i64; 2]>> for u8x2[src]

impl FromCast<Simd<[i64; 2]>> for u64x2[src]

impl FromCast<Simd<[i64; 2]>> for f64x2[src]

impl FromCast<Simd<[i64; 2]>> for m64x2[src]

impl FromCast<Simd<[i64; 2]>> for isizex2[src]

impl FromCast<Simd<[i64; 2]>> for usizex2[src]

impl FromCast<Simd<[i64; 2]>> for msizex2[src]

impl FromCast<Simd<[i64; 2]>> for i128x2[src]

impl FromCast<Simd<[i64; 2]>> for u128x2[src]

impl FromCast<Simd<[i64; 2]>> for m128x2[src]

impl FromCast<Simd<[i64; 2]>> for m8x2[src]

impl FromCast<Simd<[i64; 2]>> for i16x2[src]

impl FromCast<Simd<[i64; 2]>> for u16x2[src]

impl FromCast<Simd<[i64; 2]>> for m16x2[src]

impl FromCast<Simd<[i64; 2]>> for i32x2[src]

impl FromCast<Simd<[i64; 2]>> for u32x2[src]

impl FromCast<Simd<[i64; 2]>> for f32x2[src]

impl FromCast<Simd<[i64; 2]>> for m32x2[src]

impl FromCast<Simd<[i64; 4]>> for i8x4[src]

impl FromCast<Simd<[i64; 4]>> for u8x4[src]

impl FromCast<Simd<[i64; 4]>> for u64x4[src]

impl FromCast<Simd<[i64; 4]>> for f64x4[src]

impl FromCast<Simd<[i64; 4]>> for m64x4[src]

impl FromCast<Simd<[i64; 4]>> for isizex4[src]

impl FromCast<Simd<[i64; 4]>> for usizex4[src]

impl FromCast<Simd<[i64; 4]>> for msizex4[src]

impl FromCast<Simd<[i64; 4]>> for i128x4[src]

impl FromCast<Simd<[i64; 4]>> for u128x4[src]

impl FromCast<Simd<[i64; 4]>> for m128x4[src]

impl FromCast<Simd<[i64; 4]>> for m8x4[src]

impl FromCast<Simd<[i64; 4]>> for i16x4[src]

impl FromCast<Simd<[i64; 4]>> for u16x4[src]

impl FromCast<Simd<[i64; 4]>> for m16x4[src]

impl FromCast<Simd<[i64; 4]>> for i32x4[src]

impl FromCast<Simd<[i64; 4]>> for u32x4[src]

impl FromCast<Simd<[i64; 4]>> for f32x4[src]

impl FromCast<Simd<[i64; 4]>> for m32x4[src]

impl FromCast<Simd<[i64; 8]>> for i8x8[src]

impl FromCast<Simd<[i64; 8]>> for u8x8[src]

impl FromCast<Simd<[i64; 8]>> for u64x8[src]

impl FromCast<Simd<[i64; 8]>> for f64x8[src]

impl FromCast<Simd<[i64; 8]>> for m64x8[src]

impl FromCast<Simd<[i64; 8]>> for isizex8[src]

impl FromCast<Simd<[i64; 8]>> for usizex8[src]

impl FromCast<Simd<[i64; 8]>> for msizex8[src]

impl FromCast<Simd<[i64; 8]>> for m8x8[src]

impl FromCast<Simd<[i64; 8]>> for i16x8[src]

impl FromCast<Simd<[i64; 8]>> for u16x8[src]

impl FromCast<Simd<[i64; 8]>> for m16x8[src]

impl FromCast<Simd<[i64; 8]>> for i32x8[src]

impl FromCast<Simd<[i64; 8]>> for u32x8[src]

impl FromCast<Simd<[i64; 8]>> for f32x8[src]

impl FromCast<Simd<[i64; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 16]>> for u8x16[src]

impl FromCast<Simd<[i8; 16]>> for m8x16[src]

impl FromCast<Simd<[i8; 16]>> for i16x16[src]

impl FromCast<Simd<[i8; 16]>> for u16x16[src]

impl FromCast<Simd<[i8; 16]>> for m16x16[src]

impl FromCast<Simd<[i8; 16]>> for i32x16[src]

impl FromCast<Simd<[i8; 16]>> for u32x16[src]

impl FromCast<Simd<[i8; 16]>> for f32x16[src]

impl FromCast<Simd<[i8; 16]>> for m32x16[src]

impl FromCast<Simd<[i8; 2]>> for u8x2[src]

impl FromCast<Simd<[i8; 2]>> for m8x2[src]

impl FromCast<Simd<[i8; 2]>> for u64x2[src]

impl FromCast<Simd<[i8; 2]>> for f64x2[src]

impl FromCast<Simd<[i8; 2]>> for m64x2[src]

impl FromCast<Simd<[i8; 2]>> for isizex2[src]

impl FromCast<Simd<[i8; 2]>> for usizex2[src]

impl FromCast<Simd<[i8; 2]>> for msizex2[src]

impl FromCast<Simd<[i8; 2]>> for i128x2[src]

impl FromCast<Simd<[i8; 2]>> for u128x2[src]

impl FromCast<Simd<[i8; 2]>> for m128x2[src]

impl FromCast<Simd<[i8; 2]>> for i16x2[src]

impl FromCast<Simd<[i8; 2]>> for u16x2[src]

impl FromCast<Simd<[i8; 2]>> for m16x2[src]

impl FromCast<Simd<[i8; 2]>> for i32x2[src]

impl FromCast<Simd<[i8; 2]>> for u32x2[src]

impl FromCast<Simd<[i8; 2]>> for f32x2[src]

impl FromCast<Simd<[i8; 2]>> for m32x2[src]

impl FromCast<Simd<[i8; 2]>> for i64x2[src]

impl FromCast<Simd<[i8; 32]>> for u8x32[src]

impl FromCast<Simd<[i8; 32]>> for m8x32[src]

impl FromCast<Simd<[i8; 32]>> for i16x32[src]

impl FromCast<Simd<[i8; 32]>> for u16x32[src]

impl FromCast<Simd<[i8; 32]>> for m16x32[src]

impl FromCast<Simd<[i8; 4]>> for u8x4[src]

impl FromCast<Simd<[i8; 4]>> for m8x4[src]

impl FromCast<Simd<[i8; 4]>> for u64x4[src]

impl FromCast<Simd<[i8; 4]>> for f64x4[src]

impl FromCast<Simd<[i8; 4]>> for m64x4[src]

impl FromCast<Simd<[i8; 4]>> for isizex4[src]

impl FromCast<Simd<[i8; 4]>> for usizex4[src]

impl FromCast<Simd<[i8; 4]>> for msizex4[src]

impl FromCast<Simd<[i8; 4]>> for i128x4[src]

impl FromCast<Simd<[i8; 4]>> for u128x4[src]

impl FromCast<Simd<[i8; 4]>> for m128x4[src]

impl FromCast<Simd<[i8; 4]>> for i16x4[src]

impl FromCast<Simd<[i8; 4]>> for u16x4[src]

impl FromCast<Simd<[i8; 4]>> for m16x4[src]

impl FromCast<Simd<[i8; 4]>> for i32x4[src]

impl FromCast<Simd<[i8; 4]>> for u32x4[src]

impl FromCast<Simd<[i8; 4]>> for f32x4[src]

impl FromCast<Simd<[i8; 4]>> for m32x4[src]

impl FromCast<Simd<[i8; 4]>> for i64x4[src]

impl FromCast<Simd<[i8; 64]>> for u8x64[src]

impl FromCast<Simd<[i8; 64]>> for m8x64[src]

impl FromCast<Simd<[i8; 8]>> for u8x8[src]

impl FromCast<Simd<[i8; 8]>> for m8x8[src]

impl FromCast<Simd<[i8; 8]>> for u64x8[src]

impl FromCast<Simd<[i8; 8]>> for f64x8[src]

impl FromCast<Simd<[i8; 8]>> for m64x8[src]

impl FromCast<Simd<[i8; 8]>> for isizex8[src]

impl FromCast<Simd<[i8; 8]>> for usizex8[src]

impl FromCast<Simd<[i8; 8]>> for msizex8[src]

impl FromCast<Simd<[i8; 8]>> for i16x8[src]

impl FromCast<Simd<[i8; 8]>> for u16x8[src]

impl FromCast<Simd<[i8; 8]>> for m16x8[src]

impl FromCast<Simd<[i8; 8]>> for i32x8[src]

impl FromCast<Simd<[i8; 8]>> for u32x8[src]

impl FromCast<Simd<[i8; 8]>> for f32x8[src]

impl FromCast<Simd<[i8; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 2]>> for i8x2[src]

impl FromCast<Simd<[isize; 2]>> for u8x2[src]

impl FromCast<Simd<[isize; 2]>> for i64x2[src]

impl FromCast<Simd<[isize; 2]>> for u64x2[src]

impl FromCast<Simd<[isize; 2]>> for f64x2[src]

impl FromCast<Simd<[isize; 2]>> for m64x2[src]

impl FromCast<Simd<[isize; 2]>> for usizex2[src]

impl FromCast<Simd<[isize; 2]>> for msizex2[src]

impl FromCast<Simd<[isize; 2]>> for i128x2[src]

impl FromCast<Simd<[isize; 2]>> for u128x2[src]

impl FromCast<Simd<[isize; 2]>> for m128x2[src]

impl FromCast<Simd<[isize; 2]>> for m8x2[src]

impl FromCast<Simd<[isize; 2]>> for i16x2[src]

impl FromCast<Simd<[isize; 2]>> for u16x2[src]

impl FromCast<Simd<[isize; 2]>> for m16x2[src]

impl FromCast<Simd<[isize; 2]>> for i32x2[src]

impl FromCast<Simd<[isize; 2]>> for u32x2[src]

impl FromCast<Simd<[isize; 2]>> for f32x2[src]

impl FromCast<Simd<[isize; 2]>> for m32x2[src]

impl FromCast<Simd<[isize; 4]>> for i8x4[src]

impl FromCast<Simd<[isize; 4]>> for u8x4[src]

impl FromCast<Simd<[isize; 4]>> for i64x4[src]

impl FromCast<Simd<[isize; 4]>> for u64x4[src]

impl FromCast<Simd<[isize; 4]>> for f64x4[src]

impl FromCast<Simd<[isize; 4]>> for m64x4[src]

impl FromCast<Simd<[isize; 4]>> for usizex4[src]

impl FromCast<Simd<[isize; 4]>> for msizex4[src]

impl FromCast<Simd<[isize; 4]>> for i128x4[src]

impl FromCast<Simd<[isize; 4]>> for u128x4[src]

impl FromCast<Simd<[isize; 4]>> for m128x4[src]

impl FromCast<Simd<[isize; 4]>> for m8x4[src]

impl FromCast<Simd<[isize; 4]>> for i16x4[src]

impl FromCast<Simd<[isize; 4]>> for u16x4[src]

impl FromCast<Simd<[isize; 4]>> for m16x4[src]

impl FromCast<Simd<[isize; 4]>> for i32x4[src]

impl FromCast<Simd<[isize; 4]>> for u32x4[src]

impl FromCast<Simd<[isize; 4]>> for f32x4[src]

impl FromCast<Simd<[isize; 4]>> for m32x4[src]

impl FromCast<Simd<[isize; 8]>> for i8x8[src]

impl FromCast<Simd<[isize; 8]>> for u8x8[src]

impl FromCast<Simd<[isize; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 8]>> for u64x8[src]

impl FromCast<Simd<[isize; 8]>> for f64x8[src]

impl FromCast<Simd<[isize; 8]>> for m64x8[src]

impl FromCast<Simd<[isize; 8]>> for usizex8[src]

impl FromCast<Simd<[isize; 8]>> for msizex8[src]

impl FromCast<Simd<[isize; 8]>> for m8x8[src]

impl FromCast<Simd<[isize; 8]>> for i16x8[src]

impl FromCast<Simd<[isize; 8]>> for u16x8[src]

impl FromCast<Simd<[isize; 8]>> for m16x8[src]

impl FromCast<Simd<[isize; 8]>> for i32x8[src]

impl FromCast<Simd<[isize; 8]>> for u32x8[src]

impl FromCast<Simd<[isize; 8]>> for f32x8[src]

impl FromCast<Simd<[isize; 8]>> for m32x8[src]

impl FromCast<Simd<[m128; 1]>> for i128x1[src]

impl FromCast<Simd<[m128; 1]>> for u128x1[src]

impl FromCast<Simd<[m128; 2]>> for i8x2[src]

impl FromCast<Simd<[m128; 2]>> for u8x2[src]

impl FromCast<Simd<[m128; 2]>> for i64x2[src]

impl FromCast<Simd<[m128; 2]>> for u64x2[src]

impl FromCast<Simd<[m128; 2]>> for f64x2[src]

impl FromCast<Simd<[m128; 2]>> for m64x2[src]

impl FromCast<Simd<[m128; 2]>> for isizex2[src]

impl FromCast<Simd<[m128; 2]>> for usizex2[src]

impl FromCast<Simd<[m128; 2]>> for msizex2[src]

impl FromCast<Simd<[m128; 2]>> for i128x2[src]

impl FromCast<Simd<[m128; 2]>> for u128x2[src]

impl FromCast<Simd<[m128; 2]>> for m8x2[src]

impl FromCast<Simd<[m128; 2]>> for i16x2[src]

impl FromCast<Simd<[m128; 2]>> for u16x2[src]

impl FromCast<Simd<[m128; 2]>> for m16x2[src]

impl FromCast<Simd<[m128; 2]>> for i32x2[src]

impl FromCast<Simd<[m128; 2]>> for u32x2[src]

impl FromCast<Simd<[m128; 2]>> for f32x2[src]

impl FromCast<Simd<[m128; 2]>> for m32x2[src]

impl FromCast<Simd<[m128; 4]>> for i8x4[src]

impl FromCast<Simd<[m128; 4]>> for u8x4[src]

impl FromCast<Simd<[m128; 4]>> for i64x4[src]

impl FromCast<Simd<[m128; 4]>> for u64x4[src]

impl FromCast<Simd<[m128; 4]>> for f64x4[src]

impl FromCast<Simd<[m128; 4]>> for m64x4[src]

impl FromCast<Simd<[m128; 4]>> for isizex4[src]

impl FromCast<Simd<[m128; 4]>> for usizex4[src]

impl FromCast<Simd<[m128; 4]>> for msizex4[src]

impl FromCast<Simd<[m128; 4]>> for i128x4[src]

impl FromCast<Simd<[m128; 4]>> for u128x4[src]

impl FromCast<Simd<[m128; 4]>> for m8x4[src]

impl FromCast<Simd<[m128; 4]>> for i16x4[src]

impl FromCast<Simd<[m128; 4]>> for u16x4[src]

impl FromCast<Simd<[m128; 4]>> for m16x4[src]

impl FromCast<Simd<[m128; 4]>> for i32x4[src]

impl FromCast<Simd<[m128; 4]>> for u32x4[src]

impl FromCast<Simd<[m128; 4]>> for f32x4[src]

impl FromCast<Simd<[m128; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 16]>> for i8x16[src]

impl FromCast<Simd<[m16; 16]>> for u8x16[src]

impl FromCast<Simd<[m16; 16]>> for m8x16[src]

impl FromCast<Simd<[m16; 16]>> for i16x16[src]

impl FromCast<Simd<[m16; 16]>> for u16x16[src]

impl FromCast<Simd<[m16; 16]>> for i32x16[src]

impl FromCast<Simd<[m16; 16]>> for u32x16[src]

impl FromCast<Simd<[m16; 16]>> for f32x16[src]

impl FromCast<Simd<[m16; 16]>> for m32x16[src]

impl FromCast<Simd<[m16; 2]>> for i8x2[src]

impl FromCast<Simd<[m16; 2]>> for u8x2[src]

impl FromCast<Simd<[m16; 2]>> for u64x2[src]

impl FromCast<Simd<[m16; 2]>> for f64x2[src]

impl FromCast<Simd<[m16; 2]>> for m64x2[src]

impl FromCast<Simd<[m16; 2]>> for isizex2[src]

impl FromCast<Simd<[m16; 2]>> for usizex2[src]

impl FromCast<Simd<[m16; 2]>> for msizex2[src]

impl FromCast<Simd<[m16; 2]>> for i128x2[src]

impl FromCast<Simd<[m16; 2]>> for u128x2[src]

impl FromCast<Simd<[m16; 2]>> for m128x2[src]

impl FromCast<Simd<[m16; 2]>> for m8x2[src]

impl FromCast<Simd<[m16; 2]>> for i16x2[src]

impl FromCast<Simd<[m16; 2]>> for u16x2[src]

impl FromCast<Simd<[m16; 2]>> for i32x2[src]

impl FromCast<Simd<[m16; 2]>> for u32x2[src]

impl FromCast<Simd<[m16; 2]>> for f32x2[src]

impl FromCast<Simd<[m16; 2]>> for m32x2[src]

impl FromCast<Simd<[m16; 2]>> for i64x2[src]

impl FromCast<Simd<[m16; 32]>> for i8x32[src]

impl FromCast<Simd<[m16; 32]>> for u8x32[src]

impl FromCast<Simd<[m16; 32]>> for m8x32[src]

impl FromCast<Simd<[m16; 32]>> for i16x32[src]

impl FromCast<Simd<[m16; 32]>> for u16x32[src]

impl FromCast<Simd<[m16; 4]>> for i8x4[src]

impl FromCast<Simd<[m16; 4]>> for u8x4[src]

impl FromCast<Simd<[m16; 4]>> for u64x4[src]

impl FromCast<Simd<[m16; 4]>> for f64x4[src]

impl FromCast<Simd<[m16; 4]>> for m64x4[src]

impl FromCast<Simd<[m16; 4]>> for isizex4[src]

impl FromCast<Simd<[m16; 4]>> for usizex4[src]

impl FromCast<Simd<[m16; 4]>> for msizex4[src]

impl FromCast<Simd<[m16; 4]>> for i128x4[src]

impl FromCast<Simd<[m16; 4]>> for u128x4[src]

impl FromCast<Simd<[m16; 4]>> for m128x4[src]

impl FromCast<Simd<[m16; 4]>> for m8x4[src]

impl FromCast<Simd<[m16; 4]>> for i16x4[src]

impl FromCast<Simd<[m16; 4]>> for u16x4[src]

impl FromCast<Simd<[m16; 4]>> for i32x4[src]

impl FromCast<Simd<[m16; 4]>> for u32x4[src]

impl FromCast<Simd<[m16; 4]>> for f32x4[src]

impl FromCast<Simd<[m16; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 4]>> for i64x4[src]

impl FromCast<Simd<[m16; 8]>> for i8x8[src]

impl FromCast<Simd<[m16; 8]>> for u8x8[src]

impl FromCast<Simd<[m16; 8]>> for u64x8[src]

impl FromCast<Simd<[m16; 8]>> for f64x8[src]

impl FromCast<Simd<[m16; 8]>> for m64x8[src]

impl FromCast<Simd<[m16; 8]>> for isizex8[src]

impl FromCast<Simd<[m16; 8]>> for usizex8[src]

impl FromCast<Simd<[m16; 8]>> for msizex8[src]

impl FromCast<Simd<[m16; 8]>> for m8x8[src]

impl FromCast<Simd<[m16; 8]>> for i16x8[src]

impl FromCast<Simd<[m16; 8]>> for u16x8[src]

impl FromCast<Simd<[m16; 8]>> for i32x8[src]

impl FromCast<Simd<[m16; 8]>> for u32x8[src]

impl FromCast<Simd<[m16; 8]>> for f32x8[src]

impl FromCast<Simd<[m16; 8]>> for m32x8[src]

impl FromCast<Simd<[m16; 8]>> for i64x8[src]

impl FromCast<Simd<[m32; 16]>> for i8x16[src]

impl FromCast<Simd<[m32; 16]>> for u8x16[src]

impl FromCast<Simd<[m32; 16]>> for m8x16[src]

impl FromCast<Simd<[m32; 16]>> for i16x16[src]

impl FromCast<Simd<[m32; 16]>> for u16x16[src]

impl FromCast<Simd<[m32; 16]>> for m16x16[src]

impl FromCast<Simd<[m32; 16]>> for i32x16[src]

impl FromCast<Simd<[m32; 16]>> for u32x16[src]

impl FromCast<Simd<[m32; 16]>> for f32x16[src]

impl FromCast<Simd<[m32; 2]>> for i8x2[src]

impl FromCast<Simd<[m32; 2]>> for u8x2[src]

impl FromCast<Simd<[m32; 2]>> for u64x2[src]

impl FromCast<Simd<[m32; 2]>> for f64x2[src]

impl FromCast<Simd<[m32; 2]>> for m64x2[src]

impl FromCast<Simd<[m32; 2]>> for isizex2[src]

impl FromCast<Simd<[m32; 2]>> for usizex2[src]

impl FromCast<Simd<[m32; 2]>> for msizex2[src]

impl FromCast<Simd<[m32; 2]>> for i128x2[src]

impl FromCast<Simd<[m32; 2]>> for u128x2[src]

impl FromCast<Simd<[m32; 2]>> for m128x2[src]

impl FromCast<Simd<[m32; 2]>> for m8x2[src]

impl FromCast<Simd<[m32; 2]>> for i16x2[src]

impl FromCast<Simd<[m32; 2]>> for u16x2[src]

impl FromCast<Simd<[m32; 2]>> for m16x2[src]

impl FromCast<Simd<[m32; 2]>> for i32x2[src]

impl FromCast<Simd<[m32; 2]>> for u32x2[src]

impl FromCast<Simd<[m32; 2]>> for f32x2[src]

impl FromCast<Simd<[m32; 2]>> for i64x2[src]

impl FromCast<Simd<[m32; 4]>> for i8x4[src]

impl FromCast<Simd<[m32; 4]>> for u8x4[src]

impl FromCast<Simd<[m32; 4]>> for u64x4[src]

impl FromCast<Simd<[m32; 4]>> for f64x4[src]

impl FromCast<Simd<[m32; 4]>> for m64x4[src]

impl FromCast<Simd<[m32; 4]>> for isizex4[src]

impl FromCast<Simd<[m32; 4]>> for usizex4[src]

impl FromCast<Simd<[m32; 4]>> for msizex4[src]

impl FromCast<Simd<[m32; 4]>> for i128x4[src]

impl FromCast<Simd<[m32; 4]>> for u128x4[src]

impl FromCast<Simd<[m32; 4]>> for m128x4[src]

impl FromCast<Simd<[m32; 4]>> for m8x4[src]

impl FromCast<Simd<[m32; 4]>> for i16x4[src]

impl FromCast<Simd<[m32; 4]>> for u16x4[src]

impl FromCast<Simd<[m32; 4]>> for m16x4[src]

impl FromCast<Simd<[m32; 4]>> for i32x4[src]

impl FromCast<Simd<[m32; 4]>> for u32x4[src]

impl FromCast<Simd<[m32; 4]>> for f32x4[src]

impl FromCast<Simd<[m32; 4]>> for i64x4[src]

impl FromCast<Simd<[m32; 8]>> for i8x8[src]

impl FromCast<Simd<[m32; 8]>> for u8x8[src]

impl FromCast<Simd<[m32; 8]>> for u64x8[src]

impl FromCast<Simd<[m32; 8]>> for f64x8[src]

impl FromCast<Simd<[m32; 8]>> for m64x8[src]

impl FromCast<Simd<[m32; 8]>> for isizex8[src]

impl FromCast<Simd<[m32; 8]>> for usizex8[src]

impl FromCast<Simd<[m32; 8]>> for msizex8[src]

impl FromCast<Simd<[m32; 8]>> for m8x8[src]

impl FromCast<Simd<[m32; 8]>> for i16x8[src]

impl FromCast<Simd<[m32; 8]>> for u16x8[src]

impl FromCast<Simd<[m32; 8]>> for m16x8[src]

impl FromCast<Simd<[m32; 8]>> for i32x8[src]

impl FromCast<Simd<[m32; 8]>> for u32x8[src]

impl FromCast<Simd<[m32; 8]>> for f32x8[src]

impl FromCast<Simd<[m32; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 2]>> for i8x2[src]

impl FromCast<Simd<[m64; 2]>> for u8x2[src]

impl FromCast<Simd<[m64; 2]>> for i64x2[src]

impl FromCast<Simd<[m64; 2]>> for u64x2[src]

impl FromCast<Simd<[m64; 2]>> for f64x2[src]

impl FromCast<Simd<[m64; 2]>> for isizex2[src]

impl FromCast<Simd<[m64; 2]>> for usizex2[src]

impl FromCast<Simd<[m64; 2]>> for msizex2[src]

impl FromCast<Simd<[m64; 2]>> for i128x2[src]

impl FromCast<Simd<[m64; 2]>> for u128x2[src]

impl FromCast<Simd<[m64; 2]>> for m128x2[src]

impl FromCast<Simd<[m64; 2]>> for m8x2[src]

impl FromCast<Simd<[m64; 2]>> for i16x2[src]

impl FromCast<Simd<[m64; 2]>> for u16x2[src]

impl FromCast<Simd<[m64; 2]>> for m16x2[src]

impl FromCast<Simd<[m64; 2]>> for i32x2[src]

impl FromCast<Simd<[m64; 2]>> for u32x2[src]

impl FromCast<Simd<[m64; 2]>> for f32x2[src]

impl FromCast<Simd<[m64; 2]>> for m32x2[src]

impl FromCast<Simd<[m64; 4]>> for i8x4[src]

impl FromCast<Simd<[m64; 4]>> for u8x4[src]

impl FromCast<Simd<[m64; 4]>> for i64x4[src]

impl FromCast<Simd<[m64; 4]>> for u64x4[src]

impl FromCast<Simd<[m64; 4]>> for f64x4[src]

impl FromCast<Simd<[m64; 4]>> for isizex4[src]

impl FromCast<Simd<[m64; 4]>> for usizex4[src]

impl FromCast<Simd<[m64; 4]>> for msizex4[src]

impl FromCast<Simd<[m64; 4]>> for i128x4[src]

impl FromCast<Simd<[m64; 4]>> for u128x4[src]

impl FromCast<Simd<[m64; 4]>> for m128x4[src]

impl FromCast<Simd<[m64; 4]>> for m8x4[src]

impl FromCast<Simd<[m64; 4]>> for i16x4[src]

impl FromCast<Simd<[m64; 4]>> for u16x4[src]

impl FromCast<Simd<[m64; 4]>> for m16x4[src]

impl FromCast<Simd<[m64; 4]>> for i32x4[src]

impl FromCast<Simd<[m64; 4]>> for u32x4[src]

impl FromCast<Simd<[m64; 4]>> for f32x4[src]

impl FromCast<Simd<[m64; 4]>> for m32x4[src]

impl FromCast<Simd<[m64; 8]>> for i8x8[src]

impl FromCast<Simd<[m64; 8]>> for u8x8[src]

impl FromCast<Simd<[m64; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 8]>> for u64x8[src]

impl FromCast<Simd<[m64; 8]>> for f64x8[src]

impl FromCast<Simd<[m64; 8]>> for isizex8[src]

impl FromCast<Simd<[m64; 8]>> for usizex8[src]

impl FromCast<Simd<[m64; 8]>> for msizex8[src]

impl FromCast<Simd<[m64; 8]>> for m8x8[src]

impl FromCast<Simd<[m64; 8]>> for i16x8[src]

impl FromCast<Simd<[m64; 8]>> for u16x8[src]

impl FromCast<Simd<[m64; 8]>> for m16x8[src]

impl FromCast<Simd<[m64; 8]>> for i32x8[src]

impl FromCast<Simd<[m64; 8]>> for u32x8[src]

impl FromCast<Simd<[m64; 8]>> for f32x8[src]

impl FromCast<Simd<[m64; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 16]>> for i8x16[src]

impl FromCast<Simd<[m8; 16]>> for u8x16[src]

impl FromCast<Simd<[m8; 16]>> for i16x16[src]

impl FromCast<Simd<[m8; 16]>> for u16x16[src]

impl FromCast<Simd<[m8; 16]>> for m16x16[src]

impl FromCast<Simd<[m8; 16]>> for i32x16[src]

impl FromCast<Simd<[m8; 16]>> for u32x16[src]

impl FromCast<Simd<[m8; 16]>> for f32x16[src]

impl FromCast<Simd<[m8; 16]>> for m32x16[src]

impl FromCast<Simd<[m8; 2]>> for i8x2[src]

impl FromCast<Simd<[m8; 2]>> for u8x2[src]

impl FromCast<Simd<[m8; 2]>> for u64x2[src]

impl FromCast<Simd<[m8; 2]>> for f64x2[src]

impl FromCast<Simd<[m8; 2]>> for m64x2[src]

impl FromCast<Simd<[m8; 2]>> for isizex2[src]

impl FromCast<Simd<[m8; 2]>> for usizex2[src]

impl FromCast<Simd<[m8; 2]>> for msizex2[src]

impl FromCast<Simd<[m8; 2]>> for i128x2[src]

impl FromCast<Simd<[m8; 2]>> for u128x2[src]

impl FromCast<Simd<[m8; 2]>> for m128x2[src]

impl FromCast<Simd<[m8; 2]>> for i16x2[src]

impl FromCast<Simd<[m8; 2]>> for u16x2[src]

impl FromCast<Simd<[m8; 2]>> for m16x2[src]

impl FromCast<Simd<[m8; 2]>> for i32x2[src]

impl FromCast<Simd<[m8; 2]>> for u32x2[src]

impl FromCast<Simd<[m8; 2]>> for f32x2[src]

impl FromCast<Simd<[m8; 2]>> for m32x2[src]

impl FromCast<Simd<[m8; 2]>> for i64x2[src]

impl FromCast<Simd<[m8; 32]>> for i8x32[src]

impl FromCast<Simd<[m8; 32]>> for u8x32[src]

impl FromCast<Simd<[m8; 32]>> for i16x32[src]

impl FromCast<Simd<[m8; 32]>> for u16x32[src]

impl FromCast<Simd<[m8; 32]>> for m16x32[src]

impl FromCast<Simd<[m8; 4]>> for i8x4[src]

impl FromCast<Simd<[m8; 4]>> for u8x4[src]

impl FromCast<Simd<[m8; 4]>> for u64x4[src]

impl FromCast<Simd<[m8; 4]>> for f64x4[src]

impl FromCast<Simd<[m8; 4]>> for m64x4[src]

impl FromCast<Simd<[m8; 4]>> for isizex4[src]

impl FromCast<Simd<[m8; 4]>> for usizex4[src]

impl FromCast<Simd<[m8; 4]>> for msizex4[src]

impl FromCast<Simd<[m8; 4]>> for i128x4[src]

impl FromCast<Simd<[m8; 4]>> for u128x4[src]

impl FromCast<Simd<[m8; 4]>> for m128x4[src]

impl FromCast<Simd<[m8; 4]>> for i16x4[src]

impl FromCast<Simd<[m8; 4]>> for u16x4[src]

impl FromCast<Simd<[m8; 4]>> for m16x4[src]

impl FromCast<Simd<[m8; 4]>> for i32x4[src]

impl FromCast<Simd<[m8; 4]>> for u32x4[src]

impl FromCast<Simd<[m8; 4]>> for f32x4[src]

impl FromCast<Simd<[m8; 4]>> for m32x4[src]

impl FromCast<Simd<[m8; 4]>> for i64x4[src]

impl FromCast<Simd<[m8; 64]>> for i8x64[src]

impl FromCast<Simd<[m8; 64]>> for u8x64[src]

impl FromCast<Simd<[m8; 8]>> for i8x8[src]

impl FromCast<Simd<[m8; 8]>> for u8x8[src]

impl FromCast<Simd<[m8; 8]>> for u64x8[src]

impl FromCast<Simd<[m8; 8]>> for f64x8[src]

impl FromCast<Simd<[m8; 8]>> for m64x8[src]

impl FromCast<Simd<[m8; 8]>> for isizex8[src]

impl FromCast<Simd<[m8; 8]>> for usizex8[src]

impl FromCast<Simd<[m8; 8]>> for msizex8[src]

impl FromCast<Simd<[m8; 8]>> for i16x8[src]

impl FromCast<Simd<[m8; 8]>> for u16x8[src]

impl FromCast<Simd<[m8; 8]>> for m16x8[src]

impl FromCast<Simd<[m8; 8]>> for i32x8[src]

impl FromCast<Simd<[m8; 8]>> for u32x8[src]

impl FromCast<Simd<[m8; 8]>> for f32x8[src]

impl FromCast<Simd<[m8; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 2]>> for i8x2[src]

impl FromCast<Simd<[msize; 2]>> for u8x2[src]

impl FromCast<Simd<[msize; 2]>> for i64x2[src]

impl FromCast<Simd<[msize; 2]>> for u64x2[src]

impl FromCast<Simd<[msize; 2]>> for f64x2[src]

impl FromCast<Simd<[msize; 2]>> for m64x2[src]

impl FromCast<Simd<[msize; 2]>> for isizex2[src]

impl FromCast<Simd<[msize; 2]>> for usizex2[src]

impl FromCast<Simd<[msize; 2]>> for i128x2[src]

impl FromCast<Simd<[msize; 2]>> for u128x2[src]

impl FromCast<Simd<[msize; 2]>> for m128x2[src]

impl FromCast<Simd<[msize; 2]>> for m8x2[src]

impl FromCast<Simd<[msize; 2]>> for i16x2[src]

impl FromCast<Simd<[msize; 2]>> for u16x2[src]

impl FromCast<Simd<[msize; 2]>> for m16x2[src]

impl FromCast<Simd<[msize; 2]>> for i32x2[src]

impl FromCast<Simd<[msize; 2]>> for u32x2[src]

impl FromCast<Simd<[msize; 2]>> for f32x2[src]

impl FromCast<Simd<[msize; 2]>> for m32x2[src]

impl FromCast<Simd<[msize; 4]>> for i8x4[src]

impl FromCast<Simd<[msize; 4]>> for u8x4[src]

impl FromCast<Simd<[msize; 4]>> for i64x4[src]

impl FromCast<Simd<[msize; 4]>> for u64x4[src]

impl FromCast<Simd<[msize; 4]>> for f64x4[src]

impl FromCast<Simd<[msize; 4]>> for m64x4[src]

impl FromCast<Simd<[msize; 4]>> for isizex4[src]

impl FromCast<Simd<[msize; 4]>> for usizex4[src]

impl FromCast<Simd<[msize; 4]>> for i128x4[src]

impl FromCast<Simd<[msize; 4]>> for u128x4[src]

impl FromCast<Simd<[msize; 4]>> for m128x4[src]

impl FromCast<Simd<[msize; 4]>> for m8x4[src]

impl FromCast<Simd<[msize; 4]>> for i16x4[src]

impl FromCast<Simd<[msize; 4]>> for u16x4[src]

impl FromCast<Simd<[msize; 4]>> for m16x4[src]

impl FromCast<Simd<[msize; 4]>> for i32x4[src]

impl FromCast<Simd<[msize; 4]>> for u32x4[src]

impl FromCast<Simd<[msize; 4]>> for f32x4[src]

impl FromCast<Simd<[msize; 4]>> for m32x4[src]

impl FromCast<Simd<[msize; 8]>> for i8x8[src]

impl FromCast<Simd<[msize; 8]>> for u8x8[src]

impl FromCast<Simd<[msize; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 8]>> for u64x8[src]

impl FromCast<Simd<[msize; 8]>> for f64x8[src]

impl FromCast<Simd<[msize; 8]>> for m64x8[src]

impl FromCast<Simd<[msize; 8]>> for isizex8[src]

impl FromCast<Simd<[msize; 8]>> for usizex8[src]

impl FromCast<Simd<[msize; 8]>> for m8x8[src]

impl FromCast<Simd<[msize; 8]>> for i16x8[src]

impl FromCast<Simd<[msize; 8]>> for u16x8[src]

impl FromCast<Simd<[msize; 8]>> for m16x8[src]

impl FromCast<Simd<[msize; 8]>> for i32x8[src]

impl FromCast<Simd<[msize; 8]>> for u32x8[src]

impl FromCast<Simd<[msize; 8]>> for f32x8[src]

impl FromCast<Simd<[msize; 8]>> for m32x8[src]

impl FromCast<Simd<[u128; 1]>> for i128x1[src]

impl FromCast<Simd<[u128; 1]>> for m128x1[src]

impl FromCast<Simd<[u128; 2]>> for i8x2[src]

impl FromCast<Simd<[u128; 2]>> for u8x2[src]

impl FromCast<Simd<[u128; 2]>> for i64x2[src]

impl FromCast<Simd<[u128; 2]>> for u64x2[src]

impl FromCast<Simd<[u128; 2]>> for f64x2[src]

impl FromCast<Simd<[u128; 2]>> for m64x2[src]

impl FromCast<Simd<[u128; 2]>> for isizex2[src]

impl FromCast<Simd<[u128; 2]>> for usizex2[src]

impl FromCast<Simd<[u128; 2]>> for msizex2[src]

impl FromCast<Simd<[u128; 2]>> for i128x2[src]

impl FromCast<Simd<[u128; 2]>> for m128x2[src]

impl FromCast<Simd<[u128; 2]>> for m8x2[src]

impl FromCast<Simd<[u128; 2]>> for i16x2[src]

impl FromCast<Simd<[u128; 2]>> for u16x2[src]

impl FromCast<Simd<[u128; 2]>> for m16x2[src]

impl FromCast<Simd<[u128; 2]>> for i32x2[src]

impl FromCast<Simd<[u128; 2]>> for u32x2[src]

impl FromCast<Simd<[u128; 2]>> for f32x2[src]

impl FromCast<Simd<[u128; 2]>> for m32x2[src]

impl FromCast<Simd<[u128; 4]>> for i8x4[src]

impl FromCast<Simd<[u128; 4]>> for u8x4[src]

impl FromCast<Simd<[u128; 4]>> for i64x4[src]

impl FromCast<Simd<[u128; 4]>> for u64x4[src]

impl FromCast<Simd<[u128; 4]>> for f64x4[src]

impl FromCast<Simd<[u128; 4]>> for m64x4[src]

impl FromCast<Simd<[u128; 4]>> for isizex4[src]

impl FromCast<Simd<[u128; 4]>> for usizex4[src]

impl FromCast<Simd<[u128; 4]>> for msizex4[src]

impl FromCast<Simd<[u128; 4]>> for i128x4[src]

impl FromCast<Simd<[u128; 4]>> for m128x4[src]

impl FromCast<Simd<[u128; 4]>> for m8x4[src]

impl FromCast<Simd<[u128; 4]>> for i16x4[src]

impl FromCast<Simd<[u128; 4]>> for u16x4[src]

impl FromCast<Simd<[u128; 4]>> for m16x4[src]

impl FromCast<Simd<[u128; 4]>> for i32x4[src]

impl FromCast<Simd<[u128; 4]>> for u32x4[src]

impl FromCast<Simd<[u128; 4]>> for f32x4[src]

impl FromCast<Simd<[u128; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 16]>> for i8x16[src]

impl FromCast<Simd<[u16; 16]>> for u8x16[src]

impl FromCast<Simd<[u16; 16]>> for m8x16[src]

impl FromCast<Simd<[u16; 16]>> for i16x16[src]

impl FromCast<Simd<[u16; 16]>> for m16x16[src]

impl FromCast<Simd<[u16; 16]>> for i32x16[src]

impl FromCast<Simd<[u16; 16]>> for u32x16[src]

impl FromCast<Simd<[u16; 16]>> for f32x16[src]

impl FromCast<Simd<[u16; 16]>> for m32x16[src]

impl FromCast<Simd<[u16; 2]>> for i8x2[src]

impl FromCast<Simd<[u16; 2]>> for u8x2[src]

impl FromCast<Simd<[u16; 2]>> for u64x2[src]

impl FromCast<Simd<[u16; 2]>> for f64x2[src]

impl FromCast<Simd<[u16; 2]>> for m64x2[src]

impl FromCast<Simd<[u16; 2]>> for isizex2[src]

impl FromCast<Simd<[u16; 2]>> for usizex2[src]

impl FromCast<Simd<[u16; 2]>> for msizex2[src]

impl FromCast<Simd<[u16; 2]>> for i128x2[src]

impl FromCast<Simd<[u16; 2]>> for u128x2[src]

impl FromCast<Simd<[u16; 2]>> for m128x2[src]

impl FromCast<Simd<[u16; 2]>> for m8x2[src]

impl FromCast<Simd<[u16; 2]>> for i16x2[src]

impl FromCast<Simd<[u16; 2]>> for m16x2[src]

impl FromCast<Simd<[u16; 2]>> for i32x2[src]

impl FromCast<Simd<[u16; 2]>> for u32x2[src]

impl FromCast<Simd<[u16; 2]>> for f32x2[src]

impl FromCast<Simd<[u16; 2]>> for m32x2[src]

impl FromCast<Simd<[u16; 2]>> for i64x2[src]

impl FromCast<Simd<[u16; 32]>> for i8x32[src]

impl FromCast<Simd<[u16; 32]>> for u8x32[src]

impl FromCast<Simd<[u16; 32]>> for m8x32[src]

impl FromCast<Simd<[u16; 32]>> for i16x32[src]

impl FromCast<Simd<[u16; 32]>> for m16x32[src]

impl FromCast<Simd<[u16; 4]>> for i8x4[src]

impl FromCast<Simd<[u16; 4]>> for u8x4[src]

impl FromCast<Simd<[u16; 4]>> for u64x4[src]

impl FromCast<Simd<[u16; 4]>> for f64x4[src]

impl FromCast<Simd<[u16; 4]>> for m64x4[src]

impl FromCast<Simd<[u16; 4]>> for isizex4[src]

impl FromCast<Simd<[u16; 4]>> for usizex4[src]

impl FromCast<Simd<[u16; 4]>> for msizex4[src]

impl FromCast<Simd<[u16; 4]>> for i128x4[src]

impl FromCast<Simd<[u16; 4]>> for u128x4[src]

impl FromCast<Simd<[u16; 4]>> for m128x4[src]

impl FromCast<Simd<[u16; 4]>> for m8x4[src]

impl FromCast<Simd<[u16; 4]>> for i16x4[src]

impl FromCast<Simd<[u16; 4]>> for m16x4[src]

impl FromCast<Simd<[u16; 4]>> for i32x4[src]

impl FromCast<Simd<[u16; 4]>> for u32x4[src]

impl FromCast<Simd<[u16; 4]>> for f32x4[src]

impl FromCast<Simd<[u16; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 4]>> for i64x4[src]

impl FromCast<Simd<[u16; 8]>> for i8x8[src]

impl FromCast<Simd<[u16; 8]>> for u8x8[src]

impl FromCast<Simd<[u16; 8]>> for u64x8[src]

impl FromCast<Simd<[u16; 8]>> for f64x8[src]

impl FromCast<Simd<[u16; 8]>> for m64x8[src]

impl FromCast<Simd<[u16; 8]>> for isizex8[src]

impl FromCast<Simd<[u16; 8]>> for usizex8[src]

impl FromCast<Simd<[u16; 8]>> for msizex8[src]

impl FromCast<Simd<[u16; 8]>> for m8x8[src]

impl FromCast<Simd<[u16; 8]>> for i16x8[src]

impl FromCast<Simd<[u16; 8]>> for m16x8[src]

impl FromCast<Simd<[u16; 8]>> for i32x8[src]

impl FromCast<Simd<[u16; 8]>> for u32x8[src]

impl FromCast<Simd<[u16; 8]>> for f32x8[src]

impl FromCast<Simd<[u16; 8]>> for m32x8[src]

impl FromCast<Simd<[u16; 8]>> for i64x8[src]

impl FromCast<Simd<[u32; 16]>> for i8x16[src]

impl FromCast<Simd<[u32; 16]>> for u8x16[src]

impl FromCast<Simd<[u32; 16]>> for m8x16[src]

impl FromCast<Simd<[u32; 16]>> for i16x16[src]

impl FromCast<Simd<[u32; 16]>> for u16x16[src]

impl FromCast<Simd<[u32; 16]>> for m16x16[src]

impl FromCast<Simd<[u32; 16]>> for i32x16[src]

impl FromCast<Simd<[u32; 16]>> for f32x16[src]

impl FromCast<Simd<[u32; 16]>> for m32x16[src]

impl FromCast<Simd<[u32; 2]>> for i8x2[src]

impl FromCast<Simd<[u32; 2]>> for u8x2[src]

impl FromCast<Simd<[u32; 2]>> for u64x2[src]

impl FromCast<Simd<[u32; 2]>> for f64x2[src]

impl FromCast<Simd<[u32; 2]>> for m64x2[src]

impl FromCast<Simd<[u32; 2]>> for isizex2[src]

impl FromCast<Simd<[u32; 2]>> for usizex2[src]

impl FromCast<Simd<[u32; 2]>> for msizex2[src]

impl FromCast<Simd<[u32; 2]>> for i128x2[src]

impl FromCast<Simd<[u32; 2]>> for u128x2[src]

impl FromCast<Simd<[u32; 2]>> for m128x2[src]

impl FromCast<Simd<[u32; 2]>> for m8x2[src]

impl FromCast<Simd<[u32; 2]>> for i16x2[src]

impl FromCast<Simd<[u32; 2]>> for u16x2[src]

impl FromCast<Simd<[u32; 2]>> for m16x2[src]

impl FromCast<Simd<[u32; 2]>> for i32x2[src]

impl FromCast<Simd<[u32; 2]>> for f32x2[src]

impl FromCast<Simd<[u32; 2]>> for m32x2[src]

impl FromCast<Simd<[u32; 2]>> for i64x2[src]

impl FromCast<Simd<[u32; 4]>> for i8x4[src]

impl FromCast<Simd<[u32; 4]>> for u8x4[src]

impl FromCast<Simd<[u32; 4]>> for u64x4[src]

impl FromCast<Simd<[u32; 4]>> for f64x4[src]

impl FromCast<Simd<[u32; 4]>> for m64x4[src]

impl FromCast<Simd<[u32; 4]>> for isizex4[src]

impl FromCast<Simd<[u32; 4]>> for usizex4[src]

impl FromCast<Simd<[u32; 4]>> for msizex4[src]

impl FromCast<Simd<[u32; 4]>> for i128x4[src]

impl FromCast<Simd<[u32; 4]>> for u128x4[src]

impl FromCast<Simd<[u32; 4]>> for m128x4[src]

impl FromCast<Simd<[u32; 4]>> for m8x4[src]

impl FromCast<Simd<[u32; 4]>> for i16x4[src]

impl FromCast<Simd<[u32; 4]>> for u16x4[src]

impl FromCast<Simd<[u32; 4]>> for m16x4[src]

impl FromCast<Simd<[u32; 4]>> for i32x4[src]

impl FromCast<Simd<[u32; 4]>> for f32x4[src]

impl FromCast<Simd<[u32; 4]>> for m32x4[src]

impl FromCast<Simd<[u32; 4]>> for i64x4[src]

impl FromCast<Simd<[u32; 8]>> for i8x8[src]

impl FromCast<Simd<[u32; 8]>> for u8x8[src]

impl FromCast<Simd<[u32; 8]>> for u64x8[src]

impl FromCast<Simd<[u32; 8]>> for f64x8[src]

impl FromCast<Simd<[u32; 8]>> for m64x8[src]

impl FromCast<Simd<[u32; 8]>> for isizex8[src]

impl FromCast<Simd<[u32; 8]>> for usizex8[src]

impl FromCast<Simd<[u32; 8]>> for msizex8[src]

impl FromCast<Simd<[u32; 8]>> for m8x8[src]

impl FromCast<Simd<[u32; 8]>> for i16x8[src]

impl FromCast<Simd<[u32; 8]>> for u16x8[src]

impl FromCast<Simd<[u32; 8]>> for m16x8[src]

impl FromCast<Simd<[u32; 8]>> for i32x8[src]

impl FromCast<Simd<[u32; 8]>> for f32x8[src]

impl FromCast<Simd<[u32; 8]>> for m32x8[src]

impl FromCast<Simd<[u32; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 2]>> for i8x2[src]

impl FromCast<Simd<[u64; 2]>> for u8x2[src]

impl FromCast<Simd<[u64; 2]>> for i64x2[src]

impl FromCast<Simd<[u64; 2]>> for f64x2[src]

impl FromCast<Simd<[u64; 2]>> for m64x2[src]

impl FromCast<Simd<[u64; 2]>> for isizex2[src]

impl FromCast<Simd<[u64; 2]>> for usizex2[src]

impl FromCast<Simd<[u64; 2]>> for msizex2[src]

impl FromCast<Simd<[u64; 2]>> for i128x2[src]

impl FromCast<Simd<[u64; 2]>> for u128x2[src]

impl FromCast<Simd<[u64; 2]>> for m128x2[src]

impl FromCast<Simd<[u64; 2]>> for m8x2[src]

impl FromCast<Simd<[u64; 2]>> for i16x2[src]

impl FromCast<Simd<[u64; 2]>> for u16x2[src]

impl FromCast<Simd<[u64; 2]>> for m16x2[src]

impl FromCast<Simd<[u64; 2]>> for i32x2[src]

impl FromCast<Simd<[u64; 2]>> for u32x2[src]

impl FromCast<Simd<[u64; 2]>> for f32x2[src]

impl FromCast<Simd<[u64; 2]>> for m32x2[src]

impl FromCast<Simd<[u64; 4]>> for i8x4[src]

impl FromCast<Simd<[u64; 4]>> for u8x4[src]

impl FromCast<Simd<[u64; 4]>> for i64x4[src]

impl FromCast<Simd<[u64; 4]>> for f64x4[src]

impl FromCast<Simd<[u64; 4]>> for m64x4[src]

impl FromCast<Simd<[u64; 4]>> for isizex4[src]

impl FromCast<Simd<[u64; 4]>> for usizex4[src]

impl FromCast<Simd<[u64; 4]>> for msizex4[src]

impl FromCast<Simd<[u64; 4]>> for i128x4[src]

impl FromCast<Simd<[u64; 4]>> for u128x4[src]

impl FromCast<Simd<[u64; 4]>> for m128x4[src]

impl FromCast<Simd<[u64; 4]>> for m8x4[src]

impl FromCast<Simd<[u64; 4]>> for i16x4[src]

impl FromCast<Simd<[u64; 4]>> for u16x4[src]

impl FromCast<Simd<[u64; 4]>> for m16x4[src]

impl FromCast<Simd<[u64; 4]>> for i32x4[src]

impl FromCast<Simd<[u64; 4]>> for u32x4[src]

impl FromCast<Simd<[u64; 4]>> for f32x4[src]

impl FromCast<Simd<[u64; 4]>> for m32x4[src]

impl FromCast<Simd<[u64; 8]>> for i8x8[src]

impl FromCast<Simd<[u64; 8]>> for u8x8[src]

impl FromCast<Simd<[u64; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 8]>> for f64x8[src]

impl FromCast<Simd<[u64; 8]>> for m64x8[src]

impl FromCast<Simd<[u64; 8]>> for isizex8[src]

impl FromCast<Simd<[u64; 8]>> for usizex8[src]

impl FromCast<Simd<[u64; 8]>> for msizex8[src]

impl FromCast<Simd<[u64; 8]>> for m8x8[src]

impl FromCast<Simd<[u64; 8]>> for i16x8[src]

impl FromCast<Simd<[u64; 8]>> for u16x8[src]

impl FromCast<Simd<[u64; 8]>> for m16x8[src]

impl FromCast<Simd<[u64; 8]>> for i32x8[src]

impl FromCast<Simd<[u64; 8]>> for u32x8[src]

impl FromCast<Simd<[u64; 8]>> for f32x8[src]

impl FromCast<Simd<[u64; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 16]>> for i8x16[src]

impl FromCast<Simd<[u8; 16]>> for m8x16[src]

impl FromCast<Simd<[u8; 16]>> for i16x16[src]

impl FromCast<Simd<[u8; 16]>> for u16x16[src]

impl FromCast<Simd<[u8; 16]>> for m16x16[src]

impl FromCast<Simd<[u8; 16]>> for i32x16[src]

impl FromCast<Simd<[u8; 16]>> for u32x16[src]

impl FromCast<Simd<[u8; 16]>> for f32x16[src]

impl FromCast<Simd<[u8; 16]>> for m32x16[src]

impl FromCast<Simd<[u8; 2]>> for i8x2[src]

impl FromCast<Simd<[u8; 2]>> for m8x2[src]

impl FromCast<Simd<[u8; 2]>> for u64x2[src]

impl FromCast<Simd<[u8; 2]>> for f64x2[src]

impl FromCast<Simd<[u8; 2]>> for m64x2[src]

impl FromCast<Simd<[u8; 2]>> for isizex2[src]

impl FromCast<Simd<[u8; 2]>> for usizex2[src]

impl FromCast<Simd<[u8; 2]>> for msizex2[src]

impl FromCast<Simd<[u8; 2]>> for i128x2[src]

impl FromCast<Simd<[u8; 2]>> for u128x2[src]

impl FromCast<Simd<[u8; 2]>> for m128x2[src]

impl FromCast<Simd<[u8; 2]>> for i16x2[src]

impl FromCast<Simd<[u8; 2]>> for u16x2[src]

impl FromCast<Simd<[u8; 2]>> for m16x2[src]

impl FromCast<Simd<[u8; 2]>> for i32x2[src]

impl FromCast<Simd<[u8; 2]>> for u32x2[src]

impl FromCast<Simd<[u8; 2]>> for f32x2[src]

impl FromCast<Simd<[u8; 2]>> for m32x2[src]

impl FromCast<Simd<[u8; 2]>> for i64x2[src]

impl FromCast<Simd<[u8; 32]>> for i8x32[src]

impl FromCast<Simd<[u8; 32]>> for m8x32[src]

impl FromCast<Simd<[u8; 32]>> for i16x32[src]

impl FromCast<Simd<[u8; 32]>> for u16x32[src]

impl FromCast<Simd<[u8; 32]>> for m16x32[src]

impl FromCast<Simd<[u8; 4]>> for i8x4[src]

impl FromCast<Simd<[u8; 4]>> for m8x4[src]

impl FromCast<Simd<[u8; 4]>> for u64x4[src]

impl FromCast<Simd<[u8; 4]>> for f64x4[src]

impl FromCast<Simd<[u8; 4]>> for m64x4[src]

impl FromCast<Simd<[u8; 4]>> for isizex4[src]

impl FromCast<Simd<[u8; 4]>> for usizex4[src]

impl FromCast<Simd<[u8; 4]>> for msizex4[src]

impl FromCast<Simd<[u8; 4]>> for i128x4[src]

impl FromCast<Simd<[u8; 4]>> for u128x4[src]

impl FromCast<Simd<[u8; 4]>> for m128x4[src]

impl FromCast<Simd<[u8; 4]>> for i16x4[src]

impl FromCast<Simd<[u8; 4]>> for u16x4[src]

impl FromCast<Simd<[u8; 4]>> for m16x4[src]

impl FromCast<Simd<[u8; 4]>> for i32x4[src]

impl FromCast<Simd<[u8; 4]>> for u32x4[src]

impl FromCast<Simd<[u8; 4]>> for f32x4[src]

impl FromCast<Simd<[u8; 4]>> for m32x4[src]

impl FromCast<Simd<[u8; 4]>> for i64x4[src]

impl FromCast<Simd<[u8; 64]>> for i8x64[src]

impl FromCast<Simd<[u8; 64]>> for m8x64[src]

impl FromCast<Simd<[u8; 8]>> for i8x8[src]

impl FromCast<Simd<[u8; 8]>> for m8x8[src]

impl FromCast<Simd<[u8; 8]>> for u64x8[src]

impl FromCast<Simd<[u8; 8]>> for f64x8[src]

impl FromCast<Simd<[u8; 8]>> for m64x8[src]

impl FromCast<Simd<[u8; 8]>> for isizex8[src]

impl FromCast<Simd<[u8; 8]>> for usizex8[src]

impl FromCast<Simd<[u8; 8]>> for msizex8[src]

impl FromCast<Simd<[u8; 8]>> for i16x8[src]

impl FromCast<Simd<[u8; 8]>> for u16x8[src]

impl FromCast<Simd<[u8; 8]>> for m16x8[src]

impl FromCast<Simd<[u8; 8]>> for i32x8[src]

impl FromCast<Simd<[u8; 8]>> for u32x8[src]

impl FromCast<Simd<[u8; 8]>> for f32x8[src]

impl FromCast<Simd<[u8; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 2]>> for i8x2[src]

impl FromCast<Simd<[usize; 2]>> for u8x2[src]

impl FromCast<Simd<[usize; 2]>> for i64x2[src]

impl FromCast<Simd<[usize; 2]>> for u64x2[src]

impl FromCast<Simd<[usize; 2]>> for f64x2[src]

impl FromCast<Simd<[usize; 2]>> for m64x2[src]

impl FromCast<Simd<[usize; 2]>> for isizex2[src]

impl FromCast<Simd<[usize; 2]>> for msizex2[src]

impl FromCast<Simd<[usize; 2]>> for i128x2[src]

impl FromCast<Simd<[usize; 2]>> for u128x2[src]

impl FromCast<Simd<[usize; 2]>> for m128x2[src]

impl FromCast<Simd<[usize; 2]>> for m8x2[src]

impl FromCast<Simd<[usize; 2]>> for i16x2[src]

impl FromCast<Simd<[usize; 2]>> for u16x2[src]

impl FromCast<Simd<[usize; 2]>> for m16x2[src]

impl FromCast<Simd<[usize; 2]>> for i32x2[src]

impl FromCast<Simd<[usize; 2]>> for u32x2[src]

impl FromCast<Simd<[usize; 2]>> for f32x2[src]

impl FromCast<Simd<[usize; 2]>> for m32x2[src]

impl FromCast<Simd<[usize; 4]>> for i8x4[src]

impl FromCast<Simd<[usize; 4]>> for u8x4[src]

impl FromCast<Simd<[usize; 4]>> for i64x4[src]

impl FromCast<Simd<[usize; 4]>> for u64x4[src]

impl FromCast<Simd<[usize; 4]>> for f64x4[src]

impl FromCast<Simd<[usize; 4]>> for m64x4[src]

impl FromCast<Simd<[usize; 4]>> for isizex4[src]

impl FromCast<Simd<[usize; 4]>> for msizex4[src]

impl FromCast<Simd<[usize; 4]>> for i128x4[src]

impl FromCast<Simd<[usize; 4]>> for u128x4[src]

impl FromCast<Simd<[usize; 4]>> for m128x4[src]

impl FromCast<Simd<[usize; 4]>> for m8x4[src]

impl FromCast<Simd<[usize; 4]>> for i16x4[src]

impl FromCast<Simd<[usize; 4]>> for u16x4[src]

impl FromCast<Simd<[usize; 4]>> for m16x4[src]

impl FromCast<Simd<[usize; 4]>> for i32x4[src]

impl FromCast<Simd<[usize; 4]>> for u32x4[src]

impl FromCast<Simd<[usize; 4]>> for f32x4[src]

impl FromCast<Simd<[usize; 4]>> for m32x4[src]

impl FromCast<Simd<[usize; 8]>> for i8x8[src]

impl FromCast<Simd<[usize; 8]>> for u8x8[src]

impl FromCast<Simd<[usize; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 8]>> for u64x8[src]

impl FromCast<Simd<[usize; 8]>> for f64x8[src]

impl FromCast<Simd<[usize; 8]>> for m64x8[src]

impl FromCast<Simd<[usize; 8]>> for isizex8[src]

impl FromCast<Simd<[usize; 8]>> for msizex8[src]

impl FromCast<Simd<[usize; 8]>> for m8x8[src]

impl FromCast<Simd<[usize; 8]>> for i16x8[src]

impl FromCast<Simd<[usize; 8]>> for u16x8[src]

impl FromCast<Simd<[usize; 8]>> for m16x8[src]

impl FromCast<Simd<[usize; 8]>> for i32x8[src]

impl FromCast<Simd<[usize; 8]>> for u32x8[src]

impl FromCast<Simd<[usize; 8]>> for f32x8[src]

impl FromCast<Simd<[usize; 8]>> for m32x8[src]
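
These `FromCast` implementations provide lane-wise casts between vectors with the same number of lanes. A minimal sketch, assuming the casts follow per-lane `as` semantics and that the trait's constructor method is `FromCast::from_cast` (the concrete values below are purely illustrative):

use packed_simd_2::{FromCast, i32x4, u16x4};

// Widen four u16 lanes into four i32 lanes; each lane is converted
// independently, so these values are preserved unchanged.
let a = u16x4::new(1, 2, 3, 65_535);
let b = i32x4::from_cast(a);
assert_eq!(b, i32x4::new(1, 2, 3, 65_535));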

impl Mul<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the * operator.
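
The `Mul` implementations come in two flavours for every vector type: vector * vector, which multiplies lane by lane, and scalar * vector, whose `Output` is the vector type and which presumably splats the scalar across the lanes first. A small sketch with illustrative values:

use packed_simd_2::f32x4;

// Lane-wise product of two 4-lane f32 vectors.
let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
let b = f32x4::splat(10.0);
assert_eq!(a * b, f32x4::new(10.0, 20.0, 30.0, 40.0));

// Scalar on the left: the result is still an f32x4.
assert_eq!(2.0_f32 * a, f32x4::new(2.0, 4.0, 6.0, 8.0));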

impl MulAssign<Simd<[f32; 16]>> for f32x16[src]

impl MulAssign<Simd<[f32; 2]>> for f32x2[src]

impl MulAssign<Simd<[f32; 4]>> for f32x4[src]

impl MulAssign<Simd<[f32; 8]>> for f32x8[src]

impl MulAssign<Simd<[f64; 2]>> for f64x2[src]

impl MulAssign<Simd<[f64; 4]>> for f64x4[src]

impl MulAssign<Simd<[f64; 8]>> for f64x8[src]

impl MulAssign<Simd<[i128; 1]>> for i128x1[src]

impl MulAssign<Simd<[i128; 2]>> for i128x2[src]

impl MulAssign<Simd<[i128; 4]>> for i128x4[src]

impl MulAssign<Simd<[i16; 16]>> for i16x16[src]

impl MulAssign<Simd<[i16; 2]>> for i16x2[src]

impl MulAssign<Simd<[i16; 32]>> for i16x32[src]

impl MulAssign<Simd<[i16; 4]>> for i16x4[src]

impl MulAssign<Simd<[i16; 8]>> for i16x8[src]

impl MulAssign<Simd<[i32; 16]>> for i32x16[src]

impl MulAssign<Simd<[i32; 2]>> for i32x2[src]

impl MulAssign<Simd<[i32; 4]>> for i32x4[src]

impl MulAssign<Simd<[i32; 8]>> for i32x8[src]

impl MulAssign<Simd<[i64; 2]>> for i64x2[src]

impl MulAssign<Simd<[i64; 4]>> for i64x4[src]

impl MulAssign<Simd<[i64; 8]>> for i64x8[src]

impl MulAssign<Simd<[i8; 16]>> for i8x16[src]

impl MulAssign<Simd<[i8; 2]>> for i8x2[src]

impl MulAssign<Simd<[i8; 32]>> for i8x32[src]

impl MulAssign<Simd<[i8; 4]>> for i8x4[src]

impl MulAssign<Simd<[i8; 64]>> for i8x64[src]

impl MulAssign<Simd<[i8; 8]>> for i8x8[src]

impl MulAssign<Simd<[isize; 2]>> for isizex2[src]

impl MulAssign<Simd<[isize; 4]>> for isizex4[src]

impl MulAssign<Simd<[isize; 8]>> for isizex8[src]

impl MulAssign<Simd<[u128; 1]>> for u128x1[src]

impl MulAssign<Simd<[u128; 2]>> for u128x2[src]

impl MulAssign<Simd<[u128; 4]>> for u128x4[src]

impl MulAssign<Simd<[u16; 16]>> for u16x16[src]

impl MulAssign<Simd<[u16; 2]>> for u16x2[src]

impl MulAssign<Simd<[u16; 32]>> for u16x32[src]

impl MulAssign<Simd<[u16; 4]>> for u16x4[src]

impl MulAssign<Simd<[u16; 8]>> for u16x8[src]

impl MulAssign<Simd<[u32; 16]>> for u32x16[src]

impl MulAssign<Simd<[u32; 2]>> for u32x2[src]

impl MulAssign<Simd<[u32; 4]>> for u32x4[src]

impl MulAssign<Simd<[u32; 8]>> for u32x8[src]

impl MulAssign<Simd<[u64; 2]>> for u64x2[src]

impl MulAssign<Simd<[u64; 4]>> for u64x4[src]

impl MulAssign<Simd<[u64; 8]>> for u64x8[src]

impl MulAssign<Simd<[u8; 16]>> for u8x16[src]

impl MulAssign<Simd<[u8; 2]>> for u8x2[src]

impl MulAssign<Simd<[u8; 32]>> for u8x32[src]

impl MulAssign<Simd<[u8; 4]>> for u8x4[src]

impl MulAssign<Simd<[u8; 64]>> for u8x64[src]

impl MulAssign<Simd<[u8; 8]>> for u8x8[src]

impl MulAssign<Simd<[usize; 2]>> for usizex2[src]

impl MulAssign<Simd<[usize; 4]>> for usizex4[src]

impl MulAssign<Simd<[usize; 8]>> for usizex8[src]
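
`MulAssign` mirrors the vector * vector form of `Mul`, updating the left-hand vector in place. A minimal sketch:

use packed_simd_2::u32x4;

// `*=` multiplies lane by lane and stores the result back into `v`.
let mut v = u32x4::new(1, 2, 3, 4);
v *= u32x4::splat(3);
assert_eq!(v, u32x4::new(3, 6, 9, 12));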

impl<T> PartialEq<Simd<[*const T; 2]>> for cptrx2<T>[src]

impl<T> PartialEq<Simd<[*const T; 4]>> for cptrx4<T>[src]

impl<T> PartialEq<Simd<[*const T; 8]>> for cptrx8<T>[src]

impl<T> PartialEq<Simd<[*mut T; 2]>> for mptrx2<T>[src]

impl<T> PartialEq<Simd<[*mut T; 4]>> for mptrx4<T>[src]

impl<T> PartialEq<Simd<[*mut T; 8]>> for mptrx8<T>[src]

impl PartialEq<Simd<[f32; 16]>> for f32x16[src]

impl PartialEq<Simd<[f32; 2]>> for f32x2[src]

impl PartialEq<Simd<[f32; 4]>> for f32x4[src]

impl PartialEq<Simd<[f32; 8]>> for f32x8[src]

impl PartialEq<Simd<[f64; 2]>> for f64x2[src]

impl PartialEq<Simd<[f64; 4]>> for f64x4[src]

impl PartialEq<Simd<[f64; 8]>> for f64x8[src]

impl PartialEq<Simd<[i128; 1]>> for i128x1[src]

impl PartialEq<Simd<[i128; 2]>> for i128x2[src]

impl PartialEq<Simd<[i128; 4]>> for i128x4[src]

impl PartialEq<Simd<[i16; 16]>> for i16x16[src]

impl PartialEq<Simd<[i16; 2]>> for i16x2[src]

impl PartialEq<Simd<[i16; 32]>> for i16x32[src]

impl PartialEq<Simd<[i16; 4]>> for i16x4[src]

impl PartialEq<Simd<[i16; 8]>> for i16x8[src]

impl PartialEq<Simd<[i32; 16]>> for i32x16[src]

impl PartialEq<Simd<[i32; 2]>> for i32x2[src]

impl PartialEq<Simd<[i32; 4]>> for i32x4[src]

impl PartialEq<Simd<[i32; 8]>> for i32x8[src]

impl PartialEq<Simd<[i64; 2]>> for i64x2[src]

impl PartialEq<Simd<[i64; 4]>> for i64x4[src]

impl PartialEq<Simd<[i64; 8]>> for i64x8[src]

impl PartialEq<Simd<[i8; 16]>> for i8x16[src]

impl PartialEq<Simd<[i8; 2]>> for i8x2[src]

impl PartialEq<Simd<[i8; 32]>> for i8x32[src]

impl PartialEq<Simd<[i8; 4]>> for i8x4[src]

impl PartialEq<Simd<[i8; 64]>> for i8x64[src]

impl PartialEq<Simd<[i8; 8]>> for i8x8[src]

impl PartialEq<Simd<[isize; 2]>> for isizex2[src]

impl PartialEq<Simd<[isize; 4]>> for isizex4[src]

impl PartialEq<Simd<[isize; 8]>> for isizex8[src]

impl PartialEq<Simd<[m128; 1]>> for m128x1[src]

impl PartialEq<Simd<[m128; 2]>> for m128x2[src]

impl PartialEq<Simd<[m128; 4]>> for m128x4[src]

impl PartialEq<Simd<[m16; 16]>> for m16x16[src]

impl PartialEq<Simd<[m16; 2]>> for m16x2[src]

impl PartialEq<Simd<[m16; 32]>> for m16x32[src]

impl PartialEq<Simd<[m16; 4]>> for m16x4[src]

impl PartialEq<Simd<[m16; 8]>> for m16x8[src]

impl PartialEq<Simd<[m32; 16]>> for m32x16[src]

impl PartialEq<Simd<[m32; 2]>> for m32x2[src]

impl PartialEq<Simd<[m32; 4]>> for m32x4[src]

impl PartialEq<Simd<[m32; 8]>> for m32x8[src]

impl PartialEq<Simd<[m64; 2]>> for m64x2[src]

impl PartialEq<Simd<[m64; 4]>> for m64x4[src]

impl PartialEq<Simd<[m64; 8]>> for m64x8[src]

impl PartialEq<Simd<[m8; 16]>> for m8x16[src]

impl PartialEq<Simd<[m8; 2]>> for m8x2[src]

impl PartialEq<Simd<[m8; 32]>> for m8x32[src]

impl PartialEq<Simd<[m8; 4]>> for m8x4[src]

impl PartialEq<Simd<[m8; 64]>> for m8x64[src]

impl PartialEq<Simd<[m8; 8]>> for m8x8[src]

impl PartialEq<Simd<[msize; 2]>> for msizex2[src]

impl PartialEq<Simd<[msize; 4]>> for msizex4[src]

impl PartialEq<Simd<[msize; 8]>> for msizex8[src]

impl PartialEq<Simd<[u128; 1]>> for u128x1[src]

impl PartialEq<Simd<[u128; 2]>> for u128x2[src]

impl PartialEq<Simd<[u128; 4]>> for u128x4[src]

impl PartialEq<Simd<[u16; 16]>> for u16x16[src]

impl PartialEq<Simd<[u16; 2]>> for u16x2[src]

impl PartialEq<Simd<[u16; 32]>> for u16x32[src]

impl PartialEq<Simd<[u16; 4]>> for u16x4[src]

impl PartialEq<Simd<[u16; 8]>> for u16x8[src]

impl PartialEq<Simd<[u32; 16]>> for u32x16[src]

impl PartialEq<Simd<[u32; 2]>> for u32x2[src]

impl PartialEq<Simd<[u32; 4]>> for u32x4[src]

impl PartialEq<Simd<[u32; 8]>> for u32x8[src]

impl PartialEq<Simd<[u64; 2]>> for u64x2[src]

impl PartialEq<Simd<[u64; 4]>> for u64x4[src]

impl PartialEq<Simd<[u64; 8]>> for u64x8[src]

impl PartialEq<Simd<[u8; 16]>> for u8x16[src]

impl PartialEq<Simd<[u8; 2]>> for u8x2[src]

impl PartialEq<Simd<[u8; 32]>> for u8x32[src]

impl PartialEq<Simd<[u8; 4]>> for u8x4[src]

impl PartialEq<Simd<[u8; 64]>> for u8x64[src]

impl PartialEq<Simd<[u8; 8]>> for u8x8[src]

impl PartialEq<Simd<[usize; 2]>> for usizex2[src]

impl PartialEq<Simd<[usize; 4]>> for usizex4[src]

impl PartialEq<Simd<[usize; 8]>> for usizex8[src]
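
`PartialEq` compares two vectors of the same type as whole values: `==` yields a single `bool` that is true only when every lane matches (lane-wise comparisons that return mask vectors are provided by separate methods on the vector types). For example:

use packed_simd_2::i32x4;

let a = i32x4::new(1, 2, 3, 4);
assert!(a == i32x4::new(1, 2, 3, 4));
assert!(a != i32x4::new(1, 2, 3, 5)); // a single differing lane breaks equality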

impl<'a> Product<&'a Simd<[f32; 16]>> for f32x16[src]

impl<'a> Product<&'a Simd<[f32; 2]>> for f32x2[src]

impl<'a> Product<&'a Simd<[f32; 4]>> for f32x4[src]

impl<'a> Product<&'a Simd<[f32; 8]>> for f32x8[src]

impl<'a> Product<&'a Simd<[f64; 2]>> for f64x2[src]

impl<'a> Product<&'a Simd<[f64; 4]>> for f64x4[src]

impl<'a> Product<&'a Simd<[f64; 8]>> for f64x8[src]

impl<'a> Product<&'a Simd<[i128; 1]>> for i128x1[src]

impl<'a> Product<&'a Simd<[i128; 2]>> for i128x2[src]

impl<'a> Product<&'a Simd<[i128; 4]>> for i128x4[src]

impl<'a> Product<&'a Simd<[i16; 16]>> for i16x16[src]

impl<'a> Product<&'a Simd<[i16; 2]>> for i16x2[src]

impl<'a> Product<&'a Simd<[i16; 32]>> for i16x32[src]

impl<'a> Product<&'a Simd<[i16; 4]>> for i16x4[src]

impl<'a> Product<&'a Simd<[i16; 8]>> for i16x8[src]

impl<'a> Product<&'a Simd<[i32; 16]>> for i32x16[src]

impl<'a> Product<&'a Simd<[i32; 2]>> for i32x2[src]

impl<'a> Product<&'a Simd<[i32; 4]>> for i32x4[src]

impl<'a> Product<&'a Simd<[i32; 8]>> for i32x8[src]

impl<'a> Product<&'a Simd<[i64; 2]>> for i64x2[src]

impl<'a> Product<&'a Simd<[i64; 4]>> for i64x4[src]

impl<'a> Product<&'a Simd<[i64; 8]>> for i64x8[src]

impl<'a> Product<&'a Simd<[i8; 16]>> for i8x16[src]

impl<'a> Product<&'a Simd<[i8; 2]>> for i8x2[src]

impl<'a> Product<&'a Simd<[i8; 32]>> for i8x32[src]

impl<'a> Product<&'a Simd<[i8; 4]>> for i8x4[src]

impl<'a> Product<&'a Simd<[i8; 64]>> for i8x64[src]

impl<'a> Product<&'a Simd<[i8; 8]>> for i8x8[src]

impl<'a> Product<&'a Simd<[isize; 2]>> for isizex2[src]

impl<'a> Product<&'a Simd<[isize; 4]>> for isizex4[src]

impl<'a> Product<&'a Simd<[isize; 8]>> for isizex8[src]

impl<'a> Product<&'a Simd<[u128; 1]>> for u128x1[src]

impl<'a> Product<&'a Simd<[u128; 2]>> for u128x2[src]

impl<'a> Product<&'a Simd<[u128; 4]>> for u128x4[src]

impl<'a> Product<&'a Simd<[u16; 16]>> for u16x16[src]

impl<'a> Product<&'a Simd<[u16; 2]>> for u16x2[src]

impl<'a> Product<&'a Simd<[u16; 32]>> for u16x32[src]

impl<'a> Product<&'a Simd<[u16; 4]>> for u16x4[src]

impl<'a> Product<&'a Simd<[u16; 8]>> for u16x8[src]

impl<'a> Product<&'a Simd<[u32; 16]>> for u32x16[src]

impl<'a> Product<&'a Simd<[u32; 2]>> for u32x2[src]

impl<'a> Product<&'a Simd<[u32; 4]>> for u32x4[src]

impl<'a> Product<&'a Simd<[u32; 8]>> for u32x8[src]

impl<'a> Product<&'a Simd<[u64; 2]>> for u64x2[src]

impl<'a> Product<&'a Simd<[u64; 4]>> for u64x4[src]

impl<'a> Product<&'a Simd<[u64; 8]>> for u64x8[src]

impl<'a> Product<&'a Simd<[u8; 16]>> for u8x16[src]

impl<'a> Product<&'a Simd<[u8; 2]>> for u8x2[src]

impl<'a> Product<&'a Simd<[u8; 32]>> for u8x32[src]

impl<'a> Product<&'a Simd<[u8; 4]>> for u8x4[src]

impl<'a> Product<&'a Simd<[u8; 64]>> for u8x64[src]

impl<'a> Product<&'a Simd<[u8; 8]>> for u8x8[src]

impl<'a> Product<&'a Simd<[usize; 2]>> for usizex2[src]

impl<'a> Product<&'a Simd<[usize; 4]>> for usizex4[src]

impl<'a> Product<&'a Simd<[usize; 8]>> for usizex8[src]

impl Product<Simd<[f32; 16]>> for f32x16[src]

impl Product<Simd<[f32; 2]>> for f32x2[src]

impl Product<Simd<[f32; 4]>> for f32x4[src]

impl Product<Simd<[f32; 8]>> for f32x8[src]

impl Product<Simd<[f64; 2]>> for f64x2[src]

impl Product<Simd<[f64; 4]>> for f64x4[src]

impl Product<Simd<[f64; 8]>> for f64x8[src]

impl Product<Simd<[i128; 1]>> for i128x1[src]

impl Product<Simd<[i128; 2]>> for i128x2[src]

impl Product<Simd<[i128; 4]>> for i128x4[src]

impl Product<Simd<[i16; 16]>> for i16x16[src]

impl Product<Simd<[i16; 2]>> for i16x2[src]

impl Product<Simd<[i16; 32]>> for i16x32[src]

impl Product<Simd<[i16; 4]>> for i16x4[src]

impl Product<Simd<[i16; 8]>> for i16x8[src]

impl Product<Simd<[i32; 16]>> for i32x16[src]

impl Product<Simd<[i32; 2]>> for i32x2[src]

impl Product<Simd<[i32; 4]>> for i32x4[src]

impl Product<Simd<[i32; 8]>> for i32x8[src]

impl Product<Simd<[i64; 2]>> for i64x2[src]

impl Product<Simd<[i64; 4]>> for i64x4[src]

impl Product<Simd<[i64; 8]>> for i64x8[src]

impl Product<Simd<[i8; 16]>> for i8x16[src]

impl Product<Simd<[i8; 2]>> for i8x2[src]

impl Product<Simd<[i8; 32]>> for i8x32[src]

impl Product<Simd<[i8; 4]>> for i8x4[src]

impl Product<Simd<[i8; 64]>> for i8x64[src]

impl Product<Simd<[i8; 8]>> for i8x8[src]

impl Product<Simd<[isize; 2]>> for isizex2[src]

impl Product<Simd<[isize; 4]>> for isizex4[src]

impl Product<Simd<[isize; 8]>> for isizex8[src]

impl Product<Simd<[u128; 1]>> for u128x1[src]

impl Product<Simd<[u128; 2]>> for u128x2[src]

impl Product<Simd<[u128; 4]>> for u128x4[src]

impl Product<Simd<[u16; 16]>> for u16x16[src]

impl Product<Simd<[u16; 2]>> for u16x2[src]

impl Product<Simd<[u16; 32]>> for u16x32[src]

impl Product<Simd<[u16; 4]>> for u16x4[src]

impl Product<Simd<[u16; 8]>> for u16x8[src]

impl Product<Simd<[u32; 16]>> for u32x16[src]

impl Product<Simd<[u32; 2]>> for u32x2[src]

impl Product<Simd<[u32; 4]>> for u32x4[src]

impl Product<Simd<[u32; 8]>> for u32x8[src]

impl Product<Simd<[u64; 2]>> for u64x2[src]

impl Product<Simd<[u64; 4]>> for u64x4[src]

impl Product<Simd<[u64; 8]>> for u64x8[src]

impl Product<Simd<[u8; 16]>> for u8x16[src]

impl Product<Simd<[u8; 2]>> for u8x2[src]

impl Product<Simd<[u8; 32]>> for u8x32[src]

impl Product<Simd<[u8; 4]>> for u8x4[src]

impl Product<Simd<[u8; 64]>> for u8x64[src]

impl Product<Simd<[u8; 8]>> for u8x8[src]

impl Product<Simd<[usize; 2]>> for usizex2[src]

impl Product<Simd<[usize; 4]>> for usizex4[src]

impl Product<Simd<[usize; 8]>> for usizex8[src]
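
The `Product` implementations let an iterator of vectors, by value or by reference, be folded into their lane-wise product via `Iterator::product`. A sketch with illustrative values:

use packed_simd_2::i32x4;

let vs = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8)];

// The `&'a Simd<_>` impls cover iterators over references...
let by_ref: i32x4 = vs.iter().product();
// ...and the by-value impls cover owned items.
let by_val: i32x4 = vs.iter().copied().product();

assert_eq!(by_ref, i32x4::new(5, 12, 21, 32));
assert_eq!(by_ref, by_val);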

impl Rem<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the % operator.
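
The Rem impls apply % lane-wise; the impls with a scalar left-hand side (impl Rem<Simd<…>> for u16 and friends) effectively splat the scalar across the lanes of the right-hand vector before taking the remainder. A minimal sketch, with the vector type and lane values chosen purely for illustration:

use packed_simd_2::u32x4;

let v = u32x4::new(10, 11, 12, 13);
let m = u32x4::splat(4);

// vector % vector: lane-wise remainder
assert_eq!(v % m, u32x4::new(2, 3, 0, 1));

// scalar % vector: the scalar is broadcast to every lane first
assert_eq!(100u32 % m, u32x4::splat(0));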

impl RemAssign<Simd<[f32; 16]>> for f32x16[src]

impl RemAssign<Simd<[f32; 2]>> for f32x2[src]

impl RemAssign<Simd<[f32; 4]>> for f32x4[src]

impl RemAssign<Simd<[f32; 8]>> for f32x8[src]

impl RemAssign<Simd<[f64; 2]>> for f64x2[src]

impl RemAssign<Simd<[f64; 4]>> for f64x4[src]

impl RemAssign<Simd<[f64; 8]>> for f64x8[src]

impl RemAssign<Simd<[i128; 1]>> for i128x1[src]

impl RemAssign<Simd<[i128; 2]>> for i128x2[src]

impl RemAssign<Simd<[i128; 4]>> for i128x4[src]

impl RemAssign<Simd<[i16; 16]>> for i16x16[src]

impl RemAssign<Simd<[i16; 2]>> for i16x2[src]

impl RemAssign<Simd<[i16; 32]>> for i16x32[src]

impl RemAssign<Simd<[i16; 4]>> for i16x4[src]

impl RemAssign<Simd<[i16; 8]>> for i16x8[src]

impl RemAssign<Simd<[i32; 16]>> for i32x16[src]

impl RemAssign<Simd<[i32; 2]>> for i32x2[src]

impl RemAssign<Simd<[i32; 4]>> for i32x4[src]

impl RemAssign<Simd<[i32; 8]>> for i32x8[src]

impl RemAssign<Simd<[i64; 2]>> for i64x2[src]

impl RemAssign<Simd<[i64; 4]>> for i64x4[src]

impl RemAssign<Simd<[i64; 8]>> for i64x8[src]

impl RemAssign<Simd<[i8; 16]>> for i8x16[src]

impl RemAssign<Simd<[i8; 2]>> for i8x2[src]

impl RemAssign<Simd<[i8; 32]>> for i8x32[src]

impl RemAssign<Simd<[i8; 4]>> for i8x4[src]

impl RemAssign<Simd<[i8; 64]>> for i8x64[src]

impl RemAssign<Simd<[i8; 8]>> for i8x8[src]

impl RemAssign<Simd<[isize; 2]>> for isizex2[src]

impl RemAssign<Simd<[isize; 4]>> for isizex4[src]

impl RemAssign<Simd<[isize; 8]>> for isizex8[src]

impl RemAssign<Simd<[u128; 1]>> for u128x1[src]

impl RemAssign<Simd<[u128; 2]>> for u128x2[src]

impl RemAssign<Simd<[u128; 4]>> for u128x4[src]

impl RemAssign<Simd<[u16; 16]>> for u16x16[src]

impl RemAssign<Simd<[u16; 2]>> for u16x2[src]

impl RemAssign<Simd<[u16; 32]>> for u16x32[src]

impl RemAssign<Simd<[u16; 4]>> for u16x4[src]

impl RemAssign<Simd<[u16; 8]>> for u16x8[src]

impl RemAssign<Simd<[u32; 16]>> for u32x16[src]

impl RemAssign<Simd<[u32; 2]>> for u32x2[src]

impl RemAssign<Simd<[u32; 4]>> for u32x4[src]

impl RemAssign<Simd<[u32; 8]>> for u32x8[src]

impl RemAssign<Simd<[u64; 2]>> for u64x2[src]

impl RemAssign<Simd<[u64; 4]>> for u64x4[src]

impl RemAssign<Simd<[u64; 8]>> for u64x8[src]

impl RemAssign<Simd<[u8; 16]>> for u8x16[src]

impl RemAssign<Simd<[u8; 2]>> for u8x2[src]

impl RemAssign<Simd<[u8; 32]>> for u8x32[src]

impl RemAssign<Simd<[u8; 4]>> for u8x4[src]

impl RemAssign<Simd<[u8; 64]>> for u8x64[src]

impl RemAssign<Simd<[u8; 8]>> for u8x8[src]

impl RemAssign<Simd<[usize; 2]>> for usizex2[src]

impl RemAssign<Simd<[usize; 4]>> for usizex4[src]

impl RemAssign<Simd<[usize; 8]>> for usizex8[src]
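
RemAssign provides the in-place %= form of the same lane-wise remainder. A small sketch (values illustrative):

use packed_simd_2::u16x8;

let mut v = u16x8::splat(13);
v %= u16x8::splat(5); // in-place, lane-wise remainder
assert_eq!(v, u16x8::splat(3));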

impl Shl<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the << operator.
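
For these Shl impls the right-hand side is a vector of the same shape, so each lane of the left operand is shifted by the amount held in the corresponding lane of the right operand. A minimal sketch (lane values illustrative):

use packed_simd_2::u32x4;

let ones = u32x4::splat(1);
let n = u32x4::new(0, 1, 2, 3);
// each lane is shifted left by the matching lane of `n`
assert_eq!(ones << n, u32x4::new(1, 2, 4, 8));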

impl ShlAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShlAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShlAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShlAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShlAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShlAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShlAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShlAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShlAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShlAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShlAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShlAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShlAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShlAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShlAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShlAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShlAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShlAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShlAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShlAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShlAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShlAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShlAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShlAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShlAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShlAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShlAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShlAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShlAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShlAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShlAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShlAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShlAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShlAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShlAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShlAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShlAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShlAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShlAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShlAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShlAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShlAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShlAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShlAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShlAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShlAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShlAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShlAssign<Simd<[usize; 8]>> for usizex8[src]
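
ShlAssign is the in-place <<= form of the same lane-wise shift. Sketch:

use packed_simd_2::u8x4;

let mut v = u8x4::splat(1);
v <<= u8x4::new(0, 1, 2, 3); // in-place, lane-wise shift
assert_eq!(v, u8x4::new(1, 2, 4, 8));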

impl Shr<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the >> operator.
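
Shr is likewise lane-wise, with each lane shifted right by the matching lane of the right-hand vector; for signed lane types this is expected to mirror the scalar >>, i.e. an arithmetic (sign-preserving) shift. Sketch (values illustrative):

use packed_simd_2::i32x4;

let v = i32x4::new(-8, -4, 4, 8);
// lane-wise right shift by one; negative lanes keep their sign
assert_eq!(v >> i32x4::splat(1), i32x4::new(-4, -2, 2, 4));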

impl ShrAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShrAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShrAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShrAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShrAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShrAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShrAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShrAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShrAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShrAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShrAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShrAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShrAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShrAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShrAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShrAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShrAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShrAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShrAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShrAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShrAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShrAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShrAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShrAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShrAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShrAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShrAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShrAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShrAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShrAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShrAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShrAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShrAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShrAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShrAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShrAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShrAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShrAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShrAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShrAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShrAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShrAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShrAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShrAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShrAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShrAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShrAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShrAssign<Simd<[usize; 8]>> for usizex8[src]
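
ShrAssign provides the in-place >>= form. Sketch:

use packed_simd_2::u16x4;

let mut v = u16x4::new(8, 16, 32, 64);
v >>= u16x4::new(1, 2, 3, 4); // in-place, lane-wise shift
assert_eq!(v, u16x4::splat(4));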

impl Sub<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the - operator.
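
Sub subtracts lane-wise, and the scalar-on-the-left impls (impl Sub<Simd<…>> for f32, etc.) effectively splat the scalar before subtracting. Sketch, with types and lane values chosen for illustration:

use packed_simd_2::f32x4;

let a = f32x4::new(4.0, 3.0, 2.0, 1.0);
// vector - vector: lane-wise subtraction
assert_eq!(a - f32x4::splat(1.0), f32x4::new(3.0, 2.0, 1.0, 0.0));

// scalar - vector: the scalar is broadcast to every lane first
assert_eq!(10.0f32 - a, f32x4::new(6.0, 7.0, 8.0, 9.0));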

impl SubAssign<Simd<[f32; 16]>> for f32x16[src]

impl SubAssign<Simd<[f32; 2]>> for f32x2[src]

impl SubAssign<Simd<[f32; 4]>> for f32x4[src]

impl SubAssign<Simd<[f32; 8]>> for f32x8[src]

impl SubAssign<Simd<[f64; 2]>> for f64x2[src]

impl SubAssign<Simd<[f64; 4]>> for f64x4[src]

impl SubAssign<Simd<[f64; 8]>> for f64x8[src]

impl SubAssign<Simd<[i128; 1]>> for i128x1[src]

impl SubAssign<Simd<[i128; 2]>> for i128x2[src]

impl SubAssign<Simd<[i128; 4]>> for i128x4[src]

impl SubAssign<Simd<[i16; 16]>> for i16x16[src]

impl SubAssign<Simd<[i16; 2]>> for i16x2[src]

impl SubAssign<Simd<[i16; 32]>> for i16x32[src]

impl SubAssign<Simd<[i16; 4]>> for i16x4[src]

impl SubAssign<Simd<[i16; 8]>> for i16x8[src]

impl SubAssign<Simd<[i32; 16]>> for i32x16[src]

impl SubAssign<Simd<[i32; 2]>> for i32x2[src]

impl SubAssign<Simd<[i32; 4]>> for i32x4[src]

impl SubAssign<Simd<[i32; 8]>> for i32x8[src]

impl SubAssign<Simd<[i64; 2]>> for i64x2[src]

impl SubAssign<Simd<[i64; 4]>> for i64x4[src]

impl SubAssign<Simd<[i64; 8]>> for i64x8[src]

impl SubAssign<Simd<[i8; 16]>> for i8x16[src]

impl SubAssign<Simd<[i8; 2]>> for i8x2[src]

impl SubAssign<Simd<[i8; 32]>> for i8x32[src]

impl SubAssign<Simd<[i8; 4]>> for i8x4[src]

impl SubAssign<Simd<[i8; 64]>> for i8x64[src]

impl SubAssign<Simd<[i8; 8]>> for i8x8[src]

impl SubAssign<Simd<[isize; 2]>> for isizex2[src]

impl SubAssign<Simd<[isize; 4]>> for isizex4[src]

impl SubAssign<Simd<[isize; 8]>> for isizex8[src]

impl SubAssign<Simd<[u128; 1]>> for u128x1[src]

impl SubAssign<Simd<[u128; 2]>> for u128x2[src]

impl SubAssign<Simd<[u128; 4]>> for u128x4[src]

impl SubAssign<Simd<[u16; 16]>> for u16x16[src]

impl SubAssign<Simd<[u16; 2]>> for u16x2[src]

impl SubAssign<Simd<[u16; 32]>> for u16x32[src]

impl SubAssign<Simd<[u16; 4]>> for u16x4[src]

impl SubAssign<Simd<[u16; 8]>> for u16x8[src]

impl SubAssign<Simd<[u32; 16]>> for u32x16[src]

impl SubAssign<Simd<[u32; 2]>> for u32x2[src]

impl SubAssign<Simd<[u32; 4]>> for u32x4[src]

impl SubAssign<Simd<[u32; 8]>> for u32x8[src]

impl SubAssign<Simd<[u64; 2]>> for u64x2[src]

impl SubAssign<Simd<[u64; 4]>> for u64x4[src]

impl SubAssign<Simd<[u64; 8]>> for u64x8[src]

impl SubAssign<Simd<[u8; 16]>> for u8x16[src]

impl SubAssign<Simd<[u8; 2]>> for u8x2[src]

impl SubAssign<Simd<[u8; 32]>> for u8x32[src]

impl SubAssign<Simd<[u8; 4]>> for u8x4[src]

impl SubAssign<Simd<[u8; 64]>> for u8x64[src]

impl SubAssign<Simd<[u8; 8]>> for u8x8[src]

impl SubAssign<Simd<[usize; 2]>> for usizex2[src]

impl SubAssign<Simd<[usize; 4]>> for usizex4[src]

impl SubAssign<Simd<[usize; 8]>> for usizex8[src]
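
SubAssign is the in-place -= form. Sketch:

use packed_simd_2::i32x4;

let mut v = i32x4::new(10, 20, 30, 40);
v -= i32x4::splat(10); // in-place, lane-wise subtraction
assert_eq!(v, i32x4::new(0, 10, 20, 30));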

impl<'a> Sum<&'a Simd<[f32; 16]>> for f32x16[src]

impl<'a> Sum<&'a Simd<[f32; 2]>> for f32x2[src]

impl<'a> Sum<&'a Simd<[f32; 4]>> for f32x4[src]

impl<'a> Sum<&'a Simd<[f32; 8]>> for f32x8[src]

impl<'a> Sum<&'a Simd<[f64; 2]>> for f64x2[src]

impl<'a> Sum<&'a Simd<[f64; 4]>> for f64x4[src]

impl<'a> Sum<&'a Simd<[f64; 8]>> for f64x8[src]

impl<'a> Sum<&'a Simd<[i128; 1]>> for i128x1[src]

impl<'a> Sum<&'a Simd<[i128; 2]>> for i128x2[src]

impl<'a> Sum<&'a Simd<[i128; 4]>> for i128x4[src]

impl<'a> Sum<&'a Simd<[i16; 16]>> for i16x16[src]

impl<'a> Sum<&'a Simd<[i16; 2]>> for i16x2[src]

impl<'a> Sum<&'a Simd<[i16; 32]>> for i16x32[src]

impl<'a> Sum<&'a Simd<[i16; 4]>> for i16x4[src]

impl<'a> Sum<&'a Simd<[i16; 8]>> for i16x8[src]

impl<'a> Sum<&'a Simd<[i32; 16]>> for i32x16[src]

impl<'a> Sum<&'a Simd<[i32; 2]>> for i32x2[src]

impl<'a> Sum<&'a Simd<[i32; 4]>> for i32x4[src]

impl<'a> Sum<&'a Simd<[i32; 8]>> for i32x8[src]

impl<'a> Sum<&'a Simd<[i64; 2]>> for i64x2[src]

impl<'a> Sum<&'a Simd<[i64; 4]>> for i64x4[src]

impl<'a> Sum<&'a Simd<[i64; 8]>> for i64x8[src]

impl<'a> Sum<&'a Simd<[i8; 16]>> for i8x16[src]

impl<'a> Sum<&'a Simd<[i8; 2]>> for i8x2[src]

impl<'a> Sum<&'a Simd<[i8; 32]>> for i8x32[src]

impl<'a> Sum<&'a Simd<[i8; 4]>> for i8x4[src]

impl<'a> Sum<&'a Simd<[i8; 64]>> for i8x64[src]

impl<'a> Sum<&'a Simd<[i8; 8]>> for i8x8[src]

impl<'a> Sum<&'a Simd<[isize; 2]>> for isizex2[src]

impl<'a> Sum<&'a Simd<[isize; 4]>> for isizex4[src]

impl<'a> Sum<&'a Simd<[isize; 8]>> for isizex8[src]

impl<'a> Sum<&'a Simd<[u128; 1]>> for u128x1[src]

impl<'a> Sum<&'a Simd<[u128; 2]>> for u128x2[src]

impl<'a> Sum<&'a Simd<[u128; 4]>> for u128x4[src]

impl<'a> Sum<&'a Simd<[u16; 16]>> for u16x16[src]

impl<'a> Sum<&'a Simd<[u16; 2]>> for u16x2[src]

impl<'a> Sum<&'a Simd<[u16; 32]>> for u16x32[src]

impl<'a> Sum<&'a Simd<[u16; 4]>> for u16x4[src]

impl<'a> Sum<&'a Simd<[u16; 8]>> for u16x8[src]

impl<'a> Sum<&'a Simd<[u32; 16]>> for u32x16[src]

impl<'a> Sum<&'a Simd<[u32; 2]>> for u32x2[src]

impl<'a> Sum<&'a Simd<[u32; 4]>> for u32x4[src]

impl<'a> Sum<&'a Simd<[u32; 8]>> for u32x8[src]

impl<'a> Sum<&'a Simd<[u64; 2]>> for u64x2[src]

impl<'a> Sum<&'a Simd<[u64; 4]>> for u64x4[src]

impl<'a> Sum<&'a Simd<[u64; 8]>> for u64x8[src]

impl<'a> Sum<&'a Simd<[u8; 16]>> for u8x16[src]

impl<'a> Sum<&'a Simd<[u8; 2]>> for u8x2[src]

impl<'a> Sum<&'a Simd<[u8; 32]>> for u8x32[src]

impl<'a> Sum<&'a Simd<[u8; 4]>> for u8x4[src]

impl<'a> Sum<&'a Simd<[u8; 64]>> for u8x64[src]

impl<'a> Sum<&'a Simd<[u8; 8]>> for u8x8[src]

impl<'a> Sum<&'a Simd<[usize; 2]>> for usizex2[src]

impl<'a> Sum<&'a Simd<[usize; 4]>> for usizex4[src]

impl<'a> Sum<&'a Simd<[usize; 8]>> for usizex8[src]

impl Sum<Simd<[f32; 16]>> for f32x16[src]

impl Sum<Simd<[f32; 2]>> for f32x2[src]

impl Sum<Simd<[f32; 4]>> for f32x4[src]

impl Sum<Simd<[f32; 8]>> for f32x8[src]

impl Sum<Simd<[f64; 2]>> for f64x2[src]

impl Sum<Simd<[f64; 4]>> for f64x4[src]

impl Sum<Simd<[f64; 8]>> for f64x8[src]

impl Sum<Simd<[i128; 1]>> for i128x1[src]

impl Sum<Simd<[i128; 2]>> for i128x2[src]

impl Sum<Simd<[i128; 4]>> for i128x4[src]

impl Sum<Simd<[i16; 16]>> for i16x16[src]

impl Sum<Simd<[i16; 2]>> for i16x2[src]

impl Sum<Simd<[i16; 32]>> for i16x32[src]

impl Sum<Simd<[i16; 4]>> for i16x4[src]

impl Sum<Simd<[i16; 8]>> for i16x8[src]

impl Sum<Simd<[i32; 16]>> for i32x16[src]

impl Sum<Simd<[i32; 2]>> for i32x2[src]

impl Sum<Simd<[i32; 4]>> for i32x4[src]

impl Sum<Simd<[i32; 8]>> for i32x8[src]

impl Sum<Simd<[i64; 2]>> for i64x2[src]

impl Sum<Simd<[i64; 4]>> for i64x4[src]

impl Sum<Simd<[i64; 8]>> for i64x8[src]

impl Sum<Simd<[i8; 16]>> for i8x16[src]

impl Sum<Simd<[i8; 2]>> for i8x2[src]

impl Sum<Simd<[i8; 32]>> for i8x32[src]

impl Sum<Simd<[i8; 4]>> for i8x4[src]

impl Sum<Simd<[i8; 64]>> for i8x64[src]

impl Sum<Simd<[i8; 8]>> for i8x8[src]

impl Sum<Simd<[isize; 2]>> for isizex2[src]

impl Sum<Simd<[isize; 4]>> for isizex4[src]

impl Sum<Simd<[isize; 8]>> for isizex8[src]

impl Sum<Simd<[u128; 1]>> for u128x1[src]

impl Sum<Simd<[u128; 2]>> for u128x2[src]

impl Sum<Simd<[u128; 4]>> for u128x4[src]

impl Sum<Simd<[u16; 16]>> for u16x16[src]

impl Sum<Simd<[u16; 2]>> for u16x2[src]

impl Sum<Simd<[u16; 32]>> for u16x32[src]

impl Sum<Simd<[u16; 4]>> for u16x4[src]

impl Sum<Simd<[u16; 8]>> for u16x8[src]

impl Sum<Simd<[u32; 16]>> for u32x16[src]

impl Sum<Simd<[u32; 2]>> for u32x2[src]

impl Sum<Simd<[u32; 4]>> for u32x4[src]

impl Sum<Simd<[u32; 8]>> for u32x8[src]

impl Sum<Simd<[u64; 2]>> for u64x2[src]

impl Sum<Simd<[u64; 4]>> for u64x4[src]

impl Sum<Simd<[u64; 8]>> for u64x8[src]

impl Sum<Simd<[u8; 16]>> for u8x16[src]

impl Sum<Simd<[u8; 2]>> for u8x2[src]

impl Sum<Simd<[u8; 32]>> for u8x32[src]

impl Sum<Simd<[u8; 4]>> for u8x4[src]

impl Sum<Simd<[u8; 64]>> for u8x64[src]

impl Sum<Simd<[u8; 8]>> for u8x8[src]

impl Sum<Simd<[usize; 2]>> for usizex2[src]

impl Sum<Simd<[usize; 4]>> for usizex4[src]

impl Sum<Simd<[usize; 8]>> for usizex8[src]
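
The Sum impls let an iterator of vectors, taken either by value or by reference, be folded into a single vector; the vectors are combined lane-wise with +, not reduced across lanes. A small sketch (values illustrative):

use packed_simd_2::u32x4;

let vs = [u32x4::splat(1), u32x4::splat(2), u32x4::splat(3)];

// by reference (Sum<&'a Simd<…>>) and by value (Sum<Simd<…>>)
let by_ref: u32x4 = vs.iter().sum();
let by_val: u32x4 = vs.iter().copied().sum();
assert_eq!(by_ref, u32x4::splat(6));
assert_eq!(by_val, by_ref);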

Auto Trait Implementations

impl<A> Send for Simd<A> where
    <A as SimdArray>::Tuple: Send

impl<A> Sync for Simd<A> where
    <A as SimdArray>::Tuple: Sync

impl<A> Unpin for Simd<A> where
    <A as SimdArray>::Tuple: Unpin
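
These auto traits only require the vector's underlying element tuple to be Send/Sync/Unpin, so vectors of ordinary scalar types can cross thread boundaries as usual. A minimal sketch, with f64x4 chosen purely for illustration:

use packed_simd_2::f64x4;
use std::thread;

let v = f64x4::splat(1.0);
// f64 is Send, so Simd<[f64; 4]> is Send and may move into the closure
let handle = thread::spawn(move || v + v);
assert_eq!(handle.join().unwrap(), f64x4::splat(2.0));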

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized[src]

impl<T> Borrow<T> for T where
    T: ?Sized[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
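
These blanket impls come from the standard library rather than from this crate; for a concrete Simd type they provide identity conversions, and the TryFrom/TryInto path is infallible (Error = Infallible). A small sketch:

use packed_simd_2::u32x4;
use std::convert::TryInto;

let v = u32x4::splat(7);
let w: u32x4 = v.into();              // identity Into via the blanket From<T> for T
let t: u32x4 = v.try_into().unwrap(); // infallible TryInto, Error = Infallible
assert_eq!(w, t);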