Struct lokacore::arch::x86_64::m128d

#[repr(transparent)]
pub struct m128d(pub __m128d);

A 128-bit SIMD value. Always used as f64x2.

  • This documentation numbers the lanes based on the index you'd need to use to access that lane if the value were cast to an array.
  • This is also the way that the type is printed out using Debug, Display, LowerExp, and UpperExp.
  • This is not necessarily the ordering you'll see if you look at an xmm register in a debugger, basically because of how little-endian works.
  • Most operations work per-lane, "lanewise".
  • Some operations work using lane 0 only. When appropriate, these have the same name as the lanewise version but with a 0 on the end, e.g. cmp_eq and cmp_eq0. The other lane is simply copied forward from self.
  • Comparisons give "bool-ish" output, where all bits 1 in a lane is true, and all bits 0 in a lane is false. Unfortunately, all bits 1 in an f64 lane is one of the NaN bit patterns, and NaN != NaN, so it can be a little tricky to work with until you're used to it (a short sketch follows this list).
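
A minimal sketch of the lane numbering and the bool-ish comparison output, using only methods documented below (the values are purely illustrative):

  use lokacore::arch::x86_64::m128d;
  let v = m128d::set_reverse(1.0, 2.0);   // lane 0 = 1.0, lane 1 = 2.0 (array-index order)
  assert_eq!(v.extract0(), 1.0);          // extract0 reads lane 0
  let mask = v.cmp_eq(m128d::splat(1.0)); // lane 0: all bits 1, lane 1: all bits 0
  assert!(mask.extract0().is_nan());      // all bits 1, read back as an f64, is a NaN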

Methods

impl m128d[src]

pub fn add0(self, rhs: Self) -> Self[src]

Adds the low lane, high lane unaffected.

pub fn andnot(self, rhs: Self) -> Self[src]

Bitwise (!self) & rhs
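
A common use for andnot is a branchless select driven by a comparison mask. A minimal sketch using only the comparison and bitwise operators documented on this page (values are illustrative):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::set_reverse(1.0, 8.0);
  let b = m128d::splat(5.0);
  let mask = a.cmp_lt(b);                    // lane 0: all bits 1, lane 1: all bits 0
  let smaller = (mask & a) | mask.andnot(b); // pick a where the mask is set, b elsewhere
  let mut out = [0.0_f64; 2];
  smaller.store_unaligned(&mut out);
  assert_eq!(out, [1.0, 5.0]);               // the lanewise minimum of a and b

(For this particular case min does the same thing in one call; the select pattern matters when the choice isn't a plain min/max.)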

pub fn cast_m128i(self) -> m128i[src]

Cast the bits of this m128d directly to m128i without modification.

pub fn cmp_eq(self, rhs: Self) -> Self[src]

Lanewise self == rhs, bool-ish output

pub fn cmp_eq0(self, rhs: Self) -> Self[src]

Lane 0: self == rhs, bool-ish output

pub fn cmp_ge(self, rhs: Self) -> Self[src]

Lanewise self >= rhs, bool-ish output

pub fn cmp_ge0(self, rhs: Self) -> Self[src]

Lane 0: self >= rhs, bool-ish output

pub fn cmp_gt(self, rhs: Self) -> Self[src]

Lanewise self > rhs, bool-ish output

pub fn cmp_gt0(self, rhs: Self) -> Self[src]

Lane 0: self > rhs, bool-ish output

pub fn cmp_le(self, rhs: Self) -> Self[src]

Lanewise self <= rhs, bool-ish output

pub fn cmp_le0(self, rhs: Self) -> Self[src]

Lane 0: self <= rhs, bool-ish output

pub fn cmp_lt(self, rhs: Self) -> Self[src]

Lanewise self < rhs, bool-ish output

pub fn cmp_lt0(self, rhs: Self) -> Self[src]

Lane 0: self < rhs, bool-ish output

pub fn cmp_ne(self, rhs: Self) -> Self[src]

Lanewise self != rhs, bool-ish output

pub fn cmp_ne0(self, rhs: Self) -> Self[src]

Lane 0: self != rhs, bool-ish output

pub fn cmp_nge(self, rhs: Self) -> Self[src]

Lanewise !(self >= rhs), bool-ish output

Also, 3rd Impact and all that, of course.

pub fn cmp_nge0(self, rhs: Self) -> Self[src]

Lane 0: !(self >= rhs), bool-ish output

pub fn cmp_ngt(self, rhs: Self) -> Self[src]

Lanewise !(self > rhs), bool-ish output

pub fn cmp_ngt0(self, rhs: Self) -> Self[src]

Lane 0: !(self > rhs), bool-ish output

pub fn cmp_nle(self, rhs: Self) -> Self[src]

Lanewise !(self <= rhs), bool-ish output

pub fn cmp_nle0(self, rhs: Self) -> Self[src]

Lane 0: !(self <= rhs), bool-ish output

pub fn cmp_nlt(self, rhs: Self) -> Self[src]

Lanewise !(self < rhs), bool-ish output

pub fn cmp_nlt0(self, rhs: Self) -> Self[src]

Lane 0: !(self < rhs), bool-ish output

pub fn cmp_ordinary(self, rhs: Self) -> Self[src]

Lanewise self.not_nan() & rhs.not_nan(), bool-ish output

pub fn cmp_ordinary0(self, rhs: Self) -> Self[src]

Lane 0: self.not_nan() & rhs.not_nan(), bool-ish output

pub fn cmp_nan(self, rhs: Self) -> Self[src]

Lanewise self.is_nan() | rhs.is_nan(), bool-ish output

pub fn cmp_nan0(self, rhs: Self) -> Self[src]

Lane 0: self.is_nan() | rhs.is_nan(), bool-ish output

pub fn cmpi_eq0(self, rhs: Self) -> i32[src]

Lane 0: self == rhs, 0 or 1 i32 output.

pub fn cmpi_ge0(self, rhs: Self) -> i32[src]

Lane 0: self >= rhs, 0 or 1 i32 output.

pub fn cmpi_gt0(self, rhs: Self) -> i32[src]

Lane 0: self > rhs, 0 or 1 i32 output.

pub fn cmpi_le0(self, rhs: Self) -> i32[src]

Lane 0: self <= rhs, 0 or 1 i32 output.

pub fn cmpi_lt0(self, rhs: Self) -> i32[src]

Lane 0: self < rhs, 0 or 1 i32 output.

pub fn cmpi_ne0(self, rhs: Self) -> i32[src]

Lane 0: self != rhs, 0 or 1 i32 output.
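
The cmpi_* methods return a plain 0-or-1 i32 from lane 0 instead of a bool-ish mask; a small sketch (values are illustrative):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::splat(3.0);
  let b = m128d::set_reverse(3.0, 7.0); // lane 0 = 3.0, lane 1 = 7.0
  assert_eq!(a.cmpi_eq0(b), 1);         // lane 0: 3.0 == 3.0
  assert_eq!(a.cmpi_gt0(b), 0);         // lane 0: 3.0 > 3.0 is false
  // the other lane is ignored; the lanewise cmp_eq would give an all-bits mask instead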

pub fn round_i32x4(self) -> m128i[src]

Round the lanes to i32 and place as the two lower lanes of an m128i

pub fn round_f32x4(self) -> m128[src]

Round the lanes to f32 and place as the two lower lanes of an m128

pub fn extract0(self) -> f64[src]

Get the lower lane value as f64.

pub fn round_i32_extract0(self) -> i32[src]

Round lower lane to i32 and return it.

pub fn round_i64_extract0(self) -> i64[src]

Round lower lane to i64 and return it.

pub fn replace0_with_i32(self, rhs: i32) -> Self[src]

Replace lane 0 with i32 rounded to f64, lane 1 unaffected.
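
A tiny sketch (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let v = m128d::splat(1.5);
  let w = v.replace0_with_i32(9); // lane 0 becomes 9.0, lane 1 stays 1.5
  assert_eq!(w.extract0(), 9.0);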

pub fn replace0_with_i64(self, rhs: i64) -> Self[src]

Replace lane 0 with i64 rounded to f64, lane 1 unaffected.

pub fn replace0_with_f32(self, rhs: m128) -> Self[src]

Replace lane 0 with rhs low f32 rounded to f64, lane 1 unaffected.

pub fn truncate_i32x4(self) -> m128i[src]

Truncate the lanes to i32 and place as the two lower lanes of an m128i

pub fn truncate0_i32(self) -> i32[src]

Truncate lane 0 to i32 and return it.

pub fn truncate0_i64(self) -> i64[src]

Truncate lane 0 to i64 and return it.
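
A short sketch of rounding versus truncation on lane 0. The truncate_* conversions always drop the fractional part; the assertion about round_i32_extract0 assumes the default round-to-nearest rounding mode is in effect:

  use lokacore::arch::x86_64::m128d;
  let v = m128d::splat(2.7);
  assert_eq!(v.truncate0_i32(), 2);      // truncation: toward zero
  assert_eq!(v.round_i32_extract0(), 3); // rounding: nearest, under the default mode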

pub fn div0(self, rhs: Self) -> Self[src]

Divides the low lane, high lane unaffected.

pub fn load(addr: &Align16<[f64; 2]>) -> Self[src]

Load aligned f64 array data.

Loads the 0th index as the 0th lane, and the 1st index as the 1st lane.

pub fn load_aligned_splat(addr: &Align16<f64>) -> Self[src]

Load the 16-byte aligned f64 address into both lanes.

pub fn load0(addr: &f64) -> Self[src]

Load the f64 addressed into the low lane, high lane 0.0.

pub fn replace_high(self, addr: &f64) -> Self[src]

Replace high lane with the float referenced, low lane unaffected.

pub fn replace_low(self, addr: &f64) -> Self[src]

Replace low lane with the float referenced, high lane unaffected.

pub fn load_reverse(addr: &Align16<[f64; 2]>) -> Self[src]

Load aligned f64 array data in reverse order.

Loads the 0th index as the 1st lane, and the 1st index as the 0th lane.

pub fn load_unaligned(addr: &[f64; 2]) -> Self[src]

Load f64 array data without alignment requirement.

Loads the 0th index as the 0th lane, and the 1st index as the 1st lane.
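
A round-trip sketch with the unaligned load and store (values are illustrative):

  use lokacore::arch::x86_64::m128d;
  let arr = [1.0_f64, 2.0];
  let v = m128d::load_unaligned(&arr); // index 0 -> lane 0, index 1 -> lane 1
  let mut out = [0.0_f64; 2];
  v.store_unaligned(&mut out);
  assert_eq!(out, arr);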

pub fn max(self, rhs: Self) -> Self[src]

Lanewise maximum.

pub fn max0(self, rhs: Self) -> Self[src]

Lane 0 maximum, the other lane is copied from self.

pub fn min(self, rhs: Self) -> Self[src]

Lanewise minimum.

pub fn min0(self, rhs: Self) -> Self[src]

Lane 0 minimum, the other lane is copied from self.

pub fn copy0(self, rhs: Self) -> Self[src]

Copies lane 0 from rhs, other lane is unchanged.

pub fn move_mask(self) -> i32[src]

Assumes that this is a bool-ish mask and packs it into an i32.

Specifically, the output i32 has bits 0/1 set to be the same as the most significant bit in lanes 0/1 of self.

(Yes, this name is kinda stupid but I couldn't come up with a better thing to rename it to, oh well.)
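
A short sketch (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::set_reverse(1.0, 5.0);
  let b = m128d::splat(3.0);
  let mask = a.cmp_lt(b);             // lane 0: true, lane 1: false
  assert_eq!(mask.move_mask(), 0b01); // bit n mirrors the most significant bit of lane n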

pub fn mul0(self, rhs: Self) -> Self[src]

Multiplies the low lane, high lane unaffected.

pub fn set(a: f64, b: f64) -> Self[src]

Set two f64 values into an m128d.

Because of how little-endian works, this produces the opposite lane order from what you'd get by putting the arguments into an array and then using load on that array. The same goes for using transmute or similar.
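
A sketch of the ordering difference between set and an array load (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let from_set = m128d::set(1.0, 2.0);
  let from_load = m128d::load_unaligned(&[1.0, 2.0]);
  let (mut a, mut b) = ([0.0_f64; 2], [0.0_f64; 2]);
  from_set.store_unaligned(&mut a);
  from_load.store_unaligned(&mut b);
  assert_eq!(a, [2.0, 1.0]); // set: arguments land in the opposite lane order
  assert_eq!(b, [1.0, 2.0]); // load: array order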

pub fn splat(a: f64) -> Self[src]

Set an f64 as the value in both lanes.

pub fn set0(a: f64) -> Self[src]

Sets the f64 as lane 0, other lane 0.0.

pub fn set_reverse(a: f64, b: f64) -> Self[src]

Set two f64 values into an m128d with reverse ordering.

Because of how little-endian works, this produces the same lane order as you'd get by putting the arguments into an array and then using load on that array. The same goes for using transmute or similar.

pub fn sqrt(self) -> Self[src]

Lanewise square root.

pub fn sqrt_other0(self, rhs: Self) -> Self[src]

The square root of rhs[0] is copied over the top of self[0]; self[1] is unaffected.

pub fn store(self, addr: &mut Align16<[f64; 2]>)[src]

Stores the data here to the aligned address given.

pub fn store0_all(self, addr: &mut Align16<[f64; 2]>)[src]

Stores lane 0 to both slots of the aligned address given.

pub fn store_low(self, addr: &mut f64)[src]

Stores the low lane to the address given.

pub fn store_high(self, addr: &mut f64)[src]

Stores the high lane to the address given.

pub fn store_reverse(self, addr: &mut Align16<[f64; 2]>)[src]

Stores the data here to the aligned address given, reverse order.

pub fn store_unaligned(self, addr: &mut [f64; 2])[src]

Stores the data here to the address given.

pub fn sub0(self, rhs: Self) -> Self[src]

Subtracts the low lane, high lane unaffected.

pub fn unpack_high(self, rhs: Self) -> Self[src]

Gives m128d(self[1], rhs[1])

pub fn unpack_low(self, rhs: Self) -> Self[src]

Gives m128d(self[0], rhs[0])

impl m128d[src]

pub fn add_sub(self, rhs: Self) -> Self[src]

Adds the high lane (1) and subtracts the low lane (0).

out[0] = self[0] - rhs[0]
out[1] = self[1] + rhs[1]
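
A sketch matching the formulas above (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::set_reverse(10.0, 20.0); // lanes: 10.0, 20.0
  let b = m128d::set_reverse(1.0, 2.0);   // lanes: 1.0, 2.0
  let mut out = [0.0_f64; 2];
  a.add_sub(b).store_unaligned(&mut out);
  assert_eq!(out, [9.0, 22.0]); // [10.0 - 1.0, 20.0 + 2.0]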

pub fn horizontal_add(self, rhs: Self) -> Self[src]

Horizontal add both self and rhs, then pack together.

out[0] = self[0] + self[1]
out[1] = rhs[0] + rhs[1]

pub fn horizontal_sub(self, rhs: Self) -> Self[src]

Horizontal subtract both self and rhs, then pack together.

out[0] = self[0] - self[1]
out[1] = rhs[0] - rhs[1]
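
A sketch covering both horizontal operations (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::set_reverse(1.0, 2.0);
  let b = m128d::set_reverse(30.0, 40.0);
  let mut out = [0.0_f64; 2];
  a.horizontal_add(b).store_unaligned(&mut out);
  assert_eq!(out, [3.0, 70.0]);   // [1.0 + 2.0, 30.0 + 40.0]
  a.horizontal_sub(b).store_unaligned(&mut out);
  assert_eq!(out, [-1.0, -10.0]); // [1.0 - 2.0, 30.0 - 40.0]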

pub fn load_splat(addr: &f64) -> Self[src]

Load the given f64 address, duplicating it into both lanes.

pub fn duplicate_low(self) -> Self[src]

Duplicate the low lane of self into both lanes of a new m128d.

out[0] = self[0]
out[1] = self[0]
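
For example (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let v = m128d::set_reverse(7.0, 8.0);
  let mut out = [0.0_f64; 2];
  v.duplicate_low().store_unaligned(&mut out);
  assert_eq!(out, [7.0, 7.0]);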

Trait Implementations

impl Zeroable for m128d[src]

fn zeroed() -> Self[src]

Calls zeroed.

impl Pod for m128d[src]

impl Display for m128d[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Display formats in offset order.

All Formatter information is passed directly to each individual f64 lane being formatted.

impl Debug for m128d[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Debug formats in offset order.

All Formatter information is passed directly to each individual f64 lane being formatted.

impl UpperExp for m128d[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

UpperExp formats in offset order.

All Formatter information is passed directly to each individual f64 lane being formatted.

impl LowerExp for m128d[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

LowerExp formats in offset order.

All Formatter information is passed directly to each individual f64 lane being formatted.

impl Div<m128d> for m128d[src]

type Output = Self

The resulting type after applying the / operator.

fn div(self, rhs: Self) -> Self[src]

Lanewise division.

impl Add<m128d> for m128d[src]

type Output = Self

The resulting type after applying the + operator.

fn add(self, rhs: Self) -> Self[src]

Lanewise addition.

impl Sub<m128d> for m128d[src]

type Output = Self

The resulting type after applying the - operator.

fn sub(self, rhs: Self) -> Self[src]

Lanewise subtraction.

impl Mul<m128d> for m128d[src]

type Output = Self

The resulting type after applying the * operator.

fn mul(self, rhs: Self) -> Self[src]

Lanewise multiplication.

impl Neg for m128d[src]

type Output = Self

The resulting type after applying the - operator.

fn neg(self) -> Self[src]

Lanewise 0.0 - self
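
A small sketch of the lanewise operator overloads (illustrative values):

  use lokacore::arch::x86_64::m128d;
  let a = m128d::set_reverse(1.0, 2.0);
  let b = m128d::splat(10.0);
  let mut out = [0.0_f64; 2];
  (a * b - a).store_unaligned(&mut out);
  assert_eq!(out, [9.0, 18.0]);   // lanewise (lane * 10.0) - lane
  (-a).store_unaligned(&mut out);
  assert_eq!(out, [-1.0, -2.0]);  // Neg is lanewise 0.0 - self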

impl AddAssign<m128d> for m128d[src]

fn add_assign(&mut self, rhs: Self)[src]

Lanewise addition.

impl SubAssign<m128d> for m128d[src]

fn sub_assign(&mut self, rhs: Self)[src]

Lanewise subtraction.

impl MulAssign<m128d> for m128d[src]

fn mul_assign(&mut self, rhs: Self)[src]

Lanewise multiplication.

impl DivAssign<m128d> for m128d[src]

fn div_assign(&mut self, rhs: Self)[src]

Lanewise division.

impl Not for m128d[src]

type Output = Self

The resulting type after applying the ! operator.

fn not(self) -> Self[src]

Bitwise negation

impl BitAnd<m128d> for m128d[src]

type Output = Self

The resulting type after applying the & operator.

fn bitand(self, rhs: Self) -> Self[src]

Bitwise AND.

impl BitOr<m128d> for m128d[src]

type Output = Self

The resulting type after applying the | operator.

fn bitor(self, rhs: Self) -> Self[src]

Bitwise OR.

impl BitXor<m128d> for m128d[src]

type Output = Self

The resulting type after applying the ^ operator.

fn bitxor(self, rhs: Self) -> Self[src]

Bitwise XOR.

impl BitAndAssign<m128d> for m128d[src]

fn bitand_assign(&mut self, rhs: Self)[src]

Bitwise AND.

impl BitOrAssign<m128d> for m128d[src]

fn bitor_assign(&mut self, rhs: Self)[src]

Bitwise OR.

impl BitXorAssign<m128d> for m128d[src]

fn bitxor_assign(&mut self, rhs: Self)[src]

Bitwise XOR.

impl Copy for m128d[src]

impl Clone for m128d[src]

fn clone_from(&mut self, source: &Self)[src]

Performs copy-assignment from source.

Auto Trait Implementations

impl Unpin for m128d

impl Sync for m128d

impl Send for m128d

Blanket Implementations

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T> From<T> for T[src]

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> Any for T where
    T: 'static + ?Sized
[src]