Struct lokacore::arch::x86_64::m128i

#[repr(transparent)]
pub struct m128i(pub __m128i);

A 128-bit SIMD value. Integral data, lanes determined by each op.

  • This documentation numbers the lanes based on the index you'd need to use to access that lane if the value were cast to an array.
  • This is also the way that the type is printed out using Debug, Display, Octal, Binary, LowerHex, and UpperHex.
  • This is not necessarily the ordering you'll see if you look at an xmm register in a debugger! Basically because of how little-endian works.
  • Most operations work per-lane, "lanewise".
  • Some operations work using lane 0 only. When appropriate, these have the same name as the lanewise version but with a 0 on the end. Eg: cmp_eq and cmp_eq0. The other lanes are simply copied forward from self.
  • Comparisons give "bool-ish" output, where all bits 1 in a lane is true, and all bits 0 in a lane is false (see the sketch after this list). Unfortunately, all bits 1 interpreted as an f32 is one of the NaN values, and NaN != NaN, so the float versions can be a little tricky to work with until you're used to them.
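
For example, here is a minimal sketch of the bool-ish mask convention. It uses only methods listed on this page and assumes a build with the matching SSE target features enabled:

use lokacore::arch::x86_64::m128i;

// Equal lanes compare as "true", which is a lane of all 1 bits.
let a = m128i::splat_i32(5);
let b = m128i::splat_i32(5);
let mask = a.cmp_eq_i32(b);

// Viewed as an i32, an all-1s lane is just -1.
assert_eq!(mask.extract0_i32(), -1);

// A common idiom: move_mask_i8 packs the top bit of each of the 16 byte
// lanes into the low 16 bits of an i32, so "every lane true" is 0xFFFF.
assert_eq!(mask.move_mask_i8(), 0xFFFF);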

Methods

impl m128i[src]

pub fn add_i8(self, rhs: Self) -> Self[src]

Lanewise i8 wrapping addition

pub fn add_i16(self, rhs: Self) -> Self[src]

Lanewise i16 wrapping addition

pub fn add_i32(self, rhs: Self) -> Self[src]

Lanewise i32 wrapping addition

pub fn add_i64(self, rhs: Self) -> Self[src]

Lanewise i64 wrapping addition

pub fn saturating_add_i8(self, rhs: Self) -> Self[src]

Lanewise i8 saturating addition

pub fn saturating_add_i16(self, rhs: Self) -> Self[src]

Lanewise i16 saturating addition

pub fn saturating_add_u8(self, rhs: Self) -> Self[src]

Lanewise u8 saturating addition

pub fn saturating_add_u16(self, rhs: Self) -> Self[src]

Lanewise u16 saturating addition
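
A quick contrast of the wrapping and saturating forms (a minimal sketch; every lane is splatted to the same value, so lane ordering doesn't matter here):

use lokacore::arch::x86_64::m128i;

let max = m128i::splat_i8(i8::MAX);
let one = m128i::splat_i8(1);

// Wrapping: i8::MAX + 1 wraps around to i8::MIN in every lane.
let wrapped = max.add_i8(one);
assert_eq!(wrapped.cmp_eq_i8(m128i::splat_i8(i8::MIN)).move_mask_i8(), 0xFFFF);

// Saturating: i8::MAX + 1 clamps at i8::MAX in every lane.
let clamped = max.saturating_add_i8(one);
assert_eq!(clamped.cmp_eq_i8(max).move_mask_i8(), 0xFFFF);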

pub fn andnot(self, rhs: Self) -> Self[src]

Bitwise (!self) & rhs
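
Since the negation lands on self rather than rhs, a small sketch of the argument order (using only methods from this page):

use lokacore::arch::x86_64::m128i;

let a = m128i::splat_i32(0b1100);
let b = m128i::splat_i32(0b1010);
// a.andnot(b) is (!a) & b, so (!0b1100) & 0b1010 == 0b0010 in each lane.
let c = a.andnot(b);
assert_eq!(c.cmp_eq_i32(m128i::splat_i32(0b0010)).move_mask_i8(), 0xFFFF);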

pub fn average_u8(self, rhs: Self) -> Self[src]

Lanewise u8 average: (a + b + 1) >> 1

pub fn average_u16(self, rhs: Self) -> Self[src]

Lanewise u16 average: (a + b + 1) >> 1

pub fn cast_m128(self) -> m128[src]

Cast the bits of this m128i directly to m128 without modification.

pub fn cmp_eq_i8(self, rhs: Self) -> Self[src]

Lanewise i8 equality: bool-ish output

pub fn cmp_eq_i16(self, rhs: Self) -> Self[src]

Lanewise i16 equality: bool-ish output

pub fn cmp_eq_i32(self, rhs: Self) -> Self[src]

Lanewise i32 equality: bool-ish output

pub fn cmp_gt_i8(self, rhs: Self) -> Self[src]

Lanewise i8 greater than: bool-ish output

pub fn cmp_gt_i16(self, rhs: Self) -> Self[src]

Lanewise i16 greater than: bool-ish output

pub fn cmp_gt_i32(self, rhs: Self) -> Self[src]

Lanewise i32 greater than: bool-ish output

pub fn cmp_lt_i8(self, rhs: Self) -> Self[src]

Lanewise i8 less than: bool-ish output

pub fn cmp_lt_i16(self, rhs: Self) -> Self[src]

Lanewise i16 less than: bool-ish output

pub fn cmp_lt_i32(self, rhs: Self) -> Self[src]

Lanewise i32 less than: bool-ish output

pub fn round_low_f64(self) -> m128d[src]

Rounds the lower two i32 lanes to f64 lanes.

pub fn round_f32(self) -> m128[src]

Rounds the i32 lanes to f32 lanes.

pub fn extract0_i32(self) -> i32[src]

Gets out the lowest i32 lane.

pub fn extract0_i64(self) -> i64[src]

Gets out the lowest i64 lane.

pub fn set0_i32(val: i32) -> Self[src]

Places the i32 in the low lane and zeroes the other lanes.

pub fn set0_i64(val: i64) -> Self[src]

Places the i64 in the low lane and zeroes the other lane.
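
A minimal round trip through the lane-0 constructors and extractors:

use lokacore::arch::x86_64::m128i;

// The value goes into the low lane and comes straight back out of it.
assert_eq!(m128i::set0_i32(7).extract0_i32(), 7);
assert_eq!(m128i::set0_i64(-3).extract0_i64(), -3);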

pub fn load(addr: &Align16<i128>) -> Self[src]

Loads the aligned i128 address specified.

pub fn load0_i64(addr: &Align16<i64>) -> Self[src]

Loads the aligned i64 address specified into the low lane.

pub fn load_unaligned(addr: &[u8; 16]) -> Self[src]

Loads 128 bits from the byte array given, no alignment requirements.
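
A byte-level round trip through the unaligned load and store (store_unaligned is documented further down this page):

use lokacore::arch::x86_64::m128i;

let bytes: [u8; 16] = *b"0123456789abcdef";
let v = m128i::load_unaligned(&bytes);
let mut out = [0u8; 16];
v.store_unaligned(&mut out);
// What went in comes back out unchanged.
assert_eq!(bytes, out);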

pub fn mul_i16_hadd(self, rhs: Self) -> Self[src]

Lanewise i16 multiply then horizontal add into four lanes.

The eight i16 multiplies produce eight intermediate i32 values, which then get horizontal added into four i32 values.
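
A sketch with every lane splatted equal, so the pairing is easy to verify by hand:

use lokacore::arch::x86_64::m128i;

// Each of the eight i16 products is 3 * 4 = 12, and each adjacent pair of
// products sums into an i32 lane of 24.
let out = m128i::splat_i16(3).mul_i16_hadd(m128i::splat_i16(4));
assert_eq!(out.cmp_eq_i32(m128i::splat_i32(24)).move_mask_i8(), 0xFFFF);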

pub fn max_u8(self, rhs: Self) -> Self[src]

Lanewise u8 maximum

pub fn min_u8(self, rhs: Self) -> Self[src]

Lanewise u8 minimum

pub fn max_i16(self, rhs: Self) -> Self[src]

Lanewise i16 maximum

pub fn min_i16(self, rhs: Self) -> Self[src]

Lanewise i16 minimum

pub fn copy0_i64(self) -> Self[src]

Copies the low lane i64 into a new value, upper lane is 0.

pub fn move_mask_i8(self) -> i32[src]

Creates a move mask from the i8 lanes: the high bit of each of the 16 lanes, packed into the low 16 bits of the result.

pub fn mul_high_i16(self, rhs: Self) -> Self[src]

Lanewise i16 multiplication, keep high bits.

pub fn mul_low_i16(self, rhs: Self) -> Self[src]

Lanewise i16 multiplication, keep low bits.

pub fn mul_high_u16(self, rhs: Self) -> Self[src]

Lanewise u16 multiplication, keep high bits.

pub fn half_mul_u32(self, rhs: Self) -> Self[src]

The lower u32 half of each i64 lane is multiplied, producing u64 lanes.

pub fn pack_i16_saturating_i8(self, rhs: Self) -> Self[src]

Pack self then rhs i16 values into saturated i8s

pub fn pack_i32_saturating_i16(self, rhs: Self) -> Self[src]

Pack self then rhs i32 values into saturated i16s

pub fn pack_i16_saturating_u8(self, rhs: Self) -> Self[src]

Pack self then rhs i16 values into saturated u8s

pub fn signed_abs_diff_i8(self, rhs: Self) -> Self[src]

Sum of absolute i8 differences, eight at a time into two i64 lanes.

pub fn set_i8(
    a: i8,
    b: i8,
    c: i8,
    d: i8,
    e: i8,
    f: i8,
    g: i8,
    h: i8,
    i: i8,
    j: i8,
    k: i8,
    l: i8,
    m: i8,
    n: i8,
    o: i8,
    p: i8
) -> Self
[src]

Sets the i8 values together in standard order.

pub fn set_reverse_i8(
    a: i8,
    b: i8,
    c: i8,
    d: i8,
    e: i8,
    f: i8,
    g: i8,
    h: i8,
    i: i8,
    j: i8,
    k: i8,
    l: i8,
    m: i8,
    n: i8,
    o: i8,
    p: i8
) -> Self
[src]

Sets the i8 values together in reverse order.

pub fn set_i16(
    a: i16,
    b: i16,
    c: i16,
    d: i16,
    e: i16,
    f: i16,
    g: i16,
    h: i16
) -> Self
[src]

Sets the i16 values together in standard order.

pub fn set_reverse_i16(
    a: i16,
    b: i16,
    c: i16,
    d: i16,
    e: i16,
    f: i16,
    g: i16,
    h: i16
) -> Self
[src]

Sets the i16 values together in reverse order.

pub fn set_i32(a: i32, b: i32, c: i32, d: i32) -> Self[src]

Sets the i32 values together in standard order.

pub fn set_reverse_i32(a: i32, b: i32, c: i32, d: i32) -> Self[src]

Sets the i32 values together in reverse order.

pub fn set_i64(a: i64, b: i64) -> Self[src]

Sets the i64 values together in standard order.

pub fn splat_i8(a: i8) -> Self[src]

Splats the i8 value across all lanes.

pub fn splat_i16(a: i16) -> Self[src]

Splats the i16 value across all lanes.

pub fn splat_i32(a: i32) -> Self[src]

Splats the i32 value across all lanes.

pub fn splat_i64(a: i64) -> Self[src]

Splats the i64 value across all lanes.

pub fn shift_left_i16(self, rhs: Self) -> Self[src]

Lanewise i16 left shift using rhs as an i128: self << rhs

pub fn shift_left_i32(self, rhs: Self) -> Self[src]

Lanewise i32 left shift using rhs as an i128: self << rhs

pub fn shift_left_i64(self, rhs: Self) -> Self[src]

Lanewise i64 left shift using rhs as an i128: self << rhs

pub fn shift_right_sign_i16(self, rhs: Self) -> Self[src]

Lanewise i16 right shift using rhs as an i128: self >> rhs

Sign bit is preserved when shifting.

pub fn shift_right_sign_i32(self, rhs: Self) -> Self[src]

Lanewise i32 right shift using rhs as an i128: self >> rhs

Sign bit is preserved when shifting.

pub fn shift_right_zero_i16(self, rhs: Self) -> Self[src]

Lanewise i16 right shift using rhs as an i128: self >> rhs

Zeroes are shifted in regardless of the sign bit.

pub fn shift_right_zero_i32(self, rhs: Self) -> Self[src]

Lanewise i32 right shift using rhs as an i128: self >> rhs

Zeroes are shifted in regardless of the sign bit.

pub fn shift_right_zero_i64(self, rhs: Self) -> Self[src]

Lanewise i64 right shift using rhs as an i128: self >> rhs

Zeroes are shifted in regardless of the sign bit.
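
A sketch of the whole-register shift count and of the difference between the sign-preserving and zero-filling right shifts (it assumes, as with the underlying SSE2 shift intrinsics, that a count built with set0_i64 is read from the low bits of rhs):

use lokacore::arch::x86_64::m128i;

// The count is a whole SIMD value, not per-lane; set0_i64(3) makes it 3.
let count = m128i::set0_i64(3);

let shifted = m128i::splat_i16(1).shift_left_i16(count);
assert_eq!(shifted.cmp_eq_i16(m128i::splat_i16(8)).move_mask_i8(), 0xFFFF);

let neg = m128i::splat_i16(-8);
// Sign-preserving: -8 >> 3 == -1 in every lane.
let arith = neg.shift_right_sign_i16(count);
assert_eq!(arith.cmp_eq_i16(m128i::splat_i16(-1)).move_mask_i8(), 0xFFFF);
// Zero-filling: 0xFFF8 >> 3 == 0x1FFF in every lane.
let logical = neg.shift_right_zero_i16(count);
assert_eq!(logical.cmp_eq_i16(m128i::splat_i16(0x1FFF)).move_mask_i8(), 0xFFFF);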

pub fn store(self, addr: &mut Align16<i128>)[src]

Stores the data to the aligned address given.

pub fn store0_i64(self, addr: &mut Align16<i64>)[src]

Stores the lower i64 lane to the aligned address given.

pub fn store_unaligned(self, addr: &mut [u8; 16])[src]

Stores the data to the address given, no alignment requirements.

pub fn sub_i8(self, rhs: Self) -> Self[src]

Lanewise i8 wrapping subtraction

pub fn sub_i16(self, rhs: Self) -> Self[src]

Lanewise i16 wrapping subtraction

pub fn sub_i32(self, rhs: Self) -> Self[src]

Lanewise i32 wrapping subtraction

pub fn sub_i64(self, rhs: Self) -> Self[src]

Lanewise i64 wrapping subtraction

pub fn saturating_sub_i8(self, rhs: Self) -> Self[src]

Lanewise i8 saturating subtraction

pub fn saturating_sub_i16(self, rhs: Self) -> Self[src]

Lanewise i16 saturating subtraction

pub fn saturating_sub_u8(self, rhs: Self) -> Self[src]

Lanewise u8 saturating subtraction

pub fn saturating_sub_u16(self, rhs: Self) -> Self[src]

Lanewise u16 saturating subtraction

pub fn unpack_high_i8(self, rhs: Self) -> Self[src]

Unpack i8 values from the high half of self and rhs

pub fn unpack_high_i16(self, rhs: Self) -> Self[src]

Unpack i16 values from the high half of self and rhs

pub fn unpack_high_i32(self, rhs: Self) -> Self[src]

Unpack i32 values from the high half of self and rhs

pub fn unpack_high_i64(self, rhs: Self) -> Self[src]

Unpack i64 values from the high half of self and rhs

pub fn unpack_low_i8(self, rhs: Self) -> Self[src]

Unpack i8 values from the low half of self and rhs

pub fn unpack_low_i16(self, rhs: Self) -> Self[src]

Unpack i16 values from the low half of self and rhs

pub fn unpack_low_i32(self, rhs: Self) -> Self[src]

Unpack i32 values from the low half of self and rhs

pub fn unpack_low_i64(self, rhs: Self) -> Self[src]

Unpack i64 values from the low half of self and rhs

impl m128i[src]

pub fn load_quick_unaligned(addr: *const i128) -> Self[src]

Loads 128-bits of integer data without alignment requirements.

This can perform faster than m128i::load_unaligned if the data would cross a cache line boundary.

impl m128i[src]

pub fn abs_i8(self) -> Self[src]

Lanewise i8 wrapping absolute value.

pub fn abs_i16(self) -> Self[src]

Lanewise i16 wrapping absolute value.

pub fn abs_i32(self) -> Self[src]

Lanewise i32 wrapping absolute value.

pub fn horizontal_add_i16(self, rhs: Self) -> Self[src]

Horizontal add i16 pairs in self and rhs.

out[0]= self[1] + self[0]
out[1]= self[3] + self[2]
out[2]= self[5] + self[4]
out[3]= self[7] + self[6]
out[4]= rhs[1] + rhs[0]
out[5]= rhs[3] + rhs[2]
out[6]= rhs[5] + rhs[4]
out[7]= rhs[7] + rhs[6]

pub fn horizontal_saturating_add_i16(self, rhs: Self) -> Self[src]

Horizontal saturating add i16 pairs in self and rhs.

out[0]= self[1].saturating_add(self[0])
out[1]= self[3].saturating_add(self[2])
out[2]= self[5].saturating_add(self[4])
out[3]= self[7].saturating_add(self[6])
out[4]= rhs[1].saturating_add(rhs[0])
out[5]= rhs[3].saturating_add(rhs[2])
out[6]= rhs[5].saturating_add(rhs[4])
out[7]= rhs[7].saturating_add(rhs[6])

pub fn horizontal_add_i32(self, rhs: Self) -> Self[src]

Horizontal add i32 pairs in self and rhs.

out[0]= self[1] + self[0]
out[1]= self[3] + self[2]
out[2]= rhs[1] + rhs[0]
out[3]= rhs[3] + rhs[2]

pub fn horizontal_sub_i16(self, rhs: Self) -> Self[src]

Horizontal subtract i16 pairs in self and rhs.

out[0]= self[0] - self[1]
out[1]= self[2] - self[3]
out[2]= self[4] - self[5]
out[3]= self[6] - self[7]
out[4]= rhs[0] - rhs[1]
out[5]= rhs[2] - rhs[3]
out[6]= rhs[4] - rhs[5]
out[7]= rhs[6] - rhs[7]

pub fn horizontal_saturating_sub_i16(self, rhs: Self) -> Self[src]

Horizontal saturating subtract i16 pairs in self and rhs.

out[0]= self[0].saturating_sub(self[1])
out[1]= self[2].saturating_sub(self[3])
out[2]= self[4].saturating_sub(self[5])
out[3]= self[6].saturating_sub(self[7])
out[4]= rhs[0].saturating_sub(rhs[1])
out[5]= rhs[2].saturating_sub(rhs[3])
out[6]= rhs[4].saturating_sub(rhs[5])
out[7]= rhs[6].saturating_sub(rhs[7])

pub fn horizontal_sub_i32(self, rhs: Self) -> Self[src]

Horizontal sub i32 pairs in self and rhs.

out[0]= self[0] - self[1]
out[1]= self[2] - self[3]
out[2]= rhs[0] - rhs[1]
out[3]= rhs[2] - rhs[3]

pub fn mul_hadd_u8_to_i16(self, rhs: Self) -> Self[src]

Multiply u8 values in self and rhs into i16 intermediates, then horizontally add adjacent pairs into the output.

out_i16[0]= self_u8[0]*rhs_u8[0] + self_u8[1]*rhs_u8[1]
out_i16[1]= self_u8[2]*rhs_u8[2] + self_u8[3]*rhs_u8[3]
out_i16[2]= self_u8[4]*rhs_u8[4] + self_u8[5]*rhs_u8[5]
out_i16[3]= self_u8[6]*rhs_u8[6] + self_u8[7]*rhs_u8[7]
out_i16[4]= self_u8[8]*rhs_u8[8] + self_u8[9]*rhs_u8[9]
out_i16[5]= self_u8[10]*rhs_u8[10] + self_u8[11]*rhs_u8[11]
out_i16[6]= self_u8[12]*rhs_u8[12] + self_u8[13]*rhs_u8[13]
out_i16[7]= self_u8[14]*rhs_u8[14] + self_u8[15]*rhs_u8[15]

pub fn mul_higher_ish_i16(self, rhs: Self) -> Self[src]

Multiply i16 values in self and rhs into i32, keep high 18 bits, add 1, and then keep the middle 16 bits.

for i in 0..8 {
  out[i] = (((self[i] * rhs[i]) >> 14) + 1) >> 1
}

pub fn shuffle_i8(self, rhs: Self) -> Self[src]

Shuffle i8 values in self according to control mask in rhs.

for i in 0..16 {
  out[i] = if rhs[i] < 0 {
    0
  } else {
    self[rhs[i] & 0b1111]
  };
}
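
A small sketch of the control-mask behavior (it assumes, per the lane-numbering note at the top of this page, that load_unaligned puts array index i into i8 lane i):

use lokacore::arch::x86_64::m128i;

let data = m128i::load_unaligned(&[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]);
// Every control byte selects lane 3 of data; a negative control byte would
// zero that output lane instead.
let out = data.shuffle_i8(m128i::splat_i8(3));
assert_eq!(out.cmp_eq_i8(m128i::splat_i8(13)).move_mask_i8(), 0xFFFF);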

pub fn sign_i8(self, rhs: Self) -> Self[src]

i8: negate, zero, or no-change each lane of self based on rhs.

for i in 0..16 {
  out[i] = match sign(rhs[i]) {
    Positive => self[i],
    Zero => 0,
    Negative => -self[i],
  };
}

pub fn sign_i16(self, rhs: Self) -> Self[src]

i16: negate, zero, or no-change each lane of self based on rhs.

for i in 0..8 {
  out[i] = match sign(rhs[i]) {
    Positive => self[i],
    Zero => 0,
    Negative => -self[i],
  };
}

pub fn sign_i32(self, rhs: Self) -> Self[src]

i32: negate, zero, or no-change each lane of self based on rhs.

for i in 0..4 {
  out[i] = match sign(rhs[i]) {
    Positive => self[i],
    Zero => 0,
    Negative => -self[i],
  };
}

impl m128i[src]

pub fn blend_var_i8(self, rhs: Self, mask: Self) -> Self[src]

i8: blend values in self and rhs using a variable mask.

for i in 0..16 {
  out[i] = if mask[i] < 0 {
    rhs[i]
  } else {
    self[i]
  };
}
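
The mask follows the same bool-ish convention as the comparisons: lanes with the high bit set take rhs, the rest keep self. A minimal sketch:

use lokacore::arch::x86_64::m128i;

let a = m128i::splat_i8(1);
let b = m128i::splat_i8(2);
let take_rhs = m128i::splat_i8(-1); // every lane "true"
let keep_self = m128i::splat_i8(0); // every lane "false"
assert_eq!(a.blend_var_i8(b, take_rhs).cmp_eq_i8(b).move_mask_i8(), 0xFFFF);
assert_eq!(a.blend_var_i8(b, keep_self).cmp_eq_i8(a).move_mask_i8(), 0xFFFF);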

pub fn cmp_eq_i64(self, rhs: Self) -> Self[src]

Lanewise i64 equality comparison, bool-ish output.

pub fn sign_extend_i16_i32(self) -> Self[src]

Sign extend the lower four i16 values into i32.

pub fn sign_extend_i16_i64(self) -> Self[src]

Sign extend the lower two i16 values into i64.

pub fn sign_extend_i32_i64(self) -> Self[src]

Sign extend the lower two i32 values into i64.

pub fn sign_extend_i8_i16(self) -> Self[src]

Sign extend the lower eight i8 values into i16.

pub fn sign_extend_i8_i32(self) -> Self[src]

Sign extend the lower four i8 values into i32.

pub fn sign_extend_i8_i64(self) -> Self[src]

Sign extend the lower two i8 values into i64.

pub fn zero_extend_u16_i32(self) -> Self[src]

Zero extend the lower four u16 values into i32.

pub fn zero_extend_u16_i64(self) -> Self[src]

Zero extend the lower two u16 values into i64.

pub fn zero_extend_u32_i64(self) -> Self[src]

Zero extend the lower two u32 values into i64.

pub fn zero_extend_u8_i16(self) -> Self[src]

Zero extend the lower eight u8 values into i16.

pub fn zero_extend_u8_i32(self) -> Self[src]

Zero extend the lower four u8 values into i32.

pub fn zero_extend_u8_i64(self) -> Self[src]

Zero extend the lower two u8 values into i64.
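
A sketch of the difference between sign and zero extension: the byte 0xFF is -1 as an i8 but 255 as a u8.

use lokacore::arch::x86_64::m128i;

let bytes = m128i::splat_i8(-1);
// Sign extension keeps the value: every output i16 lane is -1.
let signed = bytes.sign_extend_i8_i16();
assert_eq!(signed.cmp_eq_i16(m128i::splat_i16(-1)).move_mask_i8(), 0xFFFF);
// Zero extension keeps the bit pattern: every output i16 lane is 255.
let unsigned = bytes.zero_extend_u8_i16();
assert_eq!(unsigned.cmp_eq_i16(m128i::splat_i16(255)).move_mask_i8(), 0xFFFF);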

pub fn max_i32(self, rhs: Self) -> Self[src]

Lanewise i32 maximum between self and rhs

pub fn max_i8(self, rhs: Self) -> Self[src]

Lanewise i8 maximum between self and rhs

pub fn max_u16(self, rhs: Self) -> Self[src]

Lanewise u16 maximum between self and rhs

pub fn max_u32(self, rhs: Self) -> Self[src]

Lanewise u32 maximum between self and rhs

pub fn min_i32(self, rhs: Self) -> Self[src]

Lanewise i32 minimum between self and rhs

pub fn min_i8(self, rhs: Self) -> Self[src]

Lanewise i8 minimum between self and rhs

pub fn min_u16(self, rhs: Self) -> Self[src]

Lanewise u16 minimum between self and rhs

pub fn min_u32(self, rhs: Self) -> Self[src]

Lanewise u32 minimum between self and rhs

pub fn min_and_position_u16(self) -> Self[src]

Minimum u16 and its position.

out_u16[0] = the minimum lane value of self
out_u16[1] = the index of that minimum lane within self
the rest = zeroed
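
A sketch where every lane ties, so the reported index is 0 (this assumes ties report the first matching index, as with the underlying minpos operation), which makes the whole result bit-identical to set0_i32(7):

use lokacore::arch::x86_64::m128i;

let out = m128i::splat_i16(7).min_and_position_u16();
assert_eq!(out.cmp_eq_i32(m128i::set0_i32(7)).move_mask_i8(), 0xFFFF);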

pub fn widen_mul_i32_i64(self, rhs: Self) -> Self[src]

Multiply the even i32 lanes and produce i64 outputs

out_i64[0] = self_i32[0] * rhs_i32[0]
out_i64[1] = self_i32[2] * rhs_i32[2]

pub fn mul_i32(self, rhs: Self) -> Self[src]

Lanewise i32 multiply, keeping the low 32 bits of each result.

pub fn pack_u16(self, rhs: Self) -> Self[src]

Pack self then rhs i32 lanes into saturated u16 lanes in the output.

pub fn test_all_bits_one(self) -> i32[src]

Sets CF = if (!self) & all_1s == 0 { 1 } else { 0 } (so it is 1 only when every bit of self is 1), then returns CF.

pub fn test_cf(self, rhs: Self) -> i32[src]

Sets ZF and CF as below, returns CF

ZF = if self & rhs == 0 { 1 } else { 0 };
CF = if (!self) & rhs == 0 { 1 } else { 0 };
return CF;

pub fn test_zf(self, rhs: Self) -> i32[src]

Sets ZF and CF as below, returns ZF.

ZF = if self & rhs == 0 { 1 } else { 0 };
CF = if (!self) & rhs == 0 { 1 } else { 0 };
return ZF;

pub fn test_not_zf_cf(self, rhs: Self) -> i32[src]

Sets ZF and CF as below, returns if both are 0.

ZF = if self & rhs == 0 { 1 } else { 0 };
CF = if (!self) & rhs == 0 { 1 } else { 0 };
return if ZF == 0 && CF == 0 { 1 } else { 0 };
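
A sketch of the flag behavior with the two easiest inputs, all zeros and all ones:

use lokacore::arch::x86_64::m128i;

let zeros = m128i::splat_i32(0);
let ones = m128i::splat_i32(-1);

// ZF asks "is self & rhs all zero?": 0 & anything == 0, so ZF == 1.
assert_eq!(zeros.test_zf(ones), 1);
// CF asks "is !self & rhs all zero?": !ones == 0, so CF == 1.
assert_eq!(ones.test_cf(ones), 1);
// And a value of all 1 bits passes the all-bits-one test.
assert_eq!(ones.test_all_bits_one(), 1);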

impl m128i[src]

pub fn cmp_gt_i64(self, rhs: Self) -> Self[src]

Lanewise i64 greater than, bool-ish output.

Trait Implementations

impl Not for m128i[src]

type Output = Self

The resulting type after applying the ! operator.

fn not(self) -> Self[src]

Bitwise negation

impl BitAnd<m128i> for m128i[src]

type Output = Self

The resulting type after applying the & operator.

fn bitand(self, rhs: Self) -> Self[src]

Bitwise AND.

impl BitOr<m128i> for m128i[src]

type Output = Self

The resulting type after applying the | operator.

fn bitor(self, rhs: Self) -> Self[src]

Bitwise OR.

impl BitXor<m128i> for m128i[src]

type Output = Self

The resulting type after applying the ^ operator.

fn bitxor(self, rhs: Self) -> Self[src]

Bitwise XOR.

impl BitAndAssign<m128i> for m128i[src]

fn bitand_assign(&mut self, rhs: Self)[src]

Bitwise AND.

impl BitOrAssign<m128i> for m128i[src]

fn bitor_assign(&mut self, rhs: Self)[src]

Bitwise OR.

impl BitXorAssign<m128i> for m128i[src]

fn bitxor_assign(&mut self, rhs: Self)[src]

Bitwise XOR.

impl Debug for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Debug formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give uX instead of iX output.

Eg, for 4 lanes of u32:

format!("{:#4?}", m)

impl Display for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Display formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give uX instead of iX output.

Eg, for 4 lanes of u32:

format!("{:#4?}", m)

impl Octal for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Octal formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give leading 0o.

Eg, for 4 lanes and leading 0o:

format!("{:#4o}", m)

impl Binary for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

Binary formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give leading 0b.

Eg, for 4 lanes and leading 0b:

format!("{:#4b}", m)

impl LowerHex for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

LowerHex formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give leading 0x.

Eg, for 4 lanes and leading 0x:

format!("{:#4x}", m)

impl UpperHex for m128i[src]

fn fmt(&self, f: &mut Formatter) -> Result[src]

UpperHex formats in offset order.

  • Use width to specify the lane count you want (default 1).
  • Use alternate format specifier to give leading 0x.

Eg, for 4 lanes and leading 0x:

format!("{:#4X}", m)

impl Copy for m128i[src]

impl Clone for m128i[src]

impl Default for m128i[src]

impl Zeroable for m128i[src]

impl Pod for m128i[src]

Auto Trait Implementations

impl Unpin for m128i

impl Send for m128i

impl Sync for m128i

Blanket Implementations

impl<T> From<T> for T[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> Any for T where
    T: 'static + ?Sized
[src]