// cmov v0.4.3
//
// Conditional move CPU intrinsics which are guaranteed on major platforms to
// execute in constant-time and not be rewritten as branches by the compiler.
// Provides wrappers for the CMOV family of instructions on x86/x86_64 and CSEL
// on AArch64, along with a portable "best-effort" fallback.
//
// Documentation: see the crate README (included below via `include_str!`).
#![no_std]
#![doc = include_str!("../README.md")]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg",
    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg"
)]
#![warn(
    clippy::cast_possible_truncation,
    clippy::integer_division_remainder_used,
    clippy::mod_module_files,
    missing_docs,
    missing_debug_implementations,
    missing_copy_implementations,
    rust_2018_idioms,
    trivial_casts,
    trivial_numeric_casts,
    unused_qualifications
)]

#[cfg(not(miri))]
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(any(
    not(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")),
    miri
))]
mod portable;
#[cfg(not(miri))]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod x86;

/// Condition value used to decide whether a move is performed.
///
/// A value of zero means "do not move" for [`Cmov::cmovnz`]; any non-zero
/// value means "move". [`Cmov::cmovz`] has the inverse semantics.
pub type Condition = u8;

/// Conditional move
pub trait Cmov {
    /// Move if non-zero.
    ///
    /// Uses a `test` instruction to check if the given `condition` value is
    /// equal to zero, conditionally moves `value` to `self` when `condition` is
    /// not equal to zero.
    fn cmovnz(&mut self, value: &Self, condition: Condition);

    /// Move if zero.
    ///
    /// Uses a `cmp` instruction to check if the given `condition` value is
    /// equal to zero, and if so, conditionally moves `value` to `self`
    /// when `condition` is equal to zero.
    fn cmovz(&mut self, value: &Self, condition: Condition) {
        // Compute, without branching, a value that is non-zero iff
        // `condition == 0`: for any non-zero byte `c`, `c | c.wrapping_neg()`
        // has its top bit set, so the expression below yields 0; for `c == 0`
        // it yields 1.
        //
        // The previous default used `!condition`, which inverts the *bits*
        // rather than the truth value: any condition in `1..=0xFE` (e.g. `1`)
        // produced a non-zero argument to `cmovnz` and incorrectly performed
        // the move.
        let is_zero = ((condition | condition.wrapping_neg()) >> 7) ^ 1;
        self.cmovnz(value, is_zero)
    }
}

/// Conditional move with equality comparison
pub trait CmovEq {
    /// Move if both inputs are equal.
    ///
    /// Uses a `xor` instruction to compare the two values, and
    /// conditionally moves `input` to `output` when they are equal.
    ///
    /// `output` is left untouched when the values differ.
    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition);

    /// Move if both inputs are not equal.
    ///
    /// Uses a `xor` instruction to compare the two values, and
    /// conditionally moves `input` to `output` when they are not equal.
    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Derive "not equal" from `cmoveq` without branching:
        // `tmp` starts at 1 and is overwritten with 0 iff `self == rhs`,
        // so afterwards `tmp == 1` exactly when the values differ.
        let mut tmp = 1u8;
        self.cmoveq(rhs, 0u8, &mut tmp);
        // Propagate `input` into `output` only when `tmp` is still 1
        // (i.e. `self != rhs`); `output` is left untouched otherwise.
        tmp.cmoveq(&1u8, input, output);
    }
}

impl Cmov for u8 {
    #[inline]
    fn cmovnz(&mut self, value: &Self, condition: Condition) {
        // No dedicated 8-bit implementation: widen to `u16` and reuse the
        // (arch-specific or portable) `u16` implementation, then narrow back.
        let mut tmp = *self as u16;
        tmp.cmovnz(&(*value as u16), condition);
        // Both inputs fit in 8 bits, so the selected result must as well.
        debug_assert!(tmp <= u8::MAX as u16);
        *self = (tmp & 0xFF) as u8;
    }

    #[inline]
    fn cmovz(&mut self, value: &Self, condition: Condition) {
        // Same widening strategy as `cmovnz`, delegating to `u16::cmovz`.
        let mut tmp = *self as u16;
        tmp.cmovz(&(*value as u16), condition);
        debug_assert!(tmp <= u8::MAX as u16);
        *self = (tmp & 0xFF) as u8;
    }
}

impl CmovEq for u8 {
    #[inline]
    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // No dedicated 8-bit implementation: widen both operands to `u16` and
        // reuse the (arch-specific or portable) `u16` implementation. The
        // widening cast is lossless, so equality is preserved exactly.
        (*self as u16).cmoveq(&(*rhs as u16), input, output);
    }

    #[inline]
    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Widen to `u16`, same as `cmoveq` above.
        (*self as u16).cmovne(&(*rhs as u16), input, output);
    }
}

// TODO(tarcieri): address truncation lint
#[allow(clippy::cast_possible_truncation)]
impl Cmov for u128 {
    #[inline]
    fn cmovnz(&mut self, value: &Self, condition: Condition) {
        // There is no native 128-bit conditional move, so split into two
        // 64-bit halves and apply the `u64` implementation to each.
        let mut lo = (*self & u64::MAX as u128) as u64;
        let mut hi = (*self >> 64) as u64;

        lo.cmovnz(&((*value & u64::MAX as u128) as u64), condition);
        hi.cmovnz(&((*value >> 64) as u64), condition);

        // Reassemble the (possibly updated) halves.
        *self = (lo as u128) | ((hi as u128) << 64);
    }

    #[inline]
    fn cmovz(&mut self, value: &Self, condition: Condition) {
        // Same half-splitting strategy as `cmovnz`, delegating to `u64::cmovz`.
        let mut lo = (*self & u64::MAX as u128) as u64;
        let mut hi = (*self >> 64) as u64;

        lo.cmovz(&((*value & u64::MAX as u128) as u64), condition);
        hi.cmovz(&((*value >> 64) as u64), condition);

        *self = (lo as u128) | ((hi as u128) << 64);
    }
}

// TODO(tarcieri): address truncation lint
#[allow(clippy::cast_possible_truncation)]
impl CmovEq for u128 {
    #[inline]
    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Compare as two 64-bit halves using the `u64` implementation.
        let lo = (*self & u64::MAX as u128) as u64;
        let hi = (*self >> 64) as u64;

        // `tmp` starts at 1 and is forced to 0 if either half differs, so
        // afterwards `tmp == 1` iff `self == rhs`. Both halves are always
        // compared; there is no early exit.
        let mut tmp = 1u8;
        lo.cmovne(&((*rhs & u64::MAX as u128) as u64), 0, &mut tmp);
        hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
        // Not equal (`tmp == 0`): copy `input` into `output`.
        tmp.cmoveq(&0, input, output);
    }

    #[inline]
    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Same accumulation scheme as `cmovne`, but the final test is
        // inverted: move `input` only when both halves matched.
        let lo = (*self & u64::MAX as u128) as u64;
        let hi = (*self >> 64) as u64;

        let mut tmp = 1u8;
        lo.cmovne(&((*rhs & u64::MAX as u128) as u64), 0, &mut tmp);
        hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
        // Equal (`tmp == 1`): copy `input` into `output`.
        tmp.cmoveq(&1, input, output);
    }
}

// Impl `Cmov*` by first casting to unsigned then using the unsigned `Cmov` impls
// TODO(tarcieri): use `cast_unsigned`/`cast_signed` to get rid of the `=> u*`
macro_rules! impl_cmov_traits_for_signed_ints {
    ( $($int:ty => $uint:ty),+ ) => {
        $(
            impl Cmov for $int {
                #[inline]
                fn cmovnz(&mut self, value: &Self, condition: Condition) {
                    // `as` between same-width signed/unsigned integers is a
                    // lossless bit-pattern reinterpretation, so round-tripping
                    // through the unsigned type preserves the value exactly.
                    let mut tmp = *self as $uint;
                    tmp.cmovnz(&(*value as $uint), condition);
                    *self = tmp as $int;
                }

                #[inline]
                fn cmovz(&mut self, value: &Self, condition: Condition) {
                    let mut tmp = *self as $uint;
                    tmp.cmovz(&(*value as $uint), condition);
                    *self = tmp as $int;
                }
            }

            impl CmovEq for $int {
                #[inline]
                fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    // Equality of bit patterns is equality of values, so the
                    // unsigned comparison gives the correct result.
                    (*self as $uint).cmoveq(&(*rhs as $uint), input, output);
                }

                #[inline]
                fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    (*self as $uint).cmovne(&(*rhs as $uint), input, output);
                }
            }
        )+
    };
}

impl_cmov_traits_for_signed_ints!(i8 => u8, i16 => u16, i32 => u32, i64 => u64, i128 => u128);

impl<T: CmovEq> CmovEq for [T] {
    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Express equality in terms of `cmovne`: `tmp` starts at 1 and is
        // cleared to 0 if the lengths or any element pair differ, so
        // afterwards `tmp == 1` iff the slices are equal.
        let mut tmp = 1u8;
        self.cmovne(rhs, 0u8, &mut tmp);
        tmp.cmoveq(&1, input, output);
    }

    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Short-circuit the comparison if the slices are of different lengths, and set the output
        // condition to the input condition.
        //
        // NOTE: this is an ordinary branch, so it reveals only the slice
        // lengths — never the slice contents.
        if self.len() != rhs.len() {
            *output = input;
            return;
        }

        // Compare every element pair unconditionally (no early exit): each
        // differing pair moves `input` into `output`; equal pairs leave it
        // untouched.
        for (a, b) in self.iter().zip(rhs.iter()) {
            a.cmovne(b, input, output);
        }
    }
}