//! AArch64 NEON implementation of constant-time comparison. The core vector
//! operations are written as inline assembly so the optimizer cannot see
//! through (and short-circuit) the comparison and mask reduction.

use core::arch::aarch64::*;
use core::arch::asm;

use crate::with_dit;
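
// Note (added for clarity): the signatures below use NEON vector types
// unconditionally, so this file only compiles for aarch64 targets with NEON.
// It is assumed to be gated by the parent crate, along the lines of
// (illustrative, not necessarily the crate's exact cfg):
//
//     #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
//     mod aarch64;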

/// Equivalent of `vceqq_u8` (bytewise compare-equal), written as inline
/// assembly so the compiler cannot optimize the comparison away.
#[must_use]
#[inline(always)]
fn vceqq_u8_hide(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    let mut c;
    unsafe {
        asm!(
            "cmeq {c:v}.16b, {a:v}.16b, {b:v}.16b",
            c = lateout(vreg) c,
            a = in(vreg) a,
            b = in(vreg) b,
            options(pure, nomem, preserves_flags, nostack),
        );
    }
    c
}

/// Equivalent of `vandq_u8` (bytewise AND), written as inline assembly so the
/// compiler cannot fold or reorder the mask accumulation.
#[must_use]
#[inline(always)]
fn vandq_u8_hide(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    let mut c;
    unsafe {
        asm!(
            "and {c:v}.16b, {a:v}.16b, {b:v}.16b",
            c = lateout(vreg) c,
            a = in(vreg) a,
            b = in(vreg) b,
            options(pure, nomem, preserves_flags, nostack),
        );
    }
    c
}

/// Equivalent of `vshrn_n_u16::<4>` (narrowing shift right by 4), written as
/// inline assembly so the compiler cannot see the mask reduction.
#[must_use]
#[inline(always)]
fn vshrn_n_u16_4_hide(a: uint16x8_t) -> uint8x8_t {
    let mut mask;
    unsafe {
        asm!(
            "shrn {mask:v}.8b, {a:v}.8h, #{n}",
            mask = lateout(vreg) mask,
            a = in(vreg) a,
            n = const 4,
            options(pure, nomem, preserves_flags, nostack),
        );
    }
    mask
}

/// Compresses a 16-byte comparison mask into a `u64` with 4 bits per byte:
/// reinterpreting the mask as eight 16-bit lanes and narrowing with a shift
/// right by 4 keeps a nibble from each byte, so an all-equal mask becomes
/// `!0u64` and any mismatched byte clears some bits.
#[must_use]
#[inline(always)]
fn get_mask_u64(mask: uint8x16_t) -> u64 {
    unsafe {
        let mask = vshrn_n_u16_4_hide(vreinterpretq_u16_u8(mask));
        vget_lane_u64(vreinterpret_u64_u8(mask), 0)
    }
}

/// NEON comparison loop: accumulates a bytewise equality mask over `a` and `b`
/// in 32- and 16-byte chunks, then hands the remaining tail bytes (plus the
/// accumulated difference word, where 0 means "equal so far") to the generic
/// implementation.
///
/// # Safety
///
/// `a` and `b` must be valid for reads of `n` bytes.
#[must_use]
#[inline(always)]
unsafe fn constant_time_eq_neon(mut a: *const u8, mut b: *const u8, mut n: usize) -> bool {
    const LANES: usize = 16;

    let tmp = if n >= LANES * 2 {
        // The first 32-byte chunk initializes the two mask accumulators.
        let mut mask0;
        let mut mask1;
        unsafe {
            let tmpa = vld1q_u8_x2(a);
            let tmpb = vld1q_u8_x2(b);
            a = a.add(LANES * 2);
            b = b.add(LANES * 2);
            n -= LANES * 2;
            mask0 = vceqq_u8_hide(tmpa.0, tmpb.0);
            mask1 = vceqq_u8_hide(tmpa.1, tmpb.1);
        }

        // Further 32-byte chunks are ANDed into the accumulators.
        while n >= LANES * 2 {
            unsafe {
                let tmpa = vld1q_u8_x2(a);
                let tmpb = vld1q_u8_x2(b);
                a = a.add(LANES * 2);
                b = b.add(LANES * 2);
                n -= LANES * 2;
                let tmp0 = vceqq_u8_hide(tmpa.0, tmpb.0);
                let tmp1 = vceqq_u8_hide(tmpa.1, tmpb.1);
                mask0 = vandq_u8_hide(mask0, tmp0);
                mask1 = vandq_u8_hide(mask1, tmp1);
            }
        }

        // At most one 16-byte chunk can remain before the byte tail.
        if n >= LANES {
            unsafe {
                let tmpa = vld1q_u8(a);
                let tmpb = vld1q_u8(b);
                a = a.add(LANES);
                b = b.add(LANES);
                n -= LANES;
                let tmp = vceqq_u8_hide(tmpa, tmpb);
                mask0 = vandq_u8_hide(mask0, tmp);
            }
        }

        // An all-equal input leaves the combined mask all ones, so the XOR
        // with `!0` yields 0; any difference leaves some bits set.
        let mask = vandq_u8_hide(mask0, mask1);
        get_mask_u64(mask) ^ !0
    } else if n >= LANES {
        // Single 16-byte chunk.
        let mask = unsafe {
            let tmpa = vld1q_u8(a);
            let tmpb = vld1q_u8(b);
            a = a.add(LANES);
            b = b.add(LANES);
            n -= LANES;
            vceqq_u8_hide(tmpa, tmpb)
        };
        get_mask_u64(mask) ^ !0
    } else {
        0
    };

    // Fewer than 16 bytes remain; fold them (and `tmp`) into the generic code.
    unsafe { crate::generic::constant_time_eq_impl(a, b, n, tmp) }
}

/// Compares two byte slices in constant time with respect to their contents.
///
/// Returns `false` without a constant-time comparison if the lengths differ;
/// only the contents are compared in constant time. The comparison runs under
/// `with_dit`, so that AArch64 Data Independent Timing is in effect where
/// supported.
#[must_use]
pub fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    with_dit(|| {
        a.len() == b.len() && unsafe { constant_time_eq_neon(a.as_ptr(), b.as_ptr(), a.len()) }
    })
}

/// Compares two fixed-size byte arrays in constant time.
#[must_use]
pub fn constant_time_eq_n<const N: usize>(a: &[u8; N], b: &[u8; N]) -> bool {
    with_dit(|| unsafe { constant_time_eq_neon(a.as_ptr(), b.as_ptr(), N) })
}
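
// The tests below are an illustrative addition, not the crate's own test
// suite: a minimal sanity check that exercises the 32-byte loop, the 16-byte
// tail, and the sub-16-byte generic tail through the public API.
#[cfg(test)]
mod tests {
    use super::{constant_time_eq, constant_time_eq_n};

    #[test]
    fn slices_equal_and_unequal() {
        let a = [0xA5u8; 100];
        let mut b = a;

        // Equal slices of assorted lengths, covering every code path above.
        for &len in &[0usize, 1, 15, 16, 17, 31, 32, 33, 64, 100] {
            assert!(constant_time_eq(&a[..len], &b[..len]));
        }

        // A single differing byte must be detected.
        b[40] ^= 1;
        assert!(!constant_time_eq(&a, &b));
        assert!(constant_time_eq(&a[..40], &b[..40]));

        // Slices of different lengths are never equal.
        assert!(!constant_time_eq(&a[..3], &a[..4]));
    }

    #[test]
    fn arrays_equal_and_unequal() {
        let a = [7u8; 48];
        let mut b = a;
        assert!(constant_time_eq_n(&a, &b));
        b[47] = 8;
        assert!(!constant_time_eq_n(&a, &b));
    }
}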