#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use byteorder::{ByteOrder, LittleEndian};
use core::mem;
use Block;
use StateWords;
use IV;
use SIGMA;
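// AVX2 implementation of the BLAKE2b compression function. `compress` handles
// a single block, holding each four-word row of the 4x4 working state in one
// 256-bit vector; `compress_4x` advances four independent states in parallel,
// one state per 64-bit lane of each vector.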
#[inline(always)]
unsafe fn load_256_unaligned(mem_addr: &[u64; 4]) -> __m256i {
_mm256_loadu_si256(mem_addr.as_ptr() as *const __m256i)
}
#[inline(always)]
unsafe fn store_256_unaligned(mem_addr: &mut [u64; 4], a: __m256i) {
_mm256_storeu_si256(mem_addr.as_mut_ptr() as *mut __m256i, a);
}
#[inline(always)]
unsafe fn load_128_unaligned(mem_addr: &[u8; 16]) -> __m128i {
_mm_loadu_si128(mem_addr.as_ptr() as *const __m128i)
}
#[inline(always)]
unsafe fn add(a: __m256i, b: __m256i) -> __m256i {
_mm256_add_epi64(a, b)
}
#[inline(always)]
unsafe fn xor(a: __m256i, b: __m256i) -> __m256i {
_mm256_xor_si256(a, b)
}
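// Port of the C `_MM_SHUFFLE` macro: packs four 2-bit lane selectors into the
// 8-bit immediate expected by the shuffle/permute intrinsics.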
macro_rules! _MM_SHUFFLE {
($z:expr, $y:expr, $x:expr, $w:expr) => {
($z << 6) | ($y << 4) | ($x << 2) | $w
};
}
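// The G function's right-rotations by 32, 24, 16, and 63 bits. Rotation by 32
// is a 32-bit lane swap, 24 and 16 are byte shuffles, and 63 is a shift
// combined with an add (x + x == x << 1).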
#[inline(always)]
unsafe fn rot32(x: __m256i) -> __m256i {
_mm256_shuffle_epi32(x, _MM_SHUFFLE!(2, 3, 0, 1))
}
#[inline(always)]
unsafe fn rot24(x: __m256i) -> __m256i {
let rotate24 = _mm256_setr_epi8(
3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13,
14, 15, 8, 9, 10,
);
_mm256_shuffle_epi8(x, rotate24)
}
#[inline(always)]
unsafe fn rot16(x: __m256i) -> __m256i {
let rotate16 = _mm256_setr_epi8(
2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12,
13, 14, 15, 8, 9,
);
_mm256_shuffle_epi8(x, rotate16)
}
#[inline(always)]
unsafe fn rot63(x: __m256i) -> __m256i {
_mm256_or_si256(_mm256_srli_epi64(x, 63), add(x, x))
}
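// The two halves of the BLAKE2b G function, applied to all four columns (or
// diagonals) at once: `a`, `b`, `c`, and `d` each hold one row of the working
// state, and `m` holds the four message words mixed in by this half-step.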
#[inline(always)]
unsafe fn blake2b_g1_v1(
a: &mut __m256i,
b: &mut __m256i,
c: &mut __m256i,
d: &mut __m256i,
m: &mut __m256i,
) {
*a = add(*a, *m);
*a = add(*a, *b);
*d = xor(*d, *a);
*d = rot32(*d);
*c = add(*c, *d);
*b = xor(*b, *c);
*b = rot24(*b);
}
#[inline(always)]
unsafe fn blake2b_g2_v1(
a: &mut __m256i,
b: &mut __m256i,
c: &mut __m256i,
d: &mut __m256i,
m: &mut __m256i,
) {
*a = add(*a, *m);
*a = add(*a, *b);
*d = xor(*d, *a);
*d = rot16(*d);
*c = add(*c, *d);
*b = xor(*b, *c);
*b = rot63(*b);
}
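// Rotate rows `b`, `c`, and `d` so that the state's diagonals line up as
// columns; row `a` stays in place, hence the unused `_a`. `blake2b_undiag_v1`
// reverses the rotation after the diagonal half of a round.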
#[inline(always)]
unsafe fn blake2b_diag_v1(_a: &mut __m256i, b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
*d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(2, 1, 0, 3));
*c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(1, 0, 3, 2));
*b = _mm256_permute4x64_epi64(*b, _MM_SHUFFLE!(0, 3, 2, 1));
}
#[inline(always)]
unsafe fn blake2b_undiag_v1(_a: &mut __m256i, b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
*d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(0, 3, 2, 1));
*c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(1, 0, 3, 2));
*b = _mm256_permute4x64_epi64(*b, _MM_SHUFFLE!(2, 1, 0, 3));
}
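/// Compresses a single 128-byte block into the BLAKE2b state `h` using AVX2.
///
/// The low and high words of the 128-bit `count` offset, together with the
/// `lastblock` and `lastnode` finalization flags, are XORed into the last four
/// words of the working state (BLAKE2b's t0, t1, f0, and f1).
///
/// Callers must ensure the CPU supports AVX2 before invoking this function.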
#[target_feature(enable = "avx2")]
pub unsafe fn compress(
h: &mut StateWords,
msg: &Block,
count: u128,
lastblock: u64,
lastnode: u64,
) {
let (h_low, h_high) = mut_array_refs!(h, 4, 4);
let (iv_low, iv_high) = array_refs!(&IV, 4, 4);
let count_low = count as i64;
let count_high = (count >> 64) as i64;
let msg_chunks = array_refs!(msg, 16, 16, 16, 16, 16, 16, 16, 16);
let mut a = load_256_unaligned(h_low);
let mut b = load_256_unaligned(h_high);
let mut c = load_256_unaligned(iv_low);
let flags = _mm256_set_epi64x(lastnode as i64, lastblock as i64, count_high, count_low);
let mut d = xor(load_256_unaligned(iv_high), flags);
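    // Broadcast each 16-byte chunk of the message into both 128-bit halves of
    // a vector, so m0..m7 each hold two consecutive message words, duplicated.
    // The unpack/blend/alignr gathers below pull individual words out of them.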
let m0 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.0));
let m1 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.1));
let m2 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.2));
let m3 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.3));
let m4 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.4));
let m5 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.5));
let m6 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.6));
let m7 = _mm256_broadcastsi128_si256(load_128_unaligned(msg_chunks.7));
let iv0 = a;
let iv1 = b;
let mut t0;
let mut t1;
let mut b0;
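    // Each round applies g1 and g2 to the columns, diagonalizes the state,
    // applies g1 and g2 to the diagonals, and undiagonalizes. The message
    // gathers encode the SIGMA word schedule, hard-coded per round.
    // Round 1.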
t0 = _mm256_unpacklo_epi64(m0, m1);
t1 = _mm256_unpacklo_epi64(m2, m3);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m0, m1);
t1 = _mm256_unpackhi_epi64(m2, m3);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_unpacklo_epi64(m4, m5);
t1 = _mm256_unpacklo_epi64(m6, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m4, m5);
t1 = _mm256_unpackhi_epi64(m6, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
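    // Round 2.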
t0 = _mm256_unpacklo_epi64(m7, m2);
t1 = _mm256_unpackhi_epi64(m4, m6);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m5, m4);
t1 = _mm256_alignr_epi8(m3, m7, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_shuffle_epi32(m0, _MM_SHUFFLE!(1, 0, 3, 2));
t1 = _mm256_unpackhi_epi64(m5, m2);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m6, m1);
t1 = _mm256_unpackhi_epi64(m3, m1);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
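    // Round 3.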
t0 = _mm256_alignr_epi8(m6, m5, 8);
t1 = _mm256_unpackhi_epi64(m2, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m4, m0);
t1 = _mm256_blend_epi32(m6, m1, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_blend_epi32(m1, m5, 0x33);
t1 = _mm256_unpackhi_epi64(m3, m4);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m7, m3);
t1 = _mm256_alignr_epi8(m2, m0, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
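    // Round 4.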
t0 = _mm256_unpackhi_epi64(m3, m1);
t1 = _mm256_unpackhi_epi64(m6, m5);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m4, m0);
t1 = _mm256_unpacklo_epi64(m6, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_blend_epi32(m2, m1, 0x33);
t1 = _mm256_blend_epi32(m7, m2, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m3, m5);
t1 = _mm256_unpacklo_epi64(m0, m4);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
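    // Round 5.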
t0 = _mm256_unpackhi_epi64(m4, m2);
t1 = _mm256_unpacklo_epi64(m1, m5);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_blend_epi32(m3, m0, 0x33);
t1 = _mm256_blend_epi32(m7, m2, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_blend_epi32(m5, m7, 0x33);
t1 = _mm256_blend_epi32(m1, m3, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_alignr_epi8(m6, m0, 8);
t1 = _mm256_blend_epi32(m6, m4, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
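    // Round 6.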
t0 = _mm256_unpacklo_epi64(m1, m3);
t1 = _mm256_unpacklo_epi64(m0, m4);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m6, m5);
t1 = _mm256_unpackhi_epi64(m5, m1);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_blend_epi32(m3, m2, 0x33);
t1 = _mm256_unpackhi_epi64(m7, m0);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m6, m2);
t1 = _mm256_blend_epi32(m4, m7, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
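    // Round 7.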
t0 = _mm256_blend_epi32(m0, m6, 0x33);
t1 = _mm256_unpacklo_epi64(m7, m2);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m2, m7);
t1 = _mm256_alignr_epi8(m5, m6, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_unpacklo_epi64(m0, m3);
t1 = _mm256_shuffle_epi32(m4, _MM_SHUFFLE!(1, 0, 3, 2));
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m3, m1);
t1 = _mm256_blend_epi32(m5, m1, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
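    // Round 8.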
t0 = _mm256_unpackhi_epi64(m6, m3);
t1 = _mm256_blend_epi32(m1, m6, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_alignr_epi8(m7, m5, 8);
t1 = _mm256_unpackhi_epi64(m0, m4);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_unpackhi_epi64(m2, m7);
t1 = _mm256_unpacklo_epi64(m4, m1);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m0, m2);
t1 = _mm256_unpacklo_epi64(m3, m5);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
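    // Round 9.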
t0 = _mm256_unpacklo_epi64(m3, m7);
t1 = _mm256_alignr_epi8(m0, m5, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m7, m4);
t1 = _mm256_alignr_epi8(m4, m1, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = m6;
t1 = _mm256_alignr_epi8(m5, m0, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_blend_epi32(m3, m1, 0x33);
t1 = m2;
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
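    // Round 10.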
t0 = _mm256_unpacklo_epi64(m5, m4);
t1 = _mm256_unpackhi_epi64(m3, m0);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m1, m2);
t1 = _mm256_blend_epi32(m2, m3, 0x33);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_unpackhi_epi64(m7, m4);
t1 = _mm256_unpackhi_epi64(m1, m6);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_alignr_epi8(m7, m5, 8);
t1 = _mm256_unpacklo_epi64(m6, m0);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
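    // Round 11 (same message schedule as round 1).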
t0 = _mm256_unpacklo_epi64(m0, m1);
t1 = _mm256_unpacklo_epi64(m2, m3);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m0, m1);
t1 = _mm256_unpackhi_epi64(m2, m3);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_unpacklo_epi64(m4, m5);
t1 = _mm256_unpacklo_epi64(m6, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpackhi_epi64(m4, m5);
t1 = _mm256_unpackhi_epi64(m6, m7);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
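    // Round 12 (same message schedule as round 2).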
t0 = _mm256_unpacklo_epi64(m7, m2);
t1 = _mm256_unpackhi_epi64(m4, m6);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m5, m4);
t1 = _mm256_alignr_epi8(m3, m7, 8);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_diag_v1(&mut a, &mut b, &mut c, &mut d);
t0 = _mm256_shuffle_epi32(m0, _MM_SHUFFLE!(1, 0, 3, 2));
t1 = _mm256_unpackhi_epi64(m5, m2);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g1_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
t0 = _mm256_unpacklo_epi64(m6, m1);
t1 = _mm256_unpackhi_epi64(m3, m1);
b0 = _mm256_blend_epi32(t0, t1, 0xF0);
blake2b_g2_v1(&mut a, &mut b, &mut c, &mut d, &mut b0);
blake2b_undiag_v1(&mut a, &mut b, &mut c, &mut d);
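    // Finalize: fold the two halves of the working state together and XOR in
    // the original input state, saved above in iv0 and iv1.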
a = xor(a, c);
b = xor(b, d);
a = xor(a, iv0);
b = xor(b, iv1);
store_256_unaligned(h_low, a);
store_256_unaligned(h_high, b);
}
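// The helpers below implement the 4-way variant: four independent BLAKE2b
// states are processed simultaneously, one state per 64-bit lane.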
#[inline(always)]
unsafe fn load_256_from_u64(x: u64) -> __m256i {
_mm256_set1_epi64x(x as i64)
}
#[inline(always)]
unsafe fn load_256_from_4xu64(x1: u64, x2: u64, x3: u64, x4: u64) -> __m256i {
_mm256_set_epi64x(x4 as i64, x3 as i64, x2 as i64, x1 as i64)
}
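// Gather little-endian message word `i` from each of the four blocks into one
// vector, one block per lane.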
#[inline(always)]
unsafe fn load_msg4_words(
msg0: &Block,
msg1: &Block,
msg2: &Block,
msg3: &Block,
i: usize,
) -> __m256i {
load_256_from_4xu64(
LittleEndian::read_u64(&msg0[8 * i..]),
LittleEndian::read_u64(&msg1[8 * i..]),
LittleEndian::read_u64(&msg2[8 * i..]),
LittleEndian::read_u64(&msg3[8 * i..]),
)
}
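// One full BLAKE2b round over four states at once: the column step followed by
// the diagonal step, with message words selected through SIGMA[r].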
#[inline(always)]
unsafe fn blake2b_round_4x(v: &mut [__m256i; 16], m: &[__m256i; 16], r: usize) {
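    // Column step: G applied to (v0, v4, v8, v12), (v1, v5, v9, v13),
    // (v2, v6, v10, v14), and (v3, v7, v11, v15).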
v[0] = add(v[0], m[SIGMA[r][0] as usize]);
v[1] = add(v[1], m[SIGMA[r][2] as usize]);
v[2] = add(v[2], m[SIGMA[r][4] as usize]);
v[3] = add(v[3], m[SIGMA[r][6] as usize]);
v[0] = add(v[0], v[4]);
v[1] = add(v[1], v[5]);
v[2] = add(v[2], v[6]);
v[3] = add(v[3], v[7]);
v[12] = xor(v[12], v[0]);
v[13] = xor(v[13], v[1]);
v[14] = xor(v[14], v[2]);
v[15] = xor(v[15], v[3]);
v[12] = rot32(v[12]);
v[13] = rot32(v[13]);
v[14] = rot32(v[14]);
v[15] = rot32(v[15]);
v[8] = add(v[8], v[12]);
v[9] = add(v[9], v[13]);
v[10] = add(v[10], v[14]);
v[11] = add(v[11], v[15]);
v[4] = xor(v[4], v[8]);
v[5] = xor(v[5], v[9]);
v[6] = xor(v[6], v[10]);
v[7] = xor(v[7], v[11]);
v[4] = rot24(v[4]);
v[5] = rot24(v[5]);
v[6] = rot24(v[6]);
v[7] = rot24(v[7]);
v[0] = add(v[0], m[SIGMA[r][1] as usize]);
v[1] = add(v[1], m[SIGMA[r][3] as usize]);
v[2] = add(v[2], m[SIGMA[r][5] as usize]);
v[3] = add(v[3], m[SIGMA[r][7] as usize]);
v[0] = add(v[0], v[4]);
v[1] = add(v[1], v[5]);
v[2] = add(v[2], v[6]);
v[3] = add(v[3], v[7]);
v[12] = xor(v[12], v[0]);
v[13] = xor(v[13], v[1]);
v[14] = xor(v[14], v[2]);
v[15] = xor(v[15], v[3]);
v[12] = rot16(v[12]);
v[13] = rot16(v[13]);
v[14] = rot16(v[14]);
v[15] = rot16(v[15]);
v[8] = add(v[8], v[12]);
v[9] = add(v[9], v[13]);
v[10] = add(v[10], v[14]);
v[11] = add(v[11], v[15]);
v[4] = xor(v[4], v[8]);
v[5] = xor(v[5], v[9]);
v[6] = xor(v[6], v[10]);
v[7] = xor(v[7], v[11]);
v[4] = rot63(v[4]);
v[5] = rot63(v[5]);
v[6] = rot63(v[6]);
v[7] = rot63(v[7]);
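    // Diagonal step: G applied to (v0, v5, v10, v15), (v1, v6, v11, v12),
    // (v2, v7, v8, v13), and (v3, v4, v9, v14).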
v[0] = add(v[0], m[SIGMA[r][8] as usize]);
v[1] = add(v[1], m[SIGMA[r][10] as usize]);
v[2] = add(v[2], m[SIGMA[r][12] as usize]);
v[3] = add(v[3], m[SIGMA[r][14] as usize]);
v[0] = add(v[0], v[5]);
v[1] = add(v[1], v[6]);
v[2] = add(v[2], v[7]);
v[3] = add(v[3], v[4]);
v[15] = xor(v[15], v[0]);
v[12] = xor(v[12], v[1]);
v[13] = xor(v[13], v[2]);
v[14] = xor(v[14], v[3]);
v[15] = rot32(v[15]);
v[12] = rot32(v[12]);
v[13] = rot32(v[13]);
v[14] = rot32(v[14]);
v[10] = add(v[10], v[15]);
v[11] = add(v[11], v[12]);
v[8] = add(v[8], v[13]);
v[9] = add(v[9], v[14]);
v[5] = xor(v[5], v[10]);
v[6] = xor(v[6], v[11]);
v[7] = xor(v[7], v[8]);
v[4] = xor(v[4], v[9]);
v[5] = rot24(v[5]);
v[6] = rot24(v[6]);
v[7] = rot24(v[7]);
v[4] = rot24(v[4]);
v[0] = add(v[0], m[SIGMA[r][9] as usize]);
v[1] = add(v[1], m[SIGMA[r][11] as usize]);
v[2] = add(v[2], m[SIGMA[r][13] as usize]);
v[3] = add(v[3], m[SIGMA[r][15] as usize]);
v[0] = add(v[0], v[5]);
v[1] = add(v[1], v[6]);
v[2] = add(v[2], v[7]);
v[3] = add(v[3], v[4]);
v[15] = xor(v[15], v[0]);
v[12] = xor(v[12], v[1]);
v[13] = xor(v[13], v[2]);
v[14] = xor(v[14], v[3]);
v[15] = rot16(v[15]);
v[12] = rot16(v[12]);
v[13] = rot16(v[13]);
v[14] = rot16(v[14]);
v[10] = add(v[10], v[15]);
v[11] = add(v[11], v[12]);
v[8] = add(v[8], v[13]);
v[9] = add(v[9], v[14]);
v[5] = xor(v[5], v[10]);
v[6] = xor(v[6], v[11]);
v[7] = xor(v[7], v[8]);
v[4] = xor(v[4], v[9]);
v[5] = rot63(v[5]);
v[6] = rot63(v[6]);
v[7] = rot63(v[7]);
v[4] = rot63(v[4]);
}
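// Compute word `i` of the output state for all four lanes, new h[i] =
// old h[i] ^ v[i] ^ v[i + 8], and scatter lane `j` back into `hj`.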
#[inline(always)]
unsafe fn export_state_words_4x(
orig_vec: __m256i,
low_state: __m256i,
high_state: __m256i,
h0: &mut StateWords,
h1: &mut StateWords,
h2: &mut StateWords,
h3: &mut StateWords,
i: usize,
) {
let parts: [u64; 4] = mem::transmute(xor(xor(orig_vec, low_state), high_state));
h0[i] = parts[0];
h1[i] = parts[1];
h2[i] = parts[2];
h3[i] = parts[3];
}
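/// Compresses one 128-byte block into each of four independent BLAKE2b states
/// using AVX2. This is the 4-way counterpart of `compress`: every parameter
/// comes in quadruplicate, and state `n` occupies lane `n` of each vector.
///
/// Callers must ensure the CPU supports AVX2 before invoking this function.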
#[target_feature(enable = "avx2")]
pub unsafe fn compress_4x(
h0: &mut StateWords,
h1: &mut StateWords,
h2: &mut StateWords,
h3: &mut StateWords,
msg0: &Block,
msg1: &Block,
msg2: &Block,
msg3: &Block,
count0: u128,
count1: u128,
count2: u128,
count3: u128,
lastblock0: u64,
lastblock1: u64,
lastblock2: u64,
lastblock3: u64,
lastnode0: u64,
lastnode1: u64,
lastnode2: u64,
lastnode3: u64,
) {
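    // Transpose the states: h_vecs[i] holds word i of h0..h3, one per lane.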
let h_vecs = [
load_256_from_4xu64(h0[0], h1[0], h2[0], h3[0]),
load_256_from_4xu64(h0[1], h1[1], h2[1], h3[1]),
load_256_from_4xu64(h0[2], h1[2], h2[2], h3[2]),
load_256_from_4xu64(h0[3], h1[3], h2[3], h3[3]),
load_256_from_4xu64(h0[4], h1[4], h2[4], h3[4]),
load_256_from_4xu64(h0[5], h1[5], h2[5], h3[5]),
load_256_from_4xu64(h0[6], h1[6], h2[6], h3[6]),
load_256_from_4xu64(h0[7], h1[7], h2[7], h3[7]),
];
let count_low = load_256_from_4xu64(count0 as u64, count1 as u64, count2 as u64, count3 as u64);
let count_high = load_256_from_4xu64(
(count0 >> 64) as u64,
(count1 >> 64) as u64,
(count2 >> 64) as u64,
(count3 >> 64) as u64,
);
let lastblock = load_256_from_4xu64(
lastblock0 as u64,
lastblock1 as u64,
lastblock2 as u64,
lastblock3 as u64,
);
let lastnode = load_256_from_4xu64(
lastnode0 as u64,
lastnode1 as u64,
lastnode2 as u64,
lastnode3 as u64,
);
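    // The 16-word working state: rows 0-7 are the input states, rows 8-15 are
    // the IV with the byte counters and finalization flags XORed into the last
    // four rows.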
let mut v = [
h_vecs[0],
h_vecs[1],
h_vecs[2],
h_vecs[3],
h_vecs[4],
h_vecs[5],
h_vecs[6],
h_vecs[7],
load_256_from_u64(IV[0]),
load_256_from_u64(IV[1]),
load_256_from_u64(IV[2]),
load_256_from_u64(IV[3]),
xor(load_256_from_u64(IV[4]), count_low),
xor(load_256_from_u64(IV[5]), count_high),
xor(load_256_from_u64(IV[6]), lastblock),
xor(load_256_from_u64(IV[7]), lastnode),
];
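    // Transpose the message blocks: m[i] holds word i of msg0..msg3.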
let m = [
load_msg4_words(msg0, msg1, msg2, msg3, 0),
load_msg4_words(msg0, msg1, msg2, msg3, 1),
load_msg4_words(msg0, msg1, msg2, msg3, 2),
load_msg4_words(msg0, msg1, msg2, msg3, 3),
load_msg4_words(msg0, msg1, msg2, msg3, 4),
load_msg4_words(msg0, msg1, msg2, msg3, 5),
load_msg4_words(msg0, msg1, msg2, msg3, 6),
load_msg4_words(msg0, msg1, msg2, msg3, 7),
load_msg4_words(msg0, msg1, msg2, msg3, 8),
load_msg4_words(msg0, msg1, msg2, msg3, 9),
load_msg4_words(msg0, msg1, msg2, msg3, 10),
load_msg4_words(msg0, msg1, msg2, msg3, 11),
load_msg4_words(msg0, msg1, msg2, msg3, 12),
load_msg4_words(msg0, msg1, msg2, msg3, 13),
load_msg4_words(msg0, msg1, msg2, msg3, 14),
load_msg4_words(msg0, msg1, msg2, msg3, 15),
];
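    // Twelve rounds, with the message schedule for round r taken from SIGMA[r].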
blake2b_round_4x(&mut v, &m, 0);
blake2b_round_4x(&mut v, &m, 1);
blake2b_round_4x(&mut v, &m, 2);
blake2b_round_4x(&mut v, &m, 3);
blake2b_round_4x(&mut v, &m, 4);
blake2b_round_4x(&mut v, &m, 5);
blake2b_round_4x(&mut v, &m, 6);
blake2b_round_4x(&mut v, &m, 7);
blake2b_round_4x(&mut v, &m, 8);
blake2b_round_4x(&mut v, &m, 9);
blake2b_round_4x(&mut v, &m, 10);
blake2b_round_4x(&mut v, &m, 11);
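    // Write back the results: h[i] ^= v[i] ^ v[i + 8] for each of the four
    // states.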
export_state_words_4x(h_vecs[0], v[0], v[8], h0, h1, h2, h3, 0);
export_state_words_4x(h_vecs[1], v[1], v[9], h0, h1, h2, h3, 1);
export_state_words_4x(h_vecs[2], v[2], v[10], h0, h1, h2, h3, 2);
export_state_words_4x(h_vecs[3], v[3], v[11], h0, h1, h2, h3, 3);
export_state_words_4x(h_vecs[4], v[4], v[12], h0, h1, h2, h3, 4);
export_state_words_4x(h_vecs[5], v[5], v[13], h0, h1, h2, h3, 5);
export_state_words_4x(h_vecs[6], v[6], v[14], h0, h1, h2, h3, 6);
export_state_words_4x(h_vecs[7], v[7], v[15], h0, h1, h2, h3, 7);
}