#![allow(clippy::indexing_slicing)]
use core::fmt;
#[cfg(target_arch = "x86_64")]
use super::polyval::{accumulate_4blocks, precompute_powers};
use super::{
AeadBufferError, Nonce96, OpenError, SealError, aes, polyval,
targets::{AeadBackend, AeadPrimitive, select_backend},
};
use crate::traits::{Aead, ct};
// AES-256-GCM-SIV parameters (RFC 8452): 256-bit key, 128-bit tag, 96-bit nonce.
const KEY_SIZE: usize = 32;
const TAG_SIZE: usize = 16;
const NONCE_SIZE: usize = Nonce96::LENGTH;
// Upper bound on plaintext length, below the 2^36-byte CTR keystream limit.
// NOTE(review): the 32-byte margin is conservative relative to RFC 8452's
// stated 2^36 plaintext bound — confirm against the callers that enforce it.
const MAX_PLAINTEXT_LEN: u64 = (1u64 << 36).strict_sub(32);
define_aead_key_type!(Aes256GcmSivKey, KEY_SIZE, "AES-256-GCM-SIV secret key (32 bytes).");
define_aead_tag_type!(
    Aes256GcmSivTag,
    TAG_SIZE,
    "AES-256-GCM-SIV authentication tag (16 bytes)."
);
/// AES-256-GCM-SIV (RFC 8452) nonce-misuse-resistant AEAD.
///
/// Holds the expanded schedule of the key-generating key; the per-message
/// POLYVAL and encryption keys are derived from it and the nonce per call.
#[derive(Clone)]
pub struct Aes256GcmSiv {
    // Expanded AES-256 key schedule of the master (key-generating) key.
    master_ek: aes::Aes256EncKey,
    // Backend selected once from the platform capability probe.
    backend: AeadBackend,
}
impl fmt::Debug for Aes256GcmSiv {
    /// Redacts all fields so key material can never leak into debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Aes256GcmSiv").finish_non_exhaustive()
    }
}
impl Aes256GcmSiv {
    /// Key size in bytes (32).
    pub const KEY_SIZE: usize = KEY_SIZE;
    /// Nonce size in bytes (12).
    pub const NONCE_SIZE: usize = NONCE_SIZE;
    /// Tag size in bytes (16).
    pub const TAG_SIZE: usize = TAG_SIZE;

    /// Creates a cipher instance from a 32-byte key.
    ///
    /// Inherent convenience wrapper that delegates to the [`Aead`] trait impl.
    #[inline]
    #[must_use]
    pub fn new(key: &Aes256GcmSivKey) -> Self {
        <Self as Aead>::new(key)
    }

    /// Parses a 16-byte slice into a tag.
    ///
    /// # Errors
    /// Returns [`AeadBufferError`] when `bytes` has the wrong length.
    #[inline]
    pub fn tag_from_slice(bytes: &[u8]) -> Result<Aes256GcmSivTag, AeadBufferError> {
        <Self as Aead>::tag_from_slice(bytes)
    }

    /// Encrypts `buffer` in place and returns the authentication tag.
    ///
    /// # Errors
    /// Propagates [`SealError`] from the trait implementation.
    #[inline]
    pub fn encrypt_in_place(&self, nonce: &Nonce96, aad: &[u8], buffer: &mut [u8]) -> Result<Aes256GcmSivTag, SealError> {
        <Self as Aead>::encrypt_in_place(self, nonce, aad, buffer)
    }

    /// Decrypts `buffer` in place, verifying `tag` over the recovered plaintext.
    ///
    /// # Errors
    /// Returns [`OpenError`] on authentication failure or invalid input.
    #[inline]
    pub fn decrypt_in_place(
        &self,
        nonce: &Nonce96,
        aad: &[u8],
        buffer: &mut [u8],
        tag: &Aes256GcmSivTag,
    ) -> Result<(), OpenError> {
        <Self as Aead>::decrypt_in_place(self, nonce, aad, buffer, tag)
    }

    /// Encrypts `plaintext` into `out` (ciphertext followed by the tag).
    ///
    /// # Errors
    /// Propagates [`SealError`] from the trait implementation.
    #[inline]
    pub fn encrypt(&self, nonce: &Nonce96, aad: &[u8], plaintext: &[u8], out: &mut [u8]) -> Result<(), SealError> {
        <Self as Aead>::encrypt(self, nonce, aad, plaintext, out)
    }

    /// Decrypts `ciphertext_and_tag` into `out` after verifying the tag.
    ///
    /// # Errors
    /// Returns [`OpenError`] on authentication failure or invalid input.
    #[inline]
    pub fn decrypt(
        &self,
        nonce: &Nonce96,
        aad: &[u8],
        ciphertext_and_tag: &[u8],
        out: &mut [u8],
    ) -> Result<(), OpenError> {
        <Self as Aead>::decrypt(self, nonce, aad, ciphertext_and_tag, out)
    }
}
/// Derives the per-message POLYVAL key (16 bytes) and AES-256 encryption
/// key (32 bytes) per RFC 8452 section 4: encrypt six blocks of
/// little-endian counter || nonce under the master key and keep the low
/// 8 bytes of each ciphertext block, in order.
#[inline]
fn derive_keys(master_ek: &aes::Aes256EncKey, nonce: &Nonce96) -> ([u8; 16], [u8; 32]) {
    let nonce_bytes = nonce.as_bytes();
    let mut blocks = [[0u8; 16]; 6];
    for (counter, block) in blocks.iter_mut().enumerate() {
        // Block input layout: 32-bit LE counter in bytes 0..4, nonce in 4..16.
        let counter32 = counter as u32; // six-element array: always fits in u32
        block[0..4].copy_from_slice(&counter32.to_le_bytes());
        block[4..16].copy_from_slice(nonce_bytes);
    }
    aes::aes256_encrypt_blocks_ecb(master_ek, &mut blocks);
    let mut auth_key = [0u8; 16];
    let mut enc_key = [0u8; 32];
    // Blocks 0-1 feed the POLYVAL key, blocks 2-5 feed the encryption key;
    // each contributes only its low half.
    let halves = auth_key
        .chunks_exact_mut(8)
        .chain(enc_key.chunks_exact_mut(8));
    for (half, src) in halves.zip(blocks.iter()) {
        half.copy_from_slice(&src[0..8]);
    }
    // Wipe the intermediate ciphertext blocks; they contain key material.
    ct::zeroize(blocks.as_flattened_mut());
    (auth_key, enc_key)
}
/// Computes the RFC 8452 authentication tag on the portable path:
/// POLYVAL over zero-padded AAD, zero-padded plaintext and the bit-length
/// block, XOR the nonce into the first 12 bytes, clear the top bit of the
/// last byte, then encrypt the result with the per-message AES key.
#[inline]
fn compute_tag(
    auth_key: &[u8; 16],
    enc_ek: &aes::Aes256EncKey,
    nonce: &Nonce96,
    aad: &[u8],
    plaintext: &[u8],
) -> [u8; TAG_SIZE] {
    let mut hasher = polyval::Polyval::new(auth_key);
    hasher.update_padded(aad);
    hasher.update_padded(plaintext);
    let lengths = super::AeadByteLengths::from_usize(aad.len(), plaintext.len());
    hasher.update_block(&lengths.to_le_bits_block());
    let mut tag = hasher.finalize();
    // Fold the 96-bit nonce into the low 12 bytes of the POLYVAL output.
    for (dst, src) in tag.iter_mut().zip(nonce.as_bytes().iter().take(12)) {
        *dst ^= *src;
    }
    // RFC 8452: clear the most significant bit before the final encryption.
    tag[15] &= 0x7f;
    aes::aes256_encrypt_block(enc_ek, &mut tag);
    tag
}
/// POLYVAL reduction strategy available on RISC-V, chosen from CPU caps.
#[cfg(target_arch = "riscv64")]
#[derive(Clone, Copy)]
enum RiscvPolyvalBackend {
    // Pure-software carry-less multiply; always available.
    Portable,
    // Scalar carry-less multiply (selected when Zbc/Zbkc is reported).
    Scalar,
    // Vector carry-less multiply (vector-crypto backend).
    Vector,
}
/// POLYVAL multiply-reduce `a * b` in GF(2^128) via the portable
/// software implementation (no ISA extensions required).
#[cfg(target_arch = "riscv64")]
#[inline]
fn reduce_riscv_portable(a: u128, b: u128) -> u128 {
    polyval::portable_clmul128_reduce_inline(a, b)
}
/// POLYVAL multiply-reduce `a * b` using scalar carry-less multiply.
///
/// Only reached when `riscv_polyval_backend` observed the Zbc or Zbkc
/// capability (or the scalar-crypto backend was selected).
#[cfg(target_arch = "riscv64")]
#[inline]
fn reduce_riscv_scalar(a: u128, b: u128) -> u128 {
    // SAFETY: presumed sound once the CPU capability probe has reported
    // scalar clmul support — TODO confirm against the intrinsic's contract.
    unsafe { polyval::riscv_scalar_clmul128_reduce_inline(a, b) }
}
/// POLYVAL multiply-reduce `a * b` using vector carry-less multiply.
///
/// Only reached when the vector-crypto backend was selected.
#[cfg(target_arch = "riscv64")]
#[inline]
fn reduce_riscv_vector(a: u128, b: u128) -> u128 {
    // SAFETY: presumed sound once the vector-crypto backend selection
    // implies hardware support — TODO confirm against the intrinsic's contract.
    unsafe { polyval::riscv_vector_clmul128_reduce_inline(a, b) }
}
/// Computes the RFC 8452 tag on RISC-V with a caller-supplied POLYVAL
/// multiply-reduce primitive.
///
/// `reduce(acc, h)` multiplies the accumulator by the hash key in the
/// POLYVAL field and reduces; the caller chooses the portable, scalar or
/// vector implementation so this body is written once.
#[cfg(target_arch = "riscv64")]
#[inline]
fn compute_tag_riscv_with_reduce(
    auth_key: &[u8; 16],
    enc_ek: &aes::Aes256EncKey,
    nonce: &Nonce96,
    aad: &[u8],
    plaintext: &[u8],
    reduce: impl Fn(u128, u128) -> u128,
) -> [u8; TAG_SIZE] {
    let h = u128::from_le_bytes(*auth_key);
    let mut acc: u128 = 0;
    // POLYVAL over full 16-byte AAD blocks.
    let mut offset = 0usize;
    while offset.strict_add(16) <= aad.len() {
        let mut block = [0u8; 16];
        block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
        acc ^= u128::from_le_bytes(block);
        acc = reduce(acc, h);
        offset = offset.strict_add(16);
    }
    // Zero-padded partial AAD block, if any.
    if offset < aad.len() {
        let mut block = [0u8; 16];
        block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
        acc ^= u128::from_le_bytes(block);
        acc = reduce(acc, h);
    }
    // POLYVAL over full 16-byte plaintext blocks.
    offset = 0;
    while offset.strict_add(16) <= plaintext.len() {
        let mut block = [0u8; 16];
        block.copy_from_slice(&plaintext[offset..offset.strict_add(16)]);
        acc ^= u128::from_le_bytes(block);
        acc = reduce(acc, h);
        offset = offset.strict_add(16);
    }
    // Zero-padded partial plaintext block, if any.
    if offset < plaintext.len() {
        let mut block = [0u8; 16];
        block[..plaintext.len().strict_sub(offset)].copy_from_slice(&plaintext[offset..]);
        acc ^= u128::from_le_bytes(block);
        acc = reduce(acc, h);
    }
    // Final length block: AAD and plaintext lengths in bits, little-endian.
    let length_block = super::AeadByteLengths::from_usize(aad.len(), plaintext.len()).to_le_bits_block();
    acc ^= u128::from_le_bytes(length_block);
    acc = reduce(acc, h);
    let mut s = acc.to_le_bytes();
    // XOR the 96-bit nonce into the low 12 bytes (RFC 8452 tag derivation).
    let nonce_bytes = nonce.as_bytes();
    let mut j = 0usize;
    while j < 12 {
        s[j] ^= nonce_bytes[j];
        j = j.strict_add(1);
    }
    // Clear the top bit, then encrypt with the per-message key.
    s[15] &= 0x7f;
    aes::aes256_encrypt_block(enc_ek, &mut s);
    s
}
/// Expands an AES-256 key with the schedule implementation matching the
/// selected RISC-V backend.
///
/// Fix: the explicit `AeadBackend::Portable` arm was byte-identical to the
/// `_` catch-all; the two arms are merged so the fallback reads as one path.
/// `Portable` and any backend without a dedicated RISC-V expansion both use
/// the T-table schedule.
#[cfg(target_arch = "riscv64")]
#[inline]
fn expand_key_riscv_for_backend(key: &[u8; 32], backend: AeadBackend) -> aes::Aes256EncKey {
    match backend {
        AeadBackend::Riscv64VectorCrypto => aes::aes256_expand_key_riscv_vector(key),
        AeadBackend::Riscv64ScalarCrypto => aes::aes256_expand_key_riscv_scalar(key),
        AeadBackend::Riscv64Vperm => aes::aes256_expand_key_riscv_vperm(key),
        // Portable and every other backend share the T-table schedule.
        _ => aes::aes256_expand_key_riscv_ttable(key),
    }
}
/// Expands the derived per-message encryption key for the given backend.
///
/// Thin alias over `expand_key_riscv_for_backend`, kept so call sites in
/// the seal/open paths name their intent.
#[cfg(target_arch = "riscv64")]
#[inline]
fn expand_message_key_riscv(enc_key: &[u8; 32], backend: AeadBackend) -> aes::Aes256EncKey {
    expand_key_riscv_for_backend(enc_key, backend)
}
/// Selects the AEAD backend for AES-256-GCM-SIV from the runtime platform
/// architecture and the CPU capability probe.
#[inline]
fn resolve_backend() -> AeadBackend {
    let arch = crate::platform::arch();
    let caps = crate::platform::caps();
    select_backend(AeadPrimitive::Aes256GcmSiv, arch, caps)
}
/// Maps the selected AEAD backend to a POLYVAL strategy on RISC-V.
///
/// The AES backend and the POLYVAL backend are chosen independently: even
/// when AES runs via vperm or the portable T-tables, POLYVAL can still use
/// scalar clmul if the CPU reports the Zbc or Zbkc extension.
#[cfg(target_arch = "riscv64")]
#[inline]
fn riscv_polyval_backend(backend: AeadBackend) -> RiscvPolyvalBackend {
    match backend {
        AeadBackend::Riscv64VectorCrypto => RiscvPolyvalBackend::Vector,
        AeadBackend::Riscv64ScalarCrypto => RiscvPolyvalBackend::Scalar,
        AeadBackend::Portable | AeadBackend::Riscv64Vperm => {
            // Re-probe: scalar clmul may be present even without scalar AES.
            let caps = crate::platform::caps();
            if caps.has(crate::platform::caps::riscv::ZBC) || caps.has(crate::platform::caps::riscv::ZBKC) {
                RiscvPolyvalBackend::Scalar
            } else {
                RiscvPolyvalBackend::Portable
            }
        }
        _ => RiscvPolyvalBackend::Portable,
    }
}
/// Computes the RFC 8452 tag on RISC-V, dispatching to the POLYVAL
/// reduction primitive selected by `riscv_polyval_backend`.
///
/// Each arm monomorphizes `compute_tag_riscv_with_reduce` over a concrete
/// `fn` item so no indirect call remains in the hot loop.
#[cfg(target_arch = "riscv64")]
#[inline]
fn compute_tag_riscv(
    auth_key: &[u8; 16],
    enc_ek: &aes::Aes256EncKey,
    nonce: &Nonce96,
    aad: &[u8],
    plaintext: &[u8],
    backend: AeadBackend,
) -> [u8; TAG_SIZE] {
    match riscv_polyval_backend(backend) {
        RiscvPolyvalBackend::Portable => {
            compute_tag_riscv_with_reduce(auth_key, enc_ek, nonce, aad, plaintext, reduce_riscv_portable)
        }
        RiscvPolyvalBackend::Scalar => {
            compute_tag_riscv_with_reduce(auth_key, enc_ek, nonce, aad, plaintext, reduce_riscv_scalar)
        }
        RiscvPolyvalBackend::Vector => {
            compute_tag_riscv_with_reduce(auth_key, enc_ek, nonce, aad, plaintext, reduce_riscv_vector)
        }
    }
}
/// Seals `buffer` in place on RISC-V: derives the per-message keys from the
/// master key and nonce, computes the tag over AAD + plaintext, then
/// CTR-encrypts the buffer keyed by the tag (RFC 8452 SIV construction).
///
/// Returns the 16-byte tag; derived key material is wiped before returning.
#[cfg(target_arch = "riscv64")]
#[inline]
fn encrypt_riscv(
    master_ek: &aes::Aes256EncKey,
    backend: AeadBackend,
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
) -> [u8; TAG_SIZE] {
    let (mut auth_key, mut enc_key) = derive_keys(master_ek, nonce);
    let ek = expand_message_key_riscv(&enc_key, backend);
    // Tag is computed over the plaintext before encryption (SIV order).
    let tag_bytes = compute_tag_riscv(&auth_key, &ek, nonce, aad, buffer, backend);
    // Initial counter block = tag with the top bit of byte 15 forced on.
    let mut counter_block = tag_bytes;
    counter_block[15] |= 0x80;
    aes::aes256_ctr32_encrypt(&ek, &counter_block, buffer);
    ct::zeroize(&mut auth_key);
    ct::zeroize(&mut enc_key);
    tag_bytes
}
/// Opens `buffer` in place on RISC-V: CTR-decrypts using the received tag
/// as the counter seed, recomputes the tag over the recovered plaintext,
/// and compares in constant time.
///
/// On mismatch the (already decrypted) buffer is zeroized so unverified
/// plaintext is never released to the caller.
///
/// # Errors
/// Returns `VerificationError` when the recomputed tag does not match.
#[cfg(target_arch = "riscv64")]
#[inline]
fn decrypt_riscv(
    master_ek: &aes::Aes256EncKey,
    backend: AeadBackend,
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
    tag: &Aes256GcmSivTag,
) -> Result<(), crate::traits::VerificationError> {
    let (mut auth_key, mut enc_key) = derive_keys(master_ek, nonce);
    let ek = expand_message_key_riscv(&enc_key, backend);
    // Counter seed = received tag with the top bit of byte 15 forced on.
    let mut counter_block = tag.0;
    counter_block[15] |= 0x80;
    aes::aes256_ctr32_encrypt(&ek, &counter_block, buffer);
    // Recompute the expected tag over the candidate plaintext.
    let expected = compute_tag_riscv(&auth_key, &ek, nonce, aad, buffer, backend);
    ct::zeroize(&mut auth_key);
    ct::zeroize(&mut enc_key);
    if !ct::constant_time_eq(&expected, tag.as_bytes()) {
        // Never expose unauthenticated plaintext.
        ct::zeroize(buffer);
        return Err(crate::traits::VerificationError::new());
    }
    Ok(())
}
/// Computes the RFC 8452 tag on x86_64 using 4-block aggregated POLYVAL.
///
/// 64-byte runs are folded with precomputed powers H^4..H^1 via
/// `accumulate_4blocks`; the remainder falls back to one-block
/// multiply-reduce. Output stages match `compute_tag`: length block,
/// nonce XOR into the low 12 bytes, top-bit clear, final AES encryption.
#[cfg(target_arch = "x86_64")]
#[inline]
fn compute_tag_wide(
    auth_key: &[u8; 16],
    enc_ek: &aes::Aes256EncKey,
    nonce: &Nonce96,
    aad: &[u8],
    plaintext: &[u8],
) -> [u8; TAG_SIZE] {
    let h = u128::from_le_bytes(*auth_key);
    // Powers come back lowest-first; the aggregated kernel wants H^4..H^1.
    let powers = precompute_powers(h);
    let h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
    let mut acc: u128 = 0;
    // --- AAD: 64-byte aggregated path ---
    let mut offset = 0usize;
    while offset.strict_add(64) <= aad.len() {
        let mut blocks = [0u128; 4];
        let mut i = 0usize;
        while i < 4 {
            let base = offset.strict_add(i.strict_mul(16));
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[base..base.strict_add(16)]);
            blocks[i] = u128::from_le_bytes(block);
            i = i.strict_add(1);
        }
        acc = accumulate_4blocks(acc, h, &h_powers_rev, &blocks);
        offset = offset.strict_add(64);
    }
    // --- AAD: remaining full 16-byte blocks ---
    while offset.strict_add(16) <= aad.len() {
        let mut block = [0u8; 16];
        block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
        acc ^= u128::from_le_bytes(block);
        acc = polyval::clmul128_reduce(acc, h);
        offset = offset.strict_add(16);
    }
    // --- AAD: zero-padded tail ---
    let remaining_aad = aad.len().strict_sub(offset);
    if remaining_aad > 0 {
        let mut block = [0u8; 16];
        block[..remaining_aad].copy_from_slice(&aad[offset..]);
        acc ^= u128::from_le_bytes(block);
        acc = polyval::clmul128_reduce(acc, h);
    }
    // --- Plaintext: 64-byte aggregated path ---
    offset = 0;
    while offset.strict_add(64) <= plaintext.len() {
        let mut blocks = [0u128; 4];
        let mut i = 0usize;
        while i < 4 {
            let base = offset.strict_add(i.strict_mul(16));
            let mut block = [0u8; 16];
            block.copy_from_slice(&plaintext[base..base.strict_add(16)]);
            blocks[i] = u128::from_le_bytes(block);
            i = i.strict_add(1);
        }
        acc = accumulate_4blocks(acc, h, &h_powers_rev, &blocks);
        offset = offset.strict_add(64);
    }
    // --- Plaintext: remaining full 16-byte blocks ---
    while offset.strict_add(16) <= plaintext.len() {
        let mut block = [0u8; 16];
        block.copy_from_slice(&plaintext[offset..offset.strict_add(16)]);
        acc ^= u128::from_le_bytes(block);
        acc = polyval::clmul128_reduce(acc, h);
        offset = offset.strict_add(16);
    }
    // --- Plaintext: zero-padded tail ---
    let remaining_pt = plaintext.len().strict_sub(offset);
    if remaining_pt > 0 {
        let mut block = [0u8; 16];
        block[..remaining_pt].copy_from_slice(&plaintext[offset..]);
        acc ^= u128::from_le_bytes(block);
        acc = polyval::clmul128_reduce(acc, h);
    }
    // Final length block: AAD and plaintext lengths in bits, little-endian.
    let length_block = super::AeadByteLengths::from_usize(aad.len(), plaintext.len()).to_le_bits_block();
    acc ^= u128::from_le_bytes(length_block);
    acc = polyval::clmul128_reduce(acc, h);
    let mut s = acc.to_le_bytes();
    // XOR the 96-bit nonce into the low 12 bytes.
    let nonce_bytes = nonce.as_bytes();
    let mut j = 0usize;
    while j < 12 {
        s[j] ^= nonce_bytes[j];
        j = j.strict_add(1);
    }
    // Clear the top bit, then encrypt with the per-message key.
    s[15] &= 0x7f;
    aes::aes256_encrypt_block(enc_ek, &mut s);
    s
}
/// Fused AES-256-GCM-SIV seal for aarch64 (AES + NEON intrinsics).
///
/// Takes the already-derived per-message keys, computes the POLYVAL tag
/// over `aad` and the plaintext in `buffer`, then encrypts `buffer` in
/// place with AES-CTR keyed by the tag. Both key buffers are zeroized as
/// soon as their expanded forms exist. Returns the 16-byte tag.
///
/// # Safety
/// The caller must ensure the `aes` and `neon` target features are
/// available on the running CPU.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "aes,neon")]
unsafe fn encrypt_fused_aarch64(
    auth_key: &mut [u8; 16],
    enc_key_bytes: &mut [u8; 32],
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
) -> [u8; TAG_SIZE] {
    unsafe {
        let nonce_bytes = nonce.as_bytes();
        let enc_ek = aes::aarch64_expand_key_inline(enc_key_bytes);
        // Raw key bytes are dead once the schedule is expanded.
        ct::zeroize(enc_key_bytes);
        let h = u128::from_le_bytes(*auth_key);
        ct::zeroize(auth_key);
        let mut acc: u128 = 0;
        // Precompute H^4..H^1 only if some 64-byte aggregated run will occur.
        let mut h_powers_rev = [0u128; 4];
        if aad.len() >= 64 || buffer.len() >= 64 {
            let powers = polyval::precompute_powers(h);
            h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
        }
        // --- POLYVAL over AAD: 64-byte aggregated, 16-byte, then padded tail ---
        let mut offset = 0usize;
        while offset.strict_add(64) <= aad.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&aad[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::aarch64_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= aad.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < aad.len() {
            let mut block = [0u8; 16];
            block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        }
        // --- POLYVAL over the plaintext in `buffer`, same three-stage shape ---
        offset = 0;
        while offset.strict_add(64) <= buffer.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&buffer[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::aarch64_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= buffer.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < buffer.len() {
            let mut block = [0u8; 16];
            block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        }
        // Length block, nonce XOR, top-bit clear, final tag encryption.
        let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
        acc ^= u128::from_le_bytes(length_block);
        acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        let mut tag = acc.to_le_bytes();
        let mut j = 0usize;
        while j < 12 {
            tag[j] ^= nonce_bytes[j];
            j = j.strict_add(1);
        }
        tag[15] &= 0x7f;
        aes::aarch64_encrypt_block_inline(&enc_ek, &mut tag);
        // --- AES-CTR encryption: counter block = tag with top bit of byte 15
        // set; 32-bit little-endian counter lives in bytes 0..4. ---
        let mut counter_block = tag;
        counter_block[15] |= 0x80;
        let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
        offset = 0;
        while offset < buffer.len() {
            counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
            let mut keystream = counter_block;
            aes::aarch64_encrypt_block_inline(&enc_ek, &mut keystream);
            let remaining = buffer.len().strict_sub(offset);
            if remaining >= 16 {
                // Full block: XOR as a single u128 for a wide store.
                let mut d = [0u8; 16];
                d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
                let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
                buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
                offset = offset.strict_add(16);
            } else {
                // Final partial block: byte-wise XOR.
                let mut i = 0usize;
                while i < remaining {
                    buffer[offset.strict_add(i)] ^= keystream[i];
                    i = i.strict_add(1);
                }
                offset = offset.strict_add(remaining);
            }
            // 32-bit counter wraps per RFC 8452 CTR definition.
            ctr = ctr.wrapping_add(1);
        }
        tag
    }
}
/// Fused AES-256-GCM-SIV open for aarch64 (AES + NEON intrinsics).
///
/// CTR-decrypts `buffer` in place using the received tag as counter seed,
/// recomputes the POLYVAL tag over AAD and the recovered plaintext, and
/// compares in constant time. On mismatch the buffer is zeroized so
/// unauthenticated plaintext never escapes.
///
/// # Errors
/// Returns `VerificationError` when the recomputed tag does not match.
///
/// # Safety
/// The caller must ensure the `aes` and `neon` target features are
/// available on the running CPU.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "aes,neon")]
unsafe fn decrypt_fused_aarch64(
    auth_key: &mut [u8; 16],
    enc_key_bytes: &mut [u8; 32],
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
    tag: &Aes256GcmSivTag,
) -> Result<(), crate::traits::VerificationError> {
    unsafe {
        let nonce_bytes = nonce.as_bytes();
        let enc_ek = aes::aarch64_expand_key_inline(enc_key_bytes);
        // Raw key bytes are dead once the schedule is expanded.
        ct::zeroize(enc_key_bytes);
        // --- AES-CTR decryption: counter seed = received tag with top bit of
        // byte 15 set; 32-bit little-endian counter in bytes 0..4. ---
        let mut counter_block = tag.0;
        counter_block[15] |= 0x80;
        let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
        let mut offset = 0usize;
        while offset < buffer.len() {
            counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
            let mut keystream = counter_block;
            aes::aarch64_encrypt_block_inline(&enc_ek, &mut keystream);
            let remaining = buffer.len().strict_sub(offset);
            if remaining >= 16 {
                // Full block: XOR as a single u128 for a wide store.
                let mut d = [0u8; 16];
                d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
                let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
                buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
                offset = offset.strict_add(16);
            } else {
                // Final partial block: byte-wise XOR.
                let mut i = 0usize;
                while i < remaining {
                    buffer[offset.strict_add(i)] ^= keystream[i];
                    i = i.strict_add(1);
                }
                offset = offset.strict_add(remaining);
            }
            ctr = ctr.wrapping_add(1);
        }
        // --- Recompute the tag over AAD + candidate plaintext ---
        let h = u128::from_le_bytes(*auth_key);
        ct::zeroize(auth_key);
        let mut acc: u128 = 0;
        // Precompute H^4..H^1 only if some 64-byte aggregated run will occur.
        let mut h_powers_rev = [0u128; 4];
        if aad.len() >= 64 || buffer.len() >= 64 {
            let powers = polyval::precompute_powers(h);
            h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
        }
        offset = 0;
        while offset.strict_add(64) <= aad.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&aad[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::aarch64_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= aad.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < aad.len() {
            let mut block = [0u8; 16];
            block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        }
        offset = 0;
        while offset.strict_add(64) <= buffer.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&buffer[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::aarch64_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= buffer.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < buffer.len() {
            let mut block = [0u8; 16];
            block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        }
        // Length block, nonce XOR, top-bit clear, final tag encryption.
        let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
        acc ^= u128::from_le_bytes(length_block);
        acc = polyval::aarch64_clmul128_reduce_inline(acc, h);
        let mut expected = acc.to_le_bytes();
        let mut j = 0usize;
        while j < 12 {
            expected[j] ^= nonce_bytes[j];
            j = j.strict_add(1);
        }
        expected[15] &= 0x7f;
        aes::aarch64_encrypt_block_inline(&enc_ek, &mut expected);
        // Constant-time compare; on failure wipe the unauthenticated plaintext.
        if !ct::constant_time_eq(&expected, tag.as_bytes()) {
            ct::zeroize(buffer);
            return Err(crate::traits::VerificationError::new());
        }
        Ok(())
    }
}
/// Fused AES-256-GCM-SIV seal for powerpc64 (POWER8 vector crypto).
///
/// Same structure as the aarch64 path: POLYVAL tag over AAD and the
/// plaintext in `buffer`, then in-place AES-CTR encryption keyed by the
/// tag. Key buffers are zeroized as soon as their expanded forms exist.
/// Returns the 16-byte tag.
///
/// # Safety
/// The caller must ensure the enabled POWER8 vector/crypto target features
/// are available on the running CPU.
#[cfg(target_arch = "powerpc64")]
#[target_feature(enable = "altivec,vsx,power8-vector,power8-crypto")]
unsafe fn encrypt_fused_ppc(
    auth_key: &mut [u8; 16],
    enc_key_bytes: &mut [u8; 32],
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
) -> [u8; TAG_SIZE] {
    unsafe {
        let nonce_bytes = nonce.as_bytes();
        let enc_ek = aes::ppc_expand_key_inline(enc_key_bytes);
        // Raw key bytes are dead once the schedule is expanded.
        ct::zeroize(enc_key_bytes);
        let h = u128::from_le_bytes(*auth_key);
        ct::zeroize(auth_key);
        let mut acc: u128 = 0;
        // Precompute H^4..H^1 only if some 64-byte aggregated run will occur.
        let mut h_powers_rev = [0u128; 4];
        if aad.len() >= 64 || buffer.len() >= 64 {
            let powers = polyval::precompute_powers(h);
            h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
        }
        // --- POLYVAL over AAD: 64-byte aggregated, 16-byte, then padded tail ---
        let mut offset = 0usize;
        while offset.strict_add(64) <= aad.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&aad[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::ppc_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= aad.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < aad.len() {
            let mut block = [0u8; 16];
            block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        }
        // --- POLYVAL over the plaintext in `buffer`, same three-stage shape ---
        offset = 0;
        while offset.strict_add(64) <= buffer.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&buffer[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::ppc_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= buffer.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < buffer.len() {
            let mut block = [0u8; 16];
            block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        }
        // Length block, nonce XOR, top-bit clear, final tag encryption.
        let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
        acc ^= u128::from_le_bytes(length_block);
        acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        let mut tag = acc.to_le_bytes();
        let mut j = 0usize;
        while j < 12 {
            tag[j] ^= nonce_bytes[j];
            j = j.strict_add(1);
        }
        tag[15] &= 0x7f;
        aes::ppc_encrypt_block_inline(&enc_ek, &mut tag);
        // --- AES-CTR encryption: counter block = tag with top bit of byte 15
        // set; 32-bit little-endian counter lives in bytes 0..4. ---
        let mut counter_block = tag;
        counter_block[15] |= 0x80;
        let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
        offset = 0;
        while offset < buffer.len() {
            counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
            let mut keystream = counter_block;
            aes::ppc_encrypt_block_inline(&enc_ek, &mut keystream);
            let remaining = buffer.len().strict_sub(offset);
            if remaining >= 16 {
                // Full block: XOR as a single u128 for a wide store.
                let mut d = [0u8; 16];
                d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
                let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
                buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
                offset = offset.strict_add(16);
            } else {
                // Final partial block: byte-wise XOR.
                let mut i = 0usize;
                while i < remaining {
                    buffer[offset.strict_add(i)] ^= keystream[i];
                    i = i.strict_add(1);
                }
                offset = offset.strict_add(remaining);
            }
            ctr = ctr.wrapping_add(1);
        }
        tag
    }
}
/// Fused AES-256-GCM-SIV open for powerpc64 (POWER8 vector crypto).
///
/// CTR-decrypts `buffer` in place using the received tag as counter seed,
/// recomputes the POLYVAL tag over AAD and the recovered plaintext, and
/// compares in constant time; the buffer is zeroized on mismatch.
///
/// # Errors
/// Returns `VerificationError` when the recomputed tag does not match.
///
/// # Safety
/// The caller must ensure the enabled POWER8 vector/crypto target features
/// are available on the running CPU.
#[cfg(target_arch = "powerpc64")]
#[target_feature(enable = "altivec,vsx,power8-vector,power8-crypto")]
unsafe fn decrypt_fused_ppc(
    auth_key: &mut [u8; 16],
    enc_key_bytes: &mut [u8; 32],
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
    tag: &Aes256GcmSivTag,
) -> Result<(), crate::traits::VerificationError> {
    unsafe {
        let nonce_bytes = nonce.as_bytes();
        let enc_ek = aes::ppc_expand_key_inline(enc_key_bytes);
        // Raw key bytes are dead once the schedule is expanded.
        ct::zeroize(enc_key_bytes);
        // --- AES-CTR decryption: counter seed = received tag with top bit of
        // byte 15 set; 32-bit little-endian counter in bytes 0..4. ---
        let mut counter_block = tag.0;
        counter_block[15] |= 0x80;
        let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
        let mut offset = 0usize;
        while offset < buffer.len() {
            counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
            let mut keystream = counter_block;
            aes::ppc_encrypt_block_inline(&enc_ek, &mut keystream);
            let remaining = buffer.len().strict_sub(offset);
            if remaining >= 16 {
                // Full block: XOR as a single u128 for a wide store.
                let mut d = [0u8; 16];
                d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
                let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
                buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
                offset = offset.strict_add(16);
            } else {
                // Final partial block: byte-wise XOR.
                let mut i = 0usize;
                while i < remaining {
                    buffer[offset.strict_add(i)] ^= keystream[i];
                    i = i.strict_add(1);
                }
                offset = offset.strict_add(remaining);
            }
            ctr = ctr.wrapping_add(1);
        }
        // --- Recompute the tag over AAD + candidate plaintext ---
        let h = u128::from_le_bytes(*auth_key);
        ct::zeroize(auth_key);
        let mut acc: u128 = 0;
        // Precompute H^4..H^1 only if some 64-byte aggregated run will occur.
        let mut h_powers_rev = [0u128; 4];
        if aad.len() >= 64 || buffer.len() >= 64 {
            let powers = polyval::precompute_powers(h);
            h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
        }
        offset = 0;
        while offset.strict_add(64) <= aad.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&aad[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::ppc_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= aad.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < aad.len() {
            let mut block = [0u8; 16];
            block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        }
        offset = 0;
        while offset.strict_add(64) <= buffer.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&buffer[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::ppc_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= buffer.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < buffer.len() {
            let mut block = [0u8; 16];
            block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        }
        // Length block, nonce XOR, top-bit clear, final tag encryption.
        let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
        acc ^= u128::from_le_bytes(length_block);
        acc = polyval::ppc_clmul128_reduce_inline(acc, h);
        let mut expected = acc.to_le_bytes();
        let mut j = 0usize;
        while j < 12 {
            expected[j] ^= nonce_bytes[j];
            j = j.strict_add(1);
        }
        expected[15] &= 0x7f;
        aes::ppc_encrypt_block_inline(&enc_ek, &mut expected);
        // Constant-time compare; on failure wipe the unauthenticated plaintext.
        if !ct::constant_time_eq(&expected, tag.as_bytes()) {
            ct::zeroize(buffer);
            return Err(crate::traits::VerificationError::new());
        }
        Ok(())
    }
}
/// Fused AES-256-GCM-SIV seal for s390x (vector facility).
///
/// Same structure as the other fused paths, with one difference: the s390x
/// block primitive consumes the raw 32-byte key directly (no separate
/// expanded schedule), so `enc_key_bytes` must stay live through the whole
/// CTR loop and is zeroized only at the end.
///
/// # Safety
/// The caller must ensure the `vector` target feature is available on the
/// running CPU.
#[cfg(target_arch = "s390x")]
#[target_feature(enable = "vector")]
unsafe fn encrypt_fused_s390x(
    auth_key: &mut [u8; 16],
    enc_key_bytes: &mut [u8; 32],
    nonce: &Nonce96,
    aad: &[u8],
    buffer: &mut [u8],
) -> [u8; TAG_SIZE] {
    unsafe {
        let nonce_bytes = nonce.as_bytes();
        let h = u128::from_le_bytes(*auth_key);
        ct::zeroize(auth_key);
        let mut acc: u128 = 0;
        // Precompute H^4..H^1 only if some 64-byte aggregated run will occur.
        let mut h_powers_rev = [0u128; 4];
        if aad.len() >= 64 || buffer.len() >= 64 {
            let powers = polyval::precompute_powers(h);
            h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
        }
        // --- POLYVAL over AAD: 64-byte aggregated, 16-byte, then padded tail ---
        let mut offset = 0usize;
        while offset.strict_add(64) <= aad.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&aad[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::s390x_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= aad.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::s390x_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < aad.len() {
            let mut block = [0u8; 16];
            block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::s390x_clmul128_reduce_inline(acc, h);
        }
        // --- POLYVAL over the plaintext in `buffer`, same three-stage shape ---
        offset = 0;
        while offset.strict_add(64) <= buffer.len() {
            let mut b = [0u128; 4];
            let mut i = 0usize;
            while i < 4 {
                let base = offset.strict_add(i.strict_mul(16));
                let mut block = [0u8; 16];
                block.copy_from_slice(&buffer[base..base.strict_add(16)]);
                b[i] = u128::from_le_bytes(block);
                i = i.strict_add(1);
            }
            acc = polyval::s390x_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
            offset = offset.strict_add(64);
        }
        while offset.strict_add(16) <= buffer.len() {
            let mut block = [0u8; 16];
            block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::s390x_clmul128_reduce_inline(acc, h);
            offset = offset.strict_add(16);
        }
        if offset < buffer.len() {
            let mut block = [0u8; 16];
            block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
            acc ^= u128::from_le_bytes(block);
            acc = polyval::s390x_clmul128_reduce_inline(acc, h);
        }
        // Length block, nonce XOR, top-bit clear, final tag encryption.
        let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
        acc ^= u128::from_le_bytes(length_block);
        acc = polyval::s390x_clmul128_reduce_inline(acc, h);
        let mut tag = acc.to_le_bytes();
        let mut j = 0usize;
        while j < 12 {
            tag[j] ^= nonce_bytes[j];
            j = j.strict_add(1);
        }
        tag[15] &= 0x7f;
        // Block primitive takes the raw key; no expanded schedule on s390x.
        aes::s390x_encrypt_block_raw_inline(enc_key_bytes, &mut tag);
        // --- AES-CTR encryption: counter block = tag with top bit of byte 15
        // set; 32-bit little-endian counter lives in bytes 0..4. ---
        let mut counter_block = tag;
        counter_block[15] |= 0x80;
        let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
        offset = 0;
        while offset < buffer.len() {
            counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
            let mut keystream = counter_block;
            aes::s390x_encrypt_block_raw_inline(enc_key_bytes, &mut keystream);
            let remaining = buffer.len().strict_sub(offset);
            if remaining >= 16 {
                // Full block: XOR as a single u128 for a wide store.
                let mut d = [0u8; 16];
                d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
                let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
                buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
                offset = offset.strict_add(16);
            } else {
                // Final partial block: byte-wise XOR.
                let mut i = 0usize;
                while i < remaining {
                    buffer[offset.strict_add(i)] ^= keystream[i];
                    i = i.strict_add(1);
                }
                offset = offset.strict_add(remaining);
            }
            ctr = ctr.wrapping_add(1);
        }
        // Raw key was needed for every CTR block; wipe it only now.
        ct::zeroize(enc_key_bytes);
        tag
    }
}
#[cfg(target_arch = "s390x")]
#[target_feature(enable = "vector")]
// Fused CTR-decrypt + POLYVAL tag verification for s390x.
//
// Decrypts `buffer` in place with AES-256 in counter mode (initial counter
// block derived from the received `tag`), then recomputes the expected tag
// (POLYVAL over AAD and the recovered plaintext, masked with the nonce and
// AES-encrypted) and compares it in constant time. On mismatch the decrypted
// buffer is zeroized before the error is returned, so unauthenticated
// plaintext is never handed back to the caller.
//
// `auth_key` (POLYVAL hash key) and `enc_key_bytes` (raw AES-256 key) are
// taken by `&mut` so each can be zeroized here as soon as it is no longer
// needed: `auth_key` right after loading it into `h`, `enc_key_bytes` after
// the final tag-block encryption.
//
// SAFETY: the caller must guarantee the s390x `vector` target feature is
// available at runtime; the `_inline` AES/POLYVAL helpers rely on it.
unsafe fn decrypt_fused_s390x(
auth_key: &mut [u8; 16],
enc_key_bytes: &mut [u8; 32],
nonce: &Nonce96,
aad: &[u8],
buffer: &mut [u8],
tag: &Aes256GcmSivTag,
) -> Result<(), crate::traits::VerificationError> {
unsafe {
let nonce_bytes = nonce.as_bytes();
// --- CTR phase -------------------------------------------------------
// The initial counter block is the tag with the top bit of its last
// byte forced on; the first four bytes hold a little-endian 32-bit
// counter that wraps on overflow (GCM-SIV counter derivation).
let mut counter_block = tag.0;
counter_block[15] |= 0x80;
let mut ctr = u32::from_le_bytes([counter_block[0], counter_block[1], counter_block[2], counter_block[3]]);
let mut offset = 0usize;
while offset < buffer.len() {
counter_block[0..4].copy_from_slice(&ctr.to_le_bytes());
// Keystream block = AES-256(counter_block).
let mut keystream = counter_block;
aes::s390x_encrypt_block_raw_inline(enc_key_bytes, &mut keystream);
let remaining = buffer.len().strict_sub(offset);
if remaining >= 16 {
// Full block: XOR all 16 bytes at once through a u128. Native
// endianness is fine because both operands use the same order.
let mut d = [0u8; 16];
d.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
let xored = u128::from_ne_bytes(d) ^ u128::from_ne_bytes(keystream);
buffer[offset..offset.strict_add(16)].copy_from_slice(&xored.to_ne_bytes());
offset = offset.strict_add(16);
} else {
// Final partial block: XOR the tail byte-by-byte.
let mut i = 0usize;
while i < remaining {
buffer[offset.strict_add(i)] ^= keystream[i];
i = i.strict_add(1);
}
offset = offset.strict_add(remaining);
}
ctr = ctr.wrapping_add(1);
}
// --- Authentication phase -------------------------------------------
// Load the POLYVAL key into a u128 and wipe its byte form immediately.
let h = u128::from_le_bytes(*auth_key);
ct::zeroize(auth_key);
let mut acc: u128 = 0;
// Precompute h^1..h^4 (stored highest power first) only when at least
// one input is long enough for the 4-block aggregated path to be taken.
let mut h_powers_rev = [0u128; 4];
if aad.len() >= 64 || buffer.len() >= 64 {
let powers = polyval::precompute_powers(h);
h_powers_rev = [powers[3], powers[2], powers[1], powers[0]];
}
// Absorb the AAD: 64-byte aggregated chunks first, then single 16-byte
// blocks, then a zero-padded final partial block.
offset = 0;
while offset.strict_add(64) <= aad.len() {
let mut b = [0u128; 4];
let mut i = 0usize;
while i < 4 {
let base = offset.strict_add(i.strict_mul(16));
let mut block = [0u8; 16];
block.copy_from_slice(&aad[base..base.strict_add(16)]);
b[i] = u128::from_le_bytes(block);
i = i.strict_add(1);
}
acc = polyval::s390x_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
offset = offset.strict_add(64);
}
while offset.strict_add(16) <= aad.len() {
let mut block = [0u8; 16];
block.copy_from_slice(&aad[offset..offset.strict_add(16)]);
acc ^= u128::from_le_bytes(block);
acc = polyval::s390x_clmul128_reduce_inline(acc, h);
offset = offset.strict_add(16);
}
if offset < aad.len() {
let mut block = [0u8; 16];
block[..aad.len().strict_sub(offset)].copy_from_slice(&aad[offset..]);
acc ^= u128::from_le_bytes(block);
acc = polyval::s390x_clmul128_reduce_inline(acc, h);
}
// Absorb the recovered plaintext (buffer was decrypted above) with the
// same 64-byte / 16-byte / padded-tail structure.
offset = 0;
while offset.strict_add(64) <= buffer.len() {
let mut b = [0u128; 4];
let mut i = 0usize;
while i < 4 {
let base = offset.strict_add(i.strict_mul(16));
let mut block = [0u8; 16];
block.copy_from_slice(&buffer[base..base.strict_add(16)]);
b[i] = u128::from_le_bytes(block);
i = i.strict_add(1);
}
acc = polyval::s390x_aggregate_4blocks_inline(acc, &h_powers_rev, &b);
offset = offset.strict_add(64);
}
while offset.strict_add(16) <= buffer.len() {
let mut block = [0u8; 16];
block.copy_from_slice(&buffer[offset..offset.strict_add(16)]);
acc ^= u128::from_le_bytes(block);
acc = polyval::s390x_clmul128_reduce_inline(acc, h);
offset = offset.strict_add(16);
}
if offset < buffer.len() {
let mut block = [0u8; 16];
block[..buffer.len().strict_sub(offset)].copy_from_slice(&buffer[offset..]);
acc ^= u128::from_le_bytes(block);
acc = polyval::s390x_clmul128_reduce_inline(acc, h);
}
// Final POLYVAL block encodes the AAD/plaintext lengths (as bit counts,
// per the helper's name) in little-endian form.
let length_block = super::AeadByteLengths::from_usize(aad.len(), buffer.len()).to_le_bits_block();
acc ^= u128::from_le_bytes(length_block);
acc = polyval::s390x_clmul128_reduce_inline(acc, h);
// Candidate tag: XOR the nonce into the first 12 bytes, clear the top
// bit of the last byte, then AES-encrypt the block.
let mut expected = acc.to_le_bytes();
let mut j = 0usize;
while j < 12 {
expected[j] ^= nonce_bytes[j];
j = j.strict_add(1);
}
expected[15] &= 0x7f;
aes::s390x_encrypt_block_raw_inline(enc_key_bytes, &mut expected);
ct::zeroize(enc_key_bytes);
// Constant-time comparison; on failure wipe the (already decrypted)
// plaintext so it cannot leak to the caller.
if !ct::constant_time_eq(&expected, tag.as_bytes()) {
ct::zeroize(buffer);
return Err(crate::traits::VerificationError::new());
}
Ok(())
}
}
// `Aead` trait implementation. Each operation validates length bounds, then
// dispatches to the fastest backend selected at construction time, falling
// back to the portable software path when no architecture-specific fast path
// matches the cached `self.backend`.
impl Aead for Aes256GcmSiv {
const KEY_SIZE: usize = KEY_SIZE;
const NONCE_SIZE: usize = NONCE_SIZE;
const TAG_SIZE: usize = TAG_SIZE;
type Key = Aes256GcmSivKey;
type Nonce = Nonce96;
type Tag = Aes256GcmSivTag;
// Expands the master key once up front and caches the backend so the
// per-call dispatch below is a cheap enum comparison.
fn new(key: &Self::Key) -> Self {
let backend = resolve_backend();
Self {
// riscv64 uses a backend-specific key-schedule layout.
#[cfg(target_arch = "riscv64")]
master_ek: expand_key_riscv_for_backend(key.as_bytes(), backend),
#[cfg(not(target_arch = "riscv64"))]
master_ek: aes::aes256_expand_key(key.as_bytes()),
backend,
}
}
// Fallible conversion from an arbitrary slice; rejects any length other
// than the 16-byte tag size.
fn tag_from_slice(bytes: &[u8]) -> Result<Self::Tag, AeadBufferError> {
if bytes.len() != TAG_SIZE {
return Err(AeadBufferError::new());
}
let mut tag = [0u8; TAG_SIZE];
tag.copy_from_slice(bytes);
Ok(Aes256GcmSivTag::from_bytes(tag))
}
// Encrypts `buffer` in place and returns the detached tag.
//
// GCM-SIV order of operations: derive per-nonce keys, compute the tag over
// AAD + *plaintext*, then CTR-encrypt with a counter block derived from
// that tag (top bit of the last byte forced on). Paths that derive key
// material here zeroize it before returning; the fused per-arch helpers
// take the keys by `&mut` and are expected to zeroize them internally —
// NOTE(review): confirm each fused helper does so.
fn encrypt_in_place(&self, nonce: &Self::Nonce, aad: &[u8], buffer: &mut [u8]) -> Result<Self::Tag, SealError> {
// Enforce maximum plaintext length and AAD/plaintext bit-length bounds
// before touching any key material.
super::seal_bounded_length_as_u64(buffer.len(), MAX_PLAINTEXT_LEN)?;
super::seal_bit_lengths(aad.len(), buffer.len())?;
// x86-64 wide path (VAES + VPCLMULQDQ).
// SAFETY(assumed): `self.backend` is only `X86VaesVpclmul` when runtime
// feature detection found the required CPU features — verify in
// `resolve_backend`/`select_backend`.
#[cfg(target_arch = "x86_64")]
if self.backend == AeadBackend::X86VaesVpclmul {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let ek = aes::aes256_expand_key(&enc_key);
let tag_bytes = compute_tag_wide(&auth_key, &ek, nonce, aad, buffer);
let mut counter_block = tag_bytes;
counter_block[15] |= 0x80;
unsafe { aes::aes256_ctr32_encrypt_wide(&ek, &counter_block, buffer) };
ct::zeroize(&mut auth_key);
ct::zeroize(&mut enc_key);
return Ok(Aes256GcmSivTag::from_bytes(tag_bytes));
}
// aarch64 fused path (AES + PMULL, with or without SVE2).
#[cfg(target_arch = "aarch64")]
if matches!(
self.backend,
AeadBackend::Aarch64AesPmull | AeadBackend::Aarch64Sve2AesPmull
) {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let tag_bytes = unsafe { encrypt_fused_aarch64(&mut auth_key, &mut enc_key, nonce, aad, buffer) };
return Ok(Aes256GcmSivTag::from_bytes(tag_bytes));
}
// POWER8 in-core crypto fused path.
#[cfg(target_arch = "powerpc64")]
if self.backend == AeadBackend::Power8Crypto {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let tag_bytes = unsafe { encrypt_fused_ppc(&mut auth_key, &mut enc_key, nonce, aad, buffer) };
return Ok(Aes256GcmSivTag::from_bytes(tag_bytes));
}
// s390x message-security-assist fused path.
#[cfg(target_arch = "s390x")]
if self.backend == AeadBackend::S390xMsa {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let tag_bytes = unsafe { encrypt_fused_s390x(&mut auth_key, &mut enc_key, nonce, aad, buffer) };
return Ok(Aes256GcmSivTag::from_bytes(tag_bytes));
}
// riscv64 handles Portable as well, because its master key schedule is
// laid out per backend (see `new`), so the generic fallback below would
// misinterpret it.
#[cfg(target_arch = "riscv64")]
{
match self.backend {
AeadBackend::Portable
| AeadBackend::Riscv64VectorCrypto
| AeadBackend::Riscv64ScalarCrypto
| AeadBackend::Riscv64Vperm => {
let tag_bytes = encrypt_riscv(&self.master_ek, self.backend, nonce, aad, buffer);
return Ok(Aes256GcmSivTag::from_bytes(tag_bytes));
}
_ => {}
}
}
// Portable fallback: tag over plaintext, then CTR encryption, then wipe
// the per-nonce keys.
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let ek = aes::aes256_expand_key(&enc_key);
let tag_bytes = compute_tag(&auth_key, &ek, nonce, aad, buffer);
let mut counter_block = tag_bytes;
counter_block[15] |= 0x80;
aes::aes256_ctr32_encrypt(&ek, &counter_block, buffer);
ct::zeroize(&mut auth_key);
ct::zeroize(&mut enc_key);
Ok(Aes256GcmSivTag::from_bytes(tag_bytes))
}
// Decrypts `buffer` in place and verifies `tag`.
//
// Mirror image of `encrypt_in_place`: CTR-decrypt first (counter block
// derived from the received tag), then recompute the expected tag over the
// recovered plaintext and compare in constant time. On mismatch the buffer
// is zeroized so unauthenticated plaintext never reaches the caller.
fn decrypt_in_place(
&self,
nonce: &Self::Nonce,
aad: &[u8],
buffer: &mut [u8],
tag: &Self::Tag,
) -> Result<(), OpenError> {
// Same length bounds as sealing, via the `open_*` error mapping.
super::open_bounded_length_as_u64(buffer.len(), MAX_PLAINTEXT_LEN)?;
super::open_bit_lengths(aad.len(), buffer.len())?;
// x86-64 wide path; see the SAFETY note in `encrypt_in_place`.
#[cfg(target_arch = "x86_64")]
if self.backend == AeadBackend::X86VaesVpclmul {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let ek = aes::aes256_expand_key(&enc_key);
let mut counter_block = tag.0;
counter_block[15] |= 0x80;
unsafe { aes::aes256_ctr32_encrypt_wide(&ek, &counter_block, buffer) };
let expected = compute_tag_wide(&auth_key, &ek, nonce, aad, buffer);
ct::zeroize(&mut auth_key);
ct::zeroize(&mut enc_key);
if !ct::constant_time_eq(&expected, tag.as_bytes()) {
ct::zeroize(buffer);
return Err(OpenError::verification());
}
return Ok(());
}
// Fused per-arch paths: decrypt-and-verify in one call; these wipe the
// plaintext themselves on verification failure.
#[cfg(target_arch = "aarch64")]
if matches!(
self.backend,
AeadBackend::Aarch64AesPmull | AeadBackend::Aarch64Sve2AesPmull
) {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
return unsafe { decrypt_fused_aarch64(&mut auth_key, &mut enc_key, nonce, aad, buffer, tag) }
.map_err(OpenError::from);
}
#[cfg(target_arch = "powerpc64")]
if self.backend == AeadBackend::Power8Crypto {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
return unsafe { decrypt_fused_ppc(&mut auth_key, &mut enc_key, nonce, aad, buffer, tag) }
.map_err(OpenError::from);
}
#[cfg(target_arch = "s390x")]
if self.backend == AeadBackend::S390xMsa {
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
return unsafe { decrypt_fused_s390x(&mut auth_key, &mut enc_key, nonce, aad, buffer, tag) }
.map_err(OpenError::from);
}
// riscv64 owns the Portable backend too; see the note in encrypt.
#[cfg(target_arch = "riscv64")]
{
match self.backend {
AeadBackend::Portable
| AeadBackend::Riscv64VectorCrypto
| AeadBackend::Riscv64ScalarCrypto
| AeadBackend::Riscv64Vperm => {
return decrypt_riscv(&self.master_ek, self.backend, nonce, aad, buffer, tag).map_err(OpenError::from);
}
_ => {}
}
}
// Portable fallback: decrypt, recompute tag, constant-time compare,
// wipe plaintext on failure.
let (mut auth_key, mut enc_key) = derive_keys(&self.master_ek, nonce);
let ek = aes::aes256_expand_key(&enc_key);
let mut counter_block = tag.0;
counter_block[15] |= 0x80;
aes::aes256_ctr32_encrypt(&ek, &counter_block, buffer);
let expected = compute_tag(&auth_key, &ek, nonce, aad, buffer);
ct::zeroize(&mut auth_key);
ct::zeroize(&mut enc_key);
if !ct::constant_time_eq(&expected, tag.as_bytes()) {
ct::zeroize(buffer);
return Err(OpenError::verification());
}
Ok(())
}
}
#[cfg(test)]
mod tests {
    use alloc::{vec, vec::Vec};

    use super::*;

    // Test vectors follow RFC 8452 (key = 0x01 || 0^31, nonce = 0x03 || 0^11
    // for most cases). Hex helpers live at the bottom of the module; they now
    // validate input length up front so a typo'd vector fails with a clear
    // assertion instead of an opaque slice panic.

    /// Empty AAD and empty plaintext round-trip against the known vector.
    #[test]
    fn aes256gcmsiv_empty() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let expected_ct_tag = hex_vec("07f5f4169bbf55a8400cd47ea6fd400f");
        let cipher = Aes256GcmSiv::new(&key);
        let mut out = vec![0u8; expected_ct_tag.len()];
        cipher.encrypt(&nonce, &[], &[], &mut out).unwrap();
        assert_eq!(out, expected_ct_tag);
        let mut pt_out = vec![0u8; 0];
        cipher.decrypt(&nonce, &[], &expected_ct_tag, &mut pt_out).unwrap();
        assert!(pt_out.is_empty());
    }

    /// Short AAD plus short plaintext against the known vector.
    #[test]
    fn aes256gcmsiv_aad_and_plaintext() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let aad = hex_vec("01");
        let plaintext = hex_vec("0200000000000000");
        let expected_ct_tag = hex_vec("1de22967237a813291213f267e3b452f02d01ae33e4ec854");
        let cipher = Aes256GcmSiv::new(&key);
        let mut out = vec![0u8; plaintext.len().strict_add(TAG_SIZE)];
        cipher.encrypt(&nonce, &aad, &plaintext, &mut out).unwrap();
        assert_eq!(out, expected_ct_tag);
        let mut pt_out = vec![0u8; plaintext.len()];
        cipher.decrypt(&nonce, &aad, &expected_ct_tag, &mut pt_out).unwrap();
        assert_eq!(pt_out, plaintext);
    }

    /// Multi-block AAD and plaintext (exercises partial-block padding).
    #[test]
    fn aes256gcmsiv_longer_aad_and_plaintext() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let aad = hex_vec("010000000000000000000000000000000200");
        let plaintext = hex_vec("0300000000000000000000000000000004000000");
        let expected_ct_tag = hex_vec("43dd0163cdb48f9fe3212bf61b201976067f342bb879ad976d8242acc188ab59cabfe307");
        let cipher = Aes256GcmSiv::new(&key);
        let mut out = vec![0u8; plaintext.len().strict_add(TAG_SIZE)];
        cipher.encrypt(&nonce, &aad, &plaintext, &mut out).unwrap();
        assert_eq!(out, expected_ct_tag);
        let mut pt_out = vec![0u8; plaintext.len()];
        cipher.decrypt(&nonce, &aad, &expected_ct_tag, &mut pt_out).unwrap();
        assert_eq!(pt_out, plaintext);
    }

    /// A single flipped tag bit must fail verification.
    #[test]
    fn aes256gcmsiv_bad_tag() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let mut bad_ct_tag = hex_vec("07f5f4169bbf55a8400cd47ea6fd400f");
        bad_ct_tag[0] ^= 1;
        let cipher = Aes256GcmSiv::new(&key);
        let mut pt_out = vec![0u8; 0];
        let result = cipher.decrypt(&nonce, &[], &bad_ct_tag, &mut pt_out);
        assert!(result.is_err());
    }

    /// Decrypting with AAD that differs from the sealing AAD must fail.
    #[test]
    fn aes256gcmsiv_wrong_aad_rejected() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let ct_tag = hex_vec("1de22967237a813291213f267e3b452f02d01ae33e4ec854");
        let cipher = Aes256GcmSiv::new(&key);
        let mut pt_out = vec![0u8; 8];
        let result = cipher.decrypt(&nonce, &[0x02], &ct_tag, &mut pt_out);
        assert!(result.is_err());
    }

    /// Decrypting under a different nonce must fail.
    #[test]
    fn aes256gcmsiv_wrong_nonce_rejected() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let aad = hex_vec("01");
        let ct_tag = hex_vec("1de22967237a813291213f267e3b452f02d01ae33e4ec854");
        let cipher = Aes256GcmSiv::new(&key);
        let mut pt_out = vec![0u8; 8];
        let wrong_nonce = Nonce96::from_bytes(hex12("040000000000000000000000"));
        let result = cipher.decrypt(&wrong_nonce, &aad, &ct_tag, &mut pt_out);
        assert!(result.is_err());
    }

    /// A single flipped ciphertext bit must fail verification.
    #[test]
    fn aes256gcmsiv_ciphertext_tampering_rejected() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let aad = hex_vec("01");
        let plaintext = hex_vec("0200000000000000");
        let mut ct_tag = hex_vec("1de22967237a813291213f267e3b452f02d01ae33e4ec854");
        ct_tag[0] ^= 1;
        let cipher = Aes256GcmSiv::new(&key);
        let mut pt_out = vec![0u8; plaintext.len()];
        let result = cipher.decrypt(&nonce, &aad, &ct_tag, &mut pt_out);
        assert!(result.is_err());
    }

    /// On authentication failure the output buffer must be wiped, never
    /// left holding unauthenticated plaintext (or the caller's old data).
    #[test]
    fn aes256gcmsiv_buffer_zeroed_on_auth_failure() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let aad = hex_vec("01");
        let plaintext = hex_vec("0200000000000000");
        let cipher = Aes256GcmSiv::new(&key);
        let mut out = vec![0u8; plaintext.len().strict_add(TAG_SIZE)];
        cipher.encrypt(&nonce, &aad, &plaintext, &mut out).unwrap();
        // Corrupt the last tag byte so verification fails.
        let last = out.len().strict_sub(1);
        out[last] ^= 0xff;
        // Pre-fill with 0xff so an un-wiped buffer is detectable.
        let mut pt_out = vec![0xffu8; plaintext.len()];
        let result = cipher.decrypt(&nonce, &aad, &out, &mut pt_out);
        assert!(result.is_err());
        assert!(pt_out.iter().all(|&b| b == 0), "buffer not zeroed on auth failure");
    }

    /// Detached (in-place) encrypt/decrypt round-trip.
    #[test]
    fn aes256gcmsiv_detached_round_trip() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "0100000000000000000000000000000000000000000000000000000000000000",
        ));
        let nonce = Nonce96::from_bytes(hex12("030000000000000000000000"));
        let aad = hex_vec("01");
        let plaintext = hex_vec("0200000000000000");
        let cipher = Aes256GcmSiv::new(&key);
        let mut buf = plaintext.clone();
        let tag = cipher.encrypt_in_place(&nonce, &aad, &mut buf).unwrap();
        assert_ne!(buf, plaintext);
        cipher.decrypt_in_place(&nonce, &aad, &mut buf, &tag).unwrap();
        assert_eq!(buf, plaintext);
    }

    /// `tag_from_slice` accepts exactly 16 bytes and nothing else.
    #[test]
    fn aes256gcmsiv_tag_from_slice_rejects_bad_length() {
        assert!(Aes256GcmSiv::tag_from_slice(&[0u8; 15]).is_err());
        assert!(Aes256GcmSiv::tag_from_slice(&[0u8; 17]).is_err());
        assert!(Aes256GcmSiv::tag_from_slice(&[0u8; 0]).is_err());
        assert!(Aes256GcmSiv::tag_from_slice(&[0u8; 16]).is_ok());
    }

    /// A vector with a non-trivial key and nonce.
    #[test]
    fn aes256gcmsiv_different_key_vector() {
        let key = Aes256GcmSivKey::from_bytes(hex32(
            "e66021d5eb8e4f4066d4adb9c33560e4f46e44bb3da0015c94f7088736864200",
        ));
        let nonce = Nonce96::from_bytes(hex12("e0eaf5284d884a0e77d31646"));
        let expected_ct_tag = hex_vec("169fbb2fbf389a995f6390af22228a62");
        let cipher = Aes256GcmSiv::new(&key);
        let mut out = vec![0u8; expected_ct_tag.len()];
        cipher.encrypt(&nonce, &[], &[], &mut out).unwrap();
        assert_eq!(out, expected_ct_tag);
        let mut pt_out = vec![0u8; 0];
        cipher.decrypt(&nonce, &[], &expected_ct_tag, &mut pt_out).unwrap();
    }

    /// Decodes exactly `N` bytes of hex into a fixed-size array.
    ///
    /// Test-only helper: panics on a wrong-length or non-hex literal.
    fn hex_array<const N: usize>(hex: &str) -> [u8; N] {
        assert_eq!(hex.len(), N.strict_mul(2), "hex literal has wrong length");
        let mut out = [0u8; N];
        for (i, byte) in out.iter_mut().enumerate() {
            *byte = u8::from_str_radix(&hex[2 * i..2 * i + 2], 16).unwrap();
        }
        out
    }

    // Thin wrappers kept so vector call sites stay concise.
    fn hex32(hex: &str) -> [u8; 32] {
        hex_array(hex)
    }

    fn hex12(hex: &str) -> [u8; 12] {
        hex_array(hex)
    }

    /// Decodes an even-length hex string into a byte vector.
    fn hex_vec(hex: &str) -> Vec<u8> {
        assert_eq!(hex.len() % 2, 0, "hex literal has odd length");
        (0..hex.len())
            .step_by(2)
            .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).unwrap())
            .collect()
    }
}