#![no_std]
#![doc = include_str!("../README.md")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
#![warn(missing_docs, rust_2018_idioms)]
#[cfg(feature = "alloc")]
#[macro_use]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
mod error;
pub use error::{Error, Result};
use aes::cipher::{
generic_array::GenericArray,
typenum::{Unsigned, U16, U24, U32},
Block, BlockBackend, BlockCipher, BlockClosure, BlockDecrypt, BlockEncrypt, BlockSizeUser,
KeyInit,
};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// Size of an AES-KW semiblock in bytes (64 bits).
pub const SEMIBLOCK_SIZE: usize = 8;
/// Maximum length of input data for AES-KWP: `u32::MAX` bytes, limited by
/// the 32-bit message-length indicator stored in the alternative IV.
pub const KWP_MAX_LEN: usize = u32::MAX as usize;
/// Size of the integrity-check value / IV: one semiblock.
pub const IV_LEN: usize = SEMIBLOCK_SIZE;
/// Default initial value (IV) for AES-KW, per RFC 3394.
pub const IV: [u8; IV_LEN] = [0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6];
/// Fixed 4-byte prefix of the alternative IV used by AES-KWP (RFC 5649);
/// the remaining 4 IV bytes hold the big-endian plaintext length.
pub const KWP_IV_PREFIX: [u8; IV_LEN / 2] = [0xA6, 0x59, 0x59, 0xA6];
/// A Key-Encrypting-Key (KEK) that can wrap and unwrap other byte strings
/// (typically key material) using the AES Key Wrap (KW) and Key Wrap with
/// Padding (KWP) modes.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Kek<Aes>
where
    Aes: KeyInit + BlockCipher + BlockSizeUser<BlockSize = U16> + BlockEncrypt + BlockDecrypt,
{
    /// The AES block cipher instance used as the wrapping primitive.
    cipher: Aes,
}
/// AES-128 key-encrypting-key.
pub type KekAes128 = Kek<aes::Aes128>;
/// AES-192 key-encrypting-key.
pub type KekAes192 = Kek<aes::Aes192>;
/// AES-256 key-encrypting-key.
pub type KekAes256 = Kek<aes::Aes256>;
impl From<GenericArray<u8, U16>> for KekAes128 {
fn from(kek: GenericArray<u8, U16>) -> Self {
Kek::new(&kek)
}
}
impl From<GenericArray<u8, U24>> for KekAes192 {
fn from(kek: GenericArray<u8, U24>) -> Self {
Kek::new(&kek)
}
}
impl From<GenericArray<u8, U32>> for KekAes256 {
fn from(kek: GenericArray<u8, U32>) -> Self {
Kek::new(&kek)
}
}
impl From<[u8; 16]> for KekAes128 {
fn from(kek: [u8; 16]) -> Self {
Kek::new(&kek.into())
}
}
impl From<[u8; 24]> for KekAes192 {
fn from(kek: [u8; 24]) -> Self {
Kek::new(&kek.into())
}
}
impl From<[u8; 32]> for KekAes256 {
fn from(kek: [u8; 32]) -> Self {
Kek::new(&kek.into())
}
}
impl<Aes> TryFrom<&[u8]> for Kek<Aes>
where
Aes: KeyInit + BlockCipher + BlockSizeUser<BlockSize = U16> + BlockEncrypt + BlockDecrypt,
{
type Error = Error;
fn try_from(value: &[u8]) -> Result<Self> {
if value.len() == Aes::KeySize::to_usize() {
Ok(Kek::new(GenericArray::from_slice(value)))
} else {
Err(Error::InvalidKekSize { size: value.len() })
}
}
}
impl<Aes> Kek<Aes>
where
    Aes: KeyInit + BlockCipher + BlockSizeUser<BlockSize = U16> + BlockEncrypt + BlockDecrypt,
{
    /// Constructs a new KEK from raw AES key material.
    pub fn new(key: &GenericArray<u8, Aes::KeySize>) -> Self {
        let cipher = Aes::new(key);
        Kek { cipher }
    }

    /// AES Key Wrap (KW mode, RFC 3394).
    ///
    /// `out` is fully overwritten with the integrity register followed by
    /// the ciphertext semiblocks.
    ///
    /// # Errors
    ///
    /// - [`Error::InvalidDataSize`] if `data` is not a whole number of
    ///   8-byte semiblocks.
    /// - [`Error::InvalidOutputSize`] if `out.len() != data.len() + IV_LEN`.
    pub fn wrap(&self, data: &[u8], out: &mut [u8]) -> Result<()> {
        if data.len() % SEMIBLOCK_SIZE != 0 {
            return Err(Error::InvalidDataSize);
        }
        if out.len() != data.len() + IV_LEN {
            return Err(Error::InvalidOutputSize {
                expected: data.len() + IV_LEN,
            });
        }

        // Number of 64-bit semiblocks. Fix: use the named SEMIBLOCK_SIZE
        // constant instead of the magic number 8 used elsewhere in the file.
        let n = data.len() / SEMIBLOCK_SIZE;

        // A = IV, R[i] = P[i]: stage the plaintext one semiblock to the
        // right in `out`; the W transform then wraps it in place.
        let block = &mut Block::<WCtx<'_>>::default();
        block[..IV_LEN].copy_from_slice(&IV);
        out[IV_LEN..].copy_from_slice(data);

        self.cipher.encrypt_with_backend(WCtx { n, block, out });

        // C[0] = A
        out[..IV_LEN].copy_from_slice(&block[..IV_LEN]);

        Ok(())
    }

    /// Computes [`Self::wrap`], allocating the output buffer.
    #[cfg(feature = "alloc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
    pub fn wrap_vec(&self, data: &[u8]) -> Result<Vec<u8>> {
        let mut out = vec![0u8; data.len() + IV_LEN];
        self.wrap(data, &mut out)?;
        Ok(out)
    }

    /// AES Key Unwrap (KW mode, RFC 3394).
    ///
    /// # Errors
    ///
    /// - [`Error::InvalidDataSize`] if `data` is empty or not a whole number
    ///   of 8-byte semiblocks.
    /// - [`Error::InvalidOutputSize`] if `out.len() != data.len() - IV_LEN`.
    /// - [`Error::IntegrityCheckFailed`] if the recovered integrity register
    ///   does not equal [`IV`].
    pub fn unwrap(&self, data: &[u8], out: &mut [u8]) -> Result<()> {
        if data.len() % SEMIBLOCK_SIZE != 0 {
            return Err(Error::InvalidDataSize);
        }

        // `n` is the number of *plaintext* semiblocks; `checked_sub`
        // rejects empty input.
        let n = (data.len() / SEMIBLOCK_SIZE)
            .checked_sub(1)
            .ok_or(Error::InvalidDataSize)?;
        if out.len() != n * SEMIBLOCK_SIZE {
            return Err(Error::InvalidOutputSize {
                expected: n * SEMIBLOCK_SIZE,
            });
        }

        // A = C[0], R[i] = C[i]; the inverse transform unwraps in place.
        let block = &mut Block::<WInverseCtx<'_>>::default();
        block[..IV_LEN].copy_from_slice(&data[..IV_LEN]);
        out.copy_from_slice(&data[IV_LEN..]);

        self.cipher
            .decrypt_with_backend(WInverseCtx { n, block, out });

        // Integrity check: the recovered register must equal the fixed IV.
        if block[..IV_LEN] == IV[..] {
            Ok(())
        } else {
            Err(Error::IntegrityCheckFailed)
        }
    }

    /// Computes [`Self::unwrap`], allocating the output buffer.
    #[cfg(feature = "alloc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
    pub fn unwrap_vec(&self, data: &[u8]) -> Result<Vec<u8>> {
        let out_len = data
            .len()
            .checked_sub(IV_LEN)
            .ok_or(Error::InvalidDataSize)?;
        let mut out = vec![0u8; out_len];
        self.unwrap(data, &mut out)?;
        Ok(out)
    }

    /// AES Key Wrap with Padding (KWP mode, RFC 5649).
    ///
    /// `data` is zero-padded to a whole number of semiblocks before
    /// wrapping; its original length is recorded in the alternative IV.
    /// `out` must be `SEMIBLOCK_SIZE * ceil(data.len() / SEMIBLOCK_SIZE)
    /// + IV_LEN` bytes long.
    ///
    /// # Errors
    ///
    /// - [`Error::InvalidDataSize`] if `data` is longer than [`KWP_MAX_LEN`].
    /// - [`Error::InvalidOutputSize`] if `out` has the wrong length.
    pub fn wrap_with_padding(&self, data: &[u8], out: &mut [u8]) -> Result<()> {
        if data.len() > KWP_MAX_LEN {
            return Err(Error::InvalidDataSize);
        }

        // Number of semiblocks after zero-padding: ceil(len / 8).
        let n = (data.len() + SEMIBLOCK_SIZE - 1) / SEMIBLOCK_SIZE;
        if out.len() != n * SEMIBLOCK_SIZE + IV_LEN {
            return Err(Error::InvalidOutputSize {
                expected: n * SEMIBLOCK_SIZE + IV_LEN,
            });
        }

        // Alternative IV: 4-byte prefix || 32-bit big-endian message length.
        let mli = (data.len() as u32).to_be_bytes();
        let block = &mut Block::<WCtx<'_>>::default();
        block[..IV_LEN / 2].copy_from_slice(&KWP_IV_PREFIX);
        block[IV_LEN / 2..IV_LEN].copy_from_slice(&mli);

        // NOTE(review): n == 0 (empty input) falls through to the KW path
        // below, which performs no cipher calls and emits the raw AIV;
        // RFC 5649 only defines KWP for non-empty inputs — consider
        // rejecting empty data explicitly.
        if n == 1 {
            // Exactly one padded semiblock: KWP uses a single AES call
            // instead of the full W transform.
            for i in data.len()..n * SEMIBLOCK_SIZE {
                block[IV_LEN + i] = 0;
            }
            block[IV_LEN..IV_LEN + data.len()].copy_from_slice(data);
            self.cipher.encrypt_block(block);
            out.copy_from_slice(block);
        } else {
            // Zero-pad the tail, stage the plaintext, then run plain KW.
            for i in data.len()..n * SEMIBLOCK_SIZE {
                out[IV_LEN + i] = 0;
            }
            out[IV_LEN..IV_LEN + data.len()].copy_from_slice(data);
            self.cipher.encrypt_with_backend(WCtx { n, block, out });
            out[..IV_LEN].copy_from_slice(&block[..IV_LEN]);
        }

        Ok(())
    }

    /// Computes [`Self::wrap_with_padding`], allocating the output buffer.
    #[cfg(feature = "alloc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
    pub fn wrap_with_padding_vec(&self, data: &[u8]) -> Result<Vec<u8>> {
        let n = (data.len() + SEMIBLOCK_SIZE - 1) / SEMIBLOCK_SIZE;
        let mut out = vec![0u8; n * SEMIBLOCK_SIZE + IV_LEN];
        self.wrap_with_padding(data, &mut out)?;
        Ok(out)
    }

    /// AES Key Unwrap with Padding (KWP mode, RFC 5649).
    ///
    /// `out` must be `data.len() - IV_LEN` bytes; the returned slice is the
    /// unpadded plaintext prefix of `out`.
    ///
    /// # Errors
    ///
    /// - [`Error::InvalidDataSize`] if `data` is shorter than two semiblocks
    ///   or not a whole number of semiblocks.
    /// - [`Error::InvalidOutputSize`] if `out` has the wrong length.
    /// - [`Error::IntegrityCheckFailed`] if the AIV prefix, the recovered
    ///   message length, or the padding bytes are invalid.
    pub fn unwrap_with_padding<'a>(&self, data: &[u8], out: &'a mut [u8]) -> Result<&'a [u8]> {
        if data.len() % SEMIBLOCK_SIZE != 0 {
            return Err(Error::InvalidDataSize);
        }
        let n = (data.len() / SEMIBLOCK_SIZE)
            .checked_sub(1)
            .ok_or(Error::InvalidDataSize)?;
        // Fix: a valid KWP ciphertext contains at least two semiblocks
        // (AIV + one data semiblock). Without this guard, an 8-byte input
        // whose first 4 bytes equal KWP_IV_PREFIX reaches the
        // `SEMIBLOCK_SIZE * (n - 1)` expression below with n == 0 and
        // underflows `usize` (panic in debug builds).
        if n == 0 {
            return Err(Error::InvalidDataSize);
        }
        if out.len() != n * SEMIBLOCK_SIZE {
            return Err(Error::InvalidOutputSize {
                expected: n * SEMIBLOCK_SIZE,
            });
        }

        let block = &mut Block::<WInverseCtx<'_>>::default();
        if n == 1 {
            // Single ciphertext block: invert the single-AES-call path.
            block.copy_from_slice(data);
            self.cipher.decrypt_block(block);
            out.copy_from_slice(&block[IV_LEN..]);
        } else {
            // A = C[0], R[i] = C[i]; inverse transform unwraps in place.
            block[..IV_LEN].copy_from_slice(&data[..IV_LEN]);
            out.copy_from_slice(&data[IV_LEN..]);
            self.cipher
                .decrypt_with_backend(WInverseCtx { n, block, out });
        }

        // The fixed AIV prefix must survive unwrapping intact.
        if block[..IV_LEN / 2] != KWP_IV_PREFIX {
            return Err(Error::IntegrityCheckFailed);
        }

        // The message-length indicator must select between 1 and 8 bytes
        // of the final semiblock.
        let mli = u32::from_be_bytes(block[IV_LEN / 2..IV_LEN].try_into().unwrap()) as usize;
        if !(SEMIBLOCK_SIZE * (n - 1) < mli && mli <= SEMIBLOCK_SIZE * n) {
            return Err(Error::IntegrityCheckFailed);
        }

        // All padding bytes must be zero.
        let b = SEMIBLOCK_SIZE * n - mli;
        if !out.iter().rev().take(b).all(|&x| x == 0) {
            return Err(Error::IntegrityCheckFailed);
        }

        Ok(&out[..mli])
    }

    /// Computes [`Self::unwrap_with_padding`], allocating the output buffer
    /// and truncating it to the unpadded plaintext length.
    #[cfg(feature = "alloc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
    pub fn unwrap_with_padding_vec(&self, data: &[u8]) -> Result<Vec<u8>> {
        let out_len = data
            .len()
            .checked_sub(IV_LEN)
            .ok_or(Error::InvalidDataSize)?;
        let mut out = vec![0u8; out_len];
        let out_len = self.unwrap_with_padding(data, &mut out)?.len();
        out.truncate(out_len);
        Ok(out)
    }
}
/// Borrowed state for the in-place wrapping loop (the W transform).
struct WCtx<'a> {
    // number of 64-bit semiblocks being wrapped
    n: usize,
    // 16-byte scratch block: first 8 bytes hold the register A, last 8 the
    // semiblock currently being processed
    block: &'a mut Block<Self>,
    // output buffer: one IV-sized slot followed by the n semiblocks,
    // updated in place
    out: &'a mut [u8],
}
// The closure feeds the cipher backend one 16-byte AES block at a time.
impl<'a> BlockSizeUser for WCtx<'a> {
    type BlockSize = U16;
}
impl<'a> BlockClosure for WCtx<'a> {
    /// The wrapping function W (RFC 3394 §2.2.1): six rounds over all `n`
    /// semiblocks, updating the register A (first 8 bytes of `block`) and
    /// the semiblocks stored in `out` in place.
    #[inline(always)]
    fn call<B: BlockBackend<BlockSize = Self::BlockSize>>(self, backend: &mut B) {
        for j in 0..=5 {
            // skip(1): out[..IV_LEN] is reserved for A / the final C[0];
            // the data semiblocks R[1..=n] start one semiblock in.
            for (i, chunk) in self.out.chunks_mut(SEMIBLOCK_SIZE).skip(1).enumerate() {
                // B = AES(K, A | R[i])
                self.block[IV_LEN..].copy_from_slice(chunk);
                backend.proc_block(self.block.into());
                // A = MSB(64, B) ^ t, with t = n*j + i (1-based i here,
                // since `enumerate` starts at 0 after the skip)
                let t = (self.n * j + (i + 1)) as u64;
                for (ai, ti) in self.block[..IV_LEN].iter_mut().zip(&t.to_be_bytes()) {
                    *ai ^= ti;
                }
                // R[i] = LSB(64, B)
                chunk.copy_from_slice(&self.block[IV_LEN..]);
            }
        }
    }
}
/// Borrowed state for the in-place unwrapping loop (the W⁻¹ transform).
struct WInverseCtx<'a> {
    // number of 64-bit plaintext semiblocks being recovered
    n: usize,
    // 16-byte scratch block: first 8 bytes hold the register A, last 8 the
    // semiblock currently being processed
    block: &'a mut Block<Self>,
    // buffer holding exactly the n data semiblocks (no IV slot), updated
    // in place
    out: &'a mut [u8],
}
// The closure feeds the cipher backend one 16-byte AES block at a time.
impl<'a> BlockSizeUser for WInverseCtx<'a> {
    type BlockSize = U16;
}
impl<'a> BlockClosure for WInverseCtx<'a> {
    /// The inverse transform W⁻¹ (RFC 3394 §2.2.2): runs the six rounds in
    /// reverse, undoing the counter XOR on the register A before each
    /// decryption, restoring the semiblocks in `out` in place.
    #[inline(always)]
    fn call<B: BlockBackend<BlockSize = Self::BlockSize>>(self, backend: &mut B) {
        // Rounds and semiblocks are visited in the exact reverse of the
        // wrapping order.
        for j in (0..=5).rev() {
            for (i, chunk) in self.out.chunks_mut(SEMIBLOCK_SIZE).enumerate().rev() {
                // A = A ^ t, the same t = n*j + i (1-based) used when wrapping
                let t = (self.n * j + (i + 1)) as u64;
                for (ai, ti) in self.block[..IV_LEN].iter_mut().zip(&t.to_be_bytes()) {
                    *ai ^= ti;
                }
                // B = AES⁻¹(K, (A ^ t) | R[i]) — `backend` here is the
                // decrypting backend, so proc_block deciphers in place
                self.block[IV_LEN..].copy_from_slice(chunk);
                backend.proc_block(self.block.into());
                // R[i] = LSB(64, B)
                chunk.copy_from_slice(&self.block[IV_LEN..]);
            }
        }
    }
}