#![cfg_attr(docsrs, procmacros::doc_replace)]
#![allow(deprecated, reason = "generic_array 0.14 has been deprecated")]
use core::{
borrow::BorrowMut,
convert::Infallible,
marker::PhantomData,
mem::size_of,
ptr::NonNull,
};
pub use digest::Digest;
use crate::{
peripherals::SHA,
reg_access::{AlignmentHelper, SocDependentEndianess},
system::GenericPeripheralGuard,
work_queue::{Handle, Poll, Status, VTable, WorkQueue, WorkQueueDriver, WorkQueueFrontend},
};
/// SHA peripheral driver.
///
/// Owns the SHA peripheral and a guard that keeps the peripheral
/// powered/clocked for the driver's lifetime.
pub struct Sha<'d> {
    sha: SHA<'d>,
    // Keeps the SHA peripheral enabled while the driver exists.
    _guard: GenericPeripheralGuard<{ crate::system::Peripheral::Sha as u8 }>,
}
impl<'d> Sha<'d> {
    /// Creates a new SHA driver, enabling the peripheral for as long as the
    /// driver (and its internal guard) is alive.
    pub fn new(sha: SHA<'d>) -> Self {
        let guard = GenericPeripheralGuard::new();
        Self { sha, _guard: guard }
    }

    /// Starts a digest computation that mutably borrows this driver.
    pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
        ShaDigest::new(self)
    }

    /// Starts a digest computation that takes ownership of this driver.
    pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
        ShaDigest::new(self)
    }

    /// Returns whether the hardware is still hashing the current block.
    fn is_busy(&self, algo: ShaAlgorithmKind) -> bool {
        algo.is_busy(&self.sha)
    }

    /// Tells the hardware to hash the block currently in message memory.
    ///
    /// The first block of a message uses the START command; later blocks use
    /// CONTINUE so hashing resumes from the intermediate digest.
    fn process_buffer(&self, state: &mut DigestState) {
        if state.first_run {
            state.algorithm.start(&self.sha);
            state.first_run = false;
        } else {
            state.algorithm.r#continue(&self.sha);
        }
    }

    /// Copies as much of `incoming` as fits into the current message block.
    ///
    /// Returns the unconsumed remainder of `incoming`, or `WouldBlock` if a
    /// previously filled block is still being hashed by the hardware.
    fn write_data<'a>(
        &self,
        state: &mut DigestState,
        incoming: &'a [u8],
    ) -> nb::Result<&'a [u8], Infallible> {
        // A full block is pending from an earlier call; submit it first.
        if state.message_buffer_is_full {
            if self.is_busy(state.algorithm) {
                return Err(nb::Error::WouldBlock);
            } else {
                self.process_buffer(state);
                state.message_buffer_is_full = false;
            }
        }
        let chunk_len = state.algorithm.chunk_length();
        // Byte offset into the current (partially filled) block.
        let mod_cursor = state.cursor % chunk_len;
        let (remaining, bound_reached) = state.alignment_helper.aligned_volatile_copy(
            m_mem(&self.sha, 0),
            incoming,
            chunk_len,
            mod_cursor,
        );
        state.cursor += incoming.len() - remaining.len();
        // If the block filled up, try to submit it right away. A busy
        // hardware marks the block pending; it is submitted on a later call,
        // so the error can be discarded here.
        if bound_reached {
            _ = self.process_buffer_or_wait(state);
        }
        Ok(remaining)
    }

    /// Submits the current block, or records that it is pending
    /// (`message_buffer_is_full`) when the hardware is still busy.
    fn process_buffer_or_wait(&self, state: &mut DigestState) -> nb::Result<(), Infallible> {
        if self.is_busy(state.algorithm) {
            state.message_buffer_is_full = true;
            return Err(nb::Error::WouldBlock);
        }
        self.process_buffer(state);
        Ok(())
    }

    /// Drives the finalization state machine (terminator byte, zero padding,
    /// message-length field, digest readout) and writes the digest into
    /// `output`.
    ///
    /// May return `WouldBlock` at any step; calling again resumes from the
    /// recorded `finalize_state`. On success, `state` is reset so the same
    /// driver can hash a fresh message.
    fn finish(&self, state: &mut DigestState, output: &mut [u8]) -> nb::Result<(), Infallible> {
        // Flush a block left pending by `write_data` before padding.
        if state.message_buffer_is_full {
            if self.is_busy(state.algorithm) {
                return Err(nb::Error::WouldBlock);
            }
            self.process_buffer(state);
            state.message_buffer_is_full = false;
        }
        let chunk_len = state.algorithm.chunk_length();
        if state.finalize_state == FinalizeState::NotStarted {
            // Capture the true message size before appending the mandatory
            // 0x80 terminator; `update` advances `cursor` past it. (Both
            // statements below intentionally share one line.)
            let cursor = state.cursor;
            self.update(state, &[0x80])?; state.finished_message_size = cursor;
            state.finalize_state = FinalizeState::FlushAlignBuffer;
        }
        if state.finalize_state == FinalizeState::FlushAlignBuffer {
            // Push any bytes still held by the alignment helper out to the
            // message memory.
            let flushed = state
                .alignment_helper
                .flush_to(m_mem(&self.sha, 0), state.cursor % chunk_len);
            state.finalize_state = FinalizeState::ZeroPadAlmostFull;
            if flushed > 0 {
                state.cursor += flushed;
                if state.cursor.is_multiple_of(chunk_len) {
                    // Flushing completed a block; hash it before padding.
                    self.process_buffer_or_wait(state)?;
                }
            }
        }
        let mut mod_cursor = state.cursor % chunk_len;
        if state.finalize_state == FinalizeState::ZeroPadAlmostFull {
            state.finalize_state = FinalizeState::WriteMessageLength;
            let pad_len = chunk_len - mod_cursor;
            // If the message-length field does not fit into the remainder of
            // this block, zero-fill it and hash one extra block.
            if pad_len < state.algorithm.message_length_bytes() {
                state.alignment_helper.volatile_write(
                    m_mem(&self.sha, 0),
                    0_u8,
                    pad_len,
                    mod_cursor,
                );
                state.cursor += pad_len;
                self.process_buffer_or_wait(state)?;
                mod_cursor = 0;
            }
        }
        if state.finalize_state == FinalizeState::WriteMessageLength {
            // Zero-pad up to the length field, then append the message length
            // in *bits* as a big-endian value. Only a u64 is written even
            // when `message_length_bytes()` is 16; the extra leading bytes
            // are covered by the zero padding, matching FIPS 180-4.
            let message_len_bytes = size_of::<u64>();
            let pad_len = chunk_len - mod_cursor - message_len_bytes;
            state
                .alignment_helper
                .volatile_write(m_mem(&self.sha, 0), 0, pad_len, mod_cursor);
            let length = state.finished_message_size as u64 * 8;
            state.alignment_helper.aligned_volatile_copy(
                m_mem(&self.sha, 0),
                &length.to_be_bytes(),
                chunk_len,
                chunk_len - message_len_bytes,
            );
            state.finalize_state = FinalizeState::ReadResult;
            self.process_buffer_or_wait(state)?;
        }
        if state.finalize_state == FinalizeState::ReadResult {
            if state.algorithm.is_busy(&self.sha) {
                return Err(nb::Error::WouldBlock);
            }
            // On the ESP32 the digest must be explicitly loaded into the
            // output registers first; wait for that load to complete.
            if state.algorithm.load(&self.sha) {
                while self.is_busy(state.algorithm) {}
            }
            // NOTE(review): the read count is capped at 32. If that count is
            // in bytes, digests longer than 32 bytes (SHA-384/512) would be
            // truncated here — confirm the unit `volatile_read_regset`
            // expects.
            state.alignment_helper.volatile_read_regset(
                h_mem(&self.sha, 0),
                output,
                core::cmp::min(output.len(), 32),
            );
            // Reset so a fresh message can be hashed with this driver.
            state.first_run = true;
            state.cursor = 0;
            state.alignment_helper.reset();
            state.finalize_state = FinalizeState::NotStarted;
            return Ok(());
        }
        Err(nb::Error::WouldBlock)
    }

    /// Feeds message bytes to the hardware; returns the unconsumed tail.
    /// Calling `update` aborts any in-progress finalization.
    fn update<'a>(
        &self,
        state: &mut DigestState,
        incoming: &'a [u8],
    ) -> nb::Result<&'a [u8], Infallible> {
        state.finalize_state = FinalizeState::default();
        self.write_data(state, incoming)
    }
}
impl crate::private::Sealed for Sha<'_> {}

// Interrupt binding is only available on chips with SHA DMA support.
#[cfg(sha_dma)]
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for Sha<'_> {
    fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
        // Unbind any previously routed handler before installing the new one.
        self.sha.disable_peri_interrupt_on_all_cores();
        self.sha.bind_peri_interrupt(handler);
    }
}
/// An in-progress hash computation over a [`Sha`] driver.
///
/// `S` is anything that can mutably borrow the driver (`&mut Sha` or an
/// owned `Sha`); `A` selects the algorithm at the type level.
pub struct ShaDigest<'d, A, S: BorrowMut<Sha<'d>>> {
    sha: S,
    // Software-side bookkeeping; the hardware holds the digest registers.
    state: DigestState,
    phantom: PhantomData<(&'d (), A)>,
}

/// Steps of the resumable finalization state machine in `Sha::finish`.
/// Each step may return `WouldBlock` and is re-entered on the next call.
#[derive(Clone, Copy, Debug, PartialEq, Default)]
enum FinalizeState {
    /// Finalization has not begun; the 0x80 terminator is not yet written.
    #[default]
    NotStarted,
    /// Flush bytes still held in the alignment buffer to message memory.
    FlushAlignBuffer,
    /// Zero-fill and hash an extra block when the length field doesn't fit.
    ZeroPadAlmostFull,
    /// Write zero padding plus the 64-bit message bit-length, then hash.
    WriteMessageLength,
    /// Wait for the hardware, then copy the digest out.
    ReadResult,
}
/// Software state of one in-flight digest computation.
#[derive(Clone, Debug)]
struct DigestState {
    algorithm: ShaAlgorithmKind,
    // Buffers sub-word writes so message memory is always written in whole,
    // correctly-ordered `u32`s.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total bytes written so far (including padding once finalization runs).
    cursor: usize,
    // `true` until the first block is submitted (selects START vs CONTINUE).
    first_run: bool,
    // Message length in bytes at the moment finalization began; used for the
    // trailing message-length field.
    finished_message_size: usize,
    // Set when a block was filled while the hardware was busy; the pending
    // block must be submitted before more data can be accepted.
    message_buffer_is_full: bool,
    finalize_state: FinalizeState,
}
impl DigestState {
    /// Builds the initial state for a fresh digest using `algorithm`:
    /// empty alignment buffer, cursor at zero, finalization not started.
    fn new(algorithm: ShaAlgorithmKind) -> Self {
        let alignment_helper = AlignmentHelper::default();
        Self {
            cursor: 0,
            finished_message_size: 0,
            first_run: true,
            message_buffer_is_full: false,
            finalize_state: FinalizeState::default(),
            alignment_helper,
            algorithm,
        }
    }
}
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> ShaDigest<'d, A, S> {
    /// Creates a new digest over `sha` and selects algorithm `A` in hardware.
    #[allow(unused_mut)]
    pub fn new(mut sha: S) -> Self {
        // On all chips except the ESP32 the algorithm is selected via the
        // MODE register (the ESP32 uses per-algorithm registers instead).
        #[cfg(not(esp32))]
        sha.borrow_mut()
            .sha
            .register_block()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::ALGORITHM_KIND.mode_bits()) });
        Self {
            sha,
            state: DigestState::new(A::ALGORITHM_KIND),
            phantom: PhantomData,
        }
    }

    /// Recreates a digest from state previously captured with
    /// [`save`](Self::save), restoring message memory and digest registers.
    #[cfg(not(esp32))]
    pub fn restore(mut sha: S, ctx: &mut Context<A>) -> Self {
        // Re-select the algorithm; another digest may have changed MODE.
        sha.borrow_mut()
            .sha
            .register_block()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::ALGORITHM_KIND.mode_bits()) });
        // Restore the partially written message block. The count is in
        // `u32` units: 32 words = 128 bytes, the largest block size.
        unsafe {
            core::ptr::copy_nonoverlapping(
                ctx.buffer.as_ptr(),
                m_mem(&sha.borrow_mut().sha, 0),
                32,
            );
        }
        // Restore the intermediate digest (64 bytes of H memory).
        ctx.state.alignment_helper.volatile_write_regset(
            h_mem(&sha.borrow_mut().sha, 0),
            &ctx.saved_digest,
            64,
        );
        Self {
            sha,
            state: ctx.state.clone(),
            phantom: PhantomData,
        }
    }

    /// Returns whether the hardware is currently processing a block.
    pub fn is_busy(&self) -> bool {
        A::ALGORITHM_KIND.is_busy(&self.sha.borrow().sha)
    }

    /// Feeds message bytes in; returns the unconsumed remainder of
    /// `incoming` (call again to continue), or `WouldBlock`.
    pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        self.sha.borrow_mut().update(&mut self.state, incoming)
    }

    /// Finishes the computation and writes the digest into `output`.
    /// Resumable: may return `WouldBlock` repeatedly until done.
    pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
        self.sha.borrow_mut().finish(&mut self.state, output)
    }

    /// Captures the in-progress computation into `context` so it can later
    /// be resumed with [`restore`](Self::restore).
    #[cfg(not(esp32))]
    pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }
        context.state = self.state.clone();
        // Snapshot the intermediate digest registers (64 bytes).
        self.state.alignment_helper.volatile_read_regset(
            h_mem(&self.sha.borrow_mut().sha, 0),
            &mut context.saved_digest,
            64,
        );
        // Snapshot the message memory (32 `u32` words = 128 bytes).
        unsafe {
            core::ptr::copy_nonoverlapping(
                m_mem(&self.sha.borrow_mut().sha, 0),
                context.buffer.as_mut_ptr(),
                32,
            );
        }
        Ok(())
    }

    /// Discards the computation and returns the underlying driver.
    pub fn cancel(self) -> S {
        self.sha
    }
}
/// Saved state of a digest computation, allowing it to be suspended and
/// resumed later (e.g. to interleave multiple hashes on one peripheral).
#[cfg(not(esp32))]
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
    state: DigestState,
    // Snapshot of the (partially filled) message memory, as 32 words.
    buffer: [u32; 32],
    // Snapshot of the intermediate digest registers.
    saved_digest: [u8; 64],
    phantom: PhantomData<A>,
}
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Context<A> {
    /// Creates an empty, zeroed context for algorithm `A`.
    pub fn new() -> Self {
        let state = DigestState::new(A::ALGORITHM_KIND);
        Self {
            buffer: [0; 32],
            saved_digest: [0; 64],
            phantom: PhantomData,
            state,
        }
    }

    /// Returns whether no block has been submitted to the hardware yet.
    pub fn first_run(&self) -> bool {
        self.state.first_run
    }
}
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
    /// Equivalent to `Context::new`.
    fn default() -> Self {
        Self::new()
    }
}
/// Interface implemented by each supported SHA algorithm marker type.
pub trait ShaAlgorithm: crate::private::Sealed {
    /// The algorithm name (the marker type's name, e.g. `"Sha256"`).
    const ALGORITHM: &'static str;
    /// Runtime counterpart of this marker type.
    const ALGORITHM_KIND: ShaAlgorithmKind;
    /// Message block size in bytes.
    const CHUNK_LENGTH: usize;
    /// Digest size in bytes.
    const DIGEST_LENGTH: usize;
    #[doc(hidden)]
    type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;
}
// RustCrypto `digest` trait plumbing so `ShaDigest` works as a drop-in
// (blocking) `Digest` implementation.
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}

impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
    type OutputSize = A::DigestOutputSize;
}

impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
    fn update(&mut self, mut remaining: &[u8]) {
        // Busy-wait until the whole input has been consumed; the inherent
        // `update` returns the unconsumed tail on each iteration.
        while !remaining.is_empty() {
            remaining = nb::block!(Self::update(self, remaining)).unwrap();
        }
    }
}

impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
    fn finalize_into(mut self, out: &mut digest::Output<Self>) {
        // Busy-wait until the digest has been produced and read out.
        nb::block!(self.finish(out)).unwrap();
    }
}
// Generates `ShaAlgorithmKind` with one variant per algorithm supported by
// the current chip, driven by per-chip metadata.
for_each_sha_algorithm! {
    (algos $( ( $name:ident, $full_name:literal $sizes:tt $security:tt, $mode_bits:literal ) ),*) => {
        #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        #[non_exhaustive]
        pub enum ShaAlgorithmKind {
            $(
                #[doc = concat!("The ", $full_name, " algorithm.")]
                $name,
            )*
        }
    };
}
// Expands per-algorithm metadata into `ShaAlgorithmKind`'s size and mode
// accessors (block size, digest size, length-field size, MODE bits).
for_each_sha_algorithm! {
    (algos $( ( $name:ident, $full_name:literal (sizes: $block_size:literal, $digest_len:literal, $message_length_bytes:literal) $security:tt, $mode_bits:literal ) ),*) => {
        impl ShaAlgorithmKind {
            // Value written to the MODE register to select this algorithm
            // (not used on the ESP32, which has per-algorithm registers).
            #[cfg(not(esp32))]
            const fn mode_bits(self) -> u8 {
                match self {
                    $(ShaAlgorithmKind::$name => $mode_bits,)*
                }
            }
            // Message block size in bytes.
            const fn chunk_length(self) -> usize {
                match self {
                    $(ShaAlgorithmKind::$name => $block_size,)*
                }
            }
            // Size in bytes reserved for the trailing message-length field.
            const fn message_length_bytes(self) -> usize {
                match self {
                    $(ShaAlgorithmKind::$name => $message_length_bytes,)*
                }
            }
            // Digest size in bytes.
            const fn digest_length(self) -> usize {
                match self {
                    $(ShaAlgorithmKind::$name => $digest_len,)*
                }
            }
        }
    };
}
impl ShaAlgorithmKind {
    /// Starts hashing the first message block (initializes the digest).
    fn start(self, sha: &crate::peripherals::SHA<'_>) {
        let regs = sha.register_block();
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                // The ESP32 has a dedicated start register per algorithm.
                match self {
                    ShaAlgorithmKind::Sha1 => regs.sha1_start().write(|w| w.sha1_start().set_bit()),
                    ShaAlgorithmKind::Sha256 => regs.sha256_start().write(|w| w.sha256_start().set_bit()),
                    ShaAlgorithmKind::Sha384 => regs.sha384_start().write(|w| w.sha384_start().set_bit()),
                    ShaAlgorithmKind::Sha512 => regs.sha512_start().write(|w| w.sha512_start().set_bit()),
                };
            } else {
                // Later chips share one START register; the algorithm was
                // already selected via the MODE register.
                regs.start().write(|w| w.start().set_bit());
            }
        }
    }

    /// Continues hashing with the next message block, resuming from the
    /// intermediate digest.
    fn r#continue(self, sha: &crate::peripherals::SHA<'_>) {
        let regs = sha.register_block();
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                match self {
                    ShaAlgorithmKind::Sha1 => regs.sha1_continue().write(|w| w.sha1_continue().set_bit()),
                    ShaAlgorithmKind::Sha256 => regs.sha256_continue().write(|w| w.sha256_continue().set_bit()),
                    ShaAlgorithmKind::Sha384 => regs.sha384_continue().write(|w| w.sha384_continue().set_bit()),
                    ShaAlgorithmKind::Sha512 => regs.sha512_continue().write(|w| w.sha512_continue().set_bit()),
                };
            } else {
                regs.continue_().write(|w| w.continue_().set_bit());
            }
        }
    }

    /// On the ESP32, triggers copying the digest into the readable TEXT
    /// registers and returns `true` (the caller must then wait until the
    /// peripheral is idle). On other chips H memory is directly readable,
    /// so this is a no-op returning `false`.
    fn load(self, _sha: &crate::peripherals::SHA<'_>) -> bool {
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                let regs = _sha.register_block();
                match self {
                    ShaAlgorithmKind::Sha1 => regs.sha1_load().write(|w| w.sha1_load().set_bit()),
                    ShaAlgorithmKind::Sha256 => regs.sha256_load().write(|w| w.sha256_load().set_bit()),
                    ShaAlgorithmKind::Sha384 => regs.sha384_load().write(|w| w.sha384_load().set_bit()),
                    ShaAlgorithmKind::Sha512 => regs.sha512_load().write(|w| w.sha512_load().set_bit()),
                };
                true
            } else {
                false
            }
        }
    }

    /// Returns whether the peripheral is busy hashing for this algorithm.
    fn is_busy(self, sha: &crate::peripherals::SHA<'_>) -> bool {
        let regs = sha.register_block();
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                let bit = match self {
                    ShaAlgorithmKind::Sha1 => regs.sha1_busy().read().sha1_busy(),
                    ShaAlgorithmKind::Sha256 => regs.sha256_busy().read().sha256_busy(),
                    ShaAlgorithmKind::Sha384 => regs.sha384_busy().read().sha384_busy(),
                    ShaAlgorithmKind::Sha512 => regs.sha512_busy().read().sha512_busy(),
                };
            } else {
                let bit = regs.busy().read().state();
            }
        }
        // `cfg_if!` splices the selected branch's statements into this
        // function body, so the `let bit` binding is in scope here.
        bit.bit_is_set()
    }
}
// Generates one marker type per supported algorithm (e.g. `Sha256`) and
// wires it up to `ShaAlgorithm`, including doc warnings for algorithms
// with known practical attacks.
for_each_sha_algorithm! {
    ( $name:ident, $full_name:literal (sizes: $block_size:literal, $digest_len:literal, $message_length_bytes:literal) (insecure_against: $($attack_kind:literal),*), $mode_bits:literal ) => {
        #[doc = concat!("Hardware-accelerated ", $full_name, " implementation")]
        $(
            #[doc = ""]
            #[doc = concat!(" > ⚠️ Note that this algorithm is known to be insecure against ", $attack_kind, " attacks.")]
        )*
        #[non_exhaustive]
        pub struct $name;
        impl crate::private::Sealed for $name {}
        impl ShaAlgorithm for $name {
            const ALGORITHM: &'static str = stringify!($name);
            const ALGORITHM_KIND: ShaAlgorithmKind = ShaAlgorithmKind::$name;
            const CHUNK_LENGTH: usize = Self::ALGORITHM_KIND.chunk_length();
            const DIGEST_LENGTH: usize = Self::ALGORITHM_KIND.digest_length();
            type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_len >]);
        }
    };
}
/// Returns a raw pointer to the `index`-th word of the digest ("H") memory.
/// On the ESP32, digest and message share the TEXT register block.
fn h_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
    let sha = sha.register_block();
    cfg_if::cfg_if! {
        if #[cfg(esp32)] {
            sha.text(index).as_ptr()
        } else {
            sha.h_mem(index).as_ptr()
        }
    }
}
/// Returns a raw pointer to the `index`-th word of the message ("M") memory.
/// On the ESP32, digest and message share the TEXT register block.
fn m_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
    let sha = sha.register_block();
    cfg_if::cfg_if! {
        if #[cfg(esp32)] {
            sha.text(index).as_ptr()
        } else {
            sha.m_mem(index).as_ptr()
        }
    }
}
/// A work item posted to the SHA work queue.
#[derive(Clone)]
struct ShaOperation {
    operation: ShaOperationKind,
    // For `Update`: the context's carry-over buffer (tail bytes that did
    // not fill a whole block). For `Finalize`: the digest output buffer.
    buffer: NonNull<u8>,
    // Number of valid carry-over bytes (always less than a block).
    buffered_bytes: u8,
    // The caller's input slice for this operation.
    message: NonNull<[u8]>,
    // Saved intermediate digest words, restored into H memory before the
    // backend resumes this digest.
    #[cfg(not(esp32))]
    hw_state: NonNull<[u32]>,
    state: DigestState,
}

impl ShaOperation {
    /// Resets the operation so the context can start a fresh digest
    /// (called after finalization completes).
    fn reset(&mut self) {
        self.state = DigestState::new(self.state.algorithm);
        self.buffered_bytes = 0;
    }
}

// SAFETY: the raw pointers refer to memory owned by the posting
// `ShaContext`, which stays borrowed (alive and not aliased elsewhere)
// until the queued operation completes.
unsafe impl Sync for ShaOperation {}
unsafe impl Send for ShaOperation {}
/// Queue shared between the frontends (contexts) and the backend driver.
static SHA_WORK_QUEUE: WorkQueue<ShaOperation> = WorkQueue::new();

/// Work-queue callbacks for the blocking SHA backend.
const BLOCKING_SHA_VTABLE: VTable<ShaOperation> = VTable {
    post: |driver, item| {
        let driver = unsafe { ShaBackend::from_raw(driver) };
        // Construct the `Sha` driver lazily, on the first posted item.
        if let DriverState::Uninitialized(sha) = &driver.driver {
            driver.driver = DriverState::Initialized(Sha::new(unsafe { sha.clone_unchecked() }));
        };
        Some(driver.process(item))
    },
    poll: |driver, item| {
        let driver = unsafe { ShaBackend::from_raw(driver) };
        driver.process(item)
    },
    cancel: |_driver, _item| {
        // An operation the hardware has started cannot be aborted; it
        // simply runs to completion.
    },
    stop: |driver| {
        let driver = unsafe { ShaBackend::from_raw(driver) };
        driver.deinitialize()
    },
};
/// Tracks backend progress across multiple polls of a single work item.
struct ProcessingState {
    // True while an item has been started but not yet completed.
    message_partially_processed: bool,
    // Bytes of the item's message already fed to the hardware.
    message_bytes_processed: usize,
}

/// Lazy-initialization state of the backend's `Sha` driver.
enum DriverState<'d> {
    Uninitialized(SHA<'d>),
    Initialized(Sha<'d>),
}

/// The two kinds of work items a context can post.
#[derive(Clone, Copy, PartialEq)]
enum ShaOperationKind {
    Update,
    Finalize,
}

/// Backend that services SHA operations posted to the global work queue.
#[procmacros::doc_replace]
pub struct ShaBackend<'d> {
    driver: DriverState<'d>,
    processing_state: ProcessingState,
}
impl<'d> ShaBackend<'d> {
    /// Creates a backend for the given peripheral. The `Sha` driver itself
    /// is constructed lazily, when the first work item is posted.
    pub fn new(sha: SHA<'d>) -> Self {
        Self {
            driver: DriverState::Uninitialized(sha),
            processing_state: ProcessingState {
                message_partially_processed: false,
                message_bytes_processed: 0,
            },
        }
    }

    /// Registers this backend to service the global SHA work queue for the
    /// lifetime of the returned driver object.
    pub fn start(&mut self) -> ShaWorkQueueDriver<'_, 'd> {
        ShaWorkQueueDriver {
            inner: WorkQueueDriver::new(self, BLOCKING_SHA_VTABLE, &SHA_WORK_QUEUE),
        }
    }

    /// Recovers `&mut Self` from the type-erased pointer the work queue
    /// hands to the vtable callbacks.
    ///
    /// # Safety
    /// `ptr` must be the backend pointer registered via
    /// `WorkQueueDriver::new`, with no other live reference to it.
    unsafe fn from_raw<'any>(ptr: NonNull<()>) -> &'any mut Self {
        unsafe { ptr.cast::<ShaBackend<'_>>().as_mut() }
    }

    /// Dispatches one poll of a queued operation by its kind.
    fn process(&mut self, item: &mut ShaOperation) -> Poll {
        if item.operation == ShaOperationKind::Update {
            self.process_update(item)
        } else {
            self.process_finalize(item)
        }
    }

    /// Re-selects the algorithm and, unless this is the first block of the
    /// message, writes the item's saved intermediate digest back into
    /// H memory.
    #[cfg(not(esp32))]
    fn restore_state(driver: &mut Sha<'_>, item: &ShaOperation) {
        driver
            .sha
            .register_block()
            .mode()
            .write(|w| unsafe { w.mode().bits(item.state.algorithm.mode_bits()) });
        if !item.state.first_run {
            for (i, reg) in driver.sha.register_block().h_mem_iter().enumerate() {
                reg.write(|w| unsafe { w.bits(item.hw_state.as_ref()[i]) });
            }
        }
    }

    /// Polls an `Update` operation: restores hardware state on the first
    /// poll, streams the message across polls, then saves the intermediate
    /// digest and buffers any remaining sub-block tail.
    fn process_update(&mut self, item: &mut ShaOperation) -> Poll {
        // The `post` callback initializes the driver before `process` runs.
        let driver = if let DriverState::Initialized(sha) = &mut self.driver {
            sha
        } else {
            unreachable!()
        };
        if !self.processing_state.message_partially_processed {
            // First poll of this item: restore state and replay the bytes
            // buffered by the frontend since the previous operation.
            self.processing_state = ProcessingState {
                message_partially_processed: true,
                message_bytes_processed: 0,
            };
            #[cfg(not(esp32))]
            Self::restore_state(driver, item);
            let buffered = unsafe {
                core::slice::from_raw_parts(item.buffer.as_ptr(), item.buffered_bytes as usize)
            };
            debug!(
                "update: restored state with {} buffered bytes",
                buffered.len()
            );
            debug_assert!(buffered.len() < item.state.algorithm.chunk_length());
            // Strictly less than one block, so this cannot block for long.
            nb::block!(driver.write_data(&mut item.state, buffered)).unwrap();
        }
        let remaining_message =
            unsafe { &item.message.as_ref()[self.processing_state.message_bytes_processed..] };
        // Keep feeding while at least a whole block remains (or nothing has
        // been consumed yet); a final sub-block tail is buffered instead.
        if remaining_message.len() >= item.state.algorithm.chunk_length()
            || self.processing_state.message_bytes_processed == 0
        {
            if let Ok(remaining) = driver.write_data(&mut item.state, remaining_message) {
                let total_consumed = item.message.len() - remaining.len();
                self.processing_state.message_bytes_processed = total_consumed;
            }
            return Poll::Pending(true);
        } else {
            // Wait for the last submitted block before reading H memory.
            #[cfg(not(esp32))]
            if driver.is_busy(item.state.algorithm) {
                return Poll::Pending(true);
            }
        }
        // Save the intermediate digest so other operations (possibly for a
        // different context) can use the hardware in between.
        #[cfg(not(esp32))]
        if !item.state.first_run {
            for (i, reg) in driver.sha.register_block().h_mem_iter().enumerate() {
                unsafe { item.hw_state.as_mut()[i] = reg.read().bits() };
            }
        }
        // Stash the unprocessed tail (< one block, so it fits in `u8`) into
        // the frontend's carry-over buffer.
        if !remaining_message.is_empty() {
            debug!(
                "Writing back {} unprocessed bytes to buffer",
                remaining_message.len()
            );
            unsafe {
                core::ptr::copy_nonoverlapping(
                    remaining_message.as_ptr(),
                    item.buffer.as_ptr(),
                    remaining_message.len(),
                );
            }
        }
        item.buffered_bytes = remaining_message.len() as u8;
        self.processing_state.message_partially_processed = false;
        Poll::Ready(Status::Completed)
    }

    /// Polls a `Finalize` operation: replays buffered bytes on the first
    /// poll, then drives `Sha::finish` until the digest has been written
    /// into the item's output buffer.
    fn process_finalize(&mut self, item: &mut ShaOperation) -> Poll {
        let driver = if let DriverState::Initialized(sha) = &mut self.driver {
            sha
        } else {
            unreachable!()
        };
        if !self.processing_state.message_partially_processed {
            self.processing_state.message_partially_processed = true;
            #[cfg(not(esp32))]
            Self::restore_state(driver, item);
            // For finalize, `message` points at the frontend's carry-over
            // buffer and `buffer` is the digest output.
            let buffered = unsafe { item.message.as_ref() };
            debug!(
                "finalize: restored state with {} buffered bytes",
                buffered.len()
            );
            debug_assert!(buffered.len() < item.state.algorithm.chunk_length());
            nb::block!(driver.write_data(&mut item.state, buffered)).unwrap();
        }
        let result = unsafe {
            core::slice::from_raw_parts_mut(
                item.buffer.as_ptr(),
                item.state.algorithm.digest_length(),
            )
        };
        if driver.finish(&mut item.state, result).is_err() {
            // WouldBlock: poll again later; `finish` resumes its state
            // machine where it left off.
            return Poll::Pending(true);
        }
        self.processing_state.message_partially_processed = false;
        // Make the operation ready for a fresh message.
        item.reset();
        Poll::Ready(Status::Completed)
    }

    /// Drops the `Sha` driver (releasing its peripheral guard), keeping the
    /// raw peripheral so the backend can be started again later.
    fn deinitialize(&mut self) {
        if let DriverState::Initialized(ref sha) = self.driver {
            self.driver = DriverState::Uninitialized(unsafe { sha.sha.clone_unchecked() });
        }
    }
}
/// Keeps the SHA backend registered with the work queue while alive.
pub struct ShaWorkQueueDriver<'t, 'd> {
    inner: WorkQueueDriver<'t, ShaBackend<'d>, ShaOperation>,
}

impl<'t, 'd> ShaWorkQueueDriver<'t, 'd> {
    /// Deregisters the backend; the returned future resolves once the
    /// underlying work-queue driver has stopped.
    pub fn stop(self) -> impl Future<Output = ()> {
        self.inner.stop()
    }
}
// On the ESP32 the SHA peripheral cannot be shared between concurrent
// digests, so additional live contexts fall back to software hashing.
#[cfg(esp32)]
enum SoftwareHasher {
    Sha1(sha1::Sha1),
    Sha256(sha2::Sha256),
    Sha384(sha2::Sha384),
    Sha512(sha2::Sha512),
}

/// Frontend state for one digest computation posted via the work queue.
#[cfg_attr(not(esp32), derive(Clone))]
struct ShaContext<const CHUNK_BYTES: usize, const DIGEST_WORDS: usize> {
    frontend: WorkQueueFrontend<ShaOperation>,
    // Carry-over storage for input bytes that don't fill a whole block.
    buffer: [u8; CHUNK_BYTES],
    // Saved intermediate digest registers (16 words covers all algorithms).
    #[cfg(not(esp32))] state: [u32; 16],
    // `Some` when this context must hash in software because another live
    // context already owns the hardware accelerator.
    #[cfg(esp32)]
    use_software: Option<SoftwareHasher>,
}

#[cfg(esp32)]
use portable_atomic::{AtomicBool, Ordering};
// Whether some live context currently owns the hardware accelerator.
#[cfg(esp32)]
static ACCELERATOR_IN_USE: AtomicBool = AtomicBool::new(false);
impl<const CHUNK_BYTES: usize, const DIGEST_WORDS: usize> ShaContext<CHUNK_BYTES, DIGEST_WORDS> {
    /// Creates a context for `algorithm`. On the ESP32 this claims the
    /// hardware accelerator if free, otherwise falls back to software.
    fn new(algorithm: ShaAlgorithmKind) -> Self {
        #[cfg(esp32)]
        let use_software = if ACCELERATOR_IN_USE.swap(true, Ordering::SeqCst) {
            // Another live context owns the accelerator; hash in software.
            let hasher = match algorithm {
                ShaAlgorithmKind::Sha1 => SoftwareHasher::Sha1(sha1::Sha1::new()),
                ShaAlgorithmKind::Sha256 => SoftwareHasher::Sha256(sha2::Sha256::new()),
                ShaAlgorithmKind::Sha384 => SoftwareHasher::Sha384(sha2::Sha384::new()),
                ShaAlgorithmKind::Sha512 => SoftwareHasher::Sha512(sha2::Sha512::new()),
            };
            Some(hasher)
        } else {
            None
        };
        Self {
            frontend: WorkQueueFrontend::new(ShaOperation {
                operation: ShaOperationKind::Update,
                // Placeholder pointers; the real ones are filled in by
                // `update`/`finalize` immediately before posting.
                buffer: NonNull::dangling(),
                message: NonNull::from(&mut []),
                #[cfg(not(esp32))]
                hw_state: NonNull::from(&mut []),
                buffered_bytes: 0,
                state: DigestState::new(algorithm),
            }),
            buffer: [0; CHUNK_BYTES],
            #[cfg(not(esp32))]
            state: [0; 16],
            #[cfg(esp32)]
            use_software,
        }
    }

    /// Queues (or, in software mode, directly performs) hashing of `data`.
    ///
    /// If `data` fits into the space left in the carry-over buffer it is
    /// only copied and the returned handle is already completed; no work
    /// item reaches the backend.
    fn update<'t>(&'t mut self, data: &'t [u8]) -> ShaHandle<'t> {
        debug!(
            "Update {:?} with {} bytes",
            self.frontend.data_mut().state.algorithm,
            data.len()
        );
        #[cfg(esp32)]
        if let Some(hasher) = self.use_software.as_mut() {
            Self::update_using_software(hasher, data);
            return ShaHandle(self.frontend.post_completed(&SHA_WORK_QUEUE));
        }
        let op_data = self.frontend.data_mut();
        let buffered = op_data.buffered_bytes as usize;
        if data.len() + buffered < CHUNK_BYTES {
            // Sub-block input: just append to the carry-over buffer.
            op_data.buffered_bytes += data.len() as u8;
            op_data.message = NonNull::from(data);
            self.buffer[buffered..][..data.len()].copy_from_slice(data);
            return ShaHandle(self.frontend.post_completed(&SHA_WORK_QUEUE));
        }
        // Hand the input, carry-over buffer, and saved digest words to the
        // backend; they stay borrowed until the handle completes.
        op_data.operation = ShaOperationKind::Update;
        op_data.message = NonNull::from(data);
        op_data.buffer = NonNull::from(&mut self.buffer).cast();
        #[cfg(not(esp32))]
        {
            op_data.hw_state = NonNull::from(&mut self.state);
        }
        ShaHandle(self.frontend.post(&SHA_WORK_QUEUE))
    }

    /// Queues (or, in software mode, directly performs) finalization,
    /// writing the digest into `result`.
    ///
    /// Callers ensure `result` is at least the digest length; the backend
    /// writes exactly `digest_length()` bytes through the stored pointer.
    fn finalize<'t>(&'t mut self, result: &mut [u8]) -> ShaHandle<'t> {
        debug!(
            "Finalize {:?} into buffer of {} bytes",
            self.frontend.data_mut().state.algorithm,
            result.len()
        );
        #[cfg(esp32)]
        if let Some(hasher) = self.use_software.as_mut() {
            Self::finalize_using_software(hasher, result);
            return ShaHandle(self.frontend.post_completed(&SHA_WORK_QUEUE));
        }
        let op_data = self.frontend.data_mut();
        debug_assert!((op_data.buffered_bytes as usize) < op_data.state.algorithm.chunk_length());
        op_data.operation = ShaOperationKind::Finalize;
        // For finalize, `message` carries the buffered tail and `buffer`
        // points at the digest output.
        op_data.message = NonNull::from(&mut self.buffer[..op_data.buffered_bytes as usize]);
        op_data.buffer = NonNull::from(result).cast();
        #[cfg(not(esp32))]
        {
            op_data.hw_state = NonNull::from(&mut self.state);
        }
        ShaHandle(self.frontend.post(&SHA_WORK_QUEUE))
    }

    /// Software-fallback update (ESP32 only).
    #[cfg(esp32)]
    fn update_using_software(hasher: &mut SoftwareHasher, data: &[u8]) {
        match hasher {
            SoftwareHasher::Sha1(sha) => sha.update(data),
            SoftwareHasher::Sha256(sha) => sha.update(data),
            SoftwareHasher::Sha384(sha) => sha.update(data),
            SoftwareHasher::Sha512(sha) => sha.update(data),
        }
    }

    /// Software-fallback finalization (ESP32 only); resets the hasher so
    /// the context can be reused for a new message.
    #[cfg(esp32)]
    fn finalize_using_software(hasher: &mut SoftwareHasher, result: &mut [u8]) {
        match hasher {
            SoftwareHasher::Sha1(sha) => {
                let output = sha.finalize_reset();
                result.copy_from_slice(output.as_slice())
            }
            SoftwareHasher::Sha256(sha) => {
                let output = sha.finalize_reset();
                result.copy_from_slice(output.as_slice())
            }
            SoftwareHasher::Sha384(sha) => {
                let output = sha.finalize_reset();
                result.copy_from_slice(output.as_slice())
            }
            SoftwareHasher::Sha512(sha) => {
                let output = sha.finalize_reset();
                result.copy_from_slice(output.as_slice())
            }
        }
    }
}
#[cfg(esp32)]
impl<const CHUNK_BYTES: usize, const DIGEST_WORDS: usize> Drop
    for ShaContext<CHUNK_BYTES, DIGEST_WORDS>
{
    /// Releases the hardware accelerator when the owning context dies.
    ///
    /// Only the context that actually claimed the accelerator (i.e. one
    /// that is NOT using the software fallback) may clear the flag.
    /// Previously the flag was cleared unconditionally, so dropping a
    /// software-fallback context released the accelerator while another
    /// context still owned it, letting a subsequently created context
    /// claim the hardware concurrently.
    fn drop(&mut self) {
        if self.use_software.is_none() {
            ACCELERATOR_IN_USE.store(false, Ordering::Release);
        }
    }
}
/// Handle to a posted SHA work-queue operation.
///
/// Borrows the context (and, through the work item, its buffers) until the
/// operation completes.
pub struct ShaHandle<'t>(Handle<'t, ShaOperation>);

impl ShaHandle<'_> {
    /// Polls the operation's completion state (delegates to `Handle::poll`).
    #[inline]
    pub fn poll(&mut self) -> bool {
        self.0.poll()
    }

    /// Blocks until the operation completes and returns its final status.
    #[inline]
    pub fn wait_blocking(self) -> Status {
        self.0.wait_blocking()
    }

    /// Waits asynchronously for the operation to complete.
    #[inline]
    pub fn wait(&mut self) -> impl Future<Output = Status> {
        self.0.wait()
    }

    /// Requests cancellation of the queued operation.
    #[inline]
    pub fn cancel(&mut self) -> impl Future<Output = ()> {
        self.0.cancel()
    }
}
/// Errors that can occur when requesting finalization into a slice.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum FinalizeError {
    /// The output slice is shorter than the algorithm's digest length.
    BufferTooSmall,
}
// Generates a work-queue-based hasher context type for one algorithm,
// including blocking RustCrypto `digest` trait adapters.
macro_rules! impl_worker_context {
    ($name:ident, $full_name:literal, $algo:expr, $digest_len:literal, $block_size:literal ) => {
        #[doc = concat!("A ", $full_name, " context.")]
        #[cfg_attr(not(esp32), derive(Clone))]
        pub struct $name(ShaContext<{ $algo.chunk_length() }, { $algo.digest_length() / 4 }>);
        impl $name {
            /// Creates a new hasher context.
            pub fn new() -> Self {
                Self(ShaContext::new($algo))
            }
            /// Queues hashing of `data`; poll or wait on the returned
            /// handle to drive the operation to completion.
            pub fn update<'t>(&'t mut self, data: &'t [u8]) -> ShaHandle<'t> {
                self.0.update(data)
            }
            /// Queues finalization into an exactly digest-sized buffer.
            pub fn finalize<'t>(
                &'t mut self,
                result: &mut [u8; { $algo.digest_length() }],
            ) -> ShaHandle<'t> {
                self.0.finalize(result)
            }
            /// Queues finalization into a caller-provided slice, which must
            /// be at least the digest length.
            pub fn finalize_into_slice<'t>(
                &'t mut self,
                result: &mut [u8],
            ) -> Result<ShaHandle<'t>, FinalizeError> {
                if result.len() < $algo.digest_length() {
                    return Err(FinalizeError::BufferTooSmall);
                }
                Ok(self.0.finalize(result))
            }
        }
        impl Default for $name {
            fn default() -> Self {
                Self::new()
            }
        }
        // Blocking RustCrypto `digest` adapters.
        impl digest::HashMarker for $name {}
        impl digest::OutputSizeUser for $name {
            type OutputSize = paste::paste!(digest::consts::[< U $digest_len >]);
        }
        impl digest::Update for $name {
            fn update(&mut self, data: &[u8]) {
                Self::update(self, data).wait_blocking();
            }
        }
        impl digest::FixedOutput for $name {
            fn finalize_into(mut self, out: &mut digest::Output<Self>) {
                Self::finalize(&mut self, out.as_mut()).wait_blocking();
            }
        }
        impl digest::core_api::BlockSizeUser for $name {
            type BlockSize = paste::paste!(digest::consts::[< U $block_size >]);
            fn block_size() -> usize {
                $block_size
            }
        }
    };
}
// Instantiates an `<Algo>Context` worker type for every supported
// algorithm (e.g. `Sha256Context`).
for_each_sha_algorithm! {
    ( $name:ident, $full_name:literal (sizes: $block_size:literal, $digest_len:literal, $message_length_bytes:literal) $security:tt, $mode_bits:literal ) => {
        paste::paste! {
            impl_worker_context!( [<$name Context>], $full_name, ShaAlgorithmKind::$name, $digest_len, $block_size );
        }
    };
}