use core::convert::Infallible;
use crate::{
peripheral::{Peripheral, PeripheralRef},
peripherals::SHA,
reg_access::AlignmentHelper,
system::PeripheralClockControl,
};
/// Driver for the hardware SHA accelerator.
///
/// Holds the peripheral and the streaming-hash state accumulated across
/// `update` calls until `finish` extracts the digest.
pub struct Sha<'d> {
    sha: PeripheralRef<'d, SHA>,
    // Algorithm selected at construction; fixed for the driver's lifetime.
    mode: ShaMode,
    // Widens/aligns byte input to the peripheral's register word size.
    alignment_helper: AlignmentHelper,
    // Total message bytes fed so far; drives padding and the write offset
    // within the current hardware block.
    cursor: usize,
    // True until the first block is submitted; selects START vs CONTINUE.
    first_run: bool,
    // NOTE(review): only ever cleared (in `update`) in the visible code —
    // nothing here sets it true, so `finished()` cannot report true. Confirm
    // against the rest of the file / callers.
    finished: bool,
}
/// Hash algorithm run on the SHA peripheral.
///
/// Variant availability depends on the target chip (the SHA-512 family only
/// exists on chips with 128-byte message blocks).
#[derive(Debug, Clone, Copy)]
pub enum ShaMode {
    SHA1,
    #[cfg(not(esp32))]
    SHA224,
    SHA256,
    #[cfg(any(esp32s2, esp32s3, esp32))]
    SHA384,
    #[cfg(any(esp32s2, esp32s3, esp32))]
    SHA512,
    #[cfg(any(esp32s2, esp32s3))]
    SHA512_224,
    #[cfg(any(esp32s2, esp32s3))]
    SHA512_256,
}
/// Maps a `ShaMode` to the raw value written into the hardware MODE register.
///
/// Not compiled for the ESP32, which has no MODE register and instead selects
/// the algorithm via per-algorithm START/CONTINUE registers (see the
/// `cfg(esp32)` version of `process_buffer`).
#[cfg(not(esp32))]
fn mode_as_bits(mode: ShaMode) -> u8 {
    match mode {
        ShaMode::SHA1 => 0,
        ShaMode::SHA224 => 1,
        ShaMode::SHA256 => 2,
        // The SHA-512 family only exists on the S2/S3; on the RISC-V chips
        // these variants are compiled out, keeping the match exhaustive.
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA384 => 3,
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA512 => 4,
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA512_224 => 5,
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA512_256 => 6,
    }
}
impl<'d> Sha<'d> {
/// Creates a new SHA driver hashing with the given `mode`.
///
/// Enables the SHA peripheral clock and, on chips that have a shared MODE
/// register (everything but the ESP32), programs the selected algorithm.
pub fn new(sha: impl Peripheral<P = SHA> + 'd, mode: ShaMode) -> Self {
    crate::into_ref!(sha);
    // Gate the clock on before touching any SHA registers.
    PeripheralClockControl::enable(crate::system::Peripheral::Sha);
    // The ESP32 selects the algorithm through per-algorithm START/CONTINUE
    // registers in `process_buffer`, so there is no MODE write for it here.
    #[cfg(not(esp32))]
    sha.mode()
        .write(|w| unsafe { w.mode().bits(mode_as_bits(mode)) });
    Self {
        sha,
        mode,
        cursor: 0,
        first_run: true,
        finished: false,
        alignment_helper: AlignmentHelper::default(),
    }
}
pub fn first_run(&self) -> bool {
self.first_run
}
pub fn finished(&self) -> bool {
self.finished
}
/// Tells the accelerator to hash the block currently staged in message
/// memory: START for the first block of a message, CONTINUE afterwards so
/// the hardware keeps accumulating into the same digest state.
#[cfg(not(esp32))]
fn process_buffer(&mut self) {
    if !self.first_run {
        self.sha.continue_().write(|w| unsafe { w.bits(1) });
        return;
    }
    self.sha.start().write(|w| unsafe { w.bits(1) });
    self.first_run = false;
}
/// Tells the accelerator to hash the block currently staged in the TEXT
/// registers.
///
/// The ESP32 has a dedicated START and CONTINUE register per algorithm
/// (no shared MODE register): the first block of a message uses START,
/// subsequent blocks use CONTINUE.
#[cfg(esp32)]
fn process_buffer(&mut self) {
    if self.first_run {
        match self.mode {
            ShaMode::SHA1 => self.sha.sha1_start().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA256 => self.sha.sha256_start().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA384 => self.sha.sha384_start().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA512 => self.sha.sha512_start().write(|w| unsafe { w.bits(1) }),
        }
        self.first_run = false;
    } else {
        match self.mode {
            ShaMode::SHA1 => self.sha.sha1_continue().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA256 => self.sha.sha256_continue().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA384 => self.sha.sha384_continue().write(|w| unsafe { w.bits(1) }),
            ShaMode::SHA512 => self.sha.sha512_continue().write(|w| unsafe { w.bits(1) }),
        }
    }
}
/// Message block size in bytes for the current mode: 64 for the
/// SHA-1/SHA-224/SHA-256 family, 128 for the SHA-512 family.
fn chunk_length(&self) -> usize {
    match self.mode {
        ShaMode::SHA1 | ShaMode::SHA256 => 64,
        #[cfg(not(esp32))]
        ShaMode::SHA224 => 64,
        // All remaining variants are SHA-512-family modes (128-byte blocks);
        // on the RISC-V chips no such variants exist, so the catch-all arm is
        // compiled out and the match stays exhaustive.
        #[cfg(not(any(esp32c2, esp32c3, esp32c6, esp32h2)))]
        _ => 128,
    }
}
/// Polls whether the accelerator is still processing a block.
///
/// The ESP32 exposes one BUSY status register per algorithm, so the check
/// dispatches on the configured mode.
#[cfg(esp32)]
fn is_busy(&self) -> bool {
    match self.mode {
        ShaMode::SHA1 => self.sha.sha1_busy().read().sha1_busy().bit_is_set(),
        ShaMode::SHA256 => self.sha.sha256_busy().read().sha256_busy().bit_is_set(),
        ShaMode::SHA384 => self.sha.sha384_busy().read().sha384_busy().bit_is_set(),
        ShaMode::SHA512 => self.sha.sha512_busy().read().sha512_busy().bit_is_set(),
    }
}
/// Polls whether the accelerator is still processing a block: any nonzero
/// content in the shared BUSY register means work is in flight.
#[cfg(not(esp32))]
fn is_busy(&self) -> bool {
    match self.sha.busy().read().bits() {
        0 => false,
        _ => true,
    }
}
/// Digest size in bytes produced by the current mode (e.g. 20 for SHA-1,
/// 32 for SHA-256, 64 for SHA-512).
pub fn digest_length(&self) -> usize {
    match self.mode {
        ShaMode::SHA1 => 20,
        #[cfg(not(esp32))]
        ShaMode::SHA224 => 28,
        ShaMode::SHA256 => 32,
        #[cfg(any(esp32, esp32s2, esp32s3))]
        ShaMode::SHA384 => 48,
        #[cfg(any(esp32, esp32s2, esp32s3))]
        ShaMode::SHA512 => 64,
        // Truncated SHA-512 variants (S2/S3 only).
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA512_224 => 28,
        #[cfg(any(esp32s2, esp32s3))]
        ShaMode::SHA512_256 => 32,
    }
}
/// Flushes any partial word buffered in the alignment helper out to the
/// peripheral's message memory, and — if that flush lands exactly on a
/// block boundary — processes the completed block.
///
/// Returns `WouldBlock` while the accelerator is busy.
fn flush_data(&mut self) -> nb::Result<(), Infallible> {
    if self.is_busy() {
        return Err(nb::Error::WouldBlock);
    }
    let chunk_len = self.chunk_length();
    // Write the leftover bytes at the word offset corresponding to the
    // cursor's position within the current block. The ESP32 exposes the
    // message block as TEXT registers; later chips as M_MEM.
    // (presumably `flush_to` returns the number of bytes it wrote — the
    // helper's contract is not visible here; confirm in reg_access.)
    let flushed = self.alignment_helper.flush_to(
        #[cfg(esp32)]
        self.sha.text(0).as_ptr(),
        #[cfg(not(esp32))]
        self.sha.m_mem(0).as_ptr(),
        (self.cursor % chunk_len) / self.alignment_helper.align_size(),
    );
    self.cursor = self.cursor.wrapping_add(flushed);
    // If the flush completed a full block, hash it and wait until the
    // hardware is done so message memory is safe to overwrite.
    if flushed > 0 && self.cursor % chunk_len == 0 {
        self.process_buffer();
        while self.is_busy() {}
    }
    Ok(())
}
/// Copies `incoming` into the peripheral's message memory starting at the
/// cursor's offset within the current block, and triggers a hash round when
/// the block boundary is reached.
///
/// Returns the tail of `incoming` that did not fit into this block; the
/// caller is expected to resubmit it.
fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
    let mod_cursor = self.cursor % self.chunk_length();
    let chunk_len = self.chunk_length();
    // ESP32 exposes the message block as TEXT registers; later chips as
    // M_MEM. Offsets/lengths are in register-word units.
    let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
        #[cfg(esp32)]
        self.sha.text(0).as_ptr(),
        #[cfg(not(esp32))]
        self.sha.m_mem(0).as_ptr(),
        incoming,
        chunk_len / self.alignment_helper.align_size(),
        mod_cursor / self.alignment_helper.align_size(),
    );
    // Advance by the number of input bytes actually consumed.
    self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());
    if bound_reached {
        self.process_buffer();
    }
    Ok(remaining)
}
pub fn update<'a>(&mut self, buffer: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
self.finished = false;
let remaining = self.write_data(buffer)?;
Ok(remaining)
}
/// Finalizes the hash: applies SHA padding (0x80, zero fill, 64-bit
/// big-endian bit length), processes the final block(s) and copies the
/// digest into `output`.
///
/// Returns `WouldBlock` while the accelerator is busy. On success the
/// driver state is reset so a new message can be hashed.
///
/// `output` should be at least `digest_length()` bytes; a shorter buffer
/// receives a truncated digest and extra bytes are left untouched.
pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
    if self.is_busy() {
        return Err(nb::Error::WouldBlock);
    }
    let chunk_len = self.chunk_length();
    // Message length in *bits*, big-endian, captured before any padding
    // bytes advance the cursor.
    let length = (self.cursor as u64 * 8).to_be_bytes();
    // Append the mandatory "1" padding bit (0x80 byte), then flush any
    // partial word so the cursor is register-aligned.
    nb::block!(self.update(&[0x80]))?;
    nb::block!(self.flush_data())?;
    debug_assert!(self.cursor % 4 == 0);

    let mod_cursor = self.cursor % chunk_len;
    if (chunk_len - mod_cursor) < core::mem::size_of::<u64>() {
        // No room left in this block for the 64-bit length field: zero-pad
        // to the block end, hash it, and put the length in a fresh block.
        let pad_len = chunk_len - mod_cursor;
        self.alignment_helper.volatile_write_bytes(
            #[cfg(esp32)]
            self.sha.text(0).as_ptr(),
            #[cfg(not(esp32))]
            self.sha.m_mem(0).as_ptr(),
            0_u8,
            pad_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );
        self.process_buffer();
        self.cursor = self.cursor.wrapping_add(pad_len);
        debug_assert_eq!(self.cursor % chunk_len, 0);
        // Wait until the hardware has consumed the block before reusing
        // the message memory.
        while self.is_busy() {}
    }

    // Zero-fill up to the length field, then write the message bit length
    // into the last 8 bytes of the final block.
    let mod_cursor = self.cursor % chunk_len;
    let pad_len = chunk_len - mod_cursor - core::mem::size_of::<u64>();
    self.alignment_helper.volatile_write_bytes(
        #[cfg(esp32)]
        self.sha.text(0).as_ptr(),
        #[cfg(not(esp32))]
        self.sha.m_mem(0).as_ptr(),
        0_u8,
        pad_len / self.alignment_helper.align_size(),
        mod_cursor / self.alignment_helper.align_size(),
    );
    self.alignment_helper.aligned_volatile_copy(
        #[cfg(esp32)]
        self.sha.text(0).as_ptr(),
        #[cfg(not(esp32))]
        self.sha.m_mem(0).as_ptr(),
        &length,
        chunk_len / self.alignment_helper.align_size(),
        (chunk_len - core::mem::size_of::<u64>()) / self.alignment_helper.align_size(),
    );
    self.process_buffer();
    while self.is_busy() {}

    #[cfg(esp32)]
    {
        // The ESP32 needs an explicit LOAD so the digest is written back
        // into the TEXT register bank before it can be read out.
        match self.mode {
            ShaMode::SHA1 => unsafe { self.sha.sha1_load().write(|w| w.bits(1)) },
            ShaMode::SHA256 => unsafe { self.sha.sha256_load().write(|w| w.bits(1)) },
            ShaMode::SHA384 => unsafe { self.sha.sha384_load().write(|w| w.bits(1)) },
            ShaMode::SHA512 => unsafe { self.sha.sha512_load().write(|w| w.bits(1)) },
        }
        while self.is_busy() {}
    }

    // Copy out at most one full digest. BUGFIX: this was previously capped
    // at 32 bytes, silently truncating SHA-384 (48-byte) and SHA-512
    // (64-byte) digests even when `output` was large enough.
    self.alignment_helper.volatile_read_regset(
        #[cfg(esp32)]
        self.sha.text(0).as_ptr(),
        #[cfg(not(esp32))]
        self.sha.h_mem(0).as_ptr(),
        output,
        core::cmp::min(output.len(), self.digest_length()) / self.alignment_helper.align_size(),
    );

    // Mark completion (BUGFIX: `finished` was never set anywhere, so the
    // `finished()` getter could never report true) and reset the streaming
    // state so the driver can hash a fresh message.
    self.finished = true;
    self.first_run = true;
    self.cursor = 0;
    self.alignment_helper.reset();
    Ok(())
}
}