#![warn(missing_docs)]
pub mod smart_eeprom;
use crate::pac::Nvmctrl;
pub use crate::pac::nvmctrl::ctrla::Prmselect;
use crate::pac::nvmctrl::ctrlb::Cmdselect;
use core::num::NonZeroU32;
use core::ops::Range;
use core::ptr::addr_of;
use bitfield::bitfield;
#[inline(always)]
pub fn retrieve_flash_size() -> u32 {
static mut FLASHSIZE: Option<NonZeroU32> = None;
unsafe {
match FLASHSIZE {
Some(x) => x.into(),
None => {
let nvm = &*Nvmctrl::ptr();
let nvm_params = nvm.param().read();
if !nvm_params.psz().is_512() {
unreachable!("NVM page size is always expected to be 512 bytes");
}
let nvm_pages = nvm_params.nvmp().bits() as u32;
let flash_size = nvm_pages * 512;
FLASHSIZE = Some(NonZeroU32::new_unchecked(flash_size));
flash_size
}
}
}
}
/// Size of a single flash bank in bytes.
///
/// The device splits its flash into two equally sized banks, so this is
/// exactly half of [`retrieve_flash_size`].
#[inline(always)]
pub fn retrieve_bank_size() -> u32 {
    let total = retrieve_flash_size();
    total / 2
}
/// Size of a flash page in bytes.
pub const PAGESIZE: u32 = 512;
/// Size of a flash block in bytes (16 pages).
pub const BLOCKSIZE: u32 = 512 * 16;
/// Size of a quad-word (four 32-bit words) in bytes.
pub const QUADWORDSIZE: u32 = 16;
/// Driver for the non-volatile memory controller (NVMCTRL).
///
/// Owns the PAC peripheral and exposes safe(ish) wrappers around its
/// command interface: flash read/write/erase, user-page access, region
/// locking, bank swapping and SmartEEPROM retrieval.
pub struct Nvm {
    // Owned PAC peripheral; all register access goes through this handle.
    nvm: Nvmctrl,
}
/// Errors reported by the NVMCTRL peripheral itself.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PeripheralError {
    /// Generic NVM error.
    NvmError,
    /// Single-bit ECC error was detected.
    EccSingleError,
    /// Dual-bit ECC error was detected.
    EccDualError,
    /// A locked region was targeted (maps from INTFLAG.LOCKE).
    LockError,
    /// A programming operation failed (maps from INTFLAG.PROGE).
    ProgrammingError,
    /// An invalid address was targeted (maps from INTFLAG.ADDRE).
    AddressError,
}
/// Errors returned by [`Nvm`] operations.
#[non_exhaustive]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The target range extends beyond the physical flash.
    NonFlash,
    /// The target range overlaps the boot-protected section.
    Protected,
    /// The target range overlaps the SmartEEPROM area.
    SmartEepromArea,
    /// An error reported by the NVMCTRL peripheral.
    Peripheral(PeripheralError),
    /// An error propagated from the DSU driver.
    Dsu(super::dsu::Error),
    /// A source or destination address was not 32-bit-word aligned.
    Alignment,
}
/// One of the two physical flash banks.
#[derive(PartialEq, Debug)]
pub enum PhysicalBank {
    /// Flash bank A.
    A,
    /// Flash bank B.
    B,
}
/// A flash bank identified by its current role rather than its physical
/// identity (either physical bank can be mapped as the active one).
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Bank {
    /// The bank currently mapped at flash address zero.
    Active,
    /// The other bank, mapped in the upper half of flash.
    Inactive,
}
impl Bank {
    /// Byte offset of this bank from the start of flash: the active bank
    /// starts at address zero, the inactive one at the half-way point.
    #[inline]
    pub fn address(&self) -> u32 {
        if matches!(self, Bank::Active) {
            0
        } else {
            retrieve_bank_size()
        }
    }

    /// Size of this bank in bytes (both banks are the same size).
    #[inline]
    pub fn length(&self) -> u32 {
        retrieve_bank_size()
    }
}
/// Convenience alias for results of NVM operations.
pub type Result<T> = core::result::Result<T, Error>;
impl Nvm {
    /// Address of the 512-byte NVM user page.
    ///
    /// NOTE(review): 0x0080_4000 matches the SAM D5x/E5x user page address;
    /// confirm against the target part's datasheet.
    pub const USERPAGE_ADDR: *const [u8; 512] = 0x0080_4000 as _;

    /// Create the NVM driver, taking ownership of the NVMCTRL peripheral.
    #[inline]
    pub fn new(nvm: Nvmctrl) -> Self {
        Self { nvm }
    }

    /// Borrow the raw NVMCTRL register block.
    ///
    /// # Safety
    ///
    /// Direct register access can violate the invariants the safe methods on
    /// this driver rely on; the caller must keep the peripheral state
    /// consistent.
    pub unsafe fn registers(&self) -> &Nvmctrl {
        &self.nvm
    }

    /// Issue the bank-swap-and-reset command (`BKSWRST`). The ensuing device
    /// reset means this function never returns.
    ///
    /// # Safety
    ///
    /// The inactive bank must contain a valid, bootable image; otherwise the
    /// device resets into invalid code.
    #[inline]
    pub unsafe fn bank_swap(&mut self) -> ! {
        // The reset makes the command result unobservable; discard it.
        let _ = self.command_sync(Cmdselect::Bkswrst);
        unreachable!();
    }

    /// Set the power reduction mode of the flash controller (CTRLA.PRM).
    #[inline]
    pub fn power_reduction_mode(&mut self, prm: Prmselect) {
        self.nvm.ctrla().modify(|_, w| w.prm().variant(prm));
    }

    /// Returns `true` when boot protection is active, i.e. STATUS.BPDIS is
    /// cleared.
    #[inline]
    pub fn is_boot_protected(&self) -> bool {
        !self.nvm.status().read().bpdis().bit()
    }

    /// Which physical bank is currently mapped at address zero
    /// (STATUS.AFIRST).
    #[inline]
    pub fn first_bank(&self) -> PhysicalBank {
        if self.nvm.status().read().afirst().bit() {
            PhysicalBank::A
        } else {
            PhysicalBank::B
        }
    }

    /// Load the target address for the next command into the ADDR register.
    #[inline]
    fn set_address(&mut self, address: u32) {
        // SAFETY: writing ADDR only stages the target for a later command;
        // the mask keeps the 24 address bits the register defines.
        unsafe {
            self.nvm
                .addr()
                .write(|w| w.addr().bits(address & 0x00ff_ffff));
        }
    }

    /// Execute a controller command and block until it completes, then map
    /// any error flags into a [`Result`].
    #[inline]
    fn command_sync(&mut self, command: Cmdselect) -> Result<()> {
        // Wait for the controller to be ready to accept a command.
        while !self.nvm.status().read().ready().bit() {}
        // Issue the command together with the CMDEX key that unlocks CTRLB.
        self.nvm
            .ctrlb()
            .write(|w| w.cmdex().key().cmd().variant(command));
        // Busy-wait for completion, then acknowledge DONE
        // (write-one-to-clear).
        while !self.nvm.intflag().read().done().bit() {}
        self.nvm.intflag().write(|w| w.done().set_bit());
        self.manage_error_states()
    }

    /// Inspect and clear the error interrupt flags, translating the first
    /// one found into an [`Error`].
    #[inline]
    fn manage_error_states(&mut self) -> Result<()> {
        let read_intflag = self.nvm.intflag().read();
        // Only one error is reported even if several flags are set: address
        // errors first, then lock errors, then programming errors.
        let state = if read_intflag.addre().bit_is_set() {
            Err(Error::Peripheral(PeripheralError::AddressError))
        } else if read_intflag.locke().bit_is_set() {
            Err(Error::Peripheral(PeripheralError::LockError))
        } else if read_intflag.proge().bit_is_set() {
            Err(Error::Peripheral(PeripheralError::ProgrammingError))
        } else {
            Ok(())
        };
        // All three flags are write-one-to-clear; reset them for the next
        // command regardless of the outcome.
        self.nvm
            .intflag()
            .write(|w| w.addre().set_bit().locke().set_bit().proge().set_bit());
        state
    }

    /// Read a copy of the user page out of flash.
    #[inline]
    pub fn read_userpage(&self) -> Userpage {
        let mut userpage = RawUserpage([0_u8; 512]);
        // Volatile byte-wise copy: the user page lives in flash, outside
        // memory the compiler may assume is ordinary RAM.
        userpage
            .0
            .iter_mut()
            .zip((0..512).map(|i| unsafe {
                Self::USERPAGE_ADDR
                    .cast::<u8>()
                    .wrapping_offset(i)
                    .read_volatile()
            }))
            .for_each(|(l, r)| *l = r);
        userpage
    }

    /// Read-modify-write the user page: `f` edits a copy, which is written
    /// back (erase + write) only when it actually changed.
    ///
    /// # Safety
    ///
    /// The user page holds device configuration (BOD, WDT, SmartEEPROM,
    /// lock bits); writing invalid values can render the device
    /// inoperable.
    #[inline]
    pub unsafe fn modify_userpage(
        &mut self,
        f: impl FnOnce(&mut Userpage),
    ) -> Result<UserpageStatus> {
        let original = self.read_userpage();
        let mut modified = original.clone();
        f(&mut modified);
        if original != modified {
            // Flash must be erased before it can be reprogrammed.
            unsafe { self.erase(NvmErase::Userpage)? };
            unsafe { self.write(NvmWrite::Userpage(&modified))? };
            Ok(UserpageStatus::Updated)
        } else {
            // Skip the erase/write cycle to avoid pointless flash wear.
            Ok(UserpageStatus::Skipped)
        }
    }

    /// Read the 6-byte factory calibration area into a [`CalibrationArea`].
    ///
    /// NOTE(review): 0x0080_0080 matches the SAM D5x/E5x software
    /// calibration area address; confirm for the target part.
    #[inline]
    pub fn calibration_area(&self) -> CalibrationArea {
        let mut buffer = 0_u64;
        let base_addr: *const u8 = 0x0080_0080 as *const u8;
        // Assemble the bytes little-endian into a single integer.
        for i in 0..6 {
            buffer |=
                unsafe { core::ptr::read_volatile(base_addr.offset(i as isize)) as u64 } << (i * 8);
        }
        CalibrationArea(buffer)
    }

    /// Read the 11-byte factory temperature calibration area into a
    /// [`TemperaturesCalibrationArea`].
    #[inline]
    pub fn temperatures_calibration_area(&self) -> TemperaturesCalibrationArea {
        let mut buffer = 0_u128;
        let base_addr: *const u8 = 0x0080_0100 as *const u8;
        // Assemble the bytes little-endian into a single integer.
        for i in 0..11 {
            buffer |= unsafe { core::ptr::read_volatile(base_addr.offset(i as isize)) as u128 }
                << (i * 8);
        }
        TemperaturesCalibrationArea(buffer)
    }

    /// Set the security bit (`SSB` command).
    #[inline]
    pub fn enable_security_bit(&mut self) -> Result<()> {
        self.command_sync(Cmdselect::Ssb)
    }

    /// Lock the chip-erase capability (`CELCK` command).
    ///
    /// # Safety
    ///
    /// NOTE(review): depending on other settings this may permanently
    /// prevent reprogramming the device — confirm the recovery story for
    /// the target before using it.
    #[inline]
    pub unsafe fn enable_chip_erase_lock(&mut self) -> Result<()> {
        self.command_sync(Cmdselect::Celck)
    }

    /// Unlock the chip-erase capability (`CEULCK` command).
    #[inline]
    pub fn disable_chip_erase_lock(&mut self) -> Result<()> {
        self.command_sync(Cmdselect::Ceulck)
    }

    /// Enable or disable boot protection. No command is issued when the
    /// requested state already matches the current one.
    #[inline]
    pub fn boot_protection(&mut self, protect: bool) -> Result<()> {
        if self.is_boot_protected() != protect {
            if protect {
                // Clearing BPDIS re-arms boot protection.
                self.command_sync(Cmdselect::Cbpdis)
            } else {
                // Setting BPDIS disables boot protection.
                self.command_sync(Cmdselect::Sbpdis)
            }
        } else {
            Ok(())
        }
    }

    /// Lock or unlock the 32 flash regions: a cleared bit in `mask` locks
    /// the corresponding region, a set bit unlocks it.
    #[inline]
    pub fn region_lock(&mut self, mask: u32) -> Result<()> {
        const REGIONS_COUNT: u32 = 32;
        const FLASH_START: u32 = 0;
        let flash_end = retrieve_flash_size();
        let region_size = (flash_end - FLASH_START) / REGIONS_COUNT;
        for (i, address) in (FLASH_START..flash_end)
            .step_by(region_size as usize)
            .enumerate()
        {
            // LR/UR act on the region containing the staged ADDR value.
            self.set_address(address);
            let protect = mask & (1 << i) == 0;
            self.command_sync(if protect {
                Cmdselect::Lr
            } else {
                Cmdselect::Ur
            })?;
        }
        Ok(())
    }

    /// Write the words of `source_slice` into flash at `destination`.
    ///
    /// # Safety
    ///
    /// `destination` must point at erased flash that may be overwritten
    /// without corrupting code or data in use.
    #[inline]
    pub unsafe fn write_flash_from_slice(
        &mut self,
        destination: *mut u32,
        source_slice: &[u32],
        write_granularity: WriteGranularity,
    ) -> Result<()> {
        let source = source_slice.as_ptr();
        let words = source_slice.len() as u32;
        unsafe {
            self.write(NvmWrite::MainAddressSpace {
                destination,
                source,
                words,
                write_granularity,
            })
        }
    }

    /// Write `words` 32-bit words from `source` into flash at `destination`.
    ///
    /// # Safety
    ///
    /// `source` must be valid for reading `words` words, and `destination`
    /// must point at erased flash that may be overwritten.
    #[inline]
    pub unsafe fn write_flash(
        &mut self,
        destination: *mut u32,
        source: *const u32,
        words: u32,
        write_granularity: WriteGranularity,
    ) -> Result<()> {
        unsafe {
            self.write(NvmWrite::MainAddressSpace {
                destination,
                source,
                words,
                write_granularity,
            })
        }
    }

    /// Common write path for the main address space and the user page.
    ///
    /// # Safety
    ///
    /// The caller must guarantee the source range is readable and the
    /// destination flash range has been erased.
    #[inline]
    unsafe fn write(&mut self, op: NvmWrite) -> Result<()> {
        unsafe {
            // Normalize both kinds of write to raw addresses + word count.
            let (destination_address, source_address, words, granularity) = match op {
                NvmWrite::MainAddressSpace {
                    destination,
                    source,
                    words,
                    write_granularity,
                } => (destination as u32, source as u32, words, write_granularity),
                NvmWrite::Userpage(userpage) => (
                    Self::USERPAGE_ADDR as u32,
                    addr_of!(userpage.0) as u32,
                    // The whole 512-byte page, expressed in 32-bit words.
                    PAGESIZE / core::mem::size_of::<u32>() as u32,
                    WriteGranularity::QuadWord,
                ),
            };
            let step_size = core::mem::size_of::<u32>() as u32;
            let length = words * step_size;
            let write_size = granularity.size();
            let read_addresses = source_address..(source_address + length);
            let write_addresses = destination_address..(destination_address + length);
            // Both addresses must be word aligned for the volatile u32
            // copies below.
            if source_address % step_size != 0 {
                return Err(Error::Alignment);
            }
            if destination_address % step_size != 0 {
                return Err(Error::Alignment);
            }
            match op {
                NvmWrite::MainAddressSpace { .. } => {
                    // Refuse writes that leave flash, touch the protected
                    // boot section, or overlap the SmartEEPROM area.
                    if self.contains_non_flash_memory_area(&write_addresses) {
                        return Err(Error::NonFlash);
                    } else if self.contains_bootprotected(&write_addresses) {
                        return Err(Error::Protected);
                    } else if self.contains_smart_eeprom(&write_addresses) {
                        return Err(Error::SmartEepromArea);
                    }
                }
                NvmWrite::Userpage(_) => {
                    // The user page lives outside the main address space,
                    // so none of the range checks above apply.
                }
            }
            // Start from a clean page buffer (PBC = page buffer clear).
            self.command_sync(Cmdselect::Pbc)?;
            let mut dirty = false;
            for (destination_address, source_address) in write_addresses
                .step_by(step_size as usize)
                .zip(read_addresses.step_by(step_size as usize))
            {
                let value = core::ptr::read_volatile(source_address as *const u32);
                // Stage the word in the page buffer; it is only committed
                // to flash by the write command below.
                core::ptr::write_volatile(destination_address as *mut u32, value);
                dirty = true;
                // Commit whenever the last word of a granularity unit was
                // just written.
                if destination_address % write_size >= write_size - step_size {
                    self.command_sync(granularity.command())?;
                    dirty = false;
                }
            }
            // Flush a partially filled buffer at the end.
            if dirty {
                self.command_sync(granularity.command())?
            }
            Ok(())
        }
    }

    /// Erase `blocks` flash blocks, starting with the block containing
    /// `address`.
    ///
    /// # Safety
    ///
    /// Erasing flash that holds code or data in use leads to undefined
    /// behavior.
    #[inline]
    pub unsafe fn erase_flash(&mut self, address: *mut u32, blocks: u32) -> Result<()> {
        unsafe { self.erase(NvmErase::Flash { address, blocks }) }
    }

    /// Common erase path for the main address space and the user page.
    ///
    /// # Safety
    ///
    /// The caller must guarantee the erased range holds no live code or
    /// data.
    #[inline]
    unsafe fn erase(&mut self, op: NvmErase) -> Result<()> {
        let (address, length, granularity) = match op {
            NvmErase::Flash { address, blocks } => {
                (address as u32, blocks, EraseGranularity::Block)
            }
            NvmErase::Userpage => (Self::USERPAGE_ADDR as u32, 1, EraseGranularity::Page),
        };
        // Align down to the start of the containing erase unit.
        let flash_address = address - address % granularity.size();
        let range_to_erase = flash_address..(flash_address + length * granularity.size());
        match op {
            NvmErase::Flash { .. } => {
                // Same protection checks as for writes.
                if self.contains_non_flash_memory_area(&range_to_erase) {
                    return Err(Error::NonFlash);
                } else if self.contains_bootprotected(&range_to_erase) {
                    return Err(Error::Protected);
                } else if self.contains_smart_eeprom(&range_to_erase) {
                    return Err(Error::SmartEepromArea);
                }
            }
            NvmErase::Userpage => {
                // The user page is outside the main address space; the
                // checks above do not apply.
            }
        }
        // Erase one unit at a time, staging each address first.
        for address in range_to_erase.step_by(granularity.size() as usize) {
            self.set_address(address);
            self.command_sync(granularity.command())?
        }
        Ok(())
    }

    /// Does `input` overlap the boot-protected section at the start of the
    /// active bank (only relevant while boot protection is enabled)?
    #[inline]
    fn contains_bootprotected(&self, input: &Range<u32>) -> bool {
        let bootprot = self.nvm.status().read().bootprot().bits();
        // STATUS.BOOTPROT encodes the protected size as (15 - n) * 8 KiB.
        let bp_space = 8 * 1024 * (15 - bootprot) as u32;
        let boot = &(Bank::Active.address()..(Bank::Active.address() + bp_space));
        self.is_boot_protected() && range_overlap(input, boot)
    }

    /// Does `input` overlap the SmartEEPROM blocks allocated at the very
    /// end of flash (SEESTAT.SBLK blocks)?
    #[inline]
    fn contains_smart_eeprom(&self, input: &Range<u32>) -> bool {
        let smart_eeprom_allocated_blocks = self.nvm.seestat().read().sblk().bits() as u32;
        let smart_eeprom_end = Bank::Inactive.address() + Bank::Inactive.length();
        let smart_eeprom_start = smart_eeprom_end - smart_eeprom_allocated_blocks * BLOCKSIZE;
        let smart_eeprom = &(smart_eeprom_start..smart_eeprom_end);
        range_overlap(input, smart_eeprom)
    }

    /// Does `input` extend past the end of physical flash?
    #[inline]
    fn contains_non_flash_memory_area(&self, input: &Range<u32>) -> bool {
        input.end > retrieve_flash_size()
    }

    /// Retrieve a handle describing the current SmartEEPROM mode.
    #[inline]
    pub fn smart_eeprom(&mut self) -> smart_eeprom::Result<'_> {
        smart_eeprom::SmartEepromMode::retrieve(self)
    }
}
/// Outcome of [`Nvm::modify_userpage`].
#[derive(Copy, Clone, Debug)]
pub enum UserpageStatus {
    /// The closure changed the page; it was erased and rewritten.
    Updated,
    /// The closure left the page unchanged; no flash operation occurred.
    Skipped,
}
/// Internal description of a write operation.
enum NvmWrite<'a> {
    /// Write into the main flash address space.
    MainAddressSpace {
        // Target address in flash.
        destination: *mut u32,
        // Source data in memory.
        source: *const u32,
        // Number of 32-bit words to copy.
        words: u32,
        // Commit unit: quad-word or whole page.
        write_granularity: WriteGranularity,
    },
    /// Rewrite the user page with the given contents.
    Userpage(&'a Userpage),
}
/// Internal description of an erase operation.
enum NvmErase {
    /// Erase `blocks` blocks starting with the one containing `address`.
    Flash { address: *mut u32, blocks: u32 },
    /// Erase the user page.
    Userpage,
}
/// Unit size used by erase commands.
#[derive(Copy, Clone, Debug)]
enum EraseGranularity {
    /// One block (16 pages); used for main flash.
    Block,
    /// One page; used for the user page.
    Page,
}
/// Unit size used by write commands.
#[derive(Copy, Clone, Debug)]
pub enum WriteGranularity {
    /// Commit 16 bytes (four 32-bit words) at a time.
    QuadWord,
    /// Commit a whole 512-byte page at a time.
    Page,
}
impl EraseGranularity {
    /// NVMCTRL command that performs an erase at this granularity.
    #[inline]
    fn command(&self) -> Cmdselect {
        if let Self::Block = self {
            Cmdselect::Eb
        } else {
            Cmdselect::Ep
        }
    }

    /// Number of bytes covered by one erase operation.
    #[inline]
    fn size(&self) -> u32 {
        match *self {
            Self::Block => BLOCKSIZE,
            Self::Page => PAGESIZE,
        }
    }
}
impl WriteGranularity {
    /// NVMCTRL command that commits the page buffer at this granularity.
    #[inline]
    fn command(&self) -> Cmdselect {
        if let Self::Page = self {
            Cmdselect::Wp
        } else {
            Cmdselect::Wqw
        }
    }

    /// Number of bytes committed by one write command.
    #[inline]
    fn size(&self) -> u32 {
        match *self {
            Self::QuadWord => QUADWORDSIZE,
            Self::Page => PAGESIZE,
        }
    }
}
/// Returns `true` when the half-open ranges `a` and `b` share at least one
/// address.
fn range_overlap(a: &Range<u32>, b: &Range<u32>) -> bool {
    // The ranges are disjoint exactly when one ends at or before the other
    // begins; overlap is the negation of that.
    !(a.end <= b.start || b.end <= a.start)
}
/// User page backed by an owned 512-byte array.
pub type Userpage = RawUserpage<[u8; 512]>;
bitfield! {
    /// Decoded view of the NVM user page.
    ///
    /// The bitfield accessors below cover the first 20 bytes (configuration
    /// fields); the remainder of the page is available via
    /// [`RawUserpage::userpage1_as_slice`].
    #[derive(Clone, PartialEq, Eq)]
    pub struct RawUserpage([u8]);
    impl Debug;
    u8;
    // BOD33 brown-out detector configuration.
    pub bod33_disable, set_bod33_disable: 0;
    pub bod33_level, set_bod33_level: 8, 1;
    pub bod33_action, set_bod33_action: 10, 9;
    pub bod33_hysteresis, set_bod33_hysteresis: 14, 11;
    // Factory BOD12 calibration parameters.
    pub u16, bod12_calibration_parameters, set_bod12_calibration_parameters: 25, 15;
    // Size of the protected bootloader section.
    pub nvm_bootloader_size, set_nvm_bootloader_size: 29, 26;
    pub reserved_0, set_reserved_0: 31, 30;
    // SmartEEPROM configuration: allocated blocks and virtual page size.
    pub see_sblk, set_see_sblk: 35, 32;
    pub see_psz, set_see_psz: 38, 36;
    pub ram_ecc_disable, set_ram_ecc_disable: 39;
    pub reserved_1, set_reserved_1: 47, 40;
    // Watchdog timer configuration.
    pub wdt_enable, set_wdt_enable: 48;
    pub wdt_always_on, set_wdt_always_on: 49;
    pub wdt_period, set_wdt_period: 53, 50;
    pub wdt_window, set_wdt_window: 57, 54;
    pub wdt_ewoffset, set_wdt_ewoffset: 61, 58;
    pub wdt_wen, set_wdt_wen: 62;
    pub reserved_2, set_reserved_2: 63;
    // NVM region lock bits.
    pub u32, nvm_locks, set_nvm_locks: 95, 64;
    pub u32, userpage_0, set_userpage_0: 127, 96;
    pub u32, reserved_3, set_reserved_3: 159, 128;
}
impl<T: AsRef<[u8]>> RawUserpage<T> {
    /// Borrow bytes 20..512 of the user page — the portion not covered by
    /// the configuration bitfields.
    #[inline]
    pub fn userpage1_as_slice(&self) -> &[u8] {
        &self.0.as_ref()[20..512]
    }
}
impl<T: AsMut<[u8]>> RawUserpage<T> {
    /// Mutably borrow bytes 20..512 of the user page — the portion not
    /// covered by the configuration bitfields.
    #[inline]
    pub fn userpage1_as_slice_mut(&mut self) -> &mut [u8] {
        &mut self.0.as_mut()[20..512]
    }
}
bitfield! {
    /// Factory calibration values (read by [`Nvm::calibration_area`]).
    #[derive(Copy, Clone, Default)]
    pub struct CalibrationArea(u64);
    impl Debug;
    u32;
    // Analog comparator bias.
    pub ac_bias, _: 1, 0;
    // ADC0 bias calibration.
    pub adc0_biascomp, _: 4, 2;
    pub adc0_biasrefbuf, _: 7, 5;
    pub adc0_biasr2r, _: 10, 8;
    // ADC1 bias calibration.
    pub adc1_biascomp, _: 18, 16;
    pub adc1_biasrefbuf, _: 21, 19;
    pub adc1_biasr2r, _: 24, 22;
    // USB pad calibration.
    pub usb_transn, _: 36, 32;
    pub usb_transp, _: 41, 37;
    pub usb_trim, _: 44, 42;
}
bitfield! {
    /// Factory temperature-sensor calibration values (read by
    /// [`Nvm::temperatures_calibration_area`]).
    #[derive(Copy, Clone, Default)]
    pub struct TemperaturesCalibrationArea(u128);
    impl Debug;
    u32;
    // Integer and decimal parts of the low and high calibration
    // temperatures.
    pub tli, _: 7, 0;
    pub tld, _: 11, 8;
    pub thi, _: 19, 12;
    pub thd, _: 23, 20;
    // Sensor voltages at the calibration points; each field is 12 bits.
    pub vpl, _: 51, 40;
    pub vph, _: 63, 52;
    // Fixed: was `75, 63`, which overlapped VPH's top bit (63) and made the
    // field 13 bits wide; VCL occupies bits 75..=64.
    pub vcl, _: 75, 64;
    pub vch, _: 87, 76;
}