use core::cell::UnsafeCell;
use core::ptr;
pub use bare_metal::Peripheral;
use volatile_register::{RO, RW, WO};
use interrupt::Nr;
#[cfg(test)]
mod test;
/// CPUID register block: 0xE000_ED00 (System Control Space)
pub const CPUID: Peripheral<CPUID> = unsafe { Peripheral::new(0xE000_ED00) };
/// Debug Control Block: 0xE000_EDF0
pub const DCB: Peripheral<DCB> = unsafe { Peripheral::new(0xE000_EDF0) };
/// Data Watchpoint and Trace unit: 0xE000_1000
pub const DWT: Peripheral<DWT> = unsafe { Peripheral::new(0xE000_1000) };
/// Flash Patch and Breakpoint unit: 0xE000_2000
pub const FPB: Peripheral<FPB> = unsafe { Peripheral::new(0xE000_2000) };
/// Floating Point Unit register block: 0xE000_EF30
pub const FPU: Peripheral<FPU> = unsafe { Peripheral::new(0xE000_EF30) };
/// Instrumentation Trace Macrocell: 0xE000_0000
pub const ITM: Peripheral<ITM> = unsafe { Peripheral::new(0xE000_0000) };
/// Memory Protection Unit: 0xE000_ED90
pub const MPU: Peripheral<MPU> = unsafe { Peripheral::new(0xE000_ED90) };
/// Nested Vectored Interrupt Controller: 0xE000_E100
pub const NVIC: Peripheral<NVIC> = unsafe { Peripheral::new(0xE000_E100) };
/// System Control Block: 0xE000_ED04 (starts at ICSR)
pub const SCB: Peripheral<SCB> = unsafe { Peripheral::new(0xE000_ED04) };
/// SysTick timer: 0xE000_E010
pub const SYST: Peripheral<SYST> = unsafe { Peripheral::new(0xE000_E010) };
/// Trace Port Interface Unit: 0xE004_0000
pub const TPIU: Peripheral<TPIU> = unsafe { Peripheral::new(0xE004_0000) };
/// Cache and Branch Predictor maintenance operations (ARMv7-M only)
#[cfg(armv7m)]
pub const CBP: Peripheral<CBP> = unsafe { Peripheral::new(0xE000_EF50) };
/// CPUID register block.
///
/// Identification registers describing the processor and its implemented
/// features. `reserved*` fields only pad the memory layout.
#[repr(C)]
pub struct CPUID {
    /// CPUID Base Register
    pub base: RO<u32>,
    reserved0: [u32; 15],
    /// Processor Feature Registers
    pub pfr: [RO<u32>; 2],
    /// Debug Feature Register
    pub dfr: RO<u32>,
    /// Auxiliary Feature Register
    pub afr: RO<u32>,
    /// Memory Model Feature Registers
    pub mmfr: [RO<u32>; 4],
    /// Instruction Set Attribute Registers
    pub isar: [RO<u32>; 5],
    reserved1: u32,
    /// Cache Level ID Register (ARMv7-M only)
    #[cfg(any(armv7m, test))]
    pub clidr: RO<u32>,
    /// Cache Type Register (ARMv7-M only)
    #[cfg(any(armv7m, test))]
    pub ctr: RO<u32>,
    /// Cache Size ID Register (ARMv7-M only); contents depend on the cache
    /// currently selected via `csselr`
    #[cfg(any(armv7m, test))]
    pub ccsidr: RO<u32>,
    /// Cache Size Selection Register (ARMv7-M only)
    #[cfg(any(armv7m, test))]
    pub csselr: RW<u32>,
}
/// Cache type to select via CSSELR before reading `ccsidr`.
///
/// The discriminants match the CSSELR `InD` bit encoding.
#[cfg(armv7m)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CsselrCacheType {
    /// Select the data or unified cache (InD = 0)
    DataOrUnified = 0,
    /// Select the instruction cache (InD = 1)
    Instruction = 1,
}
#[cfg(armv7m)]
impl CPUID {
    /// Selects which cache subsequent `ccsidr` reads describe, by writing
    /// the cache `level` and type `ind` into CSSELR.
    pub fn select_cache(&self, level: u8, ind: CsselrCacheType) {
        // CSSELR bit layout: InD at bit 0, Level at bits [3:1].
        const IND_POS: u32 = 0;
        const IND_MASK: u32 = 1 << IND_POS;
        const LEVEL_POS: u32 = 1;
        const LEVEL_MASK: u32 = 0x7 << LEVEL_POS;

        let level_bits = ((level as u32) << LEVEL_POS) & LEVEL_MASK;
        let ind_bits = ((ind as u32) << IND_POS) & IND_MASK;
        unsafe { self.csselr.write(level_bits | ind_bits) }
    }

    /// Returns `(number of sets, number of ways)` for the cache identified
    /// by `level`/`ind`, decoded from CCSIDR.
    pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
        // CCSIDR bit layout: Associativity at [12:3], NumSets at [27:13];
        // both fields store (actual value - 1).
        const NUMSETS_POS: u32 = 13;
        const NUMSETS_MASK: u32 = 0x7FFF << NUMSETS_POS;
        const ASSOC_POS: u32 = 3;
        const ASSOC_MASK: u32 = 0x3FF << ASSOC_POS;

        self.select_cache(level, ind);
        // Make sure the CSSELR write completes before CCSIDR is sampled.
        ::asm::dsb();
        let ccsidr = self.ccsidr.read();

        let sets = (((ccsidr & NUMSETS_MASK) >> NUMSETS_POS) + 1) as u16;
        let ways = (((ccsidr & ASSOC_MASK) >> ASSOC_POS) + 1) as u16;
        (sets, ways)
    }
}
/// Debug Control Block register layout.
#[repr(C)]
pub struct DCB {
    /// Debug Halting Control and Status Register
    pub dhcsr: RW<u32>,
    /// Debug Core Register Selector Register (write-only)
    pub dcrsr: WO<u32>,
    /// Debug Core Register Data Register
    pub dcrdr: RW<u32>,
    /// Debug Exception and Monitor Control Register
    pub demcr: RW<u32>,
}
/// Data Watchpoint and Trace unit register layout.
#[repr(C)]
pub struct DWT {
    /// Control Register
    pub ctrl: RW<u32>,
    /// Cycle Count Register
    pub cyccnt: RW<u32>,
    /// CPI Count Register
    pub cpicnt: RW<u32>,
    /// Exception Overhead Count Register
    pub exccnt: RW<u32>,
    /// Sleep Count Register
    pub sleepcnt: RW<u32>,
    /// LSU Count Register
    pub lsucnt: RW<u32>,
    /// Folded-instruction Count Register
    pub foldcnt: RW<u32>,
    /// Program Counter Sample Register
    pub pcsr: RO<u32>,
    /// Comparator register banks
    pub c: [Comparator; 16],
    reserved: [u32; 932],
    /// Lock Access Register (write-only)
    pub lar: WO<u32>,
    /// Lock Status Register
    pub lsr: RO<u32>,
}
impl DWT {
    /// Enables the cycle counter by setting the CYCCNTENA bit of `ctrl`.
    pub fn enable_cycle_counter(&self) {
        // DWT_CTRL bit 0 = CYCCNTENA (cycle counter enable); named instead
        // of the bare literal `1` so the intent is explicit.
        const DWT_CTRL_CYCCNTENA: u32 = 1 << 0;
        unsafe { self.ctrl.modify(|r| r | DWT_CTRL_CYCCNTENA) }
    }
}
/// One DWT comparator register bank.
#[repr(C)]
pub struct Comparator {
    /// Comparator value
    pub comp: RW<u32>,
    /// Address mask
    pub mask: RW<u32>,
    /// Comparator function configuration
    pub function: RW<u32>,
    reserved: u32,
}
/// Flash Patch and Breakpoint unit register layout.
#[repr(C)]
pub struct FPB {
    /// Control Register
    pub ctrl: RW<u32>,
    /// Remap Register
    pub remap: RW<u32>,
    /// Comparator Registers
    pub comp: [RW<u32>; 127],
    reserved: [u32; 875],
    /// Lock Access Register (write-only)
    pub lar: WO<u32>,
    /// Lock Status Register
    pub lsr: RO<u32>,
}
/// Floating Point Unit register layout.
///
/// The registers only exist on parts with an FPU (`has_fpu`); `test`
/// builds also include them so the layout can be unit-checked.
#[repr(C)]
pub struct FPU {
    reserved: u32,
    /// Floating Point Context Control Register
    #[cfg(any(has_fpu, test))]
    pub fpccr: RW<u32>,
    /// Floating Point Context Address Register
    #[cfg(any(has_fpu, test))]
    pub fpcar: RW<u32>,
    /// Floating Point Default Status Control Register
    #[cfg(any(has_fpu, test))]
    pub fpdscr: RW<u32>,
    /// Media and VFP Feature Registers
    #[cfg(any(has_fpu, test))]
    pub mvfr: [RO<u32>; 3],
}
/// Instrumentation Trace Macrocell register layout.
#[repr(C)]
pub struct ITM {
    /// Stimulus Port registers
    pub stim: [Stim; 256],
    reserved0: [u32; 640],
    /// Trace Enable registers
    pub ter: [RW<u32>; 8],
    reserved1: [u32; 8],
    /// Trace Privilege Register
    pub tpr: RW<u32>,
    reserved2: [u32; 15],
    /// Trace Control Register
    pub tcr: RW<u32>,
    reserved3: [u32; 75],
    /// Lock Access Register (write-only)
    pub lar: WO<u32>,
    /// Lock Status Register
    pub lsr: RO<u32>,
}
/// A single ITM stimulus port.
///
/// Writes of different widths produce trace packets of the matching size;
/// a read reports whether the port's FIFO can accept another write.
pub struct Stim {
    register: UnsafeCell<u32>,
}

impl Stim {
    /// Writes an 8-bit `value` into the stimulus port.
    pub fn write_u8(&self, value: u8) {
        // SAFETY: volatile byte-wide write to the port's address; only the
        // low byte of the register is touched.
        unsafe { ptr::write_volatile(self.register.get() as *mut u8, value) }
    }

    /// Writes a 16-bit `value` into the stimulus port.
    pub fn write_u16(&self, value: u16) {
        // SAFETY: volatile halfword-wide write to the port's address.
        unsafe { ptr::write_volatile(self.register.get() as *mut u16, value) }
    }

    /// Writes a 32-bit `value` into the stimulus port.
    pub fn write_u32(&self, value: u32) {
        // SAFETY: volatile word-wide write to the port's address.
        unsafe { ptr::write_volatile(self.register.get(), value) }
    }

    /// Returns `true` if the stimulus port can accept another write.
    ///
    /// Reading a stimulus port returns FIFOREADY in bit 0; only that bit is
    /// meaningful, so it is tested explicitly. (The previous `== 1`
    /// comparison of the whole word would report "not ready" if any other
    /// bit happened to read back as set.)
    pub fn is_fifo_ready(&self) -> bool {
        // SAFETY: volatile read of the port's address.
        unsafe { ptr::read_volatile(self.register.get()) & 1 == 1 }
    }
}
/// Memory Protection Unit register layout.
#[repr(C)]
pub struct MPU {
    /// Type Register (`_type` because `type` is a keyword)
    pub _type: RO<u32>,
    /// Control Register
    pub ctrl: RW<u32>,
    /// Region Number Register
    pub rnr: RW<u32>,
    /// Region Base Address Register
    pub rbar: RW<u32>,
    /// Region Attribute and Size Register
    pub rasr: RW<u32>,
    /// Alias 1 of RBAR
    pub rbar_a1: RW<u32>,
    // NOTE(review): the `rsar_*` fields presumably alias RASR (the usual
    // name would be `rasr_a*`); renaming would break the public API, so the
    // spelling is kept — verify against the hardware manual.
    /// Alias 1 of RASR
    pub rsar_a1: RW<u32>,
    /// Alias 2 of RBAR
    pub rbar_a2: RW<u32>,
    /// Alias 2 of RASR
    pub rsar_a2: RW<u32>,
    /// Alias 3 of RBAR
    pub rbar_a3: RW<u32>,
    /// Alias 3 of RASR
    pub rsar_a3: RW<u32>,
}
/// Nested Vectored Interrupt Controller register layout.
///
/// Each bank of 8 x 32-bit registers covers up to 240 external interrupts,
/// one bit per interrupt; `ipr` holds one priority byte per interrupt.
#[repr(C)]
pub struct NVIC {
    /// Interrupt Set-Enable registers
    pub iser: [RW<u32>; 8],
    reserved0: [u32; 24],
    /// Interrupt Clear-Enable registers
    pub icer: [RW<u32>; 8],
    reserved1: [u32; 24],
    /// Interrupt Set-Pending registers
    pub ispr: [RW<u32>; 8],
    reserved2: [u32; 24],
    /// Interrupt Clear-Pending registers
    pub icpr: [RW<u32>; 8],
    reserved3: [u32; 24],
    /// Interrupt Active Bit registers (read-only)
    pub iabr: [RO<u32>; 8],
    reserved4: [u32; 56],
    /// Interrupt Priority registers
    pub ipr: [RW<u8>; 240],
}
impl NVIC {
    /// Maps an interrupt number to the `(word index, bit mask)` pair that
    /// addresses it inside a bank of 32-bit interrupt registers.
    fn bit_location(nr: u8) -> (usize, u32) {
        (usize::from(nr / 32), 1 << (nr % 32))
    }

    /// Clears the pending state of `interrupt`.
    pub fn clear_pending<I>(&self, interrupt: I)
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        unsafe { self.icpr[word].write(mask) }
    }

    /// Disables `interrupt`.
    pub fn disable<I>(&self, interrupt: I)
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        unsafe { self.icer[word].write(mask) }
    }

    /// Enables `interrupt`.
    pub fn enable<I>(&self, interrupt: I)
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        unsafe { self.iser[word].write(mask) }
    }

    /// Reads the priority byte configured for `interrupt`.
    pub fn get_priority<I>(&self, interrupt: I) -> u8
    where
        I: Nr,
    {
        self.ipr[usize::from(interrupt.nr())].read()
    }

    /// Checks whether `interrupt` is currently active.
    pub fn is_active<I>(&self, interrupt: I) -> bool
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        self.iabr[word].read() & mask == mask
    }

    /// Checks whether `interrupt` is enabled.
    pub fn is_enabled<I>(&self, interrupt: I) -> bool
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        self.iser[word].read() & mask == mask
    }

    /// Checks whether `interrupt` is pending.
    pub fn is_pending<I>(&self, interrupt: I) -> bool
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        self.ispr[word].read() & mask == mask
    }

    /// Marks `interrupt` as pending.
    pub fn set_pending<I>(&self, interrupt: I)
    where
        I: Nr,
    {
        let (word, mask) = Self::bit_location(interrupt.nr());
        unsafe { self.ispr[word].write(mask) }
    }

    /// Sets the priority byte of `interrupt` to `prio`.
    ///
    /// # Safety
    ///
    /// Changing interrupt priorities can break priority-based critical
    /// sections elsewhere in the program.
    pub unsafe fn set_priority<I>(&self, interrupt: I, prio: u8)
    where
        I: Nr,
    {
        self.ipr[usize::from(interrupt.nr())].write(prio)
    }
}
/// System Control Block register layout (mapped starting at ICSR).
#[repr(C)]
pub struct SCB {
    /// Interrupt Control and State Register
    pub icsr: RW<u32>,
    /// Vector Table Offset Register
    pub vtor: RW<u32>,
    /// Application Interrupt and Reset Control Register
    pub aircr: RW<u32>,
    /// System Control Register
    pub scr: RW<u32>,
    /// Configuration and Control Register
    pub ccr: RW<u32>,
    /// System Handler Priority registers, one byte per handler
    pub shpr: [RW<u8>; 12],
    // NOTE(review): position suggests this is the System Handler Control
    // and State Register (usually `shcsr`); the `shpcrs` spelling is public
    // API and cannot change here — verify against the hardware manual.
    pub shpcrs: RW<u32>,
    /// Configurable Fault Status Register
    pub cfsr: RW<u32>,
    /// HardFault Status Register
    pub hfsr: RW<u32>,
    /// Debug Fault Status Register
    pub dfsr: RW<u32>,
    /// MemManage Fault Address Register
    pub mmar: RW<u32>,
    /// BusFault Address Register
    pub bfar: RW<u32>,
    /// Auxiliary Fault Status Register
    pub afsr: RW<u32>,
    reserved: [u32; 18],
    /// Coprocessor Access Control Register
    pub cpacr: RW<u32>,
}
/// Access level granted to the FPU (coprocessors CP10/CP11).
#[cfg(has_fpu)]
#[derive(Clone, Copy, Debug)]
pub enum FpuAccessMode {
    /// FPU is not accessible
    Disabled,
    /// FPU is accessible from both privileged and unprivileged code
    Enabled,
    /// FPU is accessible from privileged code only
    Privileged,
}
#[cfg(has_fpu)]
mod fpu_consts {
    /// Mask of the CP10/CP11 access fields in CPACR (bits [23:20]).
    pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20;
    /// "Privileged access" encoding for both CP10 and CP11.
    pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20;
    /// Additional bits that extend access to unprivileged (user) code.
    pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20;
}
#[cfg(has_fpu)]
use self::fpu_consts::*;
#[cfg(has_fpu)]
impl SCB {
    /// Decodes the current FPU access mode from the CP10/CP11 fields of
    /// CPACR.
    pub fn fpu_access_mode(&self) -> FpuAccessMode {
        let cp_bits = self.cpacr.read() & SCB_CPACR_FPU_MASK;
        if cp_bits == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER {
            FpuAccessMode::Enabled
        } else if cp_bits == SCB_CPACR_FPU_ENABLE {
            FpuAccessMode::Privileged
        } else {
            FpuAccessMode::Disabled
        }
    }

    /// Rewrites the CP10/CP11 fields of CPACR to grant `mode` access,
    /// leaving all other CPACR bits untouched.
    pub fn set_fpu_access_mode(&self, mode: FpuAccessMode) {
        let base = self.cpacr.read() & !SCB_CPACR_FPU_MASK;
        let bits = match mode {
            FpuAccessMode::Disabled => 0,
            FpuAccessMode::Privileged => SCB_CPACR_FPU_ENABLE,
            FpuAccessMode::Enabled => SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER,
        };
        unsafe { self.cpacr.write(base | bits) }
    }

    /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)`.
    pub fn enable_fpu(&self) {
        self.set_fpu_access_mode(FpuAccessMode::Enabled)
    }

    /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)`.
    pub fn disable_fpu(&self) {
        self.set_fpu_access_mode(FpuAccessMode::Disabled)
    }
}
#[cfg(armv7m)]
mod scb_consts {
    /// CCR bit 17: instruction cache enable.
    pub const SCB_CCR_IC_MASK: u32 = (1<<17);
    /// CCR bit 16: data cache enable.
    pub const SCB_CCR_DC_MASK: u32 = (1<<16);
}
#[cfg(armv7m)]
use self::scb_consts::*;
#[cfg(armv7m)]
impl SCB {
    /// Enables the instruction cache: invalidates it, then sets CCR.IC.
    #[inline]
    pub fn enable_icache(&self) {
        // Already enabled — nothing to do.
        if self.icache_enabled() {
            return;
        }
        // NOTE(review): forms a `&mut` to the CBP block via the global
        // peripheral handle; presumably relies on single-core, non-reentrant
        // use — confirm against the crate's ownership model.
        let cbp = unsafe { &mut *CBP.get() };
        // Invalidate the whole I-cache before turning it on.
        cbp.iciallu();
        unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) };
        // Barriers ensure the enable takes effect before continuing.
        ::asm::dsb();
        ::asm::isb();
    }
    /// Disables the instruction cache and invalidates it afterwards.
    #[inline]
    pub fn disable_icache(&self) {
        // Already disabled — nothing to do.
        if !self.icache_enabled() {
            return;
        }
        let cbp = unsafe { &mut *CBP.get() };
        unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
        // Invalidate after disabling so no stale lines remain.
        cbp.iciallu();
        ::asm::dsb();
        ::asm::isb();
    }
    /// Returns whether CCR.IC is set (instruction cache enabled).
    #[inline]
    pub fn icache_enabled(&self) -> bool {
        // Barriers before sampling CCR so prior maintenance has completed.
        ::asm::dsb();
        ::asm::isb();
        self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK
    }
    /// Invalidates the entire instruction cache.
    #[inline]
    pub fn invalidate_icache(&self) {
        let cbp = unsafe { &mut *CBP.get() };
        cbp.iciallu();
        ::asm::dsb();
        ::asm::isb();
    }
    /// Enables the data cache: invalidates it, then sets CCR.DC.
    #[inline]
    pub fn enable_dcache(&self, cpuid: &CPUID) {
        // Already enabled — nothing to do.
        if self.dcache_enabled() {
            return;
        }
        // Invalidate first so the cache starts without stale lines.
        self.invalidate_dcache(cpuid);
        unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) };
        ::asm::dsb();
        ::asm::isb();
    }
    /// Disables the data cache, then cleans and invalidates it so dirty
    /// lines are written back to memory.
    #[inline]
    pub fn disable_dcache(&self, cpuid: &CPUID) {
        // Already disabled — nothing to do.
        if !self.dcache_enabled() {
            return;
        }
        unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
        self.clean_invalidate_dcache(cpuid);
    }
    /// Returns whether CCR.DC is set (data cache enabled).
    #[inline]
    pub fn dcache_enabled(&self) -> bool {
        // Barriers before sampling CCR so prior maintenance has completed.
        ::asm::dsb();
        ::asm::isb();
        self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK
    }
    /// Invalidates the whole level-1 data/unified cache by set/way.
    ///
    /// Private: invalidating without cleaning discards dirty data, so this
    /// is only called from `enable_dcache`, where the cache is off.
    #[inline]
    fn invalidate_dcache(&self, cpuid: &CPUID) {
        let cbp = unsafe { &mut *CBP.get() };
        // Cache geometry (sets x ways) comes from CCSIDR via CPUID.
        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
        for set in 0..sets {
            for way in 0..ways {
                cbp.dcisw(set, way);
            }
        }
        ::asm::dsb();
        ::asm::isb();
    }
    /// Cleans (writes back) the whole level-1 data/unified cache by set/way.
    #[inline]
    pub fn clean_dcache(&self, cpuid: &CPUID) {
        let cbp = unsafe { &mut *CBP.get() };
        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
        for set in 0..sets {
            for way in 0..ways {
                cbp.dccsw(set, way);
            }
        }
        ::asm::dsb();
        ::asm::isb();
    }
    /// Cleans and invalidates the whole level-1 data/unified cache by
    /// set/way.
    #[inline]
    pub fn clean_invalidate_dcache(&self, cpuid: &CPUID) {
        let cbp = unsafe { &mut *CBP.get() };
        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
        for set in 0..sets {
            for way in 0..ways {
                cbp.dccisw(set, way);
            }
        }
        ::asm::dsb();
        ::asm::isb();
    }
    /// Invalidates the D-cache lines covering `addr .. addr + size`.
    ///
    /// `addr` is rounded down to a 32-byte line boundary, so any unrelated
    /// data sharing the first/last line is also invalidated.
    #[inline]
    pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) {
        // Empty range: no maintenance required.
        if size == 0 {
            return;
        }
        let cbp = unsafe { &mut *CBP.get() };
        ::asm::dsb();
        // NOTE(review): assumes a fixed 32-byte cache line — TODO confirm
        // for the target part (line size is discoverable via CTR).
        const LINESIZE: usize = 32;
        let num_lines = ((size - 1)/LINESIZE) + 1;
        // Align the start address down to a line boundary.
        let mut addr = addr & 0xFFFF_FFE0;
        for _ in 0..num_lines {
            cbp.dcimvac(addr as u32);
            addr += LINESIZE;
        }
        ::asm::dsb();
        ::asm::isb();
    }
    /// Cleans (writes back) the D-cache lines covering `addr .. addr + size`.
    ///
    /// `addr` is rounded down to a 32-byte line boundary.
    #[inline]
    pub fn clean_dcache_by_address(&self, addr: usize, size: usize) {
        // Empty range: no maintenance required.
        if size == 0 {
            return;
        }
        let cbp = unsafe { &mut *CBP.get() };
        ::asm::dsb();
        // NOTE(review): assumes a fixed 32-byte cache line (see above).
        const LINESIZE: usize = 32;
        let num_lines = ((size - 1)/LINESIZE) + 1;
        // Align the start address down to a line boundary.
        let mut addr = addr & 0xFFFF_FFE0;
        for _ in 0..num_lines {
            cbp.dccmvac(addr as u32);
            addr += LINESIZE;
        }
        ::asm::dsb();
        ::asm::isb();
    }
    /// Cleans and invalidates the D-cache lines covering
    /// `addr .. addr + size`.
    ///
    /// `addr` is rounded down to a 32-byte line boundary.
    #[inline]
    pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) {
        // Empty range: no maintenance required.
        if size == 0 {
            return;
        }
        let cbp = unsafe { &mut *CBP.get() };
        ::asm::dsb();
        // NOTE(review): assumes a fixed 32-byte cache line (see above).
        const LINESIZE: usize = 32;
        let num_lines = ((size - 1)/LINESIZE) + 1;
        // Align the start address down to a line boundary.
        let mut addr = addr & 0xFFFF_FFE0;
        for _ in 0..num_lines {
            cbp.dccimvac(addr as u32);
            addr += LINESIZE;
        }
        ::asm::dsb();
        ::asm::isb();
    }
}
/// SysTick timer register layout.
#[repr(C)]
pub struct SYST {
    /// Control and Status Register
    pub csr: RW<u32>,
    /// Reload Value Register
    pub rvr: RW<u32>,
    /// Current Value Register
    pub cvr: RW<u32>,
    /// Calibration Value Register (read-only)
    pub calib: RO<u32>,
}
/// Clock source feeding the SysTick counter.
#[derive(Clone, Copy, Debug)]
pub enum SystClkSource {
    /// Processor (core) clock
    Core,
    /// External reference clock
    External,
}
/// RVR/CVR counter values are 24 bits wide.
const SYST_COUNTER_MASK: u32 = 0x00ffffff;
/// CSR bit 0: counter enable.
const SYST_CSR_ENABLE: u32 = 1 << 0;
/// CSR bit 1: SysTick exception enable.
const SYST_CSR_TICKINT: u32 = 1 << 1;
/// CSR bit 2: clock source select (1 = core clock, 0 = external).
const SYST_CSR_CLKSOURCE: u32 = 1 << 2;
/// CSR bit 16: counter wrapped to zero since last read.
const SYST_CSR_COUNTFLAG: u32 = 1 << 16;
/// CALIB bit 30: the TENMS calibration value is inexact (skewed).
const SYST_CALIB_SKEW: u32 = 1 << 30;
/// CALIB bit 31: no separate reference clock is provided.
const SYST_CALIB_NOREF: u32 = 1 << 31;
impl SYST {
    /// Checks whether the counter is running (CSR.ENABLE).
    pub fn is_counter_enabled(&self) -> bool {
        self.csr.read() & SYST_CSR_ENABLE == SYST_CSR_ENABLE
    }

    /// Starts the counter.
    pub fn enable_counter(&self) {
        unsafe { self.csr.modify(|r| r | SYST_CSR_ENABLE) }
    }

    /// Stops the counter.
    pub fn disable_counter(&self) {
        unsafe { self.csr.modify(|r| r & !SYST_CSR_ENABLE) }
    }

    /// Checks whether the SysTick exception is enabled (CSR.TICKINT).
    pub fn is_interrupt_enabled(&self) -> bool {
        self.csr.read() & SYST_CSR_TICKINT == SYST_CSR_TICKINT
    }

    /// Enables the SysTick exception on counter wrap.
    pub fn enable_interrupt(&self) {
        unsafe { self.csr.modify(|r| r | SYST_CSR_TICKINT) }
    }

    /// Disables the SysTick exception.
    pub fn disable_interrupt(&self) {
        unsafe { self.csr.modify(|r| r & !SYST_CSR_TICKINT) }
    }

    /// Reads the configured clock source (CSR.CLKSOURCE).
    pub fn get_clock_source(&self) -> SystClkSource {
        if self.csr.read() & SYST_CSR_CLKSOURCE == 0 {
            SystClkSource::External
        } else {
            SystClkSource::Core
        }
    }

    /// Selects the clock that drives the counter.
    pub fn set_clock_source(&self, clk_source: SystClkSource) {
        unsafe {
            match clk_source {
                SystClkSource::External => self.csr.modify(|r| r & !SYST_CSR_CLKSOURCE),
                SystClkSource::Core => self.csr.modify(|r| r | SYST_CSR_CLKSOURCE),
            }
        }
    }

    /// Checks whether the counter wrapped to zero since the last read of
    /// CSR (CSR.COUNTFLAG).
    pub fn has_wrapped(&self) -> bool {
        self.csr.read() & SYST_CSR_COUNTFLAG == SYST_CSR_COUNTFLAG
    }

    /// Reads the reload value.
    pub fn get_reload(&self) -> u32 {
        self.rvr.read()
    }

    /// Sets the value loaded into the counter on wrap.
    pub fn set_reload(&self, value: u32) {
        unsafe { self.rvr.write(value) }
    }

    /// Reads the current counter value.
    pub fn get_current(&self) -> u32 {
        self.cvr.read()
    }

    /// Resets the current counter value to zero.
    pub fn clear_current(&self) {
        unsafe { self.cvr.write(0) }
    }

    /// Reads the 10 ms calibration value (CALIB.TENMS field).
    pub fn get_ticks_per_10ms(&self) -> u32 {
        self.calib.read() & SYST_COUNTER_MASK
    }

    /// Returns `true` if the calibration value is exact (CALIB.SKEW clear).
    pub fn is_precise(&self) -> bool {
        self.calib.read() & SYST_CALIB_SKEW == 0
    }

    /// Returns `true` if a reference clock is implemented
    /// (CALIB.NOREF clear).
    pub fn has_reference_clock(&self) -> bool {
        self.calib.read() & SYST_CALIB_NOREF == 0
    }
}
/// Trace Port Interface Unit register layout.
#[repr(C)]
pub struct TPIU {
    /// Supported Parallel Port Sizes Register
    pub sspsr: RO<u32>,
    /// Current Parallel Port Size Register
    pub cspsr: RW<u32>,
    reserved0: [u32; 2],
    /// Asynchronous Clock Prescaler Register
    pub acpr: RW<u32>,
    reserved1: [u32; 55],
    /// Selected Pin Protocol Register
    pub sppr: RW<u32>,
    reserved2: [u32; 943],
    /// Lock Access Register (write-only)
    pub lar: WO<u32>,
    /// Lock Status Register
    pub lsr: RO<u32>,
    reserved3: [u32; 4],
    /// Device Type Identifier Register (`_type`: `type` is a keyword)
    pub _type: RO<u32>,
}
/// Cache and Branch Predictor maintenance operations (ARMv7-M only).
///
/// Every register is a write-only operation trigger; writing an operand
/// starts the corresponding maintenance operation.
#[repr(C)]
#[cfg(armv7m)]
pub struct CBP {
    /// I-cache invalidate all to the Point of Unification
    pub iciallu: WO<u32>,
    reserved0: u32,
    /// I-cache invalidate by address (MVA) to the Point of Unification
    pub icimvau: WO<u32>,
    /// D-cache invalidate by address (MVA) to the Point of Coherency
    pub dcimvac: WO<u32>,
    /// D-cache invalidate by set/way
    pub dcisw: WO<u32>,
    /// D-cache clean by address (MVA) to the Point of Unification
    pub dccmvau: WO<u32>,
    /// D-cache clean by address (MVA) to the Point of Coherency
    pub dccmvac: WO<u32>,
    /// D-cache clean by set/way
    pub dccsw: WO<u32>,
    /// D-cache clean and invalidate by address (MVA) to the Point of
    /// Coherency
    pub dccimvac: WO<u32>,
    /// D-cache clean and invalidate by set/way
    pub dccisw: WO<u32>,
    /// Branch predictor invalidate all
    pub bpiall: WO<u32>,
}
#[cfg(armv7m)]
mod cbp_consts {
    /// Way field of the set/way operand occupies bits [31:30].
    pub const CBP_SW_WAY_POS: u32 = 30;
    pub const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
    /// Set field of the set/way operand occupies bits [13:5].
    pub const CBP_SW_SET_POS: u32 = 5;
    pub const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
}
#[cfg(armv7m)]
use self::cbp_consts::*;
#[cfg(armv7m)]
impl CBP {
    /// Encodes `set` and `way` into the operand layout shared by the
    /// set/way maintenance registers (DCISW, DCCSW, DCCISW).
    ///
    /// Both values are masked to their field widths; the duplicated inline
    /// expression the three callers previously carried lives here once.
    #[inline(always)]
    fn sw_operand(set: u16, way: u16) -> u32 {
        (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) |
        (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS)
    }

    /// I-cache invalidate all to PoU.
    #[inline(always)]
    pub fn iciallu(&self) {
        unsafe { self.iciallu.write(0); }
    }

    /// I-cache invalidate by MVA to PoU.
    #[inline(always)]
    pub fn icimvau(&self, mva: u32) {
        unsafe { self.icimvau.write(mva); }
    }

    /// D-cache invalidate by MVA to PoC.
    #[inline(always)]
    pub fn dcimvac(&self, mva: u32) {
        unsafe { self.dcimvac.write(mva); }
    }

    /// D-cache invalidate by set/way.
    #[inline(always)]
    pub fn dcisw(&self, set: u16, way: u16) {
        unsafe { self.dcisw.write(Self::sw_operand(set, way)); }
    }

    /// D-cache clean by MVA to PoU.
    #[inline(always)]
    pub fn dccmvau(&self, mva: u32) {
        unsafe { self.dccmvau.write(mva); }
    }

    /// D-cache clean by MVA to PoC.
    #[inline(always)]
    pub fn dccmvac(&self, mva: u32) {
        unsafe { self.dccmvac.write(mva); }
    }

    /// D-cache clean by set/way.
    #[inline(always)]
    pub fn dccsw(&self, set: u16, way: u16) {
        unsafe { self.dccsw.write(Self::sw_operand(set, way)); }
    }

    /// D-cache clean and invalidate by MVA to PoC.
    #[inline(always)]
    pub fn dccimvac(&self, mva: u32) {
        unsafe { self.dccimvac.write(mva); }
    }

    /// D-cache clean and invalidate by set/way.
    #[inline(always)]
    pub fn dccisw(&self, set: u16, way: u16) {
        unsafe { self.dccisw.write(Self::sw_operand(set, way)); }
    }

    /// Branch predictor invalidate all.
    #[inline(always)]
    pub fn bpiall(&self) {
        unsafe { self.bpiall.write(0); }
    }
}