#[cfg(any(mpu_armv6m, mpu_armv7m, mpu_armv7r))]
mod armv7 {
use core::mem::size_of;
use crate::{
align::{Align, ValidAlignment},
bindings::{RT_MPU_ATTR_PERIPHERAL, RT_MPU_ATTR_STACK, rt_mpu_region},
cell::SyncUnsafeCell,
};
/// Ceiling of log2(`size`); returns 0 for `size <= 1`.
///
/// Uses `usize::BITS` rather than a hard-coded 32: on 32-bit targets the
/// result is identical, while on wider `usize` (e.g. host-side const
/// evaluation or 64-bit builds) the original `32 - leading_zeros()` would
/// underflow and panic, since `leading_zeros` counts from the full width.
const fn log2ceil(size: usize) -> u32 {
    if size <= 1 {
        0
    } else {
        usize::BITS - (size - 1).leading_zeros()
    }
}
/// SIZE-field value for a region covering `size` bytes; the described
/// region spans 2^(sizebits + 1) bytes. Clamped to 7 (a 256-byte region)
/// at the low end, which keeps the eight subregions at least 32 bytes each
/// (see `subregion_size`).
const fn sizebits(size: usize) -> u32 {
    if size < 256 {
        return 7;
    }
    log2ceil(size) - 1
}
/// Power-of-two byte size of the region that covers `size` bytes.
/// The `% 32` wraps the shift for the degenerate whole-address-space case
/// on 32-bit targets instead of overflowing.
const fn region_size(size: usize) -> usize {
    let shift = (sizebits(size) + 1) % 32;
    1 << shift
}
/// Mask that aligns an address down to the region's natural
/// (power-of-two) boundary.
const fn region_size_addr_mask(size: usize) -> usize {
    let offset_bits = region_size(size) - 1;
    !offset_bits
}
/// Byte size of one subregion: one eighth of the full region
/// (2^(sizebits + 1) / 8 = 2^(sizebits - 2)).
const fn subregion_size(size: usize) -> usize {
    let shift = sizebits(size) - 2;
    1 << shift
}
/// Mask that aligns an address down to a subregion boundary.
const fn subregion_size_addr_mask(size: usize) -> usize {
    let offset_bits = subregion_size(size) - 1;
    !offset_bits
}
/// Whether the subregion-disable (SRD) bits apply: any non-zero size up to
/// 2 GiB. Larger (or zero-sized) buffers get no subregion trimming.
const fn use_subregions(size: usize) -> bool {
    matches!(size, 1..=0x8000_0000)
}
/// Index (0..8) of the subregion containing `addr`, within the region
/// sized to cover `size` bytes.
const fn subregion_offset(addr: usize, size: usize) -> usize {
    let sub_base = addr & subregion_size_addr_mask(size);
    let region_base = addr & region_size_addr_mask(size);
    (sub_base - region_base) / subregion_size(size)
}
/// SRD bits that disable every subregion strictly below `offset`.
const fn srd_prefix(offset: usize) -> u32 {
    // Set bits [0, offset); equivalent to (1 << offset) - 1.
    !(u32::MAX << offset)
}
/// SRD bits that disable every subregion strictly above `offset`
/// (eight subregions total, hence the 0xFF mask).
const fn srd_suffix(offset: usize) -> u32 {
    (0xFF << (offset + 1)) & 0xFF
}
/// Subregion-disable mask for a buffer at `addr` spanning `size` bytes:
/// disables the subregions before the first byte and after the last byte
/// of the buffer, so the (power-of-two-aligned) region only grants access
/// to the buffer itself.
const fn srd(addr: usize, size: usize) -> u32 {
    if !use_subregions(size) {
        return 0;
    }
    let first = subregion_offset(addr, size);
    let last = subregion_offset(addr + size - 1, size);
    srd_prefix(first) | srd_suffix(last)
}
/// Assembles the combined attribute/size word: SIZE at bits [5:1] and SRD
/// at bits [15:8] (ARMv7-M MPU_RASR layout), OR-ed with the caller's
/// attribute bits.
const fn attr_size(addr: usize, size: usize, attr: u32) -> u32 {
    let size_field = sizebits(size) << 1;
    let srd_field = srd(addr, size) << 8;
    size_field | srd_field | attr
}
/// A single MPU region descriptor, transparent over the C-side
/// `rt_mpu_region` so it can be handed to the kernel bindings directly.
#[repr(transparent)]
pub struct Region(pub(crate) rt_mpu_region);
impl Region {
    /// Builds a region descriptor for `size` bytes starting at `addr`,
    /// with the raw attribute bits `attr`.
    ///
    /// The base is aligned down to the region's power-of-two boundary;
    /// the SRD bits produced by `attr_size` re-exclude the leading and
    /// trailing space that alignment adds.
    pub fn new(addr: *const (), size: usize, attr: u32) -> Region {
        let addr = addr.addr();
        let base_addr = (addr & region_size_addr_mask(size)) as u32;
        // v6-M/v7-M: one combined attribute/size word (RASR-shaped).
        #[cfg(any(mpu_armv6m, mpu_armv7m))]
        {
            Region(rt_mpu_region {
                base_addr,
                attr_size: attr_size(addr, size, attr),
            })
        }
        // v7-R: size/enable in the low half-word; the upper 16 bits of
        // `attr` carry the access-control register value.
        #[cfg(mpu_armv7r)]
        {
            Region(rt_mpu_region {
                base_addr,
                size_enable: attr_size(addr, size, attr) & 0xFFFF,
                access_control: attr >> 16,
            })
        }
    }

    /// Region descriptor covering an entire task stack, using the
    /// stack attribute preset from the bindings.
    pub fn from_stack<const N: usize>(stack: &Stack<N>) -> Region
    where
        Align<N>: ValidAlignment,
    {
        Region::new(
            stack.buf.get().cast(),
            size_of::<Stack<N>>(),
            RT_MPU_ATTR_STACK,
        )
    }
}
/// Task stack buffer for Armv6/7 MPU targets.
///
/// NOTE(review): `Align<N>: ValidAlignment` presumably restricts `N` to
/// sizes the MPU can describe and forces matching alignment — confirm in
/// the `align` module.
pub struct Stack<const N: usize>
where
    Align<N>: ValidAlignment,
{
    // Zero-sized alignment marker; never read.
    _align: Align<N>,
    // Raw stack memory; interior-mutable so the kernel may write through
    // a shared reference.
    pub buf: SyncUnsafeCell<[u8; N]>,
}
impl<const N: usize> Stack<N>
where
    Align<N>: ValidAlignment,
{
    /// Creates a zero-filled task stack; usable in `static` initializers.
    pub const fn new() -> Stack<N> {
        Stack {
            _align: Align::new(),
            buf: SyncUnsafeCell::new([0u8; N]),
        }
    }
}
impl<const N: usize> Default for Stack<N>
where
    Align<N>: ValidAlignment,
{
    /// Same as [`Stack::new`]: a zero-filled stack.
    fn default() -> Self {
        Self::new()
    }
}
pub const ATTR_PERIPHERAL: u32 = RT_MPU_ATTR_PERIPHERAL;
}
#[cfg(any(mpu_armv6m, mpu_armv7m, mpu_armv7r))]
pub use armv7::{ATTR_PERIPHERAL, Region, Stack};
#[cfg(any(mpu_armv8m, mpu_armv8r))]
mod armv8 {
use core::mem::size_of;
use crate::{
bindings::{
RT_MPU_ADDR_MASK, RT_MPU_ATTR_MASK, RT_MPU_ATTR_PERIPHERAL, RT_MPU_ATTR_RLAR_SHIFT,
RT_MPU_ATTR_STACK, rt_mpu_region,
},
cell::SyncUnsafeCell,
};
/// Task stack buffer for Armv8-M: aligned to the 32-byte MPU granule.
#[cfg(mpu_armv8m)]
#[repr(align(32))]
pub struct Stack<const N: usize> {
    // Raw stack memory; interior-mutable so the kernel may write through
    // a shared reference.
    pub buf: SyncUnsafeCell<[u8; N]>,
}
/// Task stack buffer for Armv8-R: aligned to the 64-byte MPU granule.
#[cfg(mpu_armv8r)]
#[repr(align(64))]
pub struct Stack<const N: usize> {
    pub buf: SyncUnsafeCell<[u8; N]>,
}
impl<const N: usize> Stack<N> {
    /// Creates a zero-filled task stack.
    ///
    /// `N` must be a multiple of the 32-byte Armv8-M MPU granule; the
    /// inline-const assertion makes violations a compile-time error.
    #[cfg(mpu_armv8m)]
    pub const fn new() -> Stack<N> {
        const {
            assert!(N % 32 == 0, "stack size must be a multiple of 32 bytes");
        }
        Stack {
            buf: SyncUnsafeCell::new([0u8; N]),
        }
    }

    /// Creates a zero-filled task stack.
    ///
    /// `N` must be a multiple of the 64-byte Armv8-R MPU granule.
    /// The check is wrapped in an inline `const` block (matching the
    /// Armv8-M variant above) so it always fails at compile time; a bare
    /// `assert!` would be deferred to runtime for non-const callers.
    #[cfg(mpu_armv8r)]
    pub const fn new() -> Stack<N> {
        const {
            assert!(N % 64 == 0, "stack size must be a multiple of 64 bytes");
        }
        Stack {
            buf: SyncUnsafeCell::new([0u8; N]),
        }
    }
}
impl<const N: usize> Default for Stack<N> {
    /// Same as [`Stack::new`]: a zero-filled stack.
    fn default() -> Self {
        Self::new()
    }
}
/// RBAR-style value: the region base masked to the architectural address
/// bits, combined with the low (base-register) attribute bits of `attr`.
const fn base_addr(addr: usize, attr: u32) -> u32 {
    let base = addr as u32 & RT_MPU_ADDR_MASK;
    let rbar_attr = attr & RT_MPU_ATTR_MASK;
    base | rbar_attr
}
/// RLAR-style value: the region's inclusive end address combined with the
/// attribute bits packed above `RT_MPU_ATTR_RLAR_SHIFT` in `attr`.
/// NOTE(review): `size == 0` would underflow `addr + size - 1`; callers
/// are assumed to pass non-empty regions — confirm at call sites.
const fn limit_addr(addr: usize, size: usize, attr: u32) -> u32 {
    let end = (addr + size - 1) as u32 & RT_MPU_ADDR_MASK;
    let rlar_attr = (attr >> RT_MPU_ATTR_RLAR_SHIFT) & RT_MPU_ATTR_MASK;
    end | rlar_attr
}
/// A single MPU region descriptor, transparent over the C-side
/// `rt_mpu_region` (base/limit register pair on Armv8).
#[repr(transparent)]
pub struct Region(pub(crate) rt_mpu_region);
impl Region {
    /// Builds a region descriptor for `size` bytes starting at `addr`,
    /// splitting `attr` into its base-register and limit-register parts.
    pub fn new(addr: *const (), size: usize, attr: u32) -> Region {
        let start = addr.addr();
        Region(rt_mpu_region {
            base_addr: base_addr(start, attr),
            limit_addr: limit_addr(start, size, attr),
        })
    }

    /// Region descriptor covering an entire task stack, using the stack
    /// attribute preset from the bindings.
    pub fn from_stack<const N: usize>(stack: *const Stack<N>) -> Region {
        Region::new(stack.cast(), size_of::<Stack<N>>(), RT_MPU_ATTR_STACK)
    }
}
pub const ATTR_PERIPHERAL: u32 = RT_MPU_ATTR_PERIPHERAL;
}
#[cfg(any(mpu_armv8m, mpu_armv8r))]
pub use armv8::{ATTR_PERIPHERAL, Region, Stack};
#[cfg(mpu_riscv)]
mod riscv {
use core::mem::size_of;
use crate::{
align::{Align, ValidAlignment},
bindings::{RT_MPU_ATTR_PERIPHERAL, RT_MPU_ATTR_STACK, rt_mpu_region},
cell::SyncUnsafeCell,
};
/// Low-bit pattern encoding a NAPOT region size into a PMP address word:
/// `size / 8 - 1` ones for a power-of-two `size >= 8`.
const fn napot_bits(size: usize) -> usize {
    if size < 8 {
        // Degenerate: below the minimum NAPOT granule, no extra bits.
        return 0;
    }
    if size == usize::MAX {
        // Sentinel for the entire address space: usize::MAX >> 3 equals
        // 2^BITS / 8 - 1, the all-covering NAPOT pattern.
        return usize::MAX >> 3;
    }
    (size >> 3) - 1
}
/// PMP address word: the base address shifted right by two, with the
/// NAPOT size pattern OR-ed into the low bits.
const fn pmpaddr(addr: usize, size: usize) -> usize {
    let base = addr >> 2;
    base | napot_bits(size)
}
/// A single PMP entry, transparent over the C-side `rt_mpu_region`
/// (pmpaddr word plus one pmpxcfg byte).
#[repr(transparent)]
pub struct Region(pub(crate) rt_mpu_region);
impl Region {
    /// Builds a PMP entry: NAPOT-encoded address word plus the raw 8-bit
    /// configuration byte `attr`.
    pub fn new(addr: *const (), size: usize, attr: u8) -> Region {
        let encoded = pmpaddr(addr.addr(), size);
        Region(rt_mpu_region {
            pmpaddr: encoded,
            pmpxcfg: attr,
        })
    }

    /// PMP entry covering an entire task stack, using the stack attribute
    /// preset from the bindings.
    pub fn from_stack<const N: usize>(stack: *const Stack<N>) -> Region
    where
        Align<N>: ValidAlignment,
    {
        Region::new(stack.cast(), size_of::<Stack<N>>(), RT_MPU_ATTR_STACK)
    }
}
/// Task stack buffer for RISC-V PMP targets.
///
/// NOTE(review): `Align<N>: ValidAlignment` presumably restricts `N` to
/// NAPOT-representable sizes and forces matching alignment — confirm in
/// the `align` module.
pub struct Stack<const N: usize>
where
    Align<N>: ValidAlignment,
{
    // Zero-sized alignment marker; never read.
    _align: Align<N>,
    // Raw stack memory; interior-mutable so the kernel may write through
    // a shared reference.
    pub buf: SyncUnsafeCell<[u8; N]>,
}
impl<const N: usize> Stack<N>
where
    Align<N>: ValidAlignment,
{
    /// Creates a zero-filled task stack; usable in `static` initializers.
    pub const fn new() -> Stack<N> {
        Stack {
            _align: Align::new(),
            buf: SyncUnsafeCell::new([0u8; N]),
        }
    }
}
impl<const N: usize> Default for Stack<N>
where
    Align<N>: ValidAlignment,
{
    /// Same as [`Stack::new`]: a zero-filled stack.
    fn default() -> Self {
        Self::new()
    }
}
pub const ATTR_PERIPHERAL: u8 = RT_MPU_ATTR_PERIPHERAL;
}
#[cfg(mpu_riscv)]
pub use riscv::{ATTR_PERIPHERAL, Region, Stack};
#[cfg(feature = "task-mpu")]
use crate::task::Task;
#[cfg(feature = "task-mpu")]
/// Copies `regions` into `task`'s per-task MPU configuration block.
///
/// The region count `N` is checked against the per-task hardware limit at
/// compile time. NOTE(review): assumes `task` points to a live, writable
/// task object — confirm the caller's contract.
pub fn task_mpu_config_init<const N: usize>(task: *const Task, regions: &[Region; N]) {
    use core::cell::UnsafeCell;
    use crate::{
        bindings::RT_MPU_NUM_TASK_REGIONS_,
        ptr_macros::{ptr_to_field, ptr_to_field_mut},
    };
    // Reject over-long region arrays at compile time (N is const-generic).
    const {
        assert!(
            N <= RT_MPU_NUM_TASK_REGIONS_ as usize,
            "too many MPU regions"
        );
    }
    // Project from the task wrapper down to its embedded mpu_config field
    // via raw-pointer field projection (no intermediate whole-struct refs).
    let task_ptr = UnsafeCell::raw_get(ptr_to_field!(task, task));
    let config_ptr = ptr_to_field_mut!(task_ptr, mpu_config);
    // SAFETY: relies on `task` being a valid pointer to an initialized
    // task; `config_ptr` is derived from it by field projection.
    let config = unsafe { &mut *config_ptr };
    // RISC-V PMP: one pmpaddr word per region; the 8-bit pmpxcfg values
    // are packed four per 32-bit pmpcfg word (RV32-style layout).
    #[cfg(mpu_riscv)]
    for (i, r) in regions.iter().enumerate() {
        config.pmpaddr[i] = r.0.pmpaddr;
        config.pmpcfg[i / 4] |= (r.0.pmpxcfg as u32) << (8 * (i % 4));
    }
    // Arm MPUs: region records are copied verbatim.
    #[cfg(not(mpu_riscv))]
    for (i, r) in regions.iter().enumerate() {
        config.regions[i] = r.0;
    }
    #[cfg(any(mpu_armv6m, mpu_armv7m))]
    {
        use crate::bindings::{
            RT_MPU_BASE_ADDR_REGION_ID_MASK, RT_MPU_BASE_ADDR_REGION_ID_VALID,
            RT_MPU_TASK_REGION_START_ID_,
        };
        // Stamp every task region slot (including slots beyond N) with its
        // hardware region number plus the VALID bit, RBAR-style, so each
        // base-address write selects the region it targets.
        for i in 0..RT_MPU_NUM_TASK_REGIONS_ {
            let id = RT_MPU_TASK_REGION_START_ID_ + i;
            config.regions[i as usize].base_addr |=
                (id & RT_MPU_BASE_ADDR_REGION_ID_MASK) | RT_MPU_BASE_ADDR_REGION_ID_VALID;
        }
    }
}