use arbitrary_int::{u26, u3};
use crate::register;
#[doc(inline)]
pub use register::hprbar::{AccessPerms as El2AccessPerms, Shareability as El2Shareability};
#[doc(inline)]
pub use register::prbar::{AccessPerms as El1AccessPerms, Shareability as El1Shareability};
/// Errors returned by the MPU configuration routines.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Error {
    /// More regions were supplied than the MPU implements
    /// (see `num_regions`).
    TooManyRegions,
    /// The MAIR attribute index was out of range (must be `0..=7`);
    /// carries the offending value.
    InvalidMair(u8),
    /// The region's start address was not 64-byte aligned, or its inclusive
    /// end address was not the last byte of a 64-byte granule; carries the
    /// offending range.
    UnalignedRegion(core::ops::RangeInclusive<*mut u8>),
}
/// Driver handle for the EL1-controlled Memory Protection Unit.
///
/// All methods go through the PRSELR/PRBAR/PRLAR, MAIR0/MAIR1 and SCTLR
/// system registers, so a value of this type should be treated as exclusive
/// ownership of that hardware.
pub struct El1Mpu();

impl El1Mpu {
    /// Create a handle to the EL1 MPU.
    ///
    /// # Safety
    ///
    /// The caller must ensure that only one `El1Mpu` exists at a time, and
    /// that the code runs at a privilege level allowed to access the EL1 MPU
    /// registers — otherwise the register accesses performed by the methods
    /// below are unsound.
    pub unsafe fn new() -> El1Mpu {
        El1Mpu()
    }

    /// Number of protection regions this MPU implements (MPUIR.DREGIONS).
    pub fn num_regions(&self) -> u8 {
        register::Mpuir::read().dregions()
    }

    /// Read back the configuration of region `idx`, or `None` if `idx` is
    /// out of range.
    ///
    /// Takes `&mut self` because it writes PRSELR to select the region
    /// before reading PRBAR/PRLAR.
    pub fn get_region(&mut self, idx: u8) -> Option<El1Region> {
        if idx >= self.num_regions() {
            return None;
        }
        // Select the region, then read its base/limit registers.
        register::Prselr::write(register::Prselr(idx as u32));
        let prbar = register::Prbar::read();
        let prlar = register::Prlar::read();
        // BASE/LIMIT hold address bits [31:6]; regions have 64-byte
        // granularity, so the inclusive end address has the low 6 bits set.
        let start_addr = (prbar.base().value() << 6) as *mut u8;
        let end_addr = ((prlar.limit().value() << 6) | 0x3F) as *mut u8;
        Some(El1Region {
            range: start_addr..=end_addr,
            shareability: prbar.shareability(),
            access: prbar.access_perms(),
            no_exec: prbar.nx(),
            mair: prlar.mair().value(),
            enable: prlar.enabled(),
        })
    }

    /// Program region `idx` from `region`.
    ///
    /// Note that `idx` is not range-checked here — use [`Self::set_regions`]
    /// or check [`Self::num_regions`] yourself.
    ///
    /// # Errors
    ///
    /// * [`Error::UnalignedRegion`] if the range start is not 64-byte
    ///   aligned, or the inclusive end is not the last byte of a 64-byte
    ///   granule.
    /// * [`Error::InvalidMair`] if `region.mair` is not in `0..=7`.
    pub fn set_region(&mut self, idx: u8, region: &El1Region) -> Result<(), Error> {
        let start = *(region.range.start()) as usize as u32;
        if start & 0x3F != 0 {
            return Err(Error::UnalignedRegion(region.range.clone()));
        }
        let end = *(region.range.end()) as usize as u32;
        if end & 0x3F != 0x3F {
            return Err(Error::UnalignedRegion(region.range.clone()));
        }
        if region.mair > 7 {
            return Err(Error::InvalidMair(region.mair));
        }
        // Select the region, then program its base and limit registers.
        register::Prselr::write(register::Prselr(idx as u32));
        register::Prbar::write({
            let mut bar = register::Prbar::new_with_raw_value(0);
            // Only address bits [31:6] are stored.
            bar.set_base(u26::from_u32(start >> 6));
            bar.set_access_perms(region.access);
            bar.set_nx(region.no_exec);
            bar.set_shareability(region.shareability);
            bar
        });
        register::Prlar::write({
            let mut lar = register::Prlar::new_with_raw_value(0);
            lar.set_limit(u26::from_u32(end >> 6));
            lar.set_enabled(region.enable);
            lar.set_mair(u3::from_u8(region.mair));
            lar
        });
        Ok(())
    }

    /// Program a batch of regions at consecutive indices, starting at
    /// `regions_starting_idx`.
    ///
    /// # Errors
    ///
    /// * [`Error::TooManyRegions`] if the batch would run past the last
    ///   implemented region.
    /// * Any error from [`Self::set_region`]; earlier regions in the batch
    ///   stay programmed in that case.
    pub fn set_regions(
        &mut self,
        regions_starting_idx: u8,
        regions: &[El1Region],
    ) -> Result<(), Error> {
        if regions.len().saturating_add(regions_starting_idx as usize) > self.num_regions() as usize
        {
            return Err(Error::TooManyRegions);
        }
        for (idx, region) in regions.iter().enumerate() {
            self.set_region(idx as u8 + regions_starting_idx, region)?;
        }
        Ok(())
    }

    /// Program MAIR0/MAIR1 from up to eight memory-attribute entries.
    ///
    /// `memattrs[0..4]` fill MAIR0 and `memattrs[4..8]` fill MAIR1, one byte
    /// per entry, lowest index in the least-significant byte. Missing entries
    /// are encoded as 0; entries beyond index 7 are silently ignored.
    pub fn set_attributes(&mut self, memattrs: &[MemAttr]) {
        let mem_attr0 = memattrs.get(0).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr1 = memattrs.get(1).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr2 = memattrs.get(2).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr3 = memattrs.get(3).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mair0 = mem_attr3 << 24 | mem_attr2 << 16 | mem_attr1 << 8 | mem_attr0;
        unsafe {
            register::Mair0::write(register::Mair0(mair0));
        }
        let mem_attr0 = memattrs.get(4).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr1 = memattrs.get(5).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr2 = memattrs.get(6).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr3 = memattrs.get(7).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mair1 = mem_attr3 << 24 | mem_attr2 << 16 | mem_attr1 << 8 | mem_attr0;
        unsafe {
            register::Mair1::write(register::Mair1(mair1));
        }
    }

    /// Enable or disable the background (default) memory map (SCTLR.BR).
    pub fn background_region_enable(&mut self, enable: bool) {
        register::Sctlr::modify(|r| {
            r.set_br(enable);
        });
    }

    /// Apply a complete configuration: regions (from index 0), memory
    /// attributes, and the background-region setting.
    ///
    /// Does not enable the MPU itself — call [`Self::enable`] afterwards.
    ///
    /// # Errors
    ///
    /// Any error from [`Self::set_regions`]; the MPU may be partially
    /// configured in that case.
    pub fn configure(&mut self, config: &El1Config) -> Result<(), Error> {
        self.set_regions(0, config.regions)?;
        self.set_attributes(config.memory_attributes);
        self.background_region_enable(config.background_config);
        Ok(())
    }

    /// Turn the MPU on (SCTLR.M).
    pub fn enable(&mut self) {
        register::Sctlr::modify(|r| {
            r.set_m(true);
        });
    }

    /// Turn the MPU off (SCTLR.M).
    pub fn disable(&mut self) {
        register::Sctlr::modify(|r| {
            r.set_m(false);
        });
    }
}
/// Driver handle for the EL2 (hypervisor) Memory Protection Unit.
///
/// All methods go through the HPRSELR/HPRBAR/HPRLAR, HMAIR0/HMAIR1 and
/// HSCTLR system registers, so a value of this type should be treated as
/// exclusive ownership of that hardware.
pub struct El2Mpu();

impl El2Mpu {
    /// Create a handle to the EL2 MPU.
    ///
    /// # Safety
    ///
    /// The caller must ensure that only one `El2Mpu` exists at a time, and
    /// that the code runs at a privilege level allowed to access the EL2 MPU
    /// registers — otherwise the register accesses performed by the methods
    /// below are unsound.
    pub unsafe fn new() -> El2Mpu {
        El2Mpu()
    }

    /// Number of protection regions this MPU implements (HMPUIR.REGION).
    pub fn num_regions(&self) -> u8 {
        register::Hmpuir::read().region()
    }

    /// Read back the configuration of region `idx`, or `None` if `idx` is
    /// out of range.
    ///
    /// Takes `&mut self` because it writes HPRSELR to select the region
    /// before reading HPRBAR/HPRLAR.
    pub fn get_region(&mut self, idx: u8) -> Option<El2Region> {
        if idx >= self.num_regions() {
            return None;
        }
        // Select the region, then read its base/limit registers.
        register::Hprselr::write(register::Hprselr(idx as u32));
        let hprbar = register::Hprbar::read();
        let hprlar = register::Hprlar::read();
        // BASE/LIMIT hold address bits [31:6]; regions have 64-byte
        // granularity, so the inclusive end address has the low 6 bits set.
        let start_addr = (hprbar.base().value() << 6) as *mut u8;
        let end_addr = ((hprlar.limit().value() << 6) | 0x3F) as *mut u8;
        Some(El2Region {
            range: start_addr..=end_addr,
            shareability: hprbar.shareability(),
            access: hprbar.access_perms(),
            no_exec: hprbar.nx(),
            mair: hprlar.mair().value(),
            enable: hprlar.enabled(),
        })
    }

    /// Program region `idx` from `region`.
    ///
    /// Note that `idx` is not range-checked here — use [`Self::set_regions`]
    /// or check [`Self::num_regions`] yourself.
    ///
    /// # Errors
    ///
    /// * [`Error::UnalignedRegion`] if the range start is not 64-byte
    ///   aligned, or the inclusive end is not the last byte of a 64-byte
    ///   granule.
    /// * [`Error::InvalidMair`] if `region.mair` is not in `0..=7`.
    pub fn set_region(&mut self, idx: u8, region: &El2Region) -> Result<(), Error> {
        let start = *(region.range.start()) as usize as u32;
        if start & 0x3F != 0 {
            return Err(Error::UnalignedRegion(region.range.clone()));
        }
        let end = *(region.range.end()) as usize as u32;
        if end & 0x3F != 0x3F {
            return Err(Error::UnalignedRegion(region.range.clone()));
        }
        if region.mair > 7 {
            return Err(Error::InvalidMair(region.mair));
        }
        // Select the region, then program its base and limit registers.
        register::Hprselr::write(register::Hprselr(idx as u32));
        register::Hprbar::write({
            let mut bar = register::Hprbar::new_with_raw_value(0);
            // Only address bits [31:6] are stored.
            bar.set_base(u26::from_u32(start >> 6));
            bar.set_access_perms(region.access);
            bar.set_nx(region.no_exec);
            bar.set_shareability(region.shareability);
            bar
        });
        register::Hprlar::write({
            let mut lar = register::Hprlar::new_with_raw_value(0);
            lar.set_limit(u26::from_u32(end >> 6));
            lar.set_enabled(region.enable);
            lar.set_mair(u3::from_u8(region.mair));
            lar
        });
        Ok(())
    }

    /// Program a batch of regions at consecutive indices, starting at
    /// `regions_starting_idx`.
    ///
    /// # Errors
    ///
    /// * [`Error::TooManyRegions`] if the batch would run past the last
    ///   implemented region.
    /// * Any error from [`Self::set_region`]; earlier regions in the batch
    ///   stay programmed in that case.
    pub fn set_regions(
        &mut self,
        regions_starting_idx: u8,
        regions: &[El2Region],
    ) -> Result<(), Error> {
        if regions.len().saturating_add(regions_starting_idx as usize) > self.num_regions() as usize
        {
            return Err(Error::TooManyRegions);
        }
        for (idx, region) in regions.iter().enumerate() {
            self.set_region(idx as u8 + regions_starting_idx, region)?;
        }
        Ok(())
    }

    /// Program HMAIR0/HMAIR1 from up to eight memory-attribute entries.
    ///
    /// `memattrs[0..4]` fill HMAIR0 and `memattrs[4..8]` fill HMAIR1, one
    /// byte per entry, lowest index in the least-significant byte. Missing
    /// entries are encoded as 0; entries beyond index 7 are silently ignored.
    pub fn set_attributes(&mut self, memattrs: &[MemAttr]) {
        let mem_attr0 = memattrs.get(0).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr1 = memattrs.get(1).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr2 = memattrs.get(2).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr3 = memattrs.get(3).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let hmair0 = mem_attr3 << 24 | mem_attr2 << 16 | mem_attr1 << 8 | mem_attr0;
        unsafe {
            register::Hmair0::write(register::Hmair0(hmair0));
        }
        let mem_attr0 = memattrs.get(4).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr1 = memattrs.get(5).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr2 = memattrs.get(6).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let mem_attr3 = memattrs.get(7).map(|m| m.to_bits()).unwrap_or(0) as u32;
        let hmair1 = mem_attr3 << 24 | mem_attr2 << 16 | mem_attr1 << 8 | mem_attr0;
        unsafe {
            register::Hmair1::write(register::Hmair1(hmair1));
        }
    }

    /// Enable or disable the background (default) memory map (HSCTLR.BR).
    pub fn background_region_enable(&mut self, enable: bool) {
        register::Hsctlr::modify(|r| {
            r.set_br(enable);
        });
    }

    /// Apply a complete configuration: regions (from index 0), memory
    /// attributes, and the background-region setting.
    ///
    /// Does not enable the MPU itself — call [`Self::enable`] afterwards.
    ///
    /// # Errors
    ///
    /// Any error from [`Self::set_regions`]; the MPU may be partially
    /// configured in that case.
    pub fn configure(&mut self, config: &El2Config) -> Result<(), Error> {
        self.set_regions(0, config.regions)?;
        self.set_attributes(config.memory_attributes);
        self.background_region_enable(config.background_config);
        Ok(())
    }

    /// Turn the MPU on (HSCTLR.M).
    pub fn enable(&mut self) {
        register::Hsctlr::modify(|r| {
            r.set_m(true);
        });
    }

    /// Turn the MPU off (HSCTLR.M).
    pub fn disable(&mut self) {
        register::Hsctlr::modify(|r| {
            r.set_m(false);
        });
    }
}
/// A complete EL1 MPU configuration, applied by `El1Mpu::configure`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct El1Config<'a> {
    /// Whether the background (default) memory map should be enabled.
    pub background_config: bool,
    /// Regions to program, starting at index 0.
    pub regions: &'a [El1Region],
    /// Up to eight memory attributes for MAIR0/MAIR1; extras are ignored.
    pub memory_attributes: &'a [MemAttr],
}
/// Configuration for one EL1 MPU protection region.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct El1Region {
    // Inclusive address range covered by the region. The start must be
    // 64-byte aligned and the end must be the last byte of a 64-byte
    // granule, or `El1Mpu::set_region` rejects it.
    pub range: core::ops::RangeInclusive<*mut u8>,
    // Shareability domain, written to PRBAR.
    pub shareability: El1Shareability,
    // Access permissions, written to PRBAR.
    pub access: El1AccessPerms,
    // Execute-never flag, written to PRBAR.
    pub no_exec: bool,
    // MAIR attribute index, must be in 0..=7; written to PRLAR.
    pub mair: u8,
    // Whether the region is enabled; written to PRLAR.
    pub enable: bool,
}

// SAFETY: the raw pointers in `range` are only ever used as plain addresses
// (compared, masked, and written to MPU registers) and are never
// dereferenced, so sharing an `El1Region` across threads is sound. The
// pointers are the only thing blocking the automatic `Sync` impl.
unsafe impl Sync for El1Region {}
/// A complete EL2 MPU configuration, applied by `El2Mpu::configure`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct El2Config<'a> {
    /// Whether the background (default) memory map should be enabled.
    pub background_config: bool,
    /// Regions to program, starting at index 0.
    pub regions: &'a [El2Region],
    /// Up to eight memory attributes for HMAIR0/HMAIR1; extras are ignored.
    pub memory_attributes: &'a [MemAttr],
}
/// Configuration for one EL2 MPU protection region.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct El2Region {
    // Inclusive address range covered by the region. The start must be
    // 64-byte aligned and the end must be the last byte of a 64-byte
    // granule, or `El2Mpu::set_region` rejects it.
    pub range: core::ops::RangeInclusive<*mut u8>,
    // Shareability domain, written to HPRBAR.
    pub shareability: El2Shareability,
    // Access permissions, written to HPRBAR.
    pub access: El2AccessPerms,
    // Execute-never flag, written to HPRBAR.
    pub no_exec: bool,
    // MAIR attribute index, must be in 0..=7; written to HPRLAR.
    pub mair: u8,
    // Whether the region is enabled; written to HPRLAR.
    pub enable: bool,
}

// SAFETY: the raw pointers in `range` are only ever used as plain addresses
// (compared, masked, and written to MPU registers) and are never
// dereferenced, so sharing an `El2Region` across threads is sound. The
// pointers are the only thing blocking the automatic `Sync` impl.
unsafe impl Sync for El2Region {}
/// A memory attribute, encodable as one 8-bit field of a MAIR register.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MemAttr {
    /// Device-nGnRnE memory (all zero bits).
    StronglyOrdered,
    /// Device-nGnRE memory.
    DeviceMemory,
    /// Normal memory with separate outer and inner cache behaviour.
    NormalMemory {
        outer: Cacheable,
        inner: Cacheable,
    },
}

impl MemAttr {
    /// Encode this attribute as one MAIR byte: outer nibble in bits [7:4],
    /// inner nibble in bits [3:0] for normal memory, or a device encoding.
    const fn to_bits(&self) -> u8 {
        match self {
            MemAttr::NormalMemory { outer, inner } => (outer.to_bits() << 4) | inner.to_bits(),
            MemAttr::DeviceMemory => 0b0000_0100,
            MemAttr::StronglyOrdered => 0b0000_0000,
        }
    }
}

/// Cache behaviour for one level (outer or inner) of normal memory.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Cacheable {
    WriteThroughTransient(RwAllocPolicy),
    WriteBackTransient(RwAllocPolicy),
    WriteThroughNonTransient(RwAllocPolicy),
    WriteBackNonTransient(RwAllocPolicy),
    NonCacheable,
}

impl Cacheable {
    /// Encode this cache policy as a 4-bit MAIR nibble: the high two bits
    /// select the write/transience mode, the low two bits the allocation
    /// policy. Non-cacheable has the fixed encoding 0b0100.
    const fn to_bits(&self) -> u8 {
        match self {
            Cacheable::NonCacheable => 0b0100,
            Cacheable::WriteThroughTransient(alloc) => *alloc as u8,
            Cacheable::WriteBackTransient(alloc) => 0b0100 | (*alloc as u8),
            Cacheable::WriteThroughNonTransient(alloc) => 0b1000 | (*alloc as u8),
            Cacheable::WriteBackNonTransient(alloc) => 0b1100 | (*alloc as u8),
        }
    }
}

/// Read/write cache allocation policy bits (never 0b00 — that bit pattern
/// is reserved for the non-cacheable encoding).
#[derive(Copy, Debug, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum RwAllocPolicy {
    /// Allocate on write.
    W = 0b01,
    /// Allocate on read.
    R = 0b10,
    /// Allocate on read and write.
    RW = 0b11,
}
#[cfg(test)]
mod test {
    use super::*;

    // Strongly-ordered memory encodes as the all-zero MAIR byte.
    #[test]
    fn mem_attr_strong() {
        assert_eq!(MemAttr::StronglyOrdered.to_bits(), 0b0000_0000);
    }

    // Device memory has the fixed 0b0000_0100 encoding.
    #[test]
    fn mem_attr_device() {
        assert_eq!(MemAttr::DeviceMemory.to_bits(), 0b0000_0100);
    }

    // Normal memory packs the outer policy into the high nibble and the
    // inner policy into the low nibble.
    #[test]
    fn mem_attr_normal() {
        let attr = MemAttr::NormalMemory {
            outer: Cacheable::NonCacheable,
            inner: Cacheable::WriteBackNonTransient(RwAllocPolicy::W),
        };
        let bits = attr.to_bits();
        assert_eq!(bits, 0b0100_1101, "0b{:08b}", bits);
    }
}