use core::{marker::PhantomData, ops::Deref};
use arrayvec::ArrayVec;
use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PhysAddr};
use crate::{
GenericPTE, MappingFlags, PageSize, PagingError, PagingHandler, PagingMetaData, PagingResult,
TlbFlusher,
};
// Number of entries in one level of the page table.
//
// The Armv7 short-descriptor first-level table has 4096 entries (one per
// 1 MiB section of the 4 GiB address space); other targets use 512 entries.
// NOTE(review): `p1_index` masks with 0xFFF (0..=4095), which can exceed the
// non-Arm ENTRY_COUNT of 512 — confirm L1 indices stay below 512 on those
// targets.
#[cfg(target_arch = "arm")]
const ENTRY_COUNT: usize = 4096;
#[cfg(not(target_arch = "arm"))]
const ENTRY_COUNT: usize = 512;
/// Index of the first-level (section) table entry for `vaddr`.
///
/// Bits [31:20] of the virtual address select one of up to 4096 L1 entries,
/// each covering a 1 MiB region.
const fn p1_index(vaddr: usize) -> usize {
    (vaddr >> 20) % 0x1000
}
/// Index of the second-level (page) table entry for `vaddr`.
///
/// Bits [19:12] of the virtual address select one of 256 L2 entries, each
/// mapping a 4 KiB page.
const fn p2_index(vaddr: usize) -> usize {
    (vaddr / 0x1000) % 0x100
}
/// A two-level 32-bit page table (e.g. the Armv7 short-descriptor format),
/// generic over the paging metadata `M`, the page-table entry type `PTE`,
/// and the OS hooks `H` used for frame allocation and phys-to-virt
/// translation.
pub struct PageTable32<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> {
    // Physical address of the root (first-level) table.
    root_paddr: PhysAddr,
    // Bitmap over L1 entry indices marking entries copied from another table
    // via `copy_from`; borrowed entries are not freed by `Drop`.
    #[cfg(feature = "copy-from")]
    borrowed_entries: [u64; ENTRY_COUNT / 64],
    // Ties the generic parameters to the type without storing them.
    _phantom: PhantomData<(M, PTE, H)>,
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> PageTable32<M, PTE, H> {
    /// Creates a new page table with an empty (zeroed) root table.
    ///
    /// On Arm, the first-level table is 16 KiB (4096 entries * 4 bytes) and
    /// must be 16 KiB-aligned, so four contiguous frames are allocated; on
    /// other targets a single 4 KiB frame is used.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NoMemory`] if frame allocation fails.
    pub fn try_new() -> PagingResult<Self> {
        let (root_paddr, size_pages) = {
            #[cfg(target_arch = "arm")]
            {
                // 16 KiB L1 table, 16 KiB-aligned as the hardware requires.
                const L1_SIZE_PAGES: usize = 4;
                const L1_ALIGN: usize = 16384;
                let root_paddr =
                    H::alloc_frames(L1_SIZE_PAGES, L1_ALIGN).ok_or(PagingError::NoMemory)?;
                (root_paddr, L1_SIZE_PAGES)
            }
            #[cfg(not(target_arch = "arm"))]
            {
                const SIZE_PAGES: usize = 1;
                let root_paddr = H::alloc_frame().ok_or(PagingError::NoMemory)?;
                (root_paddr, SIZE_PAGES)
            }
        };
        let virt = H::phys_to_virt(root_paddr);
        // SAFETY: `virt` maps `size_pages` freshly allocated, exclusively
        // owned frames; zeroing them marks every entry as unused.
        unsafe {
            core::ptr::write_bytes(virt.as_mut_ptr(), 0, size_pages * PAGE_SIZE_4K);
        }
        Ok(Self {
            root_paddr,
            #[cfg(feature = "copy-from")]
            borrowed_entries: [0; ENTRY_COUNT / 64],
            _phantom: PhantomData,
        })
    }

    /// Returns the physical address of the root table (suitable for loading
    /// into the MMU's table-base register).
    pub const fn root_paddr(&self) -> PhysAddr {
        self.root_paddr
    }

    /// Looks up the mapping of `vaddr`.
    ///
    /// Returns the translated physical address (entry base plus the offset
    /// within the page), the mapping flags, and the page size.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NotMapped`] if `vaddr` is not mapped.
    pub fn query(&self, vaddr: M::VirtAddr) -> PagingResult<(PhysAddr, MappingFlags, PageSize)> {
        let (entry, size) = self.get_entry(vaddr)?;
        if entry.is_unused() {
            return Err(PagingError::NotMapped);
        }
        // Offset of `vaddr` within the mapped page (page sizes are powers of
        // two, so this mask extracts the low bits).
        let off = vaddr.into() & (size as usize - 1);
        Ok((entry.paddr().add(off), entry.flags(), size))
    }

    /// Walks the page table, calling `pre_func` before descending into each
    /// used entry and `post_func` after. At most `limit` used entries are
    /// visited per table level.
    pub fn walk<F>(&self, limit: usize, pre_func: Option<&F>, post_func: Option<&F>)
    where
        F: Fn(usize, usize, M::VirtAddr, &PTE),
    {
        self.walk_recursive(
            self.get_table(self.root_paddr),
            0,
            0.into(),
            limit,
            pre_func,
            post_func,
        )
    }

    /// Returns a cursor for modifying this table. TLB flushes for modified
    /// addresses are batched until the cursor's `flush` call (or its drop).
    pub fn cursor(&mut self) -> PageTable32Cursor<'_, M, PTE, H> {
        PageTable32Cursor::new(self)
    }

    /// Returns a mutable reference to the entry covering `vaddr` together
    /// with its page size: the L1 entry itself for a huge (1 MiB) mapping,
    /// otherwise the corresponding L2 (4 KiB) entry.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NotMapped`] if the L1 entry is unused.
    /// NOTE(review): an unused L2 entry under a used L1 table entry is still
    /// returned as `Ok`; callers check `is_unused` themselves where needed.
    fn get_entry_mut(&mut self, vaddr: M::VirtAddr) -> PagingResult<(&mut PTE, PageSize)> {
        let vaddr_usize = vaddr.into();
        let p1 = p1_index(vaddr_usize);
        let table = self.get_table_mut(self.root_paddr);
        let entry = &mut table[p1];
        if entry.is_unused() {
            return Err(PagingError::NotMapped);
        }
        if entry.is_huge() {
            // A huge L1 entry maps a whole 1 MiB section directly.
            return Ok((entry, PageSize::Size1M));
        }
        let p2_table = self.get_table_mut(entry.paddr());
        let p2 = p2_index(vaddr_usize);
        Ok((&mut p2_table[p2], PageSize::Size4K))
    }

    /// Shared-reference counterpart of [`Self::get_entry_mut`]; same lookup
    /// and error behavior.
    fn get_entry(&self, vaddr: M::VirtAddr) -> PagingResult<(&PTE, PageSize)> {
        let vaddr_usize = vaddr.into();
        let p1 = p1_index(vaddr_usize);
        let table = self.get_table(self.root_paddr);
        let entry = &table[p1];
        if entry.is_unused() {
            return Err(PagingError::NotMapped);
        }
        if entry.is_huge() {
            return Ok((entry, PageSize::Size1M));
        }
        let p2_table = self.get_table(entry.paddr());
        let p2 = p2_index(vaddr_usize);
        Ok((&p2_table[p2], PageSize::Size4K))
    }

    /// Returns a mutable reference to the entry that should hold a mapping of
    /// size `page_size` for `vaddr`, allocating and zeroing an intermediate
    /// L2 table if necessary.
    ///
    /// # Errors
    ///
    /// - [`PagingError::NoMemory`] if allocating an L2 table fails.
    /// - [`PagingError::AlreadyMapped`] if a 4 KiB mapping is requested under
    ///   an existing huge (1 MiB) mapping.
    fn get_entry_mut_or_create(
        &mut self,
        vaddr: M::VirtAddr,
        page_size: PageSize,
    ) -> PagingResult<&mut PTE> {
        let vaddr_usize = vaddr.into();
        let p1 = p1_index(vaddr_usize);
        let table = self.get_table_mut(self.root_paddr);
        if page_size == PageSize::Size1M {
            // Huge mappings live directly in the L1 entry.
            return Ok(&mut table[p1]);
        }
        let entry = &mut table[p1];
        if entry.is_unused() {
            // No L2 table yet: allocate and zero one, then link it in.
            let paddr = H::alloc_frame().ok_or(PagingError::NoMemory)?;
            let virt = H::phys_to_virt(paddr);
            // SAFETY: `virt` maps the freshly allocated, exclusively owned
            // frame; zeroing marks all its entries as unused.
            unsafe {
                core::ptr::write_bytes(virt.as_mut_ptr(), 0, PAGE_SIZE_4K);
            }
            *entry = GenericPTE::new_table(paddr);
        } else if entry.is_huge() {
            // Cannot descend through a huge mapping to place a 4 KiB entry.
            return Err(PagingError::AlreadyMapped);
        }
        let p2_table = self.get_table_mut(entry.paddr());
        let p2 = p2_index(vaddr_usize);
        Ok(&mut p2_table[p2])
    }

    /// Interprets the table frame at `paddr` as a slice of `ENTRY_COUNT`
    /// entries.
    ///
    /// NOTE(review): the returned lifetime `'a` is unconstrained (not tied to
    /// `&self`), so callers must ensure the frame stays live for as long as
    /// the slice is used.
    fn get_table<'a>(&self, paddr: PhysAddr) -> &'a [PTE] {
        let ptr = H::phys_to_virt(paddr).as_ptr() as *const PTE;
        // SAFETY: `paddr` refers to a live page-table frame holding at least
        // `ENTRY_COUNT` entries, accessible at `phys_to_virt(paddr)`.
        unsafe { core::slice::from_raw_parts(ptr, ENTRY_COUNT) }
    }

    /// Mutable counterpart of [`Self::get_table`]; same lifetime caveat, plus
    /// the caller must guarantee no aliasing mutable access to the frame.
    fn get_table_mut<'a>(&self, paddr: PhysAddr) -> &'a mut [PTE] {
        let ptr = H::phys_to_virt(paddr).as_mut_ptr() as *mut PTE;
        // SAFETY: as for `get_table`, and the caller ensures exclusive access.
        unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) }
    }

    /// Recursive worker for [`Self::walk`]: visits up to `limit` used entries
    /// of `table`, recursing into L2 tables reachable from non-huge L1
    /// entries.
    fn walk_recursive<F>(
        &self,
        table: &[PTE],
        level: usize,
        start_vaddr: M::VirtAddr,
        limit: usize,
        pre_func: Option<&F>,
        post_func: Option<&F>,
    ) where
        F: Fn(usize, usize, M::VirtAddr, &PTE),
    {
        let start_vaddr_usize: usize = start_vaddr.into();
        let mut n = 0;
        for (i, entry) in table.iter().enumerate() {
            // Each L1 (level 0) entry covers 1 MiB (bit 20); each L2
            // (level 1) entry covers 4 KiB (bit 12).
            let shift = if level == 0 { 20 } else { 12 };
            let vaddr_usize = start_vaddr_usize + (i << shift);
            let vaddr = vaddr_usize.into();
            if !entry.is_unused() {
                if let Some(func) = pre_func {
                    func(level, i, vaddr, entry);
                }
                // Descend only through table entries, never huge mappings.
                if level == 0 && !entry.is_huge() {
                    let next_table = self.get_table(entry.paddr());
                    self.walk_recursive(next_table, level + 1, vaddr, limit, pre_func, post_func);
                }
                if let Some(func) = post_func {
                    func(level, i, vaddr, entry);
                }
                n += 1;
                if n >= limit {
                    break;
                }
            }
        }
    }
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Drop for PageTable32<M, PTE, H> {
    /// Frees every owned second-level table, then the root table itself.
    ///
    /// L2 tables that were borrowed from another page table via `copy_from`
    /// are skipped — their owner remains responsible for freeing them.
    fn drop(&mut self) {
        let table = self.get_table(self.root_paddr);
        #[allow(unused_variables)]
        for (i, entry) in table.iter().enumerate() {
            // Borrowed entries point into another table's L2 frames.
            #[cfg(feature = "copy-from")]
            if (self.borrowed_entries[i / 64] & (1 << (i % 64))) != 0 {
                continue;
            }
            // A used, non-huge L1 entry references an L2 table frame we own.
            if !entry.is_unused() && !entry.is_huge() {
                H::dealloc_frame(entry.paddr());
            }
        }
        // Free the root with the same shape `try_new` allocated it: four
        // contiguous frames on Arm (16 KiB L1 table), one frame elsewhere.
        // Unconditionally freeing 4 frames here would over-free on non-Arm
        // targets, whose root is a single frame from `alloc_frame`.
        #[cfg(target_arch = "arm")]
        H::dealloc_frames(self.root_paddr, 4);
        #[cfg(not(target_arch = "arm"))]
        H::dealloc_frame(self.root_paddr);
    }
}
/// A mutating handle over a [`PageTable32`] that records which virtual
/// addresses were modified, so TLB invalidations can be batched and applied
/// once via `flush` (or automatically on drop).
pub struct PageTable32Cursor<'a, M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> {
    // The page table being modified.
    inner: &'a mut PageTable32<M, PTE, H>,
    // Pending TLB-invalidation state: none, a bounded list of individual
    // addresses, or a full-TLB flush.
    flusher: TlbFlusher<M>,
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Deref
    for PageTable32Cursor<'_, M, PTE, H>
{
    type Target = PageTable32<M, PTE, H>;

    /// Lets the read-only `PageTable32` methods (e.g. `query`, `walk`) be
    /// called directly on the cursor.
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
impl<'a, M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> PageTable32Cursor<'a, M, PTE, H> {
    /// Wraps `inner` with no pending TLB flushes.
    fn new(inner: &'a mut PageTable32<M, PTE, H>) -> Self {
        Self {
            inner,
            flusher: TlbFlusher::None,
        }
    }

    /// Records `vaddr` for a later TLB flush, upgrading to a full-TLB flush
    /// once the fixed-capacity address list overflows.
    fn push(&mut self, vaddr: M::VirtAddr) {
        match self.flusher {
            TlbFlusher::None => {
                let mut arr = ArrayVec::new();
                arr.push(vaddr);
                self.flusher = TlbFlusher::Array(arr);
            }
            TlbFlusher::Array(ref mut arr) => {
                // Too many individual addresses: fall back to flushing the
                // whole TLB instead of tracking each one.
                if arr.try_push(vaddr).is_err() {
                    self.flusher = TlbFlusher::Full;
                }
            }
            TlbFlusher::Full => {}
        }
    }

    /// Maps `vaddr` to `target` with the given page size and flags.
    /// `target` is aligned down to the page size before being written.
    ///
    /// # Errors
    ///
    /// - [`PagingError::AlreadyMapped`] if the entry is already in use (or a
    ///   4 KiB mapping is requested under an existing huge mapping).
    /// - [`PagingError::NoMemory`] if an intermediate table allocation fails.
    pub fn map(
        &mut self,
        vaddr: M::VirtAddr,
        target: PhysAddr,
        page_size: PageSize,
        flags: MappingFlags,
    ) -> PagingResult {
        let entry = self.inner.get_entry_mut_or_create(vaddr, page_size)?;
        if !entry.is_unused() {
            return Err(PagingError::AlreadyMapped);
        }
        *entry = GenericPTE::new_page(target.align_down(page_size), flags, page_size.is_huge());
        self.push(vaddr);
        Ok(())
    }

    /// Rewrites the entry covering `vaddr` to point at `paddr` with `flags`,
    /// keeping the existing page size, and returns that size.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NotMapped`] if the L1 entry is unused.
    /// NOTE(review): unlike `protect`/`unmap`, this does not reject an unused
    /// 4 KiB L2 entry — remapping a not-yet-mapped page silently creates it;
    /// confirm that is intended.
    pub fn remap(
        &mut self,
        vaddr: M::VirtAddr,
        paddr: PhysAddr,
        flags: MappingFlags,
    ) -> PagingResult<PageSize> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        *entry = GenericPTE::new_page(paddr, flags, size.is_huge());
        self.push(vaddr);
        Ok(size)
    }

    /// Changes the flags of the mapping covering `vaddr` (keeping its target
    /// address and page size) and returns the page size.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NotMapped`] if `vaddr` is not mapped.
    pub fn protect(&mut self, vaddr: M::VirtAddr, flags: MappingFlags) -> PagingResult<PageSize> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        if entry.is_unused() {
            return Err(PagingError::NotMapped);
        }
        *entry = GenericPTE::new_page(entry.paddr(), flags, size.is_huge());
        self.push(vaddr);
        Ok(size)
    }

    /// Clears the mapping covering `vaddr` and returns its previous target
    /// address, flags, and page size.
    ///
    /// # Errors
    ///
    /// Returns [`PagingError::NotMapped`] if `vaddr` is not mapped.
    pub fn unmap(
        &mut self,
        vaddr: M::VirtAddr,
    ) -> PagingResult<(PhysAddr, MappingFlags, PageSize)> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        if entry.is_unused() {
            return Err(PagingError::NotMapped);
        }
        let paddr = entry.paddr();
        let flags = entry.flags();
        entry.clear();
        self.push(vaddr);
        Ok((paddr, flags, size))
    }

    /// Maps the region `[vaddr, vaddr + size)` page by page, asking
    /// `get_paddr` for each page's target. When `allow_huge` is set, 1 MiB
    /// mappings are used wherever address, target, and remaining size permit.
    ///
    /// # Errors
    ///
    /// - [`PagingError::NotAligned`] if `vaddr` or `size` is not 4 KiB-aligned.
    /// - Any error from `map` (e.g. `AlreadyMapped`, `NoMemory`); pages mapped
    ///   before the failure are left in place.
    pub fn map_region(
        &mut self,
        vaddr: M::VirtAddr,
        get_paddr: impl Fn(M::VirtAddr) -> PhysAddr,
        size: usize,
        flags: MappingFlags,
        allow_huge: bool,
    ) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        if !PageSize::Size4K.is_aligned(vaddr_usize) || !PageSize::Size4K.is_aligned(size) {
            return Err(PagingError::NotAligned);
        }
        trace!(
            "map_region({:#x}): [{:#x}, {:#x}) {:?}",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
            flags,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let paddr = get_paddr(vaddr);
            // Use a 1 MiB mapping only when both addresses are 1 MiB-aligned
            // and at least 1 MiB remains to be mapped.
            let page_size = if allow_huge
                && PageSize::Size1M.is_aligned(vaddr_usize)
                && paddr.is_aligned(PageSize::Size1M)
                && size >= PageSize::Size1M as usize
            {
                PageSize::Size1M
            } else {
                PageSize::Size4K
            };
            self.map(vaddr, paddr, page_size, flags).inspect_err(|e| {
                error!("failed to map page: {vaddr_usize:#x?}({page_size:?}) -> {paddr:#x?}, {e:?}")
            })?;
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Unmaps the region `[vaddr, vaddr + size)`, advancing by whatever page
    /// size each existing mapping has.
    ///
    /// # Errors
    ///
    /// Propagates the first `unmap` failure (e.g. `NotMapped`); pages unmapped
    /// before the failure stay unmapped.
    pub fn unmap_region(&mut self, vaddr: M::VirtAddr, size: usize) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        trace!(
            "unmap_region({:#x}) [{:#x}, {:#x})",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let (_, _, page_size) = self
                .unmap(vaddr)
                .inspect_err(|e| error!("failed to unmap page: {vaddr_usize:#x?}, {e:?}"))?;
            // The region must not start or end inside a huge mapping.
            assert!(page_size.is_aligned(vaddr_usize));
            assert!(page_size as usize <= size);
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Updates the flags of every mapped page in `[vaddr, vaddr + size)`.
    /// Unmapped 4 KiB gaps are skipped rather than treated as errors.
    ///
    /// # Errors
    ///
    /// Propagates any lookup error other than `NotMapped`.
    pub fn protect_region(
        &mut self,
        vaddr: M::VirtAddr,
        size: usize,
        flags: MappingFlags,
    ) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        trace!(
            "protect_region({:#x}) [{:#x}, {:#x}) {:?}",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
            flags,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let page_size = match self.inner.get_entry_mut(vaddr) {
                Ok((entry, page_size)) => {
                    if !entry.is_unused() {
                        entry.set_flags(flags, page_size.is_huge());
                        self.push(vaddr);
                    }
                    page_size
                }
                // Whole L1 entry unused: skip this 4 KiB step.
                Err(PagingError::NotMapped) => PageSize::Size4K,
                Err(e) => {
                    error!("failed to protect page: {vaddr_usize:#x?}, {e:?}");
                    return Err(e);
                }
            };
            assert!(page_size.is_aligned(vaddr_usize));
            assert!(page_size as usize <= size);
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Copies the L1 entries covering `[start, start + size)` from `other`
    /// into this table, sharing (not duplicating) any L2 tables they point
    /// to.
    ///
    /// Copied entries are marked as borrowed so this table's `Drop` will not
    /// free `other`'s L2 tables; an owned L2 table being overwritten is freed
    /// first. Always schedules a full TLB flush.
    #[cfg(feature = "copy-from")]
    pub fn copy_from(&mut self, other: &PageTable32<M, PTE, H>, start: M::VirtAddr, size: usize) {
        if size == 0 {
            return;
        }
        let src_table = self.inner.get_table(other.root_paddr);
        let dst_table = self.inner.get_table_mut(self.inner.root_paddr);
        // Inclusive range of L1 indices touched by [start, start + size).
        let start_idx = p1_index(start.into());
        let end_idx = p1_index(start.into() + size - 1) + 1;
        assert!(start_idx < ENTRY_COUNT);
        assert!(end_idx <= ENTRY_COUNT);
        for i in start_idx..end_idx {
            let entry = &mut dst_table[i];
            let is_borrowed = (self.inner.borrowed_entries[i / 64] & (1 << (i % 64))) != 0;
            if !is_borrowed {
                self.inner.borrowed_entries[i / 64] |= 1 << (i % 64);
                // Free the L2 table we owned before it is overwritten below.
                if !entry.is_unused() && !entry.is_huge() {
                    H::dealloc_frame(entry.paddr());
                }
            }
            *entry = src_table[i];
        }
        self.flusher = TlbFlusher::Full;
    }

    /// Applies all pending TLB invalidations recorded by earlier
    /// modifications, then resets the pending state.
    pub fn flush(&mut self) {
        // NOTE(review): presumably `M::flush_tlb` cannot run in docs.rs
        // builds, hence the cfg gate — confirm.
        #[cfg(not(docsrs))]
        match &self.flusher {
            TlbFlusher::None => {}
            TlbFlusher::Array(addrs) => {
                // Invalidate each recorded address individually.
                for vaddr in addrs.iter() {
                    M::flush_tlb(Some(*vaddr));
                }
            }
            TlbFlusher::Full => {
                // Too many changes were batched: invalidate the whole TLB.
                M::flush_tlb(None);
            }
        }
        self.flusher = TlbFlusher::None;
    }
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Drop
    for PageTable32Cursor<'_, M, PTE, H>
{
    /// Applies any pending TLB invalidations when the cursor goes out of
    /// scope, so deferred flushes cannot be silently lost.
    fn drop(&mut self) {
        self.flush();
    }
}