use core::{marker::PhantomData, ops::Deref};
use arrayvec::ArrayVec;
use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PhysAddr};
use crate::{
GenericPTE, MappingFlags, PageSize, PagingError, PagingHandler, PagingMetaData, PagingResult,
TlbFlusher,
};
/// Number of PTEs per table: a 4 KiB frame holds 512 8-byte entries.
const ENTRY_COUNT: usize = 512;

/// Extracts the 9-bit table index whose low bit sits at `shift` in `vaddr`.
const fn table_index(vaddr: usize, shift: u32) -> usize {
    (vaddr >> shift) & (ENTRY_COUNT - 1)
}

/// Index into the level-4 table (virtual address bits 39..48).
const fn p4_index(vaddr: usize) -> usize {
    table_index(vaddr, 39)
}

/// Index into the level-3 table (virtual address bits 30..39).
const fn p3_index(vaddr: usize) -> usize {
    table_index(vaddr, 30)
}

/// Index into the level-2 table (virtual address bits 21..30).
const fn p2_index(vaddr: usize) -> usize {
    table_index(vaddr, 21)
}

/// Index into the level-1 table (virtual address bits 12..21).
const fn p1_index(vaddr: usize) -> usize {
    table_index(vaddr, 12)
}
/// A generic 64-bit multi-level page table.
///
/// `M` supplies architecture metadata (number of levels, virtual address type,
/// TLB flushing), `PTE` is the concrete page-table-entry format, and `H`
/// provides frame allocation and phys-to-virt translation.
pub struct PageTable64<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> {
    /// Physical address of the root table frame (level 4 or level 3,
    /// depending on `M::LEVELS`).
    root_paddr: PhysAddr,
    /// Marks root entries copied from another table via `copy_from`; those
    /// subtrees are owned by the source table and are skipped on drop.
    #[cfg(feature = "copy-from")]
    borrowed_entries: bitmaps::Bitmap<ENTRY_COUNT>,
    _phantom: PhantomData<(M, PTE, H)>,
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> PageTable64<M, PTE, H> {
    /// Creates a page table with a freshly allocated, zeroed root frame.
    ///
    /// Returns [`PagingError::NoMemory`] if frame allocation fails.
    pub fn try_new() -> PagingResult<Self> {
        Self::alloc_table().map(|root_paddr| Self {
            root_paddr,
            #[cfg(feature = "copy-from")]
            borrowed_entries: bitmaps::Bitmap::new(),
            _phantom: PhantomData,
        })
    }

    /// Physical address of the root table frame.
    pub const fn root_paddr(&self) -> PhysAddr {
        self.root_paddr
    }

    /// Looks up `vaddr` and returns the mapped physical address, the mapping
    /// flags, and the size of the page that covers it.
    ///
    /// The returned physical address includes the offset of `vaddr` within
    /// the covering page. Fails with [`PagingError::NotMapped`] when no
    /// present entry covers `vaddr`.
    pub fn query(&self, vaddr: M::VirtAddr) -> PagingResult<(PhysAddr, MappingFlags, PageSize)> {
        let (entry, size) = self.get_entry(vaddr)?;
        if entry.is_present() {
            let offset = size.align_offset(vaddr.into());
            Ok((entry.paddr().add(offset), entry.flags(), size))
        } else {
            Err(PagingError::NotMapped)
        }
    }

    /// Visits the present entries of the table tree, depth first.
    ///
    /// `pre_func` / `post_func` run before / after descending into each entry
    /// with `(level, index, vaddr, entry)`. At most `limit` present entries
    /// are visited within each individual table.
    pub fn walk<F>(&self, limit: usize, pre_func: Option<&F>, post_func: Option<&F>)
    where
        F: Fn(usize, usize, M::VirtAddr, &PTE),
    {
        let root = self.table_of(self.root_paddr());
        self.walk_recursive(root, 0, 0.into(), limit, pre_func, post_func)
    }

    /// Returns a mutating cursor that batches TLB maintenance over a sequence
    /// of map/unmap/protect operations.
    pub fn cursor(&mut self) -> PageTable64Cursor<'_, M, PTE, H> {
        PageTable64Cursor::new(self)
    }
}
/// Private helpers: frame allocation, raw table access, and entry lookup.
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> PageTable64<M, PTE, H> {
    /// Allocates one physical frame for a page table and zero-fills it.
    ///
    /// Returns [`PagingError::NoMemory`] when the handler has no free frames.
    fn alloc_table() -> PagingResult<PhysAddr> {
        if let Some(paddr) = H::alloc_frame() {
            let ptr = H::phys_to_virt(paddr).as_mut_ptr();
            // SAFETY: the frame was just allocated for our exclusive use and
            // is PAGE_SIZE_4K bytes long.
            unsafe { core::ptr::write_bytes(ptr, 0, PAGE_SIZE_4K) };
            Ok(paddr)
        } else {
            Err(PagingError::NoMemory)
        }
    }

    /// Interprets the frame at `paddr` as a table of `ENTRY_COUNT` entries.
    ///
    /// NOTE(review): the returned lifetime `'a` is unconstrained (not tied to
    /// `&self`), so the borrow checker cannot see aliasing between table
    /// slices — callers must maintain exclusivity by convention.
    fn table_of<'a>(&self, paddr: PhysAddr) -> &'a [PTE] {
        let ptr = H::phys_to_virt(paddr).as_ptr() as _;
        // SAFETY: `paddr` refers to a live, table-sized frame reachable from
        // this page table.
        unsafe { core::slice::from_raw_parts(ptr, ENTRY_COUNT) }
    }

    /// Mutable counterpart of [`Self::table_of`]; the same unconstrained
    /// lifetime caveat applies.
    fn table_of_mut<'a>(&mut self, paddr: PhysAddr) -> &'a mut [PTE] {
        let ptr = H::phys_to_virt(paddr).as_mut_ptr() as _;
        // SAFETY: as in `table_of`; `&mut self` signals exclusive intent but
        // does not statically rule out overlapping table references.
        unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) }
    }

    /// Follows a non-leaf `entry` down to the next-level table.
    ///
    /// A zero physical address is treated as unmapped; a huge-page entry has
    /// no next level and yields [`PagingError::MappedToHugePage`].
    fn next_table<'a>(&self, entry: &PTE) -> PagingResult<&'a [PTE]> {
        if entry.paddr().as_usize() == 0 {
            Err(PagingError::NotMapped)
        } else if entry.is_huge() {
            Err(PagingError::MappedToHugePage)
        } else {
            Ok(self.table_of(entry.paddr()))
        }
    }

    /// Mutable variant of [`Self::next_table`].
    fn next_table_mut<'a>(&mut self, entry: &PTE) -> PagingResult<&'a mut [PTE]> {
        if entry.paddr().as_usize() == 0 {
            Err(PagingError::NotMapped)
        } else if entry.is_huge() {
            Err(PagingError::MappedToHugePage)
        } else {
            Ok(self.table_of_mut(entry.paddr()))
        }
    }

    /// Like [`Self::next_table_mut`], but allocates and links a fresh zeroed
    /// table when `entry` is currently unused.
    fn next_table_mut_or_create<'a>(&mut self, entry: &mut PTE) -> PagingResult<&'a mut [PTE]> {
        if entry.is_unused() {
            let paddr = Self::alloc_table()?;
            *entry = GenericPTE::new_table(paddr);
            Ok(self.table_of_mut(paddr))
        } else {
            self.next_table_mut(entry)
        }
    }

    /// Walks from the root down to the entry covering `vaddr`.
    ///
    /// Stops early at a huge-page entry (1 GiB at level 3, 2 MiB at level 2)
    /// and returns it with its page size; otherwise returns the 4 KiB leaf.
    fn get_entry(&self, vaddr: M::VirtAddr) -> PagingResult<(&PTE, PageSize)> {
        let vaddr: usize = vaddr.into();
        // 3-level tables use the root frame directly as the level-3 table.
        let p3 = if M::LEVELS == 3 {
            self.table_of(self.root_paddr())
        } else if M::LEVELS == 4 {
            let p4 = self.table_of(self.root_paddr());
            let p4e = &p4[p4_index(vaddr)];
            self.next_table(p4e)?
        } else {
            unreachable!()
        };
        let p3e = &p3[p3_index(vaddr)];
        if p3e.is_huge() {
            return Ok((p3e, PageSize::Size1G));
        }
        let p2 = self.next_table(p3e)?;
        let p2e = &p2[p2_index(vaddr)];
        if p2e.is_huge() {
            return Ok((p2e, PageSize::Size2M));
        }
        let p1 = self.next_table(p2e)?;
        let p1e = &p1[p1_index(vaddr)];
        Ok((p1e, PageSize::Size4K))
    }

    /// Mutable variant of [`Self::get_entry`].
    fn get_entry_mut(&mut self, vaddr: M::VirtAddr) -> PagingResult<(&mut PTE, PageSize)> {
        let vaddr: usize = vaddr.into();
        let p3 = if M::LEVELS == 3 {
            self.table_of_mut(self.root_paddr())
        } else if M::LEVELS == 4 {
            let p4 = self.table_of_mut(self.root_paddr());
            let p4e = &mut p4[p4_index(vaddr)];
            self.next_table_mut(p4e)?
        } else {
            unreachable!()
        };
        let p3e = &mut p3[p3_index(vaddr)];
        if p3e.is_huge() {
            return Ok((p3e, PageSize::Size1G));
        }
        let p2 = self.next_table_mut(p3e)?;
        let p2e = &mut p2[p2_index(vaddr)];
        if p2e.is_huge() {
            return Ok((p2e, PageSize::Size2M));
        }
        let p1 = self.next_table_mut(p2e)?;
        let p1e = &mut p1[p1_index(vaddr)];
        Ok((p1e, PageSize::Size4K))
    }

    /// Descends to the level implied by `page_size`, creating intermediate
    /// tables on demand, and returns the entry that would map `vaddr`.
    ///
    /// The returned entry may already be in use; the caller decides whether
    /// that is an error (see `PageTable64Cursor::map`).
    fn get_entry_mut_or_create(
        &mut self,
        vaddr: M::VirtAddr,
        page_size: PageSize,
    ) -> PagingResult<&mut PTE> {
        let vaddr: usize = vaddr.into();
        let p3 = if M::LEVELS == 3 {
            self.table_of_mut(self.root_paddr())
        } else if M::LEVELS == 4 {
            let p4 = self.table_of_mut(self.root_paddr());
            let p4e = &mut p4[p4_index(vaddr)];
            self.next_table_mut_or_create(p4e)?
        } else {
            unreachable!()
        };
        let p3e = &mut p3[p3_index(vaddr)];
        if page_size == PageSize::Size1G {
            return Ok(p3e);
        }
        let p2 = self.next_table_mut_or_create(p3e)?;
        let p2e = &mut p2[p2_index(vaddr)];
        if page_size == PageSize::Size2M {
            return Ok(p2e);
        }
        let p1 = self.next_table_mut_or_create(p2e)?;
        let p1e = &mut p1[p1_index(vaddr)];
        Ok(p1e)
    }

    /// Depth-first traversal behind [`Self::walk`].
    ///
    /// `pre_func` runs before recursing into an entry, `post_func` after.
    /// At most `limit` *present* entries are visited in each table — the
    /// limit applies per table, not globally.
    fn walk_recursive<F>(
        &self,
        table: &[PTE],
        level: usize,
        start_vaddr: M::VirtAddr,
        limit: usize,
        pre_func: Option<&F>,
        post_func: Option<&F>,
    ) where
        F: Fn(usize, usize, M::VirtAddr, &PTE),
    {
        let start_vaddr_usize: usize = start_vaddr.into();
        let mut n = 0;
        for (i, entry) in table.iter().enumerate() {
            // Each entry at this level spans 1 << (12 + (LEVELS-1-level)*9) bytes.
            let vaddr_usize = start_vaddr_usize + (i << (12 + (M::LEVELS - 1 - level) * 9));
            let vaddr = vaddr_usize.into();
            if entry.is_present() {
                if let Some(func) = pre_func {
                    func(level, i, vaddr, entry);
                }
                // Recurse only into genuine next-level tables, not huge pages.
                if level < M::LEVELS - 1
                    && !entry.is_huge()
                    && let Ok(table) = self.next_table(entry)
                {
                    self.walk_recursive(table, level + 1, vaddr, limit, pre_func, post_func);
                }
                if let Some(func) = post_func {
                    func(level, i, vaddr, entry);
                }
                n += 1;
                if n >= limit {
                    break;
                }
            }
        }
    }

    /// Recursively frees every sub-table beneath `table_paddr`, then the
    /// table frame itself.
    ///
    /// Huge-page and unmapped entries are skipped via `next_table`'s error
    /// return; leaf data frames are never freed here — only table frames.
    fn dealloc_tree(&self, table_paddr: PhysAddr, level: usize) {
        if level < M::LEVELS - 1 {
            for entry in self.table_of(table_paddr) {
                if self.next_table(entry).is_ok() {
                    self.dealloc_tree(entry.paddr(), level + 1);
                }
            }
        }
        H::dealloc_frame(table_paddr);
    }
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Drop for PageTable64<M, PTE, H> {
    /// Frees every owned table frame in the tree, then the root frame.
    ///
    /// Root entries borrowed via `copy_from` are skipped: those subtrees
    /// belong to the source table. Leaf data frames are never freed by the
    /// page table itself.
    fn drop(&mut self) {
        let root = self.table_of(self.root_paddr);
        #[allow(unused_variables)]
        for (i, entry) in root.iter().enumerate() {
            // Skip subtrees shared from another table (see `copy_from`).
            #[cfg(feature = "copy-from")]
            if self.borrowed_entries.get(i) {
                continue;
            }
            if self.next_table(entry).is_ok() {
                self.dealloc_tree(entry.paddr(), 1);
            }
        }
        H::dealloc_frame(self.root_paddr());
    }
}
/// A mutating view over a [`PageTable64`] that records which virtual
/// addresses need TLB invalidation and performs it in one batch, either on
/// an explicit `flush` or when the cursor is dropped.
pub struct PageTable64Cursor<'a, M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> {
    inner: &'a mut PageTable64<M, PTE, H>,
    /// Pending TLB work: nothing, a bounded list of addresses, or a full flush.
    flusher: TlbFlusher<M>,
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Deref
    for PageTable64Cursor<'_, M, PTE, H>
{
    type Target = PageTable64<M, PTE, H>;

    /// Exposes the read-only API of the underlying page table on the cursor.
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
impl<'a, M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> PageTable64Cursor<'a, M, PTE, H> {
    /// Wraps `inner` with no pending TLB work.
    fn new(inner: &'a mut PageTable64<M, PTE, H>) -> Self {
        Self {
            inner,
            flusher: TlbFlusher::None,
        }
    }

    /// Records `vaddr` for later TLB invalidation.
    ///
    /// The flusher escalates: empty -> bounded address list -> full flush
    /// once the list's capacity is exhausted.
    fn push(&mut self, vaddr: M::VirtAddr) {
        match self.flusher {
            TlbFlusher::None => {
                let mut arr = ArrayVec::new();
                arr.push(vaddr);
                self.flusher = TlbFlusher::Array(arr);
            }
            TlbFlusher::Array(ref mut arr) => {
                // Array full: fall back to flushing the entire TLB once.
                if arr.try_push(vaddr).is_err() {
                    self.flusher = TlbFlusher::Full;
                }
            }
            TlbFlusher::Full => {}
        }
    }

    /// Maps `vaddr` to `target` with the given page size and flags.
    ///
    /// `target` is silently aligned down to `page_size`. Intermediate tables
    /// are created on demand. Fails with [`PagingError::AlreadyMapped`] when
    /// the target entry is already in use.
    pub fn map(
        &mut self,
        vaddr: M::VirtAddr,
        target: PhysAddr,
        page_size: PageSize,
        flags: MappingFlags,
    ) -> PagingResult {
        let entry = self.inner.get_entry_mut_or_create(vaddr, page_size)?;
        if !entry.is_unused() {
            return Err(PagingError::AlreadyMapped);
        }
        *entry = GenericPTE::new_page(target.align_down(page_size), flags, page_size.is_huge());
        self.push(vaddr);
        Ok(())
    }

    /// Rewrites the physical address and flags of the entry covering `vaddr`,
    /// keeping its page size. Returns the size of the affected page.
    ///
    /// NOTE(review): unlike `protect`, there is no `is_present` check here —
    /// presumably intentional; confirm against callers.
    pub fn remap(
        &mut self,
        vaddr: M::VirtAddr,
        paddr: PhysAddr,
        flags: MappingFlags,
    ) -> PagingResult<PageSize> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        entry.set_paddr(paddr);
        entry.set_flags(flags, size.is_huge());
        self.push(vaddr);
        Ok(size)
    }

    /// Updates the flags of the present mapping covering `vaddr`, returning
    /// its page size. Fails with [`PagingError::NotMapped`] otherwise.
    pub fn protect(&mut self, vaddr: M::VirtAddr, flags: MappingFlags) -> PagingResult<PageSize> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        if !entry.is_present() {
            return Err(PagingError::NotMapped);
        }
        entry.set_flags(flags, size.is_huge());
        self.push(vaddr);
        Ok(size)
    }

    /// Removes the mapping covering `vaddr`, returning its physical address,
    /// flags, and page size.
    ///
    /// A stale non-present entry is cleared before reporting `NotMapped`, so
    /// a subsequent `map` sees the slot as unused.
    pub fn unmap(
        &mut self,
        vaddr: M::VirtAddr,
    ) -> PagingResult<(PhysAddr, MappingFlags, PageSize)> {
        let (entry, size) = self.inner.get_entry_mut(vaddr)?;
        if !entry.is_present() {
            entry.clear();
            return Err(PagingError::NotMapped);
        }
        let paddr = entry.paddr();
        let flags = entry.flags();
        entry.clear();
        self.push(vaddr);
        Ok((paddr, flags, size))
    }

    /// Maps the `size`-byte region starting at `vaddr`, page by page.
    ///
    /// `get_paddr` supplies the physical address for each virtual page. When
    /// `allow_huge` is set, 1 GiB / 2 MiB pages are used whenever the virtual
    /// address, its physical address, and the remaining size are all suitably
    /// aligned. Both `vaddr` and `size` must be 4 KiB-aligned.
    ///
    /// On error, pages mapped before the failure are left in place.
    pub fn map_region(
        &mut self,
        vaddr: M::VirtAddr,
        get_paddr: impl Fn(M::VirtAddr) -> PhysAddr,
        size: usize,
        flags: MappingFlags,
        allow_huge: bool,
    ) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        if !PageSize::Size4K.is_aligned(vaddr_usize) || !PageSize::Size4K.is_aligned(size) {
            return Err(PagingError::NotAligned);
        }
        trace!(
            "map_region({:#x}): [{:#x}, {:#x}) {:?}",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
            flags,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let paddr = get_paddr(vaddr);
            // Pick the largest page size permitted by the alignment of both
            // addresses and the remaining length.
            let page_size = if allow_huge {
                if PageSize::Size1G.is_aligned(vaddr_usize)
                    && paddr.is_aligned(PageSize::Size1G)
                    && size >= PageSize::Size1G as usize
                {
                    PageSize::Size1G
                } else if PageSize::Size2M.is_aligned(vaddr_usize)
                    && paddr.is_aligned(PageSize::Size2M)
                    && size >= PageSize::Size2M as usize
                {
                    PageSize::Size2M
                } else {
                    PageSize::Size4K
                }
            } else {
                PageSize::Size4K
            };
            self.map(vaddr, paddr, page_size, flags).inspect_err(|e| {
                error!("failed to map page: {vaddr_usize:#x?}({page_size:?}) -> {paddr:#x?}, {e:?}")
            })?;
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Unmaps the `size`-byte region starting at `vaddr`.
    ///
    /// The region must consist of whole mappings: each unmapped page must be
    /// aligned to its own size and fit within the remaining range (asserted).
    /// Stops at the first unmapped page with an error.
    pub fn unmap_region(&mut self, vaddr: M::VirtAddr, size: usize) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        trace!(
            "unmap_region({:#x}) [{:#x}, {:#x})",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let (_, _, page_size) = self
                .unmap(vaddr)
                .inspect_err(|e| error!("failed to unmap page: {vaddr_usize:#x?}, {e:?}"))?;
            assert!(page_size.is_aligned(vaddr_usize));
            assert!(page_size as usize <= size);
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Applies `flags` to every present mapping in the region.
    ///
    /// Unmapped holes are skipped in 4 KiB steps rather than treated as
    /// errors; any other lookup failure aborts the operation.
    pub fn protect_region(
        &mut self,
        vaddr: M::VirtAddr,
        size: usize,
        flags: MappingFlags,
    ) -> PagingResult {
        let mut vaddr_usize: usize = vaddr.into();
        let mut size = size;
        trace!(
            "protect_region({:#x}) [{:#x}, {:#x}) {:?}",
            self.root_paddr(),
            vaddr_usize,
            vaddr_usize + size,
            flags,
        );
        while size > 0 {
            let vaddr = vaddr_usize.into();
            let page_size = match self.inner.get_entry_mut(vaddr) {
                Ok((entry, page_size)) => {
                    // Only touch present entries; size still advances the loop.
                    if entry.is_present() {
                        entry.set_flags(flags, page_size.is_huge());
                        self.push(vaddr);
                    }
                    page_size
                }
                Err(PagingError::NotMapped) => PageSize::Size4K,
                Err(e) => {
                    error!("failed to protect page: {vaddr_usize:#x?}, {e:?}");
                    return Err(e);
                }
            };
            assert!(page_size.is_aligned(vaddr_usize));
            assert!(page_size as usize <= size);
            vaddr_usize += page_size as usize;
            size -= page_size as usize;
        }
        Ok(())
    }

    /// Copies the top-level entries of `other` covering `[start, start+size)`
    /// into this table, sharing the underlying subtrees.
    ///
    /// Copied slots are marked "borrowed" so `Drop` will not free subtrees
    /// owned by `other`. A previously owned subtree sitting in an overwritten
    /// slot is freed first. Always schedules a full TLB flush.
    #[cfg(feature = "copy-from")]
    pub fn copy_from(&mut self, other: &PageTable64<M, PTE, H>, start: M::VirtAddr, size: usize) {
        if size == 0 {
            return;
        }
        let src_table = self.table_of(other.root_paddr);
        let root_paddr = self.root_paddr;
        let dst_table = self.inner.table_of_mut(root_paddr);
        // The top-level index function depends on the number of levels.
        let index_fn = if M::LEVELS == 3 {
            p3_index
        } else if M::LEVELS == 4 {
            p4_index
        } else {
            unreachable!()
        };
        let start_idx = index_fn(start.into());
        let end_idx = index_fn(start.into() + size - 1) + 1;
        assert!(start_idx < ENTRY_COUNT);
        assert!(end_idx <= ENTRY_COUNT);
        for i in start_idx..end_idx {
            let entry = &mut dst_table[i];
            // `set` returns the previous bit: free the old subtree only if
            // this slot was owned (not already borrowed) and held a table.
            if !self.inner.borrowed_entries.set(i, true) && self.next_table(entry).is_ok() {
                self.dealloc_tree(entry.paddr(), 1);
            }
            *entry = src_table[i];
        }
        self.flusher = TlbFlusher::Full;
    }

    /// Performs the pending TLB maintenance and resets the flusher.
    ///
    /// NOTE(review): gated on `not(docsrs)` — presumably `M::flush_tlb` is
    /// not usable in docs.rs builds; confirm.
    pub fn flush(&mut self) {
        #[cfg(not(docsrs))]
        match &self.flusher {
            TlbFlusher::None => {}
            TlbFlusher::Array(addrs) => {
                // Flush each recorded address individually.
                for vaddr in addrs.iter() {
                    M::flush_tlb(Some(*vaddr));
                }
            }
            TlbFlusher::Full => {
                M::flush_tlb(None);
            }
        }
        self.flusher = TlbFlusher::None;
    }
}
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Drop
    for PageTable64Cursor<'_, M, PTE, H>
{
    /// Ensures pending TLB invalidations happen even when the caller never
    /// invokes `flush` explicitly.
    fn drop(&mut self) {
        self.flush();
    }
}