use core::ops::{Deref, DerefMut};
use crate::{
FrameAllocator, PageTableEntry, PagingError, PagingResult, PhysAddr, TableMeta, VirtAddr,
frame::Frame,
map::{MapConfig, MapRecursiveConfig, UnmapConfig, UnmapRecursiveConfig},
walk::{PageTableWalker, WalkConfig},
};
/// Owning handle to a page-table hierarchy.
///
/// Wraps a [`PageTableRef`] and frees the entire table tree when dropped
/// (see the `Drop` impl below). All lookup/map/unmap operations are
/// delegated to the inner ref via `Deref`/`DerefMut`.
pub struct PageTable<T: TableMeta, A: FrameAllocator> {
    // The borrowed-style handle that actually implements the operations.
    inner: PageTableRef<T, A>,
}
impl<T: TableMeta, A: FrameAllocator> PageTable<T, A> {
    /// Number of meaningful (translated) bits in a virtual address for
    /// this table layout, taken from the frame-level constant.
    pub const VALID_BITS: usize = Frame::<T, A>::PT_VALID_BITS;

    /// Allocates a fresh root frame from `allocator` and wraps it in an
    /// owning table.
    ///
    /// # Errors
    /// Propagates any allocation failure from [`PageTableRef::new`].
    pub fn new(allocator: A) -> PagingResult<Self> {
        // SAFETY: we take sole ownership of the freshly created ref and
        // free it exactly once in `Drop` — presumably the invariant
        // `PageTableRef::new` requires; TODO confirm against its docs.
        let inner = unsafe { PageTableRef::new(allocator) }?;
        Ok(Self { inner })
    }

    /// Instance accessor for [`Self::VALID_BITS`].
    pub fn valid_bits(&self) -> usize {
        // Reuse the associated const instead of recomputing it, so the
        // two stay in sync by construction.
        Self::VALID_BITS
    }
}
impl<T: TableMeta, A: FrameAllocator> Drop for PageTable<T, A> {
    /// Frees the whole page-table hierarchy rooted at `inner.root`.
    fn drop(&mut self) {
        unsafe {
            // SAFETY: `PageTable` is the owning wrapper and this runs
            // exactly once; NOTE(review): any `PageTableRef` copies a
            // caller obtained via `Deref`/`Clone` would dangle after
            // this — confirm callers do not retain them.
            self.deallocate();
        }
    }
}
// Expose all read-only `PageTableRef` operations directly on `PageTable`.
impl<T: TableMeta, A: FrameAllocator> Deref for PageTable<T, A> {
    type Target = PageTableRef<T, A>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
// Expose mutating operations (map/unmap/...) on `PageTable` as well.
// NOTE(review): since `PageTableRef` is `Copy`, a caller can duplicate the
// handle through this and outlive the owning `PageTable` — verify intended.
impl<T: TableMeta, A: FrameAllocator> DerefMut for PageTable<T, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// Non-owning, copyable handle to a page-table hierarchy.
///
/// Does NOT free anything on drop; callers must invoke
/// `deallocate`/`destroy` explicitly (or use the owning [`PageTable`]).
/// NOTE(review): being `Copy` while also exposing manual deallocation
/// makes double-free possible if two copies are both deallocated.
#[derive(Clone, Copy)]
pub struct PageTableRef<T: TableMeta, A: FrameAllocator> {
    // Root frame of the table tree; all operations recurse from here.
    pub root: Frame<T, A>,
}
impl<T: TableMeta, A: FrameAllocator> core::fmt::Debug for PageTableRef<T, A>
where
    T::P: core::fmt::Debug,
{
    /// Renders a compact summary of the table layout: the root physical
    /// address and page size in hex, plus the level count and the deepest
    /// level at which block (huge) mappings are allowed.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let root = self.root.paddr.raw();
        let mut dbg = f.debug_struct("PageTable");
        dbg.field("root_paddr", &format_args!("{:#x}", root));
        dbg.field("table_levels", &T::LEVEL_BITS.len());
        dbg.field("max_block_level", &T::MAX_BLOCK_LEVEL);
        dbg.field("page_size", &format_args!("{:#x}", T::PAGE_SIZE));
        dbg.finish()
    }
}
impl<T: TableMeta, A: FrameAllocator> PageTableRef<T, A> {
    /// Allocates a new root frame from `allocator` and returns a handle
    /// rooted at it.
    ///
    /// # Errors
    /// Propagates allocation failure from [`Frame::new`].
    ///
    /// # Safety
    /// NOTE(review): the `unsafe` marker is inherited from the original
    /// API; the visible body only allocates. Presumably the caller must
    /// guarantee the handle is deallocated exactly once — TODO confirm.
    pub unsafe fn new(allocator: A) -> PagingResult<Self> {
        let root = Frame::new(allocator)?;
        Ok(Self { root })
    }

    /// Wraps an existing root table located at `paddr` without allocating.
    ///
    /// NOTE(review): no validation is performed here — the caller must
    /// ensure `paddr` really points at a table of this layout.
    pub fn from_paddr(paddr: PhysAddr, allocator: A) -> Self {
        let root = Frame::from_paddr(paddr, allocator);
        Self { root }
    }

    /// Maps the virtual range `[config.vaddr, config.vaddr + config.size)`
    /// to the physical range starting at `config.paddr`, using
    /// `config.pte` as the entry template.
    ///
    /// # Errors
    /// * alignment/size validation failures (see `validate_map_config`);
    /// * address overflow when either end of the range wraps;
    /// * anything `map_range_recursive` reports.
    pub fn map(&mut self, config: &MapConfig) -> PagingResult {
        self.validate_map_config(config)?;
        // Reject ranges whose end would wrap around the address space.
        if config.vaddr.raw().checked_add(config.size).is_none()
            || config.paddr.raw().checked_add(config.size).is_none()
        {
            return Err(PagingError::address_overflow(
                "Virtual or physical address overflow",
            ));
        }
        self.root.map_range_recursive(MapRecursiveConfig {
            start_vaddr: config.vaddr,
            start_paddr: config.paddr,
            end_vaddr: config.vaddr + config.size,
            level: Frame::<T, A>::PT_LEVEL,
            allow_huge: config.allow_huge,
            flush: config.flush,
            pte_template: config.pte,
        })?;
        Ok(())
    }

    /// Unmaps `[start_vaddr, start_vaddr + size)`, flushing the TLB.
    ///
    /// # Errors
    /// Validation/overflow errors or failures from the recursive unmap.
    pub fn unmap(&mut self, start_vaddr: VirtAddr, size: usize) -> PagingResult<()> {
        self.unmap_range(start_vaddr, size, true, "Virtual address overflow in unmap")
    }

    /// Unmaps the range described by `config`, honoring `config.flush`.
    ///
    /// # Errors
    /// Validation/overflow errors or failures from the recursive unmap.
    pub fn unmap_with_config(&mut self, config: &UnmapConfig) -> PagingResult<()> {
        self.unmap_range(
            config.start_vaddr,
            config.size,
            config.flush,
            "Virtual address overflow in unmap_with_config",
        )
    }

    /// Shared implementation behind `unmap` and `unmap_with_config`:
    /// validates, checks for end-of-range overflow (reporting it with the
    /// caller-specific `overflow_msg`), then recursively clears entries.
    fn unmap_range(
        &mut self,
        start_vaddr: VirtAddr,
        size: usize,
        flush: bool,
        overflow_msg: &'static str,
    ) -> PagingResult<()> {
        self.validate_unmap_params(start_vaddr, size)?;
        let end_vaddr = match start_vaddr.raw().checked_add(size) {
            Some(end) => VirtAddr::new(end),
            None => return Err(PagingError::address_overflow(overflow_msg)),
        };
        self.root.unmap_range_recursive(UnmapRecursiveConfig {
            start_vaddr,
            end_vaddr,
            level: Frame::<T, A>::PT_LEVEL,
            flush,
        })?;
        Ok(())
    }

    /// Checks that an unmap request is non-empty and page-aligned in both
    /// start address and size.
    fn validate_unmap_params(&self, start_vaddr: VirtAddr, size: usize) -> PagingResult<()> {
        if size == 0 {
            return Err(PagingError::invalid_size("Size cannot be zero in unmap"));
        }
        if !start_vaddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Start virtual address not page aligned in unmap",
            ));
        }
        if !size.is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Size not page aligned in unmap",
            ));
        }
        Ok(())
    }

    /// Returns a raw walker over the range in `config`, yielding every
    /// entry it visits (including invalid ones).
    pub fn walk_all(&self, config: WalkConfig) -> PageTableWalker<'_, T, A> {
        PageTableWalker::new(self, config)
    }

    /// Iterates over the *valid* entries in `[start_vaddr, end_vaddr)`.
    pub fn walk(
        &self,
        start_vaddr: VirtAddr,
        end_vaddr: VirtAddr,
    ) -> impl Iterator<Item = crate::walk::PteInfo<T::P>> + '_ {
        let config = WalkConfig {
            start_vaddr,
            end_vaddr,
        };
        PageTableWalker::new(self, config).filter(|p| p.pte.to_config(false).valid)
    }

    /// Iterates over all valid *final* mappings (leaf pages / blocks) in
    /// the entire address space.
    pub fn walk_valid(&self) -> impl Iterator<Item = crate::walk::PteInfo<T::P>> + '_ {
        self.walk(0.into(), usize::MAX.into()).filter(|p| {
            // `walk` already filters on validity; the extra check here is
            // kept so this method stands alone if `walk` ever changes.
            let config = p.pte.to_config(false);
            config.valid && p.is_final_mapping
        })
    }

    /// Checks that a map request is non-empty and that both addresses are
    /// page-aligned.
    fn validate_map_config(&self, config: &MapConfig) -> PagingResult {
        if config.size == 0 {
            return Err(PagingError::invalid_size("Size cannot be zero"));
        }
        if !config.vaddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Virtual address not page aligned",
            ));
        }
        if !config.paddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Physical address not page aligned",
            ));
        }
        Ok(())
    }

    /// Base page size of this layout.
    pub const fn page_size() -> usize {
        T::PAGE_SIZE
    }

    /// Number of translation levels in this layout.
    pub const fn table_levels() -> usize {
        T::LEVEL_BITS.len()
    }

    /// Number of meaningful virtual-address bits.
    pub const fn valid_bits() -> usize {
        Frame::<T, A>::PT_VALID_BITS
    }

    /// Consumes the handle and frees the entire table hierarchy.
    ///
    /// # Safety
    /// The table must not be in use (e.g. still installed as an active
    /// root) and no other copy of this handle may be deallocated again.
    pub unsafe fn destroy(mut self) {
        // Delegate so the freeing logic lives in exactly one place.
        unsafe { self.deallocate() }
    }

    /// Frees the entire table hierarchy rooted at `self.root`.
    ///
    /// # Safety
    /// Same contract as [`Self::destroy`]; the handle is dangling after
    /// this call and must not be used again.
    pub unsafe fn deallocate(&mut self) {
        self.root.deallocate_recursive(Frame::<T, A>::PT_LEVEL);
    }

    /// Intended to free only the frames backing `[start_vaddr, end_vaddr)`.
    ///
    /// NOTE(review): currently this only validates the range and frees
    /// nothing — the recursive range-deallocation call is missing. TODO:
    /// implement or document as validation-only.
    ///
    /// # Errors
    /// `invalid_range` when `start_vaddr >= end_vaddr`.
    pub fn deallocate_range(&mut self, start_vaddr: VirtAddr, end_vaddr: VirtAddr) -> PagingResult {
        if start_vaddr >= end_vaddr {
            return Err(PagingError::invalid_range(
                "Start address must be less than end address",
            ));
        }
        Ok(())
    }

    /// Translates `vaddr` to its physical address, also returning the
    /// final page-table entry that mapped it.
    ///
    /// # Errors
    /// Whatever `translate_recursive_with_level` reports for an unmapped
    /// address.
    pub fn translate(&self, vaddr: VirtAddr) -> PagingResult<(PhysAddr, T::P)> {
        let (pte, level) = self
            .root
            .translate_recursive_with_level(vaddr, Frame::<T, A>::PT_LEVEL)?;
        let pte_config = pte.to_config(level > 1);
        // A huge (block) mapping covers a whole level-sized region; a leaf
        // covers a single page. The in-mapping offset is taken modulo that
        // unit size in both cases.
        let unit = if pte_config.huge {
            Frame::<T, A>::level_size(level)
        } else {
            T::PAGE_SIZE
        };
        let phys_addr = PhysAddr::new(pte_config.paddr.raw() + vaddr.raw() % unit);
        Ok((phys_addr, pte))
    }

    /// Like [`Self::translate`] but discards the entry, returning only the
    /// physical address.
    pub fn translate_phys(&self, vaddr: VirtAddr) -> PagingResult<PhysAddr> {
        let (p, _) = self.translate(vaddr)?;
        Ok(p)
    }

    /// Returns `true` if `vaddr` currently translates successfully.
    pub fn is_mapped(&self, vaddr: VirtAddr) -> bool {
        self.translate(vaddr).is_ok()
    }

    /// Physical address of the root table frame (e.g. for loading into the
    /// hardware translation register).
    pub fn root_paddr(&self) -> crate::PhysAddr {
        self.root.paddr
    }
}