use crate::vmem::{
BasicMapping, CowMapping, Mapping, MappingKind, TableMovabilityBase, TableOps, TableReadOps,
Void,
};
// x86-64 page-table entry (PTE) bit layout. Flags whose value is zero are
// still named so that PTE-building expressions spell out every field they
// decide on (the original crammed six of these onto a single line).

/// Bit 0: entry is present.
const PAGE_PRESENT: u64 = 1;
/// Bit 1: writable.
const PAGE_RW: u64 = 1 << 1;
/// Bit 63: no-execute.
const PAGE_NX: u64 = 1 << 63;
/// Bits 12..=51: physical address of the next-level table / mapped frame.
const PTE_ADDR_MASK: u64 = 0x000F_FFFF_FFFF_F000;
/// Bit 2 clear: supervisor-only, no user-mode access.
const PAGE_USER_ACCESS_DISABLED: u64 = 0 << 2;
/// Bit 6: dirty flag pre-set.
const PAGE_DIRTY_SET: u64 = 1 << 6;
/// Bit 5: accessed flag pre-set.
const PAGE_ACCESSED_SET: u64 = 1 << 5;
/// Bit 4 clear: cache-disable off, i.e. caching enabled.
const PAGE_CACHE_ENABLED: u64 = 0 << 4;
/// Bit 3 clear: write-back (not write-through) caching.
const PAGE_WRITE_BACK: u64 = 0 << 3;
/// Bit 7 clear: PAT index selects write-back.
const PAGE_PAT_WB: u64 = 0 << 7;
/// Bits 9..=11: available-to-software (AVL) bits.
const PTE_AVL_MASK: u64 = 0x0000_0000_0000_0E00;
/// AVL encoding used by this module: page is a copy-on-write mapping.
const PAGE_AVL_COW: u64 = 1 << 9;
/// Translate a `writable` permission into the PTE R/W bit (bit 1).
#[inline(always)]
const fn page_rw_flag(writable: bool) -> u64 {
    match writable {
        true => PAGE_RW,
        false => 0,
    }
}
/// Translate an `executable` permission into the PTE NX bit (bit 63).
/// NX is set precisely when the page must NOT be executable.
#[inline(always)]
const fn page_nx_flag(executable: bool) -> u64 {
    match executable {
        true => 0,
        false => PAGE_NX,
    }
}
/// Read the entry at `entry_ptr` and return it only when its present bit is
/// set.
///
/// # Safety
/// `entry_ptr` must address a valid page-table entry readable through `op`.
#[inline(always)]
unsafe fn read_pte_if_present<Op: TableReadOps>(op: &Op, entry_ptr: Op::TableAddr) -> Option<u64> {
    // SAFETY: forwarded from this function's own contract.
    let entry = unsafe { op.read_entry(entry_ptr) };
    Some(entry).filter(|&pte| pte & PAGE_PRESENT != 0)
}
/// Extract bits `LOW_BIT..=HIGH_BIT` of `x`, shifted down so the result
/// starts at bit 0. Requires `HIGH_BIT >= LOW_BIT`.
///
/// Written shift-then-mask rather than mask-then-shift: the previous
/// `1 << (HIGH_BIT + 1)` form overflowed (panicking in debug builds) for
/// `HIGH_BIT == 63`, even though results were identical for smaller fields.
#[inline(always)]
fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
    let width = HIGH_BIT - LOW_BIT + 1;
    // A 64-bit-wide field means "everything"; avoid the overflowing shift.
    let mask = if width >= 64 { u64::MAX } else { (1u64 << width) - 1 };
    (x >> LOW_BIT) & mask
}
/// Build the PTE that installs `table_addr` as a next-level table:
/// present, writable, supervisor-only, write-back cached, accessed pre-set.
#[allow(clippy::identity_op)]
#[allow(clippy::precedence)]
fn pte_for_table<Op: TableOps>(table_addr: Op::TableAddr) -> u64 {
    let flags = PAGE_PRESENT
        | PAGE_RW
        | PAGE_USER_ACCESS_DISABLED
        | PAGE_WRITE_BACK
        | PAGE_CACHE_ENABLED
        | PAGE_ACCESSED_SET;
    Op::to_phys(table_addr) | flags
}
/// Selects how root-table relocation is handled during a walk.
/// `TableMoveInfo` is the payload reported when a table moves:
/// `Op::TableAddr` for movable tables, the uninhabited `Void` otherwise.
pub trait TableMovability<Op: TableReadOps + ?Sized, TableMoveInfo> {
    /// Parent-updater installed at the top of the page-table walk.
    type RootUpdateParent: UpdateParent<Op, TableMoveInfo = TableMoveInfo>;
    fn root_update_parent() -> Self::RootUpdateParent;
}
// Movable tables: a relocated root is reported through `Op::update_root`
// (see `UpdateParentRoot`).
impl<Op: TableOps<TableMovability = crate::vmem::MayMoveTable>> TableMovability<Op, Op::TableAddr>
    for crate::vmem::MayMoveTable
{
    type RootUpdateParent = UpdateParentRoot;
    fn root_update_parent() -> Self::RootUpdateParent {
        UpdateParentRoot {}
    }
}
// Immovable tables: moves are impossible, expressed with `Void`.
impl<Op: TableReadOps> TableMovability<Op, Void> for crate::vmem::MayNotMoveTable {
    type RootUpdateParent = UpdateParentNone;
    fn root_update_parent() -> Self::RootUpdateParent {
        UpdateParentNone {}
    }
}
/// Write `entry` at `addr`; if `write_entry` reports that the containing
/// table moved (by returning `Some`), propagate the new location upward via
/// the parent updater.
///
/// # Safety
/// `addr` must be a valid entry address in a table owned by `op`.
unsafe fn write_entry_updating<
    Op: TableOps,
    P: UpdateParent<
        Op,
        TableMoveInfo = <Op::TableMovability as TableMovabilityBase<Op>>::TableMoveInfo,
    >,
>(
    op: &Op,
    parent: P,
    addr: Op::TableAddr,
    entry: u64,
) {
    // SAFETY: forwarded from this function's own contract.
    if let Some(again) = unsafe { op.write_entry(addr, entry) } {
        parent.update_parent(op, again);
    }
}
/// Knows how to rewrite the parent entry pointing at the current table when
/// that table relocates, and how to derive the updater for a child table.
pub trait UpdateParent<Op: TableReadOps + ?Sized>: Copy {
    /// Payload describing a table move (`Op::TableAddr`, or `Void` when
    /// tables can never move).
    type TableMoveInfo;
    /// Updater handed to the next level down the walk.
    type ChildType: UpdateParent<Op, TableMoveInfo = Self::TableMoveInfo>;
    /// Record that the table this updater guards now lives at `new_ptr`.
    fn update_parent(self, op: &Op, new_ptr: Self::TableMoveInfo);
    /// Build the updater for the child table referenced by `entry_ptr`.
    fn for_child_at_entry(self, entry_ptr: Op::TableAddr) -> Self::ChildType;
}
/// Updater for a non-root table: remembers the parent-table entry pointing
/// at this table, plus the updater for the parent itself.
pub struct UpdateParentTable<Op: TableOps, P: UpdateParent<Op>> {
    parent: P,
    // Address of the entry (in the parent table) pointing at this table.
    entry_ptr: Op::TableAddr,
}
// Manual Clone/Copy impls so `Op` itself need not be Clone/Copy (a derive
// would add that bound on the type parameter).
impl<Op: TableOps, P: UpdateParent<Op>> Clone for UpdateParentTable<Op, P> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<Op: TableOps, P: UpdateParent<Op>> Copy for UpdateParentTable<Op, P> {}
impl<Op: TableOps, P: UpdateParent<Op>> UpdateParentTable<Op, P> {
    fn new(parent: P, entry_ptr: Op::TableAddr) -> Self {
        UpdateParentTable { parent, entry_ptr }
    }
}
// A moved child table is recorded by rewriting our own entry for it — and
// since that write may in turn move *this* table, the write is routed
// through `write_entry_updating` so the move propagates upward.
impl<
        Op: TableOps<TableMovability = crate::vmem::MayMoveTable>,
        P: UpdateParent<Op, TableMoveInfo = Op::TableAddr>,
    > UpdateParent<Op> for UpdateParentTable<Op, P>
{
    type TableMoveInfo = Op::TableAddr;
    type ChildType = UpdateParentTable<Op, Self>;
    fn update_parent(self, op: &Op, new_ptr: Op::TableAddr) {
        let pte = pte_for_table::<Op>(new_ptr);
        // SAFETY: entry_ptr was captured from a live walk over op's tables.
        unsafe {
            write_entry_updating(op, self.parent, self.entry_ptr, pte);
        }
    }
    fn for_child_at_entry(self, entry_ptr: Op::TableAddr) -> Self::ChildType {
        Self::ChildType::new(self, entry_ptr)
    }
}
/// Updater for the root table: there is no parent entry to rewrite, so a
/// move is reported directly via `Op::update_root`.
#[derive(Copy, Clone)]
pub struct UpdateParentRoot {}
impl<Op: TableOps<TableMovability = crate::vmem::MayMoveTable>> UpdateParent<Op>
    for UpdateParentRoot
{
    type TableMoveInfo = Op::TableAddr;
    type ChildType = UpdateParentTable<Op, Self>;
    fn update_parent(self, op: &Op, new_ptr: Op::TableAddr) {
        // SAFETY: new_ptr is the relocation address op itself reported.
        unsafe {
            op.update_root(new_ptr);
        }
    }
    fn for_child_at_entry(self, entry_ptr: Op::TableAddr) -> Self::ChildType {
        Self::ChildType::new(self, entry_ptr)
    }
}
/// Updater used when tables can never move: `TableMoveInfo = Void` makes
/// `update_parent` statically uncallable.
#[derive(Copy, Clone)]
pub struct UpdateParentNone {}
impl<Op: TableReadOps> UpdateParent<Op> for UpdateParentNone {
    type TableMoveInfo = Void;
    type ChildType = Self;
    fn update_parent(self, _op: &Op, impossible: Void) {
        // Void has no values; this match proves the call cannot happen.
        match impossible {}
    }
    fn for_child_at_entry(self, _entry_ptr: Op::TableAddr) -> Self {
        self
    }
}
/// Work item entering one table level: process `len` bytes of virtual
/// address space starting at `vmin`, using the table at `table_base`.
struct MapRequest<Op: TableReadOps, P: UpdateParent<Op>> {
    table_base: Op::TableAddr,
    vmin: VirtAddr,
    len: u64,
    update_parent: P,
}
/// One entry's slice of a `MapRequest`: the entry at `entry_ptr` covers
/// `len` bytes starting at `vmin`.
struct MapResponse<Op: TableReadOps, P: UpdateParent<Op>> {
    entry_ptr: Op::TableAddr,
    vmin: VirtAddr,
    len: u64,
    update_parent: P,
}
/// Splits a `MapRequest` into per-entry `MapResponse`s for the table level
/// whose virtual-address index field is bits `LOW_BIT..=HIGH_BIT`.
struct ModifyPteIterator<
    const HIGH_BIT: u8,
    const LOW_BIT: u8,
    Op: TableReadOps,
    P: UpdateParent<Op>,
> {
    request: MapRequest<Op, P>,
    // Count of responses already produced.
    n: u64,
}
impl<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableReadOps, P: UpdateParent<Op>> Iterator
    for ModifyPteIterator<HIGH_BIT, LOW_BIT, Op, P>
{
    type Item = MapResponse<Op, P>;
    fn next(&mut self) -> Option<Self::Item> {
        let lower_bits_mask = (1 << LOW_BIT) - 1;
        // First item starts at the (possibly unaligned) request start; later
        // items start at successive 2^LOW_BIT-aligned boundaries.
        let next_vmin = if self.n == 0 {
            self.request.vmin
        } else {
            let aligned_min = self.request.vmin & !lower_bits_mask;
            // checked_add stops iteration if the address computation wraps.
            aligned_min.checked_add(self.n << LOW_BIT)?
        };
        // Finished once we have stepped past the end of the request.
        // NOTE(review): `vmin + len` could overflow u64 for ranges touching
        // the very top of the address space — confirm callers keep the sum
        // in range.
        if next_vmin >= self.request.vmin + self.request.len {
            return None;
        }
        // Entry address = table base + 8 bytes per index at this level.
        let entry_ptr = Op::entry_addr(
            self.request.table_base,
            bits::<HIGH_BIT, LOW_BIT>(next_vmin) << 3,
        );
        let len_from_here = self.request.len - (next_vmin - self.request.vmin);
        // Bytes remaining before the next boundary of this level.
        let max_len = (1 << LOW_BIT) - (next_vmin & lower_bits_mask);
        let next_len = core::cmp::min(len_from_here, max_len);
        self.n += 1;
        Some(MapResponse {
            entry_ptr,
            vmin: next_vmin,
            len: next_len,
            update_parent: self.request.update_parent,
        })
    }
}
/// Iterate the entries of one table level (index field `LOW_BIT..=HIGH_BIT`)
/// that cover the virtual range described by `r`.
fn modify_ptes<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableReadOps, P: UpdateParent<Op>>(
    r: MapRequest<Op, P>,
) -> ModifyPteIterator<HIGH_BIT, LOW_BIT, Op, P> {
    ModifyPteIterator { request: r, n: 0 }
}
/// Ensure the entry at `x.entry_ptr` points at a next-level table —
/// allocating and installing one when the entry is not present — and return
/// the `MapRequest` for descending into that table.
///
/// # Safety
/// `x.entry_ptr` must be a valid entry address in a table owned by `op`.
unsafe fn alloc_pte_if_needed<
    Op: TableOps,
    P: UpdateParent<
        Op,
        TableMoveInfo = <Op::TableMovability as TableMovabilityBase<Op>>::TableMoveInfo,
    >,
>(
    op: &Op,
    x: MapResponse<Op, P>,
) -> MapRequest<Op, P::ChildType>
where
    P::ChildType: UpdateParent<Op>,
{
    let new_update_parent = x.update_parent.for_child_at_entry(x.entry_ptr);
    // SAFETY: forwarded from this function's own contract.
    if let Some(pte) = unsafe { read_pte_if_present(op, x.entry_ptr) } {
        // Entry already present: descend into the existing table.
        return MapRequest {
            table_base: Op::from_phys(pte & PTE_ADDR_MASK),
            vmin: x.vmin,
            len: x.len,
            update_parent: new_update_parent,
        };
    }
    // Not present: allocate a fresh table and point this entry at it.
    // SAFETY: forwarded from this function's own contract.
    let page_addr = unsafe { op.alloc_table() };
    let pte = pte_for_table::<Op>(page_addr);
    // SAFETY: forwarded from this function's own contract.
    unsafe {
        write_entry_updating(op, x.update_parent, x.entry_ptr, pte);
    };
    MapRequest {
        table_base: page_addr,
        vmin: x.vmin,
        len: x.len,
        update_parent: new_update_parent,
    }
}
/// Write the leaf PTE for the page of `mapping` described by `r`.
///
/// # Safety
/// `r.entry_ptr` must be a valid leaf-level entry address owned by `op`.
#[allow(clippy::identity_op)]
#[allow(clippy::precedence)]
unsafe fn map_page<
    Op: TableOps,
    P: UpdateParent<
        Op,
        TableMoveInfo = <Op::TableMovability as TableMovabilityBase<Op>>::TableMoveInfo,
    >,
>(
    op: &Op,
    mapping: &Mapping,
    r: MapResponse<Op, P>,
) {
    // Physical target = mapping's phys base + this page's offset within the
    // mapping's virtual range.
    let pte = match &mapping.kind {
        MappingKind::Basic(bm) => {
            (mapping.phys_base + (r.vmin - mapping.virt_base))
                | page_nx_flag(bm.executable)
                | PAGE_PAT_WB
                | PAGE_DIRTY_SET
                | PAGE_ACCESSED_SET
                | PAGE_CACHE_ENABLED
                | PAGE_WRITE_BACK
                | PAGE_USER_ACCESS_DISABLED
                | page_rw_flag(bm.writable)
                | PAGE_PRESENT
        }
        // Copy-on-write pages are installed read-only (literal `0` where the
        // Basic arm ORs the RW flag) and tagged via the AVL software bits.
        MappingKind::Cow(cm) => {
            (mapping.phys_base + (r.vmin - mapping.virt_base))
                | page_nx_flag(cm.executable)
                | PAGE_AVL_COW
                | PAGE_PAT_WB
                | PAGE_DIRTY_SET
                | PAGE_ACCESSED_SET
                | PAGE_CACHE_ENABLED
                | PAGE_WRITE_BACK
                | PAGE_USER_ACCESS_DISABLED
                | 0
                | PAGE_PRESENT
        }
    };
    // SAFETY: forwarded from this function's own contract.
    unsafe {
        write_entry_updating(op, r.update_parent, r.entry_ptr, pte);
    }
}
/// Install `mapping` into the address space behind `op`, allocating
/// intermediate tables on demand.
///
/// Walks the four levels by virtual-address index field: bits 47..=39,
/// 38..=30, 29..=21, then the leaf level 20..=12.
#[allow(clippy::missing_safety_doc)]
pub unsafe fn map<Op: TableOps>(op: &Op, mapping: Mapping) {
    modify_ptes::<47, 39, Op, _>(MapRequest {
        table_base: op.root_table(),
        vmin: mapping.virt_base,
        len: mapping.len,
        update_parent: Op::TableMovability::root_update_parent(),
    })
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<38, 30, Op, _>)
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<29, 21, Op, _>)
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<20, 12, Op, _>)
    .map(|r| unsafe { map_page(op, &mapping, r) })
    // Iterators are lazy; for_each(drop) drives the whole pipeline.
    .for_each(drop);
}
/// Descend into the table referenced by `x.entry_ptr`, or return `None`
/// when that entry is not present.
///
/// # Safety
/// `x.entry_ptr` must be a valid entry address readable through `op`.
unsafe fn require_pte_exist<Op: TableReadOps, P: UpdateParent<Op>>(
    op: &Op,
    x: MapResponse<Op, P>,
) -> Option<MapRequest<Op, P::ChildType>>
where
    P::ChildType: UpdateParent<Op>,
{
    // SAFETY: forwarded from this function's own contract.
    let pte = unsafe { read_pte_if_present(op, x.entry_ptr) }?;
    let table_base = Op::from_phys(pte & PTE_ADDR_MASK);
    Some(MapRequest {
        table_base,
        vmin: x.vmin,
        len: x.len,
        update_parent: x.update_parent.for_child_at_entry(x.entry_ptr),
    })
}
/// Translate the virtual range `[address, address + len)` into the
/// page-granular `Mapping`s currently installed. Unmapped pages are
/// silently skipped rather than reported.
#[allow(clippy::missing_safety_doc)]
pub unsafe fn virt_to_phys<'a, Op: TableReadOps + 'a>(
    op: impl core::convert::AsRef<Op> + Copy + 'a,
    address: u64,
    len: u64,
) -> impl Iterator<Item = Mapping> + 'a {
    // Strip sign-extension bits, then align down to a page boundary.
    let addr = address & ((1u64 << VA_BITS) - 1);
    let vmin = addr & !(PAGE_SIZE as u64 - 1);
    // Clamp the walk to the 48-bit virtual address space.
    let vmax = core::cmp::min(addr + len, 1u64 << VA_BITS);
    modify_ptes::<47, 39, Op, _>(MapRequest {
        table_base: op.as_ref().root_table(),
        vmin,
        len: vmax - vmin,
        // Read-only walk: no table is ever written, so no parent updates.
        update_parent: UpdateParentNone {},
    })
    .filter_map(move |r| unsafe { require_pte_exist(op.as_ref(), r) })
    .flat_map(modify_ptes::<38, 30, Op, _>)
    .filter_map(move |r| unsafe { require_pte_exist(op.as_ref(), r) })
    .flat_map(modify_ptes::<29, 21, Op, _>)
    .filter_map(move |r| unsafe { require_pte_exist(op.as_ref(), r) })
    .flat_map(modify_ptes::<20, 12, Op, _>)
    .filter_map(move |r| {
        let pte = unsafe { read_pte_if_present(op.as_ref(), r.entry_ptr) }?;
        let phys_addr = pte & PTE_ADDR_MASK;
        // Sign-extend bit VA_BITS-1 to rebuild the canonical virtual address.
        let sgn_bit = r.vmin >> (VA_BITS - 1);
        let sgn_bits = 0u64.wrapping_sub(sgn_bit) << VA_BITS;
        let virt_addr = sgn_bits | r.vmin;
        let executable = (pte & PAGE_NX) == 0;
        // The AVL software bits distinguish CoW pages from plain mappings.
        let avl = pte & PTE_AVL_MASK;
        let kind = if avl == PAGE_AVL_COW {
            MappingKind::Cow(CowMapping {
                readable: true,
                executable,
            })
        } else {
            MappingKind::Basic(BasicMapping {
                readable: true,
                writable: (pte & PAGE_RW) != 0,
                executable,
            })
        };
        Some(Mapping {
            phys_base: phys_addr,
            virt_base: virt_addr,
            len: PAGE_SIZE as u64,
            kind,
        })
    })
}
/// Number of significant virtual-address bits (4-level paging).
const VA_BITS: usize = 48;
/// Size of one mapped page, in bytes.
pub const PAGE_SIZE: usize = 4096;
/// Size of one page table, in bytes (512 eight-byte entries).
pub const PAGE_TABLE_SIZE: usize = 4096;
/// Raw 64-bit page-table entry.
pub type PageTableEntry = u64;
/// Virtual address (canonical form, sign-extended above bit 47).
pub type VirtAddr = u64;
/// Physical address.
pub type PhysAddr = u64;
#[cfg(test)]
mod tests {
    use alloc::vec;
    use alloc::vec::Vec;
    use core::cell::RefCell;
    use super::*;
    use crate::vmem::{
        BasicMapping, Mapping, MappingKind, MayNotMoveTable, PAGE_TABLE_ENTRIES_PER_TABLE,
        TableOps, TableReadOps, Void,
    };
    /// In-memory page-table backend for tests: tables live in a `Vec` and a
    /// `TableAddr` is a `(table index, entry index)` pair.
    struct MockTableOps {
        tables: RefCell<Vec<[u64; PAGE_TABLE_ENTRIES_PER_TABLE]>>,
    }
    impl core::convert::AsRef<MockTableOps> for MockTableOps {
        fn as_ref(&self) -> &Self {
            self
        }
    }
    impl MockTableOps {
        // Starts with a single zeroed root table at index 0.
        fn new() -> Self {
            Self {
                tables: RefCell::new(vec![[0u64; PAGE_TABLE_ENTRIES_PER_TABLE]]),
            }
        }
        fn table_count(&self) -> usize {
            self.tables.borrow().len()
        }
        fn get_entry(&self, table_idx: usize, entry_idx: usize) -> u64 {
            self.tables.borrow()[table_idx][entry_idx]
        }
    }
    impl TableReadOps for MockTableOps {
        type TableAddr = (usize, usize);
        fn entry_addr(addr: Self::TableAddr, entry_offset: u64) -> Self::TableAddr {
            let phys = Self::to_phys(addr) + entry_offset;
            Self::from_phys(phys)
        }
        unsafe fn read_entry(&self, addr: Self::TableAddr) -> u64 {
            self.tables.borrow()[addr.0][addr.1]
        }
        // Fake phys addr scheme: table index * table size + entry byte offset.
        fn to_phys(addr: Self::TableAddr) -> PhysAddr {
            (addr.0 as u64 * PAGE_TABLE_SIZE as u64) + (addr.1 as u64 * 8)
        }
        fn from_phys(addr: PhysAddr) -> Self::TableAddr {
            let table_idx = (addr / PAGE_TABLE_SIZE as u64) as usize;
            let entry_idx = ((addr % PAGE_TABLE_SIZE as u64) / 8) as usize;
            (table_idx, entry_idx)
        }
        fn root_table(&self) -> Self::TableAddr {
            (0, 0)
        }
    }
    impl TableOps for MockTableOps {
        type TableMovability = MayNotMoveTable;
        unsafe fn alloc_table(&self) -> Self::TableAddr {
            let mut tables = self.tables.borrow_mut();
            let idx = tables.len();
            tables.push([0u64; PAGE_TABLE_ENTRIES_PER_TABLE]);
            (idx, 0)
        }
        unsafe fn write_entry(&self, addr: Self::TableAddr, entry: u64) -> Option<Void> {
            self.tables.borrow_mut()[addr.0][addr.1] = entry;
            // Mock tables never move.
            None
        }
        unsafe fn update_root(&self, impossible: Void) {
            match impossible {}
        }
    }
    // --- bits(): index-field extraction for each of the four levels ---
    #[test]
    fn test_bits_extracts_pml4_index() {
        let addr: u64 = 0x0000_0080_0000_0000;
        assert_eq!(bits::<47, 39>(addr), 1);
    }
    #[test]
    fn test_bits_extracts_pdpt_index() {
        let addr: u64 = 0x4000_0000;
        assert_eq!(bits::<38, 30>(addr), 1);
    }
    #[test]
    fn test_bits_extracts_pd_index() {
        let addr: u64 = 0x0000_0000_0020_0000;
        assert_eq!(bits::<29, 21>(addr), 1);
    }
    #[test]
    fn test_bits_extracts_pt_index() {
        let addr: u64 = 0x0000_0000_0000_1000;
        assert_eq!(bits::<20, 12>(addr), 1);
    }
    #[test]
    fn test_bits_max_index() {
        let addr: u64 = 0x0000_FF80_0000_0000;
        assert_eq!(bits::<47, 39>(addr), 511);
    }
    // --- permission-flag helpers ---
    #[test]
    fn test_page_rw_flag_writable() {
        assert_eq!(page_rw_flag(true), PAGE_RW);
    }
    #[test]
    fn test_page_rw_flag_readonly() {
        assert_eq!(page_rw_flag(false), 0);
    }
    #[test]
    fn test_page_nx_flag_executable() {
        assert_eq!(page_nx_flag(true), 0);
    }
    #[test]
    fn test_page_nx_flag_not_executable() {
        assert_eq!(page_nx_flag(false), PAGE_NX);
    }
    // --- map(): end-to-end table construction ---
    #[test]
    fn test_map_single_page() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        // Root + one table per remaining level = 4 tables total.
        assert_eq!(ops.table_count(), 4);
        let pml4_entry = ops.get_entry(0, 0);
        assert_ne!(pml4_entry & PAGE_PRESENT, 0, "PML4 entry should be present");
        assert_ne!(pml4_entry & PAGE_RW, 0, "PML4 entry should be writable");
        let pte = ops.get_entry(3, 1);
        assert_ne!(pte & PAGE_PRESENT, 0, "PTE should be present");
        assert_ne!(pte & PAGE_RW, 0, "PTE should be writable");
        assert_ne!(pte & PAGE_NX, 0, "PTE should have NX set (not executable)");
        assert_eq!(pte & PTE_ADDR_MASK, 0x1000, "PTE should map to phys 0x1000");
    }
    #[test]
    fn test_map_executable_page() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x2000,
            virt_base: 0x2000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: false,
                executable: true,
            }),
        };
        unsafe { map(&ops, mapping) };
        let pte = ops.get_entry(3, 2);
        assert_ne!(pte & PAGE_PRESENT, 0, "PTE should be present");
        assert_eq!(pte & PAGE_RW, 0, "PTE should be read-only");
        assert_eq!(pte & PAGE_NX, 0, "PTE should NOT have NX set (executable)");
    }
    #[test]
    fn test_map_multiple_pages() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x10000,
            virt_base: 0x10000,
            len: 4 * PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        for i in 0..4 {
            // 0x10000 >> 12 = 16: pages land at PT entries 16..20.
            let entry_idx = 16 + i;
            let pte = ops.get_entry(3, entry_idx);
            assert_ne!(pte & PAGE_PRESENT, 0, "PTE {} should be present", i);
            let expected_phys = 0x10000 + (i as u64 * PAGE_SIZE as u64);
            assert_eq!(
                pte & PTE_ADDR_MASK,
                expected_phys,
                "PTE {} should map to correct phys addr",
                i
            );
        }
    }
    #[test]
    fn test_map_reuses_existing_tables() {
        let ops = MockTableOps::new();
        let mapping1 = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping1) };
        let tables_after_first = ops.table_count();
        // Second page shares all four tables with the first.
        let mapping2 = Mapping {
            phys_base: 0x5000,
            virt_base: 0x5000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping2) };
        assert_eq!(
            ops.table_count(),
            tables_after_first,
            "Should reuse existing page tables"
        );
    }
    // --- virt_to_phys(): reverse translation ---
    #[test]
    fn test_virt_to_phys_mapped_address() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        let result = unsafe { virt_to_phys(&ops, 0x1000, 1).next() };
        assert!(result.is_some(), "Should find mapped address");
        let mapping = result.unwrap();
        assert_eq!(mapping.phys_base, 0x1000);
    }
    #[test]
    fn test_virt_to_phys_unaligned_virt() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        // Query inside the page: result is still page-aligned.
        let result = unsafe { virt_to_phys(&ops, 0x1234, 1).next() };
        assert!(result.is_some(), "Should find mapped address");
        let mapping = result.unwrap();
        assert_eq!(mapping.phys_base, 0x1000);
    }
    #[test]
    fn test_virt_to_phys_unaligned_virt_and_across_pages_len() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: 2 * PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        // 0x1F00 + 0x300 crosses the 0x2000 page boundary.
        let mappings = unsafe { virt_to_phys(&ops, 0x1F00, 0x300).collect::<Vec<_>>() };
        assert_eq!(mappings.len(), 2, "Should return 2 mappings for 2 pages");
        assert_eq!(mappings[0].phys_base, 0x1000);
        assert_eq!(mappings[1].phys_base, 0x2000);
    }
    #[test]
    fn test_virt_to_phys_unaligned_virt_and_multiple_page_len() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64 * 2 + 0x200,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        let mappings =
            unsafe { virt_to_phys(&ops, 0x1234, PAGE_SIZE as u64 * 2 + 0x10).collect::<Vec<_>>() };
        assert_eq!(mappings.len(), 3, "Should return 3 mappings for 3 pages");
        assert_eq!(mappings[0].phys_base, 0x1000);
        assert_eq!(mappings[1].phys_base, 0x2000);
        assert_eq!(mappings[2].phys_base, 0x3000);
    }
    #[test]
    fn test_virt_to_phys_perms() {
        // Round-trip every permission combination through map + virt_to_phys.
        let test = |kind| {
            let ops = MockTableOps::new();
            let mapping = Mapping {
                phys_base: 0x1000,
                virt_base: 0x1000,
                len: PAGE_SIZE as u64,
                kind,
            };
            unsafe { map(&ops, mapping) };
            let result = unsafe { virt_to_phys(&ops, 0x1000, 1).next() };
            let mapping = result.unwrap();
            assert_eq!(mapping.kind, kind);
        };
        test(MappingKind::Basic(BasicMapping {
            readable: true,
            writable: false,
            executable: false,
        }));
        test(MappingKind::Basic(BasicMapping {
            readable: true,
            writable: false,
            executable: true,
        }));
        test(MappingKind::Basic(BasicMapping {
            readable: true,
            writable: true,
            executable: false,
        }));
        test(MappingKind::Basic(BasicMapping {
            readable: true,
            writable: true,
            executable: true,
        }));
        test(MappingKind::Cow(CowMapping {
            readable: true,
            executable: false,
        }));
        test(MappingKind::Cow(CowMapping {
            readable: true,
            executable: true,
        }));
    }
    #[test]
    fn test_virt_to_phys_unmapped_address() {
        let ops = MockTableOps::new();
        let result = unsafe { virt_to_phys(&ops, 0x1000, 1).next() };
        assert!(result.is_none(), "Should return None for unmapped address");
    }
    #[test]
    fn test_virt_to_phys_partially_mapped() {
        let ops = MockTableOps::new();
        let mapping = Mapping {
            phys_base: 0x1000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: false,
            }),
        };
        unsafe { map(&ops, mapping) };
        // 0x5000 shares the PT with 0x1000 but its own entry is absent.
        let result = unsafe { virt_to_phys(&ops, 0x5000, 1).next() };
        assert!(
            result.is_none(),
            "Should return None for unmapped address in same PT"
        );
    }
    // --- ModifyPteIterator splitting behavior ---
    #[test]
    fn test_modify_pte_iterator_single_page() {
        let ops = MockTableOps::new();
        let request = MapRequest {
            table_base: ops.root_table(),
            vmin: 0x1000,
            len: PAGE_SIZE as u64,
            update_parent: UpdateParentNone {},
        };
        let responses: Vec<_> = modify_ptes::<20, 12, MockTableOps, _>(request).collect();
        assert_eq!(responses.len(), 1, "Single page should yield one response");
        assert_eq!(responses[0].vmin, 0x1000);
        assert_eq!(responses[0].len, PAGE_SIZE as u64);
    }
    #[test]
    fn test_modify_pte_iterator_multiple_pages() {
        let ops = MockTableOps::new();
        let request = MapRequest {
            table_base: ops.root_table(),
            vmin: 0x1000,
            len: 3 * PAGE_SIZE as u64,
            update_parent: UpdateParentNone {},
        };
        let responses: Vec<_> = modify_ptes::<20, 12, MockTableOps, _>(request).collect();
        assert_eq!(responses.len(), 3, "3 pages should yield 3 responses");
    }
    #[test]
    fn test_modify_pte_iterator_zero_length() {
        let ops = MockTableOps::new();
        let request = MapRequest {
            table_base: ops.root_table(),
            vmin: 0x1000,
            len: 0,
            update_parent: UpdateParentNone {},
        };
        let responses: Vec<_> = modify_ptes::<20, 12, MockTableOps, _>(request).collect();
        assert_eq!(responses.len(), 0, "Zero length should yield no responses");
    }
    #[test]
    fn test_modify_pte_iterator_unaligned_start() {
        let ops = MockTableOps::new();
        let request = MapRequest {
            table_base: ops.root_table(),
            vmin: 0x1800,
            len: 0x1000,
            update_parent: UpdateParentNone {},
        };
        let responses: Vec<_> = modify_ptes::<20, 12, MockTableOps, _>(request).collect();
        assert_eq!(
            responses.len(),
            2,
            "Unaligned mapping spanning 2 pages should yield 2 responses"
        );
        // First slice runs up to the page boundary, second covers the rest.
        assert_eq!(responses[0].vmin, 0x1800);
        assert_eq!(responses[0].len, 0x800);
        assert_eq!(responses[1].vmin, 0x2000);
        assert_eq!(responses[1].len, 0x800);
    }
    // --- MockTableOps address arithmetic ---
    #[test]
    fn test_entry_addr_from_table_base() {
        let result = MockTableOps::entry_addr((2, 0), 40);
        assert_eq!(result, (2, 5), "Should return (table 2, entry 5)");
    }
    #[test]
    fn test_entry_addr_with_nonzero_base_entry() {
        let result = MockTableOps::entry_addr((1, 10), 16);
        assert_eq!(result, (1, 12), "Should add offset to base entry");
    }
    #[test]
    fn test_to_phys_from_phys_roundtrip() {
        let addr = (3, 42);
        let phys = MockTableOps::to_phys(addr);
        let back = MockTableOps::from_phys(phys);
        assert_eq!(back, addr, "to_phys/from_phys should roundtrip");
    }
}