use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use ax_memory_addr::{PAGE_SIZE_4K as PAGE_SIZE, PhysAddr, VirtAddr};
use ax_page_table_multiarch::PagingHandler;
use axaddrspace::{AxMmHal, HostPhysAddr, HostVirtAddr};
use lazy_static::lazy_static;
use spin::Mutex;
/// Lowest "physical" address handed out by the mock allocator.
pub const BASE_PADDR: usize = 0x1000;
/// Total size of the mock physical-memory pool, in bytes.
pub const MEMORY_LEN: usize = 0x10000;
/// Bump pointer for the next frame to hand out.
pub static NEXT_PADDR: AtomicUsize = AtomicUsize::new(BASE_PADDR);

/// Page-aligned backing store standing in for physical memory in tests.
#[repr(align(4096))]
pub struct AlignedMemory([u8; MEMORY_LEN]);

impl Default for AlignedMemory {
    /// A fully zeroed pool.
    fn default() -> Self {
        AlignedMemory([0u8; MEMORY_LEN])
    }
}
lazy_static! {
    /// Backing storage for the mock "physical" memory pool; all
    /// phys<->virt translation is computed relative to this buffer's
    /// base address.
    pub static ref MEMORY: Mutex<AlignedMemory> = Mutex::new(AlignedMemory::default());
    /// Serializes tests that share the global mock state in this module.
    pub static ref TEST_MUTEX: Mutex<()> = Mutex::new(());
}
/// Number of successful frame allocations since the last reset.
pub static ALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
/// Number of frame deallocations since the last reset.
pub static DEALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
/// When set, `mock_alloc_frame` fails unconditionally (simulates OOM).
pub static ALLOC_SHOULD_FAIL: AtomicBool = AtomicBool::new(false);
/// Stateless stand-in for the real HAL; all mutable state lives in this
/// module's global statics so tests can inspect and reset it.
#[derive(Debug)]
pub struct MockHal {}
impl AxMmHal for MockHal {
fn alloc_frame() -> Option<HostPhysAddr> {
Self::mock_alloc_frame()
}
fn dealloc_frame(_paddr: HostPhysAddr) {
Self::mock_dealloc_frame(_paddr)
}
fn phys_to_virt(paddr: HostPhysAddr) -> HostVirtAddr {
Self::mock_phys_to_virt(paddr)
}
fn virt_to_phys(vaddr: HostVirtAddr) -> HostPhysAddr {
Self::mock_virt_to_phys(vaddr)
}
}
impl PagingHandler for MockHal {
    /// Allocates a single 4 KiB frame from the mock pool.
    fn alloc_frame() -> Option<PhysAddr> {
        Self::mock_alloc_frame()
    }

    /// Allocates `count` frames and returns the address of the first.
    ///
    /// The mock's bump allocator hands out sequential frames, so within a
    /// test serialized by `TEST_MUTEX` the result is a contiguous run.
    /// `_align` beyond the implicit 4 KiB page alignment is not honored.
    fn alloc_frames(count: usize, _align: usize) -> Option<PhysAddr> {
        if count == 0 {
            // Preserved sentinel for a zero-frame request.
            return Some(PhysAddr::from(0));
        }
        let first = Self::mock_alloc_frame()?;
        for allocated in 1..count {
            if Self::mock_alloc_frame().is_none() {
                // Bug fix: roll back the frames already taken so the
                // ALLOC_COUNT/DEALLOC_COUNT counters stay balanced when a
                // multi-frame request fails part-way. Previously the
                // partial run was leaked, skewing `test_dealloc_count`.
                for i in 0..allocated {
                    Self::mock_dealloc_frame(PhysAddr::from(first.as_usize() + i * PAGE_SIZE));
                }
                return None;
            }
        }
        Some(first)
    }

    /// Records the deallocation of a single frame.
    fn dealloc_frame(paddr: PhysAddr) {
        Self::mock_dealloc_frame(paddr)
    }

    /// Records the deallocation of `count` consecutive frames starting at
    /// `paddr`, one counter tick per frame.
    fn dealloc_frames(paddr: PhysAddr, count: usize) {
        for i in 0..count {
            Self::mock_dealloc_frame(PhysAddr::from(paddr.as_usize() + i * PAGE_SIZE));
        }
    }

    /// Translates a mock physical address into the test buffer.
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        Self::mock_phys_to_virt(paddr)
    }
}
/// Runs `test_fn` with exclusive access to the shared mock state,
/// resetting counters, flags, and the memory pool first.
pub fn mock_hal_test<F: FnOnce() -> R, R>(test_fn: F) -> R {
    // Hold the lock for the entire test so parallel test threads cannot
    // interleave on the module's global statics.
    let _guard = TEST_MUTEX.lock();
    MockHal::reset_state();
    test_fn()
}
/// Asserts that exactly `expected` frame deallocations have been recorded
/// since the last `MockHal::reset_state`.
pub fn test_dealloc_count(expected: usize) {
    let actual_dealloc_count = DEALLOC_COUNT.load(Ordering::SeqCst);
    assert_eq!(
        actual_dealloc_count, expected,
        "Expected {expected} deallocations, but found {actual_dealloc_count}"
    );
}
impl MockHal {
    /// Bump-allocates one 4 KiB frame from the mock pool.
    ///
    /// Returns `None` when failure injection is armed (see
    /// [`MockHal::set_alloc_fail`]) or when the pool is exhausted.
    pub fn mock_alloc_frame() -> Option<PhysAddr> {
        if ALLOC_SHOULD_FAIL.load(Ordering::SeqCst) {
            return None;
        }
        // Claim the next slot; NEXT_PADDR keeps advancing even on an
        // out-of-memory miss, which is harmless for a test mock.
        let frame_start = NEXT_PADDR.fetch_add(PAGE_SIZE, Ordering::SeqCst);
        if frame_start >= BASE_PADDR + MEMORY_LEN {
            None
        } else {
            ALLOC_COUNT.fetch_add(1, Ordering::SeqCst);
            Some(PhysAddr::from_usize(frame_start))
        }
    }

    /// Records a deallocation. The mock does not reclaim or validate the
    /// address — it only counts calls for the test assertions.
    pub fn mock_dealloc_frame(_paddr: PhysAddr) {
        DEALLOC_COUNT.fetch_add(1, Ordering::SeqCst);
    }

    /// Maps a mock physical address to the matching location inside the
    /// `MEMORY` buffer. Panics when `paddr` lies outside the pool.
    pub fn mock_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        let pa = paddr.as_usize();
        assert!(
            (BASE_PADDR..BASE_PADDR + MEMORY_LEN).contains(&pa),
            "Physical address {:#x} out of bounds",
            pa
        );
        // The lock is held only to read the buffer's base pointer; the
        // pointer stays valid afterwards because MEMORY is a static.
        let base = MEMORY.lock().0.as_ptr() as usize;
        VirtAddr::from_usize(base + (pa - BASE_PADDR))
    }

    /// Inverse of [`MockHal::mock_phys_to_virt`]. Panics when `vaddr`
    /// does not point into the `MEMORY` buffer.
    pub fn mock_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
        let base = MEMORY.lock().0.as_ptr() as usize;
        let va = vaddr.as_usize();
        assert!(
            (base..base + MEMORY_LEN).contains(&va),
            "Virtual address {:#x} out of bounds",
            va
        );
        PhysAddr::from_usize(BASE_PADDR + (va - base))
    }

    /// Arms (or disarms) unconditional allocation failure.
    pub fn set_alloc_fail(fail: bool) {
        ALLOC_SHOULD_FAIL.store(fail, Ordering::SeqCst);
    }

    /// Restores all global mock state to its pristine, zeroed condition.
    pub fn reset_state() {
        NEXT_PADDR.store(BASE_PADDR, Ordering::SeqCst);
        ALLOC_SHOULD_FAIL.store(false, Ordering::SeqCst);
        ALLOC_COUNT.store(0, Ordering::SeqCst);
        DEALLOC_COUNT.store(0, Ordering::SeqCst);
        MEMORY.lock().0.fill(0);
    }
}