mod test_utils;
use core::sync::atomic::Ordering;
use ax_memory_addr::PhysAddr;
use axaddrspace::{AddrSpace, GuestPhysAddr, MappingFlags};
use axin::axin;
use test_utils::{
ALLOC_COUNT, BASE_PADDR, DEALLOC_COUNT, MEMORY_LEN, MockHal, mock_hal_test, test_dealloc_count,
};
/// Builds a fresh 64 KiB guest-physical address space based at 0x10000.
///
/// Returns the space alongside its base and size so each test can assert
/// against the exact construction parameters without repeating them.
fn setup_test_addr_space() -> (AddrSpace<MockHal>, GuestPhysAddr, usize) {
    let base = GuestPhysAddr::from_usize(0x10000);
    let size: usize = 0x10000;
    let aspace = AddrSpace::<MockHal>::new_empty(4, base, size).unwrap();
    (aspace, base, size)
}
#[test]
#[axin(decorator(mock_hal_test), on_exit(test_dealloc_count(1)))]
fn test_addrspace_creation() {
    // A newly created space must report the geometry it was built with,
    // and construction must have performed exactly one backing allocation
    // (presumably the root page table — confirm against MockHal).
    let (aspace, base, size) = setup_test_addr_space();
    let expected_end = base + size;
    assert_eq!(aspace.base(), base);
    assert_eq!(aspace.size(), size);
    assert_eq!(aspace.end(), expected_end);
    assert_eq!(ALLOC_COUNT.load(Ordering::SeqCst), 1);
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_contains_range() {
    let (aspace, base, size) = setup_test_addr_space();
    // Ranges fully inside the space — including the space itself — are contained.
    assert!(aspace.contains_range(base, 0x1000));
    assert!(aspace.contains_range(base + 0x1000, 0x2000));
    assert!(aspace.contains_range(base, size));
    // Ranges that start before the base, start at the end, exceed the size,
    // or straddle the upper boundary are rejected.
    assert!(!aspace.contains_range(base - 0x1000, 0x1000));
    assert!(!aspace.contains_range(base + size, 0x1000));
    assert!(!aspace.contains_range(base, size + 0x1000));
    assert!(!aspace.contains_range(base + 0x3000, 0xf000));
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_map_linear() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x18000);
    let hpa = PhysAddr::from_usize(0x10000);
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    let len = 0x8000;
    aspace.map_linear(gpa, hpa, len, flags).unwrap();
    // A linear mapping keeps a constant guest→host offset, so translation
    // must track the same delta at any page within the mapped window.
    assert_eq!(aspace.translate(gpa).unwrap(), hpa);
    assert_eq!(aspace.translate(gpa + 0x1000).unwrap(), hpa + 0x1000);
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_map_alloc_populate() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x10000);
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    // Exactly one allocation (from space creation) has happened so far.
    let allocs_before = ALLOC_COUNT.load(Ordering::SeqCst);
    assert_eq!(allocs_before, 1);
    // Eagerly-populated mapping: frames are allocated immediately.
    aspace.map_alloc(gpa, 0x2000, flags, true).unwrap();
    assert!(ALLOC_COUNT.load(Ordering::SeqCst) > allocs_before);
    // Both pages translate into the mock physical window, and to distinct
    // frames (allocation-backed pages need not be contiguous or shared).
    let frame0 = aspace.translate(gpa).unwrap();
    let frame1 = aspace.translate(gpa + 0x1000).unwrap();
    for frame in [frame0, frame1] {
        assert!(frame.as_usize() >= BASE_PADDR && frame.as_usize() < BASE_PADDR + MEMORY_LEN);
    }
    assert_ne!(frame0, frame1);
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_map_alloc_lazy() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x13000);
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    let allocs_before = ALLOC_COUNT.load(Ordering::SeqCst);
    // Lazy mapping (populate = false): page-table bookkeeping may allocate,
    // but no frame is wired up until a fault touches the page.
    aspace.map_alloc(gpa, 0x1000, flags, false).unwrap();
    assert!(ALLOC_COUNT.load(Ordering::SeqCst) >= allocs_before);
    // Untouched lazy pages therefore do not translate yet.
    assert!(aspace.translate(gpa).is_none());
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_page_fault_handling() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x14000);
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    // Install a lazy (unpopulated) mapping, then simulate the guest
    // touching it: the fault handler must allocate and wire up a frame.
    aspace.map_alloc(gpa, 0x1000, flags, false).unwrap();
    let allocs_before_fault = ALLOC_COUNT.load(Ordering::SeqCst);
    assert!(aspace.handle_page_fault(gpa, MappingFlags::READ));
    // The fault consumed at least one new frame allocation …
    assert!(ALLOC_COUNT.load(Ordering::SeqCst) > allocs_before_fault);
    // … and the page now translates to backing memory.
    assert!(aspace.translate(gpa).is_some());
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_unmap() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x15000);
    let len = 0x2000;
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    // Populate two pages eagerly so both are translatable up front.
    aspace.map_alloc(gpa, len, flags, true).unwrap();
    assert!(aspace.translate(gpa).is_some());
    assert!(aspace.translate(gpa + 0x1000).is_some());
    let deallocs_before = DEALLOC_COUNT.load(Ordering::SeqCst);
    aspace.unmap(gpa, len).unwrap();
    // Unmapping removes the translations and returns frames to the allocator.
    assert!(aspace.translate(gpa).is_none());
    assert!(aspace.translate(gpa + 0x1000).is_none());
    assert!(DEALLOC_COUNT.load(Ordering::SeqCst) > deallocs_before);
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_clear() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    let len = 0x1000;
    // Create two independent populated mappings.
    let first = GuestPhysAddr::from_usize(0x16000);
    let second = GuestPhysAddr::from_usize(0x17000);
    aspace.map_alloc(first, len, flags, true).unwrap();
    aspace.map_alloc(second, len, flags, true).unwrap();
    assert!(aspace.translate(first).is_some());
    assert!(aspace.translate(second).is_some());
    // clear() tears down every mapping at once and frees their frames.
    let deallocs_before = DEALLOC_COUNT.load(Ordering::SeqCst);
    aspace.clear();
    assert!(aspace.translate(first).is_none());
    assert!(aspace.translate(second).is_none());
    assert!(DEALLOC_COUNT.load(Ordering::SeqCst) > deallocs_before);
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_translate() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x18000);
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    aspace.map_alloc(gpa, 0x1000, flags, true).unwrap();
    // A mapped page resolves to a frame inside the mock physical window.
    let frame = aspace.translate(gpa).expect("Translation failed");
    let pa = frame.as_usize();
    assert!(pa >= BASE_PADDR);
    assert!(pa < BASE_PADDR + MEMORY_LEN);
    // In-range but unmapped addresses do not translate …
    assert!(aspace.translate(GuestPhysAddr::from_usize(0x19000)).is_none());
    // … and neither do addresses outside the space entirely.
    assert!(aspace.translate(GuestPhysAddr::from_usize(0x30000)).is_none());
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_translated_byte_buffer() {
    let (mut addr_space, _base, _size) = setup_test_addr_space();
    let vaddr = GuestPhysAddr::from_usize(0x19000);
    let map_alloc_size = 0x2000;
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    // Request 0x1100 bytes so the buffer spans a page boundary and comes
    // back as more than one segment.
    let buffer_size = 0x1100;
    addr_space
        .map_alloc(vaddr, map_alloc_size, flags, true)
        .unwrap();
    let mut buffer = addr_space
        .translated_byte_buffer(vaddr, buffer_size)
        .expect("Failed to get byte buffer");
    // Write a per-segment index pattern through the mutable view.
    for buffer_segment in buffer.iter_mut() {
        for (i, byte) in buffer_segment.iter_mut().enumerate() {
            *byte = (i % 0x100) as u8;
        }
    }
    // Read it back to confirm the writes landed in backing memory.
    // Verification only reads, so borrow immutably (the original used
    // iter_mut here for no reason).
    for buffer_segment in buffer.iter() {
        for (i, byte) in buffer_segment.iter().enumerate() {
            assert_eq!(*byte, (i % 0x100) as u8);
        }
    }
    // Requests larger than the mapped area must fail …
    assert!(
        addr_space
            .translated_byte_buffer(vaddr, map_alloc_size + 0x1000)
            .is_none()
    );
    // … as must requests starting at an unmapped address.
    let unmapped_vaddr = GuestPhysAddr::from_usize(0x1D000);
    assert!(
        addr_space
            .translated_byte_buffer(unmapped_vaddr, 0x100)
            .is_none()
    );
}
#[test]
#[axin(decorator(mock_hal_test))]
fn test_translate_and_get_limit() {
    let (mut aspace, _, _) = setup_test_addr_space();
    let gpa = GuestPhysAddr::from_usize(0x1A000);
    let area_len = 0x3000;
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    aspace.map_alloc(gpa, area_len, flags, true).unwrap();
    // Success returns the frame plus the size of the containing area.
    let (frame, limit) = aspace.translate_and_get_limit(gpa).unwrap();
    let pa = frame.as_usize();
    assert!(pa >= BASE_PADDR && pa < BASE_PADDR + MEMORY_LEN);
    assert_eq!(limit, area_len);
    // Unmapped (in-range) and out-of-range addresses both yield None.
    let unmapped = GuestPhysAddr::from_usize(0x1E000);
    assert!(aspace.translate_and_get_limit(unmapped).is_none());
    let outside = GuestPhysAddr::from_usize(0x30000);
    assert!(aspace.translate_and_get_limit(outside).is_none());
}