#![no_std]
#[macro_use]
extern crate log;
extern crate alloc;
mod aspace;
mod backend;
use axerrno::{AxError, AxResult};
use axhal::{
mem::{MemRegionFlags, phys_to_virt},
paging::MappingFlags,
};
use kspin::SpinNoIrq;
use lazyinit::LazyInit;
use memory_addr::{MemoryAddr, PhysAddr, VirtAddr};
pub use self::{aspace::AddrSpace, backend::Backend};
static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
/// Translates `axhal` memory-region flags into page-table [`MappingFlags`].
///
/// Only the five flags listed in the table below have a mapping-flag
/// counterpart; any other bits in `f` are silently dropped.
fn reg_flag_to_map_flag(f: MemRegionFlags) -> MappingFlags {
    // One-to-one translation table between the two flag vocabularies.
    const FLAG_PAIRS: &[(MemRegionFlags, MappingFlags)] = &[
        (MemRegionFlags::READ, MappingFlags::READ),
        (MemRegionFlags::WRITE, MappingFlags::WRITE),
        (MemRegionFlags::EXECUTE, MappingFlags::EXECUTE),
        (MemRegionFlags::DEVICE, MappingFlags::DEVICE),
        (MemRegionFlags::UNCACHED, MappingFlags::UNCACHED),
    ];
    FLAG_PAIRS
        .iter()
        .filter(|(reg, _)| f.contains(*reg))
        .fold(MappingFlags::empty(), |acc, (_, map)| acc | *map)
}
#[cfg(feature = "copy")]
/// Creates a new, empty address space for a user task covering
/// `[base, base + size)`.
///
/// On most architectures the kernel's mappings are copied into the new page
/// table; aarch64 and loongarch64 are skipped — presumably because they use
/// a separate kernel page table register, so the copy is unnecessary
/// (NOTE(review): confirm against the axhal port).
#[cfg(feature = "copy")]
pub fn new_user_aspace(base: VirtAddr, size: usize) -> AxResult<AddrSpace> {
    let mut user_aspace = AddrSpace::new_empty(base, size)?;
    let copy_kernel_mappings = !cfg!(any(target_arch = "aarch64", target_arch = "loongarch64"));
    if copy_kernel_mappings {
        user_aspace.copy_mappings_from(&kernel_aspace().lock())?;
    }
    Ok(user_aspace)
}
/// Builds the kernel address space: an empty space spanning the range
/// reported by `axhal::mem::kernel_aspace()`, with every physical memory
/// region linearly mapped at its `phys_to_virt` address.
///
/// Region bounds are expanded to 4 KiB page boundaries before mapping.
pub fn new_kernel_aspace() -> AxResult<AddrSpace> {
    let (base, size) = axhal::mem::kernel_aspace();
    let mut aspace = AddrSpace::new_empty(base, size)?;
    for region in axhal::mem::memory_regions() {
        let pa_start = region.paddr.align_down_4k();
        let pa_end = (region.paddr + region.size).align_up_4k();
        let len = pa_end - pa_start;
        let flags = reg_flag_to_map_flag(region.flags);
        aspace.map_linear(phys_to_virt(pa_start), pa_start, len, flags)?;
    }
    Ok(aspace)
}
/// Returns the global kernel address space, wrapped in a spinlock.
///
/// # Panics
///
/// Panics if called before [`init_memory_management`] has initialized the
/// `KERNEL_ASPACE` static (the `LazyInit` deref fails when uninitialized).
pub fn kernel_aspace() -> &'static SpinNoIrq<AddrSpace> {
    &KERNEL_ASPACE
}
pub fn kernel_page_table_root() -> PhysAddr {
KERNEL_ASPACE.lock().page_table_root()
}
/// Initializes virtual memory management on the primary CPU.
///
/// Builds the kernel address space, publishes it in the `KERNEL_ASPACE`
/// static, then installs its page-table root in hardware and flushes the
/// whole TLB.
pub fn init_memory_management() {
    info!("Initialize virtual memory management...");
    let aspace = new_kernel_aspace().expect("failed to initialize kernel address space");
    debug!("kernel address space init OK: {aspace:#x?}");
    KERNEL_ASPACE.init_once(SpinNoIrq::new(aspace));
    // SAFETY: the root written here comes from the freshly built kernel
    // address space, which maps all physical memory regions linearly.
    unsafe {
        axhal::asm::write_kernel_page_table(kernel_page_table_root());
        axhal::asm::flush_tlb(None);
    }
}
/// Initializes virtual memory management on a secondary CPU.
///
/// The kernel address space is already built by the primary CPU
/// ([`init_memory_management`]); this only installs its page-table root on
/// the current CPU and flushes the local TLB.
pub fn init_memory_management_secondary() {
    // SAFETY: relies on the primary CPU having initialized KERNEL_ASPACE
    // first; kernel_page_table_root() panics otherwise.
    unsafe {
        axhal::asm::write_kernel_page_table(kernel_page_table_root());
        axhal::asm::flush_tlb(None);
    }
}
/// Maps the MMIO region `[addr, addr + size)` into the kernel address space
/// and returns the corresponding virtual address (`phys_to_virt(addr)`).
///
/// The range is widened to whole 4 KiB pages before mapping. The mapping is
/// linear with `DEVICE | READ | WRITE` flags.
///
/// # Errors
///
/// Propagates any [`map_linear`](AddrSpace) failure other than
/// [`AxError::AlreadyExists`], and any failure from the subsequent
/// `protect` call.
pub fn iomap(addr: PhysAddr, size: usize) -> AxResult<VirtAddr> {
    let vaddr = phys_to_virt(addr);
    let vaddr_aligned = vaddr.align_down_4k();
    let paddr_aligned = addr.align_down_4k();
    // Page-rounded length covering the whole requested byte range.
    let len = (addr + size).align_up_4k() - paddr_aligned;
    let flags = MappingFlags::DEVICE | MappingFlags::READ | MappingFlags::WRITE;

    let mut aspace = kernel_aspace().lock();
    match aspace.map_linear(vaddr_aligned, paddr_aligned, len, flags) {
        // An overlapping iomap may have mapped this range already; fall
        // through and (re)apply the protection flags below.
        Ok(_) | Err(AxError::AlreadyExists) => {}
        Err(e) => return Err(e),
    }
    aspace.protect(vaddr_aligned, len, flags)?;
    Ok(vaddr)
}