use alloc::{boxed::Box, sync::Arc};
use axalloc::{UsageKind, global_allocator};
use axerrno::{AxError, AxResult};
use axhal::{
mem::{phys_to_virt, virt_to_phys},
paging::{MappingFlags, PageSize, PageTable, PageTableMut},
};
use axsync::Mutex;
use enum_dispatch::enum_dispatch;
use memory_addr::{DynPageIter, PAGE_SIZE_4K, PhysAddr, VirtAddr, VirtAddrRange};
use memory_set::MappingBackend;
#[cfg(feature = "fs")]
pub mod cow;
#[cfg(feature = "fs")]
pub mod file;
pub mod linear;
pub mod shared;
pub use shared::SharedPages;
use crate::AddrSpace;
/// Returns how many `page_size`-sized pages fit in `size` bytes.
///
/// # Panics
///
/// Panics if `size` is not aligned to `page_size`.
fn divide_page(size: usize, page_size: PageSize) -> usize {
    assert!(page_size.is_aligned(size), "unaligned");
    // Page sizes are powers of two, so dividing is a right shift.
    let shift = (page_size as usize).trailing_zeros();
    size >> shift
}
/// Allocates one physically contiguous frame of `size` bytes from the global
/// allocator, optionally zero-filled.
///
/// # Errors
///
/// Returns [`AxError::NoMemory`] if the allocation fails.
fn alloc_frame(zeroed: bool, size: PageSize) -> AxResult<PhysAddr> {
    let bytes = size as usize;
    let raw = global_allocator()
        .alloc_pages(bytes / PAGE_SIZE_4K, bytes, UsageKind::VirtMem)
        .map_err(|_| AxError::NoMemory)?;
    let vaddr = VirtAddr::from(raw);
    if zeroed {
        // Freshly allocated pages may hold stale data; clear them on request.
        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, bytes) };
    }
    Ok(virt_to_phys(vaddr))
}
/// Returns a frame previously obtained from [`alloc_frame`] to the global
/// allocator. `align` must match the page size used at allocation time.
fn dealloc_frame(frame: PhysAddr, align: PageSize) {
    let bytes: usize = align.into();
    global_allocator().dealloc_pages(
        phys_to_virt(frame).as_usize(),
        bytes / PAGE_SIZE_4K,
        UsageKind::VirtMem,
    );
}
/// Builds an iterator over the `align`-sized pages covering `range`.
///
/// # Errors
///
/// Returns [`AxError::InvalidInput`] if the range cannot be iterated with the
/// given alignment.
fn pages_in(range: VirtAddrRange, align: PageSize) -> AxResult<DynPageIter<VirtAddr>> {
    match DynPageIter::new(range.start, range.end, align as usize) {
        Some(iter) => Ok(iter),
        None => Err(AxError::InvalidInput),
    }
}
/// Operations that every mapping backend must (or may) provide.
///
/// Dispatched over the [`Backend`] enum via `enum_dispatch`, so each method
/// here has a concrete implementation per variant.
#[enum_dispatch]
pub trait BackendOps {
    /// The page size this backend maps with.
    fn page_size(&self) -> PageSize;
    /// Maps `range` with `flags` into the given page table.
    fn map(&self, range: VirtAddrRange, flags: MappingFlags, pt: &mut PageTableMut) -> AxResult;
    /// Unmaps `range` from the given page table.
    fn unmap(&self, range: VirtAddrRange, pt: &mut PageTableMut) -> AxResult;
    /// Hook invoked when the protection flags of `range` change.
    ///
    /// Default: no-op. Backends that track per-page state override this.
    fn on_protect(
        &self,
        _range: VirtAddrRange,
        _new_flags: MappingFlags,
        _pt: &mut PageTableMut,
    ) -> AxResult {
        Ok(())
    }
    /// Eagerly populates pages in `range`.
    ///
    /// Returns the number of pages populated and an optional callback to run
    /// on the owning [`AddrSpace`] afterwards. Default: populates nothing.
    fn populate(
        &self,
        _range: VirtAddrRange,
        _flags: MappingFlags,
        _access_flags: MappingFlags,
        _pt: &mut PageTableMut,
    ) -> AxResult<(usize, Option<Box<dyn FnOnce(&mut AddrSpace)>>)> {
        Ok((0, None))
    }
    /// Clones the mapping of `range` from `old_pt` into `new_pt` (used when
    /// forking an address space), returning the backend for the new mapping.
    fn clone_map(
        &self,
        range: VirtAddrRange,
        flags: MappingFlags,
        old_pt: &mut PageTableMut,
        new_pt: &mut PageTableMut,
        new_aspace: &Arc<Mutex<AddrSpace>>,
    ) -> AxResult<Backend>;
}
/// The concrete mapping backends, dispatched through [`BackendOps`].
///
/// File-related variants (`Cow`, `File`) are only available with the `fs`
/// feature enabled.
#[derive(Clone)]
#[enum_dispatch(BackendOps)]
pub enum Backend {
    /// Direct linear mapping to a fixed physical range.
    Linear(linear::LinearBackend),
    /// Copy-on-write mapping.
    #[cfg(feature = "fs")]
    Cow(cow::CowBackend),
    /// Mapping backed by pages shared between address spaces.
    Shared(shared::SharedBackend),
    /// File-backed mapping.
    #[cfg(feature = "fs")]
    File(file::FileBackend),
}
/// Adapter that lets [`Backend`] plug into the generic `memory_set` machinery,
/// translating its `(start, size, bool)` interface onto [`BackendOps`].
impl MappingBackend for Backend {
    type Addr = VirtAddr;
    type Flags = MappingFlags;
    type PageTable = PageTable;

    /// Maps `[start, start + size)` with `flags`.
    ///
    /// Returns `true` on success; failures are logged and reported as `false`.
    fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool {
        let range = VirtAddrRange::from_start_size(start, size);
        match BackendOps::map(self, range, flags, &mut pt.modify()) {
            Ok(()) => true,
            Err(err) => {
                warn!("Failed to map area: {:?}", err);
                false
            }
        }
    }

    /// Unmaps `[start, start + size)`.
    ///
    /// Returns `true` on success; failures are logged and reported as `false`.
    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool {
        let range = VirtAddrRange::from_start_size(start, size);
        match BackendOps::unmap(self, range, &mut pt.modify()) {
            Ok(()) => true,
            Err(err) => {
                warn!("Failed to unmap area: {:?}", err);
                false
            }
        }
    }

    /// Changes the protection flags of `[start, start + size)`.
    ///
    /// Returns `true` on success; failures are logged and reported as `false`.
    fn protect(
        &self,
        start: Self::Addr,
        size: usize,
        new_flags: Self::Flags,
        pt: &mut Self::PageTable,
    ) -> bool {
        // NOTE(review): unlike `map`/`unmap`, this goes straight to the page
        // table and does not invoke `BackendOps::on_protect`; presumably the
        // hook is driven by the caller — confirm.
        match pt.modify().protect_region(start, size, new_flags) {
            Ok(_) => true,
            Err(err) => {
                // Consistency: `map`/`unmap` log failures; previously this
                // silently discarded the error via `.is_ok()`.
                warn!("Failed to protect area: {:?}", err);
                false
            }
        }
    }
}