use alloc::sync::Arc;
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::space::{
addr::Uaddr,
mm::{
MmError, MmStruct, MmapPage, Result, VmFlags,
pgtabledef::{
DEVICE_NONE, DEVICE_RDONLY, DEVICE_READWRITE, PAGE_EXECONLY, PAGE_NONE, PAGE_RDONLY,
PAGE_RDONLY_EXEC, PAGE_READWRITE, PAGE_READWRITE_EXEC, PAGE_SIZE, SECTION_DEVICE_NONE,
SECTION_DEVICE_RDONLY, SECTION_DEVICE_READWRITE, SECTION_PAGE_EXECONLY,
SECTION_PAGE_NONE, SECTION_PAGE_RDONLY, SECTION_PAGE_RDONLY_EXEC,
SECTION_PAGE_READWRITE, SECTION_PAGE_READWRITE_EXEC, SECTION_SIZE,
},
tlb::tlb_gather_mmu,
},
uaccess::USER_SPACE_SIZE,
};
const PROTECTION_MAP: [usize; 28] = [
PAGE_NONE, PAGE_RDONLY, PAGE_NONE, PAGE_READWRITE, PAGE_EXECONLY, PAGE_RDONLY_EXEC, PAGE_NONE, PAGE_READWRITE_EXEC, DEVICE_NONE, DEVICE_RDONLY, DEVICE_NONE, DEVICE_READWRITE, DEVICE_NONE, DEVICE_NONE, DEVICE_NONE, DEVICE_NONE, SECTION_DEVICE_NONE, SECTION_PAGE_RDONLY, SECTION_DEVICE_NONE, SECTION_PAGE_READWRITE, SECTION_PAGE_EXECONLY, SECTION_PAGE_RDONLY_EXEC, SECTION_PAGE_NONE, SECTION_PAGE_READWRITE_EXEC, SECTION_PAGE_NONE, SECTION_DEVICE_RDONLY, SECTION_DEVICE_NONE, SECTION_DEVICE_READWRITE, ];
/// Translates VM flags into the hardware protection value used for
/// page-table entries, via a table lookup keyed on the protection bits.
///
/// NOTE(review): indexing `PROTECTION_MAP` is only panic-free if
/// `VM_PROT_MASK` confines the masked bits to `0..28` — the mask value is
/// declared elsewhere; confirm it covers exactly the table's index space.
const fn to_vm_prot(vm_flags: VmFlags) -> usize {
    // Only the protection-relevant bits of the flags select the entry.
    let idx = vm_flags.bits() & VmFlags::VM_PROT_MASK.bits();
    PROTECTION_MAP[idx]
}
/// A contiguous region of user virtual address space, backed by a single
/// `MmapPage` allocation and owned by one address space (`MmStruct`).
pub(crate) struct VmAreaStruct {
    // Base user virtual address; aligned to `vm_page_size` (checked in `create`).
    vm_start: Uaddr,
    // Length in bytes; a multiple of `vm_page_size` (checked in `create`).
    vm_size: usize,
    // Hardware protection value derived from `vm_flags` via `PROTECTION_MAP`.
    vm_prot: usize,
    vm_flags: VmFlags,
    // Mapping granularity: SECTION_SIZE when VM_HUGE is set, PAGE_SIZE otherwise.
    vm_page_size: usize,
    // Cached neighbour boundaries. Presumably maintained by the owning
    // MmStruct as areas are inserted/removed; atomics permit updating them
    // through a shared reference — TODO confirm against the mm code.
    vm_prev_end: AtomicUsize,
    vm_prev_start: AtomicUsize,
    // Start of the following area; initialised to USER_SPACE_SIZE,
    // i.e. "no next area".
    vm_next_start: AtomicUsize,
    // Backing physical memory; its size must equal `vm_size`.
    mmap_page: Arc<MmapPage>,
    // Owning address space, needed for page-table teardown in `Drop`.
    mm: Arc<MmStruct>,
}
impl VmFlags {
    /// Mapping granularity implied by these flags: a section for huge
    /// mappings (`VM_HUGE`), a single page otherwise.
    #[inline(always)]
    pub fn page_size(&self) -> usize {
        match self.contains(VmFlags::VM_HUGE) {
            true => SECTION_SIZE,
            false => PAGE_SIZE,
        }
    }
}
impl VmAreaStruct {
    /// Builds a VMA covering `[vm_start, vm_start + vm_size)`.
    ///
    /// Validation performed, in order:
    /// * `vm_start` and `vm_size` must be aligned to the mapping
    ///   granularity implied by the flags (section-sized for `VM_HUGE`,
    ///   page-sized otherwise) — otherwise `MmError::EAligned`.
    /// * The flags must translate to an accessible protection; the
    ///   inaccessible `*_NONE` encodings are rejected with
    ///   `MmError::EVmFlags`.
    /// * The backing `mmap_page` must be exactly `vm_size` bytes —
    ///   otherwise `MmError::ESpace`.
    pub(super) fn create(
        vm_start: Uaddr,
        vm_size: usize,
        vm_flags: VmFlags,
        mm: Arc<MmStruct>,
        mmap_page: Arc<MmapPage>,
    ) -> Result<Self> {
        let page_size = vm_flags.page_size();
        // Both the base address and the length must be multiples of the
        // granularity (page_size is a power of two, so masking works).
        if (vm_start.to_value() & (page_size - 1)) != 0 || (vm_size & (page_size - 1)) != 0 {
            return Err(MmError::EAligned);
        }
        let vm_prot = to_vm_prot(vm_flags);
        // A *_NONE protection grants no access at all; treat such a
        // request as invalid flags rather than mapping a dead region.
        if vm_prot == PAGE_NONE
            || vm_prot == DEVICE_NONE
            || vm_prot == SECTION_DEVICE_NONE
            || vm_prot == SECTION_PAGE_NONE
        {
            return Err(MmError::EVmFlags);
        }
        // The VMA must exactly cover its backing allocation.
        if vm_size != mmap_page.get_size() {
            return Err(MmError::ESpace);
        }
        Ok(Self {
            vm_start,
            vm_size,
            vm_prot,
            vm_flags,
            vm_page_size: page_size,
            // Neighbour hints start as "no neighbours": the previous area
            // ends at 0 and the next one starts at the top of user space.
            vm_prev_end: AtomicUsize::new(0),
            vm_prev_start: AtomicUsize::new(0),
            vm_next_start: AtomicUsize::new(USER_SPACE_SIZE),
            mmap_page,
            mm,
        })
    }
    /// Base user virtual address of the area.
    #[inline(always)]
    pub(crate) fn get_vm_start(&self) -> Uaddr {
        self.vm_start
    }
    /// Size of the area in bytes.
    #[inline(always)]
    pub(crate) fn get_vm_size(&self) -> usize {
        self.vm_size
    }
    /// Hardware protection value for this area's page-table entries.
    #[inline(always)]
    pub(crate) fn get_vm_prot(&self) -> usize {
        self.vm_prot
    }
    /// Misspelled alias of [`Self::get_vm_prot`] ("port" was a typo for
    /// "prot"); kept so existing callers continue to compile. Prefer
    /// `get_vm_prot` in new code.
    #[inline(always)]
    pub(crate) fn get_vm_port(&self) -> usize {
        self.get_vm_prot()
    }
    /// Flags this area was created with.
    #[inline(always)]
    pub(crate) fn get_vm_flags(&self) -> VmFlags {
        self.vm_flags
    }
    /// Mapping granularity (PAGE_SIZE or SECTION_SIZE) of this area.
    #[inline(always)]
    pub(crate) fn get_vm_page_size(&self) -> usize {
        self.vm_page_size
    }
    /// End address hint of the preceding area (0 when none).
    #[inline(always)]
    pub(crate) fn get_prev_end(&self) -> usize {
        self.vm_prev_end.load(Ordering::Relaxed)
    }
    /// Updates the end address hint of the preceding area.
    #[inline(always)]
    pub(crate) fn set_prev_end(&self, end: usize) {
        self.vm_prev_end.store(end, Ordering::Relaxed);
    }
    /// Start address hint of the preceding area (0 when none).
    #[inline(always)]
    pub(crate) fn get_prev_start(&self) -> usize {
        self.vm_prev_start.load(Ordering::Relaxed)
    }
    /// Updates the start address hint of the preceding area.
    #[inline(always)]
    pub(crate) fn set_prev_start(&self, start: usize) {
        self.vm_prev_start.store(start, Ordering::Relaxed);
    }
    /// Start address hint of the following area (USER_SPACE_SIZE when none).
    #[inline(always)]
    pub fn get_next_start(&self) -> usize {
        self.vm_next_start.load(Ordering::Relaxed)
    }
    /// Updates the start address hint of the following area.
    #[inline(always)]
    pub(crate) fn set_next_start(&self, start: usize) {
        self.vm_next_start.store(start, Ordering::Relaxed);
    }
    /// Backing physical allocation of this area.
    #[inline(always)]
    pub(crate) fn get_mmap_page(&self) -> &MmapPage {
        &self.mmap_page
    }
    /// Address space this area belongs to.
    #[inline(always)]
    pub(crate) fn get_mm(&self) -> &MmStruct {
        &self.mm
    }
}
impl Drop for VmAreaStruct {
    /// Tears down the area's translations when the VMA is destroyed:
    /// unmap the whole range, release the now-unused intermediate page
    /// tables, then complete the batched TLB operation.
    fn drop(&mut self) {
        let mm = self.get_mm();
        let pgd = &mm.pgd;
        let start = self.vm_start;
        let end = self.vm_start + self.vm_size;
        // Gather all invalidations for [start, end) into one TLB batch
        // instead of flushing per entry.
        let mut tlb = tlb_gather_mmu(mm, start, end);
        // Order matters: leaf entries must be cleared before the tables
        // that held them can be freed.
        pgd.pgd_unmap(start, end, &mut tlb, self);
        pgd.pgd_free_pgtables(start, end, &mut tlb, self);
        // Completes the batch — presumably performs the deferred TLB
        // flush and page frees; implementation lives in the tlb module.
        tlb.tlb_finish_mmu(start, end);
    }
}