use crate::space::{
addr::{Paddr, Uaddr, Vaddr},
kalloc::{GfpFlags, alloc_page, free_page, kfree, kmalloc},
mm::{
MmError, MmStruct, Result, VmFlags,
pgtabledef::{
PAGE_PTRS, PAGE_SHIFT, PAGE_SIZE, PAGE_TYPE_VALID, PGD_MASK, PGD_PTRS, PGD_SHIFT,
PGD_SIZE, PGD_TYPE_TABLE, PGTABLE_ADDR_MASK, PMD_MASK, PMD_PTRS, PMD_SHIFT, PMD_SIZE,
PMD_TYPE_TABLE, PUD_MASK, PUD_PTRS, PUD_SHIFT, PUD_SIZE, PUD_TYPE_TABLE,
SECT_TYPE_VALID, SECTION_ADDR_MASK,
},
tlb::MmuGather,
vmarea::VmAreaStruct,
},
};
/// Top-level page-global-directory table, identified by the kernel virtual
/// address of its backing page (allocated via `alloc_page`, freed on drop).
pub(super) struct Pgd(Vaddr);
/// Second-level (PUD) table, identified by its kernel virtual address.
#[derive(Clone, Copy)]
struct Pud(pub Vaddr);
/// Third-level (PMD) table, identified by its kernel virtual address.
#[derive(Clone, Copy)]
struct Pmd(pub Vaddr);
/// Last-level (PTE) table, identified by its kernel virtual address.
#[derive(Clone, Copy)]
struct Pte(Vaddr);
/// Virtual address of a single slot *inside* a PGD table.
struct PgdEntry(Vaddr);
/// Virtual address of a single slot inside a PUD table.
struct PudEntry(Vaddr);
/// Virtual address of a single slot inside a PMD table.
struct PmdEntry(Vaddr);
/// Virtual address of a single slot inside a PTE table.
struct PteEntry(Vaddr);
impl Drop for Pgd {
    /// Returns the page backing this PGD to the page allocator when the
    /// owning `MmStruct` goes away. Lower-level tables are freed separately
    /// through the `pgd_free_pgtables` walk, not here.
    fn drop(&mut self) {
        free_page(self.0);
    }
}
impl Pgd {
#[allow(unused)]
#[inline(always)]
pub(super) fn to_phys(&self) -> Paddr {
self.0.to_phys()
}
pub(super) fn create() -> Result<Self> {
let pgd = alloc_page(GfpFlags::Clean).map_err(|_| MmError::ENomem)?;
Ok(Pgd(pgd))
}
#[inline(always)]
fn pgd_entries(&self, uaddr: Uaddr) -> PgdEntry {
let start = self.0.to_value();
let index = (uaddr.to_value() >> PGD_SHIFT) & (PGD_PTRS - 1);
let addr = start + index * size_of::<usize>();
PgdEntry(Vaddr::from(addr))
}
pub(super) fn pgd_unmap(
&self,
start: Uaddr,
end: Uaddr,
tlb: &mut MmuGather,
vma: &VmAreaStruct,
) {
tlb.tlb_start_vma(vma);
let mut this = start;
loop {
let pgd_entry = self.pgd_entries(this);
let next = entry_addr_end(this, end, PGD_SIZE, PGD_MASK);
if let Some(pud) = pgd_entry.get_pud() {
pud.pud_unmap(this, next, tlb, vma);
}
this = next;
if this >= end {
break;
}
}
tlb.tlb_end_vma(vma);
}
pub(super) fn pgd_free_pgtables(
&self,
start: Uaddr,
end: Uaddr,
tlb: &mut MmuGather,
vma: &VmAreaStruct,
) {
let floor = vma.get_prev_end();
let ceiling = vma.get_next_start();
let mut this = start;
loop {
let pgd_entry = self.pgd_entries(this);
let next = entry_addr_end(this, end, PGD_SIZE, PGD_MASK);
if let Some(pud) = pgd_entry.get_pud() {
pud.pud_free_pgtables(this, next, tlb, vma, floor, ceiling);
let this_f = this & PGD_MASK;
if this_f.to_value() >= floor {
let ceiling_c = ceiling & PGD_MASK;
if ceiling_c != 0 && end.to_value() - 1 < ceiling {
assert!(pgd_entry.clear(vma));
tlb.pud_free_tlb(this);
}
}
}
this = next;
if this >= end {
break;
}
}
debug_assert_eq!(floor, vma.get_prev_end());
debug_assert_eq!(ceiling, vma.get_next_start());
}
}
impl Pud {
    /// Locates the PUD slot covering `uaddr`.
    #[inline(always)]
    fn pud_entries(&self, uaddr: Uaddr) -> PudEntry {
        let slot = (uaddr.to_value() >> PUD_SHIFT) & (PUD_PTRS - 1);
        PudEntry(Vaddr::from(self.0.to_value() + slot * size_of::<usize>()))
    }

    /// Clears every mapping in `[start, end)` under this PUD, descending
    /// into each present PMD table.
    fn pud_unmap(&self, start: Uaddr, end: Uaddr, tlb: &mut MmuGather, vma: &VmAreaStruct) {
        let mut cur = start;
        loop {
            let entry = self.pud_entries(cur);
            let bound = entry_addr_end(cur, end, PUD_SIZE, PUD_MASK);
            if let Some(pmd) = entry.get_pmd() {
                pmd.pmd_unmap(cur, bound, tlb, vma);
            }
            cur = bound;
            if cur >= end {
                break;
            }
        }
    }

    /// Frees empty PMD tables in `[start, end)`, bounded by `floor`/`ceiling`
    /// so tables still shared with neighbouring mappings survive.
    fn pud_free_pgtables(
        &self,
        start: Uaddr,
        end: Uaddr,
        tlb: &mut MmuGather,
        vma: &VmAreaStruct,
        floor: usize,
        ceiling: usize,
    ) {
        let mut cur = start;
        loop {
            let entry = self.pud_entries(cur);
            let bound = entry_addr_end(cur, end, PUD_SIZE, PUD_MASK);
            if let Some(pmd) = entry.get_pmd() {
                pmd.pmd_free_pgtables(cur, bound, tlb, vma, floor, ceiling);
                let above_floor = (cur & PUD_MASK).to_value() >= floor;
                let below_ceiling =
                    (ceiling & PUD_MASK) != 0 && end.to_value() - 1 < ceiling;
                if above_floor && below_ceiling {
                    assert!(entry.clear(vma));
                    tlb.pmd_free_tlb(cur);
                }
            }
            cur = bound;
            if cur >= end {
                break;
            }
        }
    }
}
impl Pmd {
    /// Locates the PMD slot covering `uaddr`.
    #[inline(always)]
    fn pmd_entries(&self, uaddr: Uaddr) -> PmdEntry {
        let slot = (uaddr.to_value() >> PMD_SHIFT) & (PMD_PTRS - 1);
        PmdEntry(Vaddr::from(self.0.to_value() + slot * size_of::<usize>()))
    }

    /// Clears mappings in `[start, end)`. Huge-page VMAs map directly at
    /// PMD level, so their entries are cleared here; otherwise descend into
    /// the PTE table.
    fn pmd_unmap(&self, start: Uaddr, end: Uaddr, tlb: &mut MmuGather, vma: &VmAreaStruct) {
        let mut cur = start;
        loop {
            let entry = self.pmd_entries(cur);
            let bound = entry_addr_end(cur, end, PMD_SIZE, PMD_MASK);
            if vma.get_vm_flags().contains(VmFlags::VM_HUGE) {
                // Leaf section mapping: clear and flush the whole PMD span.
                if entry.clear(vma) {
                    tlb.tlb_flush_pmd_range(cur, PMD_SIZE);
                }
            } else if let Some(pte) = entry.get_pte() {
                pte.pte_unmap(cur, bound, tlb, vma);
            }
            cur = bound;
            if cur >= end {
                break;
            }
        }
    }

    /// Frees PTE tables in `[start, end)` within `floor`/`ceiling`. Huge
    /// VMAs have no PTE tables below the PMD, so nothing to free for them.
    fn pmd_free_pgtables(
        &self,
        start: Uaddr,
        end: Uaddr,
        tlb: &mut MmuGather,
        vma: &VmAreaStruct,
        floor: usize,
        ceiling: usize,
    ) {
        let mut cur = start;
        loop {
            let entry = self.pmd_entries(cur);
            let bound = entry_addr_end(cur, end, PMD_SIZE, PMD_MASK);
            if !vma.get_vm_flags().contains(VmFlags::VM_HUGE) {
                let above_floor = (cur & PMD_MASK).to_value() >= floor;
                let below_ceiling =
                    (ceiling & PMD_MASK) != 0 && end.to_value() - 1 < ceiling;
                if above_floor && below_ceiling && entry.clear(vma) {
                    tlb.pte_free_tlb(cur);
                }
            }
            cur = bound;
            if cur >= end {
                break;
            }
        }
    }
}
impl Pte {
    /// Locates the PTE slot covering `uaddr`.
    #[inline(always)]
    fn pte_entries(&self, uaddr: Uaddr) -> PteEntry {
        let slot = (uaddr.to_value() >> PAGE_SHIFT) & (PAGE_PTRS - 1);
        PteEntry(Vaddr::from(self.0.to_value() + slot * size_of::<usize>()))
    }

    /// Clears each present page mapping in `[start, end)`, one PAGE_SIZE
    /// step at a time, queueing a TLB flush per cleared entry.
    fn pte_unmap(&self, start: Uaddr, end: Uaddr, tlb: &mut MmuGather, vma: &VmAreaStruct) {
        let mut cur = start;
        loop {
            if self.pte_entries(cur).clear(vma) {
                tlb.tlb_flush_pte_range(cur, PAGE_SIZE);
            }
            cur += PAGE_SIZE;
            if cur >= end {
                break;
            }
        }
    }
}
impl PgdEntry {
    /// Writes a table descriptor pointing at `pud` into this slot.
    #[inline(always)]
    fn set_pud(&self, pud: Pud) {
        let slot = self.0.to_value() as *mut usize;
        let descriptor = pud.0.to_phys().to_value() | PGD_TYPE_TABLE;
        // SAFETY: `slot` addresses a live slot inside this PGD page.
        unsafe {
            *slot = descriptor;
        }
    }

    /// Returns the PUD under this slot, allocating (and installing) a fresh
    /// zeroed table if the slot is empty. Serialized by the mm entries lock.
    fn get_pud_alloc(&self, vma: &VmAreaStruct) -> Result<Pud> {
        let slot = self.0.to_value() as *mut usize;
        let mut guard = vma.get_mm().entries_lock.lock();
        // SAFETY: slot is a valid PGD entry; the lock serializes access.
        let raw = unsafe { *slot };
        let pud = if raw == 0 {
            let table = kmalloc(PAGE_SIZE, GfpFlags::Clean).map_err(|_| MmError::ENomem)?;
            let pud = Pud(table);
            self.set_pud(pud);
            vma.get_mm().inc_nr_pgtalbes();
            pud
        } else {
            Pud(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap())
        };
        *guard = 1;
        Ok(pud)
    }

    /// Reads the slot; `None` when empty, else the mapped PUD table.
    fn get_pud(&self) -> Option<Pud> {
        // SAFETY: slot addresses a valid PGD entry.
        let raw = unsafe { *(self.0.to_value() as *const usize) };
        if raw == 0 {
            return None;
        }
        debug_assert_eq!(raw & PGD_TYPE_TABLE, PGD_TYPE_TABLE);
        Some(Pud(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap()))
    }

    /// Frees the PUD table this slot points to and zeroes the slot.
    /// Returns `false` when the slot was already empty.
    fn clear(&self, vma: &VmAreaStruct) -> bool {
        match self.get_pud() {
            Some(pud) => {
                kfree(pud.0);
                vma.get_mm().dec_nr_pgtalbes();
                // SAFETY: slot addresses a valid PGD entry owned by us.
                unsafe {
                    *(self.0.to_value() as *mut usize) = 0;
                }
                true
            }
            None => false,
        }
    }
}
impl PudEntry {
    /// Writes a table descriptor pointing at `pmd` into this slot.
    #[inline(always)]
    fn set_pmd(&self, pmd: Pmd) {
        let slot = self.0.to_value() as *mut usize;
        let descriptor = pmd.0.to_phys().to_value() | PUD_TYPE_TABLE;
        // SAFETY: `slot` addresses a live slot inside this PUD table.
        unsafe {
            *slot = descriptor;
        }
    }

    /// Returns the PMD under this slot, allocating (and installing) a fresh
    /// zeroed table if the slot is empty. Serialized by the mm entries lock.
    fn get_pmd_alloc(&self, vma: &VmAreaStruct) -> Result<Pmd> {
        let slot = self.0.to_value() as *mut usize;
        let mut guard = vma.get_mm().entries_lock.lock();
        // SAFETY: slot is a valid PUD entry; the lock serializes access.
        let raw = unsafe { *slot };
        let pmd = if raw == 0 {
            let table = kmalloc(PAGE_SIZE, GfpFlags::Clean).map_err(|_| MmError::ENomem)?;
            let pmd = Pmd(table);
            self.set_pmd(pmd);
            vma.get_mm().inc_nr_pgtalbes();
            pmd
        } else {
            Pmd(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap())
        };
        *guard = 1;
        Ok(pmd)
    }

    /// Reads the slot; `None` when empty, else the mapped PMD table.
    fn get_pmd(&self) -> Option<Pmd> {
        // SAFETY: slot addresses a valid PUD entry.
        let raw = unsafe { *(self.0.to_value() as *const usize) };
        if raw == 0 {
            return None;
        }
        debug_assert_eq!(raw & PUD_TYPE_TABLE, PUD_TYPE_TABLE);
        Some(Pmd(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap()))
    }

    /// Frees the PMD table this slot points to and zeroes the slot.
    /// Returns `false` when the slot was already empty.
    fn clear(&self, vma: &VmAreaStruct) -> bool {
        match self.get_pmd() {
            Some(pmd) => {
                kfree(pmd.0);
                vma.get_mm().dec_nr_pgtalbes();
                // SAFETY: slot addresses a valid PUD entry owned by us.
                unsafe {
                    *(self.0.to_value() as *mut usize) = 0;
                }
                true
            }
            None => false,
        }
    }
}
impl PmdEntry {
    /// Writes a table descriptor pointing at `pte` into this slot.
    #[inline(always)]
    fn set_pte(&self, pte: Pte) {
        let slot = self.0.to_value() as *mut usize;
        let descriptor = pte.0.to_phys().to_value() | PMD_TYPE_TABLE;
        // SAFETY: `slot` addresses a live slot inside this PMD table.
        unsafe {
            *slot = descriptor;
        }
    }

    /// Returns the PTE table under this slot, allocating (and installing) a
    /// fresh zeroed one if the slot is empty. Serialized by the mm entries
    /// lock.
    fn get_pte_alloc(&self, vma: &VmAreaStruct) -> Result<Pte> {
        let slot = self.0.to_value() as *mut usize;
        let mut guard = vma.get_mm().entries_lock.lock();
        // SAFETY: slot is a valid PMD entry; the lock serializes access.
        let raw = unsafe { *slot };
        let pte = if raw == 0 {
            let table = kmalloc(PAGE_SIZE, GfpFlags::Clean).map_err(|_| MmError::ENomem)?;
            let pte = Pte(table);
            self.set_pte(pte);
            vma.get_mm().inc_nr_pgtalbes();
            pte
        } else {
            Pte(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap())
        };
        *guard = 1;
        Ok(pte)
    }

    /// Installs a huge (section) mapping for `uaddr` directly in this slot.
    #[inline(always)]
    fn set_huge(&self, vma: &VmAreaStruct, uaddr: Uaddr) -> Result<()> {
        set_huge_or_page(self.0.to_value(), vma, uaddr)
    }

    /// Reads the slot as a table descriptor; `None` when empty.
    fn get_pte(&self) -> Option<Pte> {
        // SAFETY: slot addresses a valid PMD entry.
        let raw = unsafe { *(self.0.to_value() as *const usize) };
        if raw == 0 {
            return None;
        }
        debug_assert_eq!(raw & PMD_TYPE_TABLE, PMD_TYPE_TABLE);
        Some(Pte(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap()))
    }

    /// Reads the slot as a huge (section) descriptor; `None` when empty.
    fn get_huge(&self) -> Option<Vaddr> {
        // SAFETY: slot addresses a valid PMD entry.
        let raw = unsafe { *(self.0.to_value() as *const usize) };
        if raw == 0 {
            return None;
        }
        debug_assert_eq!(raw & SECT_TYPE_VALID, SECT_TYPE_VALID);
        Some(Paddr::from(raw & SECTION_ADDR_MASK).to_virt().unwrap())
    }

    /// Zeroes this slot. For huge VMAs there is no lower table to release;
    /// otherwise the PTE table it pointed at is freed. Returns `false` when
    /// the slot was already empty.
    fn clear(&self, vma: &VmAreaStruct) -> bool {
        if vma.get_vm_flags().contains(VmFlags::VM_HUGE) {
            if self.get_huge().is_none() {
                return false;
            }
        } else {
            match self.get_pte() {
                Some(pte) => kfree(pte.0),
                None => return false,
            }
        }
        vma.get_mm().dec_nr_pgtalbes();
        // SAFETY: slot addresses a valid PMD entry owned by us.
        unsafe {
            *(self.0.to_value() as *mut usize) = 0;
        }
        true
    }
}
impl PteEntry {
    /// Installs a single-page mapping for `uaddr` in this slot.
    #[inline(always)]
    fn set_page(&self, vma: &VmAreaStruct, uaddr: Uaddr) -> Result<()> {
        set_huge_or_page(self.0.to_value(), vma, uaddr)
    }

    /// Reads the slot; `None` when empty, else the mapped page's kernel
    /// virtual address.
    fn get_page(&self) -> Option<Vaddr> {
        // SAFETY: slot addresses a valid PTE entry.
        let raw = unsafe { *(self.0.to_value() as *const usize) };
        if raw == 0 {
            return None;
        }
        debug_assert_eq!(raw & PAGE_TYPE_VALID, PAGE_TYPE_VALID);
        Some(Paddr::from(raw & PGTABLE_ADDR_MASK).to_virt().unwrap())
    }

    /// Zeroes this slot, decrementing the mm's table counter. Returns
    /// `false` when the slot was already empty. The mapped page itself is
    /// not freed here.
    fn clear(&self, vma: &VmAreaStruct) -> bool {
        if self.get_page().is_none() {
            return false;
        }
        vma.get_mm().dec_nr_pgtalbes();
        // SAFETY: slot addresses a valid PTE entry owned by us.
        unsafe {
            *(self.0.to_value() as *mut usize) = 0;
        }
        true
    }
}
/// Resolves the backing page for `uaddr` inside `vma` and writes the final
/// descriptor into the slot at `vaddr`. The page index is derived from the
/// offset of `uaddr` within the VMA, scaled by the VMA's page granule.
fn set_huge_or_page(vaddr: usize, vma: &VmAreaStruct, uaddr: Uaddr) -> Result<()> {
    let page_index = (uaddr - vma.get_vm_start()) / vma.get_vm_page_size();
    let backing = vma.get_mmap_page().get_vaddr(page_index)?;
    set_last(vaddr as *mut usize, vma, backing, vma.get_vm_port());
    Ok(())
}
/// Writes the final-level descriptor: the physical address of `last` OR'd
/// with the permission/type bits in `port`.
///
/// The target slot must be empty (checked in debug builds) — installing
/// over a live entry without clearing it first is a bug. Also bumps the
/// mm's page-table counter, mirroring the decrement in the `*Entry::clear`
/// paths.
#[inline(always)]
fn set_last(vaddr: *mut usize, vma: &VmAreaStruct, last: Vaddr, port: usize) {
    vma.get_mm().inc_nr_pgtalbes();
    // SAFETY assumption: caller passes a pointer to a valid, lock-protected
    // page-table slot — NOTE(review): confirm all callers hold entries_lock.
    unsafe {
        debug_assert_eq!(*vaddr, 0);
        *vaddr = last.to_phys().to_value() | port;
    }
}
/// Returns the end of the current table entry's span, clamped to `end`:
/// the next `size`-aligned boundary after `addr` (via `mask`), or `end` if
/// that boundary lies at or beyond it.
///
/// Mirrors the Linux `pgd_addr_end`-family helpers, which rely on wrapping
/// arithmetic: when the computed boundary wraps past the top of the address
/// space it becomes 0, and the `-1` comparison (wrapped to `usize::MAX`)
/// correctly selects `end`. Plain `+`/`-` here would panic in debug builds
/// on that edge case, so the wrapping forms are used explicitly.
#[inline(always)]
fn entry_addr_end(addr: Uaddr, end: Uaddr, size: usize, mask: usize) -> Uaddr {
    let bound = addr.to_value().wrapping_add(size) & mask;
    if bound.wrapping_sub(1) < end.to_value().wrapping_sub(1) {
        Uaddr::from(bound)
    } else {
        end
    }
}
impl MmStruct {
    /// Demand-fault handler: verifies `uaddr` falls inside a VMA whose flags
    /// permit `vm_flags`, then walks (allocating intermediate tables as
    /// needed) down to the leaf level and installs the mapping — a huge
    /// PMD-level entry for `VM_HUGE` VMAs, a 4K PTE otherwise. Installing
    /// is done under the mm entries lock; an already-present leaf entry is
    /// treated as success.
    #[allow(unused)]
    pub(crate) fn handle_mm_fault(&self, uaddr: Uaddr, vm_flags: VmFlags) -> Result<()> {
        let vma = self.vma_find(uaddr)?;
        if !vma.get_vm_flags().contains(vm_flags) {
            return Err(MmError::EFault);
        }
        // Walk down, materializing each intermediate table on demand.
        let pud = self.pgd.pgd_entries(uaddr).get_pud_alloc(&vma)?;
        let pmd = pud.pud_entries(uaddr).get_pmd_alloc(&vma)?;
        let pmd_entry = pmd.pmd_entries(uaddr);
        if vma.get_vm_flags().contains(VmFlags::VM_HUGE) {
            let mut guard = vma.get_mm().entries_lock.lock();
            let res = match pmd_entry.get_huge() {
                None => pmd_entry.set_huge(&vma, uaddr),
                Some(_) => Ok(()),
            };
            *guard = 1;
            return res;
        }
        let pte = pmd_entry.get_pte_alloc(&vma)?;
        let pte_entry = pte.pte_entries(uaddr);
        let mut guard = vma.get_mm().entries_lock.lock();
        let res = match pte_entry.get_page() {
            None => pte_entry.set_page(&vma, uaddr),
            Some(_) => Ok(()),
        };
        *guard = 1;
        res
    }
}