use crate::space::{
addr::Uaddr,
mm::{
MmStruct,
pgtabledef::{PAGE_SHIFT, PAGE_SIZE, PGD_SHIFT, PMD_SHIFT, PUD_SHIFT},
vmarea::VmAreaStruct,
},
uaccess::USER_SPACE_SIZE,
};
// Select the architecture-specific TLB-flush implementation at compile time:
// the real aarch64 code when building for the `aarch64_seminix` target,
// otherwise a dummy stand-in (e.g. for host-side builds).
cfg_if::cfg_if! {
    if #[cfg(aarch64_seminix)] {
        mod aarch64;
        use aarch64::arch_local_flush_tlb_all;
    } else {
        mod dummy;
        use dummy::arch_local_flush_tlb_all;
    }
}
/// Flush the entire TLB on the local CPU.
///
/// Thin wrapper over the architecture-specific implementation selected by the
/// `cfg_if` block above; on non-aarch64 builds this resolves to the dummy.
#[allow(unused)]
#[inline(always)]
pub(crate) fn local_flush_tlb_all() {
    arch_local_flush_tlb_all();
}
/// Accumulates pending TLB-invalidation state while a range of user address
/// space is being unmapped, so flushes can be batched instead of issued per
/// entry (analogous to Linux's `struct mmu_gather`).
#[allow(clippy::struct_excessive_bools)]
pub(crate) struct MmuGather<'a> {
    /// Address space this gather operates on.
    mm: &'a MmStruct,
    /// Lowest address gathered so far; reset to the sentinel
    /// `USER_SPACE_SIZE` when the range is empty (see `tlb_reset_range`).
    start: Uaddr,
    /// One past the highest address gathered; sentinel `0` when empty.
    end: Uaddr,
    /// True when the entire user address space is being torn down.
    fullmm: bool,
    /// Page-table pages themselves were freed, not just leaf entries cleared.
    freed_tables: bool,
    // Which translation levels had entries cleared; the finest set level
    // determines `tlb_get_unmap_size`.
    cleared_ptes: bool,
    cleared_pmds: bool,
    cleared_puds: bool,
    cleared_pgds: bool,
}
impl MmuGather<'_> {
    /// Flush everything gathered so far. Currently TLB-only; exists as a
    /// separate layer so page-freeing work could be added later.
    #[inline(always)]
    fn tlb_flush_mmu(&mut self) {
        self.tlb_flush_mmu_tlbonly();
    }

    /// Issue the architecture flush for the accumulated range, then reset
    /// the gather state. An `end` of zero is the empty-range sentinel, in
    /// which case there is nothing to do.
    #[inline(always)]
    fn tlb_flush_mmu_tlbonly(&mut self) {
        if self.end.to_value() != 0 {
            self.arch_tlb_flush();
            self.tlb_reset_range();
        }
    }

    /// Size in bytes of the finest translation granule that had entries
    /// cleared; falls back to one page when nothing was recorded.
    #[allow(unused)]
    #[inline(always)]
    pub(crate) fn tlb_get_unmap_size(&self) -> usize {
        let shift = match (
            self.cleared_ptes,
            self.cleared_pmds,
            self.cleared_puds,
            self.cleared_pgds,
        ) {
            (true, ..) => PAGE_SHIFT,
            (_, true, ..) => PMD_SHIFT,
            (_, _, true, _) => PUD_SHIFT,
            (.., true) => PGD_SHIFT,
            _ => PAGE_SHIFT,
        };
        1 << shift
    }

    /// Reset the gathered range and all per-level flags.
    ///
    /// For a full-mm teardown the range stays pinned to the whole user
    /// address space; otherwise `start`/`end` take inverted sentinel values
    /// so that `tlb_adjust_range` can grow the range with simple compares.
    #[inline(always)]
    fn tlb_reset_range(&mut self) {
        let (lo, hi) = if self.fullmm {
            (0, USER_SPACE_SIZE)
        } else {
            (USER_SPACE_SIZE, 0)
        };
        self.start = Uaddr::from(lo);
        self.end = Uaddr::from(hi);
        self.freed_tables = false;
        self.cleared_ptes = false;
        self.cleared_pmds = false;
        self.cleared_puds = false;
        self.cleared_pgds = false;
    }

    /// Flush, optionally discarding the gathered range in favour of the
    /// caller-supplied `[start, end)` window when `force` is set.
    #[inline(always)]
    fn __tlb_finish_mmu(&mut self, start: Uaddr, end: Uaddr, force: bool) {
        if force {
            self.tlb_reset_range();
            self.tlb_adjust_range(start, end - start);
        }
        self.tlb_flush_mmu();
    }

    /// Finish the unmap operation: flush (forcing the full caller window if
    /// another flush on this mm is pending concurrently) and clear the
    /// pending-flush marker taken by `tlb_gather_mmu`.
    pub(crate) fn tlb_finish_mmu(&mut self, start: Uaddr, end: Uaddr) {
        let nested = self.mm.mm_tlb_flush_nested();
        self.__tlb_finish_mmu(start, end, nested);
        self.mm.dec_tlb_flush_pending();
    }

    /// Per-VMA setup hook; a no-op here, kept for API symmetry with
    /// `tlb_end_vma`.
    #[allow(clippy::unused_self)]
    #[inline(always)]
    pub(crate) fn tlb_start_vma(&self, _: &VmAreaStruct) {}

    /// Per-VMA teardown hook: flush eagerly unless the whole address space
    /// is going away, in which case one final flush suffices.
    #[inline(always)]
    pub(crate) fn tlb_end_vma(&mut self, _: &VmAreaStruct) {
        if self.fullmm {
            return;
        }
        self.tlb_flush_mmu_tlbonly();
    }

    /// Grow the gathered range to cover `[addr, addr + range_size)`.
    #[inline(always)]
    fn tlb_adjust_range(&mut self, addr: Uaddr, range_size: usize) {
        if addr < self.start {
            self.start = addr;
        }
        let upper = addr + range_size;
        if self.end < upper {
            self.end = upper;
        }
    }

    /// Record that leaf PTEs in `[addr, addr + size)` were cleared.
    #[inline(always)]
    pub(crate) fn tlb_flush_pte_range(&mut self, addr: Uaddr, size: usize) {
        self.cleared_ptes = true;
        self.tlb_adjust_range(addr, size);
    }

    /// Record that PMD entries in `[addr, addr + size)` were cleared.
    #[inline(always)]
    pub(crate) fn tlb_flush_pmd_range(&mut self, addr: Uaddr, size: usize) {
        self.cleared_pmds = true;
        self.tlb_adjust_range(addr, size);
    }

    /// Record that PUD entries in `[addr, addr + size)` were cleared.
    #[inline(always)]
    fn tlb_flush_pud_range(&mut self, addr: Uaddr, size: usize) {
        self.cleared_puds = true;
        self.tlb_adjust_range(addr, size);
    }

    /// Record that PGD entries in `[addr, addr + size)` were cleared.
    #[inline(always)]
    fn tlb_flush_pgd_range(&mut self, addr: Uaddr, size: usize) {
        self.cleared_pgds = true;
        self.tlb_adjust_range(addr, size);
    }

    /// A PTE table page covering `addr` was freed: mark table freeing, widen
    /// the range at the PMD level, and hand the page to the arch layer.
    #[inline(always)]
    pub(crate) fn pte_free_tlb(&mut self, addr: Uaddr) {
        self.freed_tables = true;
        self.tlb_flush_pmd_range(addr, PAGE_SIZE);
        self.arch_pte_free_tlb(addr);
    }

    /// A PMD table page covering `addr` was freed: mark table freeing, widen
    /// the range at the PUD level, and hand the page to the arch layer.
    #[inline(always)]
    pub(crate) fn pmd_free_tlb(&mut self, addr: Uaddr) {
        self.freed_tables = true;
        self.tlb_flush_pud_range(addr, PAGE_SIZE);
        self.arch_pmd_free_tlb(addr);
    }

    /// A PUD table page covering `addr` was freed: mark table freeing, widen
    /// the range at the PGD level, and hand the page to the arch layer.
    #[inline(always)]
    pub(crate) fn pud_free_tlb(&mut self, addr: Uaddr) {
        self.freed_tables = true;
        self.tlb_flush_pgd_range(addr, PAGE_SIZE);
        self.arch_pud_free_tlb(addr);
    }
}
/// Begin gathering TLB-invalidation state for unmapping `[start, end)` of
/// `mm`'s user address space.
///
/// Marks a flush as pending on `mm`; the caller must pair this with
/// `MmuGather::tlb_finish_mmu`, which drops the pending marker.
pub(crate) fn tlb_gather_mmu(mm: &MmStruct, start: Uaddr, end: Uaddr) -> MmuGather<'_> {
    // The caller-supplied window only determines whether this is a full
    // address-space teardown; the live range is reset to sentinels below.
    let fullmm = start.to_value() == 0 && end.to_value() == USER_SPACE_SIZE;
    let mut tlb = MmuGather {
        mm,
        start,
        end,
        fullmm,
        freed_tables: false,
        cleared_ptes: false,
        cleared_pmds: false,
        cleared_puds: false,
        cleared_pgds: false,
    };
    tlb.tlb_reset_range();
    mm.inc_tlb_flush_pending();
    tlb
}