use alloc::{collections::btree_map::BTreeMap, sync::Arc, vec::Vec};
use core::{
result,
sync::atomic::{AtomicUsize, Ordering},
};
use crate::{
space::{
addr::{Uaddr, Vaddr},
kalloc::{GfpFlags, kfree, kmalloc},
mm::{
context::{MmContext, context_init, context_init_cpu},
entries::Pgd,
vmarea::VmAreaStruct,
},
},
sync::spinlock::Spinlock,
};
mod context;
mod entries;
pub mod pgtabledef;
mod tlb;
mod vmarea;
/// Subsystem-wide result alias: every mm operation fails with [`MmError`].
pub type Result<T> = result::Result<T, MmError>;
/// Error codes returned by the memory-management subsystem.
#[derive(Clone, Copy, Debug)]
pub enum MmError {
    /// Out of memory (e.g. a backing `kmalloc` failed).
    ENomem,
    /// Alignment requirement not met — not raised in this chunk; presumably
    /// used by the page-table code. TODO confirm.
    EAligned,
    /// A VM area overlapping the requested range already exists.
    EVmaExist,
    /// No VM area covers the requested address.
    EVmaNoExist,
    /// Unresolvable fault — not raised in this chunk; verify against the
    /// fault-handling path.
    EFault,
    /// Invalid `VmFlags` combination — not raised in this chunk; verify.
    EVmFlags,
    /// Address-space / range violation — not raised in this chunk; verify.
    ESpace,
}
bitflags::bitflags! {
    /// Per-VM-area permission and kind flags.
    #[derive(Clone, Copy, Default)]
    pub struct VmFlags: usize {
        /// No access.
        const VM_NONE = 0b0000_0000;
        /// Readable.
        const VM_READ = 0b0000_0001;
        /// Writable.
        const VM_WRITE = 0b0000_0010;
        /// Executable.
        const VM_EXEC = 0b0000_0100;
        /// Memory-mapped I/O region.
        const VM_IO = 0b0000_1000;
        /// Huge-page mapping.
        const VM_HUGE = 0b0001_0000;
        /// Union of all the flag bits above.
        const VM_PROT_MASK = 0b0001_1111;
    }
}
/// Per-address-space memory descriptor.
pub struct MmStruct {
    /// Top-level page directory of this address space.
    pgd: Pgd,
    /// Lock serializing page-table work; the guarded `usize` carries no
    /// data. NOTE(review): nothing in this chunk takes it — presumably the
    /// `entries` code does; confirm.
    entries_lock: Spinlock<usize>,
    /// Count of live page tables. ("pgtalbes" is a pre-existing typo kept
    /// because child modules can reference this private field.)
    nr_pgtalbes: AtomicUsize,
    /// Number of TLB flushes currently in flight for this address space.
    tlb_flush_pending: AtomicUsize,
    /// VM areas keyed (and therefore sorted) by their start address.
    vm_area_root: Spinlock<BTreeMap<Uaddr, Arc<VmAreaStruct>>>,
    /// Architecture MMU context; only touched by the context code.
    #[allow(unused)]
    context: MmContext,
}
impl MmStruct {
    /// Creates an empty address space: a fresh top-level page table, zeroed
    /// counters, no VM areas, and a new MMU context.
    ///
    /// # Errors
    /// Propagates the failure of `Pgd::create` (typically out of memory).
    pub fn create() -> Result<Self> {
        Ok(Self {
            pgd: Pgd::create()?,
            entries_lock: Spinlock::new(0),
            nr_pgtalbes: AtomicUsize::new(0),
            tlb_flush_pending: AtomicUsize::new(0),
            vm_area_root: Spinlock::new(BTreeMap::new()),
            context: MmContext::new(),
        })
    }
    /// Bumps the page-table counter. (Method/field keep the pre-existing
    /// "pgtalbes" spelling; private items are visible to child modules, so
    /// renaming here could break code outside this chunk.)
    #[inline(always)]
    fn inc_nr_pgtalbes(&self) {
        self.nr_pgtalbes.fetch_add(1, Ordering::Relaxed);
    }
    /// Drops the page-table counter by one.
    #[inline(always)]
    fn dec_nr_pgtalbes(&self) {
        self.nr_pgtalbes.fetch_sub(1, Ordering::Relaxed);
    }
    /// Records one more pending TLB flush for this address space.
    #[inline(always)]
    fn inc_tlb_flush_pending(&self) {
        self.tlb_flush_pending.fetch_add(1, Ordering::Relaxed);
    }
    /// Retires one pending TLB flush.
    #[inline(always)]
    fn dec_tlb_flush_pending(&self) {
        self.tlb_flush_pending.fetch_sub(1, Ordering::Relaxed);
    }
    /// True when more than one TLB flush is in flight (nested flushes).
    #[inline(always)]
    fn mm_tlb_flush_nested(&self) -> bool {
        self.tlb_flush_pending.load(Ordering::Relaxed) > 1
    }
    /// Inserts `vma` into the start-address-keyed map and wires up the
    /// prev/next linkage of its immediate neighbours.
    ///
    /// # Errors
    /// `EVmaExist` if `[start, start + size)` overlaps an existing area.
    fn vma_insert(&self, vma: Arc<VmAreaStruct>) -> Result<()> {
        let start = vma.get_vm_start();
        let end = start + vma.get_vm_size();
        let mut root = self.vm_area_root.lock_irq_save();
        let mut prev = None;
        let mut next = None;
        // Walk areas in ascending start order: the first area lying entirely
        // at or above `end` becomes `next`; any area whose range intersects
        // the new one is an overlap; everything fully below becomes `prev`.
        for (_, this) in root.iter() {
            if end <= this.get_vm_start() {
                next = Some(this);
                break;
            }
            if start < this.get_vm_start() + this.get_vm_size() {
                return Err(MmError::EVmaExist);
            }
            prev = Some(this);
        }
        if let Some(p) = prev {
            p.set_next_start(vma.get_vm_start().to_value());
            vma.set_prev_start(p.get_vm_start().to_value());
            vma.set_prev_end(p.get_vm_start().to_value() + p.get_vm_size());
        }
        if let Some(n) = next {
            n.set_prev_start(vma.get_vm_start().to_value());
            n.set_prev_end(vma.get_vm_start().to_value() + vma.get_vm_size());
            vma.set_next_start(n.get_vm_start().to_value());
        }
        root.insert(start, vma);
        Ok(())
    }
    /// Finds the VM area containing `addr`.
    ///
    /// Uses a `BTreeMap::range` query — O(log n) — instead of the previous
    /// linear scan: the only candidate is the area with the greatest start
    /// address <= `addr`, since areas are non-overlapping and keyed by start.
    ///
    /// # Errors
    /// `EVmaNoExist` if no mapped area covers `addr`.
    fn vma_find(&self, addr: Uaddr) -> Result<Arc<VmAreaStruct>> {
        let root = self.vm_area_root.lock_irq_save();
        let (_, this) = root.range(..=addr).next_back().ok_or(MmError::EVmaNoExist)?;
        // `this.start <= addr` holds by construction of the range query, so
        // containment only needs the upper-bound check.
        if addr < this.get_vm_start() + this.get_vm_size() {
            Ok(this.clone())
        } else {
            Err(MmError::EVmaNoExist)
        }
    }
    /// Unlinks `vma` from its neighbours and removes it from the map.
    ///
    /// # Errors
    /// `EVmaNoExist` if `vma`'s start address is not present in the map.
    fn vma_remove(&self, vma: &VmAreaStruct) -> Result<()> {
        let mut root = self.vm_area_root.lock_irq_save();
        let prev_vma = root.get(&Uaddr::from(vma.get_prev_start()));
        let next_vma = root.get(&Uaddr::from(vma.get_next_start()));
        // Bypass the victim in both directions before removing it.
        if let Some(p_vma) = prev_vma {
            p_vma.set_next_start(vma.get_next_start());
        }
        if let Some(n_vma) = next_vma {
            n_vma.set_prev_end(vma.get_prev_end());
            n_vma.set_prev_start(vma.get_prev_start());
        }
        root.remove(&vma.get_vm_start()).ok_or(MmError::EVmaNoExist)?;
        Ok(())
    }
    /// Makes this address space current on the calling CPU; the actual
    /// switch lives in the architecture context code.
    pub(crate) fn switch_mm(&self) {
        self.check_and_switch_context();
    }
}
impl Drop for MmStruct {
    fn drop(&mut self) {
        // An address space must already be torn down when dropped: all page
        // tables freed, no TLB flushes in flight, every VMA unmapped.
        // Violations are kernel bugs, hence asserts rather than errors.
        assert_eq!(self.nr_pgtalbes.load(Ordering::Relaxed), 0);
        assert_eq!(self.tlb_flush_pending.load(Ordering::Relaxed), 0);
        debug_assert!(self.vm_area_root.lock_irq_save().is_empty());
    }
}
/// Lazily-populated backing store for a mapping: one slot per page, where a
/// slot value of 0 means "page not yet allocated".
pub struct MmapPage {
    /// Byte size of each page, derived from the mapping's `VmFlags`.
    page_size: usize,
    /// Kernel virtual address of each page's backing allocation (0 = none).
    vec: Spinlock<Vec<usize>>,
}
impl MmapPage {
    /// Builds the bookkeeping for `count` lazily-allocated pages.
    ///
    /// No page frames are allocated here; every slot starts at the sentinel
    /// value 0 and is filled on first access in [`Self::get_vaddr`].
    pub fn create(vm_flags: VmFlags, count: usize) -> Self {
        let v = alloc::vec![0; count];
        // Page size follows the mapping flags (e.g. huge vs. normal);
        // `page_size()` is provided elsewhere in this module tree.
        let page_size = vm_flags.page_size();
        Self { page_size, vec: Spinlock::new(v) }
    }
    /// Returns the kernel virtual address backing page `idx`, allocating a
    /// zeroed page on first use.
    ///
    /// # Errors
    /// `MmError::ENomem` if the backing allocation fails.
    pub fn get_vaddr(&self, idx: usize) -> Result<Vaddr> {
        let mut lock = self.vec.lock_irq_save();
        debug_assert!(idx < lock.len());
        let val = lock[idx];
        let addr = if val == 0 {
            let vaddr = kmalloc(self.page_size, GfpFlags::Clean).map_err(|_| MmError::ENomem)?;
            lock[idx] = vaddr.to_value();
            vaddr
        } else {
            Vaddr::from(val)
        };
        Ok(addr)
    }
    /// Total byte size of the mapping.
    ///
    /// Fix: size from `len()`, not `capacity()` — `Vec` only guarantees
    /// `capacity >= len`, so `capacity()` could overstate the mapping and
    /// report bytes beyond the `count` page slots actually created.
    pub fn get_size(&self) -> usize {
        let count = self.vec.lock_irq_save().len();
        self.page_size * count
    }
}
impl Drop for MmapPage {
    /// Releases every page that was actually allocated; slots still holding
    /// the sentinel value 0 were never populated and have nothing to free.
    fn drop(&mut self) {
        let pages = self.vec.lock_irq_save();
        for &raw in pages.iter().filter(|&&raw| raw != 0) {
            kfree(Vaddr::from(raw));
        }
    }
}
/// Maps `[start, start + size)` into `mm` with the given protection flags,
/// backed by `mmap_page`.
///
/// # Errors
/// Propagates failures from `VmAreaStruct::create` and returns `EVmaExist`
/// if the range overlaps an existing area.
pub fn mmap(
    mm: Arc<MmStruct>,
    start: Uaddr,
    size: usize,
    vm_flags: VmFlags,
    mmap_page: Arc<MmapPage>,
) -> Result<()> {
    // Build the area descriptor first, then link it into the address
    // space's sorted VMA map.
    let area = VmAreaStruct::create(start, size, vm_flags, mm.clone(), mmap_page)?;
    let area = Arc::new(area);
    mm.vma_insert(area)
}
/// Unmaps the VM area containing `addr` from `mm`.
///
/// # Errors
/// `EVmaNoExist` if no area covers `addr`, or if the area vanished between
/// lookup and removal.
pub fn munmap(mm: Arc<MmStruct>, addr: Uaddr) -> Result<()> {
    let vma = mm.vma_find(addr)?;
    // Fix: propagate a removal failure instead of asserting on it. The
    // find/remove pair is not atomic (the VMA lock is released in between),
    // so a concurrent unmap can legitimately win the race; returning the
    // error to the caller beats panicking the kernel.
    mm.vma_remove(&vma)
}
/// One-time, boot-wide mm initialization: sets up the MMU context layer.
pub(crate) fn mm_init() {
    context_init();
}
/// Per-CPU mm initialization: prepares this CPU's MMU context state.
pub(crate) fn mm_init_cpu() {
    context_init_cpu();
}