use core::{
mem::size_of,
ops::{Add, AddAssign, BitAnd, BitAndAssign, Sub},
};
use super::kalloc::Page;
use crate::{
bsp::__mems_define,
println,
space::{
mm::pgtabledef::{PAGE_SHIFT, PAGE_SIZE},
uaccess::USER_SPACE_SIZE,
},
};
// Select the architecture-specific backend at compile time. On aarch64 the
// real implementation is used; any other target gets the dummy backend
// (useful for host-side builds and tests). Both backends are expected to
// export the same arch_* functions and ARCH_* constants used below.
cfg_if::cfg_if! {
if #[cfg(target_arch = "aarch64")] {
mod aarch64;
use aarch64::*;
} else {
mod dummy;
use dummy::*;
}
}
/// Kernel address of the initial boot stack base, as reported by the
/// architecture backend.
#[inline]
pub(crate) fn init_stack_base() -> Kaddr {
    let base = arch_init_stack_base();
    Kaddr(base)
}
// Virtual-address offsets supplied by the architecture backend, one per
// kernel mapping region.
const KIMAGE_OFFSET: usize = ARCH_KIMAGE_OFFSET; // kernel image mapping
const KMEM_OFFSET: usize = ARCH_KMEM_OFFSET; // linear (direct) RAM mapping
const KPAGE_OFFSET: usize = ARCH_KPAGE_OFFSET; // Page descriptor array mapping
const KIO_OFFSET: usize = ARCH_KIO_OFFSET; // MMIO mapping
/// Kernel-image address of the first byte of the text section.
#[inline]
fn start_text() -> Kaddr {
    let text = arch_start_text();
    Kaddr(text)
}
/// Kernel-image address of the LAST byte of .bss (the backend reports the
/// one-past-the-end address, hence the -1 to make the bound inclusive).
#[inline]
fn end_bss() -> Kaddr {
    let one_past_end = arch_end_bss();
    Kaddr(one_past_end - 1)
}
/// Physical address where system RAM begins (base of memory bank 0).
#[inline(always)]
pub(crate) fn memstart() -> Paddr {
    // SAFETY: reads the board's static memory map.
    let (base, _size) = unsafe { __mems_define().mem(0) };
    Paddr(base)
}
/// Total size in bytes of system RAM. Only a single memory bank is
/// supported, which the debug assertion enforces.
#[inline(always)]
fn memsize() -> usize {
    debug_assert!(unsafe { __mems_define().nr_mems() == 1 });
    // SAFETY: reads the board's static memory map.
    let (_base, size) = unsafe { __mems_define().mem(0) };
    size
}
/// Physical address of the LAST byte of RAM (inclusive bound).
#[inline(always)]
fn memend() -> Paddr {
    let last_offset = memsize() - 1;
    memstart() + last_offset
}
const KPAGE_SIZE: usize = size_of::<Page>();
/// Number of physical page frames in RAM (one `Page` descriptor is kept
/// for each frame).
#[inline(always)]
fn kpage_count() -> usize {
    let total_bytes = memsize();
    total_bytes / PAGE_SIZE
}
/// Total bytes needed to hold the whole `Page` descriptor array.
#[inline(always)]
fn kpage_mem() -> usize {
    kpage_count() * KPAGE_SIZE
}
/// Physical address of the `Page` descriptor array, which is carved out of
/// the very top of RAM.
pub fn kpage_start_phys() -> usize {
    let ram_top = memend().to_value() + 1; // exclusive end of RAM
    ram_top - kpage_mem()
}
/// Virtual address of the first `Page` descriptor (physical base plus the
/// page-array mapping offset).
#[inline(always)]
fn kpage_start() -> usize {
    kpage_start_phys() + KPAGE_OFFSET
}
/// Virtual address of the LAST byte of the `Page` array (inclusive bound).
#[inline(always)]
fn kpage_end() -> usize {
    let array_bytes = kpage_mem();
    kpage_start() + array_bytes - 1
}
/// A physical address.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct Paddr(usize);

impl Add<usize> for Paddr {
    type Output = Paddr;

    /// Offsets the physical address forward by `rhs` bytes.
    #[inline(always)]
    fn add(self, rhs: usize) -> Self::Output {
        let Paddr(base) = self;
        Paddr(base + rhs)
    }
}

impl Sub<usize> for Paddr {
    type Output = Paddr;

    /// Offsets the physical address backward by `rhs` bytes.
    #[inline(always)]
    fn sub(self, rhs: usize) -> Self::Output {
        let Paddr(base) = self;
        Paddr(base - rhs)
    }
}

impl Sub for Paddr {
    type Output = usize;

    /// Byte distance between two physical addresses.
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        let (Paddr(lhs), Paddr(rhs)) = (self, rhs);
        lhs - rhs
    }
}
impl Paddr {
    /// Wraps a raw physical address value; performs no validation.
    pub const fn from(value: usize) -> Paddr {
        Paddr(value)
    }

    /// Returns the raw address value.
    pub const fn to_value(&self) -> usize {
        self.0
    }

    /// Translates into the kernel-image mapping; `None` when the shifted
    /// address falls outside [start_text, end_bss].
    #[inline(always)]
    pub fn to_kimg(&self) -> Option<Kaddr> {
        let value = self.0 + KIMAGE_OFFSET;
        if value >= start_text().to_value() && value <= end_bss().to_value() {
            Some(Kaddr(value))
        } else {
            None
        }
    }

    /// Translates into the linear (direct) RAM mapping; `None` when the
    /// address is not inside any registered memblock range.
    #[inline(always)]
    pub fn to_virt(&self) -> Option<Vaddr> {
        if self.phys_valid() {
            return Some(Vaddr(self.0 + KMEM_OFFSET));
        }
        None
    }

    /// `Page` descriptor covering this address; `None` when the address
    /// lies outside RAM.
    #[inline(always)]
    pub fn to_page(&self) -> Option<&'static Page> {
        let idx = self.to_pfn()?;
        // SAFETY: idx came from to_pfn(), so it indexes into the descriptor
        // array; the comparison below re-guards the upper bound.
        unsafe {
            let page = (kpage_start() as *const Page).add(idx.to_value());
            if page as usize > kpage_end() { None } else { Some(&*page) }
        }
    }

    /// Mutable variant of [`Self::to_page`].
    #[inline(always)]
    pub fn to_page_mut(&self) -> Option<&'static mut Page> {
        let idx = self.to_pfn()?;
        // SAFETY: same bounds reasoning as to_page(). NOTE(review): nothing
        // here prevents handing out aliasing &mut Page for the same frame —
        // confirm callers serialize access.
        unsafe {
            let page = (kpage_start() as *mut Page).add(idx.to_value());
            if page as usize > kpage_end() { None } else { Some(&mut *page) }
        }
    }

    /// Translates into the MMIO mapping; `None` when the address lies
    /// inside RAM (MMIO must not alias RAM).
    #[inline(always)]
    pub fn to_io(&self) -> Option<Iaddr> {
        if self.0 >= memstart().to_value() && self.0 <= memend().to_value() {
            None
        } else {
            Some(Iaddr(self.0 + KIO_OFFSET))
        }
    }

    /// Const, UNCHECKED variant of [`Self::to_io`]: the caller must
    /// guarantee the address is not in RAM.
    pub const fn to_io_const(&self) -> Iaddr {
        Iaddr(self.0 + KIO_OFFSET)
    }

    /// Page-frame number of the frame containing this address (rounds
    /// down); `None` when outside RAM.
    #[inline(always)]
    pub fn to_pfn(&self) -> Option<Pfn> {
        if self.0 < memstart().to_value() || self.0 > memend().to_value() {
            None
        } else {
            Some(Pfn((self.0 - memstart().to_value()) >> PAGE_SHIFT))
        }
    }

    /// Like [`Self::to_pfn`] but rounds UP to the next frame boundary.
    /// NOTE(review): `self.0 + PAGE_SIZE - 1` can wrap for addresses near
    /// usize::MAX — harmless for real RAM layouts, but unguarded.
    #[inline(always)]
    pub fn to_pfn_up(&self) -> Option<Pfn> {
        let value = self.0 + PAGE_SIZE - 1;
        Paddr::from(value).to_pfn()
    }

    /// True when the address lies inside one of the memblock ranges
    /// registered via `memblock_add` (i.e. usable, directly-mapped RAM).
    pub fn phys_valid(&self) -> bool {
        // SAFETY: reads the boot-time memblock table. NOTE(review): relies
        // on registration being finished and single-threaded — confirm.
        unsafe {
            let index = MEMBLOCK_RANGE.index;
            for i in 0..index {
                let start = MEMBLOCK_RANGE.range[i].start;
                let end = start + MEMBLOCK_RANGE.range[i].size;
                if *self >= start && *self < end {
                    return true;
                }
            }
        }
        false
    }
}
/// A virtual address inside the kernel-image mapping
/// ([start_text, end_bss], inclusive).
pub struct Kaddr(usize);

impl From<usize> for Kaddr {
    /// Panics (even in release builds) when `value` lies outside the
    /// kernel image.
    #[inline(always)]
    fn from(value: usize) -> Self {
        assert!(value >= start_text().to_value() && value <= end_bss().to_value());
        Kaddr(value)
    }
}
impl Kaddr {
    /// Returns the raw address value.
    pub const fn to_value(&self) -> usize {
        self.0
    }

    /// Translates back to a physical address by undoing the image offset.
    /// Debug-asserts the address is within the mapped kernel image, where
    /// the subtraction is meaningful.
    #[inline(always)]
    pub fn to_phys(&self) -> Paddr {
        debug_assert!(self.0 >= start_text().to_value() && self.0 <= end_bss().to_value());
        Paddr(self.0 - KIMAGE_OFFSET)
    }

    /// `Page` descriptor for this address; panics if the underlying
    /// physical address is outside RAM.
    #[inline(always)]
    pub fn to_page(&self) -> &'static Page {
        self.to_phys().to_page().unwrap()
    }
}
/// A virtual address inside the kernel's linear (direct) RAM mapping.
#[derive(Clone, Copy)]
pub struct Vaddr(usize);

impl From<usize> for Vaddr {
    /// Debug-asserts the address maps back to a registered physical range.
    #[inline(always)]
    fn from(value: usize) -> Self {
        let vaddr = Vaddr(value);
        debug_assert!(vaddr.to_phys().phys_valid());
        vaddr
    }
}
impl Vaddr {
    /// Returns the raw address value.
    pub const fn to_value(&self) -> usize {
        self.0
    }

    /// Translates back to the physical address by undoing the linear-map
    /// offset; debug-asserts the result is registered RAM.
    #[inline(always)]
    pub fn to_phys(&self) -> Paddr {
        let phys = Paddr(self.0 - KMEM_OFFSET);
        debug_assert!(phys.phys_valid());
        phys
    }

    /// `Page` descriptor for this address; panics if outside RAM.
    #[inline(always)]
    pub fn to_page(&self) -> &'static Page {
        self.to_phys().to_page().unwrap()
    }

    /// Mutable `Page` descriptor for this address; panics if outside RAM.
    #[inline(always)]
    pub fn to_page_mut(&self) -> &'static mut Page {
        self.to_phys().to_page_mut().unwrap()
    }

    /// Page-frame number of this address; panics if outside RAM.
    #[inline(always)]
    pub fn to_pfn(&self) -> Pfn {
        self.to_phys().to_pfn().unwrap()
    }
}
impl Page {
    /// Physical address of the frame this descriptor covers, recovered
    /// from the descriptor's own position inside the `Page` array
    /// (the inverse of `Paddr::to_page`).
    #[inline(always)]
    pub fn to_phys(&self) -> Paddr {
        let p = self as *const Page as usize;
        debug_assert!(p >= kpage_start() && p <= kpage_end());
        // array byte offset -> descriptor index -> frame byte offset + RAM base
        Paddr((((p - kpage_start()) / KPAGE_SIZE) << PAGE_SHIFT) + memstart().to_value())
    }

    /// Kernel-image address of the frame, if it maps into the image.
    #[inline(always)]
    pub fn to_kimg(&self) -> Option<Kaddr> {
        self.to_phys().to_kimg()
    }

    /// Linear-map address of the frame, if it is registered RAM.
    #[inline(always)]
    pub fn to_virt(&self) -> Option<Vaddr> {
        self.to_phys().to_virt()
    }

    /// Page-frame number of the frame; the unwrap cannot fail because the
    /// descriptor's own position guarantees the frame is within RAM.
    #[inline(always)]
    pub fn to_pfn(&self) -> Pfn {
        self.to_phys().to_pfn().unwrap()
    }
}
/// A virtual address in the MMIO mapping (its physical counterpart lies
/// outside RAM).
pub struct Iaddr(usize);

impl Iaddr {
    /// Returns the raw address value.
    #[inline(always)]
    pub const fn to_value(&self) -> usize {
        self.0
    }

    /// Translates back to the physical address by undoing the I/O offset;
    /// debug-asserts the result does not alias RAM.
    #[inline(always)]
    pub fn to_phys(&self) -> Paddr {
        let phys = Paddr(self.0 - KIO_OFFSET);
        debug_assert!(phys < memstart() || phys > memend());
        phys
    }
}
/// A physical page-frame number, counted from the start of RAM
/// (frame 0 is the frame at `memstart()`).
#[derive(Clone, Copy, PartialEq, PartialOrd)]
pub struct Pfn(usize);

impl From<usize> for Pfn {
    /// Debug-asserts the frame lies within RAM.
    #[inline(always)]
    fn from(value: usize) -> Self {
        debug_assert!((memstart() + (value << PAGE_SHIFT)) <= memend());
        Pfn(value)
    }
}
impl Add<usize> for Pfn {
type Output = Pfn;
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
Self(self.0 + rhs)
}
}
impl Sub for Pfn {
type Output = usize;
#[inline(always)]
fn sub(self, rhs: Self) -> Self::Output {
self.0 - rhs.0
}
}
impl Sub<usize> for Pfn {
type Output = Pfn;
#[inline(always)]
fn sub(self, rhs: usize) -> Self::Output {
Pfn(self.0 - rhs)
}
}
impl BitAnd for Pfn {
type Output = Pfn;
#[inline(always)]
fn bitand(self, rhs: Self) -> Self::Output {
Pfn(self.0 & rhs.0)
}
}
impl Pfn {
    /// Sentinel frame number used to signal failure; `usize::MAX` can
    /// never be a valid frame.
    pub(crate) const fn from_error() -> Self {
        Pfn(usize::MAX)
    }

    /// Returns the raw frame number.
    pub const fn to_value(&self) -> usize {
        self.0
    }

    /// Physical address of the first byte of this frame; debug-asserts the
    /// frame is within RAM.
    #[inline(always)]
    pub fn to_phys(&self) -> Paddr {
        let phys = memstart() + (self.0 << PAGE_SHIFT);
        debug_assert!(phys <= memend());
        phys
    }

    /// `Page` descriptor for this frame, if the frame is within RAM.
    #[inline(always)]
    pub fn to_page(&self) -> Option<&'static Page> {
        self.to_phys().to_page()
    }

    /// Mutable `Page` descriptor for this frame, if within RAM.
    #[inline(always)]
    pub fn to_page_mut(&self) -> Option<&'static mut Page> {
        self.to_phys().to_page_mut()
    }

    /// True when the frame lies inside a registered memblock range.
    #[inline(always)]
    pub fn pfn_valid(&self) -> bool {
        let phys = self.to_phys();
        phys.phys_valid()
    }
}
/// A user-space virtual address.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
pub struct Uaddr(usize);

impl From<usize> for Uaddr {
    /// Debug-asserts the address is within the user address space.
    #[inline(always)]
    fn from(value: usize) -> Self {
        debug_assert!(value <= USER_SPACE_SIZE);
        Uaddr(value)
    }
}
impl Sub<usize> for Uaddr {
type Output = Uaddr;
#[inline(always)]
fn sub(self, rhs: usize) -> Self::Output {
Uaddr(self.0 - rhs)
}
}
impl Sub for Uaddr {
type Output = usize;
#[inline(always)]
fn sub(self, rhs: Self) -> Self::Output {
self.0 - rhs.0
}
}
impl Add<usize> for Uaddr {
type Output = Uaddr;
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
Uaddr(self.0 + rhs)
}
}
impl BitAnd<usize> for Uaddr {
type Output = Uaddr;
#[inline(always)]
fn bitand(self, rhs: usize) -> Self::Output {
Uaddr(self.0 & rhs)
}
}
impl BitAndAssign<usize> for Uaddr {
#[inline(always)]
fn bitand_assign(&mut self, rhs: usize) {
self.0 = self.0 & rhs;
}
}
impl AddAssign<usize> for Uaddr {
#[inline(always)]
fn add_assign(&mut self, rhs: usize) {
self.0 += rhs;
}
}
impl Uaddr {
    /// Returns the raw address value.
    pub const fn to_value(&self) -> usize {
        self.0
    }
}
/// One contiguous physical memory range registered with the allocator.
#[derive(Clone, Copy)]
struct MemBlockRange {
    start: Paddr, // first byte of the range
    size: usize, // length in bytes (range is [start, start + size))
}

/// Maximum number of ranges the table can hold.
/// NOTE(review): "RANG" looks like a typo for "RANGE"; kept as-is because
/// the name is referenced elsewhere in this file.
const MEM_RANG_MAX: usize = 16;

/// Fixed-capacity table of registered memory ranges.
struct MemBlock {
    index: usize, // number of valid entries in `range`
    range: [MemBlockRange; MEM_RANG_MAX],
}

// NOTE(review): `static mut` is sound only while registration and lookup
// happen on a single CPU without concurrency — confirm before SMP bring-up.
static mut MEMBLOCK_RANGE: MemBlock =
    MemBlock { index: 0, range: [MemBlockRange { start: Paddr(0), size: 0 }; MEM_RANG_MAX] };
/// Registers the physical range [start, start + size) in the memblock
/// table. Panics when the table is full or when the new range overlaps a
/// previously registered one.
fn memblock_add(start: Paddr, size: usize) {
    let end = start + size; // exclusive end of the new range
    unsafe {
        let index = MEMBLOCK_RANGE.index;
        assert!(index < MEM_RANG_MAX);
        for i in 0..index {
            let mem_end = MEMBLOCK_RANGE.range[i].start + MEMBLOCK_RANGE.range[i].size;
            // Non-overlap: the new range lies entirely before or after entry i.
            assert!(end <= MEMBLOCK_RANGE.range[i].start || mem_end <= start);
        }
        MEMBLOCK_RANGE.range[index].start = start;
        MEMBLOCK_RANGE.range[index].size = size;
        MEMBLOCK_RANGE.index += 1;
    }
}
/// Invokes `f(start, size, index)` once for every registered memory range,
/// in registration order.
///
/// Generalized to accept `FnMut` instead of `Fn`: every `Fn` closure also
/// implements `FnMut`, so existing callers are unaffected, while new
/// callers may pass closures that mutate captured state (e.g. to sum the
/// total registered size).
pub(crate) fn memblock_foreach<F>(mut f: F)
where
    F: FnMut(Paddr, usize, usize),
{
    // SAFETY: reads the boot-time memblock table; registration is assumed
    // to be complete and single-threaded at this point.
    unsafe {
        let count = MEMBLOCK_RANGE.index;
        for i in 0..count {
            let entry = MEMBLOCK_RANGE.range[i]; // MemBlockRange is Copy
            f(entry.start, entry.size, i);
        }
    }
}
/// Walks RAM from bottom to top, splitting it into four consecutive
/// regions and reporting each via `f(start, size, flag)`:
///   1. [memstart, text start)      — registered as a memblock, flag = true
///   2. [text start, end of bss)    — kernel image, flag = false
///   3. [end of bss, page array)    — registered as a memblock, flag = true
///   4. [page array, end of RAM]    — Page descriptor array, flag = false
/// All region boundaries are rounded to page-frame granularity.
/// NOTE(review): `flag` appears to mean "range was added to the memblock
/// table (usable RAM)" — the two `true` calls mirror the two memblock_add
/// calls; confirm against callers.
pub(crate) fn mem_setup<F>(f: F)
where
    F: Fn(Paddr, usize, bool),
{
    // Region 1: bottom of RAM up to the frame containing the kernel text.
    let end = start_text().to_phys().to_pfn().unwrap().to_phys();
    let size = end - memstart();
    memblock_add(memstart(), size);
    f(memstart(), size, true);
    // Region 2: the kernel image, rounded UP to the next frame boundary.
    let start = end;
    let end = end_bss().to_phys().to_pfn_up().unwrap().to_phys();
    f(start, end - start, false);
    // Region 3: from the end of the image up to the Page array.
    let start = end;
    let end = Paddr(kpage_start_phys()).to_pfn().unwrap().to_phys();
    memblock_add(start, end - start);
    f(start, end - start, true);
    // Region 4: the Page descriptor array occupying the top of RAM.
    f(end, memend() + 1 - end, false);
}
/// Prints the physical, kernel-image, and page-array ranges, followed by
/// the virtual range of every registered memblock (all bounds inclusive).
pub fn mem_dump() {
    println!("phys: [{:#018x}-{:#018x}]", memstart().to_value(), memend().to_value());
    println!("kimg: [{:#018x}-{:#018x}]", start_text().to_value(), end_bss().to_value());
    println!("page: [{:#018x}-{:#018x}]", kpage_start(), kpage_end());
    memblock_foreach(|base, len, i| {
        let first = base.to_virt().unwrap().to_value();
        let last = (base + len - 1).to_virt().unwrap().to_value();
        println!("virt{}: [{:#018x}-{:#018x}]", i, first, last);
    });
}