#![allow(dead_code)]
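//! Architecture-specific paging code for x86_64: page table types, mapping
//! primitives, and the page fault handler. All tables are reached through a
//! recursive PML4 entry (see `PML4_ADDRESS` and the `SELF` array below).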
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use multiboot::Multiboot;
use x86::controlregs;
use x86::irq::PageFaultError;
use crate::arch::x86_64::kernel::apic;
use crate::arch::x86_64::kernel::get_mbinfo;
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::mm::paddr_to_slice;
use crate::arch::x86_64::mm::physicalmem;
use crate::environment;
use crate::mm;
use crate::scheduler;
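// Presumably the physical address of the GDT set up by the boot code; unused in this module.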
const BOOT_GDT: usize = 0x1000;
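// Virtual address of the PML4 through its self-referencing last entry
// (recursive mapping): table indices 511/511/511/511 yield 0xFFFF_FFFF_FFFF_F000.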
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
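// A 4 KiB page has a 12-bit offset; each table level decodes 9 bits (512 entries).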
const PAGE_BITS: usize = 12;
const PAGE_MAP_BITS: usize = 9;
const PAGE_MAP_MASK: usize = 0x1FF;
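// Architectural flag bits of an x86-64 page table entry (Intel SDM Vol. 3A).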
bitflags! {
pub struct PageTableEntryFlags: usize {
const PRESENT = 1 << 0;
const WRITABLE = 1 << 1;
const USER_ACCESSIBLE = 1 << 2;
const WRITE_THROUGH = 1 << 3;
const CACHE_DISABLE = 1 << 4;
const ACCESSED = 1 << 5;
const DIRTY = 1 << 6;
const HUGE_PAGE = 1 << 7;
const GLOBAL = 1 << 8;
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
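// Builder-style helpers, used like `flags.normal().writable()` when assembling flags for map().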
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
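/// A single 64-bit page table entry: the physical frame address and the flag
/// bits share one `usize`, since the address is frame-aligned and the flags
/// occupy the low 12 bits plus bit 63 (EXECUTE_DISABLE).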
#[derive(Clone, Copy)]
pub struct PageTableEntry {
physical_address_and_flags: usize,
}
impl PageTableEntry {
pub fn address(self) -> usize {
self.physical_address_and_flags
& !(BasePageSize::SIZE - 1)
& !PageTableEntryFlags::EXECUTE_DISABLE.bits()
}
fn is_present(self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
assert_eq!(
physical_address % LargePageSize::SIZE,
0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
assert_eq!(
physical_address % BasePageSize::SIZE,
0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
assert_eq!(
physical_address >> processor::get_physical_address_bits(),
0,
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
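/// Common interface of the three x86-64 page sizes. `MAP_LEVEL` is the table
/// level at which a page of this size is mapped (0 = PT), and `MAP_EXTRA_FLAG`
/// is the extra entry flag the mapping needs (HUGE_PAGE for 2 MiB and 1 GiB pages).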
pub trait PageSize: Copy {
const SIZE: usize;
const MAP_LEVEL: usize;
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
const SIZE: usize = 4096;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 2 * 1024 * 1024;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 1024 * 1024 * 1024;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
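/// A virtual memory page of size `S`, described by its (aligned) virtual address.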
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
virtual_address: usize,
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
fn address(self) -> usize {
self.virtual_address
}
fn flush_from_tlb(self) {
// The deprecated llvm_asm! macro has been removed from Rust; use asm! instead.
unsafe {
core::arch::asm!("invlpg [{}]", in(reg) self.virtual_address, options(nostack, preserves_flags));
}
}
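// A 48-bit address is canonical if bits 48..63 replicate bit 47, i.e. it lies
// below 0x0000_8000_0000_0000 or at/above 0xFFFF_8000_0000_0000.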
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == HugePageSize::SIZE {
assert!(
processor::supports_1gib_pages(),
"Using a 1 GiB page, but the CPU does not support 1 GiB pages"
);
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last,
}
}
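// Returns this page's index into the page table at level `L`: the 9 bits of
// the virtual address that the level decodes.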
fn table_index<L: PageTableLevel>(self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> (L::LEVEL * PAGE_MAP_BITS) & PAGE_MAP_MASK
}
}
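/// Iterator over an inclusive range of pages, produced by `Page::range`.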
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
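// Marker types for the four table levels. PT implements only `PageTableLevel`,
// so the recursion in the specialized impl below ends there at compile time.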
trait PageTableLevel {
const LEVEL: usize;
}
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
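/// A page table at level `L`: 512 entries, exactly one 4 KiB page in size.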
#[repr(C)]
struct PageTable<L> {
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
level: PhantomData<L>,
}
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
}
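// Generic implementation for all levels. The `default fn` items rely on the
// nightly `specialization` feature and are overridden below for levels that
// have subtables; for PT they are used as-is.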
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
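// BLANK is the convention used by unmap(). Note that set() still inserts
// PRESENT and ACCESSED, so the entry afterwards points to physical frame 0
// instead of being cleared.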
if flags == PageTableEntryFlags::BLANK {
self.entries[index].set(physical_address, flags);
} else {
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
}
if flush {
page.flush_from_tlb();
}
flush
}
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
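// Specialized implementation for PML4, PDPT, and PD: walk down to the level at
// which the page is actually mapped, creating subtables on the way if needed.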
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} else {
None
}
}
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
if !self.entries[index].is_present() {
// Allocate a frame for the new subtable and reference it with a writable entry.
let subtable_physical_address = physicalmem::allocate(BasePageSize::SIZE).unwrap();
self.entries[index].set(subtable_physical_address, PageTableEntryFlags::WRITABLE);
// The subtable is now reachable through the recursive mapping; zero it before use.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
}
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
} else {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
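// With the recursive mapping, the subtable for `index` is reached by shifting
// the table's own virtual address left by 9 bits and inserting the index:
// e.g. the PML4 at 0xFFFF_FFFF_FFFF_F000 yields PDPT addresses of the form
// 0xFFFF_FFFF_FFE0_0000 | (index << 12).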
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
let mut send_ipi = false;
for page in range {
send_ipi |= self.map_page::<S>(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
if send_ipi {
apic::ipi_tlb_flush();
}
}
}
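/// Page fault (#PF) handler: logs the faulting address from CR2 and the error
/// code, then aborts the current task. There is no demand paging here.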
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: &mut irq::ExceptionStackFrame,
error_code: u64,
) {
let virtual_address = unsafe { controlregs::cr2() };
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
error!(
"fs = {:#X}, gs = {:#X}",
processor::readfs(),
processor::readgs()
);
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
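/// Returns an inclusive range covering `count` pages of size `S` starting at `virtual_address`.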
#[inline]
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
// count == 0 would underflow below and, in release mode, silently wrap around.
assert!(count > 0);
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
trace!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
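/// Translates `virtual_address` to its physical address, assuming it is mapped
/// with page size `S`. Panics if the page table entry is not present.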
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
trace!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
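/// Translates a virtual address without knowing its page size in advance, by
/// reading the page tables through the recursive mapping. `SELF` holds the
/// virtual base addresses of the PT, PD, and PDPT views and of the PML4 itself;
/// the walk starts at the PDPT entries and stops early at a huge page.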
pub fn virtual_to_physical(virtual_address: usize) -> usize {
let mut page_bits: usize = 39;
static SELF: [usize; 4] = [
0xFFFF_FF80_0000_0000, // PTs
0xFFFF_FFFF_C000_0000, // PDs
0xFFFF_FFFF_FFE0_0000, // PDPTs
0xFFFF_FFFF_FFFF_F000, // PML4
];
for i in (0..3).rev() {
page_bits -= PAGE_MAP_BITS;
let vpn = (virtual_address >> page_bits) as isize;
let ptr = SELF[i] as *const usize;
let entry = unsafe { *ptr.offset(vpn) };
if entry & PageTableEntryFlags::HUGE_PAGE.bits() != 0 || i == 0 {
let off = virtual_address
& !(((!0usize) << page_bits) & !PageTableEntryFlags::EXECUTE_DISABLE.bits());
let phys =
entry & (((!0usize) << page_bits) & !PageTableEntryFlags::EXECUTE_DISABLE.bits());
return off | phys;
}
}
panic!("virtual_to_physical should never reach this point");
}
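/// C-callable wrapper around virtual_to_physical().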
#[no_mangle]
pub extern "C" fn virt_to_phys(virtual_address: usize) -> usize {
virtual_to_physical(virtual_address)
}
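/// Maps `count` pages of size `S`, starting at `virtual_address`, to a
/// contiguous physical region starting at `physical_address`.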
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
trace!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address,
physical_address,
count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
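/// Unmaps `count` pages by remapping them to physical address 0 with BLANK
/// flags; see the note in map_page_in_this_table about the PRESENT bit.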
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
trace!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address,
count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
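/// Identity-maps a low physical region (e.g. Multiboot structures) read-only
/// and non-executable. The region must lie below the kernel start address.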
pub fn identity_map(start_address: usize, end_address: usize) {
let first_page = Page::<BasePageSize>::including_address(start_address);
let last_page = Page::<BasePageSize>::including_address(end_address);
assert!(
last_page.address() < mm::kernel_start_address(),
"Address {:#X} to be identity-mapped is not below Kernel start address",
last_page.address()
);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let range = Page::<BasePageSize>::range(first_page, last_page);
let mut flags = PageTableEntryFlags::empty();
flags.normal().read_only().execute_disable();
root_pagetable.map_pages(range, first_page.address(), flags);
}
#[inline]
pub fn get_application_page_size() -> usize {
LargePageSize::SIZE
}
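// Nothing to do on x86_64; presumably kept so all architectures expose the same interface.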
pub fn init() {}
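/// Removes the boot loader's 2 MiB identity mappings above the kernel image and
/// re-establishes identity mappings only for what is still needed: the Multiboot
/// information, the first memory map entry, and the kernel command line.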
pub fn init_page_tables() {
debug!("Create new view to the kernel space");
unsafe {
let pml4 = controlregs::cr3();
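// The `+ 2 * PAGE_SIZE` assumes the boot page tables occupy three consecutive
// frames: PML4, then PDPT, then PD.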
let pde = pml4 + 2 * BasePageSize::SIZE as u64;
debug!("Found PML4 at 0x{:x}", pml4);
let start = pde
+ ((mm::kernel_end_address() >> (PAGE_MAP_BITS + PAGE_BITS)) * mem::size_of::<u64>())
as u64;
let size = (512 - (mm::kernel_end_address() >> (PAGE_MAP_BITS + PAGE_BITS)))
* mem::size_of::<u64>();
ptr::write_bytes(start as *mut u8, 0, size);
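// Reload CR3 to flush the TLB after editing the live page tables.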
controlregs::cr3_write(pml4);
let mb_info = get_mbinfo();
if mb_info > 0 {
info!("Found Multiboot info at 0x{:x}", mb_info);
identity_map(mb_info, mb_info);
let mb = Multiboot::new(mb_info as u64, paddr_to_slice).unwrap();
let memory_map_address = mb
.memory_regions()
.expect("Could not find a memory map in the Multiboot information")
.next()
.expect("Could not first map address")
.base_address() as usize;
identity_map(memory_map_address, memory_map_address);
}
let cmdsize = environment::get_cmdsize();
if cmdsize > 0 {
let cmdline = environment::get_cmdline();
info!("Found cmdline at 0x{:x} (size {})", cmdline, cmdsize);
identity_map(cmdline, cmdline + cmdsize - 1);
}
}
}