use crate::memorytype::*;
use crate::poolalloc::PoolAllocator;
use crate::EfiMemoryType::*;
use crate::PhysicalAddress;
use crate::Placement::*;
use crate::{guid, Guid, EFI};
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use core::alloc::Layout;
use core::cell::RefCell;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut, Range};
use core::ptr::*;
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};
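/// Placement constraint for page allocations, interpreted by
/// `MemoryMap::allocate_pages`.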
pub enum Placement {
    /// Highest suitable page-aligned base at or below the given maximum address.
    Max(u64),
    /// Exactly the given base address.
    Fixed(u64),
    /// Any page-aligned address.
    Anywhere,
    /// Pseudo-random base derived from a 32-bit seed, at the given alignment.
    Random(u32, u64),
    /// Any address at the given (power-of-two) alignment.
    Aligned(u64),
    /// At or below a maximum address, matching the given alignment mask.
    MaxAlignMask(u64, u64),
}
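/// Number of EFI pages needed to hold `size` bytes, rounding up.
///
/// A minimal sketch, assuming the usual 4 KiB EFI page size
/// (`EFI_PAGE_SHIFT == 12`):
///
/// ```ignore
/// assert_eq!(size_to_pages(4096), 1);
/// assert_eq!(size_to_pages(4097), 2);
/// ```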
pub fn size_to_pages(size: usize) -> usize {
(size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT
}
const EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD: u32 = 0x1;
const EFI_MEMORY_ATTRIBUTES_TABLE_GUID: Guid = guid!(
0xdcfa911d,
0x26eb,
0x469f,
[0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20]
);
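/// Fixed-capacity image of the UEFI Memory Attributes Table (version 2),
/// published under `EFI_MEMORY_ATTRIBUTES_TABLE_GUID`.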
#[derive(Debug)]
#[repr(C)]
pub(crate) struct MemoryAttributesTable<const N: usize> {
version: u32,
number_of_entries: u32,
descriptor_size: u32,
flags: u32,
entry: [EfiMemoryDescriptor; N],
}
impl<const N: usize> MemoryAttributesTable<N> {
pub fn new(descs: &[EfiMemoryDescriptor]) -> Self {
let mut s = MemoryAttributesTable {
version: 2,
number_of_entries: 0,
descriptor_size: core::mem::size_of::<EfiMemoryDescriptor>() as u32,
            // The RT forward control-flow guard flag
            // (EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD)
            // is deliberately left clear.
            flags: 0,
entry: [EfiMemoryDescriptor::zeroed(); N],
};
s.update(descs);
s
}
    pub fn update(&mut self, descs: &[EfiMemoryDescriptor]) {
        // If the input exceeds our fixed capacity, keep the previous contents.
        if descs.len() > self.entry.len() {
            return;
        }
        self.entry[..descs.len()].copy_from_slice(descs);
        self.number_of_entries = descs.len() as u32;
    }
}
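/// Memory map storage: descriptors keyed (and therefore kept sorted) by
/// physical start address.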
type MemMap = BTreeMap<PhysicalAddress, EfiMemoryDescriptor>;
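/// Pool-allocation bookkeeping: one allocator per memory type, plus the type
/// and layout of every live allocation so `free_pool` can route a bare
/// pointer back to its allocator.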
struct PoolAllocDb {
allocators: BTreeMap<EfiMemoryType, PoolAllocator>,
allocations: BTreeMap<*const u8, (EfiMemoryType, Layout)>,
}
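/// Central record of physical memory: the map itself, pool-allocation state,
/// and atomically bumped map keys used to detect changes.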
pub struct MemoryMap {
memmap: RefCell<MemMap>,
pool_alloc_db: RefCell<PoolAllocDb>,
mapkey: AtomicUsize,
mem_attr_mapkey: AtomicUsize,
}
impl MemoryMap {
pub fn new() -> Self {
let alloc_db = PoolAllocDb {
allocators: BTreeMap::new(),
allocations: BTreeMap::new(),
};
MemoryMap {
memmap: RefCell::new(BTreeMap::new()),
pool_alloc_db: RefCell::new(alloc_db),
mapkey: AtomicUsize::new(1),
mem_attr_mapkey: AtomicUsize::new(0),
}
}
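    /// Allocates room for `count` values of `T` from the pool of the given
    /// memory type, creating the backing `PoolAllocator` on first use and
    /// recording the layout so `free_pool` can later find it.
    ///
    /// Hypothetical usage sketch (`EfiBootServicesData` stands in for any
    /// pool type):
    ///
    /// ```ignore
    /// let p: NonNull<u64> = EFI.memmap.allocate_pool(EfiBootServicesData, 4)?;
    /// // ... use the four u64 slots ...
    /// EFI.memmap.free_pool(p.as_ptr() as *const u8).ok();
    /// ```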
pub(crate) fn allocate_pool<T>(
&self,
pool_type: EfiMemoryType,
count: usize,
) -> Result<NonNull<T>, &'static str> {
        // Guard against overflow when computing the allocation size in bytes.
        let size = count
            .checked_mul(core::mem::size_of::<T>())
            .ok_or("Pool size overflow")?;
        // Pool allocations are kept at least 16-byte aligned.
        let align = core::mem::align_of::<T>().max(16);
        let layout = Layout::from_size_align(size, align).or(Err("Layout error"))?;
let mut db = self.pool_alloc_db.borrow_mut();
let alloc = &mut db.allocators;
if !alloc.contains_key(&pool_type) {
alloc.insert(
pool_type,
PoolAllocator::new(pool_type, self)
.or(Err("Failed to insert new pool allocator"))?,
);
}
let p = alloc
.get_mut(&pool_type)
.unwrap()
.allocate(layout, self)
.or(Err("Failed to allocate from pool"))?;
db.allocations.insert(p.as_ptr(), (pool_type, layout));
        Ok(p.cast::<T>())
}
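    /// Returns a pool allocation to its owning allocator. Fails if `buffer`
    /// was not produced by `allocate_pool`.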
pub(crate) fn free_pool(&self, buffer: *const u8) -> Result<(), ()> {
let mut db = self.pool_alloc_db.borrow_mut();
if let Some((pool_type, layout)) = db.allocations.remove(&buffer) {
Ok(db
.allocators
.get_mut(&pool_type)
.unwrap()
.deallocate(buffer, layout))
} else {
Err(())
}
}
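    /// Registers a caller-provided static buffer as the backing store for the
    /// pool of `pool_type`. Fails unless the buffer lies within an existing
    /// memory-map region of that same type; replaces any previous allocator
    /// for that pool type.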
pub fn declare_pool(
&self,
pool_type: EfiMemoryType,
pool: &'static mut [MaybeUninit<u8>],
) -> Result<(), ()> {
let phys = pool.as_ptr() as u64;
let num_pages = pool.len() >> EFI_PAGE_SHIFT;
let memmap = self.memmap.borrow();
memmap
.values()
.find(|&d| d.encompasses(phys, num_pages as u64) && d.r#type == pool_type)
.map_or(Err(()), |_| Ok(()))?;
let mut db = self.pool_alloc_db.borrow_mut();
let alloc = &mut db.allocators;
alloc.insert(pool_type, PoolAllocator::from_slice(pool_type, pool));
Ok(())
}
fn inc_map_key(&self) {
self.mapkey.fetch_add(1, Ordering::Release);
}
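    /// Builds a `MemoryAttributesTable` from every descriptor that yields a
    /// table entry, or returns `None` if a table was already generated for
    /// this `mapkey` (tracked by an atomic swap of `mem_attr_mapkey`).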
fn get_memattr_table(&self, mm: &MemMap, mapkey: usize) -> Option<MemoryAttributesTable<8>> {
if self.mem_attr_mapkey.swap(mapkey, Ordering::Acquire) == mapkey {
return None;
}
let vec = mm
.values()
.cloned()
.filter_map(|desc| desc.to_memattr_table_entry())
.collect::<Vec<_>>();
Some(MemoryAttributesTable::new(vec.as_slice()))
}
fn insert_region(&self, mm: &mut MemMap, desc: &EfiMemoryDescriptor) {
debug_assert!(desc.physical_start as usize & EFI_PAGE_MASK == 0);
        if mm.insert(desc.physical_start, *desc).is_some() {
            panic!("Conflicting entries in memory map!");
        }
}
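    /// Adds a region of `num_pages` pages at `phys` to the map, coalescing it
    /// with any adjacent region of identical type and attributes, and bumps
    /// the map key. Fails if `phys` is not page-aligned or the region
    /// intersects an existing entry.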
fn declare_region(
&self,
mm: &mut MemMap,
phys: u64,
num_pages: u64,
_type: EfiMemoryType,
attr: u64,
rtattr: u64,
) -> Result<(), ()> {
if phys & EFI_PAGE_MASK as u64 != 0 {
return Err(());
}
mm.values()
.find(|&d| d.intersects(phys, num_pages))
.map_or(Ok(()), |_| Err(()))?;
let num_pages = {
let mut l = num_pages;
mm.retain(|p, d| {
if *p == phys + (num_pages << EFI_PAGE_SHIFT)
&& d.r#type == _type
&& d.attribute == attr
{
l += d.number_of_pages;
false
} else {
true
}
});
l
};
if let Some(desc) = mm.values_mut().find(|d| {
d.physical_start + (d.number_of_pages << EFI_PAGE_SHIFT) == phys
&& d.r#type == _type
&& d.attribute == attr
}) {
desc.number_of_pages += num_pages;
} else {
let d = EfiMemoryDescriptor {
r#type: _type,
physical_start: phys,
virtual_start: 0,
number_of_pages: num_pages,
attribute: attr,
rt_attribute: rtattr,
};
self.insert_region(mm, &d);
}
self.inc_map_key();
Ok(())
}
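    /// Declares a range of system RAM as conventional (allocatable) memory.
    /// The range is expected to be page-aligned; partial pages are truncated.
    ///
    /// Sketch with a made-up address range:
    ///
    /// ```ignore
    /// // Make 16 MiB starting at 0x4000_0000 available for allocation.
    /// EFI.memmap.declare_memory_region(&(0x4000_0000..0x4100_0000)).unwrap();
    /// ```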
pub fn declare_memory_region(&self, range: &Range<usize>) -> Result<(), ()> {
let mut mm = self.memmap.borrow_mut();
let phys = range.start as PhysicalAddress;
let pages = (range.end - range.start) as u64 >> EFI_PAGE_SHIFT;
self.declare_region(
&mut mm,
phys,
pages,
EfiConventionalMemory,
EFI_MEMORY_WB,
0,
)
}
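    /// Declares a region that must persist into OS runtime: both the
    /// attribute and RT-attribute words are forced to include
    /// `EFI_MEMORY_RUNTIME`.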
pub fn declare_runtime_region(
&self,
range: &Range<usize>,
_type: EfiMemoryType,
attr: u64,
rtattr: u64,
) -> Result<(), ()> {
let mut mm = self.memmap.borrow_mut();
let phys = range.start as PhysicalAddress;
let pages = (range.end - range.start) as u64 >> EFI_PAGE_SHIFT;
        self.declare_region(
            &mut mm,
            phys,
            pages,
            _type,
            attr | EFI_MEMORY_RUNTIME,
            rtattr | EFI_MEMORY_RUNTIME,
        )
}
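    /// Splits the descriptor that strictly contains `phys` so that a new
    /// descriptor boundary falls exactly at `phys`; type and attributes are
    /// preserved on both halves. With `_type` set, only a descriptor of that
    /// type is considered.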
fn split_region(
&self,
mm: &mut MemMap,
phys: PhysicalAddress,
size: usize,
_type: Option<EfiMemoryType>,
) -> Result<(), ()> {
let desc = mm
.values_mut()
.find(|d| {
d.r#type == _type.unwrap_or(d.r#type)
&& d.physical_start < phys
&& d.physical_start + (d.number_of_pages << EFI_PAGE_SHIFT)
>= phys + size as u64
})
.ok_or(())?;
let num_pages = (phys - desc.physical_start) >> EFI_PAGE_SHIFT;
let d = EfiMemoryDescriptor {
r#type: desc.r#type,
physical_start: phys,
virtual_start: 0,
number_of_pages: desc.number_of_pages - num_pages,
attribute: desc.attribute,
rt_attribute: desc.rt_attribute,
};
desc.number_of_pages = num_pages;
self.insert_region(mm, &d);
self.inc_map_key();
Ok(())
}
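    /// Retypes `size` bytes starting at page-aligned `phys`, splitting the
    /// containing descriptor first if `phys` is not already a boundary. With
    /// `from` set, conversion fails unless the region currently has that
    /// type. The converted pages are carved off the front of the descriptor.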
pub(crate) fn convert_region(
&self,
phys: PhysicalAddress,
size: usize,
from: Option<EfiMemoryType>,
to: EfiMemoryType,
rtattr: u64,
) -> Result<(), ()> {
let pages = size as u64 >> EFI_PAGE_SHIFT;
let (attr, rtattr) = if to == EfiRuntimeServicesCode || to == EfiRuntimeServicesData {
(
EFI_MEMORY_RUNTIME | EFI_MEMORY_WB,
EFI_MEMORY_RUNTIME | rtattr,
)
} else {
(EFI_MEMORY_WB, rtattr)
};
if phys & EFI_PAGE_MASK as u64 != 0 {
return Err(());
}
let mut mm = self.memmap.borrow_mut();
if !mm.contains_key(&phys) {
self.split_region(&mut mm, phys, size, from)?;
}
let mut desc = mm.remove(&phys).unwrap();
if desc.r#type != from.unwrap_or(desc.r#type) || pages > desc.number_of_pages {
self.insert_region(&mut mm, &desc);
return Err(());
}
desc.number_of_pages -= pages;
desc.physical_start += size as u64;
if desc.number_of_pages > 0 {
self.insert_region(&mut mm, &desc);
}
self.declare_region(&mut mm, phys, pages, to, attr, rtattr)
}
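    /// Claims the given range of conventional memory as `_type`.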
pub fn allocate_region(
&self,
range: &Range<usize>,
_type: EfiMemoryType,
rtattr: u64,
) -> Result<(), ()> {
self.convert_region(
range.start as PhysicalAddress,
range.end - range.start,
Some(EfiConventionalMemory),
_type,
rtattr,
)
}
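    /// Returns previously allocated pages to conventional memory.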
    pub(crate) fn free_pages(&self, base: u64, pages: usize) -> Result<(), ()> {
        let size = pages << EFI_PAGE_SHIFT;
        self.convert_region(base, size, None, EfiConventionalMemory, 0)
    }
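    /// Allocates `pages` pages of conventional memory as `_type` and returns
    /// the allocation as an uninitialized byte slice. `Max`, `Anywhere` and
    /// `Aligned` placements are normalized to `MaxAlignMask`, which picks the
    /// highest fitting base; `Random` picks an aligned slot from the seed.
    ///
    /// Hypothetical usage sketch (`EfiLoaderData` stands in for any type):
    ///
    /// ```ignore
    /// // Four pages anywhere below 4 GiB.
    /// let buf = EFI
    ///     .memmap
    ///     .allocate_pages(4, EfiLoaderData, Placement::Max(0xffff_ffff))?;
    /// ```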
pub(crate) fn allocate_pages(
&self,
pages: usize,
_type: EfiMemoryType,
placement: Placement,
) -> Option<&'static mut [MaybeUninit<u8>]> {
let mm = self.memmap.borrow();
let p = pages as u64;
let placement = match placement {
Max(max) => MaxAlignMask(max, EFI_PAGE_MASK as u64),
Anywhere => MaxAlignMask(u64::MAX, EFI_PAGE_MASK as u64),
Aligned(align) => MaxAlignMask(u64::MAX, align - 1),
pl => pl,
};
let base = match placement {
            MaxAlignMask(max, mask) => {
                // Walk regions in ascending order and keep the last (highest)
                // candidate whose aligned base still fits below `max`.
                // `((start - 1) | mask) + 1` rounds `start` up to the
                // alignment (assumes start > 0).
                if let Some(desc) = mm
                    .values()
                    .take_while(|d| ((d.physical_start - 1) | mask) + (p << EFI_PAGE_SHIFT) <= max)
                    .filter(|d| {
                        // Pages required: the request plus alignment padding.
                        let num_pages =
                            p + ((mask - ((d.physical_start - 1) & mask)) >> EFI_PAGE_SHIFT);
                        d.r#type == EfiConventionalMemory && d.number_of_pages >= num_pages
                    })
                    .last()
                {
                    let highest_base = max - (p << EFI_PAGE_SHIFT) + 1;
                    let offset = (desc.number_of_pages - p) << EFI_PAGE_SHIFT;
                    // Place as high as possible inside the region, aligned down.
                    highest_base.min(desc.physical_start + offset) & !mask
                } else {
                    return None;
                }
            }
            Random(seed, align) => {
                let mask = align - 1;
                let mut slots: u64 = 0;
                // Enumerate every aligned candidate base across all usable
                // regions, assigning each region a contiguous range of slots.
                let descs: Vec<(Range<u64>, &EfiMemoryDescriptor)> = mm
                    .values()
                    .filter_map(|d| {
                        let num_pages =
                            p + ((mask - ((d.physical_start - 1) & mask)) >> EFI_PAGE_SHIFT);
                        if d.r#type == EfiConventionalMemory && d.number_of_pages >= num_pages {
                            let sl =
                                1 + ((d.number_of_pages - num_pages) << EFI_PAGE_SHIFT) / align;
                            let end = slots + sl;
                            let r = slots..end;
                            slots = end;
                            Some((r, d))
                        } else {
                            None
                        }
                    })
                    .collect();
                // Map the 32-bit seed onto [0, slots) with a fixed-point multiply.
                let index = (slots * seed as u64) >> 32;
                if let Some(entry) = descs
                    .into_iter()
                    .find(|e: &(Range<u64>, &EfiMemoryDescriptor)| e.0.contains(&index))
                {
                    let offset = (index - entry.0.start) * align;
                    // Aligned start of the chosen region plus the slot offset.
                    ((entry.1.physical_start - 1) | mask) + 1 + offset
                } else {
                    return None;
                }
            }
            Fixed(base) => base,
            _ => return None,
        };
drop(mm);
let size = pages << EFI_PAGE_SHIFT;
self.convert_region(base, size, Some(EfiConventionalMemory), _type, 0)
.ok()?;
unsafe {
Some(slice::from_raw_parts_mut(
base as *mut MaybeUninit<u8>,
size,
))
}
}
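    /// Copies the current memory map into `tbl`. If the map changed since the
    /// attributes table was last published, a new one is allocated and
    /// (re)installed as a configuration table first. Returns
    /// `(map_key, entry_count)`, or `None` if `tbl` is too small.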
pub(crate) fn get_memory_map(&self, tbl: &mut [EfiMemoryDescriptor]) -> Option<(usize, usize)> {
let (mm, key) = {
let mm = self.memmap.borrow();
let key = self.mapkey.load(Ordering::Acquire);
if let Some(table) = self.get_memattr_table(&mm, key) {
drop(mm);
let table = self.box_new(EfiACPIReclaimMemory, table).ok()?;
EFI.install_configtable(&EFI_MEMORY_ATTRIBUTES_TABLE_GUID, table);
(self.memmap.borrow(), self.mapkey.load(Ordering::Acquire))
} else {
(mm, key)
}
};
let vec = mm.values().cloned().collect::<Vec<_>>();
if tbl.len() < vec.len() {
return None;
}
tbl[..vec.len()].copy_from_slice(vec.as_slice());
Some((key, vec.len()))
}
pub(crate) fn len(&self) -> usize {
self.memmap.borrow().len()
}
pub(crate) fn key(&self) -> usize {
self.mapkey.load(Ordering::Relaxed)
}
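    /// Allocates a single `T` from the pool of `memtype` and moves `value`
    /// into it, returning an owning `PoolBox`.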
    pub(crate) fn box_new<T>(
        &self,
        memtype: EfiMemoryType,
        value: T,
    ) -> Result<PoolBox<T>, &'static str> {
        let p = self.allocate_pool::<T>(memtype, 1)?;
        // Move `value` in without reading or dropping the uninitialized
        // contents of the fresh allocation.
        unsafe {
            p.as_ptr().write(value);
        }
        Ok(PoolBox(Some(p)))
    }
}
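/// Owning pointer to a pool allocation; dereferences to `T` and returns the
/// memory to `EFI.memmap` when dropped, unless ownership was released with
/// `take`.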
pub(crate) struct PoolBox<T: ?Sized>(Option<NonNull<T>>);
impl<T> PoolBox<T> {
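    /// Releases ownership of the underlying allocation; after this, `Drop`
    /// will not free it.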
pub(crate) fn take(mut self) -> NonNull<T> {
self.0.take().unwrap()
}
}
impl<T: ?Sized> Drop for PoolBox<T> {
    fn drop(&mut self) {
        // `take()` may have transferred ownership; only free if still held.
        if let Some(p) = self.0 {
            EFI.memmap.free_pool(p.as_ptr() as *const u8).ok();
        }
    }
}
impl<T: ?Sized> Deref for PoolBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.0.unwrap().as_ref() }
}
}
impl<T: ?Sized> DerefMut for PoolBox<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { self.0.unwrap().as_mut() }
}
}