use crate::pecoff::{self, UefiPeInfo};
use alloc::{boxed::Box, slice, vec, vec::Vec};
use core::{fmt::Display, ptr};
use patina::{base::DEFAULT_CACHE_ATTR, error::EfiError};
use mu_rust_helpers::function;
use patina::{
base::{SIZE_4GB, UEFI_PAGE_MASK, UEFI_PAGE_SHIFT, UEFI_PAGE_SIZE, align_up},
guids::CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP,
pi::{
dxe_services::{self, GcdMemoryType},
hob::{self, EFiMemoryTypeInformation},
},
uefi_pages_to_size,
};
use patina_internal_collections::{Error as SliceError, Rbt, SliceKey, node_size};
use r_efi::efi;
use crate::{
GCD, allocator::DEFAULT_ALLOCATION_STRATEGY, ensure, error, events::EVENT_DB, protocol_db,
protocol_db::INVALID_HANDLE, tpl_lock,
};
use patina_internal_cpu::paging::{CacheAttributeValue, PatinaPageTable, create_cpu_paging};
use patina_paging::{MemoryAttributes, PtError, page_allocator::PageAllocator};
use patina::pi::hob::{Hob, HobList};
use super::{
io_block::{self, Error as IoBlockError, IoBlock, IoBlockSplit, StateTransition as IoStateTransition},
memory_block::{
self, Error as MemoryBlockError, MemoryBlock, MemoryBlockSplit, StateTransition as MemoryStateTransition,
},
};
/// Number of `MemoryBlock` nodes the initial block-tracking slice can hold.
const MEMORY_BLOCK_SLICE_LEN: usize = 4096;
/// Byte size of the storage carved out of the first added memory range to back
/// the memory-block tracking tree.
pub const MEMORY_BLOCK_SLICE_SIZE: usize = MEMORY_BLOCK_SLICE_LEN * node_size::<MemoryBlock>();
/// Number of `IoBlock` nodes the I/O-space tracking slice can hold.
const IO_BLOCK_SLICE_LEN: usize = 4096;
/// Byte size of the storage backing the I/O-block tracking tree.
const IO_BLOCK_SLICE_SIZE: usize = IO_BLOCK_SLICE_LEN * node_size::<IoBlock>();
/// Number of 4K pages fetched from the GCD per batch into the page-table page pool.
const PAGE_POOL_CAPACITY: usize = 512;
/// Errors produced internally while manipulating the block-tracking structures;
/// mapped to `EfiError` values at the public API boundary.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum InternalError {
    /// A memory-block split or state transition failed.
    MemoryBlock(MemoryBlockError),
    /// An I/O-block split or state transition failed.
    IoBlock(IoBlockError),
    /// The backing slice storage reported an error (e.g. out of space).
    Slice(SliceError),
}
/// Placement strategy for a GCD allocation request.
#[derive(Debug, Clone, Copy)]
pub enum AllocateType {
    /// Search upward from the lowest address; optional upper address bound.
    BottomUp(Option<usize>),
    /// Search downward from the highest address; optional upper address bound.
    TopDown(Option<usize>),
    /// Allocate at exactly this address or fail.
    Address(usize),
}
/// One row of the resource-attribute to GCD-capability conversion table.
#[derive(Clone, Copy)]
struct GcdAttributeConversionEntry {
    /// Resource HOB attribute flag to test.
    attribute: u32,
    /// Capability bit(s) contributed when the attribute flag is present.
    capability: u64,
    /// When `false`, the entry only applies to ranges that are not
    /// SystemMemory/MoreReliable (see `get_capabilities`).
    memory: bool,
}
/// Maps resource-descriptor HOB attribute flags to GCD capability bits,
/// following the PI-spec style conversion; iteration stops at the all-zero
/// terminator entry.
const ATTRIBUTE_CONVERSION_TABLE: [GcdAttributeConversionEntry; 15] = [
    // Cacheability capabilities.
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE,
        capability: efi::MEMORY_UC,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_UNCACHED_EXPORTED,
        capability: efi::MEMORY_UCE,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE,
        capability: efi::MEMORY_WC,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE,
        capability: efi::MEMORY_WT,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE,
        capability: efi::MEMORY_WB,
        memory: true,
    },
    // Protection capabilities.
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_READ_PROTECTABLE,
        capability: efi::MEMORY_RP,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTABLE,
        capability: efi::MEMORY_WP,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTABLE,
        capability: efi::MEMORY_XP,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTABLE,
        capability: efi::MEMORY_RO,
        memory: true,
    },
    // State flags: only meaningful for non-system-memory ranges (memory: false).
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT,
        capability: hob::EFI_MEMORY_PRESENT,
        memory: false,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_INITIALIZED,
        capability: hob::EFI_MEMORY_INITIALIZED,
        memory: false,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_TESTED,
        capability: hob::EFI_MEMORY_TESTED,
        memory: false,
    },
    // Miscellaneous memory characteristics.
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_PERSISTABLE,
        capability: hob::EFI_MEMORY_NV,
        memory: true,
    },
    GcdAttributeConversionEntry {
        attribute: hob::EFI_RESOURCE_ATTRIBUTE_MORE_RELIABLE,
        capability: hob::EFI_MEMORY_MORE_RELIABLE,
        memory: true,
    },
    // Zero terminator: iteration stops here.
    GcdAttributeConversionEntry { attribute: 0, capability: 0, memory: false },
];
/// Converts resource HOB attribute flags into GCD capability bits.
///
/// Walks the conversion table up to (not including) the zero-terminator entry.
/// An entry contributes its capability bit when its attribute flag is set in
/// `attributes` and either the entry applies to memory ranges (`memory == true`)
/// or the target GCD type is neither `SystemMemory` nor `MoreReliable`.
pub fn get_capabilities(gcd_mem_type: dxe_services::GcdMemoryType, attributes: u64) -> u64 {
    // Whether "non-memory" table entries apply to this range type.
    let non_system_memory = gcd_mem_type != dxe_services::GcdMemoryType::SystemMemory
        && gcd_mem_type != dxe_services::GcdMemoryType::MoreReliable;
    ATTRIBUTE_CONVERSION_TABLE
        .iter()
        .take_while(|entry| entry.attribute != 0)
        .filter(|entry| entry.memory || non_system_memory)
        .filter(|entry| attributes & (entry.attribute as u64) != 0)
        .fold(0u64, |caps, entry| caps | entry.capability)
}
/// Signature of the GCD memory-allocation entry point; the active function is
/// swapped for a null implementation at ExitBootServices to freeze the map.
type GcdAllocateFn = fn(
    gcd: &mut GCD,
    allocate_type: AllocateType,
    memory_type: dxe_services::GcdMemoryType,
    alignment: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
) -> Result<usize, EfiError>;
/// Signature of the GCD memory-free entry point; swapped for a no-op
/// implementation at ExitBootServices.
type GcdFreeFn =
    fn(gcd: &mut GCD, base_address: usize, len: usize, transition: MemoryStateTransition) -> Result<(), EfiError>;
/// Page allocator backing the CPU page tables; serves 4K pages from a local
/// pool that is refilled from the GCD in `PAGE_POOL_CAPACITY`-page batches.
#[derive(Debug)]
struct PagingAllocator<'a> {
    // Pre-allocated 4K page addresses available to hand out for page tables.
    page_pool: Vec<efi::PhysicalAddress>,
    // GCD used to (re)fill the pool.
    gcd: &'a SpinLockedGcd,
}
impl<'a> PagingAllocator<'a> {
    /// Creates an allocator with an empty pool whose capacity is pre-reserved
    /// so refills do not reallocate the vector.
    fn new(gcd: &'a SpinLockedGcd) -> Self {
        Self { page_pool: Vec::with_capacity(PAGE_POOL_CAPACITY), gcd }
    }
}
impl PageAllocator for PagingAllocator<'_> {
    /// Hands out one 4K page for page-table use.
    ///
    /// Only requests with 4K alignment and 4K size are supported. The root
    /// table page is allocated directly from the GCD below 4GB when possible
    /// (presumably so the root is reachable with 32-bit addressing -- TODO
    /// confirm), falling back to the default placement strategy. Non-root
    /// pages come from the local pool, refilled in batches.
    ///
    /// Panics if the GCD cannot supply pages at all, since page tables are
    /// essential to continue execution.
    fn allocate_page(&mut self, align: u64, size: u64, is_root: bool) -> Result<u64, PtError> {
        if align != UEFI_PAGE_SIZE as u64 || size != UEFI_PAGE_SIZE as u64 {
            log::error!("Invalid alignment or size for page allocation: align: {align:#x}, size: {size:#x}");
            return Err(PtError::InvalidParameter);
        }
        if is_root {
            let len = 1;
            // Upper bound for the bottom-up search: last address below 4GB.
            let addr: u64 = (SIZE_4GB - 1) as u64;
            let res = self.gcd.memory.lock().allocate_memory_space(
                AllocateType::BottomUp(Some(addr as usize)),
                dxe_services::GcdMemoryType::SystemMemory,
                UEFI_PAGE_SHIFT,
                uefi_pages_to_size!(len),
                protocol_db::EFI_BOOT_SERVICES_DATA_ALLOCATOR_HANDLE,
                None,
            );
            match res {
                Ok(root_page) => Ok(root_page as u64),
                Err(_) => {
                    // No memory below 4GB; retry with the default strategy.
                    log::error!(
                        "Failed to allocate root page for the page table page pool, retrying with normal allocation"
                    );
                    match self.gcd.memory.lock().allocate_memory_space(
                        DEFAULT_ALLOCATION_STRATEGY,
                        dxe_services::GcdMemoryType::SystemMemory,
                        UEFI_PAGE_SHIFT,
                        uefi_pages_to_size!(len),
                        protocol_db::EFI_BOOT_SERVICES_DATA_ALLOCATOR_HANDLE,
                        None,
                    ) {
                        Ok(root_page) => Ok(root_page as u64),
                        Err(e) => {
                            // Unrecoverable: cannot build page tables without a root.
                            panic!("Failed to allocate root page for the page table page pool: {e:?}");
                        }
                    }
                }
            }
        } else {
            match self.page_pool.pop() {
                Some(page) => Ok(page),
                None => {
                    // Pool exhausted: allocate a fresh batch and split it into
                    // individual 4K pages.
                    let len = PAGE_POOL_CAPACITY;
                    match self.gcd.memory.lock().allocate_memory_space(
                        DEFAULT_ALLOCATION_STRATEGY,
                        dxe_services::GcdMemoryType::SystemMemory,
                        UEFI_PAGE_SHIFT,
                        uefi_pages_to_size!(len),
                        protocol_db::EFI_BOOT_SERVICES_DATA_ALLOCATOR_HANDLE,
                        None,
                    ) {
                        Ok(addr) => {
                            for i in 0..len {
                                self.page_pool.push(addr as u64 + ((i * UEFI_PAGE_SIZE) as u64));
                            }
                            self.page_pool.pop().ok_or(PtError::OutOfResources)
                        }
                        Err(e) => {
                            // Unrecoverable: page-table growth cannot proceed.
                            panic!("Failed to allocate pages for the page table page pool {e:?}");
                        }
                    }
                }
            }
        }
    }
}
/// Global Coherency Domain: tracks the memory address space as a set of
/// contiguous, non-overlapping blocks kept in a red-black tree.
#[allow(clippy::upper_case_acronyms)]
struct GCD {
    // One past the highest addressable byte; 0 until `init` is called.
    maximum_address: usize,
    // Block-tracking tree; its storage is carved from the first added range.
    memory_blocks: Rbt<'static, MemoryBlock>,
    // Active allocation entry point (swapped for a null impl at EBS).
    allocate_memory_space_fn: GcdAllocateFn,
    // Active free entry point (swapped for a null impl at EBS).
    free_memory_space_fn: GcdFreeFn,
    // Attributes applied by default (XP); cleared in compatibility mode.
    default_attributes: u64,
    // When set, top-down allocations first try to stay below 4GB.
    prioritize_32_bit_memory: bool,
}
impl GCD {
    /// Reports whether the GCD has been initialized; a nonzero maximum address
    /// is only ever established by `init` (or the test-only `new`).
    pub fn is_ready(&self) -> bool {
        self.maximum_address > 0
    }
}
/// Debug output limited to the fields useful for diagnostics; function
/// pointers and flags are intentionally omitted.
impl core::fmt::Debug for GCD {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("GCD")
            .field("maximum_address", &self.maximum_address)
            .field("memory_blocks", &self.memory_blocks)
            .finish()
    }
}
impl GCD {
#[cfg(test)]
pub(crate) const fn new(processor_address_bits: u32) -> Self {
assert!(processor_address_bits > 0);
Self {
memory_blocks: Rbt::new(),
maximum_address: 1 << processor_address_bits,
allocate_memory_space_fn: Self::allocate_memory_space_internal,
free_memory_space_fn: Self::free_memory_space_worker,
default_attributes: efi::MEMORY_XP,
prioritize_32_bit_memory: false,
}
}
    /// Installs the no-op alloc/free entry points so the memory map cannot
    /// change once ExitBootServices begins.
    pub fn lock_memory_space(&mut self) {
        self.allocate_memory_space_fn = Self::allocate_memory_space_null;
        self.free_memory_space_fn = Self::free_memory_space_worker_null;
        log::info!("Disallowing alloc/free during ExitBootServices.");
    }
    /// Restores the real alloc/free entry points (e.g. if ExitBootServices
    /// fails partway through).
    pub fn unlock_memory_space(&mut self) {
        self.allocate_memory_space_fn = Self::allocate_memory_space_internal;
        self.free_memory_space_fn = Self::free_memory_space_worker;
    }
    /// Records the processor's addressable range; the GCD is "ready" afterwards.
    // NOTE(review): shifts without the `processor_address_bits > 0` assert that
    // `new` performs; a width of 0 or >= usize::BITS would misbehave -- confirm
    // callers validate the value before calling.
    pub fn init(&mut self, processor_address_bits: u32) {
        self.maximum_address = 1 << processor_address_bits;
    }
    /// Bootstraps the block-tracking storage from the first added memory range.
    ///
    /// The first `MEMORY_BLOCK_SLICE_SIZE` bytes of the range become the backing
    /// slice for the red-black tree itself, then the range is registered,
    /// attributed, and the tracking region is allocated to boot-services data.
    ///
    /// ## Safety
    ///
    /// `base_address..base_address + len` must be real, writable system memory;
    /// this function creates a `'static` mutable slice over it.
    unsafe fn init_memory_blocks(
        &mut self,
        memory_type: dxe_services::GcdMemoryType,
        base_address: usize,
        len: usize,
        capabilities: u64,
    ) -> Result<usize, EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        // The first range must be system memory big enough to host the tree.
        ensure!(
            memory_type == dxe_services::GcdMemoryType::SystemMemory && len >= MEMORY_BLOCK_SLICE_SIZE,
            EfiError::OutOfResources
        );
        log::trace!(target: "allocations", "[{}] Initializing memory blocks at {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Memory Type: {:?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Capabilities: {:#x}", function!(), capabilities);
        // Seed the tree with a single NonExistent block spanning the entire
        // address space; subsequent adds carve real ranges out of it.
        let unallocated_memory_space = MemoryBlock::Unallocated(dxe_services::MemorySpaceDescriptor {
            memory_type: dxe_services::GcdMemoryType::NonExistent,
            base_address: 0,
            length: self.maximum_address as u64,
            ..Default::default()
        });
        self.memory_blocks
            .resize(unsafe { slice::from_raw_parts_mut::<'static>(base_address as *mut u8, MEMORY_BLOCK_SLICE_SIZE) });
        self.memory_blocks.add(unallocated_memory_space).map_err(|_| EfiError::OutOfResources)?;
        let idx = unsafe { self.add_memory_space(memory_type, base_address, len, capabilities) }?;
        // NotReady is tolerated here: paging may not be up yet this early.
        match self.set_memory_space_attributes(
            base_address,
            len,
            (MemoryAttributes::Writeback | MemoryAttributes::ExecuteProtect).bits(),
        ) {
            Ok(_) | Err(EfiError::NotReady) => Ok(()),
            Err(err) => Err(err),
        }?;
        // Claim the tracking-storage region itself as boot-services data.
        self.allocate_memory_space(
            AllocateType::Address(base_address),
            dxe_services::GcdMemoryType::SystemMemory,
            UEFI_PAGE_SHIFT,
            MEMORY_BLOCK_SLICE_SIZE,
            protocol_db::EFI_BOOT_SERVICES_DATA_ALLOCATOR_HANDLE,
            None,
        )?;
        if len > MEMORY_BLOCK_SLICE_SIZE {
            // Guard the unallocated remainder with read protection.
            match self.set_memory_space_attributes(
                base_address + MEMORY_BLOCK_SLICE_SIZE,
                len - MEMORY_BLOCK_SLICE_SIZE,
                (MemoryAttributes::Writeback | MemoryAttributes::ReadProtect).bits(),
            ) {
                Ok(_) | Err(EfiError::NotReady) => Ok(()),
                Err(err) => Err(err),
            }?;
        }
        Ok(idx)
    }
    /// Adds a memory range to GCD management.
    ///
    /// The first system-memory range added also bootstraps the GCD's own
    /// block-tracking storage (see `init_memory_blocks`).
    ///
    /// ## Safety
    ///
    /// The caller must ensure the described range actually exists and is
    /// usable as the given memory type.
    ///
    /// ## Errors
    ///
    /// * `NotReady` - GCD not initialized.
    /// * `InvalidParameter` - zero length or invalid state transition.
    /// * `Unsupported` - range overflows or exceeds the maximum address.
    /// * `AccessDenied` - range overlaps memory that already exists.
    /// * `OutOfResources` - no space for tracking nodes.
    pub unsafe fn add_memory_space(
        &mut self,
        memory_type: dxe_services::GcdMemoryType,
        base_address: usize,
        len: usize,
        mut capabilities: u64,
    ) -> Result<usize, EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(len > 0, EfiError::InvalidParameter);
        ensure!(base_address.checked_add(len).is_some_and(|sum| sum <= self.maximum_address), EfiError::Unsupported);
        log::trace!(target: "allocations", "[{}] Adding memory space at {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Memory Type: {:?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Capabilities: {:#x}\n", function!(), capabilities);
        // Every range supports access-protection attributes and runtime mapping.
        capabilities |= efi::MEMORY_ACCESS_MASK | efi::MEMORY_RUNTIME;
        if memory_type == dxe_services::GcdMemoryType::MemoryMappedIo {
            capabilities |= efi::MEMORY_ISA_VALID;
        }
        // First add ever: bootstrap the tracking storage from this range.
        if self.memory_blocks.capacity() == 0 {
            return unsafe { self.init_memory_blocks(memory_type, base_address, len, capabilities) };
        }
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
        let block = memory_blocks.get_with_idx(idx).ok_or(EfiError::NotFound)?;
        // Can only add into space not already occupied by an existing range.
        ensure!(block.as_ref().memory_type == dxe_services::GcdMemoryType::NonExistent, EfiError::AccessDenied);
        match Self::split_state_transition_at_idx(
            memory_blocks,
            idx,
            base_address,
            len,
            // New ranges start read-protected until explicitly attributed.
            MemoryStateTransition::Add(memory_type, capabilities, efi::MEMORY_RP),
        ) {
            Ok(idx) => Ok(idx),
            Err(InternalError::MemoryBlock(MemoryBlockError::BlockOutsideRange)) => error!(EfiError::AccessDenied),
            Err(InternalError::MemoryBlock(MemoryBlockError::InvalidStateTransition)) => {
                error!(EfiError::InvalidParameter)
            }
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
    }
pub fn remove_memory_space(&mut self, base_address: usize, len: usize) -> Result<(), EfiError> {
ensure!(self.maximum_address != 0, EfiError::NotReady);
ensure!(len > 0, EfiError::InvalidParameter);
ensure!(base_address + len <= self.maximum_address, EfiError::Unsupported);
log::trace!(target: "allocations", "[{}] Removing memory space at {:#x} of length {:#x}", function!(), base_address, len);
let memory_blocks = &mut self.memory_blocks;
log::trace!(target: "gcd_measure", "search");
let idx = memory_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
let block = *memory_blocks.get_with_idx(idx).ok_or(EfiError::NotFound)?;
match Self::split_state_transition_at_idx(memory_blocks, idx, base_address, len, MemoryStateTransition::Remove)
{
Ok(_) => Ok(()),
Err(InternalError::MemoryBlock(MemoryBlockError::BlockOutsideRange)) => error!(EfiError::NotFound),
Err(InternalError::MemoryBlock(MemoryBlockError::InvalidStateTransition)) => match block {
MemoryBlock::Unallocated(_) => error!(EfiError::NotFound),
MemoryBlock::Allocated(_) => error!(EfiError::AccessDenied),
},
Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
Err(e) => panic!("{e:?}"),
}
}
    /// Allocation entry point: dispatches through the current function pointer
    /// so allocation can be disabled wholesale during ExitBootServices.
    fn allocate_memory_space(
        &mut self,
        allocate_type: AllocateType,
        memory_type: dxe_services::GcdMemoryType,
        alignment: usize,
        len: usize,
        image_handle: efi::Handle,
        device_handle: Option<efi::Handle>,
    ) -> Result<usize, EfiError> {
        (self.allocate_memory_space_fn)(self, allocate_type, memory_type, alignment, len, image_handle, device_handle)
    }
fn allocate_memory_space_internal(
gcd: &mut GCD,
allocate_type: AllocateType,
memory_type: dxe_services::GcdMemoryType,
alignment: usize,
len: usize,
image_handle: efi::Handle,
device_handle: Option<efi::Handle>,
) -> Result<usize, EfiError> {
ensure!(gcd.maximum_address != 0, EfiError::NotReady);
ensure!(
len > 0 && image_handle > ptr::null_mut() && memory_type != dxe_services::GcdMemoryType::Unaccepted,
EfiError::InvalidParameter
);
log::trace!(target: "allocations", "[{}] Allocating memory space: {:x?}", function!(), allocate_type);
log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
log::trace!(target: "allocations", "[{}] Memory Type: {:?}", function!(), memory_type);
log::trace!(target: "allocations", "[{}] Alignment: {:#x}", function!(), alignment);
log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
match allocate_type {
AllocateType::BottomUp(max_address) => gcd.allocate_bottom_up(
memory_type,
alignment,
len,
image_handle,
device_handle,
max_address.unwrap_or(usize::MAX),
),
AllocateType::TopDown(max_address) => gcd.allocate_top_down(
memory_type,
alignment,
len,
image_handle,
device_handle,
max_address.unwrap_or(usize::MAX),
),
AllocateType::Address(address) => {
ensure!(address + len <= gcd.maximum_address, EfiError::NotFound);
gcd.allocate_address(memory_type, alignment, len, image_handle, device_handle, address)
}
}
}
    /// Replacement allocation entry point installed at ExitBootServices:
    /// always fails (and trips a debug assert) because the map is frozen.
    fn allocate_memory_space_null(
        _gcd: &mut GCD,
        _allocate_type: AllocateType,
        _memory_type: dxe_services::GcdMemoryType,
        _alignment: usize,
        _len: usize,
        _image_handle: efi::Handle,
        _device_handle: Option<efi::Handle>,
    ) -> Result<usize, EfiError> {
        log::error!("GCD not allowed to allocate after EBS has started!");
        debug_assert!(false);
        Err(EfiError::AccessDenied)
    }
    /// Shared implementation for freeing a page-aligned range: performs the
    /// requested state transition, then re-applies read protection to the
    /// freed range while preserving its cache attributes.
    fn free_memory_space_worker(
        &mut self,
        base_address: usize,
        len: usize,
        transition: MemoryStateTransition,
    ) -> Result<(), EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(len > 0, EfiError::InvalidParameter);
        // NOTE(review): unchecked `base_address + len` can overflow here;
        // add_memory_space uses checked_add for the same validation -- consider
        // aligning the two.
        ensure!(base_address + len <= self.maximum_address, EfiError::Unsupported);
        ensure!((base_address & UEFI_PAGE_MASK) == 0 && (len & UEFI_PAGE_MASK) == 0, EfiError::InvalidParameter);
        log::trace!(target: "allocations", "[{}] Freeing memory space at {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Memory State Transition: {:?}\n", function!(), transition);
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
        match Self::split_state_transition_at_idx(memory_blocks, idx, base_address, len, transition) {
            Ok(_) => {}
            Err(InternalError::MemoryBlock(_)) => error!(EfiError::NotFound),
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
        // Re-protect the freed range (RP) but keep its cache attributes intact.
        let desc = self.get_memory_descriptor_for_address(base_address as efi::PhysicalAddress)?;
        match self.set_gcd_memory_attributes(
            base_address,
            len,
            efi::MEMORY_RP | (desc.attributes & efi::CACHE_ATTRIBUTE_MASK),
        ) {
            Ok(_) => Ok(()),
            Err(e) => {
                log::error!(
                    "Failed to set memory attributes for {:#x?} of length {:#x?} with attributes {:#x?}. Status: {:#x?}",
                    base_address,
                    len,
                    efi::MEMORY_RP,
                    e
                );
                debug_assert!(false);
                Err(e)
            }
        }
    }
    /// Replacement free entry point installed at ExitBootServices: logs and
    /// reports success without changing anything, since callers cannot
    /// meaningfully handle a failed free at that point.
    fn free_memory_space_worker_null(
        _gcd: &mut GCD,
        _base_address: usize,
        _len: usize,
        _transition: MemoryStateTransition,
    ) -> Result<(), EfiError> {
        log::error!("GCD not allowed to free after EBS has started! Silently failing, returning success");
        Ok(())
    }
    /// Finds the lowest suitable address for an allocation by scanning blocks
    /// in ascending address order. Address 0 is never handed out.
    fn allocate_bottom_up(
        &mut self,
        memory_type: dxe_services::GcdMemoryType,
        align_shift: usize,
        len: usize,
        image_handle: efi::Handle,
        device_handle: Option<efi::Handle>,
        max_address: usize,
    ) -> Result<usize, EfiError> {
        ensure!(len > 0, EfiError::InvalidParameter);
        log::trace!(target: "allocations", "[{}] Bottom up GCD allocation: {:#?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Max Address: {:#x}", function!(), max_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Align Shift: {:#x}", function!(), align_shift);
        log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
        log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
        let memory_blocks = &mut self.memory_blocks;
        let alignment = 1 << align_shift;
        log::trace!(target: "gcd_measure", "search");
        let mut current = memory_blocks.first_idx();
        while let Some(idx) = current {
            let mb = memory_blocks.get_with_idx(idx).expect("idx is valid from next_idx");
            // Skip blocks too small to hold the request at all.
            if mb.len() < len {
                current = memory_blocks.next_idx(idx);
                continue;
            }
            let address = mb.start();
            // Align the block start: round down to the alignment, then bump up
            // one alignment unit if that fell below the block.
            let mut addr = address & (usize::MAX << align_shift);
            if addr < address {
                addr += alignment;
            }
            // Blocks are visited in ascending order, so once a candidate
            // exceeds max_address no later block can fit either.
            ensure!(addr + len <= max_address, EfiError::NotFound);
            if mb.as_ref().memory_type != memory_type {
                current = memory_blocks.next_idx(idx);
                continue;
            }
            // Never allocate address 0: start at the first aligned address at
            // or above one page, and re-check that the request still fits.
            if addr == 0 {
                addr = align_up(UEFI_PAGE_SIZE, alignment)?;
                if addr + len >= max_address || mb.len() - addr < len {
                    current = memory_blocks.next_idx(idx);
                    continue;
                }
            }
            match Self::split_state_transition_at_idx(
                memory_blocks,
                idx,
                addr,
                len,
                MemoryStateTransition::AllocateRespectingOwnership(image_handle, device_handle),
            ) {
                Ok(_) => return Ok(addr),
                // This block can't take the transition; try the next one.
                Err(InternalError::MemoryBlock(_)) => {
                    current = memory_blocks.next_idx(idx);
                    continue;
                }
                Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
                Err(e) => panic!("{e:?}"),
            }
        }
        // Unconstrained search exhausting all blocks means genuinely no memory.
        if max_address == usize::MAX { Err(EfiError::OutOfResources) } else { Err(EfiError::NotFound) }
    }
    /// Finds the highest suitable address at or below `max_address` by scanning
    /// blocks in descending order. When `prioritize_32_bit_memory` is set, a
    /// below-4GB attempt is made first. Address 0 is never handed out.
    fn allocate_top_down(
        &mut self,
        memory_type: dxe_services::GcdMemoryType,
        align_shift: usize,
        len: usize,
        image_handle: efi::Handle,
        device_handle: Option<efi::Handle>,
        max_address: usize,
    ) -> Result<usize, EfiError> {
        ensure!(len > 0, EfiError::InvalidParameter);
        // Optional first pass constrained below 4GB; fall through on failure.
        if self.prioritize_32_bit_memory && max_address > u32::MAX as usize {
            match self.allocate_top_down(memory_type, align_shift, len, image_handle, device_handle, u32::MAX as usize)
            {
                Ok(addr) => return Ok(addr),
                Err(error) => {
                    log::trace!(target: "allocations", "[{}] Top down GCD low memory attempt failed: {:?}", function!(), error);
                }
            }
        }
        log::trace!(target: "allocations", "[{}] Top down GCD allocation: {:#?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Max Address: {:#x}", function!(), max_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Align Shift: {:#x}", function!(), align_shift);
        log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
        log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        // Start from the block containing max_address and walk backwards.
        let mut current = memory_blocks.get_closest_idx(&(max_address as u64));
        while let Some(idx) = current {
            let mb = memory_blocks.get_with_idx(idx).expect("idx is valid from prev_idx");
            // Clamp the usable extent of this block to max_address.
            let usable_len =
                if mb.end() - 1 > max_address { max_address.checked_sub(mb.start()).unwrap() + 1 } else { mb.len() };
            if usable_len < len {
                current = memory_blocks.prev_idx(idx);
                continue;
            }
            // Highest aligned address where the request still fits.
            let addr = (mb.start() + usable_len - len) & (usize::MAX << align_shift);
            if addr < mb.start() {
                current = memory_blocks.prev_idx(idx);
                continue;
            }
            if mb.as_ref().memory_type != memory_type {
                current = memory_blocks.prev_idx(idx);
                continue;
            }
            // Reached address 0: refuse to hand it out; nothing lower exists.
            if addr == 0 {
                break;
            }
            match Self::split_state_transition_at_idx(
                memory_blocks,
                idx,
                addr,
                len,
                MemoryStateTransition::AllocateRespectingOwnership(image_handle, device_handle),
            ) {
                Ok(_) => return Ok(addr),
                // This block can't take the transition; try the previous one.
                Err(InternalError::MemoryBlock(_)) => {
                    current = memory_blocks.prev_idx(idx);
                    continue;
                }
                Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
                Err(e) => panic!("{e:?}"),
            }
        }
        // Unconstrained search exhausting all blocks means genuinely no memory.
        if max_address == usize::MAX { Err(EfiError::OutOfResources) } else { Err(EfiError::NotFound) }
    }
    /// Allocates at exactly `address`, which must be suitably aligned and lie
    /// inside a block of the requested memory type.
    fn allocate_address(
        &mut self,
        memory_type: dxe_services::GcdMemoryType,
        align_shift: usize,
        len: usize,
        image_handle: efi::Handle,
        device_handle: Option<efi::Handle>,
        address: usize,
    ) -> Result<usize, EfiError> {
        ensure!(len > 0, EfiError::InvalidParameter);
        log::trace!(target: "allocations", "[{}] Exact address GCD allocation: {:#?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Address: {:#x}", function!(), address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Memory Type: {:?}", function!(), memory_type);
        log::trace!(target: "allocations", "[{}] Align Shift: {:#x}", function!(), align_shift);
        log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
        log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(address as u64)).ok_or(EfiError::NotFound)?;
        let block = memory_blocks.get_with_idx(idx).ok_or(EfiError::NotFound)?;
        // The containing block must match the requested type and the address
        // must already satisfy the alignment (masking changes nothing).
        ensure!(
            block.as_ref().memory_type == memory_type && address == address & (usize::MAX << align_shift),
            EfiError::NotFound
        );
        match Self::split_state_transition_at_idx(
            memory_blocks,
            idx,
            address,
            len,
            MemoryStateTransition::Allocate(image_handle, device_handle),
        ) {
            Ok(_) => Ok(address),
            Err(InternalError::MemoryBlock(_)) => error!(EfiError::NotFound),
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
    }
    /// Frees a range, clearing its ownership information.
    pub fn free_memory_space(&mut self, base_address: usize, len: usize) -> Result<(), EfiError> {
        (self.free_memory_space_fn)(self, base_address, len, MemoryStateTransition::Free)
    }
    /// Frees a range but keeps the recorded owner (image/device handles).
    pub fn free_memory_space_preserving_ownership(&mut self, base_address: usize, len: usize) -> Result<(), EfiError> {
        (self.free_memory_space_fn)(self, base_address, len, MemoryStateTransition::FreePreservingOwnership)
    }
pub fn set_memory_space_attributes(
&mut self,
base_address: usize,
len: usize,
attributes: u64,
) -> Result<(), EfiError> {
ensure!(self.maximum_address != 0, EfiError::NotReady);
ensure!(len > 0, EfiError::InvalidParameter);
ensure!(base_address + len <= self.maximum_address, EfiError::Unsupported);
ensure!((base_address & UEFI_PAGE_MASK) == 0 && (len & UEFI_PAGE_MASK) == 0, EfiError::InvalidParameter);
self.set_gcd_memory_attributes(base_address, len, attributes)
}
    /// Records attributes on the GCD blocks covering the range (no paging
    /// changes here; this only updates the tracking state).
    fn set_gcd_memory_attributes(&mut self, base_address: usize, len: usize, attributes: u64) -> Result<(), EfiError> {
        log::trace!(target: "allocations", "[{}] Setting memory space attributes for {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Attributes: {:#x}\n", function!(), attributes);
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
        match Self::split_state_transition_at_idx(
            memory_blocks,
            idx,
            base_address,
            len,
            MemoryStateTransition::SetAttributes(attributes),
        ) {
            Ok(_) => Ok(()),
            // Attribute failures here indicate a caller bug; surface loudly.
            Err(InternalError::MemoryBlock(e)) => {
                log::error!(
                    "GCD failed to set attributes on range {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?}. error {e:?}",
                );
                debug_assert!(false);
                error!(EfiError::Unsupported)
            }
            Err(InternalError::Slice(SliceError::OutOfSpace)) => {
                log::error!(
                    "GCD failed to set attributes on range {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?} due to space",
                );
                debug_assert!(false);
                error!(EfiError::OutOfResources)
            }
            Err(e) => panic!("{e:?}"),
        }
    }
    /// Sets the capability mask on a page-aligned range (what attributes the
    /// range *can* support, not what is currently applied).
    pub fn set_memory_space_capabilities(
        &mut self,
        base_address: usize,
        len: usize,
        capabilities: u64,
    ) -> Result<(), EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(len > 0, EfiError::InvalidParameter);
        // NOTE(review): unchecked `base_address + len` can overflow; see
        // add_memory_space which uses checked_add for the same validation.
        ensure!(base_address + len <= self.maximum_address, EfiError::Unsupported);
        ensure!((base_address & UEFI_PAGE_MASK) == 0 && (len & UEFI_PAGE_MASK) == 0, EfiError::InvalidParameter);
        log::trace!(target: "allocations", "[{}] Setting memory space capabilities for {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Capabilities: {:#x}\n", function!(), capabilities);
        let memory_blocks = &mut self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
        match Self::split_state_transition_at_idx(
            memory_blocks,
            idx,
            base_address,
            len,
            MemoryStateTransition::SetCapabilities(capabilities),
        ) {
            Ok(_) => Ok(()),
            Err(InternalError::MemoryBlock(_)) => error!(EfiError::Unsupported),
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
    }
    /// Copies every memory descriptor (allocated and unallocated) into
    /// `buffer`, which must be empty and pre-reserved to at least
    /// `memory_descriptor_count()` so no reallocation occurs while iterating.
    pub fn get_memory_descriptors(
        &mut self,
        buffer: &mut Vec<dxe_services::MemorySpaceDescriptor>,
    ) -> Result<(), EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(buffer.capacity() >= self.memory_descriptor_count(), EfiError::InvalidParameter);
        ensure!(buffer.is_empty(), EfiError::InvalidParameter);
        log::trace!(target: "allocations", "[{}] Enter\n", function!(), );
        let blocks = &self.memory_blocks;
        let mut current = blocks.first_idx();
        while let Some(idx) = current {
            let mb = blocks.get_with_idx(idx).expect("idx is valid from next_idx");
            match mb {
                MemoryBlock::Allocated(descriptor) | MemoryBlock::Unallocated(descriptor) => buffer.push(*descriptor),
            }
            current = blocks.next_idx(idx);
        }
        Ok(())
    }
    /// Copies only the descriptors of currently-allocated blocks into `buffer`
    /// (same empty + pre-reserved requirements as `get_memory_descriptors`).
    fn get_allocated_memory_descriptors(
        &self,
        buffer: &mut Vec<dxe_services::MemorySpaceDescriptor>,
    ) -> Result<(), EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(buffer.capacity() >= self.memory_descriptor_count(), EfiError::InvalidParameter);
        ensure!(buffer.is_empty(), EfiError::InvalidParameter);
        let blocks = &self.memory_blocks;
        let mut current = blocks.first_idx();
        while let Some(idx) = current {
            let mb = blocks.get_with_idx(idx).expect("idx is valid from next_idx");
            if let MemoryBlock::Allocated(descriptor) = mb {
                buffer.push(*descriptor);
            }
            current = blocks.next_idx(idx);
        }
        Ok(())
    }
    /// Copies descriptors of *unallocated* MMIO and Reserved blocks into
    /// `buffer`. Unlike the sibling getters, no capacity precheck is done, so
    /// `buffer` may grow while iterating -- presumably intentional since the
    /// count cannot be known up front; confirm callers accept reallocation.
    fn get_mmio_and_reserved_descriptors(
        &self,
        buffer: &mut Vec<dxe_services::MemorySpaceDescriptor>,
    ) -> Result<(), EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        ensure!(buffer.is_empty(), EfiError::InvalidParameter);
        let blocks = &self.memory_blocks;
        let mut current = blocks.first_idx();
        while let Some(idx) = current {
            let mb = blocks.get_with_idx(idx).expect("idx is valid from next_idx");
            if let MemoryBlock::Unallocated(descriptor) = mb
                && (descriptor.memory_type == dxe_services::GcdMemoryType::MemoryMappedIo
                    || descriptor.memory_type == dxe_services::GcdMemoryType::Reserved)
            {
                buffer.push(*descriptor);
            }
            current = blocks.next_idx(idx);
        }
        Ok(())
    }
    /// Returns a copy of the descriptor for the block containing `address`.
    pub fn get_memory_descriptor_for_address(
        &mut self,
        address: efi::PhysicalAddress,
    ) -> Result<dxe_services::MemorySpaceDescriptor, EfiError> {
        ensure!(self.maximum_address != 0, EfiError::NotReady);
        let memory_blocks = &self.memory_blocks;
        log::trace!(target: "gcd_measure", "search");
        let idx = memory_blocks.get_closest_idx(&(address)).ok_or(EfiError::NotFound)?;
        let mb = memory_blocks.get_with_idx(idx).expect("idx is valid from get_closest_idx");
        match mb {
            MemoryBlock::Allocated(descriptor) | MemoryBlock::Unallocated(descriptor) => Ok(*descriptor),
        }
    }
    /// Splits the block at `idx` around `[base_address, base_address + len)`,
    /// applies `transition` to that sub-range, inserts any split remainders as
    /// new blocks, then coalesces the result with compatible neighbors.
    ///
    /// Returns the index of the block now covering `base_address`. If inserting
    /// a remainder fails, the original block is restored so the tree is left
    /// unchanged.
    fn split_state_transition_at_idx(
        memory_blocks: &mut Rbt<MemoryBlock>,
        idx: usize,
        base_address: usize,
        len: usize,
        transition: MemoryStateTransition,
    ) -> Result<usize, InternalError> {
        // Snapshot for rollback in case a remainder insert fails below.
        let mb_before_split = *memory_blocks.get_with_idx(idx).expect("Caller should ensure idx is valid.");
        log::trace!(target: "allocations", "[{}] Splitting memory block at {:#x}", function!(), base_address);
        log::trace!(target: "allocations", "[{}] Total Memory Blocks Right Now: {:#}", function!(), memory_blocks.len());
        log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
        log::trace!(target: "allocations", "[{}] Block Index: {:#x}", function!(), idx);
        log::trace!(target: "allocations", "[{}] Transition:\n {:#?}", function!(), transition);
        // Mutate the block in place; the split result tells us which remainder
        // blocks (before/after the transitioned sub-range) must be inserted.
        let new_idx = unsafe {
            match memory_blocks.get_with_idx_mut(idx).expect("idx valid above").split_state_transition(
                base_address,
                len,
                transition,
            )? {
                // Range covered the whole block; nothing to insert.
                MemoryBlockSplit::Same(_) => Ok(idx),
                // Transitioned range is at the start; insert the tail remainder.
                MemoryBlockSplit::After(_, next) => {
                    log::trace!(target: "gcd_measure", "add");
                    log::trace!(target: "allocations", "[{}] MemoryBlockSplit (After) -> Next: {:#x?}\n", function!(), next);
                    memory_blocks.add(next)
                }
                // Transitioned range is at the end; insert it and keep `idx`
                // pointing at the (now-shrunk) original block's successor.
                MemoryBlockSplit::Before(_, next) => {
                    log::trace!(target: "gcd_measure", "add");
                    log::trace!(target: "allocations", "[{}] MemoryBlockSplit (Before) -> Next: {:#x?}\n", function!(), next);
                    memory_blocks.add(next).map(|_| idx)
                }
                // Range is in the middle; two new blocks are inserted.
                MemoryBlockSplit::Middle(_, next, next2) => {
                    log::trace!(target: "gcd_measure", "add");
                    log::trace!(target: "gcd_measure", "add");
                    log::trace!(target: "allocations", "[{}] MemoryBlockSplit (Middle) -> Next: {:#x?}. Next2: {:#x?}\n", function!(), next, next2);
                    memory_blocks.add_many([next2, next])
                }
            }
        };
        log::trace!(target: "allocations", "[{}] Next Index is {:x?}\n", function!(), new_idx);
        let idx = match new_idx {
            Ok(idx) => idx,
            Err(e) => {
                // Insertion failed: roll the mutated block back to its snapshot
                // so the tree remains consistent, then propagate the error.
                log::error!("[{}] Memory block split failed! -> Error: {:#?}", function!(), e);
                unsafe {
                    *memory_blocks.get_with_idx_mut(idx).expect("idx valid above") = mb_before_split;
                }
                error!(e);
            }
        };
        // Coalesce with the following block when states/attributes now match.
        if let Some(next_idx) = memory_blocks.next_idx(idx) {
            let mut next = *memory_blocks.get_with_idx(next_idx).expect("idx valid from insert");
            unsafe {
                if memory_blocks.get_with_idx_mut(idx).expect("idx valid from insert").merge(&mut next) {
                    memory_blocks.delete_with_idx(next_idx).expect("Index already verified.");
                }
            }
        }
        // Coalesce with the preceding block; on merge, the surviving block is
        // the predecessor, so return its index instead.
        if let Some(prev_idx) = memory_blocks.prev_idx(idx) {
            let mut block = *memory_blocks.get_with_idx(idx).expect("idx valid from insert");
            unsafe {
                if memory_blocks.get_with_idx_mut(prev_idx).expect("idx valid from insert").merge(&mut block) {
                    memory_blocks.delete_with_idx(idx).expect("Index already verified.");
                    return Ok(prev_idx);
                }
            }
        }
        Ok(idx)
    }
/// Returns the number of memory space descriptors currently tracked by the GCD.
pub fn memory_descriptor_count(&self) -> usize {
    let blocks = &self.memory_blocks;
    blocks.len()
}
/// Clears the default attributes applied to new allocations (normally
/// `efi::MEMORY_XP`, per `SpinLockedGcd::new`), so compatibility-mode
/// allocations are not marked no-execute. Only compiled in when the
/// `compatibility_mode_allowed` feature is enabled.
#[cfg(feature = "compatibility_mode_allowed")]
fn activate_compatibility_mode(&mut self) {
    self.default_attributes = 0;
}
// Display names indexed by `GcdMemoryType`; the last entry ("Unknown") is the
// clamped fallback for any out-of-range value (see the `Display` impl).
const GCD_MEMORY_TYPE_NAMES: [&'static str; 8] = [
    "NonExist ", "Reserved ", "SystemMem", "MMIO ", "PersisMem", "MoreRelia", "Unaccepte", "Unknown ", ];
}
impl Display for GCD {
    /// Renders the memory space map as a table, one row per block, walking the
    /// red-black tree in ascending base-address order.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        writeln!(
            f,
            "GCDMemType Range Capabilities Attributes ImageHandle DeviceHandle"
        )?;
        writeln!(
            f,
            "========== ================================= ================ ================ ================ ================"
        )?;
        let map = &self.memory_blocks;
        let mut cursor = map.first_idx();
        while let Some(i) = cursor {
            let block = map.get_with_idx(i).expect("idx is valid from next_idx");
            // Both variants carry the same descriptor payload.
            let (MemoryBlock::Allocated(d) | MemoryBlock::Unallocated(d)) = block;
            // Clamp unknown types onto the final ("Unknown") table entry.
            let name_idx = usize::min(d.memory_type as usize, Self::GCD_MEMORY_TYPE_NAMES.len() - 1);
            writeln!(
                f,
                "{} {:016x?}-{:016x?} {:016x?} {:016x?} {:016x?} {:016x?}",
                GCD::GCD_MEMORY_TYPE_NAMES[name_idx],
                d.base_address,
                d.base_address + d.length - 1,
                d.capabilities,
                d.attributes,
                d.image_handle,
                d.device_handle
            )?;
            cursor = map.next_idx(i);
        }
        Ok(())
    }
}
impl SliceKey for MemoryBlock {
    type Key = u64;
    /// Orders memory blocks in the red-black tree by their base address.
    fn key(&self) -> &Self::Key {
        &self.as_ref().base_address
    }
}
impl From<SliceError> for InternalError {
fn from(value: SliceError) -> Self {
InternalError::Slice(value)
}
}
impl From<memory_block::Error> for InternalError {
fn from(value: memory_block::Error) -> Self {
InternalError::MemoryBlock(value)
}
}
/// Tracks the platform's I/O space as a red-black tree of `IoBlock`s.
#[derive(Debug)]
pub struct IoGCD {
    // One past the highest valid I/O address (1 << io_address_bits).
    maximum_address: usize,
    // Allocated/unallocated I/O ranges, keyed by base address.
    io_blocks: Rbt<'static, IoBlock>,
}
impl IoGCD {
/// Test-only constructor. Panics if `io_address_bits` is zero.
#[cfg(test)]
pub(crate) const fn _new(io_address_bits: u32) -> Self {
    assert!(io_address_bits > 0);
    Self { io_blocks: Rbt::new(), maximum_address: 1 << io_address_bits }
}
/// Initializes the I/O GCD with the width of the platform's I/O address bus.
///
/// NOTE(review): unlike `_new`, this does not assert `io_address_bits > 0`,
/// and a width >= usize::BITS would overflow the shift — callers appear to be
/// trusted to pass a sane, non-zero width; confirm against call sites.
pub fn init(&mut self, io_address_bits: u32) {
    self.maximum_address = 1 << io_address_bits;
}
/// Lazily allocates the backing storage for the I/O block tree and seeds it
/// with a single `NonExistent` block spanning the entire I/O space.
///
/// Returns `EfiError::NotReady` if `init` has not been called yet, or
/// `EfiError::OutOfResources` if the seed block cannot be inserted.
fn init_io_blocks(&mut self) -> Result<(), EfiError> {
    ensure!(self.maximum_address != 0, EfiError::NotReady);
    // SAFETY: `Box::into_raw` yields a valid, non-null pointer. The allocation
    // is intentionally leaked so the Rbt can borrow a `'static` slice for the
    // lifetime of the GCD.
    self.io_blocks.resize(unsafe {
        Box::into_raw(vec![0_u8; IO_BLOCK_SLICE_SIZE].into_boxed_slice())
            .as_mut()
            .expect("RBT given null pointer in initialization.")
    });
    self.io_blocks
        .add(IoBlock::Unallocated(dxe_services::IoSpaceDescriptor {
            io_type: dxe_services::GcdIoType::NonExistent,
            base_address: 0,
            length: self.maximum_address as u64,
            ..Default::default()
        }))
        .map_err(|_| EfiError::OutOfResources)?;
    Ok(())
}
/// Adds I/O space of the given type to the GCD map. The target range must
/// currently be `NonExistent`. Returns the tree index of the resulting block.
///
/// # Errors
/// - `NotReady` if the I/O GCD is uninitialized.
/// - `InvalidParameter` if `len` is zero or the state transition is illegal.
/// - `Unsupported` if the range exceeds the maximum I/O address (or the range
///   end overflows `usize`).
/// - `AccessDenied` if any part of the range already exists.
/// - `OutOfResources` if the block tree is full.
pub fn add_io_space(
    &mut self,
    io_type: dxe_services::GcdIoType,
    base_address: usize,
    len: usize,
) -> Result<usize, EfiError> {
    ensure!(self.maximum_address != 0, EfiError::NotReady);
    ensure!(len > 0, EfiError::InvalidParameter);
    // checked_add: `base_address + len` must not wrap before the range check;
    // a plain add would panic in debug and silently wrap (passing the check) in release.
    let end = base_address.checked_add(len).ok_or(EfiError::Unsupported)?;
    ensure!(end <= self.maximum_address, EfiError::Unsupported);
    log::trace!(target: "allocations", "[{}] Adding IO space at {:#x}", function!(), base_address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
    log::trace!(target: "allocations", "[{}] IO Type: {:?}\n", function!(), io_type);
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    let idx = io_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
    let block = io_blocks.get_with_idx(idx).ok_or(EfiError::NotFound)?;
    // Only non-existent space may be added.
    ensure!(block.as_ref().io_type == dxe_services::GcdIoType::NonExistent, EfiError::AccessDenied);
    match Self::split_state_transition_at_idx(io_blocks, idx, base_address, len, IoStateTransition::Add(io_type)) {
        Ok(idx) => Ok(idx),
        Err(InternalError::IoBlock(IoBlockError::BlockOutsideRange)) => error!(EfiError::AccessDenied),
        Err(InternalError::IoBlock(IoBlockError::InvalidStateTransition)) => error!(EfiError::InvalidParameter),
        Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
        Err(e) => panic!("{e:?}"),
    }
}
/// Removes I/O space from the GCD map, returning it to the `NonExistent` state.
///
/// # Errors
/// - `NotReady` if the I/O GCD is uninitialized.
/// - `InvalidParameter` if `len` is zero.
/// - `Unsupported` if the range exceeds the maximum I/O address (or overflows).
/// - `NotFound` if the range is outside the located block or already non-existent.
/// - `AccessDenied` if the range is still allocated.
/// - `OutOfResources` if the block tree is full.
pub fn remove_io_space(&mut self, base_address: usize, len: usize) -> Result<(), EfiError> {
    ensure!(self.maximum_address != 0, EfiError::NotReady);
    ensure!(len > 0, EfiError::InvalidParameter);
    // checked_add: reject ranges whose end would wrap instead of panicking
    // (debug) or wrapping past the bound check (release).
    let end = base_address.checked_add(len).ok_or(EfiError::Unsupported)?;
    ensure!(end <= self.maximum_address, EfiError::Unsupported);
    log::trace!(target: "allocations", "[{}] Removing IO space at {:#x}", function!(), base_address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}\n", function!(), len);
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    let idx = io_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
    // Snapshot the block so a failed transition can be classified below.
    let block = *io_blocks.get_with_idx(idx).expect("Idx valid from get_closest_idx");
    match Self::split_state_transition_at_idx(io_blocks, idx, base_address, len, IoStateTransition::Remove) {
        Ok(_) => Ok(()),
        Err(InternalError::IoBlock(IoBlockError::BlockOutsideRange)) => error!(EfiError::NotFound),
        Err(InternalError::IoBlock(IoBlockError::InvalidStateTransition)) => match block {
            IoBlock::Unallocated(_) => error!(EfiError::NotFound),
            IoBlock::Allocated(_) => error!(EfiError::AccessDenied),
        },
        Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
        Err(e) => panic!("{e:?}"),
    }
}
pub fn allocate_io_space(
&mut self,
allocate_type: AllocateType,
io_type: dxe_services::GcdIoType,
alignment: usize,
len: usize,
image_handle: efi::Handle,
device_handle: Option<efi::Handle>,
) -> Result<usize, EfiError> {
ensure!(self.maximum_address != 0, EfiError::NotReady);
ensure!(len > 0 && image_handle > ptr::null_mut(), EfiError::InvalidParameter);
log::trace!(target: "allocations", "[{}] Allocating IO space: {:x?}", function!(), allocate_type);
log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
log::trace!(target: "allocations", "[{}] IO Type: {:?}", function!(), io_type);
log::trace!(target: "allocations", "[{}] Alignment: {:#x}", function!(), alignment);
log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
match allocate_type {
AllocateType::BottomUp(max_address) => self.allocate_bottom_up(
io_type,
alignment,
len,
image_handle,
device_handle,
max_address.unwrap_or(usize::MAX),
),
AllocateType::TopDown(max_address) => self.allocate_top_down(
io_type,
alignment,
len,
image_handle,
device_handle,
max_address.unwrap_or(usize::MAX),
),
AllocateType::Address(address) => {
ensure!(address + len <= self.maximum_address, EfiError::Unsupported);
self.allocate_address(io_type, alignment, len, image_handle, device_handle, address)
}
}
}
/// Scans the I/O block tree from the lowest address upward for a block of
/// `io_type` that can hold `len` bytes aligned to `1 << alignment` at or below
/// `max_address`, and allocates the first fit to `image_handle`.
///
/// Returns the allocated base address, `NotFound` if no block fits, or
/// `OutOfResources` if the tree cannot hold the split blocks.
fn allocate_bottom_up(
    &mut self,
    io_type: dxe_services::GcdIoType,
    alignment: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
    max_address: usize,
) -> Result<usize, EfiError> {
    ensure!(len > 0, EfiError::InvalidParameter);
    log::trace!(target: "allocations", "[{}] Bottom up IO allocation: {:#?}", function!(), io_type);
    log::trace!(target: "allocations", "[{}] Max Address: {:#x}", function!(), max_address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
    log::trace!(target: "allocations", "[{}] Alignment: {:#x}", function!(), alignment);
    log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
    log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    let mut current = io_blocks.first_idx();
    while let Some(idx) = current {
        let ib = io_blocks.get_with_idx(idx).expect("idx is valid from next_idx");
        // Skip blocks too small for the request.
        if ib.len() < len {
            current = io_blocks.next_idx(idx);
            continue;
        }
        let address = ib.start();
        // Round the block's start up to the requested alignment
        // (`alignment` is a shift count, i.e. alignment to 1 << alignment).
        let mut addr = address & (usize::MAX << alignment);
        if addr < address {
            addr += 1 << alignment;
        }
        // Blocks are visited in ascending address order, so once the aligned
        // candidate exceeds the cap no later block can satisfy the request.
        ensure!(addr + len <= max_address, EfiError::NotFound);
        if ib.as_ref().io_type != io_type {
            current = io_blocks.next_idx(idx);
            continue;
        }
        match Self::split_state_transition_at_idx(
            io_blocks,
            idx,
            addr,
            len,
            IoStateTransition::Allocate(image_handle, device_handle),
        ) {
            Ok(_) => return Ok(addr),
            // The aligned range did not fit this block or the transition was
            // illegal for its state; keep scanning.
            Err(InternalError::IoBlock(_)) => {
                current = io_blocks.next_idx(idx);
                continue;
            }
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
    }
    Err(EfiError::NotFound)
}
/// Scans the I/O block tree downward from `max_address` for a block of
/// `io_type` that can hold `len` bytes aligned to `1 << align_shift`, and
/// allocates the highest fit to `image_handle`.
///
/// Returns the allocated base address, `NotFound` if no block fits, or
/// `OutOfResources` if the tree cannot hold the split blocks.
fn allocate_top_down(
    &mut self,
    io_type: dxe_services::GcdIoType,
    align_shift: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
    max_address: usize,
) -> Result<usize, EfiError> {
    ensure!(len > 0, EfiError::InvalidParameter);
    log::trace!(target: "allocations", "[{}] Top down IO allocation: {:#?}", function!(), io_type);
    log::trace!(target: "allocations", "[{}] Max Address: {:#x}", function!(), max_address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
    log::trace!(target: "allocations", "[{}] Align Shift: {:#x}", function!(), align_shift);
    log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
    log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    // Start at the block containing (or closest below) the cap and walk down.
    let mut current = io_blocks.get_closest_idx(&(max_address as u64));
    while let Some(idx) = current {
        let ib = io_blocks.get_with_idx(idx).expect("idx is valid from prev_idx");
        // Only the part of the block at or below `max_address` is usable.
        let usable_len = if ib.end() - 1 > max_address { max_address - ib.start() + 1 } else { ib.len() };
        if usable_len < len {
            current = io_blocks.prev_idx(idx);
            continue;
        }
        // Highest aligned address inside the usable window.
        let addr = (ib.start() + usable_len - len) & (usize::MAX << align_shift);
        if addr < ib.start() {
            current = io_blocks.prev_idx(idx);
            continue;
        }
        if ib.as_ref().io_type != io_type {
            current = io_blocks.prev_idx(idx);
            continue;
        }
        match Self::split_state_transition_at_idx(
            io_blocks,
            idx,
            addr,
            len,
            IoStateTransition::Allocate(image_handle, device_handle),
        ) {
            Ok(_) => return Ok(addr),
            // Transition failed for this block; keep walking down.
            Err(InternalError::IoBlock(_)) => {
                current = io_blocks.prev_idx(idx);
                continue;
            }
            Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
            Err(e) => panic!("{e:?}"),
        }
    }
    Err(EfiError::NotFound)
}
/// Allocates `len` bytes of I/O space at exactly `address`, which must lie in
/// a block of the requested `io_type` and already satisfy the alignment
/// (`address` must be a multiple of `1 << alignment`).
///
/// Returns `address` on success, `NotFound` if the type/alignment/range does
/// not match, or `OutOfResources` if the tree cannot hold the split blocks.
fn allocate_address(
    &mut self,
    io_type: dxe_services::GcdIoType,
    alignment: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
    address: usize,
) -> Result<usize, EfiError> {
    ensure!(len > 0, EfiError::InvalidParameter);
    log::trace!(target: "allocations", "[{}] Exact address IO allocation: {:#?}", function!(), io_type);
    log::trace!(target: "allocations", "[{}] Address: {:#x}", function!(), address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
    log::trace!(target: "allocations", "[{}] IO Type: {:?}", function!(), io_type);
    log::trace!(target: "allocations", "[{}] Alignment: {:#x}", function!(), alignment);
    log::trace!(target: "allocations", "[{}] Image Handle: {:#x?}", function!(), image_handle);
    log::trace!(target: "allocations", "[{}] Device Handle: {:#x?}\n", function!(), device_handle.unwrap_or(ptr::null_mut()));
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    let idx = io_blocks.get_closest_idx(&(address as u64)).ok_or(EfiError::NotFound)?;
    let block = io_blocks.get_with_idx(idx).ok_or(EfiError::NotFound)?;
    // The containing block must be the right type and `address` must already
    // be aligned to 1 << alignment.
    ensure!(
        block.as_ref().io_type == io_type && address == address & (usize::MAX << alignment),
        EfiError::NotFound
    );
    match Self::split_state_transition_at_idx(
        io_blocks,
        idx,
        address,
        len,
        IoStateTransition::Allocate(image_handle, device_handle),
    ) {
        Ok(_) => Ok(address),
        Err(InternalError::IoBlock(_)) => error!(EfiError::NotFound),
        Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
        Err(e) => panic!("{e:?}"),
    }
}
/// Frees a previously allocated I/O range, returning it to the unallocated
/// state.
///
/// # Errors
/// - `NotReady` if the I/O GCD is uninitialized.
/// - `InvalidParameter` if `len` is zero.
/// - `Unsupported` if the range exceeds the maximum I/O address (or overflows).
/// - `NotFound` if the range is not an allocated block.
/// - `OutOfResources` if the tree cannot hold the split blocks.
pub fn free_io_space(&mut self, base_address: usize, len: usize) -> Result<(), EfiError> {
    ensure!(self.maximum_address != 0, EfiError::NotReady);
    ensure!(len > 0, EfiError::InvalidParameter);
    // checked_add: `base_address + len` must not wrap before the range check;
    // a plain add would panic in debug and wrap past the check in release.
    let end = base_address.checked_add(len).ok_or(EfiError::Unsupported)?;
    ensure!(end <= self.maximum_address, EfiError::Unsupported);
    log::trace!(target: "allocations", "[{}] Free IO space at {:#?}", function!(), base_address);
    log::trace!(target: "allocations", "[{}] Length: {:#x}\n", function!(), len);
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let io_blocks = &mut self.io_blocks;
    log::trace!(target: "gcd_measure", "search");
    let idx = io_blocks.get_closest_idx(&(base_address as u64)).ok_or(EfiError::NotFound)?;
    match Self::split_state_transition_at_idx(io_blocks, idx, base_address, len, IoStateTransition::Free) {
        Ok(_) => Ok(()),
        Err(InternalError::IoBlock(_)) => error!(EfiError::NotFound),
        Err(InternalError::Slice(SliceError::OutOfSpace)) => error!(EfiError::OutOfResources),
        Err(e) => panic!("{e:?}"),
    }
}
/// Copies every I/O space descriptor into `buffer`, in ascending address
/// order. `buffer` must be empty and must already have capacity for at least
/// `io_descriptor_count()` entries (so no allocation happens here).
pub fn get_io_descriptors(&mut self, buffer: &mut Vec<dxe_services::IoSpaceDescriptor>) -> Result<(), EfiError> {
    ensure!(self.maximum_address != 0, EfiError::NotReady);
    ensure!(buffer.capacity() >= self.io_descriptor_count(), EfiError::InvalidParameter);
    ensure!(buffer.is_empty(), EfiError::InvalidParameter);
    log::trace!(target: "allocations", "[{}] Enter\n", function!());
    if self.io_blocks.capacity() == 0 {
        self.init_io_blocks()?;
    }
    let tree = &self.io_blocks;
    let mut cursor = tree.first_idx();
    while let Some(i) = cursor {
        let entry = tree.get_with_idx(i).expect("Index comes from dfs and should be valid");
        // Both variants carry the same descriptor payload.
        let (IoBlock::Allocated(desc) | IoBlock::Unallocated(desc)) = entry;
        buffer.push(*desc);
        cursor = tree.next_idx(i);
    }
    Ok(())
}
/// Applies `transition` to the sub-range `[base_address, base_address + len)`
/// of the block at `idx`, splitting the block into up to three pieces (before,
/// target, after) and inserting the split-off pieces into the tree.
///
/// If inserting a split-off piece fails, the block at `idx` is restored to its
/// pre-split snapshot before the error is propagated. After a successful
/// transition, the resulting block is merged with its immediate neighbors when
/// `merge` reports they are compatible; the index of the final (possibly
/// merged) block is returned.
fn split_state_transition_at_idx(
    io_blocks: &mut Rbt<IoBlock>,
    idx: usize,
    base_address: usize,
    len: usize,
    transition: IoStateTransition,
) -> Result<usize, InternalError> {
    // Snapshot for rollback if inserting split-off blocks fails below.
    let ib_before_split = *io_blocks.get_with_idx(idx).expect("Caller should ensure idx is valid.");
    log::trace!(target: "allocations", "[{}] Splitting IO block at {:#x}", function!(), base_address);
    log::trace!(target: "allocations", "[{}] Total IO Blocks Right Now: {:#}", function!(), io_blocks.len());
    log::trace!(target: "allocations", "[{}] Length: {:#x}", function!(), len);
    log::trace!(target: "allocations", "[{}] Block Index: {:#x}", function!(), idx);
    log::trace!(target: "allocations", "[{}] Transition: {:?}\n", function!(), transition);
    // NOTE(review): `get_with_idx_mut` is unsafe per the Rbt crate; the
    // in-place mutation relies on that crate's contract — confirm there.
    let new_idx = unsafe {
        match io_blocks.get_with_idx_mut(idx).expect("idx valid above").split_state_transition(
            base_address,
            len,
            transition,
        )? {
            // Whole block transitioned in place; no new nodes needed.
            IoBlockSplit::Same(_) => Ok(idx),
            // Target at the start; insert the trailing remainder and return its index.
            IoBlockSplit::After(_, next) => {
                log::trace!(target: "gcd_measure", "add");
                log::trace!(target: "allocations", "[{}] IoBlockSplit (After) -> Next: {:#x?}\n", function!(), next);
                io_blocks.add(next)
            }
            // Target at the end (kept at `idx`); insert the leading remainder.
            IoBlockSplit::Before(_, next) => {
                log::trace!(target: "gcd_measure", "add");
                log::trace!(target: "allocations", "[{}] IoBlockSplit (Before) -> Next: {:#x?}\n", function!(), next);
                io_blocks.add(next).map(|_| idx)
            }
            // Target in the middle; insert both remainders.
            IoBlockSplit::Middle(_, next, next2) => {
                log::trace!(target: "gcd_measure", "add");
                log::trace!(target: "gcd_measure", "add");
                log::trace!(target: "allocations", "[{}] IoBlockSplit (Middle) -> Next: {:#x?}. Next2: {:#x?}\n", function!(), next, next2);
                io_blocks.add_many([next2, next])
            }
        }
    };
    let idx = match new_idx {
        Ok(idx) => idx,
        Err(e) => {
            // Insertion failed: roll the block back to its pre-split state so
            // the tree stays consistent, then propagate the error.
            log::error!("[{}] IO block split failed! -> Error: {:#?}", function!(), e);
            unsafe {
                *io_blocks.get_with_idx_mut(idx).expect("idx valid above") = ib_before_split;
            }
            error!(e);
        }
    };
    // Coalesce with the following block if compatible.
    if let Some(next_idx) = io_blocks.next_idx(idx) {
        let mut next = *io_blocks.get_with_idx(next_idx).expect("idx valid from insert");
        unsafe {
            if io_blocks.get_with_idx_mut(idx).expect("idx valid from insert").merge(&mut next) {
                io_blocks.delete_with_idx(next_idx).expect("Index already verified.");
            }
        }
    }
    // Coalesce into the preceding block if compatible; the merged result then
    // lives at `prev_idx`.
    if let Some(prev_idx) = io_blocks.prev_idx(idx) {
        let mut block = *io_blocks.get_with_idx(idx).expect("idx valid from insert");
        unsafe {
            if io_blocks.get_with_idx_mut(prev_idx).expect("idx valid from insert").merge(&mut block) {
                io_blocks.delete_with_idx(idx).expect("Index already verified.");
                return Ok(prev_idx);
            }
        }
    }
    Ok(idx)
}
/// Returns the number of I/O space descriptors currently tracked.
pub fn io_descriptor_count(&self) -> usize {
    let blocks = &self.io_blocks;
    blocks.len()
}
// Display names indexed by `GcdIoType`; the last entry ("Unknown") is the
// clamped fallback for any out-of-range value (see the `Display` impl).
const GCD_IO_TYPE_NAMES: [&'static str; 4] = [
    "NonExist", "Reserved", "I/O ", "Unknown ", ];
}
impl Display for IoGCD {
    /// Renders the I/O space map as a table, one row per block; rows for
    /// ranges with a real image handle are suffixed with `*`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        writeln!(f, "GCDIoType Range ")?;
        writeln!(f, "========== =================================")?;
        let tree = &self.io_blocks;
        let mut cursor = tree.first_idx();
        while let Some(i) = cursor {
            let entry = tree.get_with_idx(i).expect("idx is valid from next_idx");
            // Both variants carry the same descriptor payload.
            let (IoBlock::Allocated(d) | IoBlock::Unallocated(d)) = entry;
            let name_idx = usize::min(d.io_type as usize, Self::GCD_IO_TYPE_NAMES.len() - 1);
            let marker = if d.image_handle == INVALID_HANDLE { "" } else { "*" };
            writeln!(
                f,
                "{} {:016x?}-{:016x?}{}",
                IoGCD::GCD_IO_TYPE_NAMES[name_idx],
                d.base_address,
                d.base_address + d.length - 1,
                marker
            )?;
            cursor = tree.next_idx(i);
        }
        Ok(())
    }
}
impl SliceKey for IoBlock {
    type Key = u64;
    /// Orders I/O blocks in the red-black tree by their base address.
    fn key(&self) -> &Self::Key {
        &self.as_ref().base_address
    }
}
impl From<io_block::Error> for InternalError {
fn from(value: io_block::Error) -> Self {
InternalError::IoBlock(value)
}
}
/// The kind of GCD map change that triggered a `MapChangeCallback`.
#[derive(Debug, PartialEq, Eq)]
pub enum MapChangeType {
    AddMemorySpace,
    RemoveMemorySpace,
    AllocateMemorySpace,
    FreeMemorySpace,
    SetMemoryAttributes,
    SetMemoryCapabilities,
}
/// Callback invoked after a successful GCD map change, with the change kind.
pub type MapChangeCallback = fn(MapChangeType);
/// TPL-lock-protected wrapper around the memory and I/O GCDs, plus the CPU
/// page table used to apply memory attributes.
pub struct SpinLockedGcd {
    // Memory space map.
    memory: tpl_lock::TplMutex<GCD>,
    // I/O space map.
    io: tpl_lock::TplMutex<IoGCD>,
    // Optional hook fired after a successful map change.
    memory_change_callback: Option<MapChangeCallback>,
    // Per-EFI-memory-type page counts (16 spec types plus one extra entry;
    // see `new`).
    memory_type_info_table: [EFiMemoryTypeInformation; 17],
    // CPU page table; `None` until `init_paging` runs.
    page_table: tpl_lock::TplMutex<Option<Box<dyn PatinaPageTable>>>,
}
impl SpinLockedGcd {
/// Reports whether the memory GCD has been initialized (delegates to
/// `GCD::is_ready` under the TPL lock).
pub fn is_ready(&self) -> bool {
    let gcd = self.memory.lock();
    gcd.is_ready()
}
/// Const-constructs an uninitialized `SpinLockedGcd`; `init` (and later
/// `init_paging`) must be called before use. `memory_change_callback`, if
/// provided, fires after each successful map change.
pub const fn new(memory_change_callback: Option<MapChangeCallback>) -> Self {
    Self {
        memory: tpl_lock::TplMutex::new(
            efi::TPL_HIGH_LEVEL,
            GCD {
                maximum_address: 0,
                memory_blocks: Rbt::new(),
                allocate_memory_space_fn: GCD::allocate_memory_space_internal,
                free_memory_space_fn: GCD::free_memory_space_worker,
                // New allocations default to no-execute (cleared by
                // `activate_compatibility_mode` when that feature is enabled).
                default_attributes: efi::MEMORY_XP,
                prioritize_32_bit_memory: false,
            },
            "GcdMemLock",
        ),
        io: tpl_lock::TplMutex::new(
            efi::TPL_HIGH_LEVEL,
            IoGCD { maximum_address: 0, io_blocks: Rbt::new() },
            "GcdIoLock",
        ),
        memory_change_callback,
        // One entry per EFI memory type, all counts starting at zero.
        memory_type_info_table: [
            EFiMemoryTypeInformation { memory_type: efi::RESERVED_MEMORY_TYPE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::LOADER_CODE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::LOADER_DATA, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::BOOT_SERVICES_CODE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::BOOT_SERVICES_DATA, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::RUNTIME_SERVICES_CODE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::RUNTIME_SERVICES_DATA, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::CONVENTIONAL_MEMORY, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::UNUSABLE_MEMORY, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::ACPI_RECLAIM_MEMORY, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::ACPI_MEMORY_NVS, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::MEMORY_MAPPED_IO, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::MEMORY_MAPPED_IO_PORT_SPACE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::PAL_CODE, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::PERSISTENT_MEMORY, number_of_pages: 0 },
            EFiMemoryTypeInformation { memory_type: efi::UNACCEPTED_MEMORY_TYPE, number_of_pages: 0 },
            // NOTE(review): 16 appears to be the max/terminator memory type
            // slot — confirm against the efi memory type constants.
            EFiMemoryTypeInformation { memory_type: 16 , number_of_pages: 0 },
        ],
        page_table: tpl_lock::TplMutex::new(efi::TPL_HIGH_LEVEL, None, "GcdPageTableLock"),
    }
}
/// Sets the memory GCD's flag that controls whether allocations prefer
/// below-4GB memory.
pub fn prioritize_32_bit_memory(&self, value: bool) {
    let mut gcd = self.memory.lock();
    gcd.prioritize_32_bit_memory = value;
}
/// Returns the full EFI memory type information table (page counts per type).
pub const fn memory_type_info_table(&self) -> &[EFiMemoryTypeInformation; 17] {
    &self.memory_type_info_table
}
/// Returns the memory type information entry for the given EFI memory type.
///
/// # Panics
/// Panics if `memory_type` exceeds the table bounds (> 16).
pub const fn memory_type_info(&self, memory_type: u32) -> &EFiMemoryTypeInformation {
    &self.memory_type_info_table[memory_type as usize]
}
/// Applies `attributes` to `[base_address, base_address + len)` in the CPU
/// page table: unmaps the region when `MEMORY_RP` (read-protect) is requested,
/// otherwise (re)maps it with the requested access and cache attributes, and
/// signals the cache-attribute-change event group when cache attributes
/// actually change.
///
/// Returns `EfiError::NotReady` if the page table has not been created yet
/// (i.e. before `init_paging`), and `InvalidParameter`/`OutOfResources`/
/// `NotFound` on page-table failures.
fn set_paging_attributes(&self, base_address: usize, len: usize, attributes: u64) -> Result<(), EfiError> {
    if let Some(page_table) = &mut *self.page_table.lock() {
        // Only access + cache bits are meaningful to the page table.
        let paging_attrs = MemoryAttributes::from_bits_truncate(attributes)
            & (MemoryAttributes::AccessAttributesMask | MemoryAttributes::CacheAttributesMask);
        let mut unmapped = false;
        let mut update_cache_attributes = true;
        // Query the current mapping so no-op updates can be skipped and cache
        // changes detected.
        let region_attributes = match page_table.query_memory_region(base_address as u64, len as u64) {
            Ok(attrs) => Some(attrs),
            // Partially/fully unmapped region: keep any cache attributes the
            // page table can still report.
            Err((PtError::NoMapping, attrs)) => {
                unmapped = true;
                match attrs {
                    CacheAttributeValue::Valid(cache_attributes) => {
                        Some(cache_attributes)
                    }
                    CacheAttributeValue::Unmapped => {
                        None
                    }
                    CacheAttributeValue::NotSupported => {
                        update_cache_attributes = false;
                        None
                    }
                }
            }
            Err(e) => {
                log::error!(
                    "query memory region {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?}. Status: {e:#x?}",
                );
                log::error!("GCD and page table are out of sync. This is a critical error.");
                log::info!("GCD {GCD}");
                debug_assert!(false);
                return Err(EfiError::InvalidParameter);
            }
        };
        // Already mapped with exactly the requested attributes: nothing to do.
        if let Some(region_attrs) = region_attributes
            && (region_attrs & (MemoryAttributes::AccessAttributesMask | MemoryAttributes::CacheAttributesMask))
                == paging_attrs
            && !unmapped
        {
            log::trace!(
                target: "paging",
                "Memory region {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?}. No paging action taken: Region already mapped with these attributes.",
            );
            return Ok(());
        }
        // Read-protect requests are implemented by unmapping the region.
        if paging_attrs & MemoryAttributes::ReadProtect == MemoryAttributes::ReadProtect {
            if unmapped {
                log::error!(
                    "Memory region {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?} already unmapped. GCD and page table are out of sync. This is a critical error.",
                );
                debug_assert!(false);
                return Ok(());
            }
            match page_table.unmap_memory_region(base_address as u64, len as u64) {
                Ok(_) => {
                    log::trace!(
                        target: "paging",
                        "Memory region {base_address:#x?} of length {len:#x?} unmapped",
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::error!(
                        "Failed to unmap memory region {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?}. Status: {e:#x?}",
                    );
                    debug_assert!(false);
                    return Err(EfiError::InvalidParameter);
                }
            }
        }
        match page_table.map_memory_region(base_address as u64, len as u64, paging_attrs) {
            Ok(_) => {
                // Fire the cache-attribute-change event group when the mapping
                // changed cache attributes (or mapped an unmapped region with
                // cache attributes), unless the page table can't report them.
                let new_cache_attributes = paging_attrs & MemoryAttributes::CacheAttributesMask;
                let old_cache_attributes =
                    region_attributes.map(|attrs| attrs & MemoryAttributes::CacheAttributesMask);
                if new_cache_attributes != MemoryAttributes::empty() && update_cache_attributes {
                    if let Some(old_cache_attrs) = old_cache_attributes
                        && old_cache_attrs != new_cache_attributes
                    {
                        log::trace!(
                            target: "paging",
                            "Cache attributes for memory region {base_address:#x?} of length {len:#x?} were updated to {new_cache_attributes:#x?} from {old_cache_attrs:#x?}, sending cache attributes changed event",
                        );
                        EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP);
                    } else if unmapped && old_cache_attributes.is_none() {
                        log::trace!(
                            target: "paging",
                            "Cache attributes for memory region {base_address:#x?} of length {len:#x?} were updated to {new_cache_attributes:#x?} from an unmapped state, sending cache attributes changed event",
                        );
                        EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP);
                    }
                }
                log::trace!(
                    target: "paging",
                    "Memory region {base_address:#x?} of length {len:#x?} mapped with attributes {paging_attrs:#x?}",
                );
                Ok(())
            }
            Err(e) => {
                log::error!(
                    "Failed to map memory region {base_address:#x?} of length {len:#x?} with attributes {attributes:#x?}. Status: {e:#x?}",
                );
                debug_assert!(false);
                match e {
                    PtError::OutOfResources => Err(EfiError::OutOfResources),
                    PtError::NoMapping => Err(EfiError::NotFound),
                    _ => Err(EfiError::InvalidParameter),
                }
            }
        }
    } else {
        // Page table not created yet (init_paging has not run).
        Err(EfiError::NotReady)
    }
}
/// Delegates to `GCD::lock_memory_space` under the TPL lock.
pub fn lock_memory_space(&self) {
    let mut gcd = self.memory.lock();
    gcd.lock_memory_space();
}
/// Delegates to `GCD::unlock_memory_space` under the TPL lock.
pub fn unlock_memory_space(&self) {
    let mut gcd = self.memory.lock();
    gcd.unlock_memory_space();
}
/// Test-only: resets both GCDs to their uninitialized (pre-`init`) state.
///
/// # Safety
/// Replaces the block trees wholesale; backing storage previously handed to
/// the trees (leaked in `init_io_blocks` and its memory counterpart) is not
/// reclaimed. NOTE(review): only safe in tests where the process is torn
/// down afterwards.
#[cfg(test)]
pub unsafe fn reset(&self) {
    let (mut mem, mut io) = (self.memory.lock(), self.io.lock());
    mem.maximum_address = 0;
    mem.memory_blocks = Rbt::new();
    io.maximum_address = 0;
    io.io_blocks = Rbt::new();
}
/// Initializes both the memory and I/O GCDs with their address-bus widths.
pub fn init(&self, memory_address_bits: u32, io_address_bits: u32) {
    {
        let mut mem = self.memory.lock();
        mem.init(memory_address_bits);
    }
    {
        let mut io = self.io.lock();
        io.init(io_address_bits);
    }
}
/// Creates the CPU page table and brings it in sync with the GCD, then
/// installs it. In order:
/// 1. maps all currently allocated memory (cache attributes preserved, XP set),
/// 2. remaps the DXE core image with per-section permissions (RO for code,
///    XP for data) parsed from its PE/COFF headers,
/// 3. maps MMIO and reserved regions (page-aligned, XP),
/// 4. unmaps page 0 for null-pointer detection (if it exists in the GCD),
/// 5. installs the page table.
///
/// Panics on any failure involving the DXE core image; other mapping failures
/// are logged and debug-asserted.
pub(crate) fn init_paging(&self, hob_list: &HobList) {
    log::info!("Initializing paging for the GCD");
    let page_allocator = PagingAllocator::new(&GCD);
    *self.page_table.lock() = Some(create_cpu_paging(page_allocator).expect("Failed to create CPU page table"));
    let mut mmio_res_descs: Vec<dxe_services::MemorySpaceDescriptor> = Vec::new();
    self.memory
        .lock()
        .get_mmio_and_reserved_descriptors(mmio_res_descs.as_mut())
        .expect("Failed to get MMIO descriptors!");
    let mut descriptors: Vec<dxe_services::MemorySpaceDescriptor> =
        Vec::with_capacity(self.memory_descriptor_count() + 10);
    self.memory
        .lock()
        .get_allocated_memory_descriptors(&mut descriptors)
        .expect("Failed to get allocated memory descriptors!");
    // Map every allocated region with its existing cache attributes plus XP.
    for desc in descriptors {
        log::trace!(
            target: "paging",
            "Mapping memory region {:#x?} of length {:#x?} with attributes {:#x?}",
            desc.base_address,
            desc.length,
            desc.attributes
        );
        if let Err(err) = self.set_memory_space_attributes(
            desc.base_address as usize,
            desc.length as usize,
            (desc.attributes & efi::CACHE_ATTRIBUTE_MASK) | efi::MEMORY_XP,
        ) {
            log::error!(
                "Failed to map memory region {:#x?} of length {:#x?} with attributes {:#x?}. Error: {:?}",
                desc.base_address,
                desc.length,
                desc.attributes,
                err
            );
            debug_assert!(false);
        }
    }
    // Locate the DXE core's own image allocation from the HOB list.
    let dxe_core_hob = hob_list
        .iter()
        .find_map(|x| if let Hob::MemoryAllocationModule(module) = x { Some(module) } else { None })
        .expect("Did not find MemoryAllocationModule Hob for DxeCore");
    // SAFETY-relevant: trusts the HOB's base/length to describe the loaded
    // DXE core image in memory.
    let pe_info = unsafe {
        UefiPeInfo::parse(core::slice::from_raw_parts(
            dxe_core_hob.alloc_descriptor.memory_base_address as *const u8,
            dxe_core_hob.alloc_descriptor.memory_length as usize,
        ))
        .expect("Failed to parse PE info for DXE Core")
    };
    let dxe_core_cache_attr =
        match self.get_memory_descriptor_for_address(dxe_core_hob.alloc_descriptor.memory_base_address) {
            Ok(desc) => desc.attributes & efi::CACHE_ATTRIBUTE_MASK,
            Err(e) => panic!("DXE Core not mapped in GCD {e:?}"),
        };
    // First map the entire image XP, then tighten per section below.
    self.set_memory_space_attributes(
        dxe_core_hob.alloc_descriptor.memory_base_address as usize,
        dxe_core_hob.alloc_descriptor.memory_length as usize,
        efi::MEMORY_XP | dxe_core_cache_attr,
    )
    .unwrap_or_else(|_| {
        panic!(
            "Failed to map DXE Core image {:#x?} of length {:#x?} with attributes {:#x?}.",
            dxe_core_hob.alloc_descriptor.memory_base_address, 0x1000, 0
        )
    });
    for section in pe_info.sections {
        let section_base_address =
            dxe_core_hob.alloc_descriptor.memory_base_address + (section.virtual_address as u64);
        // Code sections become read-only (executable); everything else stays XP.
        let mut attributes = efi::MEMORY_XP;
        if section.characteristics & pecoff::IMAGE_SCN_CNT_CODE == pecoff::IMAGE_SCN_CNT_CODE {
            attributes = efi::MEMORY_RO;
        }
        let aligned_virtual_size = match align_up(section.virtual_size, pe_info.section_alignment) {
            Ok(size) => size as u64,
            Err(_) => {
                panic!(
                    "Failed to align section size {:#x?} with alignment {:#x?}",
                    section.virtual_size, pe_info.section_alignment
                );
            }
        };
        log::trace!(
            target: "paging",
            "Mapping DXE Core image memory region {section_base_address:#x?} of length {aligned_virtual_size:#x?} with attributes {attributes:#x?}",
        );
        // Preserve the image's cache attributes from the GCD.
        attributes |=
            match self.get_memory_descriptor_for_address(dxe_core_hob.alloc_descriptor.memory_base_address) {
                Ok(desc) => desc.attributes & efi::CACHE_ATTRIBUTE_MASK,
                Err(e) => panic!("DXE Core section not mapped in GCD {e:?}"),
            };
        self.set_memory_space_attributes(section_base_address as usize, aligned_virtual_size as usize, attributes)
            .unwrap_or_else(|_| {
                panic!(
                    "Failed to map DXE Core image {:#x?} of length {:#x?} with attributes {:#x?}.",
                    dxe_core_hob.alloc_descriptor.memory_base_address, 0x1000, 0
                )
            });
    }
    // Map MMIO/reserved regions, expanded to page boundaries, XP.
    for desc in mmio_res_descs {
        let base_address = desc.base_address as usize & !UEFI_PAGE_MASK;
        let len = (desc.length as usize + UEFI_PAGE_MASK) & !UEFI_PAGE_MASK;
        let new_attributes = (desc.attributes & efi::CACHE_ATTRIBUTE_MASK) | efi::MEMORY_XP;
        log::trace!(
            target: "paging",
            "Mapping {:?} region {:#x?} of length {:#x?} with attributes {:#x?}",
            desc.memory_type,
            base_address,
            len,
            new_attributes
        );
        if let Err(err) = self.set_memory_space_attributes(base_address, len, new_attributes) {
            log::error!(
                "Failed to map {:?} region {:#x?} of length {:#x?} with attributes {:#x?}. Error: {:?}",
                desc.memory_type,
                base_address,
                len,
                new_attributes,
                err
            );
            debug_assert!(false);
        }
    }
    // Unmap page 0 (MEMORY_RP) for null-pointer dereference detection.
    if let Ok(descriptor) = self.get_memory_descriptor_for_address(0)
        && descriptor.memory_type != GcdMemoryType::NonExistent
        && let Err(err) = self.set_memory_space_attributes(0, UEFI_PAGE_SIZE, efi::MEMORY_RP)
    {
        log::error!("Failed to unmap page 0, which is reserved for null pointer detection. Error: {err:?}");
        debug_assert!(false);
    }
    self.page_table.lock().as_mut().unwrap().install_page_table().expect("Failed to install the page table");
    log::info!("Paging initialized for the GCD");
}
/// Adds memory space to the GCD map and fires the map-change callback on
/// success.
///
/// # Safety
/// Delegates to `GCD::add_memory_space`, which is itself `unsafe`; the caller
/// must uphold that function's safety contract (not visible in this chunk —
/// see its definition).
pub unsafe fn add_memory_space(
    &self,
    memory_type: dxe_services::GcdMemoryType,
    base_address: usize,
    len: usize,
    capabilities: u64,
) -> Result<usize, EfiError> {
    let result = unsafe { self.memory.lock().add_memory_space(memory_type, base_address, len, capabilities) };
    if result.is_ok()
        && let Some(callback) = self.memory_change_callback
    {
        callback(MapChangeType::AddMemorySpace);
    }
    result
}
/// Removes memory space from the GCD map. On success, also unmaps the region
/// from the page table (a failed unmap is only logged — the region may never
/// have been mapped) and fires the map-change callback.
pub fn remove_memory_space(&self, base_address: usize, len: usize) -> Result<(), EfiError> {
    let result = self.memory.lock().remove_memory_space(base_address, len);
    if result.is_ok() {
        if let Some(page_table) = &mut *self.page_table.lock() {
            match page_table.unmap_memory_region(base_address as u64, len as u64) {
                Ok(_) => {}
                Err(status) => {
                    log::error!(
                        "Failed to unmap memory region {base_address:#x?} of length {len:#x?}. Status: {status:#x?} during
remove_memory_space removal. This is expected if this region was not previously mapped",
                    );
                }
            }
        }
        if let Some(callback) = self.memory_change_callback {
            callback(MapChangeType::RemoveMemorySpace);
        }
    }
    result
}
/// Allocates memory space from the GCD. On success, applies the GCD's default
/// attributes (e.g. XP) plus the region's existing cache attributes to the
/// allocation, then fires the map-change callback.
pub fn allocate_memory_space(
    &self,
    allocate_type: AllocateType,
    memory_type: dxe_services::GcdMemoryType,
    alignment: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
) -> Result<usize, EfiError> {
    let result = self.memory.lock().allocate_memory_space(
        allocate_type,
        memory_type,
        alignment,
        len,
        image_handle,
        device_handle,
    );
    if result.is_ok() {
        if let Ok(base_address) = result.as_ref() {
            // Preserve the region's existing cache attributes; fall back to
            // the platform default if the descriptor lookup fails.
            let attributes = match self.get_memory_descriptor_for_address(*base_address as efi::PhysicalAddress) {
                Ok(descriptor) => descriptor.attributes,
                Err(_) => DEFAULT_CACHE_ATTR,
            };
            let default_attributes = self.memory.lock().default_attributes;
            match self.set_memory_space_attributes(
                *base_address,
                len,
                (attributes & efi::CACHE_ATTRIBUTE_MASK) | default_attributes,
            ) {
                Ok(_) => (),
                Err(EfiError::NotReady) => {
                    // Page table not created yet; attributes are applied for
                    // existing allocations during init_paging instead.
                }
                Err(e) => {
                    log::error!(
                        "Could not set NX for memory address {:#X} for len {:#X} with error {:?}",
                        *base_address,
                        len,
                        e
                    );
                    debug_assert!(false);
                }
            }
        } else {
            log::error!("Could not extract base address from allocation result, unable to set memory attributes.");
            debug_assert!(false);
        }
        if let Some(callback) = self.memory_change_callback {
            callback(MapChangeType::AllocateMemorySpace);
        }
    }
    result
}
/// Frees an allocated memory space range, unmapping it from the page table
/// and notifying any registered map-change listener.
///
/// `EfiError::AccessDenied` from the inner free (range not allocated) is
/// treated as success so repeated frees are idempotent.
pub fn free_memory_space(&self, base_address: usize, len: usize) -> Result<(), EfiError> {
    let result = self.memory.lock().free_memory_space(base_address, len);
    match result {
        Ok(_) => {
            if let Some(page_table) = &mut *self.page_table.lock() {
                if let Err(status) = page_table.unmap_memory_region(base_address as u64, len as u64) {
                    log::error!(
                        "Failed to unmap memory region {base_address:#x?} of length {len:#x?}. Status: {status:#x?}",
                    );
                    debug_assert!(false);
                    // Fix: removed a dead `match status { .. => EfiError.. }`
                    // expression whose converted error was computed and then
                    // silently discarded; the unmap failure is intentionally
                    // not propagated since the GCD bookkeeping already freed
                    // the range.
                }
            }
            if let Some(callback) = self.memory_change_callback {
                callback(MapChangeType::FreeMemorySpace);
            }
            result
        }
        Err(EfiError::AccessDenied) => Ok(()),
        Err(_) => result,
    }
}
/// Frees a memory space range while keeping the recorded owner handles
/// intact, notifying any registered map-change listener on success.
pub fn free_memory_space_preserving_ownership(&self, base_address: usize, len: usize) -> Result<(), EfiError> {
    let outcome = self.memory.lock().free_memory_space_preserving_ownership(base_address, len);
    if outcome.is_ok() {
        if let Some(notify) = self.memory_change_callback {
            notify(MapChangeType::FreeMemorySpace);
        }
    }
    outcome
}
/// Applies `attributes` to the range `[base_address, base_address + len)`,
/// walking it one GCD descriptor at a time so each sub-range can be updated
/// and, on paging failure, rolled back independently.
///
/// Returns `Err(EfiError::NotReady)` (after still processing the full range)
/// if the page table is not yet installed; other paging errors roll back the
/// GCD entry for the failing sub-range and abort immediately.
pub fn set_memory_space_attributes(
    &self,
    base_address: usize,
    len: usize,
    attributes: u64,
) -> Result<(), EfiError> {
    let mut current_base = base_address as u64;
    let mut res = Ok(());
    let range_end = (base_address + len) as u64;
    while current_base < range_end {
        // Clamp this iteration to the descriptor covering current_base.
        let descriptor = self.get_memory_descriptor_for_address(current_base as efi::PhysicalAddress)?;
        let descriptor_end = descriptor.base_address + descriptor.length;
        let next_base = u64::min(descriptor_end, range_end);
        let current_len = next_base - current_base;
        // Record the attributes in the GCD map first.
        match self.memory.lock().set_memory_space_attributes(
            current_base as usize,
            current_len as usize,
            attributes,
        ) {
            Ok(()) => {}
            Err(e) => {
                log::error!(
                    "Failed to set GCD memory attributes for memory region {current_base:#x?} of length {current_len:#x?} with attributes {attributes:#x?}. Status: {e:#x?}",
                );
                debug_assert!(false);
            }
        }
        // Only cache/access attributes require a page-table update.
        if attributes & (efi::CACHE_ATTRIBUTE_MASK | efi::MEMORY_ACCESS_MASK) != 0 {
            match self.set_paging_attributes(current_base as usize, current_len as usize, attributes) {
                Ok(_) => {}
                Err(EfiError::NotReady) => {
                    // Paging not installed yet; remember the condition but keep
                    // going so the GCD map is updated for the whole range.
                    res = Err(EfiError::NotReady);
                }
                Err(e) => {
                    log::error!(
                        "Failed to set page table memory attributes for memory region {current_base:#x?} of length {current_len:#x?} with attributes {attributes:#x?}. Status: {e:#x?}",
                    );
                    debug_assert!(false);
                    // Best-effort rollback of the GCD entry to the attributes
                    // captured in `descriptor` before this update.
                    if let Err(rollback_err) = self.memory.lock().set_memory_space_attributes(
                        current_base as usize,
                        current_len as usize,
                        descriptor.attributes,
                    ) {
                        log::error!(
                            "Failed to roll back GCD attributes after page table attribute set failure. This is a critical error. GCD and page table are now out of sync. Rollback error: {:?}",
                            rollback_err
                        );
                    }
                    return Err(e);
                }
            }
        }
        current_base = next_base;
    }
    if let Some(callback) = self.memory_change_callback {
        callback(MapChangeType::SetMemoryAttributes);
    }
    res
}
/// Sets the capabilities for a memory space range in the GCD map and
/// notifies any registered map-change listener on success.
pub fn set_memory_space_capabilities(
    &self,
    base_address: usize,
    len: usize,
    capabilities: u64,
) -> Result<(), EfiError> {
    let outcome = self.memory.lock().set_memory_space_capabilities(base_address, len, capabilities);
    if outcome.is_ok() {
        if let Some(notify) = self.memory_change_callback {
            notify(MapChangeType::SetMemoryCapabilities);
        }
    }
    outcome
}
/// Copies the current memory space descriptors into `buffer`.
pub fn get_memory_descriptors(
    &self,
    buffer: &mut Vec<dxe_services::MemorySpaceDescriptor>,
) -> Result<(), EfiError> {
    let mut guard = self.memory.lock();
    guard.get_memory_descriptors(buffer)
}
/// Returns the memory space descriptor covering `address`.
pub fn get_memory_descriptor_for_address(
    &self,
    address: efi::PhysicalAddress,
) -> Result<dxe_services::MemorySpaceDescriptor, EfiError> {
    let mut guard = self.memory.lock();
    guard.get_memory_descriptor_for_address(address)
}
/// Number of memory space descriptors currently tracked.
pub fn memory_descriptor_count(&self) -> usize {
    let guard = self.memory.lock();
    guard.memory_descriptor_count()
}
/// Adds an I/O space range to the GCD.
pub fn add_io_space(
    &self,
    io_type: dxe_services::GcdIoType,
    base_address: usize,
    len: usize,
) -> Result<usize, EfiError> {
    let mut guard = self.io.lock();
    guard.add_io_space(io_type, base_address, len)
}
/// Removes an I/O space range from the GCD.
pub fn remove_io_space(&self, base_address: usize, len: usize) -> Result<(), EfiError> {
    let mut guard = self.io.lock();
    guard.remove_io_space(base_address, len)
}
/// Allocates an I/O space range from the GCD.
pub fn allocate_io_space(
    &self,
    allocate_type: AllocateType,
    io_type: dxe_services::GcdIoType,
    alignment: usize,
    len: usize,
    image_handle: efi::Handle,
    device_handle: Option<efi::Handle>,
) -> Result<usize, EfiError> {
    let mut guard = self.io.lock();
    guard.allocate_io_space(allocate_type, io_type, alignment, len, image_handle, device_handle)
}
/// Frees a previously allocated I/O space range.
pub fn free_io_space(&self, base_address: usize, len: usize) -> Result<(), EfiError> {
    let mut guard = self.io.lock();
    guard.free_io_space(base_address, len)
}
/// Copies the current I/O space descriptors into `buffer`.
pub fn get_io_descriptors(&self, buffer: &mut Vec<dxe_services::IoSpaceDescriptor>) -> Result<(), EfiError> {
    let mut guard = self.io.lock();
    guard.get_io_descriptors(buffer)
}
/// Number of I/O space descriptors currently tracked.
pub fn io_descriptor_count(&self) -> usize {
    let guard = self.io.lock();
    guard.io_descriptor_count()
}
#[cfg(feature = "compatibility_mode_allowed")]
pub(crate) fn activate_compatibility_mode(&self) {
    const LEGACY_BIOS_WB_ADDRESS: usize = 0xA0000;
    // Re-map page 0 write-back if it exists in the GCD so legacy software can
    // access it in compatibility mode.
    if let Ok(descriptor) = self.get_memory_descriptor_for_address(0)
        && descriptor.memory_type != dxe_services::GcdMemoryType::NonExistent
        && let Err(e) = self.set_memory_space_attributes(0, UEFI_PAGE_SIZE, efi::MEMORY_WB)
    {
        log::error!("Failed to map page 0 for compat mode. Status: {e:#x?}");
        debug_assert!(false);
    }
    // Walk the rest of the legacy BIOS range. For each system-memory
    // descriptor, re-apply its cache attributes, clamping the final chunk to
    // the end of the legacy range; otherwise advance one page at a time.
    let mut address = UEFI_PAGE_SIZE;
    while address < LEGACY_BIOS_WB_ADDRESS {
        let mut step = UEFI_PAGE_SIZE;
        if let Ok(descriptor) = self.get_memory_descriptor_for_address(address as efi::PhysicalAddress)
            && descriptor.memory_type == dxe_services::GcdMemoryType::SystemMemory
        {
            let descriptor_len = descriptor.length as usize;
            step = if address + descriptor_len > LEGACY_BIOS_WB_ADDRESS {
                LEGACY_BIOS_WB_ADDRESS - address
            } else {
                descriptor_len
            };
            let cache_attributes = descriptor.attributes & efi::CACHE_ATTRIBUTE_MASK;
            if let Err(e) = self.set_memory_space_attributes(address, step, cache_attributes) {
                log::error!(
                    "Failed to map legacy bios region at {:#x?} of length {:#x?} with attributes {:#x?}. Status: {:#x?}",
                    address,
                    step,
                    cache_attributes,
                    e
                );
                debug_assert!(false);
            }
        }
        address += step;
    }
    self.memory.lock().activate_compatibility_mode();
}
}
impl Display for SpinLockedGcd {
    /// Best-effort display: if a sub-GCD lock is contended, print the
    /// `try_lock` result (typically `None`) rather than blocking.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self.memory.try_lock() {
            Some(gcd) => writeln!(f, "{gcd}")?,
            None => writeln!(f, "Locked: {:?}", self.memory.try_lock())?,
        }
        match self.io.try_lock() {
            Some(gcd) => writeln!(f, "{gcd}")?,
            None => writeln!(f, "Locked: {:?}", self.io.try_lock())?,
        }
        Ok(())
    }
}
impl core::fmt::Debug for SpinLockedGcd {
    /// Debug output is best-effort: a contended lock prints as `None`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let memory_state = self.memory.try_lock();
        writeln!(f, "{:?}", memory_state)?;
        drop(memory_state);
        let io_state = self.io.try_lock();
        writeln!(f, "{:?}", io_state)?;
        Ok(())
    }
}
// SAFETY: SpinLockedGcd appears to guard its interior state behind spin locks
// (`memory`, `io`, `page_table` are all accessed via `.lock()` above) —
// NOTE(review): confirm every field is lock-protected; any raw pointer or
// callback field must also be safe to access from multiple cores.
unsafe impl Sync for SpinLockedGcd {}
unsafe impl Send for SpinLockedGcd {}
#[cfg(test)]
#[coverage(off)]
mod tests {
extern crate std;
use core::{alloc::Layout, sync::atomic::AtomicBool};
use patina::base::align_up;
use crate::test_support;
use super::*;
use alloc::vec::Vec;
use r_efi::efi;
// Runs `f` while holding the test-support global lock, serializing tests
// that mutate process-global state.
fn with_locked_state<F: Fn() + std::panic::RefUnwindSafe>(f: F) {
    let outcome = test_support::with_global_lock(|| f());
    outcome.unwrap();
}
/// A fresh GCD spans the full processor address width and tracks no blocks.
#[test]
fn test_gcd_initialization() {
    // Fixed typo: local was misspelled `gdc`.
    let gcd = GCD::new(48);
    assert_eq!(2_usize.pow(48), gcd.maximum_address);
    assert_eq!(gcd.memory_blocks.capacity(), 0);
    assert_eq!(0, gcd.memory_descriptor_count());
}
/// The first `add_memory_space` must donate SystemMemory large enough to host
/// the internal memory-block slice; any other type, or an undersized range,
/// fails with OutOfResources and leaves the GCD empty.
#[test]
fn test_add_memory_space_before_memory_blocks_instantiated() {
    let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) };
    let address = mem.as_ptr() as usize;
    let mut gcd = GCD::new(48);
    assert_eq!(
        Err(EfiError::OutOfResources),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, address, MEMORY_BLOCK_SLICE_SIZE, 0) },
        "First add memory space should be a system memory."
    );
    assert_eq!(0, gcd.memory_descriptor_count());
    assert_eq!(
        Err(EfiError::OutOfResources),
        unsafe {
            gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, address, MEMORY_BLOCK_SLICE_SIZE - 1, 0)
        },
        "First add memory space with system memory should contain enough space to contain the block list."
    );
    assert_eq!(0, gcd.memory_descriptor_count());
}
/// Every addable GcdMemoryType is accepted; NonExistent cannot be added
/// manually. The expected indices (0, 3, 4, ...) reflect internal node
/// placement after `create_gcd()` seeds the map — NOTE(review): these are
/// implementation-dependent; confirm if the Rbt layout changes.
#[test]
fn test_add_memory_space_with_all_memory_type() {
    let (mut gcd, _) = create_gcd();
    assert_eq!(Ok(0), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 0, 1, 0) });
    assert_eq!(Ok(3), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1, 1, 0) });
    assert_eq!(Ok(4), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Persistent, 2, 1, 0) });
    assert_eq!(Ok(5), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::MoreReliable, 3, 1, 0) });
    assert_eq!(Ok(6), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Unaccepted, 4, 1, 0) });
    assert_eq!(Ok(7), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::MemoryMappedIo, 5, 1, 0) });
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::InvalidParameter),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::NonExistent, 10, 1, 0) },
        "Can't manually add NonExistent memory space manually."
    );
    assert!(is_gcd_memory_slice_valid(&gcd));
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// A zero-length add is rejected and leaves the map untouched.
#[test]
fn test_add_memory_space_with_0_len_block() {
    let (mut gcd, _) = create_gcd();
    let before = copy_memory_block(&gcd);
    let outcome = unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 0, 0) };
    assert_eq!(outcome, Err(EfiError::InvalidParameter));
    assert_eq!(before, copy_memory_block(&gcd));
}
/// Fills the memory-block slice to capacity with 1-byte, non-mergeable ranges
/// (distinct capabilities) and verifies the next add fails with
/// OutOfResources without mutating the map.
#[test]
fn test_add_memory_space_when_memory_block_full() {
    let (mut gcd, address) = create_gcd();
    let addr = address + MEMORY_BLOCK_SLICE_SIZE;
    let mut n = 0;
    while gcd.memory_descriptor_count() < MEMORY_BLOCK_SLICE_LEN {
        assert!(
            unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + n, 1, n as u64) }
                .is_ok()
        );
        n += 1;
    }
    assert!(is_gcd_memory_slice_valid(&gcd));
    let memory_blocks_snapshot = copy_memory_block(&gcd);
    let res = unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + n, 1, n as u64) };
    assert_eq!(
        Err(EfiError::OutOfResources),
        res,
        "Should return out of memory if there is no space in memory blocks."
    );
    assert_eq!(memory_blocks_snapshot, copy_memory_block(&gcd),);
}
/// Ranges at or beyond `maximum_address` — even partially — are Unsupported
/// and leave the map untouched.
#[test]
fn test_add_memory_space_outside_processor_range() {
    let (mut gcd, _) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(Err(EfiError::Unsupported), unsafe {
        gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address + 1, 1, 0)
    });
    assert_eq!(Err(EfiError::Unsupported), unsafe {
        gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address, 1, 0)
    });
    assert_eq!(Err(EfiError::Unsupported), unsafe {
        gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 1, 2, 0)
    });
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Overlapping a previously added range — fully inside or at either edge —
/// is AccessDenied and leaves the map untouched.
#[test]
fn test_add_memory_space_in_range_already_added() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 10, 0) }.unwrap();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::AccessDenied),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1002, 5, 0) },
        "Can't add inside a range previously added."
    );
    assert_eq!(
        Err(EfiError::AccessDenied),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 998, 5, 0) },
        "Can't add partially inside a range previously added (Start)."
    );
    assert_eq!(
        Err(EfiError::AccessDenied),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1009, 5, 0) },
        "Can't add partially inside a range previously added (End)."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Overlap with the already-allocated bootstrap region (at the address
/// returned by `create_gcd`) is AccessDenied.
#[test]
fn test_add_memory_space_in_range_already_allocated() {
    let (mut gcd, address) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, address - 100, 100, 0) }.unwrap();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::AccessDenied),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, address, 5, 0) },
        "Can't add inside a range previously allocated."
    );
    assert_eq!(
        Err(EfiError::AccessDenied),
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, address - 100, 200, 0) },
        "Can't add partially inside a range previously allocated."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Adjacent ranges with identical memory type and capabilities merge into a
/// single block; differing type or capabilities prevent merging.
#[test]
fn test_add_memory_space_block_merging() {
    let (mut gcd, _) = create_gcd();
    assert_eq!(Ok(4), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 10, 0) });
    let block_count = gcd.memory_descriptor_count();
    // Append at the end of the existing block: merges forward.
    match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1010, 10, 0) } {
        Ok(idx) => {
            let mb = gcd.memory_blocks.get_with_idx(idx).unwrap();
            assert_eq!(1000, mb.as_ref().base_address);
            assert_eq!(20, mb.as_ref().length);
            assert_eq!(block_count, gcd.memory_descriptor_count());
        }
        Err(e) => panic!("{e:?}"),
    }
    // Prepend just before the block: merges backward.
    match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 990, 10, 0) } {
        Ok(idx) => {
            let mb = gcd.memory_blocks.get_with_idx(idx).unwrap();
            assert_eq!(990, mb.as_ref().base_address);
            assert_eq!(30, mb.as_ref().length);
            assert_eq!(block_count, gcd.memory_descriptor_count());
        }
        Err(e) => panic!("{e:?}"),
    }
    assert!(
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1020, 10, 0) }.is_ok(),
        // Fixed typo in message: was "should note result".
        "A different memory type should not result in a merge."
    );
    assert_eq!(block_count + 1, gcd.memory_descriptor_count());
    assert!(
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1030, 10, 1) }.is_ok(),
        // Fixed typo in message: was "should note result".
        "A different capabilities should not result in a merge."
    );
    assert_eq!(block_count + 2, gcd.memory_descriptor_count());
    assert!(is_gcd_memory_slice_valid(&gcd));
}
/// A plain add leaves the new block Unallocated, with runtime and
/// access-mask bits folded into the requested capabilities and both owner
/// handles null.
#[test]
fn test_add_memory_space_state() {
    let (mut gcd, _) = create_gcd();
    match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 100, 10, 123) } {
        Ok(idx) => {
            let mb = *gcd.memory_blocks.get_with_idx(idx).unwrap();
            match mb {
                MemoryBlock::Unallocated(md) => {
                    assert_eq!(100, md.base_address);
                    assert_eq!(10, md.length);
                    assert_eq!(efi::MEMORY_RUNTIME | efi::MEMORY_ACCESS_MASK | 123, md.capabilities);
                    assert_eq!(0, md.image_handle as usize);
                    assert_eq!(0, md.device_handle as usize);
                }
                MemoryBlock::Allocated(_) => panic!("Add should keep the block unallocated"),
            }
        }
        Err(e) => panic!("{e:?}"),
    }
}
/// Removing from a GCD whose block storage was never seeded reports NotFound.
#[test]
fn test_remove_memory_space_before_memory_blocks_instantiated() {
    let backing = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) };
    let base = backing.as_ptr() as usize;
    let mut gcd = GCD::new(48);
    let outcome = gcd.remove_memory_space(base, MEMORY_BLOCK_SLICE_SIZE);
    assert_eq!(outcome, Err(EfiError::NotFound));
}
/// Zero-length remove is InvalidParameter, and that check takes priority over
/// the NotFound that an unadded address would otherwise produce.
#[test]
fn test_remove_memory_space_with_0_len_block() {
    let (mut gcd, _) = create_gcd();
    assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 10, 0) }.is_ok());
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(Err(EfiError::InvalidParameter), gcd.remove_memory_space(5, 0));
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.remove_memory_space(10, 0),
        "If there is no allocate done first, 0 length invalid param should have priority."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Removes that reach at or past `maximum_address` are Unsupported, even when
/// part of the span was previously added.
#[test]
fn test_remove_memory_space_outside_processor_range() {
    let (mut gcd, _) = create_gcd();
    assert!(
        unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 10, 10, 0) }
            .is_ok()
    );
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::Unsupported),
        gcd.remove_memory_space(gcd.maximum_address - 10, 11),
        "An address outside the processor range support is invalid."
    );
    assert_eq!(
        Err(EfiError::Unsupported),
        gcd.remove_memory_space(gcd.maximum_address, 10),
        "An address outside the processor range support is invalid."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Remove requires the span to be fully inside previously added space; partial
/// overlap at either edge, or no overlap at all, reports NotFound.
#[test]
fn test_remove_memory_space_in_range_not_added() {
    let (mut gcd, _) = create_gcd();
    assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 100, 10, 0) }.is_ok());
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.remove_memory_space(95, 10),
        "Can't remove memory space partially added."
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.remove_memory_space(105, 10),
        "Can't remove memory space partially added."
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.remove_memory_space(10, 10),
        "Can't remove memory space not previously added."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Removes over the allocated bootstrap region: partial overlap at its edges
/// is NotFound; a span fully inside the allocation is AccessDenied.
#[test]
fn test_remove_memory_space_in_range_allocated() {
    let (mut gcd, address) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.remove_memory_space(address - 5, 10),
        "Can't remove memory space partially allocated."
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.remove_memory_space(address + MEMORY_BLOCK_SLICE_SIZE - 5, 10),
        "Can't remove memory space partially allocated."
    );
    assert_eq!(
        Err(EfiError::AccessDenied),
        gcd.remove_memory_space(address + 10, 10),
        "Can't remove memory space not previously allocated."
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// With the block slice full, a remove that would split an existing block
/// cannot allocate the extra node and fails OutOfResources without mutating
/// the map.
#[test]
fn test_remove_memory_space_when_memory_block_full() {
    let (mut gcd, address) = create_gcd();
    let addr = address + MEMORY_BLOCK_SLICE_SIZE;
    assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr, 10, 0_u64) }.is_ok());
    let mut n = 1;
    while gcd.memory_descriptor_count() < MEMORY_BLOCK_SLICE_LEN {
        assert!(
            unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + 10 + n, 1, n as u64) }
                .is_ok()
        );
        n += 1;
    }
    assert!(is_gcd_memory_slice_valid(&gcd));
    let memory_blocks_snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::OutOfResources),
        gcd.remove_memory_space(addr, 5),
        "Should return out of memory if there is no space in memory blocks."
    );
    assert_eq!(memory_blocks_snapshot, copy_memory_block(&gcd),);
}
/// Removing adjacent page-sized chunks merges the resulting NonExistent
/// blocks back together. The region under test is relocated a page-aligned
/// distance away from the bootstrap allocation to avoid interference.
#[test]
fn test_remove_memory_space_block_merging() {
    let (mut gcd, address) = create_gcd();
    let page_size = 0x1000;
    let aligned_address = address & !(page_size - 1);
    let aligned_length = page_size * 10;
    // Move below the bootstrap region if possible, otherwise above it.
    let aligned_address = if aligned_address > aligned_length {
        aligned_address - aligned_length
    } else {
        aligned_address + aligned_length
    };
    assert!(
        unsafe {
            gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, aligned_address, aligned_length, 0)
        }
        .is_ok()
    );
    let block_count = gcd.memory_descriptor_count();
    // Remove the first five pages one at a time; each removal merges into the
    // preceding removed chunk, so only one extra block appears.
    for i in 0..5 {
        assert!(gcd.remove_memory_space(aligned_address + i * page_size, page_size).is_ok());
    }
    assert_eq!(aligned_address, copy_memory_block(&gcd)[1].as_ref().base_address as usize);
    assert_eq!(aligned_length / 2, copy_memory_block(&gcd)[1].as_ref().length as usize);
    assert_eq!(block_count + 1, gcd.memory_descriptor_count());
    assert!(is_gcd_memory_slice_valid(&gcd));
    assert!(gcd.remove_memory_space(aligned_address + page_size * 5, page_size).is_ok());
    assert_eq!(block_count + 1, gcd.memory_descriptor_count());
    assert!(is_gcd_memory_slice_valid(&gcd));
}
/// A removed sub-range becomes an Unallocated block with cleared capabilities
/// and null owner handles.
#[test]
fn test_remove_memory_space_state() {
    let (mut gcd, address) = create_gcd();
    assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, address, 123) }.is_ok());
    match gcd.remove_memory_space(0, 10) {
        Ok(_) => {
            let mb = copy_memory_block(&gcd)[0];
            match mb {
                MemoryBlock::Unallocated(md) => {
                    assert_eq!(0, md.base_address);
                    assert_eq!(10, md.length);
                    assert_eq!(0, md.capabilities);
                    assert_eq!(0, md.image_handle as usize);
                    assert_eq!(0, md.device_handle as usize);
                }
                MemoryBlock::Allocated(_) => panic!("remove should keep the block unallocated"),
            }
        }
        Err(e) => panic!("{e:?}"),
    }
}
/// Address-targeted allocation on an unseeded GCD reports NotFound.
#[test]
fn test_allocate_memory_space_before_memory_blocks_instantiated() {
    let mut gcd = GCD::new(48);
    let outcome = gcd.allocate_memory_space(
        AllocateType::Address(0),
        dxe_services::GcdMemoryType::SystemMemory,
        UEFI_PAGE_SHIFT,
        10,
        1 as _,
        None,
    );
    assert_eq!(outcome, Err(EfiError::NotFound));
}
/// Zero-length allocation is InvalidParameter and leaves the map untouched.
#[test]
fn test_allocate_memory_space_with_0_len_block() {
    let (mut gcd, _) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.allocate_memory_space(
            AllocateType::BottomUp(None),
            dxe_services::GcdMemoryType::Reserved,
            UEFI_PAGE_SHIFT,
            0,
            1 as _,
            None
        ),
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// A null image handle is rejected with InvalidParameter; the map is
/// untouched.
#[test]
fn test_allocate_memory_space_with_null_image_handle() {
    let (mut gcd, _) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.allocate_memory_space(
            AllocateType::BottomUp(None),
            dxe_services::GcdMemoryType::Reserved,
            0,
            10,
            ptr::null_mut(),
            None
        ),
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Address allocations that start near or beyond `maximum_address` (so the
/// requested span cannot fit in added space) report NotFound.
#[test]
fn test_allocate_memory_space_with_address_outside_processor_range() {
    let (mut gcd, _) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(gcd.maximum_address - 100),
            dxe_services::GcdMemoryType::Reserved,
            0,
            1000,
            1 as _,
            None
        ),
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(gcd.maximum_address + 100),
            dxe_services::GcdMemoryType::Reserved,
            0,
            1000,
            1 as _,
            None
        ),
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Every GCD memory type can be allocated except Unaccepted, which is
/// rejected with InvalidParameter — NOTE(review): presumably because
/// unaccepted memory must be accepted before use; confirm against the
/// allocator's documentation.
#[test]
fn test_allocate_memory_space_with_all_memory_type() {
    let (mut gcd, _) = create_gcd();
    for (i, memory_type) in [
        dxe_services::GcdMemoryType::Reserved,
        dxe_services::GcdMemoryType::SystemMemory,
        dxe_services::GcdMemoryType::Persistent,
        dxe_services::GcdMemoryType::MemoryMappedIo,
        dxe_services::GcdMemoryType::MoreReliable,
        dxe_services::GcdMemoryType::Unaccepted,
    ]
    .into_iter()
    .enumerate()
    {
        unsafe { gcd.add_memory_space(memory_type, (i + 1) * 10, 10, 0) }.unwrap();
        let res = gcd.allocate_memory_space(AllocateType::Address((i + 1) * 10), memory_type, 0, 10, 1 as _, None);
        match memory_type {
            dxe_services::GcdMemoryType::Unaccepted => assert_eq!(Err(EfiError::InvalidParameter), res),
            _ => assert!(res.is_ok()),
        }
    }
}
/// When no window can satisfy the request: unconstrained BottomUp/TopDown
/// report OutOfResources, while ceiling- or address-constrained variants
/// report NotFound. The map is untouched either way.
#[test]
fn test_allocate_memory_space_with_no_memory_space_available() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 100, 0) }.unwrap();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 100, 0) }.unwrap();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 100, 100, 0) }
        .unwrap();
    let memory_blocks_snapshot = copy_memory_block(&gcd);
    for allocate_type in [AllocateType::BottomUp(None), AllocateType::TopDown(None)] {
        assert_eq!(
            Err(EfiError::OutOfResources),
            gcd.allocate_memory_space(
                allocate_type,
                dxe_services::GcdMemoryType::SystemMemory,
                0,
                1000,
                1 as _,
                None
            ),
            "Assert fail with allocate type: {allocate_type:?}"
        );
    }
    for allocate_type in
        [AllocateType::BottomUp(Some(10_000)), AllocateType::TopDown(Some(10_000)), AllocateType::Address(10_000)]
    {
        assert_eq!(
            Err(EfiError::NotFound),
            gcd.allocate_memory_space(
                allocate_type,
                dxe_services::GcdMemoryType::SystemMemory,
                0,
                1000,
                1 as _,
                None
            ),
            "Assert fail with allocate type: {allocate_type:?}"
        );
    }
    assert_eq!(memory_blocks_snapshot, copy_memory_block(&gcd));
}
/// Exercises alignment handling — expressed as a bit shift — for bottom-up,
/// top-down, and fixed-address allocations within a single 0x1000..0x2000
/// window, then verifies a misaligned fixed address is rejected.
#[test]
fn test_allocate_memory_space_alignment() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x1000, 0) }.unwrap();
    assert_eq!(
        Ok(0x1000),
        gcd.allocate_memory_space(
            AllocateType::BottomUp(None),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            0x0f,
            1 as _,
            None
        ),
        "Allocate bottom up without alignment"
    );
    assert_eq!(
        Ok(0x1010),
        gcd.allocate_memory_space(
            AllocateType::BottomUp(None),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            0x10,
            1 as _,
            None
        ),
        "Allocate bottom up with alignment of 4 bits (find first address that is aligned)"
    );
    assert_eq!(
        Ok(0x1020),
        gcd.allocate_memory_space(
            AllocateType::BottomUp(None),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            100,
            1 as _,
            None
        ),
        "Allocate bottom up with alignment of 4 bits (already aligned)"
    );
    assert_eq!(
        Ok(0x1ff1),
        gcd.allocate_memory_space(
            AllocateType::TopDown(None),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            0x0f,
            1 as _,
            None
        ),
        "Allocate top down without alignment"
    );
    assert_eq!(
        Ok(0x1fe0),
        gcd.allocate_memory_space(
            AllocateType::TopDown(None),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            0x0f,
            1 as _,
            None
        ),
        "Allocate top down with alignment of 4 bits (find first address that is aligned)"
    );
    assert_eq!(
        Ok(0x1f00),
        gcd.allocate_memory_space(
            AllocateType::TopDown(None),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            0xe0,
            1 as _,
            None
        ),
        "Allocate top down with alignment of 4 bits (already aligned)"
    );
    assert_eq!(
        Ok(0x1a00),
        gcd.allocate_memory_space(
            AllocateType::Address(0x1a00),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            100,
            1 as _,
            None
        ),
        "Allocate Address with alignment of 4 bits (already aligned)"
    );
    assert!(is_gcd_memory_slice_valid(&gcd));
    let memory_blocks_snapshot = copy_memory_block(&gcd);
    // A fixed address that is not 16-byte aligned cannot be honored.
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(0x1a0f),
            dxe_services::GcdMemoryType::SystemMemory,
            4,
            100,
            1 as _,
            None
        ),
    );
    assert_eq!(memory_blocks_snapshot, copy_memory_block(&gcd));
}
/// Consecutive allocations merge only when image and device handles match;
/// a fixed-address allocation adjacent to a matching allocated block also
/// merges.
#[test]
fn test_allocate_memory_space_block_merging() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x1000, 0) }.unwrap();
    for allocate_type in [AllocateType::BottomUp(None), AllocateType::TopDown(None)] {
        let block_count = gcd.memory_descriptor_count();
        // First allocation splits the free block: one extra descriptor.
        assert!(
            gcd.allocate_memory_space(allocate_type, dxe_services::GcdMemoryType::SystemMemory, 0, 1, 1 as _, None)
                .is_ok(),
            "{allocate_type:?}"
        );
        assert_eq!(block_count + 1, gcd.memory_descriptor_count());
        // Same handles: merges with the previous allocation.
        assert!(
            gcd.allocate_memory_space(allocate_type, dxe_services::GcdMemoryType::SystemMemory, 0, 1, 1 as _, None)
                .is_ok(),
            "{allocate_type:?}"
        );
        assert_eq!(block_count + 1, gcd.memory_descriptor_count());
        assert!(
            gcd.allocate_memory_space(allocate_type, dxe_services::GcdMemoryType::SystemMemory, 0, 1, 2 as _, None)
                .is_ok(),
            "{allocate_type:?}: A different image handle should not result in a merge."
        );
        assert_eq!(block_count + 2, gcd.memory_descriptor_count());
        assert!(
            gcd.allocate_memory_space(
                allocate_type,
                dxe_services::GcdMemoryType::SystemMemory,
                0,
                1,
                2 as _,
                Some(1 as _)
            )
            .is_ok(),
            "{allocate_type:?}: A different device handle should not result in a merge."
        );
        assert_eq!(block_count + 3, gcd.memory_descriptor_count());
    }
    let block_count = gcd.memory_descriptor_count();
    assert_eq!(
        Ok(0x1000 + 4),
        gcd.allocate_memory_space(
            AllocateType::Address(0x1000 + 4),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            1,
            2 as _,
            Some(1 as _)
        ),
        "Merge should work with address allocation too."
    );
    assert_eq!(block_count, gcd.memory_descriptor_count());
    assert!(is_gcd_memory_slice_valid(&gcd));
}
/// Fixed-address allocation must lie entirely within added space; any
/// straddle past either edge, or a completely unadded address, is NotFound.
#[test]
fn test_allocate_memory_space_with_address_not_added() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x100, 10, 0) }.unwrap();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(0x100),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            11,
            1 as _,
            None
        ),
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(0x95),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            10,
            1 as _,
            None
        ),
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(110),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            5,
            1 as _,
            None
        ),
    );
    assert_eq!(
        Err(EfiError::NotFound),
        gcd.allocate_memory_space(
            AllocateType::Address(0),
            dxe_services::GcdMemoryType::SystemMemory,
            0,
            5,
            1 as _,
            None
        ),
    );
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// The bootstrap region created by `create_gcd()` is already allocated, so a
/// fixed-address allocation over it reports NotFound.
#[test]
fn test_allocate_memory_space_with_address_allocated() {
    let (mut gcd, base) = create_gcd();
    let outcome = gcd.allocate_memory_space(
        AllocateType::Address(base),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        5,
        1 as _,
        None,
    );
    assert_eq!(outcome, Err(EfiError::NotFound));
}
/// Freeing from an unseeded GCD reports NotFound.
#[test]
fn test_free_memory_space_before_memory_blocks_instantiated() {
    let mut gcd = GCD::new(48);
    let outcome = gcd.free_memory_space(0x1000, 0x1000);
    assert_eq!(outcome, Err(EfiError::NotFound));
}
/// Zero-length operation is rejected without mutating the map.
/// NOTE(review): despite its name, this test calls `remove_memory_space`, not
/// `free_memory_space` — likely a copy-paste from the remove tests; confirm
/// the intended target (free with len 0 may return a different error).
#[test]
fn test_free_memory_space_when_0_len_block() {
    let (mut gcd, _) = create_gcd();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(Err(EfiError::InvalidParameter), gcd.remove_memory_space(0, 0));
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Frees touching addresses at or beyond `maximum_address` are Unsupported,
/// even when partially inside an allocated region.
#[test]
fn test_free_memory_space_outside_processor_range() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 100, 100, 0) }
        .unwrap();
    gcd.allocate_memory_space(
        AllocateType::Address(gcd.maximum_address - 100),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        100,
        1 as _,
        None,
    )
    .unwrap();
    let snapshot = copy_memory_block(&gcd);
    assert_eq!(Err(EfiError::Unsupported), gcd.free_memory_space(gcd.maximum_address, 10));
    assert_eq!(Err(EfiError::Unsupported), gcd.free_memory_space(gcd.maximum_address - 99, 100));
    assert_eq!(Err(EfiError::Unsupported), gcd.free_memory_space(gcd.maximum_address + 1, 100));
    assert_eq!(snapshot, copy_memory_block(&gcd));
}
/// Free requires the span to be inside allocated space; adjacent free space
/// or unadded addresses report NotFound.
#[test]
fn test_free_memory_space_in_range_not_allocated() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x3000, 0x3000, 0) }.unwrap();
    gcd.allocate_memory_space(
        AllocateType::Address(0x3000),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        0x1000,
        1 as _,
        None,
    )
    .unwrap();
    assert_eq!(Err(EfiError::NotFound), gcd.free_memory_space(0x2000, 0x1000));
    assert_eq!(Err(EfiError::NotFound), gcd.free_memory_space(0x4000, 0x1000));
    assert_eq!(Err(EfiError::NotFound), gcd.free_memory_space(0, 0x1000));
}
/// Freeing at the start, middle, and end of an allocated block splits it
/// (adding 1, 2, and 2 descriptors respectively); then freeing ranges
/// adjacent to those freed chunks merges blocks back without growing the map.
#[test]
fn test_free_memory_space_merging() {
    let (mut gcd, _) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x10000, 0) }.unwrap();
    gcd.allocate_memory_space(
        AllocateType::Address(0x1000),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        0x10000,
        1 as _,
        None,
    )
    .unwrap();
    let block_count = gcd.memory_descriptor_count();
    assert_eq!(Ok(()), gcd.free_memory_space(0x1000, 0x1000), "Free beginning of a block.");
    assert_eq!(block_count + 1, gcd.memory_descriptor_count());
    assert_eq!(Ok(()), gcd.free_memory_space(0x5000, 0x1000), "Free in the middle of a block");
    assert_eq!(block_count + 3, gcd.memory_descriptor_count());
    assert_eq!(Ok(()), gcd.free_memory_space(0x9000, 0x1000), "Free at the end of a block");
    assert_eq!(block_count + 5, gcd.memory_descriptor_count());
    let block_count = gcd.memory_descriptor_count();
    // Each of the following frees is adjacent to an already-free chunk, so the
    // descriptor count stays constant while blocks merge.
    assert_eq!(Ok(()), gcd.free_memory_space(0x2000, 0x2000));
    assert_eq!(block_count, gcd.memory_descriptor_count());
    let blocks = copy_memory_block(&gcd);
    let mb = blocks[0];
    assert_eq!(0, mb.as_ref().base_address);
    assert_eq!(0x1000, mb.as_ref().length);
    assert_eq!(Ok(()), gcd.free_memory_space(0x6000, 0x1000));
    assert_eq!(block_count, gcd.memory_descriptor_count());
    let blocks = copy_memory_block(&gcd);
    let mb = blocks[2];
    assert_eq!(0x4000, mb.as_ref().base_address);
    assert_eq!(0x1000, mb.as_ref().length);
    assert_eq!(Ok(()), gcd.free_memory_space(0x8000, 0x1000));
    assert_eq!(block_count, gcd.memory_descriptor_count());
    let blocks = copy_memory_block(&gcd);
    let mb = blocks[4];
    assert_eq!(0x7000, mb.as_ref().base_address);
    assert_eq!(0x1000, mb.as_ref().length);
    assert!(is_gcd_memory_slice_valid(&gcd));
}
/// Attribute setting on a bare GCD (no paging) is NotReady; afterwards the
/// call validates address range (Unsupported), zero length and page
/// alignment of base/length (InvalidParameter) across attribute combinations.
#[test]
fn test_set_memory_space_attributes_with_invalid_parameters() {
    let mut gcd = GCD {
        memory_blocks: Rbt::new(),
        maximum_address: 0,
        allocate_memory_space_fn: GCD::allocate_memory_space_internal,
        free_memory_space_fn: GCD::free_memory_space_worker,
        default_attributes: efi::MEMORY_XP,
        prioritize_32_bit_memory: false,
    };
    assert_eq!(Err(EfiError::NotReady), gcd.set_memory_space_attributes(0, 0x50000, 0b1111));
    let (mut gcd, _) = create_gcd();
    assert_eq!(Err(EfiError::Unsupported), gcd.set_memory_space_attributes(0x100000000000000, 50, 0b1111));
    assert_eq!(Err(EfiError::InvalidParameter), gcd.set_memory_space_attributes(0, 0, 0b1111));
    assert_eq!(Err(EfiError::InvalidParameter), gcd.set_memory_space_attributes(0, 0, 0));
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.set_memory_space_attributes(0xFFFFFFFF, 0x1000, efi::MEMORY_WB)
    );
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.set_memory_space_attributes(0xFFFFFFFF, 0x1000, efi::MEMORY_RUNTIME | efi::MEMORY_WB)
    );
    assert_eq!(Err(EfiError::InvalidParameter), gcd.set_memory_space_attributes(0x1000, 0xFFF, efi::MEMORY_WB));
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.set_memory_space_attributes(0x1000, 0xFFF, efi::MEMORY_RUNTIME | efi::MEMORY_WB)
    );
    assert_eq!(
        Err(EfiError::InvalidParameter),
        gcd.set_memory_space_attributes(0xFFFFFFFF, 0xFFF, efi::MEMORY_RUNTIME | efi::MEMORY_WB)
    );
}
// Capabilities can be set on a single range and an attribute drawn from those
// capabilities can then be applied; a range whose capabilities don't cover the
// request is rejected.
#[test]
fn test_set_capabilities_and_attributes() {
    let (mut gcd, base_address) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, base_address - 0x1000, 0) }
        .unwrap();
    gcd.allocate_memory_space(
        AllocateType::BottomUp(None),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        0x2000,
        1 as _,
        None,
    )
    .unwrap();
    // NOTE(review): 0b1111 over 0x1000..0x4000 is expected to fail Unsupported.
    assert_eq!(Err(EfiError::Unsupported), gcd.set_memory_space_capabilities(0x1000, 0x3000, 0b1111));
    // Setting capabilities on a single 0x2000-byte range succeeds, after which
    // an attribute contained in those capabilities may be applied.
    gcd.set_memory_space_capabilities(0x1000, 0x2000, efi::MEMORY_RP | efi::MEMORY_RO | efi::MEMORY_XP).unwrap();
    gcd.set_gcd_memory_attributes(0x1000, 0x2000, efi::MEMORY_RO).unwrap();
}
// Applying attributes over a range larger than the one whose capabilities were
// set is expected to panic (hence #[should_panic]).
#[test]
#[should_panic]
fn test_set_attributes_panic() {
    let (mut gcd, base_address) = create_gcd();
    unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, base_address, 0) }.unwrap();
    gcd.allocate_memory_space(
        AllocateType::BottomUp(None),
        dxe_services::GcdMemoryType::SystemMemory,
        0,
        0x2000,
        1 as _,
        None,
    )
    .unwrap();
    // Capabilities are granted on 0x2000 bytes only...
    gcd.set_memory_space_capabilities(0, 0x2000, efi::MEMORY_RP | efi::MEMORY_RO).unwrap();
    // ...but attributes are requested over 0x3000 bytes, which should panic.
    gcd.set_memory_space_attributes(0, 0x3000, 0b1).unwrap();
}
// Conformance checks for add_io_space: overlap, invalid type, and node
// exhaustion of the io block slice.
#[test]
fn test_invalid_add_io_space() {
    let mut gcd = IoGCD::_new(16);
    assert!(gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 10).is_ok());
    // Overlapping a declared range is denied; an invalid type is rejected.
    assert_eq!(Err(EfiError::AccessDenied), gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 10));
    assert_eq!(Err(EfiError::InvalidParameter), gcd.add_io_space(dxe_services::GcdIoType::NonExistent, 10, 10));
    // Fill the io block slice with alternating types so the node pool runs dry.
    for i in 1..IO_BLOCK_SLICE_LEN {
        let io_type = if i % 2 == 0 { dxe_services::GcdIoType::Maximum } else { dxe_services::GcdIoType::Io };
        gcd.add_io_space(io_type, i * 10, 10).unwrap();
    }
    // One more range cannot be tracked once every node is consumed.
    assert_eq!(
        Err(EfiError::OutOfResources),
        gcd.add_io_space(dxe_services::GcdIoType::Io, (IO_BLOCK_SLICE_LEN + 1) * 10, 10)
    );
}
// Conformance checks for remove_io_space: bad lengths, untracked or allocated
// ranges, and removal that needs a split when no spare nodes remain.
#[test]
fn test_invalid_remove_io_space() {
    let mut gcd = IoGCD::_new(16);
    assert_eq!(Err(EfiError::InvalidParameter), gcd.remove_io_space(0, 0));
    assert_eq!(Err(EfiError::Unsupported), gcd.remove_io_space(0, 70_000));
    assert_eq!(Err(EfiError::NotFound), gcd.remove_io_space(0, 10));
    // An allocated range cannot be removed.
    gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 10).unwrap();
    gcd.allocate_io_space(AllocateType::Address(0), dxe_services::GcdIoType::Io, 0, 10, 1 as _, None).unwrap();
    assert_eq!(Err(EfiError::AccessDenied), gcd.remove_io_space(0, 10));
    // Fill a fresh io slice with alternating types to exhaust the node pool.
    let mut gcd = IoGCD::_new(16);
    for i in 2..IO_BLOCK_SLICE_LEN {
        let io_type = if i % 2 == 0 { dxe_services::GcdIoType::Maximum } else { dxe_services::GcdIoType::Io };
        gcd.add_io_space(io_type, i * 10, 10).unwrap();
    }
    // A partial removal requires splitting a block (needs a new node) and fails;
    // removing an exact range needs no split and succeeds.
    assert_eq!(Err(EfiError::OutOfResources), gcd.remove_io_space(25, 3));
    assert!(gcd.remove_io_space(20, 10).is_ok());
}
// Each AllocateType variant places an io allocation where the spec requires.
#[test]
fn test_ensure_allocate_io_space_conformance() {
    let mut gcd = IoGCD::_new(16);
    assert_eq!(Ok(0), gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 0x4000));
    // Fixed address at the bottom of the space.
    let result = gcd.allocate_io_space(AllocateType::Address(0), dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None);
    assert_eq!(Ok(0), result);
    // Bottom-up lands immediately after the previous allocation.
    let result =
        gcd.allocate_io_space(AllocateType::BottomUp(None), dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None);
    assert_eq!(Ok(0x100), result);
    // Top-down lands at the very end of the declared space.
    let result =
        gcd.allocate_io_space(AllocateType::TopDown(None), dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None);
    assert_eq!(Ok(0x3F00), result);
    // Fixed address in the middle of the remaining free range.
    let result =
        gcd.allocate_io_space(AllocateType::Address(0x1000), dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None);
    assert_eq!(Ok(0x1000), result);
}
// Once the io block slice is exhausted, every allocation strategy that would
// need to split a block must fail with OutOfResources.
#[test]
fn test_ensure_allocations_fail_when_out_of_resources() {
    let mut gcd = IoGCD::_new(16);
    // Alternate types so adjacent ranges never merge, consuming all nodes.
    for i in 0..IO_BLOCK_SLICE_LEN - 1 {
        let io_type = if i % 2 == 0 { dxe_services::GcdIoType::Maximum } else { dxe_services::GcdIoType::Io };
        gcd.add_io_space(io_type, i * 10, 10).unwrap();
    }
    let result = gcd.allocate_bottom_up(dxe_services::GcdIoType::Io, 0, 5, 2 as _, None, 0x4000);
    assert_eq!(Err(EfiError::OutOfResources), result);
    let result = gcd.allocate_top_down(dxe_services::GcdIoType::Io, 0, 5, 2 as _, None, usize::MAX);
    assert_eq!(Err(EfiError::OutOfResources), result);
    let result = gcd.allocate_address(dxe_services::GcdIoType::Io, 0, 5, 2 as _, None, 210);
    assert_eq!(Err(EfiError::OutOfResources), result);
}
// Bottom-up allocation picks the lowest range of the requested type that can
// satisfy both size and alignment.
#[test]
fn test_allocate_bottom_up_conformance() {
    let mut gcd = IoGCD::_new(16);
    // Nothing declared yet, so there is nothing to allocate from.
    let result = gcd.allocate_bottom_up(dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None, 0x4000);
    assert_eq!(Err(EfiError::NotFound), result);
    // Lay out alternating Io / Maximum ranges.
    assert_eq!(Ok(0), gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 0x100));
    assert_eq!(Ok(1), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x100, 0x100));
    assert_eq!(Ok(2), gcd.add_io_space(dxe_services::GcdIoType::Io, 0x200, 0x200));
    assert_eq!(Ok(3), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x400, 0x200));
    // 0x150 bytes don't fit in the first Io range (0x100 long), so the request
    // lands at the start of the second Io range.
    assert_eq!(Ok(0x200), gcd.allocate_bottom_up(dxe_services::GcdIoType::Io, 0, 0x150, 1 as _, None, 0x4000));
    // With an alignment shift of 0b1001 the first Maximum range has no suitably
    // aligned address; the second one at 0x400 does.
    assert_eq!(
        Ok(0x400),
        gcd.allocate_bottom_up(dxe_services::GcdIoType::Maximum, 0b1001, 0x50, 1 as _, None, 0x4000)
    );
}
// Top-down allocation picks the highest range of the requested type that can
// hold the request, placing the allocation at the top of that range.
#[test]
fn test_allocate_top_down_conformance() {
    let mut gcd = IoGCD::_new(16);
    let result = gcd.allocate_top_down(dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None, 0x4000);
    assert_eq!(Err(EfiError::NotFound), result);
    assert_eq!(Ok(0), gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 0x200));
    assert_eq!(Ok(1), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x200, 0x200));
    assert_eq!(Ok(2), gcd.add_io_space(dxe_services::GcdIoType::Io, 0x400, 0x100));
    assert_eq!(Ok(3), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x500, 0x100));
    // The highest Io range (0x400..0x500) is too small for 0x150 bytes, so the
    // request goes to the top of the first Io range: 0x200 - 0x150 = 0xB0.
    assert_eq!(Ok(0xB0), gcd.allocate_top_down(dxe_services::GcdIoType::Io, 0, 0x150, 1 as _, None, usize::MAX));
    // No Reserved ranges were ever declared.
    let result = gcd.allocate_top_down(dxe_services::GcdIoType::Reserved, 0, 0x150, 1 as _, None, usize::MAX);
    assert_eq!(Err(EfiError::NotFound), result);
}
// Fixed-address allocation must fail when the address isn't declared with the
// requested io type.
#[test]
fn test_allocate_address_conformance() {
    let mut gcd = IoGCD::_new(16);
    // No io space declared yet, so a fixed-address allocation cannot succeed.
    let result = gcd.allocate_address(dxe_services::GcdIoType::Io, 0, 0x100, 1 as _, None, 0x200);
    assert_eq!(Err(EfiError::NotFound), result);
    assert_eq!(Ok(0), gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 0x200));
    assert_eq!(Ok(1), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x200, 0x200));
    assert_eq!(Ok(2), gcd.add_io_space(dxe_services::GcdIoType::Io, 0x400, 0x100));
    assert_eq!(Ok(3), gcd.add_io_space(dxe_services::GcdIoType::Maximum, 0x500, 0x100));
    // Address 0 is declared as Io, not Reserved, so the typed lookup fails.
    let result = gcd.allocate_address(dxe_services::GcdIoType::Reserved, 0, 0x100, 1 as _, None, 0);
    assert_eq!(Err(EfiError::NotFound), result);
}
// Conformance checks for free_io_space: bad lengths, untracked ranges, a full
// free, and a partial free that needs a split when the node pool is empty.
#[test]
fn test_free_io_space_conformance() {
    let mut gcd = IoGCD::_new(16);
    assert_eq!(Err(EfiError::InvalidParameter), gcd.free_io_space(0, 0));
    assert_eq!(Err(EfiError::Unsupported), gcd.free_io_space(0, 70_000));
    assert_eq!(Err(EfiError::NotFound), gcd.free_io_space(0, 10));
    // Freeing an allocated range in full succeeds.
    gcd.add_io_space(dxe_services::GcdIoType::Io, 0, 10).unwrap();
    gcd.allocate_io_space(AllocateType::Address(0), dxe_services::GcdIoType::Io, 0, 10, 1 as _, None).unwrap();
    assert_eq!(Ok(()), gcd.free_io_space(0, 10));
    // Exhaust the io slice with alternating types, then allocate one range.
    let mut gcd = IoGCD::_new(16);
    for i in 2..IO_BLOCK_SLICE_LEN {
        let io_type = if i % 2 == 0 { dxe_services::GcdIoType::Maximum } else { dxe_services::GcdIoType::Io };
        gcd.add_io_space(io_type, i * 10, 10).unwrap();
    }
    gcd.allocate_address(dxe_services::GcdIoType::Maximum, 0, 10, 1 as _, None, 100).unwrap();
    // A partial free would need a block split and no spare nodes remain; the
    // exact range frees without a split.
    assert_eq!(Err(EfiError::OutOfResources), gcd.free_io_space(105, 3));
    assert_eq!(Ok(()), gcd.free_io_space(100, 10));
}
// Builds a GCD backed by one slice worth of real memory and returns it along
// with the backing allocation's base address.
fn create_gcd() -> (GCD, usize) {
    let backing = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) };
    let base = backing.as_ptr() as usize;
    let mut gcd = GCD::new(48);
    // SAFETY: `backing` is a live, exclusively-owned allocation of exactly
    // MEMORY_BLOCK_SLICE_SIZE bytes, so declaring it as system memory is sound.
    unsafe {
        gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, base, MEMORY_BLOCK_SLICE_SIZE, efi::MEMORY_WB)
            .unwrap();
    }
    (gcd, base)
}
// Snapshots every memory block in the GCD via a tree traversal. Callers (e.g.
// is_gcd_memory_slice_valid) rely on the result being in ascending address
// order — NOTE(review): assumes `Rbt::dfs` is an in-order traversal; confirm.
fn copy_memory_block(gcd: &GCD) -> Vec<MemoryBlock> {
gcd.memory_blocks.dfs()
}
// Validates the structural invariants of the GCD memory map:
//   1. the first block starts at address 0,
//   2. adjacent blocks tile the space with no gaps,
//   3. no two neighbors share the same state (they should have been merged),
//   4. the last block ends exactly at `maximum_address`.
//
// Fix: the previous implementation tracked the end address via `windows(2)`,
// which yields nothing for a single-block map, so `last_addr` stayed 0 and a
// valid one-block map was reported invalid. Checking the last block directly
// handles that case.
fn is_gcd_memory_slice_valid(gcd: &GCD) -> bool {
    let memory_blocks = &gcd.memory_blocks;
    // Invariant 1: the map must begin at address 0.
    match memory_blocks.first_idx().map(|idx| memory_blocks.get_with_idx(idx).unwrap().start()) {
        Some(0) => (),
        _ => return false,
    }
    let blocks = copy_memory_block(gcd);
    // Invariants 2 and 3: contiguous coverage, no same-state neighbors.
    for pair in blocks.windows(2) {
        let (a, b) = (&pair[0], &pair[1]);
        if a.end() != b.start() || a.is_same_state(b) {
            return false;
        }
    }
    // Invariant 4: the final block must end at the GCD's maximum address.
    // (blocks is non-empty here because first_idx() returned Some above.)
    match blocks.last() {
        Some(last) => last.end() == gcd.maximum_address,
        None => false,
    }
}
// Leaks a page-aligned heap allocation of `size` bytes and returns it as a
// 'static mutable slice for use as GCD backing storage in tests.
//
// Fix: `alloc` returns null on allocation failure, and constructing a slice
// from a null pointer is undefined behavior — abort via handle_alloc_error
// instead of handing back a bogus reference.
unsafe fn get_memory(size: usize) -> &'static mut [u8] {
    let layout = alloc::alloc::Layout::from_size_align(size, UEFI_PAGE_SIZE).unwrap();
    // SAFETY: all callers in this module pass a non-zero `size`.
    let addr = unsafe { alloc::alloc::alloc(layout) };
    if addr.is_null() {
        alloc::alloc::handle_alloc_error(layout);
    }
    // SAFETY: `addr` is non-null and points to `size` bytes that are never
    // freed, so a 'static mutable slice over them is sound (the allocation is
    // intentionally leaked for the duration of the test process).
    unsafe { core::slice::from_raw_parts_mut(addr, size) }
}
// Every memory-space operation on a SpinLockedGcd must fail with NotReady
// before init() has been called.
#[test]
fn spin_locked_allocator_should_error_if_not_initialized() {
    with_locked_state(|| {
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(None);
        assert_eq!(TEST_GCD.memory.lock().maximum_address, 0);
        assert_eq!(
            unsafe { TEST_GCD.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 100, 0) },
            Err(EfiError::NotReady)
        );
        assert_eq!(
            TEST_GCD.allocate_memory_space(
                AllocateType::Address(0),
                dxe_services::GcdMemoryType::SystemMemory,
                0,
                10,
                1 as _,
                None,
            ),
            Err(EfiError::NotReady)
        );
        assert_eq!(TEST_GCD.free_memory_space(0, 10), Err(EfiError::NotReady));
        assert_eq!(TEST_GCD.remove_memory_space(0, 10), Err(EfiError::NotReady));
    });
}
// After init(), memory- and io-space operations on a SpinLockedGcd work end
// to end.
#[test]
fn spin_locked_allocator_init_should_initialize() {
    with_locked_state(|| {
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(None);
        assert_eq!(TEST_GCD.memory.lock().maximum_address, 0);
        let backing = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) };
        let base = backing.as_ptr() as usize;
        TEST_GCD.init(48, 16);
        // SAFETY: `backing` is a live allocation of exactly MEMORY_BLOCK_SLICE_SIZE bytes.
        unsafe {
            TEST_GCD
                .add_memory_space(
                    dxe_services::GcdMemoryType::SystemMemory,
                    base,
                    MEMORY_BLOCK_SLICE_SIZE,
                    efi::MEMORY_WB,
                )
                .unwrap();
        }
        TEST_GCD.add_io_space(dxe_services::GcdIoType::Io, 0, 100).unwrap();
        TEST_GCD.allocate_io_space(AllocateType::Address(0), dxe_services::GcdIoType::Io, 0, 10, 1 as _, None).unwrap();
        TEST_GCD.free_io_space(0, 10).unwrap();
        TEST_GCD.remove_io_space(0, 10).unwrap();
    });
}
// The map-change callback registered at construction must fire when memory
// space is added, and must report the AddMemorySpace change type.
#[test]
fn callback_should_fire_when_map_changes() {
    with_locked_state(|| {
        static CALLBACK_INVOKED: AtomicBool = AtomicBool::new(false);
        fn map_callback(map_change_type: MapChangeType) {
            CALLBACK_INVOKED.store(true, core::sync::atomic::Ordering::SeqCst);
            assert_eq!(map_change_type, MapChangeType::AddMemorySpace);
        }
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(Some(map_callback));
        assert_eq!(TEST_GCD.memory.lock().maximum_address, 0);
        let backing = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) };
        let base = backing.as_ptr() as usize;
        TEST_GCD.init(48, 16);
        // SAFETY: `backing` is a live allocation of exactly MEMORY_BLOCK_SLICE_SIZE bytes.
        unsafe {
            TEST_GCD
                .add_memory_space(
                    dxe_services::GcdMemoryType::SystemMemory,
                    base,
                    MEMORY_BLOCK_SLICE_SIZE,
                    efi::MEMORY_WB,
                )
                .unwrap();
        }
        assert!(CALLBACK_INVOKED.load(core::sync::atomic::Ordering::SeqCst));
    });
}
// set_memory_space_capabilities on a SpinLockedGcd must notify the map-change
// callback with the SetMemoryCapabilities change type.
#[test]
fn test_spin_locked_set_attributes_capabilities() {
    with_locked_state(|| {
        static CAPABILITY_CALLBACK_FIRED: AtomicBool = AtomicBool::new(false);
        // Only the SetMemoryCapabilities change type flips the flag.
        fn map_callback(map_change_type: MapChangeType) {
            if map_change_type == MapChangeType::SetMemoryCapabilities {
                CAPABILITY_CALLBACK_FIRED.store(true, core::sync::atomic::Ordering::SeqCst);
            }
        }
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(Some(map_callback));
        assert_eq!(TEST_GCD.memory.lock().maximum_address, 0);
        // Over-allocate so the base can be aligned up to a page boundary.
        let backing = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 2) };
        let base = align_up(backing.as_ptr() as usize, 0x1000).unwrap();
        TEST_GCD.init(48, 16);
        // SAFETY: base..base + MEMORY_BLOCK_SLICE_SIZE lies inside the live backing allocation.
        unsafe {
            TEST_GCD
                .add_memory_space(
                    dxe_services::GcdMemoryType::SystemMemory,
                    base,
                    MEMORY_BLOCK_SLICE_SIZE,
                    efi::MEMORY_WB,
                )
                .unwrap();
        }
        TEST_GCD
            .set_memory_space_capabilities(
                base,
                0x1000,
                efi::MEMORY_RP | efi::MEMORY_RO | efi::MEMORY_XP | efi::MEMORY_WB,
            )
            .unwrap();
        assert!(CAPABILITY_CALLBACK_FIRED.load(core::sync::atomic::Ordering::SeqCst));
    });
}
// Repeated bottom-up allocations must return strictly increasing addresses
// until the declared space is exhausted.
#[test]
fn allocate_bottom_up_should_allocate_increasing_addresses() {
    with_locked_state(|| {
        use std::{alloc::GlobalAlloc, println};
        const GCD_SIZE: usize = 0x100000;
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(None);
        TEST_GCD.init(48, 16);
        let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap();
        let base = unsafe { std::alloc::System.alloc(layout) as u64 };
        // SAFETY: `base` points at a live GCD_SIZE allocation from the system allocator.
        unsafe {
            TEST_GCD
                .add_memory_space(
                    dxe_services::GcdMemoryType::SystemMemory,
                    base as usize,
                    GCD_SIZE,
                    efi::MEMORY_WB,
                )
                .unwrap();
        }
        println!("GCD base: {base:#x?}");
        let mut last_allocation = 0;
        loop {
            let allocate_result = TEST_GCD.allocate_memory_space(
                AllocateType::BottomUp(None),
                dxe_services::GcdMemoryType::SystemMemory,
                12,
                0x1000,
                1 as _,
                None,
            );
            println!("Allocation result: {allocate_result:#x?}");
            // Stop once the space is exhausted (after logging the error result).
            let Ok(address) = allocate_result else { break };
            assert!(
                address > last_allocation,
                "address {address:#x?} is lower than previously allocated address {last_allocation:#x?}",
            );
            last_allocation = address;
        }
    });
}
// Repeated top-down allocations must return strictly decreasing addresses
// until the declared space is exhausted.
#[test]
fn allocate_top_down_should_allocate_decreasing_addresses() {
    with_locked_state(|| {
        use std::{alloc::GlobalAlloc, println};
        const GCD_SIZE: usize = 0x100000;
        static TEST_GCD: SpinLockedGcd = SpinLockedGcd::new(None);
        TEST_GCD.init(48, 16);
        let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap();
        let base = unsafe { std::alloc::System.alloc(layout) as u64 };
        // SAFETY: `base` points at a live GCD_SIZE allocation from the system allocator.
        unsafe {
            TEST_GCD
                .add_memory_space(
                    dxe_services::GcdMemoryType::SystemMemory,
                    base as usize,
                    GCD_SIZE,
                    efi::MEMORY_WB,
                )
                .unwrap();
        }
        println!("GCD base: {base:#x?}");
        let mut last_allocation = usize::MAX;
        loop {
            let allocate_result = TEST_GCD.allocate_memory_space(
                AllocateType::TopDown(None),
                dxe_services::GcdMemoryType::SystemMemory,
                12,
                0x1000,
                1 as _,
                None,
            );
            println!("Allocation result: {allocate_result:#x?}");
            // Stop once the space is exhausted (after logging the error result).
            let Ok(address) = allocate_result else { break };
            assert!(
                address < last_allocation,
                "address {address:#x?} is higher than previously allocated address {last_allocation:#x?}",
            );
            last_allocation = address;
        }
    });
}
// Page 0 must never be handed out by implicit (bottom-up / top-down) allocation
// strategies, but an explicit AllocateType::Address(0) request may claim it.
#[test]
fn test_allocate_page_zero_should_fail() {
let (mut gcd, _) = create_gcd();
unsafe {
gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 0x2000, efi::MEMORY_WB).unwrap();
}
// Bottom-up skips page 0 and returns the next page instead.
let res = gcd.allocate_memory_space(
AllocateType::BottomUp(None),
dxe_services::GcdMemoryType::SystemMemory,
0,
0x1000,
1 as _,
None,
);
assert_eq!(res.unwrap(), 0x1000, "Should not be able to allocate page 0");
// With only page 0 left in this range, top-down must refuse rather than
// hand out page 0.
let res = gcd.allocate_memory_space(
AllocateType::TopDown(None),
dxe_services::GcdMemoryType::SystemMemory,
0,
0x1000,
1 as _,
None,
);
assert_eq!(res, Err(EfiError::OutOfResources), "Should not be able to allocate page 0");
unsafe {
gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x2000, 0x2000, efi::MEMORY_WB).unwrap();
}
// A non-zero page satisfies the bottom-up request normally.
let res = gcd.allocate_memory_space(
AllocateType::BottomUp(None),
dxe_services::GcdMemoryType::SystemMemory,
0,
0x2000,
1 as _,
None,
);
assert_eq!(res.unwrap(), 0x2000, "Should be able to allocate page 0x2000");
// An explicit fixed-address request for page 0 is allowed.
let res = gcd.allocate_memory_space(
AllocateType::Address(0),
dxe_services::GcdMemoryType::SystemMemory,
0,
UEFI_PAGE_SIZE,
1 as _,
None,
);
assert_eq!(res.unwrap(), 0x0, "Should be able to allocate page 0 by address");
}
// With prioritize_32_bit_memory set, top-down allocations should be satisfied
// below the 4GB line first, falling back to higher memory only when the
// request cannot fit under 4GB.
#[test]
fn test_prioritize_32_bit_memory_top_down() {
let (mut gcd, _) = create_gcd();
gcd.prioritize_32_bit_memory = true;
// Declare 8GB of system memory spanning the 4GB boundary.
unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 2 * SIZE_4GB, 0) }.unwrap();
// A small request lands at the top of the below-4GB region.
let res = gcd.allocate_memory_space(
AllocateType::TopDown(None),
dxe_services::GcdMemoryType::SystemMemory,
UEFI_PAGE_SHIFT,
0x10000,
1 as _,
None,
);
assert_eq!(res.unwrap(), SIZE_4GB - 0x10000, "Should allocate below 4GB when prioritizing 32-bit memory");
// A full-4GB request no longer fits below the line, so it falls back to the
// region above 4GB.
let res = gcd.allocate_memory_space(
AllocateType::TopDown(None),
dxe_services::GcdMemoryType::SystemMemory,
UEFI_PAGE_SHIFT,
SIZE_4GB,
1 as _,
None,
);
assert_eq!(res.unwrap(), SIZE_4GB, "Failed to fall back to higher memory as expected");
// Free everything, then request more than 4GB: only the high region can hold
// it, so the fallback path must still succeed.
gcd.free_memory_space(SIZE_4GB - 0x10000, 0x10000).unwrap();
gcd.free_memory_space(SIZE_4GB, SIZE_4GB).unwrap();
let res = gcd.allocate_memory_space(
AllocateType::TopDown(None),
dxe_services::GcdMemoryType::SystemMemory,
UEFI_PAGE_SHIFT,
SIZE_4GB + 0x1000,
1 as _,
None,
);
assert!(res.is_ok(), "Failed to fallback to higher memory as expected");
}
}