use std::fmt::Debug;
use std::mem::{offset_of, size_of};
use hyperlight_common::mem::{GuestStackData, HyperlightPEB, RunMode, PAGE_SIZE_USIZE};
use paste::paste;
use rand::rngs::OsRng;
use rand::RngCore;
use tracing::{instrument, Span};
use super::memory_region::MemoryRegionType::{
BootStack, Code, GuardPage, GuestErrorData, Heap, HostExceptionData, HostFunctionDefinitions,
InputData, KernelStack, OutputData, PageTables, PanicContext, Peb, Stack,
};
use super::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionVecBuilder};
use super::mgr::AMOUNT_OF_MEMORY_PER_PT;
use super::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory};
use crate::error::HyperlightError::{GuestOffsetIsInvalid, MemoryRequestTooBig};
use crate::sandbox::SandboxConfiguration;
use crate::{log_then_return, new_error, Result};
/// The layout of a sandbox's guest memory: the byte offset (from the start
/// of sandbox memory) of every structure and buffer shared between host and
/// guest, plus the sizes used to derive them. All offsets are computed once
/// in [`SandboxMemoryLayout::new`].
#[derive(Copy, Clone)]
pub(crate) struct SandboxMemoryLayout {
    pub(super) sandbox_memory_config: SandboxConfiguration,
    // Guest user-stack size; stored already rounded up to a page multiple
    // by `new`.
    pub(super) stack_size: usize,
    // Guest heap size as requested (not rounded here).
    pub(super) heap_size: usize,
    // Offset of the `HyperlightPEB` structure itself.
    peb_offset: usize,
    // Offsets of individual `HyperlightPEB` fields, each computed in `new`
    // as `peb_offset + offset_of!(HyperlightPEB, <field>)`.
    peb_security_cookie_seed_offset: usize,
    peb_guest_dispatch_function_ptr_offset: usize,
    pub(super) peb_host_function_definitions_offset: usize,
    pub(crate) peb_host_exception_offset: usize,
    peb_guest_error_offset: usize,
    peb_code_and_outb_pointer_offset: usize,
    peb_runmode_offset: usize,
    peb_input_data_offset: usize,
    peb_output_data_offset: usize,
    peb_guest_panic_context_offset: usize,
    peb_heap_data_offset: usize,
    peb_guest_stack_data_offset: usize,
    // Offsets of the page-aligned data buffers laid out after the PEB.
    pub(crate) host_function_definitions_buffer_offset: usize,
    pub(crate) host_exception_buffer_offset: usize,
    pub(super) guest_error_buffer_offset: usize,
    pub(super) input_data_buffer_offset: usize,
    pub(super) output_data_buffer_offset: usize,
    guest_panic_context_buffer_offset: usize,
    guest_heap_buffer_offset: usize,
    // Guard page separating the heap from the user stack.
    guard_page_offset: usize,
    guest_user_stack_buffer_offset: usize,
    user_stack_guard_page_offset: usize,
    kernel_stack_buffer_offset: usize,
    kernel_stack_guard_page_offset: usize,
    // Kernel stack size rounded up to a page multiple.
    #[allow(dead_code)]
    pub(super) kernel_stack_size_rounded: usize,
    boot_stack_buffer_offset: usize,
    // Absolute guest address of the PEB (`BASE_ADDRESS + peb_offset`).
    pub(crate) peb_address: usize,
    code_size: usize,
    // Total bytes reserved at the start of memory for page tables.
    total_page_table_size: usize,
    // Offset where guest code is loaded (immediately after the page tables).
    guest_code_offset: usize,
}
impl Debug for SandboxMemoryLayout {
    /// Renders every offset and size in hex, which is the natural base for
    /// diagnosing memory-layout issues.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SandboxMemoryLayout")
            .field(
                "Total Memory Size",
                // Falls back to 0 if the layout exceeds the maximum size.
                &format_args!("{:#x}", self.get_memory_size().unwrap_or(0)),
            )
            .field("Stack Size", &format_args!("{:#x}", self.stack_size))
            .field("Heap Size", &format_args!("{:#x}", self.heap_size))
            .field("PEB Address", &format_args!("{:#x}", self.peb_address))
            .field("PEB Offset", &format_args!("{:#x}", self.peb_offset))
            .field("Code Size", &format_args!("{:#x}", self.code_size))
            .field(
                "Security Cookie Seed Offset",
                &format_args!("{:#x}", self.peb_security_cookie_seed_offset),
            )
            .field(
                "Guest Dispatch Function Pointer Offset",
                &format_args!("{:#x}", self.peb_guest_dispatch_function_ptr_offset),
            )
            .field(
                "Host Function Definitions Offset",
                &format_args!("{:#x}", self.peb_host_function_definitions_offset),
            )
            .field(
                "Host Exception Offset",
                &format_args!("{:#x}", self.peb_host_exception_offset),
            )
            .field(
                "Guest Error Offset",
                &format_args!("{:#x}", self.peb_guest_error_offset),
            )
            .field(
                "Code and OutB Pointer Offset",
                &format_args!("{:#x}", self.peb_code_and_outb_pointer_offset),
            )
            .field(
                "Input Data Offset",
                &format_args!("{:#x}", self.peb_input_data_offset),
            )
            .field(
                "Output Data Offset",
                &format_args!("{:#x}", self.peb_output_data_offset),
            )
            .field(
                "Guest Panic Context Offset",
                &format_args!("{:#x}", self.peb_guest_panic_context_offset),
            )
            .field(
                "Guest Heap Offset",
                &format_args!("{:#x}", self.peb_heap_data_offset),
            )
            .field(
                "Guest Stack Offset",
                &format_args!("{:#x}", self.peb_guest_stack_data_offset),
            )
            .field(
                "Host Function Definitions Buffer Offset",
                &format_args!("{:#x}", self.host_function_definitions_buffer_offset),
            )
            .field(
                "Host Exception Buffer Offset",
                &format_args!("{:#x}", self.host_exception_buffer_offset),
            )
            .field(
                "Guest Error Buffer Offset",
                &format_args!("{:#x}", self.guest_error_buffer_offset),
            )
            .field(
                "Input Data Buffer Offset",
                &format_args!("{:#x}", self.input_data_buffer_offset),
            )
            .field(
                "Output Data Buffer Offset",
                &format_args!("{:#x}", self.output_data_buffer_offset),
            )
            .field(
                "Guest Panic Context Buffer Offset",
                &format_args!("{:#x}", self.guest_panic_context_buffer_offset),
            )
            .field(
                "Guest Heap Buffer Offset",
                &format_args!("{:#x}", self.guest_heap_buffer_offset),
            )
            .field(
                "Guard Page Offset",
                &format_args!("{:#x}", self.guard_page_offset),
            )
            .field(
                "Guest User Stack Buffer Offset",
                &format_args!("{:#x}", self.guest_user_stack_buffer_offset),
            )
            .field(
                "Page Table Size",
                &format_args!("{:#x}", self.total_page_table_size),
            )
            .field(
                "Guest Code Offset",
                &format_args!("{:#x}", self.guest_code_offset),
            )
            .field(
                "User Stack Guard Page Offset",
                &format_args!("{:#x}", self.user_stack_guard_page_offset),
            )
            .field(
                "Kernel Stack Buffer Offset",
                &format_args!("{:#x}", self.kernel_stack_buffer_offset),
            )
            .field(
                "Kernel Stack Guard Page Offset",
                &format_args!("{:#x}", self.kernel_stack_guard_page_offset),
            )
            .field(
                "Boot Stack Buffer Offset",
                &format_args!("{:#x}", self.boot_stack_buffer_offset),
            )
            .finish()
    }
}
impl SandboxMemoryLayout {
    // Fixed layout constants. The four page-table levels (PML4, PDPT, PD,
    // PT) occupy the first pages of sandbox memory.
    /// Offset of the PML4 table within sandbox memory.
    pub(crate) const PML4_OFFSET: usize = 0x0000;
    /// Offset of the PDPT table within sandbox memory.
    pub(super) const PDPT_OFFSET: usize = 0x1000;
    /// Offset of the page directory within sandbox memory.
    pub(super) const PD_OFFSET: usize = 0x2000;
    /// Offset of the first page table within sandbox memory.
    pub(super) const PT_OFFSET: usize = 0x3000;
    /// Absolute guest address of the page directory.
    pub(super) const PD_GUEST_ADDRESS: usize = Self::BASE_ADDRESS + Self::PD_OFFSET;
    /// Absolute guest address of the PDPT.
    pub(super) const PDPT_GUEST_ADDRESS: usize = Self::BASE_ADDRESS + Self::PDPT_OFFSET;
    /// Absolute guest address of the first page table.
    pub(super) const PT_GUEST_ADDRESS: usize = Self::BASE_ADDRESS + Self::PT_OFFSET;
    /// Maximum total sandbox memory: 1 GiB minus the base address.
    const MAX_MEMORY_SIZE: usize = 0x40000000 - Self::BASE_ADDRESS;
    /// Guest base address of sandbox memory (2 MiB).
    pub(crate) const BASE_ADDRESS: usize = 0x0200000;
    /// Size of a stack pointer slot, in bytes (u64).
    const STACK_POINTER_SIZE_BYTES: u64 = 8;
    /// Computes a complete memory layout for the given configuration and
    /// region sizes.
    ///
    /// Regions are laid out in this order (low to high): page tables, guest
    /// code (page-rounded), the `HyperlightPEB`, then page-aligned buffers
    /// for host function definitions, host exceptions, guest errors, input
    /// data, output data, panic context and the heap, followed by a guard
    /// page, the user stack, a guard page, the kernel stack, a guard page,
    /// and finally the boot stack.
    ///
    /// `stack_size` is rounded up to a page multiple before being stored;
    /// `heap_size` is stored as passed (rounding happens where the guard
    /// page offset is computed).
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn new(
        cfg: SandboxConfiguration,
        code_size: usize,
        stack_size: usize,
        heap_size: usize,
    ) -> Result<Self> {
        // Page tables come first; code starts right after them.
        let total_page_table_size =
            Self::get_total_page_table_size(cfg, code_size, stack_size, heap_size);
        let guest_code_offset = total_page_table_size;
        // The PEB follows the (page-rounded) code region.
        let peb_offset = total_page_table_size + round_up_to(code_size, PAGE_SIZE_USIZE);
        // Offsets of each PEB field, derived from the shared C-compatible
        // `HyperlightPEB` struct (hence the camelCase field names).
        let peb_security_cookie_seed_offset =
            peb_offset + offset_of!(HyperlightPEB, security_cookie_seed);
        let peb_guest_dispatch_function_ptr_offset =
            peb_offset + offset_of!(HyperlightPEB, guest_function_dispatch_ptr);
        let peb_host_function_definitions_offset =
            peb_offset + offset_of!(HyperlightPEB, hostFunctionDefinitions);
        let peb_host_exception_offset = peb_offset + offset_of!(HyperlightPEB, hostException);
        let peb_guest_error_offset = peb_offset + offset_of!(HyperlightPEB, guestErrorData);
        let peb_code_and_outb_pointer_offset = peb_offset + offset_of!(HyperlightPEB, pCode);
        let peb_runmode_offset = peb_offset + offset_of!(HyperlightPEB, runMode);
        let peb_input_data_offset = peb_offset + offset_of!(HyperlightPEB, inputdata);
        let peb_output_data_offset = peb_offset + offset_of!(HyperlightPEB, outputdata);
        let peb_guest_panic_context_offset =
            peb_offset + offset_of!(HyperlightPEB, guestPanicContextData);
        let peb_heap_data_offset = peb_offset + offset_of!(HyperlightPEB, guestheapData);
        let peb_guest_stack_data_offset = peb_offset + offset_of!(HyperlightPEB, gueststackData);
        let peb_address = Self::BASE_ADDRESS + peb_offset;
        // Each buffer below starts on a page boundary after the previous
        // region ends; the first one follows the end of the PEB.
        let host_function_definitions_buffer_offset = round_up_to(
            peb_guest_stack_data_offset + size_of::<GuestStackData>(),
            PAGE_SIZE_USIZE,
        );
        let host_exception_buffer_offset = round_up_to(
            host_function_definitions_buffer_offset + cfg.get_host_function_definition_size(),
            PAGE_SIZE_USIZE,
        );
        let guest_error_buffer_offset = round_up_to(
            host_exception_buffer_offset + cfg.get_host_exception_size(),
            PAGE_SIZE_USIZE,
        );
        let input_data_buffer_offset = round_up_to(
            guest_error_buffer_offset + cfg.get_guest_error_buffer_size(),
            PAGE_SIZE_USIZE,
        );
        let output_data_buffer_offset = round_up_to(
            input_data_buffer_offset + cfg.get_input_data_size(),
            PAGE_SIZE_USIZE,
        );
        let guest_panic_context_buffer_offset = round_up_to(
            output_data_buffer_offset + cfg.get_output_data_size(),
            PAGE_SIZE_USIZE,
        );
        let guest_heap_buffer_offset = round_up_to(
            guest_panic_context_buffer_offset + cfg.get_guest_panic_context_buffer_size(),
            PAGE_SIZE_USIZE,
        );
        // Heap, then a guard page, then the stacks (each stack followed by
        // its own guard page).
        let guard_page_offset = round_up_to(guest_heap_buffer_offset + heap_size, PAGE_SIZE_USIZE);
        let guest_user_stack_buffer_offset = guard_page_offset + PAGE_SIZE_USIZE;
        let stack_size_rounded = round_up_to(stack_size, PAGE_SIZE_USIZE);
        let user_stack_guard_page_offset = guest_user_stack_buffer_offset + stack_size_rounded;
        let kernel_stack_buffer_offset = user_stack_guard_page_offset + PAGE_SIZE_USIZE;
        let kernel_stack_size_rounded = round_up_to(cfg.get_kernel_stack_size(), PAGE_SIZE_USIZE);
        let kernel_stack_guard_page_offset = kernel_stack_buffer_offset + kernel_stack_size_rounded;
        let boot_stack_buffer_offset = kernel_stack_guard_page_offset + PAGE_SIZE_USIZE;
        Ok(Self {
            peb_offset,
            // Note: the rounded stack size is what gets stored.
            stack_size: stack_size_rounded,
            heap_size,
            peb_security_cookie_seed_offset,
            peb_guest_dispatch_function_ptr_offset,
            peb_host_function_definitions_offset,
            peb_host_exception_offset,
            peb_guest_error_offset,
            peb_code_and_outb_pointer_offset,
            peb_runmode_offset,
            peb_input_data_offset,
            peb_output_data_offset,
            peb_guest_panic_context_offset,
            peb_heap_data_offset,
            peb_guest_stack_data_offset,
            guest_error_buffer_offset,
            sandbox_memory_config: cfg,
            code_size,
            host_function_definitions_buffer_offset,
            host_exception_buffer_offset,
            input_data_buffer_offset,
            output_data_buffer_offset,
            guest_heap_buffer_offset,
            guest_user_stack_buffer_offset,
            peb_address,
            guest_panic_context_buffer_offset,
            guard_page_offset,
            total_page_table_size,
            guest_code_offset,
            user_stack_guard_page_offset,
            kernel_stack_buffer_offset,
            kernel_stack_guard_page_offset,
            kernel_stack_size_rounded,
            boot_stack_buffer_offset,
        })
    }
    /// Returns the offset of the PEB's `runMode` field.
    pub fn get_run_mode_offset(&self) -> usize {
        self.peb_runmode_offset
    }
    /// Returns the offset of the PEB's host-exception entry (its leading
    /// size field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_host_exception_size_offset(&self) -> usize {
        self.peb_host_exception_offset
    }
    /// Returns the offset of the PEB's guest-error entry (its leading size
    /// field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_guest_error_buffer_size_offset(&self) -> usize {
        self.peb_guest_error_offset
    }
    /// Returns the offset of the guest-error buffer pointer, stored as a
    /// u64 immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_guest_error_buffer_pointer_offset(&self) -> usize {
        self.peb_guest_error_offset + size_of::<u64>()
    }
    /// Returns the offset of the PEB's output-data entry (its leading size
    /// field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_output_data_size_offset(&self) -> usize {
        self.peb_output_data_offset
    }
    /// Returns the offset of the PEB's host-function-definitions entry (its
    /// leading size field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_host_function_definitions_size_offset(&self) -> usize {
        self.peb_host_function_definitions_offset
    }
    /// Returns the offset of the host-function-definitions buffer pointer,
    /// stored as a u64 immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_host_function_definitions_pointer_offset(&self) -> usize {
        self.peb_host_function_definitions_offset + size_of::<u64>()
    }
    /// Returns the offset of the PEB's `GuestStackData` entry, whose first
    /// field holds the minimum guest stack address.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_min_guest_stack_address_offset(&self) -> usize {
        self.peb_guest_stack_data_offset
    }
    /// Returns the (page-rounded) guest user-stack size.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_guest_stack_size(&self) -> usize {
        self.stack_size
    }
    /// Returns the offset of the host-exception buffer itself (not the PEB
    /// entry).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_host_exception_offset(&self) -> usize {
        self.host_exception_buffer_offset
    }
    /// Returns the offset of the outb function pointer, stored as a u64
    /// after the code pointer in the PEB's code-and-outb entry.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_outb_pointer_offset(&self) -> usize {
        self.peb_code_and_outb_pointer_offset + size_of::<u64>()
    }
    /// Returns the offset of the outb context, stored as a u64 after the
    /// outb pointer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_outb_context_offset(&self) -> usize {
        self.get_outb_pointer_offset() + size_of::<u64>()
    }
    /// Returns the offset of the output-data buffer pointer, stored as a
    /// u64 immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_output_data_pointer_offset(&self) -> usize {
        self.get_output_data_size_offset() + size_of::<u64>()
    }
    /// Returns the offset of the output-data buffer itself.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_output_data_offset(&self) -> usize {
        self.output_data_buffer_offset
    }
    /// Returns the offset of the PEB's input-data entry (its leading size
    /// field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_input_data_size_offset(&self) -> usize {
        self.peb_input_data_offset
    }
    /// Returns the offset of the input-data buffer pointer, stored as a u64
    /// immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_input_data_pointer_offset(&self) -> usize {
        self.get_input_data_size_offset() + size_of::<u64>()
    }
    /// Returns the offset of the code pointer (first u64 of the PEB's
    /// code-and-outb entry).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_code_pointer_offset(&self) -> usize {
        self.peb_code_and_outb_pointer_offset
    }
    /// Returns the offset of the guest dispatch-function pointer in the PEB.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_dispatch_function_pointer_offset(&self) -> usize {
        self.peb_guest_dispatch_function_ptr_offset
    }
    /// Returns the offset of the PEB structure itself.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_in_process_peb_offset(&self) -> usize {
        self.peb_offset
    }
    /// Returns the offset of the PEB's heap-data entry (its leading size
    /// field).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_heap_size_offset(&self) -> usize {
        self.peb_heap_data_offset
    }
    /// Returns the offset of the heap buffer pointer, stored as a u64
    /// immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_heap_pointer_offset(&self) -> usize {
        self.get_heap_size_offset() + size_of::<u64>()
    }
    /// Returns the offset of the start (lowest address) of the user-stack
    /// buffer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_top_of_user_stack_offset(&self) -> usize {
        self.guest_user_stack_buffer_offset
    }
    /// Returns the offset of the user stack pointer slot in the PEB's
    /// `GuestStackData` entry (second u64, after the min-stack-address).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_user_stack_pointer_offset(&self) -> usize {
        self.get_min_guest_stack_address_offset() + size_of::<u64>()
    }
    /// Returns the offset of the kernel stack pointer slot (third u64 of
    /// `GuestStackData`).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_kernel_stack_pointer_offset(&self) -> usize {
        self.get_user_stack_pointer_offset() + size_of::<u64>()
    }
    /// Returns the offset of the boot stack pointer slot (fourth u64 of
    /// `GuestStackData`).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_boot_stack_pointer_offset(&self) -> usize {
        self.get_kernel_stack_pointer_offset() + size_of::<u64>()
    }
    /// Returns the offset of the PEB's guest-panic-context entry.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_panic_context_offset(&self) -> usize {
        self.peb_guest_panic_context_offset
    }
    /// Returns the offset of the guest-panic-context size field. Same value
    /// as `get_guest_panic_context_offset` — the size is the entry's first
    /// field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_panic_context_size_offset(&self) -> usize {
        self.peb_guest_panic_context_offset
    }
    /// Returns the offset of the guest-panic-context buffer pointer, stored
    /// as a u64 immediately after the size field.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_panic_context_buffer_pointer_offset(&self) -> usize {
        self.get_guest_panic_context_size_offset() + size_of::<u64>()
    }
    /// Returns the offset of the guest-panic-context buffer itself.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_panic_context_buffer_offset(&self) -> usize {
        self.guest_panic_context_buffer_offset
    }
    /// Returns the offset of the guard page between heap and user stack.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn get_guard_page_offset(&self) -> usize {
        self.guard_page_offset
    }
    /// Returns the total layout size before rounding up to a page multiple:
    /// the boot stack (one page) is the last region, so its end is the end
    /// of the layout.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_unaligned_memory_size(&self) -> usize {
        self.get_boot_stack_buffer_offset() + PAGE_SIZE_USIZE
    }
    /// Returns the offset where guest code is loaded.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_guest_code_offset(&self) -> usize {
        self.guest_code_offset
    }
    /// Returns the absolute guest address of the loaded guest code.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_guest_code_address(&self) -> usize {
        Self::BASE_ADDRESS + self.guest_code_offset
    }
    /// Returns the offset of the guard page above the user stack.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_user_stack_guard_page_offset(&self) -> usize {
        self.user_stack_guard_page_offset
    }
    /// Returns the offset of the kernel stack buffer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_kernel_stack_buffer_offset(&self) -> usize {
        self.kernel_stack_buffer_offset
    }
    /// Returns the offset of the guard page above the kernel stack.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_kernel_stack_guard_page_offset(&self) -> usize {
        self.kernel_stack_guard_page_offset
    }
    /// Returns the offset of the boot stack buffer (the final region).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_boot_stack_buffer_offset(&self) -> usize {
        self.boot_stack_buffer_offset
    }
    // Test-only accessor for the page-table reservation size.
    #[cfg(test)]
    fn get_page_table_size(&self) -> usize {
        self.total_page_table_size
    }
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
fn get_total_page_table_size(
cfg: SandboxConfiguration,
code_size: usize,
stack_size: usize,
heap_size: usize,
) -> usize {
let mut total_mapped_memory_size: usize = round_up_to(code_size, PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(stack_size, PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(heap_size, PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(cfg.get_host_exception_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size +=
round_up_to(cfg.get_host_function_definition_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(cfg.get_guest_error_buffer_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(cfg.get_input_data_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(cfg.get_output_data_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size +=
round_up_to(cfg.get_guest_panic_context_buffer_size(), PAGE_SIZE_USIZE);
total_mapped_memory_size += round_up_to(size_of::<HyperlightPEB>(), PAGE_SIZE_USIZE);
total_mapped_memory_size += Self::BASE_ADDRESS;
total_mapped_memory_size += 3 * PAGE_SIZE_USIZE;
total_mapped_memory_size += 512 * PAGE_SIZE_USIZE;
let num_pages: usize = ((total_mapped_memory_size + AMOUNT_OF_MEMORY_PER_PT - 1)
/ AMOUNT_OF_MEMORY_PER_PT)
+ 1 + 3;
num_pages * PAGE_SIZE_USIZE
}
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn get_memory_size(&self) -> Result<usize> {
let total_memory = self.get_unaligned_memory_size();
let remainder = total_memory % PAGE_SIZE_USIZE;
let multiples = total_memory / PAGE_SIZE_USIZE;
let size = match remainder {
0 => total_memory,
_ => (multiples + 1) * PAGE_SIZE_USIZE,
};
if size > Self::MAX_MEMORY_SIZE {
Err(MemoryRequestTooBig(size, Self::MAX_MEMORY_SIZE))
} else {
Ok(size)
}
}
    /// Builds the ordered list of memory regions (guest address, host
    /// address, protection flags, region type) making up the sandbox,
    /// verifying after each push that the running offset matches the offset
    /// this layout computed in `new`.
    ///
    /// NOTE(review): `push_page_aligned` evidently returns the offset of
    /// the region *following* the one just pushed (the first push of the
    /// page tables is compared against `guest_code_offset`), so each local
    /// below is named for the *next* region, not the one in the push call.
    pub fn get_memory_regions(&self, shared_mem: &GuestSharedMemory) -> Result<Vec<MemoryRegion>> {
        let mut builder = MemoryRegionVecBuilder::new(Self::BASE_ADDRESS, shared_mem.base_addr());
        // Page tables (RW).
        let code_offset = builder.push_page_aligned(
            self.total_page_table_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            PageTables,
        );
        if code_offset != self.guest_code_offset {
            return Err(new_error!(
                "Code offset does not match expected code offset expected: {}, actual: {}",
                self.guest_code_offset,
                code_offset
            ));
        }
        // Guest code (RWX).
        let peb_offset = builder.push_page_aligned(
            self.code_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Code,
        );
        let expected_peb_offset = TryInto::<usize>::try_into(self.peb_offset)?;
        if peb_offset != expected_peb_offset {
            return Err(new_error!(
                "PEB offset does not match expected PEB offset expected: {}, actual: {}",
                expected_peb_offset,
                peb_offset
            ));
        }
        // PEB structure (RW).
        let host_functions_definitions_offset = builder.push_page_aligned(
            size_of::<HyperlightPEB>(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            Peb,
        );
        let expected_host_functions_definitions_offset =
            TryInto::<usize>::try_into(self.host_function_definitions_buffer_offset)?;
        if host_functions_definitions_offset != expected_host_functions_definitions_offset {
            return Err(new_error!(
                "Host Function Definitions offset does not match expected Host Function Definitions offset expected: {}, actual: {}",
                expected_host_functions_definitions_offset,
                host_functions_definitions_offset
            ));
        }
        // Host function definitions buffer (read-only to the guest).
        let host_exception_offset = builder.push_page_aligned(
            self.sandbox_memory_config
                .get_host_function_definition_size(),
            MemoryRegionFlags::READ,
            HostFunctionDefinitions,
        );
        let expected_host_exception_offset =
            TryInto::<usize>::try_into(self.host_exception_buffer_offset)?;
        if host_exception_offset != expected_host_exception_offset {
            return Err(new_error!(
                "Host Exception offset does not match expected Host Exception offset expected: {}, actual: {}",
                expected_host_exception_offset,
                host_exception_offset
            ));
        }
        // Host exception buffer (RW).
        let guest_error_offset = builder.push_page_aligned(
            self.sandbox_memory_config.get_host_exception_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            HostExceptionData,
        );
        let expected_guest_error_offset =
            TryInto::<usize>::try_into(self.guest_error_buffer_offset)?;
        if guest_error_offset != expected_guest_error_offset {
            return Err(new_error!(
                "Guest Error offset does not match expected Guest Error offset expected: {}, actual: {}",
                expected_guest_error_offset,
                guest_error_offset
            ));
        }
        // Guest error buffer (RW).
        let input_data_offset = builder.push_page_aligned(
            self.sandbox_memory_config.get_guest_error_buffer_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            GuestErrorData,
        );
        let expected_input_data_offset = TryInto::<usize>::try_into(self.input_data_buffer_offset)?;
        if input_data_offset != expected_input_data_offset {
            return Err(new_error!(
                "Input Data offset does not match expected Input Data offset expected: {}, actual: {}",
                expected_input_data_offset,
                input_data_offset
            ));
        }
        // Input data buffer (RW).
        let output_data_offset = builder.push_page_aligned(
            self.sandbox_memory_config.get_input_data_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            InputData,
        );
        let expected_output_data_offset =
            TryInto::<usize>::try_into(self.output_data_buffer_offset)?;
        if output_data_offset != expected_output_data_offset {
            return Err(new_error!(
                "Output Data offset does not match expected Output Data offset expected: {}, actual: {}",
                expected_output_data_offset,
                output_data_offset
            ));
        }
        // Output data buffer (RW).
        let guest_panic_context_offset = builder.push_page_aligned(
            self.sandbox_memory_config.get_output_data_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            OutputData,
        );
        let expected_guest_panic_context_offset =
            TryInto::<usize>::try_into(self.guest_panic_context_buffer_offset)?;
        if guest_panic_context_offset != expected_guest_panic_context_offset {
            return Err(new_error!(
                "Guest Panic Context offset does not match expected Guest Panic Context offset expected: {}, actual: {}",
                expected_guest_panic_context_offset,
                guest_panic_context_offset
            ));
        }
        // Guest panic context buffer (RW).
        let heap_offset = builder.push_page_aligned(
            self.sandbox_memory_config
                .get_guest_panic_context_buffer_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            PanicContext,
        );
        let expected_heap_offset = TryInto::<usize>::try_into(self.guest_heap_buffer_offset)?;
        if heap_offset != expected_heap_offset {
            return Err(new_error!(
                "Guest Heap offset does not match expected Guest Heap offset expected: {}, actual: {}",
                expected_heap_offset,
                heap_offset
            ));
        }
        // Heap: executable only when the `executable_heap` feature is on.
        #[cfg(feature = "executable_heap")]
        let guard_page_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Heap,
        );
        #[cfg(not(feature = "executable_heap"))]
        let guard_page_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            Heap,
        );
        let expected_guard_page_offset = TryInto::<usize>::try_into(self.guard_page_offset)?;
        if guard_page_offset != expected_guard_page_offset {
            return Err(new_error!(
                "Guard Page offset does not match expected Guard Page offset expected: {}, actual: {}",
                expected_guard_page_offset,
                guard_page_offset
            ));
        }
        // Guard page between heap and user stack.
        let stack_offset = builder.push_page_aligned(
            PAGE_SIZE_USIZE,
            MemoryRegionFlags::READ | MemoryRegionFlags::STACK_GUARD,
            GuardPage,
        );
        let expected_stack_offset =
            TryInto::<usize>::try_into(self.guest_user_stack_buffer_offset)?;
        if stack_offset != expected_stack_offset {
            return Err(new_error!(
                "Stack offset does not match expected Stack offset expected: {}, actual: {}",
                expected_stack_offset,
                stack_offset
            ));
        }
        // User stack (RW).
        let user_stack_guard_page_offset = builder.push_page_aligned(
            self.get_guest_stack_size(),
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            Stack,
        );
        let expected_user_stack_guard_page_offset =
            TryInto::<usize>::try_into(self.get_top_of_user_stack_offset())?
                + self.get_guest_stack_size();
        if user_stack_guard_page_offset != expected_user_stack_guard_page_offset {
            return Err(new_error!(
                "User Guard Page offset does not match expected User Guard Page offset expected: {}, actual: {}",
                expected_user_stack_guard_page_offset,
                user_stack_guard_page_offset
            ));
        }
        // Guard page between user stack and kernel stack.
        let kernel_stack_offset = builder.push_page_aligned(
            PAGE_SIZE_USIZE,
            MemoryRegionFlags::READ | MemoryRegionFlags::STACK_GUARD,
            GuardPage,
        );
        let expected_kernel_stack_offset =
            TryInto::<usize>::try_into(self.kernel_stack_buffer_offset)?;
        if kernel_stack_offset != expected_kernel_stack_offset {
            return Err(new_error!(
                "Kernel Stack offset does not match expected Kernel Stack offset expected: {}, actual: {}",
                expected_kernel_stack_offset,
                kernel_stack_offset
            ));
        }
        // Kernel stack (RW).
        let kernel_stack_guard_page_offset = builder.push_page_aligned(
            self.kernel_stack_size_rounded,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            KernelStack,
        );
        let expected_kernel_stack_guard_page_offset =
            TryInto::<usize>::try_into(self.kernel_stack_guard_page_offset)?;
        if kernel_stack_guard_page_offset != expected_kernel_stack_guard_page_offset {
            return Err(new_error!(
                "Kernel Guard Page offset does not match expected Kernel Guard Page offset expected: {}, actual: {}",
                expected_kernel_stack_guard_page_offset,
                kernel_stack_guard_page_offset
            ));
        }
        // Guard page between kernel stack and boot stack.
        let boot_stack_offset = builder.push_page_aligned(
            PAGE_SIZE_USIZE,
            MemoryRegionFlags::READ | MemoryRegionFlags::STACK_GUARD,
            GuardPage,
        );
        let expected_boot_stack_offset = TryInto::<usize>::try_into(self.boot_stack_buffer_offset)?;
        if boot_stack_offset != expected_boot_stack_offset {
            return Err(new_error!(
                "Boot Stack offset does not match expected Boot Stack offset expected: {}, actual: {}",
                expected_boot_stack_offset,
                boot_stack_offset
            ));
        }
        // Boot stack (one page, RW) — the final region; its end must equal
        // the total memory size.
        let final_offset = builder.push_page_aligned(
            PAGE_SIZE_USIZE,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            BootStack,
        );
        let expected_final_offset = TryInto::<usize>::try_into(self.get_memory_size()?)?;
        if final_offset != expected_final_offset {
            return Err(new_error!(
                "Final offset does not match expected Final offset expected: {}, actual: {}",
                expected_final_offset,
                final_offset
            ));
        }
        Ok(builder.build())
    }
    /// Writes the initial PEB contents into `shared_mem`: buffer sizes and
    /// pointers, the run mode, the security-cookie seed, and the initial
    /// stack pointers derived from the top of memory.
    ///
    /// `guest_offset` must be either `BASE_ADDRESS` or the host base
    /// address of the mapping; anything else yields `GuestOffsetIsInvalid`.
    /// `size` is the total memory size (used to locate the stacks at the
    /// top of memory); `run_inprocess` selects the `RunMode` value.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write(
        &self,
        shared_mem: &mut ExclusiveSharedMemory,
        guest_offset: usize,
        size: usize,
        run_inprocess: bool,
    ) -> Result<()> {
        // Expands `get_address!(foo)` to the address of `self.foo_offset`,
        // either resolved through the host mapping (`guest_offset == 0`) or
        // as `guest_offset + offset`.
        // NOTE(review): the `guest_offset == 0` branch is only reachable
        // when `shared_mem.base_addr()` is 0, given the validation below —
        // confirm intent.
        macro_rules! get_address {
            ($something:ident) => {
                paste! {
                    if guest_offset == 0 {
                        let offset = self.[<$something _offset>];
                        let calculated_addr = shared_mem.calculate_address(offset)?;
                        u64::try_from(calculated_addr)?
                    } else {
                        u64::try_from(guest_offset + self.[<$something _offset>])?
                    }
                }
            };
        }
        // Reject anything other than the guest base address or the host
        // base address.
        if guest_offset != SandboxMemoryLayout::BASE_ADDRESS
            && guest_offset != shared_mem.base_addr()
        {
            return Err(GuestOffsetIsInvalid(guest_offset));
        }
        // Seed the guest's security cookie with 8 random bytes.
        let mut security_cookie_seed = [0u8; 8];
        OsRng.fill_bytes(&mut security_cookie_seed);
        shared_mem.copy_from_slice(&security_cookie_seed, self.peb_security_cookie_seed_offset)?;
        // Host function definitions: size then buffer pointer.
        shared_mem.write_u64(
            self.get_host_function_definitions_size_offset(),
            self.sandbox_memory_config
                .get_host_function_definition_size()
                .try_into()?,
        )?;
        let addr = get_address!(host_function_definitions_buffer);
        shared_mem.write_u64(self.get_host_function_definitions_pointer_offset(), addr)?;
        // Host exception: size only.
        shared_mem.write_u64(
            self.get_host_exception_size_offset(),
            self.sandbox_memory_config
                .get_host_exception_size()
                .try_into()?,
        )?;
        // Guest error: buffer pointer and size.
        let addr = get_address!(guest_error_buffer);
        shared_mem.write_u64(self.get_guest_error_buffer_pointer_offset(), addr)?;
        shared_mem.write_u64(
            self.get_guest_error_buffer_size_offset(),
            u64::try_from(self.sandbox_memory_config.get_guest_error_buffer_size())?,
        )?;
        // Run mode, chosen from the in-process flag and the host OS.
        shared_mem.write_u64(
            self.get_run_mode_offset(),
            match (
                run_inprocess,
                cfg!(target_os = "windows"),
                cfg!(target_os = "linux"),
            ) {
                (false, _, _) => RunMode::Hypervisor as u64,
                (true, true, _) => RunMode::InProcessWindows as u64,
                (true, _, true) => RunMode::InProcessLinux as u64,
                (true, _, _) => log_then_return!("Unsupported OS for in-process mode"),
            },
        )?;
        // Input data: size then buffer pointer.
        shared_mem.write_u64(
            self.get_input_data_size_offset(),
            self.sandbox_memory_config
                .get_input_data_size()
                .try_into()?,
        )?;
        let addr = get_address!(input_data_buffer);
        shared_mem.write_u64(self.get_input_data_pointer_offset(), addr)?;
        // Output data: size then buffer pointer.
        shared_mem.write_u64(
            self.get_output_data_size_offset(),
            self.sandbox_memory_config
                .get_output_data_size()
                .try_into()?,
        )?;
        let addr = get_address!(output_data_buffer);
        shared_mem.write_u64(self.get_output_data_pointer_offset(), addr)?;
        // Guest panic context: size then buffer pointer.
        let addr = get_address!(guest_panic_context_buffer);
        shared_mem.write_u64(
            self.get_guest_panic_context_size_offset(),
            self.sandbox_memory_config
                .get_guest_panic_context_buffer_size()
                .try_into()?,
        )?;
        shared_mem.write_u64(self.get_guest_panic_context_buffer_pointer_offset(), addr)?;
        // Heap: size then buffer pointer.
        let addr = get_address!(guest_heap_buffer);
        shared_mem.write_u64(self.get_heap_size_offset(), self.heap_size.try_into()?)?;
        shared_mem.write_u64(self.get_heap_pointer_offset(), addr)?;
        // The stacks sit at the top of memory: subtracting the user stack,
        // the kernel stack and the three trailing pages (two guard pages
        // plus the one-page boot stack) from the bottom yields the base of
        // the user stack.
        let bottom = guest_offset + size;
        let min_user_stack_address = bottom
            - self.stack_size
            - self.kernel_stack_size_rounded
            - PAGE_SIZE_USIZE
            - PAGE_SIZE_USIZE
            - PAGE_SIZE_USIZE;
        shared_mem.write_u64(
            self.get_min_guest_stack_address_offset(),
            min_user_stack_address.try_into()?,
        )?;
        // Initial stack pointers: each points just past its stack region.
        let start_of_user_stack: u64 = (min_user_stack_address + self.stack_size).try_into()?;
        shared_mem.write_u64(self.get_user_stack_pointer_offset(), start_of_user_stack)?;
        let start_of_kernel_stack: u64 =
            start_of_user_stack + (PAGE_SIZE_USIZE + self.kernel_stack_size_rounded) as u64;
        shared_mem.write_u64(
            self.get_kernel_stack_pointer_offset(),
            start_of_kernel_stack,
        )?;
        let start_of_boot_stack: u64 = start_of_kernel_stack + (PAGE_SIZE_USIZE * 2) as u64;
        shared_mem.write_u64(self.get_boot_stack_pointer_offset(), start_of_boot_stack)?;
        // Seed the first u64 of the input and output buffers with the
        // stack-pointer slot size (8) — presumably the buffers' internal
        // cursor/stack pointer; TODO confirm against the guest side.
        shared_mem.write_u64(
            self.input_data_buffer_offset,
            Self::STACK_POINTER_SIZE_BYTES,
        )?;
        shared_mem.write_u64(
            self.output_data_buffer_offset,
            Self::STACK_POINTER_SIZE_BYTES,
        )?;
        Ok(())
    }
}
/// Rounds `value` up to the nearest multiple of `multiple`.
///
/// Works for any non-zero `multiple`. The previous bitmask implementation
/// (`(value + multiple - 1) & !(multiple - 1)`) silently returned wrong
/// results when `multiple` was not a power of two; the division form is
/// correct for all multiples and identical for the power-of-two page sizes
/// used throughout this file.
///
/// # Panics
/// Panics if `multiple` is zero (division by zero).
fn round_up_to(value: usize, multiple: usize) -> usize {
    debug_assert!(multiple != 0, "multiple must be non-zero");
    value.div_ceil(multiple) * multiple
}
#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use super::*;
    /// `round_up_to` rounds correctly at, below and above multiples.
    #[test]
    fn test_round_up() {
        assert_eq!(0, round_up_to(0, 4));
        assert_eq!(4, round_up_to(1, 4));
        assert_eq!(4, round_up_to(2, 4));
        assert_eq!(4, round_up_to(3, 4));
        assert_eq!(4, round_up_to(4, 4));
        assert_eq!(8, round_up_to(5, 4));
        assert_eq!(8, round_up_to(6, 4));
        assert_eq!(8, round_up_to(7, 4));
        assert_eq!(8, round_up_to(8, 4));
        assert_eq!(PAGE_SIZE_USIZE, round_up_to(44, PAGE_SIZE_USIZE));
        assert_eq!(PAGE_SIZE_USIZE, round_up_to(4095, PAGE_SIZE_USIZE));
        assert_eq!(PAGE_SIZE_USIZE, round_up_to(4096, PAGE_SIZE_USIZE));
        assert_eq!(PAGE_SIZE_USIZE * 2, round_up_to(4097, PAGE_SIZE_USIZE));
        assert_eq!(PAGE_SIZE_USIZE * 2, round_up_to(8191, PAGE_SIZE_USIZE));
    }
    /// Recomputes the expected total memory size region-by-region,
    /// mirroring the layout built by `SandboxMemoryLayout::new`.
    // NOTE(review): `code_size` is added un-rounded here while `new`
    // page-rounds it; this agrees only because `test_get_memory_size`
    // passes a page-aligned code size — confirm before reusing with other
    // sizes.
    fn get_expected_memory_size(layout: &SandboxMemoryLayout) -> usize {
        let cfg = layout.sandbox_memory_config;
        let mut expected_size = 0;
        expected_size += layout.get_page_table_size();
        expected_size += layout.code_size;
        expected_size += round_up_to(size_of::<HyperlightPEB>(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_host_function_definition_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_host_exception_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_guest_error_buffer_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_input_data_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_output_data_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(cfg.get_guest_panic_context_buffer_size(), PAGE_SIZE_USIZE);
        expected_size += round_up_to(layout.heap_size, PAGE_SIZE_USIZE);
        // Guard page after the heap.
        expected_size += PAGE_SIZE_USIZE;
        expected_size += round_up_to(layout.stack_size, PAGE_SIZE_USIZE);
        // Guard page after the user stack.
        expected_size += PAGE_SIZE_USIZE;
        expected_size += round_up_to(layout.kernel_stack_size_rounded, PAGE_SIZE_USIZE);
        // Guard page after the kernel stack, then the one-page boot stack.
        expected_size += PAGE_SIZE_USIZE;
        expected_size += PAGE_SIZE_USIZE;
        expected_size
    }
    /// `get_memory_size` matches the independently recomputed total.
    #[test]
    fn test_get_memory_size() {
        let sbox_cfg = SandboxConfiguration::default();
        let sbox_mem_layout = SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096).unwrap();
        assert_eq!(
            sbox_mem_layout.get_memory_size().unwrap(),
            get_expected_memory_size(&sbox_mem_layout)
        );
    }
}