use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
use std::mem::{align_of, size_of};
#[cfg(target_os = "linux")]
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};
use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{Span, instrument};
#[cfg(target_os = "windows")]
use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::PAGE_READWRITE;
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{
CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
};
#[cfg(target_os = "windows")]
use windows::core::PCSTR;
use super::memory_region::{
HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType,
};
#[cfg(target_os = "windows")]
use crate::HyperlightError::WindowsAPIError;
use crate::{HyperlightError, Result, log_then_return, new_error};
/// Returns early from the enclosing function with an error unless the
/// half-open byte range `[$offset, $offset + $size)` fits inside a memory
/// of `$mem_size` bytes. `checked_add` guards the `offset + size`
/// computation itself against overflow.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset.checked_add($size).is_none_or(|end| end > $mem_size) {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}
/// Generates a bounds-checked reader method named `$fname` that reads a
/// little-endian `$ty` from the given byte offset of the shared memory.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
/// Generates a bounds-checked writer method named `$fname` that writes a
/// `$ty` in little-endian byte order at the given byte offset.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
/// The raw host-side mapping backing a sandbox's shared memory, including
/// the guard pages that surround the usable region. Shared between the
/// `*SharedMemory` view types via `Arc`; unmapped when dropped.
#[derive(Debug)]
pub struct HostMapping {
    /// Base of the whole mapping (points at the leading guard page).
    ptr: *mut u8,
    /// Total mapping size in bytes, guard pages included.
    size: usize,
    /// Windows file-mapping object handle; closed in `Drop`.
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}
impl Drop for HostMapping {
    /// Unmaps the region on Linux. `munmap` failure cannot be propagated
    /// out of `Drop`, so its return value is intentionally ignored.
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;
        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    /// Unmaps the view and closes the file-mapping handle on Windows.
    /// Failures are logged (not propagated) since `Drop` cannot fail.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }
        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}
/// A view of the shared memory with sole access: no locking is needed for
/// reads or writes. Consumed by `build` to produce the host/guest pair.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
// SAFETY(review): the raw pointer in `HostMapping` makes this !Send by
// default; sending the view across threads is assumed sound because the
// mapping has no thread affinity — confirm against all users.
unsafe impl Send for ExclusiveSharedMemory {}
/// The guest-side view of the shared memory. Holds the `RwLock` shared
/// with `HostSharedMemory` so host accesses and exclusive access can be
/// coordinated.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// Lock shared with the paired `HostSharedMemory` (see `build`).
    pub lock: Arc<RwLock<()>>,
}
// SAFETY(review): see the note on `ExclusiveSharedMemory`'s Send impl.
unsafe impl Send for GuestSharedMemory {}
/// The host-side view of the shared memory. Cloneable; clones share the
/// same mapping and lock. Accesses use volatile reads/writes because the
/// guest may mutate the memory concurrently.
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    /// Lock shared with the paired `GuestSharedMemory` (see `build`).
    lock: Arc<RwLock<()>>,
}
// SAFETY(review): see the note on `ExclusiveSharedMemory`'s Send impl.
unsafe impl Send for HostSharedMemory {}
impl ExclusiveSharedMemory {
/// Creates a new page-aligned anonymous mapping of at least
/// `min_size_bytes`, with one `PROT_NONE` guard page on each side.
///
/// # Errors
/// Fails when `min_size_bytes` is 0, the padded size is not a multiple of
/// the page size, the size exceeds `isize::MAX`, or `mmap`/`mprotect`
/// fail.
#[cfg(target_os = "linux")]
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub fn new(min_size_bytes: usize) -> Result<Self> {
    use libc::{
        MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t,
        size_t,
    };
    #[cfg(not(miri))]
    use libc::{MAP_NORESERVE, PROT_NONE, mprotect};
    if min_size_bytes == 0 {
        return Err(new_error!("Cannot create shared memory with size 0"));
    }
    // Room for one guard page on each side of the usable region.
    let total_size = min_size_bytes
        .checked_add(2 * PAGE_SIZE_USIZE)
        .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;
    if total_size % PAGE_SIZE_USIZE != 0 {
        return Err(new_error!(
            "shared memory must be a multiple of {}",
            PAGE_SIZE_USIZE
        ));
    }
    // Rust allocations/slices cannot exceed isize::MAX bytes.
    if total_size > isize::MAX as usize {
        return Err(HyperlightError::MemoryRequestTooBig(
            total_size,
            isize::MAX as usize,
        ));
    }
    // MAP_NORESERVE avoids reserving swap for the whole region up front;
    // Miri does not model it, hence the cfg split.
    #[cfg(not(miri))]
    let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
    #[cfg(miri)]
    let flags = MAP_ANONYMOUS | MAP_PRIVATE;
    let addr = unsafe {
        mmap(
            null_mut(),
            total_size as size_t,
            PROT_READ | PROT_WRITE,
            flags,
            -1 as c_int,
            0 as off_t,
        )
    };
    if addr == MAP_FAILED {
        log_then_return!(HyperlightError::MmapFailed(
            Error::last_os_error().raw_os_error()
        ));
    }
    // Turn the first and last pages into inaccessible guard pages so that
    // out-of-bounds accesses fault instead of corrupting memory.
    #[cfg(not(miri))]
    {
        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
        if res != 0 {
            return Err(HyperlightError::MprotectFailed(
                Error::last_os_error().raw_os_error(),
            ));
        }
        let res = unsafe {
            mprotect(
                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                PAGE_SIZE_USIZE,
                PROT_NONE,
            )
        };
        if res != 0 {
            return Err(HyperlightError::MprotectFailed(
                Error::last_os_error().raw_os_error(),
            ));
        }
    }
    Ok(Self {
        // Send-ness is asserted manually on the view types.
        #[allow(clippy::arc_with_non_send_sync)]
        region: Arc::new(HostMapping {
            ptr: addr as *mut u8,
            size: total_size,
        }),
    })
}
/// Creates a new page-file-backed mapping of at least `min_size_bytes`,
/// with one `PAGE_NOACCESS` guard page on each side of the usable region.
///
/// # Errors
/// Fails when `min_size_bytes` is 0, the padded size is not a multiple of
/// the page size, the size exceeds `isize::MAX`, or any Windows API call
/// fails.
#[cfg(target_os = "windows")]
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub fn new(min_size_bytes: usize) -> Result<Self> {
    if min_size_bytes == 0 {
        return Err(new_error!("Cannot create shared memory with size 0"));
    }
    // Room for one guard page on each side of the usable region.
    let total_size = min_size_bytes
        .checked_add(2 * PAGE_SIZE_USIZE)
        .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;
    if total_size % PAGE_SIZE_USIZE != 0 {
        return Err(new_error!(
            "shared memory must be a multiple of {}",
            PAGE_SIZE_USIZE
        ));
    }
    // Rust allocations/slices cannot exceed isize::MAX bytes.
    if total_size > isize::MAX as usize {
        return Err(HyperlightError::MemoryRequestTooBig(
            total_size,
            isize::MAX as usize,
        ));
    }
    // Split the mapping size across CreateFileMappingA's high/low u32
    // parameters. Widening to u64 first avoids an overflowing `>> 32` on
    // 32-bit targets and — unlike the previous `size_of::<usize>() == 8`
    // guard — never passes a zero size there.
    let total_size_u64 = total_size as u64;
    let dwmaximumsizehigh = (total_size_u64 >> 32) as u32;
    let dwmaximumsizelow = (total_size_u64 & 0xFFFF_FFFF) as u32;
    let flags = PAGE_READWRITE;
    // INVALID_HANDLE_VALUE requests a mapping backed by the system paging
    // file rather than a named file on disk.
    let handle = unsafe {
        CreateFileMappingA(
            INVALID_HANDLE_VALUE,
            None,
            flags,
            dwmaximumsizehigh,
            dwmaximumsizelow,
            PCSTR::null(),
        )?
    };
    if handle.is_invalid() {
        log_then_return!(HyperlightError::MemoryAllocationFailed(
            Error::last_os_error().raw_os_error()
        ));
    }
    // dwNumberOfBytesToMap == 0 maps the entire file-mapping object.
    let file_map = FILE_MAP_ALL_ACCESS;
    let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };
    if addr.Value.is_null() {
        log_then_return!(HyperlightError::MemoryAllocationFailed(
            Error::last_os_error().raw_os_error()
        ));
    }
    // Turn the first and last pages into inaccessible guard pages so that
    // out-of-bounds accesses fault instead of corrupting memory.
    let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);
    let first_guard_page_start = addr.Value;
    if let Err(e) = unsafe {
        VirtualProtect(
            first_guard_page_start,
            PAGE_SIZE_USIZE,
            PAGE_NOACCESS,
            &mut unused_out_old_prot_flags,
        )
    } {
        log_then_return!(WindowsAPIError(e.clone()));
    }
    let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
    if let Err(e) = unsafe {
        VirtualProtect(
            last_guard_page_start,
            PAGE_SIZE_USIZE,
            PAGE_NOACCESS,
            &mut unused_out_old_prot_flags,
        )
    } {
        log_then_return!(WindowsAPIError(e.clone()));
    }
    Ok(Self {
        // Send-ness is asserted manually on the view types.
        #[allow(clippy::arc_with_non_send_sync)]
        region: Arc::new(HostMapping {
            ptr: addr.Value as *mut u8,
            size: total_size,
            handle,
        }),
    })
}
/// Returns the user-visible portion of the mapping (guard pages excluded)
/// as a mutable byte slice.
pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
    // SAFETY: base_ptr/mem_size describe a live mapping owned by
    // `self.region`; `&mut self` guarantees exclusive access via this view.
    unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
}
/// Returns the user-visible portion of the mapping (guard pages excluded)
/// as an immutable byte slice.
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub fn as_slice(&self) -> &[u8] {
    // SAFETY: base_ptr/mem_size describe a live mapping owned by
    // `self.region`; `&self` prevents mutation through this exclusive view.
    // (Lifetime is elided — the explicit `<'a>` was redundant, per
    // clippy::needless_lifetimes.)
    unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
}
/// Copies the entire usable (non-guard) shared-memory contents into a
/// freshly allocated `Vec<u8>`.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
#[cfg(test)]
pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
    Ok(self.as_slice().to_vec())
}
/// Copies `src` into shared memory starting at byte `offset`, after
/// bounds-checking the destination range.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
    let data = self.as_mut_slice();
    bounds_check!(offset, src.len(), data.len());
    data[offset..offset + src.len()].copy_from_slice(src);
    Ok(())
}
// Little-endian, bounds-checked primitive accessors for the exclusive
// view, generated by the generate_reader!/generate_writer! macros above.
generate_reader!(read_u8, u8);
generate_reader!(read_i8, i8);
generate_reader!(read_u16, u16);
generate_reader!(read_i16, i16);
generate_reader!(read_u32, u32);
generate_reader!(read_i32, i32);
generate_reader!(read_u64, u64);
generate_reader!(read_i64, i64);
generate_reader!(read_usize, usize);
generate_reader!(read_isize, isize);
generate_writer!(write_u8, u8);
generate_writer!(write_i8, i8);
generate_writer!(write_u16, u16);
generate_writer!(write_i16, i16);
generate_writer!(write_u32, u32);
generate_writer!(write_i32, i32);
generate_writer!(write_u64, u64);
generate_writer!(write_i64, i64);
generate_writer!(write_usize, usize);
generate_writer!(write_isize, isize);
/// Consumes this exclusive handle and splits it into a (host, guest)
/// pair that share the same underlying mapping and coordinate through a
/// common `RwLock`.
pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
    let lock = Arc::new(RwLock::new(()));
    (
        HostSharedMemory {
            region: Arc::clone(&self.region),
            lock: Arc::clone(&lock),
        },
        GuestSharedMemory {
            region: self.region,
            lock,
        },
    )
}
/// Returns the Windows handle of the file-mapping object backing this
/// region (Windows only).
#[cfg(target_os = "windows")]
pub fn get_mmap_file_handle(&self) -> HANDLE {
    self.region.handle
}
/// Test-only helper: wraps the same mapping in a `HostSharedMemory` with
/// a fresh (unshared) lock. Note the lock does NOT coordinate with any
/// other view, unlike the pair produced by `build`.
#[cfg(all(test, feature = "nanvix-unstable"))]
pub(crate) fn as_host_shared_memory(&self) -> HostSharedMemory {
    let lock = Arc::new(RwLock::new(()));
    HostSharedMemory {
        region: self.region.clone(),
        lock,
    }
}
}
/// Builds a `MemoryRegion` describing `s`'s whole user-visible mapping,
/// placed at guest physical address `gpa` with the given type and flags.
fn mapping_at(
    s: &impl SharedMemory,
    gpa: u64,
    region_type: MemoryRegionType,
    flags: MemoryRegionFlags,
) -> MemoryRegion {
    let guest_start = gpa as usize;
    let guest_end = guest_start + s.mem_size();
    MemoryRegion {
        guest_region: guest_start..guest_end,
        host_region: s.host_region_base()..s.host_region_end(),
        region_type,
        flags,
    }
}
impl GuestSharedMemory {
    /// Builds the `MemoryRegion` describing this mapping at guest physical
    /// address `guest_base`. Only Scratch (and, when the
    /// `unshared_snapshot_mem` cfg is set, Snapshot) regions are valid
    /// here; both are mapped read/write/execute for the guest.
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        let flags = match region_type {
            MemoryRegionType::Scratch => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            #[cfg(unshared_snapshot_mem)]
            MemoryRegionType::Snapshot => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            // Programmer error rather than a runtime condition: other
            // region types must never be created through this helper.
            #[allow(clippy::panic)]
            _ => panic!(
                "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions"
            ),
        };
        mapping_at(self, guest_base, region_type, flags)
    }
}
/// Operations common to all views (exclusive/host/guest) of a sandbox's
/// shared memory mapping.
pub trait SharedMemory {
    /// The raw mapping backing this view (guard pages included).
    fn region(&self) -> &HostMapping;
    /// Address of the first usable byte (just past the leading guard page).
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }
    /// Pointer to the first usable byte (just past the leading guard page).
    fn base_ptr(&self) -> *mut u8 {
        self.region().ptr.wrapping_add(PAGE_SIZE_USIZE)
    }
    /// Size of the usable region: the mapping minus both guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }
    /// Pointer to the start of the whole mapping, guard page included.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }
    /// Size of the whole mapping, both guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }
    /// Host-side description of the usable region's base, in the
    /// platform-specific representation used by `HostGuestMemoryRegion`
    /// (a plain address on non-Windows; handle + offset on Windows).
    fn host_region_base(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        #[cfg(not(windows))]
        {
            self.base_addr()
        }
        #[cfg(windows)]
        {
            super::memory_region::HostRegionBase {
                from_handle: self.region().handle.into(),
                handle_base: self.region().ptr as usize,
                handle_size: self.region().size,
                offset: PAGE_SIZE_USIZE,
            }
        }
    }
    /// Host-side description of one-past-the-end of the usable region.
    fn host_region_end(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        <HostGuestMemoryRegion as MemoryRegionKind>::add(self.host_region_base(), self.mem_size())
    }
    /// Runs `f` with temporary exclusive access to the mapping.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
    /// Runs `f` over an immutable view of the memory contents, under
    /// exclusive access.
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        self.with_exclusivity(|m| f(m.as_slice()))
    }
    /// Zeroes the entire usable region.
    fn zero(&mut self) -> Result<()> {
        self.with_exclusivity(|e| {
            #[allow(unused_mut)]
            let mut do_copy = true;
            // On KVM builds, MADV_DONTNEED drops the backing pages of the
            // anonymous mapping (subsequent reads see zeroes), avoiding an
            // explicit O(n) fill; fall back to fill(0) if it fails.
            #[cfg(all(target_os = "linux", feature = "kvm", not(any(feature = "mshv3"))))]
            unsafe {
                let ret = libc::madvise(
                    e.region.ptr as *mut libc::c_void,
                    e.region.size,
                    libc::MADV_DONTNEED,
                );
                if ret == 0 {
                    do_copy = false;
                }
            }
            if do_copy {
                e.as_mut_slice().fill(0);
            }
        })
    }
}
impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Already exclusive by construction, so no locking is required.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}
impl SharedMemory for GuestSharedMemory {
fn region(&self) -> &HostMapping {
&self.region
}
fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
&mut self,
f: F,
) -> Result<T> {
let guard = self
.lock
.try_write()
.map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
let mut excl = ExclusiveSharedMemory {
region: self.region.clone(),
};
let ret = f(&mut excl);
drop(excl);
drop(guard);
Ok(ret)
}
}
/// Marker trait for types for which every bit pattern is a valid value,
/// so an instance may be materialized from raw shared-memory bytes.
///
/// # Safety
/// Implementors must guarantee that any `size_of::<Self>()` bytes form a
/// valid `Self` (no padding, no invalid bit patterns).
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
impl HostSharedMemory {
/// Reads a `T` from shared memory at byte `offset`. Sound for any
/// `T: AllValid` because every bit pattern is a valid `T`.
pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
    bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
    unsafe {
        let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
        {
            // SAFETY: the slice covers exactly the bytes of `ret`;
            // copy_to_slice fully initializes them (or errors out before
            // assume_init is reached).
            let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                ret.as_mut_ptr() as *mut u8,
                std::mem::size_of::<T>(),
            );
            self.copy_to_slice(slice, offset)?;
        }
        Ok(ret.assume_init())
    }
}
/// Writes `data` into shared memory at byte `offset` by copying its raw
/// byte representation.
pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
    bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
    unsafe {
        // SAFETY: `data` is live for the duration of the call and the
        // slice covers exactly its size_of::<T>() bytes.
        let slice: &[u8] = core::slice::from_raw_parts(
            core::ptr::addr_of!(data) as *const u8,
            std::mem::size_of::<T>(),
        );
        self.copy_from_slice(slice, offset)?;
    }
    Ok(())
}
/// Copies `slice.len()` bytes starting at `offset` out of shared memory
/// into `slice`. Volatile reads are used because the guest may mutate
/// the memory concurrently.
pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
    bounds_check!(offset, slice.len(), self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    // A read lock suffices: it only needs to exclude with_exclusivity,
    // not other concurrent host-side copies.
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    const CHUNK: usize = size_of::<u128>();
    let len = slice.len();
    let mut i = 0;
    // Head: byte-wise until `base + i` is u128-aligned (or len is hit).
    let align_offset = base.align_offset(align_of::<u128>());
    let head_len = align_offset.min(len);
    while i < head_len {
        unsafe {
            slice[i] = base.add(i).read_volatile();
        }
        i += 1;
    }
    // Body: aligned 16-byte volatile loads; the destination buffer may be
    // unaligned, hence write_unaligned.
    let dst = slice.as_mut_ptr();
    while i + CHUNK <= len {
        unsafe {
            let value = (base.add(i) as *const u128).read_volatile();
            std::ptr::write_unaligned(dst.add(i) as *mut u128, value);
        }
        i += CHUNK;
    }
    // Tail: remaining bytes after the last full chunk.
    while i < len {
        unsafe {
            slice[i] = base.add(i).read_volatile();
        }
        i += 1;
    }
    drop(guard);
    Ok(())
}
/// Copies `slice` into shared memory starting at byte `offset`. Volatile
/// writes are used because the guest may access the memory concurrently.
pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
    bounds_check!(offset, slice.len(), self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    // A read lock suffices: it only needs to exclude with_exclusivity,
    // not other concurrent host-side copies.
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    const CHUNK: usize = size_of::<u128>();
    let len = slice.len();
    let mut i = 0;
    // Head: byte-wise until `base + i` is u128-aligned (or len is hit).
    let align_offset = base.align_offset(align_of::<u128>());
    let head_len = align_offset.min(len);
    while i < head_len {
        unsafe {
            base.add(i).write_volatile(slice[i]);
        }
        i += 1;
    }
    // Body: aligned 16-byte volatile stores; the source buffer may be
    // unaligned, hence read_unaligned.
    let src = slice.as_ptr();
    while i + CHUNK <= len {
        unsafe {
            let value = std::ptr::read_unaligned(src.add(i) as *const u128);
            (base.add(i) as *mut u128).write_volatile(value);
        }
        i += CHUNK;
    }
    // Tail: remaining bytes after the last full chunk.
    while i < len {
        unsafe {
            base.add(i).write_volatile(slice[i]);
        }
        i += 1;
    }
    drop(guard);
    Ok(())
}
/// Fills `len` bytes of shared memory starting at `offset` with `value`,
/// using volatile writes (the guest may access the memory concurrently).
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
    bounds_check!(offset, len, self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    // A read lock suffices: it only needs to exclude with_exclusivity.
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    const CHUNK: usize = size_of::<u128>();
    // 16 copies of `value`, so the body loop stores a whole chunk at once.
    let value_u128 = u128::from_ne_bytes([value; CHUNK]);
    let mut i = 0;
    // Head: byte-wise until `base + i` is u128-aligned (or len is hit).
    let align_offset = base.align_offset(align_of::<u128>());
    let head_len = align_offset.min(len);
    while i < head_len {
        unsafe {
            base.add(i).write_volatile(value);
        }
        i += 1;
    }
    // Body: aligned 16-byte volatile stores.
    while i + CHUNK <= len {
        unsafe {
            (base.add(i) as *mut u128).write_volatile(value_u128);
        }
        i += CHUNK;
    }
    // Tail: remaining bytes after the last full chunk.
    while i < len {
        unsafe {
            base.add(i).write_volatile(value);
        }
        i += 1;
    }
    drop(guard);
    Ok(())
}
/// Pushes `data` onto the guest "stack buffer" that starts at
/// `buffer_start_offset` and spans `buffer_size` bytes.
///
/// Layout: the first 8 bytes of the buffer hold the relative stack
/// pointer (offset of the next free byte within the buffer). Each pushed
/// element is followed by an 8-byte back-pointer recording the previous
/// stack pointer, which `try_pop_buffer_into` uses to unwind.
///
/// # Errors
/// Fails when the stored stack pointer is out of bounds or there is not
/// enough free space for `data` plus its 8-byte back-pointer.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn push_buffer(
    &mut self,
    buffer_start_offset: usize,
    buffer_size: usize,
    data: &[u8],
) -> Result<()> {
    let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
    // The stack pointer must stay inside the buffer and past the 8-byte
    // header that stores it.
    if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
        return Err(new_error!(
            "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
            stack_pointer_rel,
            buffer_size
        ));
    }
    // Element payload plus its 8-byte back-pointer. checked_add guards
    // against wrap-around in release builds for pathological `data` sizes
    // (the previous `data.len() + 8` could silently overflow).
    let size_required = data
        .len()
        .checked_add(8)
        .ok_or_else(|| new_error!("push_buffer: required size overflows usize"))?;
    let size_available = buffer_size - stack_pointer_rel;
    if size_required > size_available {
        return Err(new_error!(
            "Not enough space in buffer to push data. Required: {}, Available: {}",
            size_required,
            size_available
        ));
    }
    let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;
    // Write the payload, then the back-pointer, then advance the stack
    // pointer stored in the buffer header.
    self.copy_from_slice(data, stack_pointer_abs)?;
    self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;
    self.write::<u64>(
        buffer_start_offset,
        (stack_pointer_rel + size_required) as u64,
    )?;
    Ok(())
}
/// Pops the most recently pushed element off the guest stack buffer at
/// `buffer_start_offset` (see `push_buffer` for the layout) and converts
/// it into a `T`.
///
/// All metadata read from shared memory (stack pointer, back-pointer,
/// flatbuffer size prefix) is validated before use, since the guest may
/// have corrupted it.
pub fn try_pop_buffer_into<T>(
    &mut self,
    buffer_start_offset: usize,
    buffer_size: usize,
) -> Result<T>
where
    T: for<'b> TryFrom<&'b [u8]>,
{
    let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
    // A non-empty buffer holds at least the 8-byte header plus one
    // element's 8-byte back-pointer, hence the lower bound of 16.
    if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
        return Err(new_error!(
            "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
            stack_pointer_rel,
            buffer_size
        ));
    }
    // The 8 bytes just below the stack pointer hold the offset of the
    // last element (the back-pointer written by push_buffer).
    let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;
    let last_element_offset_rel: usize =
        self.read::<u64>(last_element_offset_abs - 8)? as usize;
    if last_element_offset_rel > stack_pointer_rel.saturating_sub(16)
        || last_element_offset_rel < 8
    {
        return Err(new_error!(
            "Corrupt buffer back-pointer: element offset {} is outside valid range [8, {}].",
            last_element_offset_rel,
            stack_pointer_rel.saturating_sub(16),
        ));
    }
    let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;
    // Space the element can legitimately occupy (its back-pointer excluded).
    let max_element_size = stack_pointer_rel - last_element_offset_rel - 8;
    // Total element size = 4-byte flatbuffer size prefix + claimed payload.
    let fb_buffer_size = {
        let raw_prefix = self.read::<u32>(last_element_offset_abs)?;
        let total = raw_prefix.checked_add(4).ok_or_else(|| {
            new_error!(
                "Corrupt buffer size prefix: value {} overflows when adding 4-byte header.",
                raw_prefix
            )
        })?;
        usize::try_from(total)
    }?;
    if fb_buffer_size > max_element_size {
        return Err(new_error!(
            "Corrupt buffer size prefix: flatbuffer claims {} bytes but the element slot is only {} bytes.",
            fb_buffer_size,
            max_element_size
        ));
    }
    let mut result_buffer = vec![0; fb_buffer_size];
    self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
    let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
        new_error!(
            "pop_buffer_into: failed to convert buffer to {}",
            type_name::<T>()
        )
    })?;
    // Rewind the stack pointer and zero out the popped region.
    self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;
    let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
    self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;
    Ok(to_return)
}
}
impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Runs `f` with temporary exclusive access to the mapping, failing
    /// immediately (rather than blocking) if the shared lock is held.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        // Drop the temporary exclusive view before releasing the lock.
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}
#[cfg(test)]
mod tests {
use hyperlight_common::mem::PAGE_SIZE_USIZE;
#[cfg(not(miri))]
use proptest::prelude::*;
#[cfg(not(miri))]
use super::HostSharedMemory;
use super::{ExclusiveSharedMemory, SharedMemory};
use crate::Result;
#[cfg(not(miri))]
use crate::mem::shared_mem_tests::read_write_test_suite;
// Fill the four quarters with distinct values, then the whole region, and
// verify out-of-bounds fills fail.
#[test]
fn fill() {
    let mem_size: usize = 4096;
    let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
    let (mut hshm, _) = eshm.build();
    hshm.fill(1, 0, 1024).unwrap();
    hshm.fill(2, 1024, 1024).unwrap();
    hshm.fill(3, 2048, 1024).unwrap();
    hshm.fill(4, 3072, 1024).unwrap();
    let vec = hshm
        .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
        .unwrap();
    assert!(vec[0..1024].iter().all(|&x| x == 1));
    assert!(vec[1024..2048].iter().all(|&x| x == 2));
    assert!(vec[2048..3072].iter().all(|&x| x == 3));
    assert!(vec[3072..4096].iter().all(|&x| x == 4));
    hshm.fill(5, 0, 4096).unwrap();
    let vec2 = hshm
        .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
        .unwrap();
    assert!(vec2.iter().all(|&x| x == 5));
    assert!(hshm.fill(0, 0, mem_size + 1).is_err());
    assert!(hshm.fill(0, mem_size, 1).is_err());
}
// Accessors near usize::MAX must fail via bounds_check! instead of
// wrapping around.
#[test]
fn bounds_check_overflow() {
    let mem_size: usize = 4096;
    let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
    assert!(eshm.read_i32(usize::MAX).is_err());
    assert!(eshm.write_i32(usize::MAX, 0).is_err());
    assert!(eshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
    let (mut hshm, _) = eshm.build();
    assert!(hshm.read::<u8>(usize::MAX).is_err());
    assert!(hshm.read::<u64>(usize::MAX - 3).is_err());
    assert!(hshm.write::<u8>(usize::MAX, 0).is_err());
    assert!(hshm.write::<u64>(usize::MAX - 3, 0).is_err());
    let mut buf = [0u8; 1];
    assert!(hshm.copy_to_slice(&mut buf, usize::MAX).is_err());
    assert!(hshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
    assert!(hshm.fill(0, usize::MAX, 1).is_err());
    assert!(hshm.fill(0, 1, usize::MAX).is_err());
}
// Round-trip copies at the start, end and middle of the region, plus
// out-of-bounds and oversized copies.
#[test]
fn copy_into_from() -> Result<()> {
    let mem_size: usize = 4096;
    let vec_len = 10;
    let eshm = ExclusiveSharedMemory::new(mem_size)?;
    let (hshm, _) = eshm.build();
    let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    hshm.copy_from_slice(&vec, 0)?;
    let mut vec2 = vec![0; vec_len];
    hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
    assert_eq!(vec, vec2);
    let offset = mem_size - vec.len();
    hshm.copy_from_slice(&vec, offset)?;
    let mut vec3 = vec![0; vec_len];
    hshm.copy_to_slice(&mut vec3, offset)?;
    assert_eq!(vec, vec3);
    let offset = mem_size / 2;
    hshm.copy_from_slice(&vec, offset)?;
    let mut vec4 = vec![0; vec_len];
    hshm.copy_to_slice(&mut vec4, offset)?;
    assert_eq!(vec, vec4);
    let mut vec5 = vec![0; vec_len];
    assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
    assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
    let mut vec6 = vec![0; vec_len];
    assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
    assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
    let mut vec7 = vec![0; mem_size * 2];
    assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
    assert!(hshm.copy_from_slice(&vec7, 0).is_err());
    Ok(())
}
// Property test: i32 read/write round-trips through both the exclusive
// and host views.
#[cfg(not(miri))]
proptest! {
    #[test]
    fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
        read_write_test_suite(
            val,
            ExclusiveSharedMemory::new,
            Box::new(ExclusiveSharedMemory::read_i32),
            Box::new(ExclusiveSharedMemory::write_i32),
        )
        .unwrap();
        read_write_test_suite(
            val,
            |s| {
                let e = ExclusiveSharedMemory::new(s)?;
                let (h, _) = e.build();
                Ok(h)
            },
            Box::new(HostSharedMemory::read::<i32>),
            Box::new(|h, o, v| h.write::<i32>(o, v)),
        )
        .unwrap();
    }
}
// Zero-sized and absurdly large allocations must be rejected.
#[test]
fn alloc_fail() {
    let gm = ExclusiveSharedMemory::new(0);
    assert!(gm.is_err());
    let gm = ExclusiveSharedMemory::new(usize::MAX);
    assert!(gm.is_err());
}
// Clones of HostSharedMemory share the same mapping; the mapping stays
// alive while any clone (or the guest view) exists.
#[test]
fn clone() {
    let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
    let (hshm1, _) = eshm.build();
    let hshm2 = hshm1.clone();
    assert_eq!(hshm1.mem_size(), hshm2.mem_size());
    assert_eq!(hshm1.base_addr(), hshm2.base_addr());
    hshm1.copy_from_slice(b"a", 0).unwrap();
    hshm2.copy_from_slice(b"b", 1).unwrap();
    for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
        assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
        assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
    }
    drop(hshm1);
    for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
        assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
    }
    hshm2.copy_from_slice(b"c", 2).unwrap();
    assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
    drop(hshm2);
}
// copy_all_to_vec returns exactly the bytes previously written.
#[test]
fn copy_all_to_vec() {
    let mut data = vec![b'a', b'b', b'c'];
    data.resize(4096, 0);
    let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
    eshm.copy_from_slice(data.as_slice(), 0).unwrap();
    let ret_vec = eshm.copy_all_to_vec().unwrap();
    assert_eq!(data, ret_vec);
}
// Verifies via /proc maps that the mapping exists while views are alive
// and disappears once every view has been dropped.
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn test_drop() {
    use proc_maps::get_process_maps;
    // Unusual size so the mapping is unambiguous in the process map list.
    const UNIQUE_SIZE: usize = PAGE_SIZE_USIZE * 17;
    let pid = std::process::id();
    let eshm = ExclusiveSharedMemory::new(UNIQUE_SIZE).unwrap();
    let (hshm1, gshm) = eshm.build();
    let hshm2 = hshm1.clone();
    let base_ptr = hshm1.base_ptr() as usize;
    let mem_size = hshm1.mem_size();
    let has_exact_mapping = |ptr: usize, size: usize| -> bool {
        get_process_maps(pid.try_into().unwrap())
            .unwrap()
            .iter()
            .any(|m| m.start() == ptr && m.size() == size)
    };
    assert!(
        has_exact_mapping(base_ptr, mem_size),
        "shared memory mapping not found at {:#x} with size {}",
        base_ptr,
        mem_size
    );
    drop(hshm1);
    drop(hshm2);
    drop(gshm);
    assert!(
        !has_exact_mapping(base_ptr, mem_size),
        "shared memory mapping still exists at {:#x} with size {} after drop",
        base_ptr,
        mem_size
    );
}
// Exercises the head/body/tail split of the chunked volatile copy and
// fill routines across all possible u128 alignments and lengths.
mod alignment_tests {
    use super::*;
    const CHUNK_SIZE: usize = size_of::<u128>();
    // Every starting alignment relative to a 16-byte boundary.
    #[test]
    fn copy_with_various_alignments() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        for start_offset in 0..CHUNK_SIZE {
            let test_len = 64;
            let test_data: Vec<u8> = (0..test_len).map(|i| (i + start_offset) as u8).collect();
            hshm.copy_from_slice(&test_data, start_offset).unwrap();
            let mut read_buf = vec![0u8; test_len];
            hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
            assert_eq!(
                test_data, read_buf,
                "Mismatch at alignment offset {}",
                start_offset
            );
        }
    }
    // Lengths smaller than one chunk never reach the chunked body loop.
    #[test]
    fn copy_small_lengths() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        for len in 0..CHUNK_SIZE {
            let test_data: Vec<u8> = (0..len).map(|i| i as u8).collect();
            hshm.copy_from_slice(&test_data, 0).unwrap();
            let mut read_buf = vec![0u8; len];
            hshm.copy_to_slice(&mut read_buf, 0).unwrap();
            assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
        }
    }
    // Lengths that leave a non-empty tail after the chunked body.
    #[test]
    fn copy_non_aligned_lengths() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
        for &len in &test_lengths {
            let test_data: Vec<u8> = (0..len).map(|i| (i % 256) as u8).collect();
            hshm.copy_from_slice(&test_data, 0).unwrap();
            let mut read_buf = vec![0u8; len];
            hshm.copy_to_slice(&mut read_buf, 0).unwrap();
            assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
        }
    }
    // Exactly one chunk: empty head and tail.
    #[test]
    fn copy_exact_chunk_size() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        let test_data: Vec<u8> = (0..CHUNK_SIZE).map(|i| i as u8).collect();
        hshm.copy_from_slice(&test_data, 0).unwrap();
        let mut read_buf = vec![0u8; CHUNK_SIZE];
        hshm.copy_to_slice(&mut read_buf, 0).unwrap();
        assert_eq!(test_data, read_buf);
    }
    // fill() across every starting alignment.
    #[test]
    fn fill_with_various_alignments() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();
        for start_offset in 0..CHUNK_SIZE {
            let fill_len = 64;
            let fill_value = (start_offset % 256) as u8;
            // Reset, then fill the window under test.
            hshm.fill(0, 0, mem_size).unwrap();
            hshm.fill(fill_value, start_offset, fill_len).unwrap();
            let mut read_buf = vec![0u8; fill_len];
            hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
            assert!(
                read_buf.iter().all(|&b| b == fill_value),
                "Fill mismatch at alignment offset {}",
                start_offset
            );
        }
    }
    // fill() with lengths smaller than one chunk.
    #[test]
    fn fill_small_lengths() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();
        for len in 0..CHUNK_SIZE {
            let fill_value = 0xAB;
            hshm.fill(0, 0, mem_size).unwrap();
            hshm.fill(fill_value, 0, len).unwrap();
            let mut read_buf = vec![0u8; len];
            hshm.copy_to_slice(&mut read_buf, 0).unwrap();
            assert!(
                read_buf.iter().all(|&b| b == fill_value),
                "Fill mismatch for length {}",
                len
            );
        }
    }
    // fill() with lengths that leave a non-empty tail.
    #[test]
    fn fill_non_aligned_lengths() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();
        let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
        for &len in &test_lengths {
            let fill_value = 0xCD;
            hshm.fill(0, 0, mem_size).unwrap();
            hshm.fill(fill_value, 0, len).unwrap();
            let mut read_buf = vec![0u8; len];
            hshm.copy_to_slice(&mut read_buf, 0).unwrap();
            assert!(
                read_buf.iter().all(|&b| b == fill_value),
                "Fill mismatch for length {}",
                len
            );
        }
    }
    // Zero-length and single-byte copies.
    #[test]
    fn copy_edge_cases() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        let empty: Vec<u8> = vec![];
        hshm.copy_from_slice(&empty, 0).unwrap();
        let mut read_buf: Vec<u8> = vec![];
        hshm.copy_to_slice(&mut read_buf, 0).unwrap();
        assert!(read_buf.is_empty());
        let single = vec![0x42u8];
        hshm.copy_from_slice(&single, 0).unwrap();
        let mut read_buf = vec![0u8; 1];
        hshm.copy_to_slice(&mut read_buf, 0).unwrap();
        assert_eq!(single, read_buf);
    }
    // Both the start offset and the length misaligned at once.
    #[test]
    fn copy_unaligned_start_and_length() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (hshm, _) = eshm.build();
        let start_offset = 7;
        let len = 37;
        let test_data: Vec<u8> = (0..len).map(|i| (i * 3) as u8).collect();
        hshm.copy_from_slice(&test_data, start_offset).unwrap();
        let mut read_buf = vec![0u8; len];
        hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
        assert_eq!(test_data, read_buf);
    }
}
mod try_pop_buffer_bounds {
use super::*;
#[derive(Debug, PartialEq)]
struct RawBytes(Vec<u8>);
impl TryFrom<&[u8]> for RawBytes {
type Error = String;
fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
Ok(RawBytes(value.to_vec()))
}
}
fn make_buffer(mem_size: usize) -> super::super::HostSharedMemory {
let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
let (hshm, _) = eshm.build();
hshm.write::<u64>(0, 8u64).unwrap();
hshm
}
#[test]
fn normal_push_pop_roundtrip() {
let mem_size = 4096;
let mut hshm = make_buffer(mem_size);
let payload = b"hello";
let mut data = Vec::new();
data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
data.extend_from_slice(payload);
hshm.push_buffer(0, mem_size, &data).unwrap();
let result: RawBytes = hshm.try_pop_buffer_into(0, mem_size).unwrap();
assert_eq!(result.0, data);
}
#[test]
fn malicious_flatbuffer_size_prefix() {
let mem_size = 4096;
let mut hshm = make_buffer(mem_size);
let payload = b"small";
let mut data = Vec::new();
data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
data.extend_from_slice(payload);
hshm.push_buffer(0, mem_size, &data).unwrap();
hshm.write::<u32>(8, 0xFFFF_FFFBu32).unwrap();
let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
let err_msg = format!("{}", result.unwrap_err());
assert!(
err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 4294967295 bytes but the element slot is only 9 bytes"),
"Unexpected error message: {}",
err_msg
);
}
#[test]
fn malicious_element_offset_too_small() {
let mem_size = 4096;
let mut hshm = make_buffer(mem_size);
let payload = b"test";
let mut data = Vec::new();
data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
data.extend_from_slice(payload);
hshm.push_buffer(0, mem_size, &data).unwrap();
hshm.write::<u64>(16, 0u64).unwrap();
let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
let err_msg = format!("{}", result.unwrap_err());
assert!(
err_msg.contains(
"Corrupt buffer back-pointer: element offset 0 is outside valid range [8, 8]"
),
"Unexpected error message: {}",
err_msg
);
}
#[test]
fn malicious_element_offset_past_stack_pointer() {
let mem_size = 4096;
let mut hshm = make_buffer(mem_size);
let payload = b"test";
let mut data = Vec::new();
data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
data.extend_from_slice(payload);
hshm.push_buffer(0, mem_size, &data).unwrap();
hshm.write::<u64>(16, 9999u64).unwrap();
let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
let err_msg = format!("{}", result.unwrap_err());
assert!(
err_msg.contains(
"Corrupt buffer back-pointer: element offset 9999 is outside valid range [8, 8]"
),
"Unexpected error message: {}",
err_msg
);
}
#[test]
fn malicious_flatbuffer_size_off_by_one() {
    let mem_size = 4096;
    let mut hshm = make_buffer(mem_size);
    // Push a well-formed entry: 4-byte LE length prefix followed by the payload.
    let payload = b"abcd";
    let mut entry = Vec::with_capacity(4 + payload.len());
    entry.extend_from_slice(&(payload.len() as u32).to_le_bytes());
    entry.extend_from_slice(payload);
    hshm.push_buffer(0, mem_size, &entry).unwrap();
    // Inflate the size prefix (u32 at offset 8) by one byte: 5 + 4-byte
    // header claims 9 bytes, one more than the 8-byte slot holds.
    hshm.write::<u32>(8, 5u32).unwrap();
    let popped: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
    let err_msg = popped.unwrap_err().to_string();
    assert!(
        err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 9 bytes but the element slot is only 8 bytes"),
        "Unexpected error message: {}",
        err_msg
    );
}
#[test]
fn back_pointer_near_stack_pointer_underflow() {
    let mem_size = 4096;
    let mut hshm = make_buffer(mem_size);
    // Push a well-formed entry: 4-byte LE length prefix followed by the payload.
    let payload = b"test";
    let mut entry = Vec::with_capacity(4 + payload.len());
    entry.extend_from_slice(&(payload.len() as u32).to_le_bytes());
    entry.extend_from_slice(payload);
    hshm.push_buffer(0, mem_size, &entry).unwrap();
    // Set the back-pointer (u64 at offset 16) just past the valid slot so a
    // naive subtraction during pop would underflow.
    hshm.write::<u64>(16, 23u64).unwrap();
    let popped: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
    let err_msg = popped.unwrap_err().to_string();
    assert!(
        err_msg.contains(
            "Corrupt buffer back-pointer: element offset 23 is outside valid range [8, 8]"
        ),
        "Unexpected error message: {}",
        err_msg
    );
}
#[test]
fn size_prefix_u32_overflow() {
    let mem_size = 4096;
    let mut hshm = make_buffer(mem_size);
    // Push a well-formed entry: 4-byte LE length prefix followed by the payload.
    let payload = b"test";
    let mut entry = Vec::with_capacity(4 + payload.len());
    entry.extend_from_slice(&(payload.len() as u32).to_le_bytes());
    entry.extend_from_slice(payload);
    hshm.push_buffer(0, mem_size, &entry).unwrap();
    // Corrupt the size prefix (u32 at offset 8) with 0xFFFF_FFFD: adding the
    // 4-byte header overflows u32, which must be detected rather than wrap.
    hshm.write::<u32>(8, 0xFFFF_FFFDu32).unwrap();
    let popped: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
    let err_msg = popped.unwrap_err().to_string();
    assert!(
        err_msg.contains("Corrupt buffer size prefix: value 4294967293 overflows when adding 4-byte header"),
        "Unexpected error message: {}",
        err_msg
    );
}
}
#[cfg(target_os = "linux")]
mod guard_page_crash_test {
    use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

    /// Exit code reported by the SIGSEGV handler, so the parent shim can
    /// distinguish a guard-page fault from any other test outcome.
    const TEST_EXIT_CODE: u8 = 211;

    /// Installs a SIGSEGV handler that terminates the process with
    /// `TEST_EXIT_CODE` instead of letting the default handler abort.
    ///
    /// NOTE(review): `std::process::exit` is not async-signal-safe; this is
    /// tolerated here because the handler only ever runs in a sacrificial
    /// child process spawned by `guard_page_testing_shim` — confirm that the
    /// `#[ignore]`d tests below are never run directly in a long-lived
    /// process.
    fn setup_signal_handler() {
        unsafe {
            signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                std::process::exit(TEST_EXIT_CODE.into());
            })
            .unwrap();
        }
    }

    /// Reading through `raw_ptr()` (the guard page, per this test's intent)
    /// must fault. Only run via `guard_page_testing_shim`.
    #[test]
    #[ignore]
    fn read() {
        setup_signal_handler();
        let eshm = ExclusiveSharedMemory::new(4096).unwrap();
        let (hshm, _) = eshm.build();
        let guard_page_ptr = hshm.raw_ptr();
        unsafe { std::ptr::read_volatile(guard_page_ptr) };
    }

    /// Writing through `raw_ptr()` must fault. Only run via
    /// `guard_page_testing_shim`.
    #[test]
    #[ignore]
    fn write() {
        setup_signal_handler();
        let eshm = ExclusiveSharedMemory::new(4096).unwrap();
        let (hshm, _) = eshm.build();
        let guard_page_ptr = hshm.raw_ptr();
        unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
    }

    /// Jumping to code at `raw_ptr()` must fault. Only run via
    /// `guard_page_testing_shim`.
    #[test]
    #[ignore]
    fn exec() {
        setup_signal_handler();
        let eshm = ExclusiveSharedMemory::new(4096).unwrap();
        let (hshm, _) = eshm.build();
        let guard_page_ptr = hshm.raw_ptr();
        let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
        func();
    }

    /// Runs each deliberately-crashing test above in a child `cargo test`
    /// process and asserts the child exited with `TEST_EXIT_CODE`, proving
    /// the guard page actually faulted.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn guard_page_testing_shim() {
        // TARGET_TRIPLE is loop-invariant: read it once instead of once per
        // test (the original re-read the env var every iteration), and avoid
        // the redundant `String` clone of `triple`.
        let target_args: Vec<String> = std::env::var("TARGET_TRIPLE")
            .ok()
            .filter(|t| !t.is_empty())
            .map(|triple| vec!["--target".to_string(), triple])
            .unwrap_or_default();

        for test in ["read", "write", "exec"] {
            let output = std::process::Command::new("cargo")
                .args(["test", "-p", "hyperlight-host", "--lib"])
                .args(&target_args)
                .args(["--", "--ignored", test])
                .stdin(std::process::Stdio::null())
                .output()
                .expect("Unable to launch tests");
            let exit_code = output.status.code();
            if exit_code != Some(TEST_EXIT_CODE.into()) {
                eprintln!("=== Guard Page test '{}' failed ===", test);
                eprintln!("Exit code: {:?} (expected {})", exit_code, TEST_EXIT_CODE);
                eprintln!("=== STDOUT ===");
                eprintln!("{}", String::from_utf8_lossy(&output.stdout));
                eprintln!("=== STDERR ===");
                eprintln!("{}", String::from_utf8_lossy(&output.stderr));
                panic!(
                    "Guard Page test failed: {} (exit code {:?}, expected {})",
                    test, exit_code, TEST_EXIT_CODE
                );
            }
        }
    }
}
}
/// A host-side handle to a shared-memory mapping that is exposed read-only:
/// this type never hands out mutable access (see its `SharedMemory` impl,
/// whose `with_exclusivity` always errors).
///
/// Cloning is cheap — clones share the same underlying `HostMapping` via the
/// inner `Arc`.
#[derive(Clone, Debug)]
pub struct ReadonlySharedMemory {
// Shared ownership of the host mapping; freed when the last clone is dropped.
region: Arc<HostMapping>,
}
// SAFETY: presumably sound because this type only reads the mapping and the
// `Arc` manages cross-thread ownership — TODO confirm `HostMapping` has no
// thread-affine state.
unsafe impl Send for ReadonlySharedMemory {}
// SAFETY: see the note on `Send` above — shared reads only; confirm
// `HostMapping` is free of un-synchronized interior mutability.
unsafe impl Sync for ReadonlySharedMemory {}
impl ReadonlySharedMemory {
    /// Creates a read-only region pre-populated with `contents`.
    ///
    /// Allocates fresh exclusive memory, copies the bytes in, then keeps
    /// only the underlying mapping so no writable handle survives.
    pub(crate) fn from_bytes(contents: &[u8]) -> Result<Self> {
        let mut staging = ExclusiveSharedMemory::new(contents.len())?;
        staging.copy_from_slice(contents, 0)?;
        let region = staging.region;
        Ok(ReadonlySharedMemory { region })
    }

    /// Borrows the entire mapping as an immutable byte slice.
    pub(crate) fn as_slice(&self) -> &[u8] {
        let (ptr, len) = (self.base_ptr(), self.mem_size());
        // SAFETY: presumably `base_ptr`/`mem_size` describe the live mapping
        // kept alive by `self.region` for the duration of the borrow —
        // confirm against `SharedMemory`'s contract.
        unsafe { std::slice::from_raw_parts(ptr, len) }
    }

    /// Produces an independent, writable copy of this region's contents.
    #[cfg(unshared_snapshot_mem)]
    pub(crate) fn copy_to_writable(&self) -> Result<ExclusiveSharedMemory> {
        let mut writable = ExclusiveSharedMemory::new(self.mem_size())?;
        writable.copy_from_slice(self.as_slice(), 0)?;
        Ok(writable)
    }

    /// Splits this handle into two that share the same underlying mapping
    /// (a cheap `Arc` clone).
    #[cfg(not(unshared_snapshot_mem))]
    pub(crate) fn build(self) -> (Self, Self) {
        let twin = self.clone();
        (twin, self)
    }

    /// Describes where this region should be mapped into the guest.
    ///
    /// Panics unless `region_type` is `Snapshot`; snapshot mappings are
    /// granted READ|EXECUTE, never write access.
    #[cfg(not(unshared_snapshot_mem))]
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        #[allow(clippy::panic)]
        if !matches!(region_type, MemoryRegionType::Snapshot) {
            panic!("ReadonlySharedMemory::mapping_at should only be used for Snapshot regions");
        }
        let flags = MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE;
        mapping_at(self, guest_base, region_type, flags)
    }
}
impl SharedMemory for ReadonlySharedMemory {
    fn region(&self) -> &HostMapping {
        &*self.region
    }

    /// A read-only view can never hand out mutable access; this always
    /// returns an error.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        _: F,
    ) -> Result<T> {
        Err(new_error!(
            "Cannot take exclusive access to a ReadonlySharedMemory"
        ))
    }

    /// Runs `f` over the full contents of the mapping.
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        let bytes = self.as_slice();
        Ok(f(bytes))
    }
}
impl<S: SharedMemory> PartialEq<S> for ReadonlySharedMemory {
    /// Two shared-memory handles compare equal when they view the same host
    /// allocation, i.e. their base pointers are identical.
    fn eq(&self, other: &S) -> bool {
        let (lhs, rhs) = (self.raw_ptr(), other.raw_ptr());
        lhs == rhs
    }
}