use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};
use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{instrument, Span};
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{VirtualAlloc, MEM_COMMIT, PAGE_EXECUTE_READWRITE};
#[cfg(target_os = "windows")]
use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
use crate::{log_then_return, new_error, Result};
/// Fails with an error (rather than panicking or wrapping) when the byte
/// range `[$offset, $offset + $size)` does not fit in `$mem_size` bytes.
///
/// Uses `checked_add` so that an `$offset + $size` overflow is reported as
/// an out-of-bounds error instead of wrapping around in release builds and
/// silently passing the check (which would let a later slice/pointer access
/// go out of bounds).
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        match ($offset).checked_add($size) {
            // In-bounds: the (non-overflowing) end of the range fits.
            Some(end) if end <= $mem_size => {}
            // Out of bounds or overflowed: bail out of the enclosing fn.
            _ => {
                return Err(new_error!(
                    "Cannot read value from offset {} with size {} in memory of size {}",
                    $offset,
                    $size,
                    $mem_size
                ));
            }
        }
    };
}
/// Generates a little-endian reader method named `$fname` returning `$ty`.
///
/// The generated method bounds-checks `offset` against the usable memory
/// size, then decodes `size_of::<$ty>()` bytes starting at `offset`.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
/// Generates a little-endian writer method named `$fname` accepting `$ty`.
///
/// The generated method bounds-checks `offset` against the usable memory
/// size, then stores `value` as `size_of::<$ty>()` little-endian bytes.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
/// Owns the raw host allocation backing a sandbox's memory, including the
/// two guard pages that bracket the usable region. Freed on `Drop`.
#[derive(Debug)]
pub struct HostMapping {
    ptr: *mut u8,  // base of the raw mapping (start of the leading guard page)
    size: usize,   // total mapped size in bytes, guard pages included
}
impl Drop for HostMapping {
    /// Unmaps the entire reserved region (guard pages included).
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;
        // SAFETY: `ptr`/`size` describe exactly the mapping created by `mmap`
        // in `ExclusiveSharedMemory::new`, and Drop runs at most once.
        // NOTE(review): the munmap return value is ignored; Drop cannot
        // propagate errors, but logging a failure here could aid debugging.
        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        use windows::Win32::System::Memory::{VirtualFree, MEM_DECOMMIT};
        // NOTE(review): MEM_DECOMMIT releases the physical pages but leaves
        // the virtual address range reserved; fully releasing a region
        // allocated with VirtualAlloc normally uses MEM_RELEASE with size 0.
        // Confirm whether keeping the reservation is intentional here.
        if let Err(e) = unsafe { VirtualFree(self.ptr as *mut c_void, self.size, MEM_DECOMMIT) } {
            tracing::error!("Failed to free shared memory (VirtualFree failed): {:?}", e);
        }
    }
}
/// A view over the shared memory to which exactly one handle exists at a
/// time, so its accessors need no locking.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
// SAFETY(review): the contained raw pointer is only dereferenced through
// this single exclusive handle, so moving the handle to another thread is
// presumed sound — confirm no aliasing views exist while this is live.
unsafe impl Send for ExclusiveSharedMemory {}
/// The guest-side view of the shared memory; shares the region and the
/// access lock with the corresponding `HostSharedMemory` (see `build`).
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    // Synchronizes this view against the host view and `with_exclusivity`.
    pub lock: Arc<RwLock<()>>,
}
// SAFETY(review): access to the raw mapping is mediated by `lock`; moving
// the handle across threads is presumed sound on that basis.
unsafe impl Send for GuestSharedMemory {}
/// The host-side view of the shared memory; clones alias the same region
/// and lock, so writes through one clone are visible through the others.
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    // Synchronizes this view against the guest view and `with_exclusivity`.
    lock: Arc<RwLock<()>>,
}
// SAFETY(review): access to the raw mapping is mediated by `lock`; moving
// the handle across threads is presumed sound on that basis.
unsafe impl Send for HostSharedMemory {}
impl ExclusiveSharedMemory {
#[cfg(target_os = "linux")]
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub fn new(min_size_bytes: usize) -> Result<Self> {
use libc::{
c_int, mmap, mprotect, off_t, size_t, MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE,
MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
};
use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};
if min_size_bytes == 0 {
return Err(new_error!("Cannot create shared memory with size 0"));
}
let total_size = min_size_bytes
.checked_add(2 * PAGE_SIZE_USIZE) .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;
assert!(
total_size % PAGE_SIZE_USIZE == 0,
"shared memory must be a multiple of 4096"
);
if total_size > isize::MAX as usize {
return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
}
let addr = unsafe {
mmap(
null_mut(),
total_size as size_t,
PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
-1 as c_int,
0 as off_t,
)
};
if addr == MAP_FAILED {
log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
}
let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
if res != 0 {
return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
}
let res = unsafe {
mprotect(
(addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
PAGE_SIZE_USIZE,
PROT_NONE,
)
};
if res != 0 {
return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
}
Ok(Self {
#[allow(clippy::arc_with_non_send_sync)]
region: Arc::new(HostMapping {
ptr: addr as *mut u8,
size: total_size,
}),
})
}
#[cfg(target_os = "windows")]
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
/// Allocates a fresh committed region of at least `min_size_bytes` usable
/// bytes plus two bracketing pages (see the Linux twin for the layout).
pub fn new(min_size_bytes: usize) -> Result<Self> {
    use windows::Win32::System::Memory::PAGE_READWRITE;
    use crate::HyperlightError::MemoryAllocationFailed;
    if min_size_bytes == 0 {
        return Err(new_error!("Cannot create shared memory with size 0"));
    }
    // Two extra pages bracket the usable region.
    let total_size = min_size_bytes
        .checked_add(2 * PAGE_SIZE_USIZE)
        .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;
    if total_size % PAGE_SIZE_USIZE != 0 {
        return Err(new_error!(
            "shared memory must be a multiple of {}",
            PAGE_SIZE_USIZE
        ));
    }
    if total_size > isize::MAX as usize {
        return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
    }
    // SAFETY: requests a brand-new committed region from the OS; no
    // existing memory is aliased.
    // NOTE(review): unlike the Linux path, the two bracketing pages are
    // not marked inaccessible here — confirm whether guard-page
    // protection is applied elsewhere on Windows.
    let addr =
        unsafe { VirtualAlloc(Some(null_mut()), total_size, MEM_COMMIT, PAGE_READWRITE) };
    if addr.is_null() {
        log_then_return!(MemoryAllocationFailed(
            Error::last_os_error().raw_os_error()
        ));
    }
    Ok(Self {
        #[allow(clippy::arc_with_non_send_sync)]
        region: Arc::new(HostMapping {
            ptr: addr as *mut u8,
            size: total_size,
        }),
    })
}
/// Marks the mapped region executable in addition to readable/writable.
///
/// NOTE(review): both branches change the protection of the *entire* raw
/// region, bracketing pages included; on Linux this replaces the guard
/// pages' PROT_NONE with RWX, disabling their fault-on-access behavior —
/// confirm that is intended.
pub(super) fn make_memory_executable(&self) -> Result<()> {
    #[cfg(target_os = "windows")]
    {
        use windows::Win32::System::Memory::{VirtualProtect, PAGE_PROTECTION_FLAGS};
        let mut _old_flags = PAGE_PROTECTION_FLAGS::default();
        // SAFETY: `ptr`/`size` describe the live mapping owned by `region`,
        // which outlives this call via `&self`.
        if let Err(e) = unsafe {
            VirtualProtect(
                self.region.ptr as *const c_void,
                self.region.size,
                PAGE_EXECUTE_READWRITE,
                &mut _old_flags as *mut PAGE_PROTECTION_FLAGS,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }
    }
    #[cfg(target_os = "linux")]
    {
        use libc::{mprotect, PROT_EXEC, PROT_READ, PROT_WRITE};
        // SAFETY: `ptr`/`size` describe the live mapping owned by `region`,
        // which outlives this call via `&self`.
        let res = unsafe {
            mprotect(
                self.region.ptr as *mut c_void,
                self.region.size,
                PROT_READ | PROT_WRITE | PROT_EXEC,
            )
        };
        if res != 0 {
            return Err(new_error!(
                "Failed to make memory executable: {:#?}",
                Error::last_os_error().raw_os_error()
            ));
        }
    }
    Ok(())
}
/// Borrows the usable region (guard pages excluded) as a mutable byte slice.
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn as_mut_slice<'a>(&'a mut self) -> &'a mut [u8] {
    // SAFETY: `base_ptr` skips the leading guard page and `mem_size`
    // excludes both guard pages, so the range is valid mapped memory; the
    // `&mut self` borrow prevents aliasing for the returned lifetime.
    unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
}
/// Borrows the usable region (guard pages excluded) as a shared byte slice.
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub fn as_slice<'a>(&'a self) -> &'a [u8] {
    // SAFETY: same range reasoning as `as_mut_slice`; exclusivity of this
    // handle means no other view mutates the bytes during the borrow.
    unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
}
/// Copies the entire usable region into a freshly allocated `Vec<u8>`.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
    Ok(self.as_slice().to_vec())
}
/// Copies all of `src` into the usable region starting at `offset`,
/// failing (without writing) if the destination range would be out of bounds.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
    let dst = self.as_mut_slice();
    bounds_check!(offset, src.len(), dst.len());
    let end = offset + src.len();
    dst[offset..end].copy_from_slice(src);
    Ok(())
}
/// Returns the absolute host address of `offset` within the usable region,
/// after checking the offset lies inside it (`offset == mem_size` passes,
/// matching the original zero-size bounds check).
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn calculate_address(&self, offset: usize) -> Result<usize> {
    bounds_check!(offset, 0, self.mem_size());
    let base = self.base_addr();
    Ok(base + offset)
}
// Typed little-endian accessors over the usable region; each generated
// method bounds-checks before touching memory (see the macros above).
generate_reader!(read_u8, u8);
generate_reader!(read_i8, i8);
generate_reader!(read_u16, u16);
generate_reader!(read_i16, i16);
generate_reader!(read_u32, u32);
generate_reader!(read_i32, i32);
generate_reader!(read_u64, u64);
generate_reader!(read_i64, i64);
generate_reader!(read_usize, usize);
generate_reader!(read_isize, isize);
generate_writer!(write_u8, u8);
generate_writer!(write_i8, i8);
generate_writer!(write_u16, u16);
generate_writer!(write_i16, i16);
generate_writer!(write_u32, u32);
generate_writer!(write_i32, i32);
generate_writer!(write_u64, u64);
generate_writer!(write_i64, i64);
generate_writer!(write_usize, usize);
generate_writer!(write_isize, isize);
/// Consumes the exclusive view and splits it into a host view and a guest
/// view that alias the same region and share one access lock.
pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
    let lock = Arc::new(RwLock::new(()));
    let host = HostSharedMemory {
        region: self.region.clone(),
        lock: lock.clone(),
    };
    let guest = GuestSharedMemory {
        region: self.region.clone(),
        lock: lock.clone(),
    };
    (host, guest)
}
}
/// Common view over a shared-memory mapping. Offsets used by accessors are
/// relative to the usable region, which begins one page past the raw
/// mapping's start.
pub trait SharedMemory {
    /// The raw mapping (guard pages included) backing this view.
    fn region(&self) -> &HostMapping;
    /// Address of the first usable byte (skips the leading guard page).
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }
    fn base_ptr(&self) -> *mut u8 {
        self.base_addr() as *mut u8
    }
    /// Size of the usable region: the raw size minus both guard pages.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }
    /// Start of the raw mapping (the leading guard page itself).
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }
    /// Total mapped size, guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }
    /// Runs `f` with a temporary exclusive view of this memory; locking
    /// implementations fail fast rather than block if the lock is held.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
}
impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    // Already exclusive, so no locking is required.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}
impl SharedMemory for GuestSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Runs `f` with a temporary exclusive view; fails fast (rather than
    /// blocking) if any reader or writer currently holds the region lock.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        // Held for the remainder of the function body, covering `f`'s run.
        let _guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        Ok(f(&mut excl))
    }
}
/// Marker for types for which every bit pattern is a valid value, which
/// makes it sound to materialize them from raw shared-memory bytes.
///
/// # Safety
/// Implement only for types with no invalid bit patterns and no interior
/// pointers (e.g. plain integers and byte arrays) — not for `bool`, enums,
/// or reference-carrying types.
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
impl HostSharedMemory {
/// Reads a `T` from `offset` within the usable region.
///
/// Goes through `copy_to_slice`, so the bytes are read volatilely under
/// the shared region lock; fails if the range is out of bounds or the
/// lock cannot be taken.
pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
    bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
    let ret = unsafe {
        let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
        {
            // SAFETY: the slice covers exactly the uninitialized storage
            // of `ret`, and u8 has no validity requirements to violate.
            let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                ret.as_mut_ptr() as *mut u8,
                std::mem::size_of::<T>(),
            );
            // On error, `?` propagates before assume_init is reached.
            self.copy_to_slice(slice, offset)?;
        }
        // SAFETY: every byte was just written by copy_to_slice, and
        // `T: AllValid` guarantees any byte pattern is a valid `T`.
        Ok(ret.assume_init())
    };
    ret
}
/// Writes `data` at `offset` within the usable region.
///
/// Goes through `copy_from_slice`, so the bytes are written volatilely
/// under the shared region lock; fails if the range is out of bounds or
/// the lock cannot be taken.
pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
    bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
    unsafe {
        // SAFETY: the slice covers exactly the bytes of the local `data`,
        // which lives for the duration of the call.
        let slice: &[u8] = core::slice::from_raw_parts(
            core::ptr::addr_of!(data) as *const u8,
            std::mem::size_of::<T>(),
        );
        self.copy_from_slice(slice, offset)?;
    }
    Ok(())
}
/// Fills `slice` from the usable region starting at `offset`.
///
/// Takes the region lock non-blockingly for reading, so it fails if an
/// exclusive borrow (`with_exclusivity`) is active. Reads are volatile,
/// one byte at a time, because the guest may mutate the memory
/// concurrently with this copy.
pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
    bounds_check!(offset, slice.len(), self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    for (i, b) in slice.iter_mut().enumerate() {
        // SAFETY: the bounds check above keeps base + i inside the
        // usable mapped region for every i < slice.len().
        unsafe {
            *b = base.wrapping_add(i).read_volatile();
        }
    }
    drop(guard);
    Ok(())
}
/// Writes all of `slice` into the usable region starting at `offset`.
///
/// Takes the region lock non-blockingly for reading, so it fails if an
/// exclusive borrow (`with_exclusivity`) is active. Writes are volatile,
/// one byte at a time, because the guest may access the memory
/// concurrently with this copy.
pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
    bounds_check!(offset, slice.len(), self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    for (i, b) in slice.iter().enumerate() {
        // SAFETY: the bounds check above keeps base + i inside the
        // usable mapped region for every i < slice.len().
        unsafe {
            base.wrapping_add(i).write_volatile(*b);
        }
    }
    drop(guard);
    Ok(())
}
/// Sets `len` bytes starting at `offset` to `value`.
///
/// Same locking and volatile-write discipline as `copy_from_slice`.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
    bounds_check!(offset, len, self.mem_size());
    let base = self.base_ptr().wrapping_add(offset);
    let guard = self
        .lock
        .try_read()
        .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
    for i in 0..len {
        // SAFETY: the bounds check above keeps base + i in the usable region.
        unsafe { base.wrapping_add(i).write_volatile(value) };
    }
    drop(guard);
    Ok(())
}
/// Pushes `data` onto the shared-buffer "stack" that starts at
/// `buffer_start_offset` and spans `buffer_size` bytes.
///
/// Layout (as used by `try_pop_buffer_into`): the first 8 bytes of the
/// buffer hold the relative offset of the next free byte (the "stack
/// pointer"). Each pushed entry is the payload followed by an 8-byte
/// little-endian record of the entry's own start offset, which lets a pop
/// rewind the stack pointer.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn push_buffer(
    &mut self,
    buffer_start_offset: usize,
    buffer_size: usize,
    data: &[u8],
) -> Result<()> {
    // Propagate read failures instead of panicking (was `.unwrap()`,
    // inconsistent with `try_pop_buffer_into` which uses `?`).
    let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
    let buffer_size_u64: u64 = buffer_size.try_into()?;
    // The stack pointer must stay past the 8-byte header and inside the buffer.
    if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
        return Err(new_error!(
            "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
            stack_pointer_rel,
            buffer_size_u64
        ));
    }
    // Payload plus the 8-byte back-link written after it.
    let size_required = data.len() + 8;
    let size_available = buffer_size - stack_pointer_rel;
    if size_required > size_available {
        return Err(new_error!(
            "Not enough space in buffer to push data. Required: {}, Available: {}",
            size_required,
            size_available
        ));
    }
    let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;
    self.copy_from_slice(data, stack_pointer_abs)?;
    // Record where this entry began so a pop can restore the stack pointer.
    self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;
    // Advance the stack pointer past payload + back-link.
    self.write::<u64>(
        buffer_start_offset,
        (stack_pointer_rel + data.len() + 8) as u64,
    )?;
    Ok(())
}
/// Pops the most recently pushed entry from the shared-buffer "stack"
/// (see `push_buffer` for the layout) and converts it into a `T`.
///
/// Rewinds the stack pointer to the popped entry's start and zeroes the
/// popped bytes afterwards.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub fn try_pop_buffer_into<T>(
    &mut self,
    buffer_start_offset: usize,
    buffer_size: usize,
) -> Result<T>
where
    T: for<'b> TryFrom<&'b [u8]>,
{
    let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
    // At least the 8-byte header plus one 8-byte back-link must lie below
    // the stack pointer for a pop to be possible.
    if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
        return Err(new_error!(
            "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
            stack_pointer_rel,
            buffer_size
        ));
    }
    // The 8 bytes just below the stack pointer hold the offset of the
    // last entry's start (written by `push_buffer`).
    let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;
    // Propagate read failures instead of panicking (was `.unwrap()`,
    // inconsistent with the rest of this method which uses `?`).
    let last_element_offset_rel: usize =
        self.read::<u64>(last_element_offset_abs - 8)? as usize;
    let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;
    // The entry's first u32 is a size prefix; the whole entry spans that
    // many bytes plus the 4-byte prefix itself.
    let fb_buffer_size = {
        // Renamed from `size_i32`: the value read is a u32, not an i32.
        let size_with_prefix = self.read::<u32>(last_element_offset_abs)? + 4;
        usize::try_from(size_with_prefix)
    }?;
    let mut result_buffer = vec![0; fb_buffer_size];
    self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
    let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
        new_error!(
            "pop_buffer_into: failed to convert buffer to {}",
            type_name::<T>()
        )
    })?;
    // Rewind the stack pointer to the popped entry's start...
    self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;
    // ...and scrub the popped bytes so stale data cannot be re-read.
    let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
    self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;
    Ok(to_return)
}
}
impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Runs `f` with a temporary exclusive view; fails fast (rather than
    /// blocking) if any reader or writer currently holds the region lock.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        // Held for the remainder of the function body, covering `f`'s run.
        let _guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        Ok(f(&mut excl))
    }
}
#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use proptest::prelude::*;
    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
    use crate::mem::shared_mem_tests::read_write_test_suite;
    use crate::Result;

    // `fill` writes exactly the requested range and rejects ranges that
    // fall outside the usable region.
    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();
        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();
        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));
        hshm.fill(5, 0, 4096).unwrap();
        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));
        // Out-of-bounds fills must fail without writing.
        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }

    // Round-trips slices through shared memory at the start, middle, and
    // end of the region, and checks out-of-bounds copies fail.
    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // write the value to the memory at the beginning.
        hshm.copy_from_slice(&vec, 0)?;
        let mut vec2 = vec![0; vec_len];
        // read the value back from the memory at the beginning.
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);
        let offset = mem_size - vec.len();
        // write the value to the memory at the end.
        hshm.copy_from_slice(&vec, offset)?;
        let mut vec3 = vec![0; vec_len];
        // read the value back from the memory at the end.
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);
        let offset = mem_size / 2;
        // write the value to the memory at the middle.
        hshm.copy_from_slice(&vec, offset)?;
        let mut vec4 = vec![0; vec_len];
        // read the value back from the memory at the middle.
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);
        // try and read a value from an offset that is beyond the end of the memory.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
        // try and read a value from an offset far beyond the end of the memory.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
        // a slice larger than the whole region can never fit.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
        assert!(hshm.copy_from_slice(&vec7, 0).is_err());
        Ok(())
    }

    proptest! {
        // Property test: an i32 written through either the exclusive typed
        // accessors or the host read/write API reads back unchanged.
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }

    // Zero-sized and absurdly large allocation requests must both fail.
    #[test]
    fn alloc_fail() {
        let gm = ExclusiveSharedMemory::new(0);
        assert!(gm.is_err());
        let gm = ExclusiveSharedMemory::new(usize::MAX);
        assert!(gm.is_err());
    }

    // Clones of HostSharedMemory alias the same underlying region, and the
    // region stays alive until the last handle is dropped.
    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();
        // Writes through either clone are visible through both.
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        // Dropping one clone must not invalidate the other.
        drop(hshm1);
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }

    #[test]
    fn copy_all_to_vec() {
        let mut data = vec![b'a', b'b', b'c'];
        data.resize(4096, 0);
        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
        let ret_vec = eshm.copy_all_to_vec().unwrap();
        assert_eq!(data, ret_vec);
    }

    // Verifies the mapping disappears from the process map once every
    // handle (host, guest, clones) has been dropped.
    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_drop() {
        use proc_maps::maps_contain_addr;
        let pid = std::process::id();
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();
        let addr = hshm1.raw_ptr() as usize;
        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            maps_contain_addr(addr, &maps_before_drop),
            "shared memory address {:#x} was not found in process map, but should be",
            addr,
        );
        drop(hshm1);
        drop(hshm2);
        drop(gshm);
        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            !maps_contain_addr(addr, &maps_after_drop),
            "shared memory address {:#x} was found in the process map, but shouldn't be",
            addr
        );
    }

    // These tests deliberately fault on a guard page; each is run in a
    // child process by `guard_page_testing_shim` below, which checks the
    // child exits with TEST_EXIT_CODE (set by the SIGSEGV handler).
    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
        const TEST_EXIT_CODE: u8 = 211;
        // Exits with TEST_EXIT_CODE on SIGSEGV so the shim can distinguish a
        // guard-page fault from any other kind of failure.
        fn setup_signal_handler() {
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }
        #[test]
        #[ignore]
        fn read() {
            setup_signal_handler();
            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            // raw_ptr() is the leading guard page, so this access must fault.
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }
        #[test]
        #[ignore]
        fn write() {
            setup_signal_handler();
            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }
        #[test]
        #[ignore]
        fn exec() {
            setup_signal_handler();
            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }
        // Runs the three ignored tests above in child processes and checks
        // each child died via the SIGSEGV handler's exit code.
        #[test]
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];
            for test in tests {
                let status = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host", "--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .status()
                    .expect("Unable to launch tests");
                assert_eq!(
                    status.code(),
                    Some(TEST_EXIT_CODE.into()),
                    "Guard Page test failed: {}",
                    test
                );
            }
        }
    }
}