1use std::any::type_name;
18use std::ffi::c_void;
19use std::io::Error;
20use std::mem::{align_of, size_of};
21#[cfg(target_os = "linux")]
22use std::ptr::null_mut;
23use std::sync::{Arc, RwLock};
24
25use hyperlight_common::mem::PAGE_SIZE_USIZE;
26use tracing::{Span, instrument};
27#[cfg(target_os = "windows")]
28use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
29#[cfg(target_os = "windows")]
30use windows::Win32::System::Memory::PAGE_READWRITE;
31#[cfg(target_os = "windows")]
32use windows::Win32::System::Memory::{
33 CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
34 PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
35};
36#[cfg(target_os = "windows")]
37use windows::core::PCSTR;
38
39use super::memory_region::{
40 HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType,
41};
42use crate::HyperlightError::SnapshotSizeMismatch;
43#[cfg(target_os = "windows")]
44use crate::HyperlightError::WindowsAPIError;
45use crate::sandbox::snapshot::Snapshot;
46use crate::{HyperlightError, Result, log_then_return, new_error};
47
/// Returns early from the enclosing function with an error unless the
/// half-open byte range `[$offset, $offset + $size)` fits inside a
/// buffer of `$mem_size` bytes. `checked_add` also rejects ranges whose
/// end would overflow `usize`.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset.checked_add($size).is_none_or(|end| end > $mem_size) {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}
61
/// Generates a bounds-checked, little-endian reader method named
/// `$fname` that reads one `$ty` from the given byte offset of the
/// shared-memory slice.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        /// Read a value of this method's integer type (little-endian)
        /// from shared memory at `offset`.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
77
/// Generates a bounds-checked, little-endian writer method named
/// `$fname` that writes one `$ty` at the given byte offset of the
/// shared-memory slice.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        /// Write `value` (little-endian) into shared memory at `offset`.
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
91
/// The raw host-side memory mapping backing a sandbox's shared memory.
///
/// `ptr`/`size` describe the *entire* mapping, including one
/// inaccessible guard page at each end (see `ExclusiveSharedMemory::new`);
/// the usable region is `size - 2 * PAGE_SIZE_USIZE` bytes starting at
/// `ptr + PAGE_SIZE_USIZE`. Dropping the last reference unmaps the
/// memory (and, on Windows, closes the file-mapping handle).
#[derive(Debug)]
pub struct HostMapping {
    // Base address of the whole mapping (start of the leading guard page).
    ptr: *mut u8,
    // Total mapping size in bytes, guard pages included.
    size: usize,
    // Handle returned by CreateFileMappingA; closed on drop.
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}
103
impl Drop for HostMapping {
    /// Unmap the whole region when the last `Arc<HostMapping>` goes away.
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        // SAFETY: `ptr`/`size` are exactly the address and length
        // returned by the `mmap` call in `ExclusiveSharedMemory::new`.
        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    /// Unmap the view and close the file-mapping handle. Failures are
    /// logged rather than propagated, since `drop` cannot return errors.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}
131
/// Exclusively-owned access to a [`HostMapping`].
///
/// Produced by [`ExclusiveSharedMemory::new`]; consumed by
/// [`ExclusiveSharedMemory::build`], which splits it into a host view
/// and a guest view sharing the same mapping.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
// SAFETY: `HostMapping` holds a raw pointer into a process-wide memory
// mapping; moving this owner to another thread does not invalidate it.
unsafe impl Send for ExclusiveSharedMemory {}
142
/// The guest's view of a shared [`HostMapping`].
///
/// Shares a single `RwLock` with the corresponding
/// [`HostSharedMemory`]; `with_exclusivity` takes the write side so
/// host-side readers are excluded while the closure runs.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    pub lock: Arc<RwLock<()>>,
}
// SAFETY: see `ExclusiveSharedMemory` — the raw pointer refers to a
// process-wide mapping valid regardless of which thread holds this.
unsafe impl Send for GuestSharedMemory {}
167
/// The host's view of a shared [`HostMapping`].
///
/// Cloneable: all clones share the same mapping and the same `RwLock`
/// (read side for the volatile copy/fill helpers, write side for
/// `with_exclusivity`).
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    lock: Arc<RwLock<()>>,
}
// SAFETY: see `ExclusiveSharedMemory` — the raw pointer refers to a
// process-wide mapping valid regardless of which thread holds this.
unsafe impl Send for HostSharedMemory {}
328
impl ExclusiveSharedMemory {
    /// Allocate a new shared-memory region of at least `min_size_bytes`
    /// usable bytes (Linux).
    ///
    /// The actual mapping is `min_size_bytes + 2 * PAGE_SIZE_USIZE`
    /// bytes: one `PROT_NONE` guard page is placed before and after the
    /// usable region so stray accesses fault instead of corrupting
    /// adjacent memory.
    ///
    /// # Errors
    /// Fails if the size is 0, if the total size is not page-aligned or
    /// exceeds `isize::MAX`, or if `mmap`/`mprotect` fail.
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t,
            size_t,
        };
        #[cfg(not(miri))]
        use libc::{MAP_NORESERVE, PROT_NONE, mprotect};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Room for the usable region plus the two guard pages.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Slices larger than isize::MAX bytes are UB to construct, so
        // reject such requests up front.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // MAP_NORESERVE avoids committing swap for the whole region;
        // miri does not model it, so it is omitted under miri.
        #[cfg(not(miri))]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
        #[cfg(miri)]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE;

        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                flags,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(HyperlightError::MmapFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Turn the first and last pages into inaccessible guard pages.
        #[cfg(not(miri))]
        {
            let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
            if res != 0 {
                // NOTE(review): on this error path the fresh mapping is
                // not munmap'ed and is leaked — confirm intended.
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
            let res = unsafe {
                mprotect(
                    (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                    PAGE_SIZE_USIZE,
                    PROT_NONE,
                )
            };
            if res != 0 {
                // NOTE(review): same leak concern as above.
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
        }

        Ok(Self {
            // HostMapping contains a raw pointer, so Arc flags it as
            // non-Send/Sync; Send is implemented manually on the
            // wrapper types instead.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Allocate a new shared-memory region of at least `min_size_bytes`
    /// usable bytes (Windows).
    ///
    /// Uses an anonymous pagefile-backed file mapping; as on Linux, one
    /// `PAGE_NOACCESS` guard page is placed at each end of the mapping.
    ///
    /// # Errors
    /// Fails if the size is 0, if the total size is not page-aligned or
    /// exceeds `isize::MAX`, or if any of the Windows API calls fail.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Room for the usable region plus the two guard pages.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Slices larger than isize::MAX bytes are UB to construct, so
        // reject such requests up front.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // CreateFileMappingA takes the 64-bit size as two 32-bit halves.
        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        let flags = PAGE_READWRITE;

        // INVALID_HANDLE_VALUE + null name => anonymous mapping backed
        // by the system paging file.
        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        let file_map = FILE_MAP_ALL_ACCESS;
        // Size 0 maps the entire file mapping into the address space.
        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // VirtualProtect requires an out-parameter for the previous
        // protection even when the caller does not need it.
        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // Make the first and last pages inaccessible guard pages.
        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            // NOTE(review): on this error path the view/handle are not
            // released and are leaked — confirm intended.
            log_then_return!(WindowsAPIError(e.clone()));
        }

        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            // See the Linux variant for why this lint is allowed.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    /// Mutable byte slice over the usable region (guard pages excluded).
    /// Sound because `&mut self` proves exclusive access.
    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Shared byte slice over the usable region (guard pages excluded).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copy the entire usable region into a fresh `Vec<u8>` (test-only).
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    #[cfg(test)]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copy `src` into shared memory starting at `offset`, after
    /// bounds-checking the destination range.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    // Little-endian, bounds-checked accessors for the common integer
    // types (see `generate_reader!`/`generate_writer!`).
    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Consume exclusive ownership and split it into a host view and a
    /// guest view over the same mapping, synchronized by a freshly
    /// created shared `RwLock`.
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        (
            HostSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
            GuestSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
        )
    }

    /// Windows only: the file-mapping handle backing this memory.
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }
}
669
impl GuestSharedMemory {
    /// Build a [`MemoryRegion`] describing how this shared memory
    /// should be mapped into the guest at `guest_base`.
    ///
    /// Flags are derived from the region type: `Scratch` is
    /// read/write/execute, `Snapshot` is read/execute.
    ///
    /// # Panics
    /// Panics for any region type other than `Scratch` or `Snapshot`.
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        let flags = match region_type {
            MemoryRegionType::Scratch => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            MemoryRegionType::Snapshot => MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
            #[allow(clippy::panic)]
            _ => panic!(
                "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions"
            ),
        };
        let guest_base = guest_base as usize;
        // The host side is a raw address on non-Windows platforms, but
        // a handle+offset description on Windows.
        #[cfg(not(windows))]
        let host_base = self.base_addr();
        #[cfg(windows)]
        let host_base = self.host_region_base();
        let host_end = <HostGuestMemoryRegion as MemoryRegionKind>::add(host_base, self.mem_size());
        MemoryRegion {
            guest_region: guest_base..(guest_base + self.mem_size()),
            host_region: host_base..host_end,
            region_type,
            flags,
        }
    }
}
709
/// Common interface for the three views over a [`HostMapping`]
/// (exclusive, host, guest): address/size accessors plus a way to run a
/// closure with temporary exclusive access.
pub trait SharedMemory {
    /// The underlying host mapping (guard pages included).
    fn region(&self) -> &HostMapping;

    /// Address of the first usable byte, i.e. just past the leading
    /// guard page.
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Pointer form of [`Self::base_addr`].
    fn base_ptr(&self) -> *mut u8 {
        self.region().ptr.wrapping_add(PAGE_SIZE_USIZE)
    }

    /// Size of the usable region, excluding the two guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Raw base of the whole mapping, including the leading guard page.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Total size of the whole mapping, guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Windows only: describe the host side of this mapping by its
    /// file-mapping handle, the mapped base/size, and the offset past
    /// the leading guard page.
    #[cfg(target_os = "windows")]
    fn host_region_base(&self) -> super::memory_region::HostRegionBase {
        super::memory_region::HostRegionBase {
            from_handle: self.region().handle.into(),
            handle_base: self.region().ptr as usize,
            handle_size: self.region().size,
            offset: PAGE_SIZE_USIZE,
        }
    }

    /// Run `f` with exclusive access to the memory; implementations
    /// take the write side of the shared lock (or nothing, for the
    /// already-exclusive view).
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;

    /// Overwrite the entire usable region with the snapshot's memory.
    ///
    /// # Errors
    /// Fails if the snapshot length differs from `mem_size()`, or if
    /// exclusive access cannot be acquired.
    fn restore_from_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
        if snapshot.memory().len() != self.mem_size() {
            // NOTE(review): the check uses `snapshot.memory().len()` but
            // the error reports `snapshot.mem_size()` — presumably the
            // same value; confirm against `Snapshot`'s definition.
            return Err(SnapshotSizeMismatch(self.mem_size(), snapshot.mem_size()));
        }
        self.with_exclusivity(|e| e.copy_from_slice(snapshot.memory(), 0))?
    }

    /// Zero the shared memory.
    ///
    /// On Linux/KVM builds this first tries `madvise(MADV_DONTNEED)` on
    /// the whole mapping, which lets the kernel drop the pages so they
    /// read back as zero without an explicit memset; on failure (or on
    /// other configurations) it falls back to filling the usable region
    /// with zeros.
    fn zero(&mut self) -> Result<()> {
        self.with_exclusivity(|e| {
            // `mut` is unused on configurations where madvise is
            // compiled out, hence the allow.
            #[allow(unused_mut)]
            let mut do_copy = true;
            #[cfg(all(target_os = "linux", feature = "kvm", not(any(feature = "mshv3"))))]
            unsafe {
                let ret = libc::madvise(
                    e.region.ptr as *mut libc::c_void,
                    e.region.size,
                    libc::MADV_DONTNEED,
                );
                if ret == 0 {
                    do_copy = false;
                }
            }
            if do_copy {
                e.as_mut_slice().fill(0);
            }
        })
    }
}
808
809impl SharedMemory for ExclusiveSharedMemory {
810 fn region(&self) -> &HostMapping {
811 &self.region
812 }
813 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
814 &mut self,
815 f: F,
816 ) -> Result<T> {
817 Ok(f(self))
818 }
819}
820
821impl SharedMemory for GuestSharedMemory {
822 fn region(&self) -> &HostMapping {
823 &self.region
824 }
825 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
826 &mut self,
827 f: F,
828 ) -> Result<T> {
829 let guard = self
830 .lock
831 .try_write()
832 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
833 let mut excl = ExclusiveSharedMemory {
834 region: self.region.clone(),
835 };
836 let ret = f(&mut excl);
837 drop(excl);
838 drop(guard);
839 Ok(ret)
840 }
841}
842
/// Marker trait for types that may be read straight out of
/// guest-writable shared memory: every possible bit pattern of
/// `size_of::<Self>()` bytes must be a valid value of the type.
///
/// # Safety
/// Implementors must guarantee the type has no invalid bit patterns
/// (e.g. no `bool`, enums, references, or padding bytes), since
/// `HostSharedMemory::read` calls `MaybeUninit::assume_init` on bytes
/// the guest may have written arbitrarily.
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
861
impl HostSharedMemory {
    /// Read a `T` from shared memory at `offset`.
    ///
    /// Restricted to `AllValid` types because the guest may have
    /// written arbitrary bytes: any bit pattern must be a valid `T`.
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                // View the uninitialized storage as a byte buffer and
                // fill it from shared memory.
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            // SAFETY: every byte was initialized above, and
            // `T: AllValid` means any bit pattern is a valid `T`.
            Ok(ret.assume_init())
        }
    }

    /// Write a `T` into shared memory at `offset`, byte for byte.
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy bytes out of shared memory (starting at `offset`) into
    /// `slice`, using volatile reads because the guest may modify the
    /// memory concurrently.
    ///
    /// Takes the read side of the shared lock (failing rather than
    /// blocking), then copies in three phases: byte-by-byte up to the
    /// first 16-byte-aligned address, 16 bytes (`u128`) at a time over
    /// the aligned middle, and byte-by-byte for the tail.
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: advance byte-wise until `base + i` is u128-aligned
        // (or the slice is exhausted).
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        // Body: aligned volatile u128 loads; the destination may be
        // unaligned, hence write_unaligned.
        let dst = slice.as_mut_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = (base.add(i) as *const u128).read_volatile();
                std::ptr::write_unaligned(dst.add(i) as *mut u128, value);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes, one at a time.
        while i < len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Copy `slice` into shared memory starting at `offset`, using
    /// volatile writes because the guest may access the memory
    /// concurrently. Same head/aligned-body/tail structure as
    /// [`Self::copy_to_slice`].
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: byte-wise until the destination is u128-aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        // Body: unaligned u128 loads from the source, aligned volatile
        // stores to shared memory.
        let src = slice.as_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = std::ptr::read_unaligned(src.add(i) as *const u128);
                (base.add(i) as *mut u128).write_volatile(value);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes, one at a time.
        while i < len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Fill `len` bytes of shared memory starting at `offset` with
    /// `value`, using volatile writes. Same head/aligned-body/tail
    /// structure as the copy helpers, with the fill byte replicated
    /// into a `u128` for the aligned middle.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let value_u128 = u128::from_ne_bytes([value; CHUNK]);
        let mut i = 0;

        // Head: byte-wise until the destination is u128-aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        // Body: aligned volatile u128 stores.
        while i + CHUNK <= len {
            unsafe {
                (base.add(i) as *mut u128).write_volatile(value_u128);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes, one at a time.
        while i < len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Push `data` onto the stack-shaped buffer at
    /// `[buffer_start_offset, buffer_start_offset + buffer_size)`.
    ///
    /// Buffer layout (offsets relative to `buffer_start_offset`): the
    /// first 8 bytes hold the current stack pointer (so an empty buffer
    /// has pointer 8); each pushed element is its raw bytes followed by
    /// an 8-byte record of the stack pointer that preceded the push,
    /// which lets `try_pop_buffer_into` walk back.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        // The stack pointer must lie past the 8-byte header and inside
        // the buffer.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        // Element bytes plus the 8-byte back-pointer record.
        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // Write the element, then the previous stack pointer after it.
        self.copy_from_slice(data, stack_pointer_abs)?;
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // Advance the stack pointer past element + back-pointer.
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pop the most recently pushed element off the stack-shaped buffer
    /// (see [`Self::push_buffer`] for the layout) and convert it to `T`.
    ///
    /// The element is expected to be a size-prefixed flatbuffer: its
    /// first 4 bytes hold the payload length, so the full element is
    /// that length plus the 4-byte prefix. After a successful pop the
    /// stack pointer is rewound and the popped bytes are zeroed.
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        // A non-empty buffer holds at least one element + back-pointer
        // past the 8-byte header, so the pointer must be >= 16.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // The 8 bytes just below the stack pointer record where the
        // last element begins.
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // The guest can scribble on this buffer, so validate the
        // back-pointer before trusting it.
        if last_element_offset_rel > stack_pointer_rel.saturating_sub(16)
            || last_element_offset_rel < 8
        {
            return Err(new_error!(
                "Corrupt buffer back-pointer: element offset {} is outside valid range [8, {}].",
                last_element_offset_rel,
                stack_pointer_rel.saturating_sub(16),
            ));
        }

        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Space occupied by the element itself (excluding back-pointer).
        let max_element_size = stack_pointer_rel - last_element_offset_rel - 8;

        // Total element size = 4-byte size prefix + claimed payload.
        let fb_buffer_size = {
            let raw_prefix = self.read::<u32>(last_element_offset_abs)?;
            let total = raw_prefix.checked_add(4).ok_or_else(|| {
                new_error!(
                    "Corrupt buffer size prefix: value {} overflows when adding 4-byte header.",
                    raw_prefix
                )
            })?;
            usize::try_from(total)
        }?;

        if fb_buffer_size > max_element_size {
            return Err(new_error!(
                "Corrupt buffer size prefix: flatbuffer claims {} bytes but the element slot is only {} bytes.",
                fb_buffer_size,
                max_element_size
            ));
        }

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // Rewind the stack pointer to the start of the popped element...
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // ...and zero the popped element plus its back-pointer record.
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}
1185
1186impl SharedMemory for HostSharedMemory {
1187 fn region(&self) -> &HostMapping {
1188 &self.region
1189 }
1190 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
1191 &mut self,
1192 f: F,
1193 ) -> Result<T> {
1194 let guard = self
1195 .lock
1196 .try_write()
1197 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
1198 let mut excl = ExclusiveSharedMemory {
1199 region: self.region.clone(),
1200 };
1201 let ret = f(&mut excl);
1202 drop(excl);
1203 drop(guard);
1204 Ok(ret)
1205 }
1206}
1207
1208#[cfg(test)]
1209mod tests {
1210 use hyperlight_common::mem::PAGE_SIZE_USIZE;
1211 #[cfg(not(miri))]
1212 use proptest::prelude::*;
1213
1214 #[cfg(not(miri))]
1215 use super::HostSharedMemory;
1216 use super::{ExclusiveSharedMemory, SharedMemory};
1217 use crate::Result;
1218 #[cfg(not(miri))]
1219 use crate::mem::shared_mem_tests::read_write_test_suite;
1220
1221 #[test]
1222 fn fill() {
1223 let mem_size: usize = 4096;
1224 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1225 let (mut hshm, _) = eshm.build();
1226
1227 hshm.fill(1, 0, 1024).unwrap();
1228 hshm.fill(2, 1024, 1024).unwrap();
1229 hshm.fill(3, 2048, 1024).unwrap();
1230 hshm.fill(4, 3072, 1024).unwrap();
1231
1232 let vec = hshm
1233 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1234 .unwrap();
1235
1236 assert!(vec[0..1024].iter().all(|&x| x == 1));
1237 assert!(vec[1024..2048].iter().all(|&x| x == 2));
1238 assert!(vec[2048..3072].iter().all(|&x| x == 3));
1239 assert!(vec[3072..4096].iter().all(|&x| x == 4));
1240
1241 hshm.fill(5, 0, 4096).unwrap();
1242
1243 let vec2 = hshm
1244 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1245 .unwrap();
1246 assert!(vec2.iter().all(|&x| x == 5));
1247
1248 assert!(hshm.fill(0, 0, mem_size + 1).is_err());
1249 assert!(hshm.fill(0, mem_size, 1).is_err());
1250 }
1251
1252 #[test]
1255 fn bounds_check_overflow() {
1256 let mem_size: usize = 4096;
1257 let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1258
1259 assert!(eshm.read_i32(usize::MAX).is_err());
1261 assert!(eshm.write_i32(usize::MAX, 0).is_err());
1262 assert!(eshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
1263
1264 let (mut hshm, _) = eshm.build();
1266
1267 assert!(hshm.read::<u8>(usize::MAX).is_err());
1268 assert!(hshm.read::<u64>(usize::MAX - 3).is_err());
1269 assert!(hshm.write::<u8>(usize::MAX, 0).is_err());
1270 assert!(hshm.write::<u64>(usize::MAX - 3, 0).is_err());
1271
1272 let mut buf = [0u8; 1];
1273 assert!(hshm.copy_to_slice(&mut buf, usize::MAX).is_err());
1274 assert!(hshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
1275
1276 assert!(hshm.fill(0, usize::MAX, 1).is_err());
1277 assert!(hshm.fill(0, 1, usize::MAX).is_err());
1278 }
1279
1280 #[test]
1281 fn copy_into_from() -> Result<()> {
1282 let mem_size: usize = 4096;
1283 let vec_len = 10;
1284 let eshm = ExclusiveSharedMemory::new(mem_size)?;
1285 let (hshm, _) = eshm.build();
1286 let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
1287 hshm.copy_from_slice(&vec, 0)?;
1289
1290 let mut vec2 = vec![0; vec_len];
1291 hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
1293 assert_eq!(vec, vec2);
1294
1295 let offset = mem_size - vec.len();
1296 hshm.copy_from_slice(&vec, offset)?;
1298
1299 let mut vec3 = vec![0; vec_len];
1300 hshm.copy_to_slice(&mut vec3, offset)?;
1302 assert_eq!(vec, vec3);
1303
1304 let offset = mem_size / 2;
1305 hshm.copy_from_slice(&vec, offset)?;
1307
1308 let mut vec4 = vec![0; vec_len];
1309 hshm.copy_to_slice(&mut vec4, offset)?;
1311 assert_eq!(vec, vec4);
1312
1313 let mut vec5 = vec![0; vec_len];
1315 assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
1316
1317 assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
1319
1320 let mut vec6 = vec![0; vec_len];
1322 assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
1323
1324 assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
1326
1327 let mut vec7 = vec![0; mem_size * 2];
1329 assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
1330
1331 assert!(hshm.copy_from_slice(&vec7, 0).is_err());
1333
1334 Ok(())
1335 }
1336
1337 #[cfg(not(miri))]
1339 proptest! {
1340 #[test]
1341 fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
1342 read_write_test_suite(
1343 val,
1344 ExclusiveSharedMemory::new,
1345 Box::new(ExclusiveSharedMemory::read_i32),
1346 Box::new(ExclusiveSharedMemory::write_i32),
1347 )
1348 .unwrap();
1349 read_write_test_suite(
1350 val,
1351 |s| {
1352 let e = ExclusiveSharedMemory::new(s)?;
1353 let (h, _) = e.build();
1354 Ok(h)
1355 },
1356 Box::new(HostSharedMemory::read::<i32>),
1357 Box::new(|h, o, v| h.write::<i32>(o, v)),
1358 )
1359 .unwrap();
1360 }
1361 }
1362
1363 #[test]
1364 fn alloc_fail() {
1365 let gm = ExclusiveSharedMemory::new(0);
1366 assert!(gm.is_err());
1367 let gm = ExclusiveSharedMemory::new(usize::MAX);
1368 assert!(gm.is_err());
1369 }
1370
1371 #[test]
1372 fn clone() {
1373 let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
1374 let (hshm1, _) = eshm.build();
1375 let hshm2 = hshm1.clone();
1376
1377 assert_eq!(hshm1.mem_size(), hshm2.mem_size());
1380 assert_eq!(hshm1.base_addr(), hshm2.base_addr());
1381
1382 hshm1.copy_from_slice(b"a", 0).unwrap();
1385 hshm2.copy_from_slice(b"b", 1).unwrap();
1386
1387 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1390 assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
1391 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1392 }
1393
1394 drop(hshm1);
1397
1398 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1400 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1401 }
1402 hshm2.copy_from_slice(b"c", 2).unwrap();
1403 assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
1404 drop(hshm2);
1405 }
1406
1407 #[test]
1408 fn copy_all_to_vec() {
1409 let mut data = vec![b'a', b'b', b'c'];
1410 data.resize(4096, 0);
1411 let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
1412 eshm.copy_from_slice(data.as_slice(), 0).unwrap();
1413 let ret_vec = eshm.copy_all_to_vec().unwrap();
1414 assert_eq!(data, ret_vec);
1415 }
1416
1417 #[test]
1420 #[cfg(all(target_os = "linux", not(miri)))]
1421 fn test_drop() {
1422 use proc_maps::get_process_maps;
1423
1424 const UNIQUE_SIZE: usize = PAGE_SIZE_USIZE * 17;
1434
1435 let pid = std::process::id();
1436
1437 let eshm = ExclusiveSharedMemory::new(UNIQUE_SIZE).unwrap();
1438 let (hshm1, gshm) = eshm.build();
1439 let hshm2 = hshm1.clone();
1440
1441 let base_ptr = hshm1.base_ptr() as usize;
1443 let mem_size = hshm1.mem_size();
1444
1445 let has_exact_mapping = |ptr: usize, size: usize| -> bool {
1447 get_process_maps(pid.try_into().unwrap())
1448 .unwrap()
1449 .iter()
1450 .any(|m| m.start() == ptr && m.size() == size)
1451 };
1452
1453 assert!(
1455 has_exact_mapping(base_ptr, mem_size),
1456 "shared memory mapping not found at {:#x} with size {}",
1457 base_ptr,
1458 mem_size
1459 );
1460
1461 drop(hshm1);
1463 drop(hshm2);
1464 drop(gshm);
1465
1466 assert!(
1468 !has_exact_mapping(base_ptr, mem_size),
1469 "shared memory mapping still exists at {:#x} with size {} after drop",
1470 base_ptr,
1471 mem_size
1472 );
1473 }
1474
1475 mod alignment_tests {
1479 use super::*;
1480
1481 const CHUNK_SIZE: usize = size_of::<u128>();
1482
1483 #[test]
1485 fn copy_with_various_alignments() {
1486 let mem_size: usize = 4096;
1488 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1489 let (hshm, _) = eshm.build();
1490
1491 for start_offset in 0..CHUNK_SIZE {
1493 let test_len = 64; let test_data: Vec<u8> = (0..test_len).map(|i| (i + start_offset) as u8).collect();
1495
1496 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1498
1499 let mut read_buf = vec![0u8; test_len];
1501 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1502
1503 assert_eq!(
1504 test_data, read_buf,
1505 "Mismatch at alignment offset {}",
1506 start_offset
1507 );
1508 }
1509 }
1510
1511 #[test]
1513 fn copy_small_lengths() {
1514 let mem_size: usize = 4096;
1515 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1516 let (hshm, _) = eshm.build();
1517
1518 for len in 0..CHUNK_SIZE {
1519 let test_data: Vec<u8> = (0..len).map(|i| i as u8).collect();
1520
1521 hshm.copy_from_slice(&test_data, 0).unwrap();
1522
1523 let mut read_buf = vec![0u8; len];
1524 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1525
1526 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1527 }
1528 }
1529
1530 #[test]
1532 fn copy_non_aligned_lengths() {
1533 let mem_size: usize = 4096;
1534 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1535 let (hshm, _) = eshm.build();
1536
1537 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1539
1540 for &len in &test_lengths {
1541 let test_data: Vec<u8> = (0..len).map(|i| (i % 256) as u8).collect();
1542
1543 hshm.copy_from_slice(&test_data, 0).unwrap();
1544
1545 let mut read_buf = vec![0u8; len];
1546 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1547
1548 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1549 }
1550 }
1551
1552 #[test]
1554 fn copy_exact_chunk_size() {
1555 let mem_size: usize = 4096;
1556 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1557 let (hshm, _) = eshm.build();
1558
1559 let test_data: Vec<u8> = (0..CHUNK_SIZE).map(|i| i as u8).collect();
1560
1561 hshm.copy_from_slice(&test_data, 0).unwrap();
1562
1563 let mut read_buf = vec![0u8; CHUNK_SIZE];
1564 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1565
1566 assert_eq!(test_data, read_buf);
1567 }
1568
1569 #[test]
1571 fn fill_with_various_alignments() {
1572 let mem_size: usize = 4096;
1573 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1574 let (mut hshm, _) = eshm.build();
1575
1576 for start_offset in 0..CHUNK_SIZE {
1577 let fill_len = 64;
1578 let fill_value = (start_offset % 256) as u8;
1579
1580 hshm.fill(0, 0, mem_size).unwrap();
1582
1583 hshm.fill(fill_value, start_offset, fill_len).unwrap();
1585
1586 let mut read_buf = vec![0u8; fill_len];
1588 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1589
1590 assert!(
1591 read_buf.iter().all(|&b| b == fill_value),
1592 "Fill mismatch at alignment offset {}",
1593 start_offset
1594 );
1595 }
1596 }
1597
1598 #[test]
1600 fn fill_small_lengths() {
1601 let mem_size: usize = 4096;
1602 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1603 let (mut hshm, _) = eshm.build();
1604
1605 for len in 0..CHUNK_SIZE {
1606 let fill_value = 0xAB;
1607
1608 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1610
1611 let mut read_buf = vec![0u8; len];
1612 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1613
1614 assert!(
1615 read_buf.iter().all(|&b| b == fill_value),
1616 "Fill mismatch for length {}",
1617 len
1618 );
1619 }
1620 }
1621
1622 #[test]
1624 fn fill_non_aligned_lengths() {
1625 let mem_size: usize = 4096;
1626 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1627 let (mut hshm, _) = eshm.build();
1628
1629 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1630
1631 for &len in &test_lengths {
1632 let fill_value = 0xCD;
1633
1634 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1636
1637 let mut read_buf = vec![0u8; len];
1638 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1639
1640 assert!(
1641 read_buf.iter().all(|&b| b == fill_value),
1642 "Fill mismatch for length {}",
1643 len
1644 );
1645 }
1646 }
1647
1648 #[test]
1650 fn copy_edge_cases() {
1651 let mem_size: usize = 4096;
1652 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1653 let (hshm, _) = eshm.build();
1654
1655 let empty: Vec<u8> = vec![];
1657 hshm.copy_from_slice(&empty, 0).unwrap();
1658 let mut read_buf: Vec<u8> = vec![];
1659 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1660 assert!(read_buf.is_empty());
1661
1662 let single = vec![0x42u8];
1664 hshm.copy_from_slice(&single, 0).unwrap();
1665 let mut read_buf = vec![0u8; 1];
1666 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1667 assert_eq!(single, read_buf);
1668 }
1669
1670 #[test]
1672 fn copy_unaligned_start_and_length() {
1673 let mem_size: usize = 4096;
1674 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1675 let (hshm, _) = eshm.build();
1676
1677 let start_offset = 7;
1679 let len = 37;
1680 let test_data: Vec<u8> = (0..len).map(|i| (i * 3) as u8).collect();
1681
1682 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1683
1684 let mut read_buf = vec![0u8; len];
1685 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1686
1687 assert_eq!(test_data, read_buf);
1688 }
1689 }
1690
1691 mod try_pop_buffer_bounds {
1693 use super::*;
1694
1695 #[derive(Debug, PartialEq)]
1696 struct RawBytes(Vec<u8>);
1697
1698 impl TryFrom<&[u8]> for RawBytes {
1699 type Error = String;
1700 fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
1701 Ok(RawBytes(value.to_vec()))
1702 }
1703 }
1704
1705 fn make_buffer(mem_size: usize) -> super::super::HostSharedMemory {
1707 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1708 let (hshm, _) = eshm.build();
1709 hshm.write::<u64>(0, 8u64).unwrap();
1710 hshm
1711 }
1712
1713 #[test]
1714 fn normal_push_pop_roundtrip() {
1715 let mem_size = 4096;
1716 let mut hshm = make_buffer(mem_size);
1717
1718 let payload = b"hello";
1720 let mut data = Vec::new();
1721 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1722 data.extend_from_slice(payload);
1723
1724 hshm.push_buffer(0, mem_size, &data).unwrap();
1725 let result: RawBytes = hshm.try_pop_buffer_into(0, mem_size).unwrap();
1726 assert_eq!(result.0, data);
1727 }
1728
1729 #[test]
1730 fn malicious_flatbuffer_size_prefix() {
1731 let mem_size = 4096;
1732 let mut hshm = make_buffer(mem_size);
1733
1734 let payload = b"small";
1735 let mut data = Vec::new();
1736 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1737 data.extend_from_slice(payload);
1738 hshm.push_buffer(0, mem_size, &data).unwrap();
1739
1740 hshm.write::<u32>(8, 0xFFFF_FFFBu32).unwrap(); let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1744 let err_msg = format!("{}", result.unwrap_err());
1745 assert!(
1746 err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 4294967295 bytes but the element slot is only 9 bytes"),
1747 "Unexpected error message: {}",
1748 err_msg
1749 );
1750 }
1751
1752 #[test]
1753 fn malicious_element_offset_too_small() {
1754 let mem_size = 4096;
1755 let mut hshm = make_buffer(mem_size);
1756
1757 let payload = b"test";
1758 let mut data = Vec::new();
1759 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1760 data.extend_from_slice(payload);
1761 hshm.push_buffer(0, mem_size, &data).unwrap();
1762
1763 hshm.write::<u64>(16, 0u64).unwrap();
1765
1766 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1767 let err_msg = format!("{}", result.unwrap_err());
1768 assert!(
1769 err_msg.contains(
1770 "Corrupt buffer back-pointer: element offset 0 is outside valid range [8, 8]"
1771 ),
1772 "Unexpected error message: {}",
1773 err_msg
1774 );
1775 }
1776
1777 #[test]
1778 fn malicious_element_offset_past_stack_pointer() {
1779 let mem_size = 4096;
1780 let mut hshm = make_buffer(mem_size);
1781
1782 let payload = b"test";
1783 let mut data = Vec::new();
1784 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1785 data.extend_from_slice(payload);
1786 hshm.push_buffer(0, mem_size, &data).unwrap();
1787
1788 hshm.write::<u64>(16, 9999u64).unwrap();
1790
1791 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1792 let err_msg = format!("{}", result.unwrap_err());
1793 assert!(
1794 err_msg.contains(
1795 "Corrupt buffer back-pointer: element offset 9999 is outside valid range [8, 8]"
1796 ),
1797 "Unexpected error message: {}",
1798 err_msg
1799 );
1800 }
1801
1802 #[test]
1803 fn malicious_flatbuffer_size_off_by_one() {
1804 let mem_size = 4096;
1805 let mut hshm = make_buffer(mem_size);
1806
1807 let payload = b"abcd";
1808 let mut data = Vec::new();
1809 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1810 data.extend_from_slice(payload);
1811 hshm.push_buffer(0, mem_size, &data).unwrap();
1812
1813 hshm.write::<u32>(8, 5u32).unwrap(); let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1817 let err_msg = format!("{}", result.unwrap_err());
1818 assert!(
1819 err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 9 bytes but the element slot is only 8 bytes"),
1820 "Unexpected error message: {}",
1821 err_msg
1822 );
1823 }
1824
1825 #[test]
1828 fn back_pointer_near_stack_pointer_underflow() {
1829 let mem_size = 4096;
1830 let mut hshm = make_buffer(mem_size);
1831
1832 let payload = b"test";
1833 let mut data = Vec::new();
1834 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1835 data.extend_from_slice(payload);
1836 hshm.push_buffer(0, mem_size, &data).unwrap();
1837
1838 hshm.write::<u64>(16, 23u64).unwrap();
1840
1841 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1842 let err_msg = format!("{}", result.unwrap_err());
1843 assert!(
1844 err_msg.contains(
1845 "Corrupt buffer back-pointer: element offset 23 is outside valid range [8, 8]"
1846 ),
1847 "Unexpected error message: {}",
1848 err_msg
1849 );
1850 }
1851
1852 #[test]
1854 fn size_prefix_u32_overflow() {
1855 let mem_size = 4096;
1856 let mut hshm = make_buffer(mem_size);
1857
1858 let payload = b"test";
1859 let mut data = Vec::new();
1860 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1861 data.extend_from_slice(payload);
1862 hshm.push_buffer(0, mem_size, &data).unwrap();
1863
1864 hshm.write::<u32>(8, 0xFFFF_FFFDu32).unwrap();
1866
1867 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1868 let err_msg = format!("{}", result.unwrap_err());
1869 assert!(
1870 err_msg.contains("Corrupt buffer size prefix: value 4294967293 overflows when adding 4-byte header"),
1871 "Unexpected error message: {}",
1872 err_msg
1873 );
1874 }
1875 }
1876
1877 #[cfg(target_os = "linux")]
1878 mod guard_page_crash_test {
1879 use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
1880
1881 const TEST_EXIT_CODE: u8 = 211; fn setup_signal_handler() {
1887 unsafe {
1888 signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
1889 std::process::exit(TEST_EXIT_CODE.into());
1890 })
1891 .unwrap();
1892 }
1893 }
1894
1895 #[test]
1896 #[ignore] fn read() {
1898 setup_signal_handler();
1899
1900 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1901 let (hshm, _) = eshm.build();
1902 let guard_page_ptr = hshm.raw_ptr();
1903 unsafe { std::ptr::read_volatile(guard_page_ptr) };
1904 }
1905
1906 #[test]
1907 #[ignore] fn write() {
1909 setup_signal_handler();
1910
1911 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1912 let (hshm, _) = eshm.build();
1913 let guard_page_ptr = hshm.raw_ptr();
1914 unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
1915 }
1916
1917 #[test]
1918 #[ignore] fn exec() {
1920 setup_signal_handler();
1921
1922 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1923 let (hshm, _) = eshm.build();
1924 let guard_page_ptr = hshm.raw_ptr();
1925 let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
1926 func();
1927 }
1928
1929 #[test]
1931 #[cfg_attr(miri, ignore)] fn guard_page_testing_shim() {
1933 let tests = vec!["read", "write", "exec"];
1934 for test in tests {
1935 let triple = std::env::var("TARGET_TRIPLE").ok();
1936 let target_args = if let Some(triple) = triple.filter(|t| !t.is_empty()) {
1937 vec!["--target".to_string(), triple.to_string()]
1938 } else {
1939 vec![]
1940 };
1941 let output = std::process::Command::new("cargo")
1942 .args(["test", "-p", "hyperlight-host", "--lib"])
1943 .args(target_args)
1944 .args(["--", "--ignored", test])
1945 .stdin(std::process::Stdio::null())
1946 .output()
1947 .expect("Unable to launch tests");
1948 let exit_code = output.status.code();
1949 if exit_code != Some(TEST_EXIT_CODE.into()) {
1950 eprintln!("=== Guard Page test '{}' failed ===", test);
1951 eprintln!("Exit code: {:?} (expected {})", exit_code, TEST_EXIT_CODE);
1952 eprintln!("=== STDOUT ===");
1953 eprintln!("{}", String::from_utf8_lossy(&output.stdout));
1954 eprintln!("=== STDERR ===");
1955 eprintln!("{}", String::from_utf8_lossy(&output.stderr));
1956 panic!(
1957 "Guard Page test failed: {} (exit code {:?}, expected {})",
1958 test, exit_code, TEST_EXIT_CODE
1959 );
1960 }
1961 }
1962 }
1963 }
1964}