1use std::any::type_name;
18use std::ffi::c_void;
19use std::io::Error;
20use std::mem::{align_of, size_of};
21#[cfg(target_os = "linux")]
22use std::ptr::null_mut;
23use std::sync::{Arc, RwLock};
24
25use hyperlight_common::mem::PAGE_SIZE_USIZE;
26use tracing::{Span, instrument};
27#[cfg(target_os = "windows")]
28use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
29#[cfg(target_os = "windows")]
30use windows::Win32::System::Memory::PAGE_READWRITE;
31#[cfg(target_os = "windows")]
32use windows::Win32::System::Memory::{
33 CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
34 PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
35};
36#[cfg(target_os = "windows")]
37use windows::core::PCSTR;
38
39use super::memory_region::{
40 HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType,
41};
42use crate::HyperlightError::SnapshotSizeMismatch;
43#[cfg(target_os = "windows")]
44use crate::HyperlightError::WindowsAPIError;
45use crate::sandbox::snapshot::Snapshot;
46use crate::{HyperlightError, Result, log_then_return, new_error};
47
/// Verifies that the `$size`-byte range starting at `$offset` lies entirely
/// inside a memory area of `$mem_size` bytes, returning early from the
/// enclosing function with an error on overflow or out-of-range access.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        match $offset.checked_add($size) {
            // The end of the range was computed without overflow and fits.
            Some(end) if end <= $mem_size => {}
            // Either the addition overflowed or the range runs past the end.
            _ => {
                return Err(new_error!(
                    "Cannot read value from offset {} with size {} in memory of size {}",
                    $offset,
                    $size,
                    $mem_size
                ));
            }
        }
    };
}
61
/// Generates a `pub(crate)` method named `$fname` that reads a `$ty` from
/// the given byte offset, interpreting the bytes as little-endian.
/// The read is bounds-checked against the usable slice before indexing.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            // Reject offsets whose end would overflow or exceed the slice.
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
77
/// Generates a `pub(crate)` method named `$fname` that writes a `$ty` at
/// the given byte offset in little-endian encoding.
/// The write is bounds-checked against the usable slice before indexing.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            // Reject offsets whose end would overflow or exceed the slice.
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
91
/// The raw OS-level memory mapping that backs every shared-memory view in
/// this module. The mapping is created with one guard page before and one
/// after the usable region (see `ExclusiveSharedMemory::new`), and is
/// released when the last `Arc<HostMapping>` holder drops (see `Drop` impl).
#[derive(Debug)]
pub struct HostMapping {
    // Base address of the raw mapping, including the leading guard page.
    ptr: *mut u8,
    // Total size in bytes of the raw mapping, guard pages included.
    size: usize,
    // Handle returned by `CreateFileMappingA`; closed when this is dropped.
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}
103
impl Drop for HostMapping {
    /// Releases the mapping via `munmap` over the full raw size
    /// (guard pages included). Errors are intentionally ignored here:
    /// there is no meaningful recovery in a destructor.
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    /// Unmaps the view and closes the underlying file-mapping handle.
    /// Failures are logged rather than propagated, since `drop` cannot
    /// return an error.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        // Close the handle created by `CreateFileMappingA` in `new`.
        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}
131
/// A view of the shared-memory region that is the sole accessor while it
/// exists: it is created before the host/guest split (`build`) or while
/// holding the write side of the shared lock (`with_exclusivity`), so its
/// reads and writes need no further synchronization.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    // Shared ownership of the underlying OS mapping.
    region: Arc<HostMapping>,
}
// SAFETY: the auto-impl is blocked only by the raw pointer inside
// `HostMapping`. The pointer refers to a process-wide mapping that is not
// tied to the creating thread. NOTE(review): soundness also relies on the
// exclusivity discipline above — confirm against the module's locking docs.
unsafe impl Send for ExclusiveSharedMemory {}
142
/// The guest-facing view of the shared-memory region, produced by
/// `ExclusiveSharedMemory::build` alongside a `HostSharedMemory` that
/// shares the same `lock`.
#[derive(Debug)]
pub struct GuestSharedMemory {
    // Shared ownership of the underlying OS mapping.
    region: Arc<HostMapping>,
    // Lock shared with the paired `HostSharedMemory`; `with_exclusivity`
    // takes the write side to exclude concurrent host-side access.
    pub lock: Arc<RwLock<()>>,
}
// SAFETY: blocked from auto-Send only by the raw pointer in `HostMapping`;
// the mapping itself is not thread-affine. NOTE(review): concurrent access
// is mediated by `lock` — confirm all mutation paths go through it.
unsafe impl Send for GuestSharedMemory {}
167
/// The host-facing view of the shared-memory region. Cloneable: clones share
/// both the mapping and the lock, so all copies observe the same bytes.
/// Reads/writes go through volatile per-byte/per-chunk accesses under the
/// read side of `lock` (see `copy_to_slice` / `copy_from_slice` / `fill`).
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    // Shared ownership of the underlying OS mapping.
    region: Arc<HostMapping>,
    // Lock shared with the paired `GuestSharedMemory`.
    lock: Arc<RwLock<()>>,
}
// SAFETY: blocked from auto-Send only by the raw pointer in `HostMapping`;
// the mapping is process-wide. NOTE(review): cross-view races are guarded
// by `lock` plus volatile accesses — confirm this matches the design intent.
unsafe impl Send for HostSharedMemory {}
328
impl ExclusiveSharedMemory {
    /// Creates a new anonymous private mapping of `min_size_bytes` usable
    /// bytes, surrounded by one `PROT_NONE` guard page on each side.
    ///
    /// # Errors
    /// Fails if the size is 0, not page-aligned, overflows when the two
    /// guard pages are added, exceeds `isize::MAX`, or if `mmap`/`mprotect`
    /// fail.
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t,
            size_t,
        };
        #[cfg(not(miri))]
        use libc::{MAP_NORESERVE, PROT_NONE, mprotect};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Two extra pages: one guard page below and one above the region.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Slices larger than isize::MAX are UB to construct; refuse early.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // MAP_NORESERVE avoids committing swap for the full region up front;
        // miri does not model it, so it is dropped under miri.
        #[cfg(not(miri))]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
        #[cfg(miri)]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE;

        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                flags,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(HyperlightError::MmapFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Turn the first and last page into inaccessible guard pages.
        #[cfg(not(miri))]
        {
            let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
            if res != 0 {
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
            let res = unsafe {
                mprotect(
                    (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                    PAGE_SIZE_USIZE,
                    PROT_NONE,
                )
            };
            if res != 0 {
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
        }

        Ok(Self {
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Creates a new pagefile-backed file mapping of `min_size_bytes`
    /// usable bytes, surrounded by one `PAGE_NOACCESS` guard page on each
    /// side.
    ///
    /// # Errors
    /// Fails on the same size validations as the Linux variant, or if
    /// `CreateFileMappingA`/`MapViewOfFile`/`VirtualProtect` fail.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Two extra pages: one guard page below and one above the region.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Slices larger than isize::MAX are UB to construct; refuse early.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // CreateFileMappingA takes the 64-bit size split into two u32s.
        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        let flags = PAGE_READWRITE;

        // INVALID_HANDLE_VALUE requests a pagefile-backed (anonymous) mapping.
        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        let file_map = FILE_MAP_ALL_ACCESS;
        // dwNumberOfBytesToMap == 0 maps the entire file-mapping object.
        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Required out-parameter for VirtualProtect; the old flags are unused.
        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // Make the first page a guard page.
        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        // Make the last page a guard page.
        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    /// Returns the usable region (guard pages excluded) as a mutable byte
    /// slice. Sound because `Self` is the sole accessor by construction.
    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Returns the usable region (guard pages excluded) as an immutable
    /// byte slice.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copies the entire usable region into a fresh `Vec<u8>` (test only).
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    #[cfg(test)]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies `src` into the region starting at `offset`, after a bounds
    /// check against the usable size.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    // Bounds-checked little-endian readers for the usual integer widths.
    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    // Bounds-checked little-endian writers for the usual integer widths.
    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Consumes the exclusive view and splits it into a host view and a
    /// guest view that share the same mapping and the same `RwLock`.
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        let hshm = HostSharedMemory {
            region: self.region.clone(),
            lock: lock.clone(),
        };
        (
            hshm,
            GuestSharedMemory {
                region: self.region.clone(),
                lock,
            },
        )
    }

    /// Returns the Windows file-mapping handle backing this region.
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }

    /// Builds a `HostSharedMemory` over the same mapping with a fresh,
    /// unshared lock (test only — bypasses the usual `build` pairing).
    #[cfg(all(test, feature = "nanvix-unstable"))]
    pub(crate) fn as_host_shared_memory(&self) -> HostSharedMemory {
        let lock = Arc::new(RwLock::new(()));
        HostSharedMemory {
            region: self.region.clone(),
            lock,
        }
    }
}
682
impl GuestSharedMemory {
    /// Builds the `MemoryRegion` describing where this shared memory should
    /// be mapped into the guest, starting at `guest_base`.
    ///
    /// Panics if `region_type` is anything other than `Scratch` or
    /// `Snapshot`, since only those two kinds are backed by this mapping.
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        let flags = match region_type {
            MemoryRegionType::Scratch => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            MemoryRegionType::Snapshot => {
                // Snapshot memory is normally mapped read-only to the guest;
                // the nanvix-unstable feature additionally allows writes.
                #[cfg(not(feature = "nanvix-unstable"))]
                {
                    MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE
                }
                #[cfg(feature = "nanvix-unstable")]
                {
                    MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
                }
            }
            #[allow(clippy::panic)]
            _ => panic!(
                "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions"
            ),
        };
        let guest_base = guest_base as usize;
        // On Windows the host side is described via the file-mapping handle
        // rather than a raw address; see `host_region_base`.
        #[cfg(not(windows))]
        let host_base = self.base_addr();
        #[cfg(windows)]
        let host_base = self.host_region_base();
        let host_end = <HostGuestMemoryRegion as MemoryRegionKind>::add(host_base, self.mem_size());
        MemoryRegion {
            guest_region: guest_base..(guest_base + self.mem_size()),
            host_region: host_base..host_end,
            region_type,
            flags,
        }
    }
}
737
/// Common behavior shared by all three shared-memory views. Addresses and
/// sizes exposed here distinguish the *usable* region (between the guard
/// pages) from the *raw* mapping (guard pages included).
pub trait SharedMemory {
    /// The underlying OS mapping backing this view.
    fn region(&self) -> &HostMapping;

    /// Host address of the first usable byte (raw base + one guard page).
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Pointer to the first usable byte (raw base + one guard page).
    fn base_ptr(&self) -> *mut u8 {
        self.region().ptr.wrapping_add(PAGE_SIZE_USIZE)
    }

    /// Size of the usable region: raw size minus both guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Pointer to the raw mapping, including the leading guard page.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Size of the raw mapping, guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Windows-only: describes the usable region in terms of the backing
    /// file-mapping handle plus an offset past the leading guard page.
    #[cfg(target_os = "windows")]
    fn host_region_base(&self) -> super::memory_region::HostRegionBase {
        super::memory_region::HostRegionBase {
            from_handle: self.region().handle.into(),
            handle_base: self.region().ptr as usize,
            handle_size: self.region().size,
            offset: PAGE_SIZE_USIZE,
        }
    }

    /// Runs `f` with an exclusive view of this memory; implementations
    /// acquire whatever lock is needed to guarantee sole access.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;

    /// Overwrites the entire usable region with the snapshot's bytes.
    ///
    /// # Errors
    /// Fails if the snapshot size does not exactly match `mem_size`, or if
    /// exclusivity cannot be obtained.
    fn restore_from_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
        if snapshot.memory().len() != self.mem_size() {
            return Err(SnapshotSizeMismatch(self.mem_size(), snapshot.mem_size()));
        }
        // `with_exclusivity` yields Result<Result<()>>; `?` unwraps the
        // outer locking result, leaving the inner copy result as the value.
        self.with_exclusivity(|e| e.copy_from_slice(snapshot.memory(), 0))?
    }

    /// Zeroes the usable region.
    fn zero(&mut self) -> Result<()> {
        self.with_exclusivity(|e| {
            #[allow(unused_mut)] let mut do_copy = true;
            // Fast path: ask the kernel to drop the anonymous pages; they
            // read back as zero without an explicit memset. Falls back to
            // `fill(0)` if madvise reports failure.
            // NOTE(review): gated to kvm-without-mshv3 builds — presumably
            // because other hypervisor backends pin these pages; confirm.
            #[cfg(all(target_os = "linux", feature = "kvm", not(any(feature = "mshv3"))))]
            unsafe {
                let ret = libc::madvise(
                    e.region.ptr as *mut libc::c_void,
                    e.region.size,
                    libc::MADV_DONTNEED,
                );
                if ret == 0 {
                    do_copy = false;
                }
            }
            if do_copy {
                e.as_mut_slice().fill(0);
            }
        })
    }
}
836
837impl SharedMemory for ExclusiveSharedMemory {
838 fn region(&self) -> &HostMapping {
839 &self.region
840 }
841 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
842 &mut self,
843 f: F,
844 ) -> Result<T> {
845 Ok(f(self))
846 }
847}
848
849impl SharedMemory for GuestSharedMemory {
850 fn region(&self) -> &HostMapping {
851 &self.region
852 }
853 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
854 &mut self,
855 f: F,
856 ) -> Result<T> {
857 let guard = self
858 .lock
859 .try_write()
860 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
861 let mut excl = ExclusiveSharedMemory {
862 region: self.region.clone(),
863 };
864 let ret = f(&mut excl);
865 drop(excl);
866 drop(guard);
867 Ok(ret)
868 }
869}
870
/// Marker trait for types for which *every* bit pattern is a valid value,
/// making it sound to materialize one from arbitrary sandbox bytes (see
/// `HostSharedMemory::read`).
///
/// # Safety
/// Implementors must guarantee that any `size_of::<Self>()` bytes form a
/// valid instance — no padding, no invalid bit patterns (so e.g. `bool`,
/// `char`, and references must never implement this).
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
889
impl HostSharedMemory {
    /// Reads a `T` from `offset` by copying its bytes out of shared memory.
    /// Sound for any `T: AllValid`, since every bit pattern is valid.
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                // View the uninitialized storage as a byte slice and fill it
                // from shared memory; on error we return before assume_init.
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        }
    }

    /// Writes `data` at `offset` by copying its raw bytes into shared memory.
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            // View `data` as a byte slice; valid for any AllValid type.
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copies bytes out of shared memory into `slice`, using volatile reads
    /// (the guest may be mutating the region concurrently).
    ///
    /// Strategy: byte-by-byte until the source pointer is u128-aligned,
    /// then aligned u128 chunks, then a byte tail.
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        // Read lock: excludes `with_exclusivity` (write side), but allows
        // other host-side readers/writers concurrently.
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: advance until `base + i` is 16-byte aligned (or we run out).
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        // Body: aligned volatile u128 reads; destination may be unaligned,
        // hence write_unaligned.
        let dst = slice.as_mut_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = (base.add(i) as *const u128).read_volatile();
                std::ptr::write_unaligned(dst.add(i) as *mut u128, value);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes shorter than one chunk.
        while i < len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Copies `slice` into shared memory at `offset`, using volatile writes.
    /// Same head/body/tail alignment strategy as `copy_to_slice`.
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        // Read lock by design: only `with_exclusivity` takes the write side.
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: byte writes until the destination is 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        // Body: unaligned source reads, aligned volatile u128 writes.
        let src = slice.as_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = std::ptr::read_unaligned(src.add(i) as *const u128);
                (base.add(i) as *mut u128).write_volatile(value);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes shorter than one chunk.
        while i < len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Fills `len` bytes starting at `offset` with `value`, using volatile
    /// writes with the same head/body/tail alignment strategy as the copies.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        // A u128 whose every byte is `value`, for the chunked body writes.
        let value_u128 = u128::from_ne_bytes([value; CHUNK]);
        let mut i = 0;

        // Head: byte writes until the destination is 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        // Body: aligned volatile u128 writes.
        while i + CHUNK <= len {
            unsafe {
                (base.add(i) as *mut u128).write_volatile(value_u128);
            }
            i += CHUNK;
        }

        // Tail: remaining bytes shorter than one chunk.
        while i < len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Pushes `data` onto the stack-like buffer at `buffer_start_offset`.
    ///
    /// Buffer layout (all offsets relative to `buffer_start_offset`): the
    /// first 8 bytes hold the current stack pointer; each pushed element is
    /// its raw bytes followed by an 8-byte back-pointer recording the stack
    /// pointer value before the push.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        // The stack pointer must sit past the 8-byte header and inside the buffer.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        // Element bytes plus the trailing 8-byte back-pointer.
        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // Write the element, then the back-pointer (the old stack pointer).
        self.copy_from_slice(data, stack_pointer_abs)?;
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // Advance the stack pointer past element + back-pointer.
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the most recently pushed element (expected to be a 4-byte
    /// size-prefixed flatbuffer) from the stack-like buffer and converts it
    /// to `T`. The popped bytes are zeroed afterwards.
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        // A popped buffer must contain at least the 8-byte header plus an
        // 8-byte back-pointer, hence the minimum of 16.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // The back-pointer sits in the 8 bytes just below the stack pointer.
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // A valid element start lies at or above the 8-byte header and
        // leaves room for at least the back-pointer below the stack pointer.
        if last_element_offset_rel > stack_pointer_rel.saturating_sub(16)
            || last_element_offset_rel < 8
        {
            return Err(new_error!(
                "Corrupt buffer back-pointer: element offset {} is outside valid range [8, {}].",
                last_element_offset_rel,
                stack_pointer_rel.saturating_sub(16),
            ));
        }

        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Bytes between element start and stack pointer, minus back-pointer.
        let max_element_size = stack_pointer_rel - last_element_offset_rel - 8;

        // Flatbuffer size prefix counts the payload only; add its own 4 bytes.
        let fb_buffer_size = {
            let raw_prefix = self.read::<u32>(last_element_offset_abs)?;
            let total = raw_prefix.checked_add(4).ok_or_else(|| {
                new_error!(
                    "Corrupt buffer size prefix: value {} overflows when adding 4-byte header.",
                    raw_prefix
                )
            })?;
            usize::try_from(total)
        }?;

        if fb_buffer_size > max_element_size {
            return Err(new_error!(
                "Corrupt buffer size prefix: flatbuffer claims {} bytes but the element slot is only {} bytes.",
                fb_buffer_size,
                max_element_size
            ));
        }

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // Rewind the stack pointer to the popped element's start …
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // … and scrub the popped element plus its back-pointer.
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}
1213
1214impl SharedMemory for HostSharedMemory {
1215 fn region(&self) -> &HostMapping {
1216 &self.region
1217 }
1218 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
1219 &mut self,
1220 f: F,
1221 ) -> Result<T> {
1222 let guard = self
1223 .lock
1224 .try_write()
1225 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
1226 let mut excl = ExclusiveSharedMemory {
1227 region: self.region.clone(),
1228 };
1229 let ret = f(&mut excl);
1230 drop(excl);
1231 drop(guard);
1232 Ok(ret)
1233 }
1234}
1235
1236#[cfg(test)]
1237mod tests {
1238 use hyperlight_common::mem::PAGE_SIZE_USIZE;
1239 #[cfg(not(miri))]
1240 use proptest::prelude::*;
1241
1242 #[cfg(not(miri))]
1243 use super::HostSharedMemory;
1244 use super::{ExclusiveSharedMemory, SharedMemory};
1245 use crate::Result;
1246 #[cfg(not(miri))]
1247 use crate::mem::shared_mem_tests::read_write_test_suite;
1248
1249 #[test]
1250 fn fill() {
1251 let mem_size: usize = 4096;
1252 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1253 let (mut hshm, _) = eshm.build();
1254
1255 hshm.fill(1, 0, 1024).unwrap();
1256 hshm.fill(2, 1024, 1024).unwrap();
1257 hshm.fill(3, 2048, 1024).unwrap();
1258 hshm.fill(4, 3072, 1024).unwrap();
1259
1260 let vec = hshm
1261 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1262 .unwrap();
1263
1264 assert!(vec[0..1024].iter().all(|&x| x == 1));
1265 assert!(vec[1024..2048].iter().all(|&x| x == 2));
1266 assert!(vec[2048..3072].iter().all(|&x| x == 3));
1267 assert!(vec[3072..4096].iter().all(|&x| x == 4));
1268
1269 hshm.fill(5, 0, 4096).unwrap();
1270
1271 let vec2 = hshm
1272 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1273 .unwrap();
1274 assert!(vec2.iter().all(|&x| x == 5));
1275
1276 assert!(hshm.fill(0, 0, mem_size + 1).is_err());
1277 assert!(hshm.fill(0, mem_size, 1).is_err());
1278 }
1279
1280 #[test]
1283 fn bounds_check_overflow() {
1284 let mem_size: usize = 4096;
1285 let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1286
1287 assert!(eshm.read_i32(usize::MAX).is_err());
1289 assert!(eshm.write_i32(usize::MAX, 0).is_err());
1290 assert!(eshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
1291
1292 let (mut hshm, _) = eshm.build();
1294
1295 assert!(hshm.read::<u8>(usize::MAX).is_err());
1296 assert!(hshm.read::<u64>(usize::MAX - 3).is_err());
1297 assert!(hshm.write::<u8>(usize::MAX, 0).is_err());
1298 assert!(hshm.write::<u64>(usize::MAX - 3, 0).is_err());
1299
1300 let mut buf = [0u8; 1];
1301 assert!(hshm.copy_to_slice(&mut buf, usize::MAX).is_err());
1302 assert!(hshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());
1303
1304 assert!(hshm.fill(0, usize::MAX, 1).is_err());
1305 assert!(hshm.fill(0, 1, usize::MAX).is_err());
1306 }
1307
1308 #[test]
1309 fn copy_into_from() -> Result<()> {
1310 let mem_size: usize = 4096;
1311 let vec_len = 10;
1312 let eshm = ExclusiveSharedMemory::new(mem_size)?;
1313 let (hshm, _) = eshm.build();
1314 let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
1315 hshm.copy_from_slice(&vec, 0)?;
1317
1318 let mut vec2 = vec![0; vec_len];
1319 hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
1321 assert_eq!(vec, vec2);
1322
1323 let offset = mem_size - vec.len();
1324 hshm.copy_from_slice(&vec, offset)?;
1326
1327 let mut vec3 = vec![0; vec_len];
1328 hshm.copy_to_slice(&mut vec3, offset)?;
1330 assert_eq!(vec, vec3);
1331
1332 let offset = mem_size / 2;
1333 hshm.copy_from_slice(&vec, offset)?;
1335
1336 let mut vec4 = vec![0; vec_len];
1337 hshm.copy_to_slice(&mut vec4, offset)?;
1339 assert_eq!(vec, vec4);
1340
1341 let mut vec5 = vec![0; vec_len];
1343 assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
1344
1345 assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
1347
1348 let mut vec6 = vec![0; vec_len];
1350 assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
1351
1352 assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
1354
1355 let mut vec7 = vec![0; mem_size * 2];
1357 assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
1358
1359 assert!(hshm.copy_from_slice(&vec7, 0).is_err());
1361
1362 Ok(())
1363 }
1364
1365 #[cfg(not(miri))]
1367 proptest! {
1368 #[test]
1369 fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
1370 read_write_test_suite(
1371 val,
1372 ExclusiveSharedMemory::new,
1373 Box::new(ExclusiveSharedMemory::read_i32),
1374 Box::new(ExclusiveSharedMemory::write_i32),
1375 )
1376 .unwrap();
1377 read_write_test_suite(
1378 val,
1379 |s| {
1380 let e = ExclusiveSharedMemory::new(s)?;
1381 let (h, _) = e.build();
1382 Ok(h)
1383 },
1384 Box::new(HostSharedMemory::read::<i32>),
1385 Box::new(|h, o, v| h.write::<i32>(o, v)),
1386 )
1387 .unwrap();
1388 }
1389 }
1390
1391 #[test]
1392 fn alloc_fail() {
1393 let gm = ExclusiveSharedMemory::new(0);
1394 assert!(gm.is_err());
1395 let gm = ExclusiveSharedMemory::new(usize::MAX);
1396 assert!(gm.is_err());
1397 }
1398
1399 #[test]
1400 fn clone() {
1401 let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
1402 let (hshm1, _) = eshm.build();
1403 let hshm2 = hshm1.clone();
1404
1405 assert_eq!(hshm1.mem_size(), hshm2.mem_size());
1408 assert_eq!(hshm1.base_addr(), hshm2.base_addr());
1409
1410 hshm1.copy_from_slice(b"a", 0).unwrap();
1413 hshm2.copy_from_slice(b"b", 1).unwrap();
1414
1415 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1418 assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
1419 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1420 }
1421
1422 drop(hshm1);
1425
1426 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1428 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1429 }
1430 hshm2.copy_from_slice(b"c", 2).unwrap();
1431 assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
1432 drop(hshm2);
1433 }
1434
1435 #[test]
1436 fn copy_all_to_vec() {
1437 let mut data = vec![b'a', b'b', b'c'];
1438 data.resize(4096, 0);
1439 let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
1440 eshm.copy_from_slice(data.as_slice(), 0).unwrap();
1441 let ret_vec = eshm.copy_all_to_vec().unwrap();
1442 assert_eq!(data, ret_vec);
1443 }
1444
1445 #[test]
1448 #[cfg(all(target_os = "linux", not(miri)))]
1449 fn test_drop() {
1450 use proc_maps::get_process_maps;
1451
1452 const UNIQUE_SIZE: usize = PAGE_SIZE_USIZE * 17;
1462
1463 let pid = std::process::id();
1464
1465 let eshm = ExclusiveSharedMemory::new(UNIQUE_SIZE).unwrap();
1466 let (hshm1, gshm) = eshm.build();
1467 let hshm2 = hshm1.clone();
1468
1469 let base_ptr = hshm1.base_ptr() as usize;
1471 let mem_size = hshm1.mem_size();
1472
1473 let has_exact_mapping = |ptr: usize, size: usize| -> bool {
1475 get_process_maps(pid.try_into().unwrap())
1476 .unwrap()
1477 .iter()
1478 .any(|m| m.start() == ptr && m.size() == size)
1479 };
1480
1481 assert!(
1483 has_exact_mapping(base_ptr, mem_size),
1484 "shared memory mapping not found at {:#x} with size {}",
1485 base_ptr,
1486 mem_size
1487 );
1488
1489 drop(hshm1);
1491 drop(hshm2);
1492 drop(gshm);
1493
1494 assert!(
1496 !has_exact_mapping(base_ptr, mem_size),
1497 "shared memory mapping still exists at {:#x} with size {} after drop",
1498 base_ptr,
1499 mem_size
1500 );
1501 }
1502
1503 mod alignment_tests {
1507 use super::*;
1508
1509 const CHUNK_SIZE: usize = size_of::<u128>();
1510
1511 #[test]
1513 fn copy_with_various_alignments() {
1514 let mem_size: usize = 4096;
1516 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1517 let (hshm, _) = eshm.build();
1518
1519 for start_offset in 0..CHUNK_SIZE {
1521 let test_len = 64; let test_data: Vec<u8> = (0..test_len).map(|i| (i + start_offset) as u8).collect();
1523
1524 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1526
1527 let mut read_buf = vec![0u8; test_len];
1529 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1530
1531 assert_eq!(
1532 test_data, read_buf,
1533 "Mismatch at alignment offset {}",
1534 start_offset
1535 );
1536 }
1537 }
1538
1539 #[test]
1541 fn copy_small_lengths() {
1542 let mem_size: usize = 4096;
1543 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1544 let (hshm, _) = eshm.build();
1545
1546 for len in 0..CHUNK_SIZE {
1547 let test_data: Vec<u8> = (0..len).map(|i| i as u8).collect();
1548
1549 hshm.copy_from_slice(&test_data, 0).unwrap();
1550
1551 let mut read_buf = vec![0u8; len];
1552 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1553
1554 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1555 }
1556 }
1557
1558 #[test]
1560 fn copy_non_aligned_lengths() {
1561 let mem_size: usize = 4096;
1562 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1563 let (hshm, _) = eshm.build();
1564
1565 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1567
1568 for &len in &test_lengths {
1569 let test_data: Vec<u8> = (0..len).map(|i| (i % 256) as u8).collect();
1570
1571 hshm.copy_from_slice(&test_data, 0).unwrap();
1572
1573 let mut read_buf = vec![0u8; len];
1574 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1575
1576 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1577 }
1578 }
1579
1580 #[test]
1582 fn copy_exact_chunk_size() {
1583 let mem_size: usize = 4096;
1584 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1585 let (hshm, _) = eshm.build();
1586
1587 let test_data: Vec<u8> = (0..CHUNK_SIZE).map(|i| i as u8).collect();
1588
1589 hshm.copy_from_slice(&test_data, 0).unwrap();
1590
1591 let mut read_buf = vec![0u8; CHUNK_SIZE];
1592 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1593
1594 assert_eq!(test_data, read_buf);
1595 }
1596
1597 #[test]
1599 fn fill_with_various_alignments() {
1600 let mem_size: usize = 4096;
1601 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1602 let (mut hshm, _) = eshm.build();
1603
1604 for start_offset in 0..CHUNK_SIZE {
1605 let fill_len = 64;
1606 let fill_value = (start_offset % 256) as u8;
1607
1608 hshm.fill(0, 0, mem_size).unwrap();
1610
1611 hshm.fill(fill_value, start_offset, fill_len).unwrap();
1613
1614 let mut read_buf = vec![0u8; fill_len];
1616 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1617
1618 assert!(
1619 read_buf.iter().all(|&b| b == fill_value),
1620 "Fill mismatch at alignment offset {}",
1621 start_offset
1622 );
1623 }
1624 }
1625
1626 #[test]
1628 fn fill_small_lengths() {
1629 let mem_size: usize = 4096;
1630 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1631 let (mut hshm, _) = eshm.build();
1632
1633 for len in 0..CHUNK_SIZE {
1634 let fill_value = 0xAB;
1635
1636 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1638
1639 let mut read_buf = vec![0u8; len];
1640 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1641
1642 assert!(
1643 read_buf.iter().all(|&b| b == fill_value),
1644 "Fill mismatch for length {}",
1645 len
1646 );
1647 }
1648 }
1649
1650 #[test]
1652 fn fill_non_aligned_lengths() {
1653 let mem_size: usize = 4096;
1654 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1655 let (mut hshm, _) = eshm.build();
1656
1657 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1658
1659 for &len in &test_lengths {
1660 let fill_value = 0xCD;
1661
1662 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1664
1665 let mut read_buf = vec![0u8; len];
1666 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1667
1668 assert!(
1669 read_buf.iter().all(|&b| b == fill_value),
1670 "Fill mismatch for length {}",
1671 len
1672 );
1673 }
1674 }
1675
1676 #[test]
1678 fn copy_edge_cases() {
1679 let mem_size: usize = 4096;
1680 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1681 let (hshm, _) = eshm.build();
1682
1683 let empty: Vec<u8> = vec![];
1685 hshm.copy_from_slice(&empty, 0).unwrap();
1686 let mut read_buf: Vec<u8> = vec![];
1687 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1688 assert!(read_buf.is_empty());
1689
1690 let single = vec![0x42u8];
1692 hshm.copy_from_slice(&single, 0).unwrap();
1693 let mut read_buf = vec![0u8; 1];
1694 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1695 assert_eq!(single, read_buf);
1696 }
1697
1698 #[test]
1700 fn copy_unaligned_start_and_length() {
1701 let mem_size: usize = 4096;
1702 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1703 let (hshm, _) = eshm.build();
1704
1705 let start_offset = 7;
1707 let len = 37;
1708 let test_data: Vec<u8> = (0..len).map(|i| (i * 3) as u8).collect();
1709
1710 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1711
1712 let mut read_buf = vec![0u8; len];
1713 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1714
1715 assert_eq!(test_data, read_buf);
1716 }
1717 }
1718
1719 mod try_pop_buffer_bounds {
1721 use super::*;
1722
1723 #[derive(Debug, PartialEq)]
1724 struct RawBytes(Vec<u8>);
1725
1726 impl TryFrom<&[u8]> for RawBytes {
1727 type Error = String;
1728 fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
1729 Ok(RawBytes(value.to_vec()))
1730 }
1731 }
1732
1733 fn make_buffer(mem_size: usize) -> super::super::HostSharedMemory {
1735 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1736 let (hshm, _) = eshm.build();
1737 hshm.write::<u64>(0, 8u64).unwrap();
1738 hshm
1739 }
1740
1741 #[test]
1742 fn normal_push_pop_roundtrip() {
1743 let mem_size = 4096;
1744 let mut hshm = make_buffer(mem_size);
1745
1746 let payload = b"hello";
1748 let mut data = Vec::new();
1749 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1750 data.extend_from_slice(payload);
1751
1752 hshm.push_buffer(0, mem_size, &data).unwrap();
1753 let result: RawBytes = hshm.try_pop_buffer_into(0, mem_size).unwrap();
1754 assert_eq!(result.0, data);
1755 }
1756
1757 #[test]
1758 fn malicious_flatbuffer_size_prefix() {
1759 let mem_size = 4096;
1760 let mut hshm = make_buffer(mem_size);
1761
1762 let payload = b"small";
1763 let mut data = Vec::new();
1764 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1765 data.extend_from_slice(payload);
1766 hshm.push_buffer(0, mem_size, &data).unwrap();
1767
1768 hshm.write::<u32>(8, 0xFFFF_FFFBu32).unwrap(); let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1772 let err_msg = format!("{}", result.unwrap_err());
1773 assert!(
1774 err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 4294967295 bytes but the element slot is only 9 bytes"),
1775 "Unexpected error message: {}",
1776 err_msg
1777 );
1778 }
1779
1780 #[test]
1781 fn malicious_element_offset_too_small() {
1782 let mem_size = 4096;
1783 let mut hshm = make_buffer(mem_size);
1784
1785 let payload = b"test";
1786 let mut data = Vec::new();
1787 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1788 data.extend_from_slice(payload);
1789 hshm.push_buffer(0, mem_size, &data).unwrap();
1790
1791 hshm.write::<u64>(16, 0u64).unwrap();
1793
1794 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1795 let err_msg = format!("{}", result.unwrap_err());
1796 assert!(
1797 err_msg.contains(
1798 "Corrupt buffer back-pointer: element offset 0 is outside valid range [8, 8]"
1799 ),
1800 "Unexpected error message: {}",
1801 err_msg
1802 );
1803 }
1804
1805 #[test]
1806 fn malicious_element_offset_past_stack_pointer() {
1807 let mem_size = 4096;
1808 let mut hshm = make_buffer(mem_size);
1809
1810 let payload = b"test";
1811 let mut data = Vec::new();
1812 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1813 data.extend_from_slice(payload);
1814 hshm.push_buffer(0, mem_size, &data).unwrap();
1815
1816 hshm.write::<u64>(16, 9999u64).unwrap();
1818
1819 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1820 let err_msg = format!("{}", result.unwrap_err());
1821 assert!(
1822 err_msg.contains(
1823 "Corrupt buffer back-pointer: element offset 9999 is outside valid range [8, 8]"
1824 ),
1825 "Unexpected error message: {}",
1826 err_msg
1827 );
1828 }
1829
1830 #[test]
1831 fn malicious_flatbuffer_size_off_by_one() {
1832 let mem_size = 4096;
1833 let mut hshm = make_buffer(mem_size);
1834
1835 let payload = b"abcd";
1836 let mut data = Vec::new();
1837 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1838 data.extend_from_slice(payload);
1839 hshm.push_buffer(0, mem_size, &data).unwrap();
1840
1841 hshm.write::<u32>(8, 5u32).unwrap(); let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1845 let err_msg = format!("{}", result.unwrap_err());
1846 assert!(
1847 err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 9 bytes but the element slot is only 8 bytes"),
1848 "Unexpected error message: {}",
1849 err_msg
1850 );
1851 }
1852
1853 #[test]
1856 fn back_pointer_near_stack_pointer_underflow() {
1857 let mem_size = 4096;
1858 let mut hshm = make_buffer(mem_size);
1859
1860 let payload = b"test";
1861 let mut data = Vec::new();
1862 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1863 data.extend_from_slice(payload);
1864 hshm.push_buffer(0, mem_size, &data).unwrap();
1865
1866 hshm.write::<u64>(16, 23u64).unwrap();
1868
1869 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1870 let err_msg = format!("{}", result.unwrap_err());
1871 assert!(
1872 err_msg.contains(
1873 "Corrupt buffer back-pointer: element offset 23 is outside valid range [8, 8]"
1874 ),
1875 "Unexpected error message: {}",
1876 err_msg
1877 );
1878 }
1879
1880 #[test]
1882 fn size_prefix_u32_overflow() {
1883 let mem_size = 4096;
1884 let mut hshm = make_buffer(mem_size);
1885
1886 let payload = b"test";
1887 let mut data = Vec::new();
1888 data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
1889 data.extend_from_slice(payload);
1890 hshm.push_buffer(0, mem_size, &data).unwrap();
1891
1892 hshm.write::<u32>(8, 0xFFFF_FFFDu32).unwrap();
1894
1895 let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
1896 let err_msg = format!("{}", result.unwrap_err());
1897 assert!(
1898 err_msg.contains("Corrupt buffer size prefix: value 4294967293 overflows when adding 4-byte header"),
1899 "Unexpected error message: {}",
1900 err_msg
1901 );
1902 }
1903 }
1904
1905 #[cfg(target_os = "linux")]
1906 mod guard_page_crash_test {
1907 use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
1908
1909 const TEST_EXIT_CODE: u8 = 211; fn setup_signal_handler() {
1915 unsafe {
1916 signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
1917 std::process::exit(TEST_EXIT_CODE.into());
1918 })
1919 .unwrap();
1920 }
1921 }
1922
1923 #[test]
1924 #[ignore] fn read() {
1926 setup_signal_handler();
1927
1928 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1929 let (hshm, _) = eshm.build();
1930 let guard_page_ptr = hshm.raw_ptr();
1931 unsafe { std::ptr::read_volatile(guard_page_ptr) };
1932 }
1933
1934 #[test]
1935 #[ignore] fn write() {
1937 setup_signal_handler();
1938
1939 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1940 let (hshm, _) = eshm.build();
1941 let guard_page_ptr = hshm.raw_ptr();
1942 unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
1943 }
1944
1945 #[test]
1946 #[ignore] fn exec() {
1948 setup_signal_handler();
1949
1950 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1951 let (hshm, _) = eshm.build();
1952 let guard_page_ptr = hshm.raw_ptr();
1953 let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
1954 func();
1955 }
1956
1957 #[test]
1959 #[cfg_attr(miri, ignore)] fn guard_page_testing_shim() {
1961 let tests = vec!["read", "write", "exec"];
1962 for test in tests {
1963 let triple = std::env::var("TARGET_TRIPLE").ok();
1964 let target_args = if let Some(triple) = triple.filter(|t| !t.is_empty()) {
1965 vec!["--target".to_string(), triple.to_string()]
1966 } else {
1967 vec![]
1968 };
1969 let output = std::process::Command::new("cargo")
1970 .args(["test", "-p", "hyperlight-host", "--lib"])
1971 .args(target_args)
1972 .args(["--", "--ignored", test])
1973 .stdin(std::process::Stdio::null())
1974 .output()
1975 .expect("Unable to launch tests");
1976 let exit_code = output.status.code();
1977 if exit_code != Some(TEST_EXIT_CODE.into()) {
1978 eprintln!("=== Guard Page test '{}' failed ===", test);
1979 eprintln!("Exit code: {:?} (expected {})", exit_code, TEST_EXIT_CODE);
1980 eprintln!("=== STDOUT ===");
1981 eprintln!("{}", String::from_utf8_lossy(&output.stdout));
1982 eprintln!("=== STDERR ===");
1983 eprintln!("{}", String::from_utf8_lossy(&output.stderr));
1984 panic!(
1985 "Guard Page test failed: {} (exit code {:?}, expected {})",
1986 test, exit_code, TEST_EXIT_CODE
1987 );
1988 }
1989 }
1990 }
1991 }
1992}