1use std::any::type_name;
18use std::ffi::c_void;
19use std::io::Error;
20use std::mem::{align_of, size_of};
21#[cfg(target_os = "linux")]
22use std::ptr::null_mut;
23use std::sync::{Arc, RwLock};
24
25use hyperlight_common::mem::PAGE_SIZE_USIZE;
26use tracing::{Span, instrument};
27#[cfg(target_os = "windows")]
28use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
29#[cfg(target_os = "windows")]
30use windows::Win32::System::Memory::PAGE_READWRITE;
31#[cfg(target_os = "windows")]
32use windows::Win32::System::Memory::{
33 CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
34 PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
35};
36#[cfg(target_os = "windows")]
37use windows::core::PCSTR;
38
39use super::memory_region::{
40 HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType,
41};
42#[cfg(target_os = "windows")]
43use crate::HyperlightError::WindowsAPIError;
44use crate::{HyperlightError, Result, log_then_return, new_error};
45
// Fails with a descriptive error unless `[offset, offset + size)` fits
// entirely inside a memory area of `mem_size` bytes.
//
// The end offset is computed with `checked_add`, so an `offset + size`
// that would overflow `usize` is rejected rather than silently wrapping
// (which would otherwise defeat the check). Expands to an early `return`,
// so it may only be used inside functions returning `Result`.
// NOTE(review): the message says "read" but the macro is also used on
// write paths.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset.checked_add($size).is_none_or(|end| end > $mem_size) {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}
59
// Generates a bounds-checked, little-endian reader method named `$fname`
// that returns a `$ty` decoded from `offset` bytes into this memory's
// usable slice.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
75
// Generates a bounds-checked writer method named `$fname` that stores
// `value` as little-endian bytes at `offset` bytes into this memory's
// usable slice.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
89
/// The raw OS-level memory mapping backing a sandbox's shared memory,
/// including the guard pages placed on either side of the usable region.
///
/// Shared between the `*SharedMemory` views via `Arc`; its `Drop` impl
/// releases the mapping, so it is freed only when the last view goes away.
#[derive(Debug)]
pub struct HostMapping {
    /// Base address of the raw mapping (start of the leading guard page).
    ptr: *mut u8,
    /// Total size of the raw mapping in bytes, guard pages included.
    size: usize,
    /// Handle of the pagefile-backed file mapping object (Windows only).
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}
101
impl Drop for HostMapping {
    /// Unmaps the whole region (guard pages included).
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        // SAFETY: `ptr`/`size` describe exactly the mapping created in
        // `ExclusiveSharedMemory::new`, and the surrounding `Arc` ensures
        // this runs only once.
        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    /// Unmaps the mapped view and closes the file-mapping handle. Failures
    /// are logged rather than propagated, since `drop` cannot return errors.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}
129
/// A view of shared memory held through a single owner, so it can be read
/// and written as ordinary slices without locking (see `as_slice` /
/// `as_mut_slice`). Converted into the paired host/guest views by `build`.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    // Keeps the underlying mapping alive for the lifetime of this view.
    region: Arc<HostMapping>,
}
// SAFETY: presumably sound because the raw pointer inside `HostMapping`
// refers to a process-wide mapping valid from any thread — TODO confirm
// against the crate's threading model.
unsafe impl Send for ExclusiveSharedMemory {}
140
/// The guest-facing view of a shared memory region, produced by
/// `ExclusiveSharedMemory::build` alongside its `HostSharedMemory` twin.
#[derive(Debug)]
pub struct GuestSharedMemory {
    // Keeps the underlying mapping alive for the lifetime of this view.
    region: Arc<HostMapping>,
    /// Lock shared with the paired `HostSharedMemory`; its write side is
    /// taken whenever temporary exclusive access is needed
    /// (`with_exclusivity`).
    pub lock: Arc<RwLock<()>>,
}
// SAFETY: presumably sound for the same reason as `ExclusiveSharedMemory`:
// the mapping is valid process-wide — TODO confirm.
unsafe impl Send for GuestSharedMemory {}
165
/// The host-facing view of a shared memory region. Cloneable: clones share
/// the same mapping and the same lock, so all of them observe the same
/// bytes. Accesses use volatile reads/writes under the read side of `lock`.
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    // Keeps the underlying mapping alive for the lifetime of this view.
    region: Arc<HostMapping>,
    // Read side taken for ordinary accesses; write side taken by
    // `with_exclusivity`.
    lock: Arc<RwLock<()>>,
}
// SAFETY: presumably sound for the same reason as `ExclusiveSharedMemory`:
// the mapping is valid process-wide — TODO confirm.
unsafe impl Send for HostSharedMemory {}
326
impl ExclusiveSharedMemory {
    /// Creates an anonymous private mapping large enough for
    /// `min_size_bytes` of usable memory, with one inaccessible guard page
    /// immediately before and one immediately after the usable region so
    /// that stray accesses just outside it fault.
    ///
    /// # Errors
    /// Fails if `min_size_bytes` is 0, the total (including the two guard
    /// pages) overflows, is not page-aligned, or exceeds `isize::MAX`, or
    /// if `mmap`/`mprotect` fail.
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t,
            size_t,
        };
        #[cfg(not(miri))]
        use libc::{MAP_NORESERVE, PROT_NONE, mprotect};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Two extra pages: the leading and trailing guard pages.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Rust allocations and pointer offsets cannot exceed isize::MAX
        // bytes.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // MAP_NORESERVE skips swap reservation; miri does not model it.
        #[cfg(not(miri))]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
        #[cfg(miri)]
        let flags = MAP_ANONYMOUS | MAP_PRIVATE;

        // SAFETY: anonymous mapping (fd -1, offset 0) of a validated size;
        // the result is checked against MAP_FAILED below.
        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                flags,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(HyperlightError::MmapFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Turn the first and last page into PROT_NONE guard pages
        // (skipped under miri, which does not model mprotect).
        #[cfg(not(miri))]
        {
            let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
            if res != 0 {
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
            let res = unsafe {
                mprotect(
                    (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                    PAGE_SIZE_USIZE,
                    PROT_NONE,
                )
            };
            if res != 0 {
                return Err(HyperlightError::MprotectFailed(
                    Error::last_os_error().raw_os_error(),
                ));
            }
        }

        Ok(Self {
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Windows counterpart of `new`: creates a pagefile-backed file
    /// mapping, maps a view of it, and turns the first and last page into
    /// no-access guard pages via `VirtualProtect`.
    ///
    /// # Errors
    /// Same validation as the Linux version, plus failures from
    /// `CreateFileMappingA`/`MapViewOfFile`/`VirtualProtect`.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        // Two extra pages: the leading and trailing guard pages.
        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // Rust allocations and pointer offsets cannot exceed isize::MAX
        // bytes.
        if total_size > isize::MAX as usize {
            return Err(HyperlightError::MemoryRequestTooBig(
                total_size,
                isize::MAX as usize,
            ));
        }

        // CreateFileMappingA takes the 64-bit size split into two DWORDs.
        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        let flags = PAGE_READWRITE;

        // Anonymous (unnamed) mapping backed by the system paging file.
        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Map the whole object (size 0 = entire mapping).
        let file_map = FILE_MAP_ALL_ACCESS;
        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(HyperlightError::MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // VirtualProtect requires an out-param for the old protection; we
        // do not need it.
        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // Make the first page a no-access guard page.
        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        // Make the last page a no-access guard page.
        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    /// The usable region (between the guard pages) as a mutable byte
    /// slice. Sound because `&mut self` on the exclusive view guarantees
    /// no other live reference to these bytes.
    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// The usable region as a shared byte slice.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Test-only helper: copies the entire usable region into a fresh Vec.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    #[cfg(test)]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies `src` into the usable region starting at `offset`, after
    /// bounds-checking the destination range.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    // Bounds-checked little-endian typed accessors over the usable region.
    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Consumes this exclusive view and splits it into the paired
    /// host/guest views, which share a freshly created `RwLock` used to
    /// coordinate temporary exclusive access.
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        let hshm = HostSharedMemory {
            region: self.region.clone(),
            lock: lock.clone(),
        };
        (
            hshm,
            GuestSharedMemory {
                region: self.region.clone(),
                lock,
            },
        )
    }

    /// The Windows file-mapping handle backing this memory.
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }

    /// Test-only helper that wraps this region in a `HostSharedMemory`
    /// without consuming `self`.
    /// NOTE(review): the returned view gets its own lock, so it does not
    /// synchronize with views produced by `build` — confirm intended.
    #[cfg(all(test, feature = "nanvix-unstable"))]
    pub(crate) fn as_host_shared_memory(&self) -> HostSharedMemory {
        let lock = Arc::new(RwLock::new(()));
        HostSharedMemory {
            region: self.region.clone(),
            lock,
        }
    }
}
680
681fn mapping_at(
682 s: &impl SharedMemory,
683 gpa: u64,
684 region_type: MemoryRegionType,
685 flags: MemoryRegionFlags,
686) -> MemoryRegion {
687 let guest_base = gpa as usize;
688
689 MemoryRegion {
690 guest_region: guest_base..(guest_base + s.mem_size()),
691 host_region: s.host_region_base()..s.host_region_end(),
692 region_type,
693 flags,
694 }
695}
696
impl GuestSharedMemory {
    /// Builds the [`MemoryRegion`] describing this memory mapped at
    /// `guest_base` for the given region type.
    ///
    /// Only `Scratch` (and, when the `unshared_snapshot_mem` cfg is set,
    /// `Snapshot`) regions are supported; both are mapped
    /// read/write/execute. Calling with any other region type is a caller
    /// bug and panics.
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        let flags = match region_type {
            MemoryRegionType::Scratch => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            #[cfg(unshared_snapshot_mem)]
            MemoryRegionType::Snapshot => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            #[allow(clippy::panic)]
            _ => panic!(
                "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions"
            ),
        };
        mapping_at(self, guest_base, region_type, flags)
    }
}
729
/// Common accessors shared by all views of a sandbox memory region.
///
/// Implementors only provide the underlying [`HostMapping`]; everything
/// else is derived from it. The `raw_*` accessors cover the whole mapping
/// including the guard pages, while the un-prefixed ones describe only the
/// usable region between them.
pub trait SharedMemory {
    /// The underlying OS mapping (guard pages included).
    fn region(&self) -> &HostMapping;

    /// Host address of the first usable byte (just past the leading guard
    /// page).
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Pointer form of [`Self::base_addr`].
    fn base_ptr(&self) -> *mut u8 {
        self.region().ptr.wrapping_add(PAGE_SIZE_USIZE)
    }

    /// Size of the usable region: the raw mapping minus both guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Start of the raw mapping (i.e. the leading guard page).
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Size of the raw mapping, guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Platform-specific description of where the usable region starts in
    /// the host: a plain address on non-Windows, and a handle-relative
    /// description on Windows.
    fn host_region_base(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        #[cfg(not(windows))]
        {
            self.base_addr()
        }
        #[cfg(windows)]
        {
            super::memory_region::HostRegionBase {
                from_handle: self.region().handle.into(),
                handle_base: self.region().ptr as usize,
                handle_size: self.region().size,
                offset: PAGE_SIZE_USIZE,
            }
        }
    }

    /// One-past-the-end counterpart of [`Self::host_region_base`].
    fn host_region_end(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        <HostGuestMemoryRegion as MemoryRegionKind>::add(self.host_region_base(), self.mem_size())
    }

    /// Runs `f` with temporary exclusive access to this memory and returns
    /// its result. Implementations that share the mapping must acquire
    /// their write lock first.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;

    /// Runs `f` over a read-only view of the full usable region, with the
    /// same exclusivity guarantee as [`Self::with_exclusivity`].
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        self.with_exclusivity(|m| f(m.as_slice()))
    }

    /// Zeroes the entire usable region.
    fn zero(&mut self) -> Result<()> {
        self.with_exclusivity(|e| {
            #[allow(unused_mut)]
            let mut do_copy = true;
            // Fast path on Linux/KVM: MADV_DONTNEED drops the anonymous
            // pages so they read back as zero without touching every byte;
            // if it succeeds we can skip the explicit fill below.
            #[cfg(all(target_os = "linux", feature = "kvm", not(any(feature = "mshv3"))))]
            unsafe {
                let ret = libc::madvise(
                    e.region.ptr as *mut libc::c_void,
                    e.region.size,
                    libc::MADV_DONTNEED,
                );
                if ret == 0 {
                    do_copy = false;
                }
            }
            if do_copy {
                e.as_mut_slice().fill(0);
            }
        })
    }
}
844
845impl SharedMemory for ExclusiveSharedMemory {
846 fn region(&self) -> &HostMapping {
847 &self.region
848 }
849 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
850 &mut self,
851 f: F,
852 ) -> Result<T> {
853 Ok(f(self))
854 }
855}
856
857impl SharedMemory for GuestSharedMemory {
858 fn region(&self) -> &HostMapping {
859 &self.region
860 }
861 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
862 &mut self,
863 f: F,
864 ) -> Result<T> {
865 let guard = self
866 .lock
867 .try_write()
868 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
869 let mut excl = ExclusiveSharedMemory {
870 region: self.region.clone(),
871 };
872 let ret = f(&mut excl);
873 drop(excl);
874 drop(guard);
875 Ok(ret)
876 }
877}
878
/// Marker trait for types for which every possible bit pattern is a valid
/// value, making it sound to materialize one from raw shared-memory bytes.
///
/// # Safety
/// Implementors must guarantee that any `size_of::<T>()` bytes form a
/// valid `T`; otherwise `HostSharedMemory::read` would cause undefined
/// behavior via `MaybeUninit::assume_init`.
pub unsafe trait AllValid {}
// SAFETY: all fixed-width integer types (and arrays of them) accept every
// bit pattern.
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
897
impl HostSharedMemory {
    /// Reads a `T` from `offset` bytes into the usable region.
    ///
    /// Sound because `T: AllValid` guarantees every bit pattern is a valid
    /// `T`, so bytes copied out of shared memory may be `assume_init`ed.
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                // View the uninitialized T as a byte buffer and fill it
                // from shared memory.
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        }
    }

    /// Writes `data` at `offset` bytes into the usable region.
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            // View `data` as raw bytes; valid because T is plain data
            // (AllValid).
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copies `slice.len()` bytes starting at `offset` out of shared
    /// memory into `slice`, using volatile reads since the guest may be
    /// mutating the memory concurrently.
    ///
    /// Takes the read side of the lock, so it may run concurrently with
    /// other host accesses but not with `with_exclusivity`.
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: byte-wise until `base + i` is 16-byte aligned (or the
        // slice is exhausted), so the loop below can use aligned u128
        // reads.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        // Body: aligned 16-byte volatile reads, stored unaligned into the
        // destination slice.
        let dst = slice.as_mut_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = (base.add(i) as *const u128).read_volatile();
                std::ptr::write_unaligned(dst.add(i) as *mut u128, value);
            }
            i += CHUNK;
        }

        // Tail: any bytes left after the last full chunk.
        while i < len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Copies `slice` into shared memory starting at `offset`, using
    /// volatile writes; same head/chunk/tail structure and locking as
    /// [`Self::copy_to_slice`].
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: byte-wise until the destination is 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        // Body: unaligned loads from the source, aligned 16-byte volatile
        // stores into shared memory.
        let src = slice.as_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = std::ptr::read_unaligned(src.add(i) as *const u128);
                (base.add(i) as *mut u128).write_volatile(value);
            }
            i += CHUNK;
        }

        // Tail: any bytes left after the last full chunk.
        while i < len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Sets `len` bytes starting at `offset` to `value`, using volatile
    /// writes; same head/chunk/tail structure and locking as
    /// [`Self::copy_to_slice`].
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        // Sixteen copies of `value`, written one chunk at a time below.
        let value_u128 = u128::from_ne_bytes([value; CHUNK]);
        let mut i = 0;

        // Head: byte-wise until the destination is 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        // Body: aligned 16-byte volatile stores.
        while i + CHUNK <= len {
            unsafe {
                (base.add(i) as *mut u128).write_volatile(value_u128);
            }
            i += CHUNK;
        }

        // Tail: any bytes left after the last full chunk.
        while i < len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Pushes `data` onto the stack-shaped buffer that lives at
    /// `buffer_start_offset` and spans `buffer_size` bytes.
    ///
    /// Buffer layout (offsets relative to `buffer_start_offset`): the
    /// first 8 bytes hold the current stack pointer; each pushed element
    /// is its payload followed by an 8-byte copy of the previous stack
    /// pointer, after which the header is advanced past both.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        // A valid stack pointer is at least 8 (past the header) and at
        // most the buffer size.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        // Payload plus its 8-byte back-pointer must fit in the remainder.
        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // Write the payload at the current top of stack...
        self.copy_from_slice(data, stack_pointer_abs)?;

        // ...then the back-pointer (previous stack pointer) just after it...
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // ...and finally advance the stack pointer past payload + back-pointer.
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the most recently pushed element off the stack-shaped buffer
    /// at `buffer_start_offset` and converts it into a `T`.
    ///
    /// All metadata read from shared memory (stack pointer, back-pointer,
    /// flatbuffer size prefix) is validated before use, since the guest
    /// may have corrupted it. The popped bytes are zeroed afterwards and
    /// the stack pointer is rewound to the previous element.
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        // At least one element must be present: 8-byte header plus an
        // 8-byte back-pointer => minimum stack pointer of 16.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // The 8 bytes just below the stack pointer hold the offset of the
        // last element (i.e. the previous stack pointer).
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // The element must start at or after the header and leave room
        // for its own back-pointer below the stack pointer.
        if last_element_offset_rel > stack_pointer_rel.saturating_sub(16)
            || last_element_offset_rel < 8
        {
            return Err(new_error!(
                "Corrupt buffer back-pointer: element offset {} is outside valid range [8, {}].",
                last_element_offset_rel,
                stack_pointer_rel.saturating_sub(16),
            ));
        }

        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Space the payload may occupy (excluding its back-pointer).
        let max_element_size = stack_pointer_rel - last_element_offset_rel - 8;

        // The payload is a size-prefixed flatbuffer: a u32 length followed
        // by that many bytes, so the total size is prefix + 4.
        let fb_buffer_size = {
            let raw_prefix = self.read::<u32>(last_element_offset_abs)?;
            let total = raw_prefix.checked_add(4).ok_or_else(|| {
                new_error!(
                    "Corrupt buffer size prefix: value {} overflows when adding 4-byte header.",
                    raw_prefix
                )
            })?;
            usize::try_from(total)
        }?;

        if fb_buffer_size > max_element_size {
            return Err(new_error!(
                "Corrupt buffer size prefix: flatbuffer claims {} bytes but the element slot is only {} bytes.",
                fb_buffer_size,
                max_element_size
            ));
        }

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // Rewind the stack pointer to the previous element...
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // ...and scrub the popped payload plus its back-pointer.
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}
1221
1222impl SharedMemory for HostSharedMemory {
1223 fn region(&self) -> &HostMapping {
1224 &self.region
1225 }
1226 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
1227 &mut self,
1228 f: F,
1229 ) -> Result<T> {
1230 let guard = self
1231 .lock
1232 .try_write()
1233 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
1234 let mut excl = ExclusiveSharedMemory {
1235 region: self.region.clone(),
1236 };
1237 let ret = f(&mut excl);
1238 drop(excl);
1239 drop(guard);
1240 Ok(ret)
1241 }
1242}
1243
1244#[cfg(test)]
1245mod tests {
1246 use hyperlight_common::mem::PAGE_SIZE_USIZE;
1247 #[cfg(not(miri))]
1248 use proptest::prelude::*;
1249
1250 #[cfg(not(miri))]
1251 use super::HostSharedMemory;
1252 use super::{ExclusiveSharedMemory, SharedMemory};
1253 use crate::Result;
1254 #[cfg(not(miri))]
1255 use crate::mem::shared_mem_tests::read_write_test_suite;
1256
    /// Fills each 1 KiB quarter of a 4 KiB region with a distinct value,
    /// verifies the pattern, overwrites everything, verifies again, and
    /// finally checks that out-of-bounds fills are rejected.
    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();

        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();

        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();

        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));

        hshm.fill(5, 0, 4096).unwrap();

        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));

        // A length or offset extending past the region must fail.
        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }
1287
    /// Offsets near `usize::MAX` would wrap if the bounds check used an
    /// unchecked addition; every access here must error, not panic or
    /// succeed.
    #[test]
    fn bounds_check_overflow() {
        let mem_size: usize = 4096;
        let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();

        assert!(eshm.read_i32(usize::MAX).is_err());
        assert!(eshm.write_i32(usize::MAX, 0).is_err());
        assert!(eshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());

        let (mut hshm, _) = eshm.build();

        assert!(hshm.read::<u8>(usize::MAX).is_err());
        assert!(hshm.read::<u64>(usize::MAX - 3).is_err());
        assert!(hshm.write::<u8>(usize::MAX, 0).is_err());
        assert!(hshm.write::<u64>(usize::MAX - 3, 0).is_err());

        let mut buf = [0u8; 1];
        assert!(hshm.copy_to_slice(&mut buf, usize::MAX).is_err());
        assert!(hshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());

        assert!(hshm.fill(0, usize::MAX, 1).is_err());
        assert!(hshm.fill(0, 1, usize::MAX).is_err());
    }
1315
    /// Round-trips a small vector through shared memory at the start,
    /// middle, and end of the region, then checks that out-of-bounds and
    /// oversized copies fail in both directions.
    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // Write at offset 0 and read back.
        hshm.copy_from_slice(&vec, 0)?;

        let mut vec2 = vec![0; vec_len];
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);

        // Write flush against the end of the region.
        let offset = mem_size - vec.len();
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec3 = vec![0; vec_len];
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);

        // Write in the middle of the region.
        let offset = mem_size / 2;
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec4 = vec![0; vec_len];
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);

        // Starting exactly at mem_size is already out of bounds.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());

        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());

        // As is starting well past the end.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());

        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());

        // A buffer larger than the whole region can never fit.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());

        assert!(hshm.copy_from_slice(&vec7, 0).is_err());

        Ok(())
    }
1372
    // Property test: round-trips i32 values through both the exclusive and
    // the host view. Skipped under miri, which does not model mmap.
    #[cfg(not(miri))]
    proptest! {
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                // Build a HostSharedMemory from a fresh exclusive region.
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }
1398
1399 #[test]
1400 fn alloc_fail() {
1401 let gm = ExclusiveSharedMemory::new(0);
1402 assert!(gm.is_err());
1403 let gm = ExclusiveSharedMemory::new(usize::MAX);
1404 assert!(gm.is_err());
1405 }
1406
    /// Clones of a `HostSharedMemory` must alias the same bytes: writes
    /// through one clone are visible through the other, and the mapping
    /// stays usable after one clone is dropped.
    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();

        // Both clones describe the same region.
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());

        // Writes through either clone...
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();

        // ...are visible through both.
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }

        drop(hshm1);

        // Dropping one clone must not unmap the shared region.
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }
1442
1443 #[test]
1444 fn copy_all_to_vec() {
1445 let mut data = vec![b'a', b'b', b'c'];
1446 data.resize(4096, 0);
1447 let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
1448 eshm.copy_from_slice(data.as_slice(), 0).unwrap();
1449 let ret_vec = eshm.copy_all_to_vec().unwrap();
1450 assert_eq!(data, ret_vec);
1451 }
1452
    /// Verifies via /proc that the mmap'd region exists while any view is
    /// alive and disappears once the last view is dropped. Uses an unusual
    /// size so the mapping can be identified unambiguously among the
    /// process's maps.
    #[test]
    #[cfg(all(target_os = "linux", not(miri)))]
    fn test_drop() {
        use proc_maps::get_process_maps;

        // 17 pages: unlikely to collide with any unrelated mapping.
        const UNIQUE_SIZE: usize = PAGE_SIZE_USIZE * 17;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(UNIQUE_SIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();

        let base_ptr = hshm1.base_ptr() as usize;
        let mem_size = hshm1.mem_size();

        // True iff some mapping of this process starts at `ptr` with
        // exactly `size` bytes.
        let has_exact_mapping = |ptr: usize, size: usize| -> bool {
            get_process_maps(pid.try_into().unwrap())
                .unwrap()
                .iter()
                .any(|m| m.start() == ptr && m.size() == size)
        };

        // While views are alive the mapping must be present.
        assert!(
            has_exact_mapping(base_ptr, mem_size),
            "shared memory mapping not found at {:#x} with size {}",
            base_ptr,
            mem_size
        );

        // Drop every view; the last drop should munmap the region.
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        assert!(
            !has_exact_mapping(base_ptr, mem_size),
            "shared memory mapping still exists at {:#x} with size {} after drop",
            base_ptr,
            mem_size
        );
    }
1510
1511 mod alignment_tests {
1515 use super::*;
1516
1517 const CHUNK_SIZE: usize = size_of::<u128>();
1518
1519 #[test]
1521 fn copy_with_various_alignments() {
1522 let mem_size: usize = 4096;
1524 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1525 let (hshm, _) = eshm.build();
1526
1527 for start_offset in 0..CHUNK_SIZE {
1529 let test_len = 64; let test_data: Vec<u8> = (0..test_len).map(|i| (i + start_offset) as u8).collect();
1531
1532 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1534
1535 let mut read_buf = vec![0u8; test_len];
1537 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1538
1539 assert_eq!(
1540 test_data, read_buf,
1541 "Mismatch at alignment offset {}",
1542 start_offset
1543 );
1544 }
1545 }
1546
        /// Lengths shorter than one chunk never reach the 16-byte fast
        /// path; each one must still round-trip byte-for-byte.
        #[test]
        fn copy_small_lengths() {
            let mem_size: usize = 4096;
            let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
            let (hshm, _) = eshm.build();

            for len in 0..CHUNK_SIZE {
                let test_data: Vec<u8> = (0..len).map(|i| i as u8).collect();

                hshm.copy_from_slice(&test_data, 0).unwrap();

                let mut read_buf = vec![0u8; len];
                hshm.copy_to_slice(&mut read_buf, 0).unwrap();

                assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
            }
        }
1565
        /// Lengths that exercise the head + chunk + tail combination (not
        /// multiples of the chunk size) must round-trip byte-for-byte.
        #[test]
        fn copy_non_aligned_lengths() {
            let mem_size: usize = 4096;
            let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
            let (hshm, _) = eshm.build();

            let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];

            for &len in &test_lengths {
                let test_data: Vec<u8> = (0..len).map(|i| (i % 256) as u8).collect();

                hshm.copy_from_slice(&test_data, 0).unwrap();

                let mut read_buf = vec![0u8; len];
                hshm.copy_to_slice(&mut read_buf, 0).unwrap();

                assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
            }
        }
1587
1588 #[test]
1590 fn copy_exact_chunk_size() {
1591 let mem_size: usize = 4096;
1592 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1593 let (hshm, _) = eshm.build();
1594
1595 let test_data: Vec<u8> = (0..CHUNK_SIZE).map(|i| i as u8).collect();
1596
1597 hshm.copy_from_slice(&test_data, 0).unwrap();
1598
1599 let mut read_buf = vec![0u8; CHUNK_SIZE];
1600 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1601
1602 assert_eq!(test_data, read_buf);
1603 }
1604
        /// Sweeps every possible alignment of the fill start offset
        /// relative to a 16-byte chunk boundary.
        #[test]
        fn fill_with_various_alignments() {
            let mem_size: usize = 4096;
            let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
            let (mut hshm, _) = eshm.build();

            for start_offset in 0..CHUNK_SIZE {
                let fill_len = 64;
                let fill_value = (start_offset % 256) as u8;

                // Reset the region before each iteration.
                hshm.fill(0, 0, mem_size).unwrap();

                hshm.fill(fill_value, start_offset, fill_len).unwrap();

                let mut read_buf = vec![0u8; fill_len];
                hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();

                assert!(
                    read_buf.iter().all(|&b| b == fill_value),
                    "Fill mismatch at alignment offset {}",
                    start_offset
                );
            }
        }
1633
1634 #[test]
1636 fn fill_small_lengths() {
1637 let mem_size: usize = 4096;
1638 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1639 let (mut hshm, _) = eshm.build();
1640
1641 for len in 0..CHUNK_SIZE {
1642 let fill_value = 0xAB;
1643
1644 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1646
1647 let mut read_buf = vec![0u8; len];
1648 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1649
1650 assert!(
1651 read_buf.iter().all(|&b| b == fill_value),
1652 "Fill mismatch for length {}",
1653 len
1654 );
1655 }
1656 }
1657
1658 #[test]
1660 fn fill_non_aligned_lengths() {
1661 let mem_size: usize = 4096;
1662 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1663 let (mut hshm, _) = eshm.build();
1664
1665 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1666
1667 for &len in &test_lengths {
1668 let fill_value = 0xCD;
1669
1670 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1672
1673 let mut read_buf = vec![0u8; len];
1674 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1675
1676 assert!(
1677 read_buf.iter().all(|&b| b == fill_value),
1678 "Fill mismatch for length {}",
1679 len
1680 );
1681 }
1682 }
1683
1684 #[test]
1686 fn copy_edge_cases() {
1687 let mem_size: usize = 4096;
1688 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1689 let (hshm, _) = eshm.build();
1690
1691 let empty: Vec<u8> = vec![];
1693 hshm.copy_from_slice(&empty, 0).unwrap();
1694 let mut read_buf: Vec<u8> = vec![];
1695 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1696 assert!(read_buf.is_empty());
1697
1698 let single = vec![0x42u8];
1700 hshm.copy_from_slice(&single, 0).unwrap();
1701 let mut read_buf = vec![0u8; 1];
1702 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1703 assert_eq!(single, read_buf);
1704 }
1705
1706 #[test]
1708 fn copy_unaligned_start_and_length() {
1709 let mem_size: usize = 4096;
1710 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1711 let (hshm, _) = eshm.build();
1712
1713 let start_offset = 7;
1715 let len = 37;
1716 let test_data: Vec<u8> = (0..len).map(|i| (i * 3) as u8).collect();
1717
1718 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1719
1720 let mut read_buf = vec![0u8; len];
1721 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1722
1723 assert_eq!(test_data, read_buf);
1724 }
1725 }
1726
    /// Tests that try_pop_buffer_into rejects corrupted buffer metadata
    /// (size prefixes and back-pointers) instead of reading out of bounds.
    mod try_pop_buffer_bounds {
        use super::*;

        /// Captures the popped bytes verbatim, with no flatbuffer decoding,
        /// so tests can inspect exactly what try_pop_buffer_into produced.
        #[derive(Debug, PartialEq)]
        struct RawBytes(Vec<u8>);

        impl TryFrom<&[u8]> for RawBytes {
            type Error = String;
            fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
                Ok(RawBytes(value.to_vec()))
            }
        }

        /// Builds a host view of a fresh `mem_size`-byte shared memory and
        /// writes 8u64 at offset 0 — presumably the buffer-stack pointer,
        /// placed just past an 8-byte header (the error messages below
        /// report the valid element range as [8, 8]) — TODO(review): confirm
        /// against push_buffer's on-disk layout.
        fn make_buffer(mem_size: usize) -> super::super::HostSharedMemory {
            let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
            let (hshm, _) = eshm.build();
            hshm.write::<u64>(0, 8u64).unwrap();
            hshm
        }

        // A well-formed push followed by a pop returns the identical bytes.
        #[test]
        fn normal_push_pop_roundtrip() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            // Buffers carry a little-endian u32 length prefix before the
            // payload.
            let payload = b"hello";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);

            hshm.push_buffer(0, mem_size, &data).unwrap();
            let result: RawBytes = hshm.try_pop_buffer_into(0, mem_size).unwrap();
            assert_eq!(result.0, data);
        }

        // Corrupts the size prefix (stored at offset 8, right after the
        // stack header) so that prefix (0xFFFF_FFFB) + 4-byte header =
        // 0xFFFF_FFFF = 4294967295, far larger than the 9-byte element slot
        // (4-byte prefix + 5-byte payload); the pop must fail cleanly.
        #[test]
        fn malicious_flatbuffer_size_prefix() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"small";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u32>(8, 0xFFFF_FFFBu32).unwrap();
            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 4294967295 bytes but the element slot is only 9 bytes"),
                "Unexpected error message: {}",
                err_msg
            );
        }

        // Corrupts the element back-pointer (at offset 16) to point below
        // the 8-byte header; 0 is outside the valid range [8, 8].
        #[test]
        fn malicious_element_offset_too_small() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u64>(16, 0u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 0 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        // Corrupts the back-pointer to point well past the stack pointer;
        // 9999 is outside the valid range [8, 8].
        #[test]
        fn malicious_element_offset_past_stack_pointer() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u64>(16, 9999u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 9999 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        // Corrupts the size prefix to claim one byte more than the slot
        // holds: prefix 5 + 4-byte header = 9 bytes vs. the 8-byte slot
        // (4-byte prefix + 4-byte "abcd" payload). Off-by-one must be
        // rejected.
        #[test]
        fn malicious_flatbuffer_size_off_by_one() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"abcd";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u32>(8, 5u32).unwrap();
            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 9 bytes but the element slot is only 8 bytes"),
                "Unexpected error message: {}",
                err_msg
            );
        }

        // Corrupts the back-pointer to sit just under the stack pointer so
        // a naive subtraction would underflow; 23 is outside [8, 8].
        #[test]
        fn back_pointer_near_stack_pointer_underflow() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u64>(16, 23u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 23 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        // Corrupts the size prefix with a value (0xFFFF_FFFD = 4294967293)
        // that overflows u32 when the 4-byte header is added; the overflow
        // itself must be detected, not wrapped.
        #[test]
        fn size_prefix_u32_overflow() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            hshm.write::<u32>(8, 0xFFFF_FFFDu32).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: value 4294967293 overflows when adding 4-byte header"),
                "Unexpected error message: {}",
                err_msg
            );
        }
    }
1912
1913 #[cfg(target_os = "linux")]
1914 mod guard_page_crash_test {
1915 use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
1916
1917 const TEST_EXIT_CODE: u8 = 211; fn setup_signal_handler() {
1923 unsafe {
1924 signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
1925 std::process::exit(TEST_EXIT_CODE.into());
1926 })
1927 .unwrap();
1928 }
1929 }
1930
1931 #[test]
1932 #[ignore] fn read() {
1934 setup_signal_handler();
1935
1936 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1937 let (hshm, _) = eshm.build();
1938 let guard_page_ptr = hshm.raw_ptr();
1939 unsafe { std::ptr::read_volatile(guard_page_ptr) };
1940 }
1941
1942 #[test]
1943 #[ignore] fn write() {
1945 setup_signal_handler();
1946
1947 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1948 let (hshm, _) = eshm.build();
1949 let guard_page_ptr = hshm.raw_ptr();
1950 unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
1951 }
1952
1953 #[test]
1954 #[ignore] fn exec() {
1956 setup_signal_handler();
1957
1958 let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1959 let (hshm, _) = eshm.build();
1960 let guard_page_ptr = hshm.raw_ptr();
1961 let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
1962 func();
1963 }
1964
1965 #[test]
1967 #[cfg_attr(miri, ignore)] fn guard_page_testing_shim() {
1969 let tests = vec!["read", "write", "exec"];
1970 for test in tests {
1971 let triple = std::env::var("TARGET_TRIPLE").ok();
1972 let target_args = if let Some(triple) = triple.filter(|t| !t.is_empty()) {
1973 vec!["--target".to_string(), triple.to_string()]
1974 } else {
1975 vec![]
1976 };
1977 let output = std::process::Command::new("cargo")
1978 .args(["test", "-p", "hyperlight-host", "--lib"])
1979 .args(target_args)
1980 .args(["--", "--ignored", test])
1981 .stdin(std::process::Stdio::null())
1982 .output()
1983 .expect("Unable to launch tests");
1984 let exit_code = output.status.code();
1985 if exit_code != Some(TEST_EXIT_CODE.into()) {
1986 eprintln!("=== Guard Page test '{}' failed ===", test);
1987 eprintln!("Exit code: {:?} (expected {})", exit_code, TEST_EXIT_CODE);
1988 eprintln!("=== STDOUT ===");
1989 eprintln!("{}", String::from_utf8_lossy(&output.stdout));
1990 eprintln!("=== STDERR ===");
1991 eprintln!("{}", String::from_utf8_lossy(&output.stderr));
1992 panic!(
1993 "Guard Page test failed: {} (exit code {:?}, expected {})",
1994 test, exit_code, TEST_EXIT_CODE
1995 );
1996 }
1997 }
1998 }
1999 }
2000}
2001
/// A cloneable handle to a shared-memory mapping that this type only exposes
/// for reading; clones alias the same underlying `HostMapping`.
#[derive(Clone, Debug)]
pub struct ReadonlySharedMemory {
    // Shared ownership of the mapping; the memory lives until the last
    // handle (of any kind holding this Arc) is dropped.
    region: Arc<HostMapping>,
}
// SAFETY(review): assumed sound because this type only reads the mapping
// through &self and ownership of the mapping is via Arc — TODO confirm
// HostMapping carries no thread-affine state.
unsafe impl Send for ReadonlySharedMemory {}
// SAFETY(review): same assumption as Send above — shared references only
// perform reads of the mapped bytes.
unsafe impl Sync for ReadonlySharedMemory {}
2022
impl ReadonlySharedMemory {
    /// Creates a read-only region initialized with a copy of `contents`.
    ///
    /// # Errors
    /// Returns an error if allocating the backing mapping or copying the
    /// bytes into it fails.
    pub(crate) fn from_bytes(contents: &[u8]) -> Result<Self> {
        // Stage the bytes through an exclusive (writable) mapping, then keep
        // only the region handle so no writable view is retained here.
        let mut anon = ExclusiveSharedMemory::new(contents.len())?;
        anon.copy_from_slice(contents, 0)?;
        Ok(ReadonlySharedMemory {
            region: anon.region,
        })
    }

    /// Borrows the entire mapping as a byte slice.
    pub(crate) fn as_slice(&self) -> &[u8] {
        // SAFETY: base_ptr()/mem_size() describe the mapping owned (via Arc)
        // by self.region, which outlives the returned borrow. Assumes no
        // writable alias of this mapping is mutated while the slice is
        // live — TODO(review): confirm no ExclusiveSharedMemory view
        // coexists with a ReadonlySharedMemory over the same region.
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copies this region's contents into a fresh writable mapping.
    ///
    /// # Errors
    /// Returns an error if the new mapping cannot be allocated or written.
    #[cfg(unshared_snapshot_mem)]
    pub(crate) fn copy_to_writable(&self) -> Result<ExclusiveSharedMemory> {
        let mut writable = ExclusiveSharedMemory::new(self.mem_size())?;
        writable.copy_from_slice(self.as_slice(), 0)?;
        Ok(writable)
    }

    /// Splits this handle into two handles aliasing the same region,
    /// mirroring the pair shape returned by `ExclusiveSharedMemory::build`.
    #[cfg(not(unshared_snapshot_mem))]
    pub(crate) fn build(self) -> (Self, Self) {
        (self.clone(), self)
    }

    /// Builds the `MemoryRegion` describing how to map this region into the
    /// guest at `guest_base`, with READ|EXECUTE (never WRITE) permissions.
    ///
    /// # Panics
    /// Panics if `region_type` is anything other than
    /// `MemoryRegionType::Snapshot`.
    #[cfg(not(unshared_snapshot_mem))]
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        #[allow(clippy::panic)]
        if region_type != MemoryRegionType::Snapshot {
            panic!("ReadonlySharedMemory::mapping_at should only be used for Snapshot regions");
        }
        mapping_at(
            self,
            guest_base,
            region_type,
            MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
        )
    }
}
2069
impl SharedMemory for ReadonlySharedMemory {
    /// Returns the underlying host mapping.
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Always fails: a read-only region never grants exclusive (mutable)
    /// access, so the closure is never invoked.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        _: F,
    ) -> Result<T> {
        Err(new_error!(
            "Cannot take exclusive access to a ReadonlySharedMemory"
        ))
    }
    /// Runs `f` over the full contents of the mapping and returns its
    /// result (this impl cannot fail).
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        Ok(f(self.as_slice()))
    }
}
2089
2090impl<S: SharedMemory> PartialEq<S> for ReadonlySharedMemory {
2091 fn eq(&self, other: &S) -> bool {
2092 self.raw_ptr() == other.raw_ptr()
2093 }
2094}