1use std::any::type_name;
18use std::ffi::c_void;
19use std::io::Error;
20use std::mem::{align_of, size_of};
21#[cfg(target_os = "linux")]
22use std::ptr::null_mut;
23use std::sync::{Arc, RwLock};
24
25use hyperlight_common::mem::PAGE_SIZE_USIZE;
26use tracing::{Span, instrument};
27#[cfg(target_os = "windows")]
28use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
29#[cfg(target_os = "windows")]
30use windows::Win32::System::Memory::PAGE_READWRITE;
31#[cfg(target_os = "windows")]
32use windows::Win32::System::Memory::{
33 CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
34 PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
35};
36#[cfg(target_os = "windows")]
37use windows::core::PCSTR;
38
39use super::memory_region::{
40 HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType,
41};
42#[cfg(target_os = "windows")]
43use crate::HyperlightError::WindowsAPIError;
44use crate::{HyperlightError, Result, log_then_return, new_error};
45
/// Fail fast if the half-open byte range `[$offset, $offset + $size)` does
/// not fit inside a buffer of `$mem_size` bytes.
///
/// Expands to an early `return Err(..)`, so it may only be invoked inside a
/// function returning `Result`. Overflow of `$offset + $size` counts as out
/// of bounds.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        match $offset.checked_add($size) {
            Some(end) if end <= $mem_size => {}
            _ => {
                return Err(new_error!(
                    "Cannot read value from offset {} with size {} in memory of size {}",
                    $offset,
                    $size,
                    $mem_size
                ));
            }
        }
    };
}
59
/// Generate a `pub(crate)` method named `$fname` that reads a `$ty` from the
/// usable region at `offset`, decoding it as little-endian and bounds-checking
/// the access first.
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            // Reject reads that would run past the end of the usable region.
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}
75
/// Generate a `pub(crate)` method named `$fname` that writes a `$ty` to the
/// usable region at `offset`, encoding it as little-endian and bounds-checking
/// the access first.
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            // Reject writes that would run past the end of the usable region.
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
89
/// Owner of the raw OS memory mapping that backs a sandbox's shared memory.
///
/// All of the `*SharedMemory` views hold this behind an `Arc`; the mapping is
/// released by `Drop` (munmap on Linux, UnmapViewOfFile + CloseHandle on
/// Windows) when the last reference goes away.
#[derive(Debug)]
pub struct HostMapping {
    // Host address of the very start of the mapping (the lower guard page,
    // not the first usable byte).
    ptr: *mut u8,
    // Total size of the mapping in bytes, guard pages included.
    size: usize,
    // File-mapping handle that backs the view; closed on drop.
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}
101
impl Drop for HostMapping {
    /// Unmap the anonymous mapping created in `ExclusiveSharedMemory::new`.
    /// `munmap`'s return value is intentionally ignored: there is no way to
    /// report failure from `drop`.
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    /// Release the mapped view first, then the file-mapping handle that backs
    /// it. Failures are logged rather than propagated, since `drop` cannot
    /// return errors.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}
129
/// An exclusively-owned view of the sandbox memory region.
///
/// Because ownership is exclusive (no clones, `&mut self` for mutation),
/// reads and writes through this view need no locking.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
// SAFETY: `HostMapping`'s raw pointer suppresses the automatic Send impl.
// The mapping is plain host memory not tied to the creating thread, and this
// view is exclusive, so transferring it to another thread is sound.
unsafe impl Send for ExclusiveSharedMemory {}
140
/// The guest-facing view of the sandbox memory region, created by
/// `ExclusiveSharedMemory::build` alongside a paired `HostSharedMemory`.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// Lock shared with the paired `HostSharedMemory`; the write side is
    /// taken to grant temporary exclusive access (see `with_exclusivity`).
    pub lock: Arc<RwLock<()>>,
}
// SAFETY: `HostMapping`'s raw pointer suppresses the automatic Send impl.
// Access is coordinated through `lock`, and the mapping itself is not tied
// to a particular thread.
unsafe impl Send for GuestSharedMemory {}
165
/// The host-facing view of the sandbox memory region. Cloneable: clones alias
/// the same underlying mapping and share the same lock.
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    // Shared with the paired GuestSharedMemory; read side is held during
    // copies, write side during with_exclusivity.
    lock: Arc<RwLock<()>>,
}
// SAFETY: `HostMapping`'s raw pointer suppresses the automatic Send impl.
// Access is coordinated through `lock`, and the mapping itself is not tied
// to a particular thread.
unsafe impl Send for HostSharedMemory {}
326
327impl ExclusiveSharedMemory {
328 #[cfg(target_os = "linux")]
333 #[instrument(skip_all, parent = Span::current(), level= "Trace")]
334 pub fn new(min_size_bytes: usize) -> Result<Self> {
335 use libc::{
336 MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t,
337 size_t,
338 };
339 #[cfg(not(miri))]
340 use libc::{MAP_NORESERVE, PROT_NONE, mprotect};
341
342 if min_size_bytes == 0 {
343 return Err(new_error!("Cannot create shared memory with size 0"));
344 }
345
346 let total_size = min_size_bytes
347 .checked_add(2 * PAGE_SIZE_USIZE) .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;
349
350 if total_size % PAGE_SIZE_USIZE != 0 {
351 return Err(new_error!(
352 "shared memory must be a multiple of {}",
353 PAGE_SIZE_USIZE
354 ));
355 }
356
357 if total_size > isize::MAX as usize {
360 return Err(HyperlightError::MemoryRequestTooBig(
361 total_size,
362 isize::MAX as usize,
363 ));
364 }
365
366 #[cfg(not(miri))]
368 let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
369 #[cfg(miri)]
370 let flags = MAP_ANONYMOUS | MAP_PRIVATE;
371
372 let addr = unsafe {
373 mmap(
374 null_mut(),
375 total_size as size_t,
376 PROT_READ | PROT_WRITE,
377 flags,
378 -1 as c_int,
379 0 as off_t,
380 )
381 };
382 if addr == MAP_FAILED {
383 log_then_return!(HyperlightError::MmapFailed(
384 Error::last_os_error().raw_os_error()
385 ));
386 }
387
388 #[cfg(not(miri))]
390 {
391 let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
392 if res != 0 {
393 return Err(HyperlightError::MprotectFailed(
394 Error::last_os_error().raw_os_error(),
395 ));
396 }
397 let res = unsafe {
398 mprotect(
399 (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
400 PAGE_SIZE_USIZE,
401 PROT_NONE,
402 )
403 };
404 if res != 0 {
405 return Err(HyperlightError::MprotectFailed(
406 Error::last_os_error().raw_os_error(),
407 ));
408 }
409 }
410
411 Ok(Self {
412 #[allow(clippy::arc_with_non_send_sync)]
421 region: Arc::new(HostMapping {
422 ptr: addr as *mut u8,
423 size: total_size,
424 }),
425 })
426 }
427
428 #[cfg(target_os = "windows")]
433 #[instrument(skip_all, parent = Span::current(), level= "Trace")]
434 pub fn new(min_size_bytes: usize) -> Result<Self> {
435 if min_size_bytes == 0 {
436 return Err(new_error!("Cannot create shared memory with size 0"));
437 }
438
439 let total_size = min_size_bytes
440 .checked_add(2 * PAGE_SIZE_USIZE)
441 .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;
442
443 if total_size % PAGE_SIZE_USIZE != 0 {
444 return Err(new_error!(
445 "shared memory must be a multiple of {}",
446 PAGE_SIZE_USIZE
447 ));
448 }
449
450 if total_size > isize::MAX as usize {
453 return Err(HyperlightError::MemoryRequestTooBig(
454 total_size,
455 isize::MAX as usize,
456 ));
457 }
458
459 let mut dwmaximumsizehigh = 0;
460 let mut dwmaximumsizelow = 0;
461
462 if std::mem::size_of::<usize>() == 8 {
463 dwmaximumsizehigh = (total_size >> 32) as u32;
464 dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
465 }
466
467 let flags = PAGE_READWRITE;
471
472 let handle = unsafe {
473 CreateFileMappingA(
474 INVALID_HANDLE_VALUE,
475 None,
476 flags,
477 dwmaximumsizehigh,
478 dwmaximumsizelow,
479 PCSTR::null(),
480 )?
481 };
482
483 if handle.is_invalid() {
484 log_then_return!(HyperlightError::MemoryAllocationFailed(
485 Error::last_os_error().raw_os_error()
486 ));
487 }
488
489 let file_map = FILE_MAP_ALL_ACCESS;
490 let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };
491
492 if addr.Value.is_null() {
493 log_then_return!(HyperlightError::MemoryAllocationFailed(
494 Error::last_os_error().raw_os_error()
495 ));
496 }
497
498 let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);
501
502 let first_guard_page_start = addr.Value;
505 if let Err(e) = unsafe {
506 VirtualProtect(
507 first_guard_page_start,
508 PAGE_SIZE_USIZE,
509 PAGE_NOACCESS,
510 &mut unused_out_old_prot_flags,
511 )
512 } {
513 log_then_return!(WindowsAPIError(e.clone()));
514 }
515
516 let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
517 if let Err(e) = unsafe {
518 VirtualProtect(
519 last_guard_page_start,
520 PAGE_SIZE_USIZE,
521 PAGE_NOACCESS,
522 &mut unused_out_old_prot_flags,
523 )
524 } {
525 log_then_return!(WindowsAPIError(e.clone()));
526 }
527
528 Ok(Self {
529 #[allow(clippy::arc_with_non_send_sync)]
538 region: Arc::new(HostMapping {
539 ptr: addr.Value as *mut u8,
540 size: total_size,
541 handle,
542 }),
543 })
544 }
545
546 pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
582 unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
583 }
584
585 #[instrument(skip_all, parent = Span::current(), level= "Trace")]
598 pub fn as_slice<'a>(&'a self) -> &'a [u8] {
599 unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
600 }
601
602 #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
604 #[cfg(test)]
605 pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
606 let data = self.as_slice();
607 Ok(data.to_vec())
608 }
609
610 #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
612 pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
613 let data = self.as_mut_slice();
614 bounds_check!(offset, src.len(), data.len());
615 data[offset..offset + src.len()].copy_from_slice(src);
616 Ok(())
617 }
618
619 generate_reader!(read_u8, u8);
620 generate_reader!(read_i8, i8);
621 generate_reader!(read_u16, u16);
622 generate_reader!(read_i16, i16);
623 generate_reader!(read_u32, u32);
624 generate_reader!(read_i32, i32);
625 generate_reader!(read_u64, u64);
626 generate_reader!(read_i64, i64);
627 generate_reader!(read_usize, usize);
628 generate_reader!(read_isize, isize);
629
630 generate_writer!(write_u8, u8);
631 generate_writer!(write_i8, i8);
632 generate_writer!(write_u16, u16);
633 generate_writer!(write_i16, i16);
634 generate_writer!(write_u32, u32);
635 generate_writer!(write_i32, i32);
636 generate_writer!(write_u64, u64);
637 generate_writer!(write_i64, i64);
638 generate_writer!(write_usize, usize);
639 generate_writer!(write_isize, isize);
640
641 pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
648 let lock = Arc::new(RwLock::new(()));
649 let hshm = HostSharedMemory {
650 region: self.region.clone(),
651 lock: lock.clone(),
652 };
653 (
654 hshm,
655 GuestSharedMemory {
656 region: self.region.clone(),
657 lock,
658 },
659 )
660 }
661
662 #[cfg(target_os = "windows")]
664 pub fn get_mmap_file_handle(&self) -> HANDLE {
665 self.region.handle
666 }
667
668 #[cfg(all(test, feature = "guest-counter"))]
672 pub(crate) fn as_host_shared_memory(&self) -> HostSharedMemory {
673 let lock = Arc::new(RwLock::new(()));
674 HostSharedMemory {
675 region: self.region.clone(),
676 lock,
677 }
678 }
679}
680
681fn mapping_at(
682 s: &impl SharedMemory,
683 gpa: u64,
684 size: usize,
685 region_type: MemoryRegionType,
686 flags: MemoryRegionFlags,
687) -> MemoryRegion {
688 let guest_base = gpa as usize;
689
690 MemoryRegion {
691 guest_region: guest_base..(guest_base + size),
692 host_region: s.host_region_base()
693 ..<HostGuestMemoryRegion as MemoryRegionKind>::add(s.host_region_base(), size),
694 region_type,
695 flags,
696 }
697}
698
impl GuestSharedMemory {
    /// Build the `MemoryRegion` describing this entire shared-memory object
    /// as seen by the guest at physical address `guest_base`.
    ///
    /// Only `Scratch` (and, with the `unshared_snapshot_mem` cfg, `Snapshot`)
    /// regions may be described this way; any other region type is a caller
    /// bug and panics.
    pub(crate) fn mapping_at(
        &self,
        guest_base: u64,
        region_type: MemoryRegionType,
    ) -> MemoryRegion {
        let flags = match region_type {
            MemoryRegionType::Scratch => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            #[cfg(unshared_snapshot_mem)]
            MemoryRegionType::Snapshot => {
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE
            }
            #[allow(clippy::panic)]
            _ => panic!(
                "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions"
            ),
        };
        mapping_at(self, guest_base, self.mem_size(), region_type, flags)
    }
}
731
/// Operations common to all views of a sandbox memory mapping.
pub trait SharedMemory {
    /// The underlying OS mapping, guard pages included.
    fn region(&self) -> &HostMapping;

    /// Host address of the first usable byte (just past the lower guard page).
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Pointer form of [`Self::base_addr`].
    fn base_ptr(&self) -> *mut u8 {
        self.region().ptr.wrapping_add(PAGE_SIZE_USIZE)
    }

    /// Usable size: the full mapping minus the two guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Pointer to the very start of the raw mapping (the lower guard page).
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Total size of the raw mapping, guard pages included.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Platform-specific description of where the usable region lives on the
    /// host: a plain address on non-Windows, a handle + base + offset record
    /// on Windows.
    fn host_region_base(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        #[cfg(not(windows))]
        {
            self.base_addr()
        }
        #[cfg(windows)]
        {
            super::memory_region::HostRegionBase {
                from_handle: self.region().handle.into(),
                handle_base: self.region().ptr as usize,
                handle_size: self.region().size,
                offset: PAGE_SIZE_USIZE,
            }
        }
    }

    /// One past the last usable host byte, in the same representation as
    /// [`Self::host_region_base`].
    fn host_region_end(&self) -> <HostGuestMemoryRegion as MemoryRegionKind>::HostBaseType {
        <HostGuestMemoryRegion as MemoryRegionKind>::add(self.host_region_base(), self.mem_size())
    }

    /// Run `f` with temporary exclusive access to this region.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;

    /// Run `f` over a read-only view of the whole usable region.
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        self.with_exclusivity(|m| f(m.as_slice()))
    }

    /// Zero the entire usable region.
    fn zero(&mut self) -> Result<()> {
        self.with_exclusivity(|e| {
            #[allow(unused_mut)] let mut do_copy = true;
            // Fast path on KVM-capable Linux: MADV_DONTNEED discards the
            // pages so the kernel supplies zeroed ones on the next touch,
            // avoiding an explicit memset of the whole region. If madvise
            // fails for any reason, fall back to filling with zeros.
            #[cfg(all(target_os = "linux", feature = "kvm", not(any(feature = "mshv3"))))]
            unsafe {
                let ret = libc::madvise(
                    e.region.ptr as *mut libc::c_void,
                    e.region.size,
                    libc::MADV_DONTNEED,
                );
                if ret == 0 {
                    do_copy = false;
                }
            }
            if do_copy {
                e.as_mut_slice().fill(0);
            }
        })
    }
}
846
847impl SharedMemory for ExclusiveSharedMemory {
848 fn region(&self) -> &HostMapping {
849 &self.region
850 }
851 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
852 &mut self,
853 f: F,
854 ) -> Result<T> {
855 Ok(f(self))
856 }
857}
858
859impl SharedMemory for GuestSharedMemory {
860 fn region(&self) -> &HostMapping {
861 &self.region
862 }
863 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
864 &mut self,
865 f: F,
866 ) -> Result<T> {
867 let guard = self
868 .lock
869 .try_write()
870 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
871 let mut excl = ExclusiveSharedMemory {
872 region: self.region.clone(),
873 };
874 let ret = f(&mut excl);
875 drop(excl);
876 drop(guard);
877 Ok(ret)
878 }
879}
880
/// Marker for types for which *every* bit pattern is a valid value, so bytes
/// copied out of (possibly concurrently modified) shared memory can be safely
/// reinterpreted as the type.
///
/// # Safety
///
/// Implementors must guarantee that any sequence of `size_of::<Self>()` bytes
/// is a valid `Self` (no padding, no niche/invalid representations).
pub unsafe trait AllValid {}
// Fixed-width integers and plain byte arrays have no invalid bit patterns.
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
899
impl HostSharedMemory {
    /// Read a `T` from shared memory at `offset`.
    ///
    /// `T: AllValid` guarantees every bit pattern is a valid `T`, so bytes
    /// copied out of the (possibly concurrently mutated) region can be
    /// reinterpreted safely.
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                // View the uninitialized T as a byte buffer; copy_to_slice
                // initializes every byte before assume_init below.
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        }
    }

    /// Write a `T` to shared memory at `offset`, as its raw object
    /// representation.
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            // Reinterpret `data` as raw bytes; sound because only the object
            // representation is read.
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy bytes from the region (starting at `offset`) into `slice`.
    ///
    /// Uses volatile accesses because the guest may modify the memory
    /// concurrently: an unaligned head is copied byte-by-byte, the aligned
    /// middle in 16-byte chunks, and the tail byte-by-byte again. Holds the
    /// read side of the region lock for the duration.
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: advance byte-wise until `base + i` is 16-byte aligned (or
        // the slice is exhausted).
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        // Body: aligned volatile 16-byte reads; the destination may be
        // unaligned, hence write_unaligned.
        let dst = slice.as_mut_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = (base.add(i) as *const u128).read_volatile();
                std::ptr::write_unaligned(dst.add(i) as *mut u128, value);
            }
            i += CHUNK;
        }

        // Tail: remaining (< 16) bytes.
        while i < len {
            unsafe {
                slice[i] = base.add(i).read_volatile();
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Copy bytes from `slice` into the region starting at `offset`.
    ///
    /// Mirror of [`Self::copy_to_slice`]: volatile head/body/tail writes
    /// under the read side of the region lock.
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        let len = slice.len();
        let mut i = 0;

        // Head: byte-wise until the destination is 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        // Body: possibly-unaligned source reads, aligned volatile 16-byte
        // writes into the region.
        let src = slice.as_ptr();
        while i + CHUNK <= len {
            unsafe {
                let value = std::ptr::read_unaligned(src.add(i) as *const u128);
                (base.add(i) as *mut u128).write_volatile(value);
            }
            i += CHUNK;
        }

        // Tail: remaining (< 16) bytes.
        while i < len {
            unsafe {
                base.add(i).write_volatile(slice[i]);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Fill `len` bytes of the region starting at `offset` with `value`,
    /// using the same volatile head/body/tail strategy as the copies.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;

        const CHUNK: usize = size_of::<u128>();
        // A u128 whose every byte is `value`, for the wide middle writes.
        let value_u128 = u128::from_ne_bytes([value; CHUNK]);
        let mut i = 0;

        // Head: byte-wise until 16-byte aligned.
        let align_offset = base.align_offset(align_of::<u128>());
        let head_len = align_offset.min(len);
        while i < head_len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        // Body: aligned 16-byte volatile stores.
        while i + CHUNK <= len {
            unsafe {
                (base.add(i) as *mut u128).write_volatile(value_u128);
            }
            i += CHUNK;
        }

        // Tail: remaining (< 16) bytes.
        while i < len {
            unsafe {
                base.add(i).write_volatile(value);
            }
            i += 1;
        }

        drop(guard);
        Ok(())
    }

    /// Push `data` onto the stack-like buffer at
    /// `[buffer_start_offset, buffer_start_offset + buffer_size)`.
    ///
    /// Buffer layout (offsets relative to `buffer_start_offset`): the first
    /// 8 bytes hold the current relative stack pointer; each pushed element
    /// is its payload followed by an 8-byte back-pointer recording the
    /// previous stack-pointer value, enabling pop.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        // The stack pointer must point past the 8-byte header and inside the
        // buffer; anything else indicates corruption.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        // Payload plus the trailing 8-byte back-pointer must fit.
        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // Write the payload...
        self.copy_from_slice(data, stack_pointer_abs)?;

        // ...then the back-pointer (previous relative stack pointer)...
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // ...and finally advance the stack pointer in the header.
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pop the most recently pushed element off the stack-like buffer (see
    /// [`Self::push_buffer`] for the layout) and convert it into a `T`.
    ///
    /// All offsets read back from shared memory are validated before use,
    /// since the guest may have corrupted them. The popped slot is zeroed
    /// afterwards.
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        // A pop needs at least one element: 8-byte header + >= 8-byte slot.
        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // Back-pointer stored just below the stack pointer: relative offset
        // of the start of the last element.
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // The element must start after the header and leave room for its own
        // payload plus the back-pointer below the stack pointer.
        if last_element_offset_rel > stack_pointer_rel.saturating_sub(16)
            || last_element_offset_rel < 8
        {
            return Err(new_error!(
                "Corrupt buffer back-pointer: element offset {} is outside valid range [8, {}].",
                last_element_offset_rel,
                stack_pointer_rel.saturating_sub(16),
            ));
        }

        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Bytes between the element start and its back-pointer.
        let max_element_size = stack_pointer_rel - last_element_offset_rel - 8;

        // The element begins with a u32 size prefix; total element size is
        // prefix + the 4-byte prefix itself (flatbuffer-style framing).
        let fb_buffer_size = {
            let raw_prefix = self.read::<u32>(last_element_offset_abs)?;
            let total = raw_prefix.checked_add(4).ok_or_else(|| {
                new_error!(
                    "Corrupt buffer size prefix: value {} overflows when adding 4-byte header.",
                    raw_prefix
                )
            })?;
            usize::try_from(total)
        }?;

        if fb_buffer_size > max_element_size {
            return Err(new_error!(
                "Corrupt buffer size prefix: flatbuffer claims {} bytes but the element slot is only {} bytes.",
                fb_buffer_size,
                max_element_size
            ));
        }

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // Rewind the stack pointer to the start of the popped element...
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // ...and scrub the popped slot (payload + back-pointer).
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}
1223
1224impl SharedMemory for HostSharedMemory {
1225 fn region(&self) -> &HostMapping {
1226 &self.region
1227 }
1228 fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
1229 &mut self,
1230 f: F,
1231 ) -> Result<T> {
1232 let guard = self
1233 .lock
1234 .try_write()
1235 .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
1236 let mut excl = ExclusiveSharedMemory {
1237 region: self.region.clone(),
1238 };
1239 let ret = f(&mut excl);
1240 drop(excl);
1241 drop(guard);
1242 Ok(ret)
1243 }
1244}
1245
1246#[cfg(test)]
1247mod tests {
1248 use hyperlight_common::mem::PAGE_SIZE_USIZE;
1249 #[cfg(not(miri))]
1250 use proptest::prelude::*;
1251
1252 #[cfg(not(miri))]
1253 use super::HostSharedMemory;
1254 use super::{ExclusiveSharedMemory, SharedMemory};
1255 use crate::Result;
1256 #[cfg(not(miri))]
1257 use crate::mem::shared_mem_tests::read_write_test_suite;
1258
1259 #[test]
1260 fn fill() {
1261 let mem_size: usize = 4096;
1262 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1263 let (mut hshm, _) = eshm.build();
1264
1265 hshm.fill(1, 0, 1024).unwrap();
1266 hshm.fill(2, 1024, 1024).unwrap();
1267 hshm.fill(3, 2048, 1024).unwrap();
1268 hshm.fill(4, 3072, 1024).unwrap();
1269
1270 let vec = hshm
1271 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1272 .unwrap();
1273
1274 assert!(vec[0..1024].iter().all(|&x| x == 1));
1275 assert!(vec[1024..2048].iter().all(|&x| x == 2));
1276 assert!(vec[2048..3072].iter().all(|&x| x == 3));
1277 assert!(vec[3072..4096].iter().all(|&x| x == 4));
1278
1279 hshm.fill(5, 0, 4096).unwrap();
1280
1281 let vec2 = hshm
1282 .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1283 .unwrap();
1284 assert!(vec2.iter().all(|&x| x == 5));
1285
1286 assert!(hshm.fill(0, 0, mem_size + 1).is_err());
1287 assert!(hshm.fill(0, mem_size, 1).is_err());
1288 }
1289
    #[test]
    fn bounds_check_overflow() {
        // Offsets near usize::MAX must fail cleanly rather than wrap around
        // during the `offset + size` bounds computation.
        let mem_size: usize = 4096;
        let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();

        // Exclusive view: primitive accessors and slice copies.
        assert!(eshm.read_i32(usize::MAX).is_err());
        assert!(eshm.write_i32(usize::MAX, 0).is_err());
        assert!(eshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());

        let (mut hshm, _) = eshm.build();

        // Host view: typed reads/writes where offset + size_of::<T>() would
        // overflow.
        assert!(hshm.read::<u8>(usize::MAX).is_err());
        assert!(hshm.read::<u64>(usize::MAX - 3).is_err());
        assert!(hshm.write::<u8>(usize::MAX, 0).is_err());
        assert!(hshm.write::<u64>(usize::MAX - 3, 0).is_err());

        let mut buf = [0u8; 1];
        assert!(hshm.copy_to_slice(&mut buf, usize::MAX).is_err());
        assert!(hshm.copy_from_slice(&[0u8; 1], usize::MAX).is_err());

        // fill: overflow in either the offset or the length argument.
        assert!(hshm.fill(0, usize::MAX, 1).is_err());
        assert!(hshm.fill(0, 1, usize::MAX).is_err());
    }
1317
1318 #[test]
1319 fn copy_into_from() -> Result<()> {
1320 let mem_size: usize = 4096;
1321 let vec_len = 10;
1322 let eshm = ExclusiveSharedMemory::new(mem_size)?;
1323 let (hshm, _) = eshm.build();
1324 let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
1325 hshm.copy_from_slice(&vec, 0)?;
1327
1328 let mut vec2 = vec![0; vec_len];
1329 hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
1331 assert_eq!(vec, vec2);
1332
1333 let offset = mem_size - vec.len();
1334 hshm.copy_from_slice(&vec, offset)?;
1336
1337 let mut vec3 = vec![0; vec_len];
1338 hshm.copy_to_slice(&mut vec3, offset)?;
1340 assert_eq!(vec, vec3);
1341
1342 let offset = mem_size / 2;
1343 hshm.copy_from_slice(&vec, offset)?;
1345
1346 let mut vec4 = vec![0; vec_len];
1347 hshm.copy_to_slice(&mut vec4, offset)?;
1349 assert_eq!(vec, vec4);
1350
1351 let mut vec5 = vec![0; vec_len];
1353 assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
1354
1355 assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
1357
1358 let mut vec6 = vec![0; vec_len];
1360 assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
1361
1362 assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
1364
1365 let mut vec7 = vec![0; mem_size * 2];
1367 assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
1368
1369 assert!(hshm.copy_from_slice(&vec7, 0).is_err());
1371
1372 Ok(())
1373 }
1374
    #[cfg(not(miri))]
    proptest! {
        // Property: an i32 written at an arbitrary valid offset reads back
        // unchanged, both through the exclusive view's typed accessors and
        // through the host view's generic read/write.
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }
1400
1401 #[test]
1402 fn alloc_fail() {
1403 let gm = ExclusiveSharedMemory::new(0);
1404 assert!(gm.is_err());
1405 let gm = ExclusiveSharedMemory::new(usize::MAX);
1406 assert!(gm.is_err());
1407 }
1408
1409 #[test]
1410 fn clone() {
1411 let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
1412 let (hshm1, _) = eshm.build();
1413 let hshm2 = hshm1.clone();
1414
1415 assert_eq!(hshm1.mem_size(), hshm2.mem_size());
1418 assert_eq!(hshm1.base_addr(), hshm2.base_addr());
1419
1420 hshm1.copy_from_slice(b"a", 0).unwrap();
1423 hshm2.copy_from_slice(b"b", 1).unwrap();
1424
1425 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1428 assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
1429 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1430 }
1431
1432 drop(hshm1);
1435
1436 for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1438 assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1439 }
1440 hshm2.copy_from_slice(b"c", 2).unwrap();
1441 assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
1442 drop(hshm2);
1443 }
1444
1445 #[test]
1446 fn copy_all_to_vec() {
1447 let mut data = vec![b'a', b'b', b'c'];
1448 data.resize(4096, 0);
1449 let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
1450 eshm.copy_from_slice(data.as_slice(), 0).unwrap();
1451 let ret_vec = eshm.copy_all_to_vec().unwrap();
1452 assert_eq!(data, ret_vec);
1453 }
1454
    #[test]
    #[cfg(all(target_os = "linux", not(miri)))]
    fn test_drop() {
        use proc_maps::get_process_maps;

        // An unusual size so the mapping is unlikely to collide with any
        // other mapping of identical start/size in this process.
        const UNIQUE_SIZE: usize = PAGE_SIZE_USIZE * 17;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(UNIQUE_SIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();

        // The guard pages have different protection, so the usable interior
        // should appear as its own entry in the process map listing,
        // starting at base_ptr with exactly mem_size bytes.
        let base_ptr = hshm1.base_ptr() as usize;
        let mem_size = hshm1.mem_size();

        // True iff some mapping in /proc/<pid>/maps starts exactly at `ptr`
        // with exactly `size` bytes.
        let has_exact_mapping = |ptr: usize, size: usize| -> bool {
            get_process_maps(pid.try_into().unwrap())
                .unwrap()
                .iter()
                .any(|m| m.start() == ptr && m.size() == size)
        };

        assert!(
            has_exact_mapping(base_ptr, mem_size),
            "shared memory mapping not found at {:#x} with size {}",
            base_ptr,
            mem_size
        );

        // Dropping every view drops the last Arc<HostMapping>, which munmaps
        // the region.
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        assert!(
            !has_exact_mapping(base_ptr, mem_size),
            "shared memory mapping still exists at {:#x} with size {} after drop",
            base_ptr,
            mem_size
        );
    }
1512
1513 mod alignment_tests {
1517 use super::*;
1518
1519 const CHUNK_SIZE: usize = size_of::<u128>();
1520
1521 #[test]
1523 fn copy_with_various_alignments() {
1524 let mem_size: usize = 4096;
1526 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1527 let (hshm, _) = eshm.build();
1528
1529 for start_offset in 0..CHUNK_SIZE {
1531 let test_len = 64; let test_data: Vec<u8> = (0..test_len).map(|i| (i + start_offset) as u8).collect();
1533
1534 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1536
1537 let mut read_buf = vec![0u8; test_len];
1539 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1540
1541 assert_eq!(
1542 test_data, read_buf,
1543 "Mismatch at alignment offset {}",
1544 start_offset
1545 );
1546 }
1547 }
1548
1549 #[test]
1551 fn copy_small_lengths() {
1552 let mem_size: usize = 4096;
1553 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1554 let (hshm, _) = eshm.build();
1555
1556 for len in 0..CHUNK_SIZE {
1557 let test_data: Vec<u8> = (0..len).map(|i| i as u8).collect();
1558
1559 hshm.copy_from_slice(&test_data, 0).unwrap();
1560
1561 let mut read_buf = vec![0u8; len];
1562 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1563
1564 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1565 }
1566 }
1567
1568 #[test]
1570 fn copy_non_aligned_lengths() {
1571 let mem_size: usize = 4096;
1572 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1573 let (hshm, _) = eshm.build();
1574
1575 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1577
1578 for &len in &test_lengths {
1579 let test_data: Vec<u8> = (0..len).map(|i| (i % 256) as u8).collect();
1580
1581 hshm.copy_from_slice(&test_data, 0).unwrap();
1582
1583 let mut read_buf = vec![0u8; len];
1584 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1585
1586 assert_eq!(test_data, read_buf, "Mismatch for length {}", len);
1587 }
1588 }
1589
1590 #[test]
1592 fn copy_exact_chunk_size() {
1593 let mem_size: usize = 4096;
1594 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1595 let (hshm, _) = eshm.build();
1596
1597 let test_data: Vec<u8> = (0..CHUNK_SIZE).map(|i| i as u8).collect();
1598
1599 hshm.copy_from_slice(&test_data, 0).unwrap();
1600
1601 let mut read_buf = vec![0u8; CHUNK_SIZE];
1602 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1603
1604 assert_eq!(test_data, read_buf);
1605 }
1606
1607 #[test]
1609 fn fill_with_various_alignments() {
1610 let mem_size: usize = 4096;
1611 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1612 let (mut hshm, _) = eshm.build();
1613
1614 for start_offset in 0..CHUNK_SIZE {
1615 let fill_len = 64;
1616 let fill_value = (start_offset % 256) as u8;
1617
1618 hshm.fill(0, 0, mem_size).unwrap();
1620
1621 hshm.fill(fill_value, start_offset, fill_len).unwrap();
1623
1624 let mut read_buf = vec![0u8; fill_len];
1626 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1627
1628 assert!(
1629 read_buf.iter().all(|&b| b == fill_value),
1630 "Fill mismatch at alignment offset {}",
1631 start_offset
1632 );
1633 }
1634 }
1635
1636 #[test]
1638 fn fill_small_lengths() {
1639 let mem_size: usize = 4096;
1640 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1641 let (mut hshm, _) = eshm.build();
1642
1643 for len in 0..CHUNK_SIZE {
1644 let fill_value = 0xAB;
1645
1646 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1648
1649 let mut read_buf = vec![0u8; len];
1650 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1651
1652 assert!(
1653 read_buf.iter().all(|&b| b == fill_value),
1654 "Fill mismatch for length {}",
1655 len
1656 );
1657 }
1658 }
1659
1660 #[test]
1662 fn fill_non_aligned_lengths() {
1663 let mem_size: usize = 4096;
1664 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1665 let (mut hshm, _) = eshm.build();
1666
1667 let test_lengths = [17, 31, 33, 47, 63, 65, 100, 127, 129, 255, 257];
1668
1669 for &len in &test_lengths {
1670 let fill_value = 0xCD;
1671
1672 hshm.fill(0, 0, mem_size).unwrap(); hshm.fill(fill_value, 0, len).unwrap();
1674
1675 let mut read_buf = vec![0u8; len];
1676 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1677
1678 assert!(
1679 read_buf.iter().all(|&b| b == fill_value),
1680 "Fill mismatch for length {}",
1681 len
1682 );
1683 }
1684 }
1685
1686 #[test]
1688 fn copy_edge_cases() {
1689 let mem_size: usize = 4096;
1690 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1691 let (hshm, _) = eshm.build();
1692
1693 let empty: Vec<u8> = vec![];
1695 hshm.copy_from_slice(&empty, 0).unwrap();
1696 let mut read_buf: Vec<u8> = vec![];
1697 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1698 assert!(read_buf.is_empty());
1699
1700 let single = vec![0x42u8];
1702 hshm.copy_from_slice(&single, 0).unwrap();
1703 let mut read_buf = vec![0u8; 1];
1704 hshm.copy_to_slice(&mut read_buf, 0).unwrap();
1705 assert_eq!(single, read_buf);
1706 }
1707
1708 #[test]
1710 fn copy_unaligned_start_and_length() {
1711 let mem_size: usize = 4096;
1712 let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1713 let (hshm, _) = eshm.build();
1714
1715 let start_offset = 7;
1717 let len = 37;
1718 let test_data: Vec<u8> = (0..len).map(|i| (i * 3) as u8).collect();
1719
1720 hshm.copy_from_slice(&test_data, start_offset).unwrap();
1721
1722 let mut read_buf = vec![0u8; len];
1723 hshm.copy_to_slice(&mut read_buf, start_offset).unwrap();
1724
1725 assert_eq!(test_data, read_buf);
1726 }
1727 }
1728
    mod try_pop_buffer_bounds {
        use super::*;

        /// Thin wrapper that captures the raw popped bytes so tests can
        /// inspect them without any flatbuffer decoding.
        #[derive(Debug, PartialEq)]
        struct RawBytes(Vec<u8>);

        impl TryFrom<&[u8]> for RawBytes {
            type Error = String;
            fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
                Ok(RawBytes(value.to_vec()))
            }
        }

        /// Builds a `HostSharedMemory` of `mem_size` bytes and sets its first
        /// u64 to 8.
        // NOTE(review): the leading u64 looks like the buffer's stack pointer,
        // initialized to point just past the 8-byte header — confirm against
        // push_buffer/try_pop_buffer_into.
        fn make_buffer(mem_size: usize) -> super::super::HostSharedMemory {
            let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
            let (hshm, _) = eshm.build();
            hshm.write::<u64>(0, 8u64).unwrap();
            hshm
        }

        /// Sanity check: an uncorrupted push/pop round trip returns exactly
        /// the pushed bytes.
        #[test]
        fn normal_push_pop_roundtrip() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            // Size-prefixed payload: 4-byte little-endian length + bytes.
            let payload = b"hello";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);

            hshm.push_buffer(0, mem_size, &data).unwrap();
            let result: RawBytes = hshm.try_pop_buffer_into(0, mem_size).unwrap();
            assert_eq!(result.0, data);
        }

        /// A corrupt size prefix that (after the 4-byte header is added)
        /// claims far more bytes than the element slot holds must be rejected.
        #[test]
        fn malicious_flatbuffer_size_prefix() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            // Element slot = 4-byte prefix + "small" = 9 bytes.
            let payload = b"small";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // Overwrite the element's size prefix (offset 8, just past the
            // u64 header): 0xFFFF_FFFB + 4-byte header = 4294967295 claimed.
            hshm.write::<u32>(8, 0xFFFF_FFFBu32).unwrap();
            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 4294967295 bytes but the element slot is only 9 bytes"),
                "Unexpected error message: {}",
                err_msg
            );
        }

        /// A back-pointer pointing below the first valid element offset must
        /// be rejected instead of causing an out-of-range read.
        #[test]
        fn malicious_element_offset_too_small() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            // Element slot = 4-byte prefix + "test" = 8 bytes at offset 8..16.
            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // Clobber the back-pointer (the u64 following the element) with 0,
            // which is below the minimum valid element offset of 8.
            hshm.write::<u64>(16, 0u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 0 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        /// A back-pointer far past the current stack pointer must be rejected.
        #[test]
        fn malicious_element_offset_past_stack_pointer() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // Point the back-pointer well beyond any pushed element.
            hshm.write::<u64>(16, 9999u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 9999 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        /// A size prefix that overclaims by exactly one byte must still be
        /// rejected (boundary of the size check).
        #[test]
        fn malicious_flatbuffer_size_off_by_one() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            // Element slot = 4-byte prefix + "abcd" = 8 bytes.
            let payload = b"abcd";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // Claim a payload of 5: 5 + 4-byte header = 9 > the 8-byte slot.
            hshm.write::<u32>(8, 5u32).unwrap();
            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: flatbuffer claims 9 bytes but the element slot is only 8 bytes"),
                "Unexpected error message: {}",
                err_msg
            );
        }

        /// A back-pointer just below the stack pointer (but not a valid
        /// element offset) must be rejected — guards against underflow when
        /// computing the element range.
        #[test]
        fn back_pointer_near_stack_pointer_underflow() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // 23 is one less than the post-push stack pointer (24) and not a
            // valid element start.
            hshm.write::<u64>(16, 23u64).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains(
                    "Corrupt buffer back-pointer: element offset 23 is outside valid range [8, 8]"
                ),
                "Unexpected error message: {}",
                err_msg
            );
        }

        /// A size prefix whose value + 4-byte header overflows u32 must be
        /// rejected with an explicit overflow error, not wrap around.
        #[test]
        fn size_prefix_u32_overflow() {
            let mem_size = 4096;
            let mut hshm = make_buffer(mem_size);

            let payload = b"test";
            let mut data = Vec::new();
            data.extend_from_slice(&(payload.len() as u32).to_le_bytes());
            data.extend_from_slice(payload);
            hshm.push_buffer(0, mem_size, &data).unwrap();

            // 0xFFFF_FFFD + 4 does not fit in u32.
            hshm.write::<u32>(8, 0xFFFF_FFFDu32).unwrap();

            let result: Result<RawBytes> = hshm.try_pop_buffer_into(0, mem_size);
            let err_msg = format!("{}", result.unwrap_err());
            assert!(
                err_msg.contains("Corrupt buffer size prefix: value 4294967293 overflows when adding 4-byte header"),
                "Unexpected error message: {}",
                err_msg
            );
        }
    }
1914
    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

        /// Exit code the SIGSEGV handler reports; the shim below treats this
        /// as "the guard page fired as expected".
        const TEST_EXIT_CODE: u8 = 211;

        /// Registers a SIGSEGV handler that exits the process with
        /// TEST_EXIT_CODE so a guard-page fault terminates the child cleanly.
        fn setup_signal_handler() {
            // SAFETY: register_signal_unchecked requires the closure to be
            // async-signal-safe; it only calls process::exit.
            // NOTE(review): process::exit is not strictly async-signal-safe,
            // but is acceptable for a deliberate crash test.
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }

        /// Reading through raw_ptr() should fault and exit via the handler.
        /// #[ignore]d: only meaningful when run in a child process by
        /// guard_page_testing_shim.
        // NOTE(review): raw_ptr() appears to point at a guard page rather than
        // the usable region — confirm against the mapping layout.
        #[test]
        #[ignore]
        fn read() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            // SAFETY: intentionally faulting; the SIGSEGV handler exits.
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }

        /// Writing through raw_ptr() should fault and exit via the handler.
        /// #[ignore]d: run only via guard_page_testing_shim.
        #[test]
        #[ignore]
        fn write() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            // SAFETY: intentionally faulting; the SIGSEGV handler exits.
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }

        /// Executing code at raw_ptr() should fault and exit via the handler.
        /// #[ignore]d: run only via guard_page_testing_shim.
        #[test]
        #[ignore]
        fn exec() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            // SAFETY: transmuting a data pointer to a fn pointer is UB to
            // call in general; here the call is meant to fault immediately.
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }

        /// Re-runs the three #[ignore]d crash tests above in child
        /// `cargo test` processes and asserts each dies with TEST_EXIT_CODE.
        #[test]
        #[cfg_attr(miri, ignore)] // spawns subprocesses; unsupported under miri
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];
            for test in tests {
                // Forward the cross-compilation target when the harness sets
                // one, so the child builds for the same triple.
                let triple = std::env::var("TARGET_TRIPLE").ok();
                let target_args = if let Some(triple) = triple.filter(|t| !t.is_empty()) {
                    vec!["--target".to_string(), triple.to_string()]
                } else {
                    vec![]
                };
                let output = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host", "--lib"])
                    .args(target_args)
                    .args(["--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .output()
                    .expect("Unable to launch tests");
                let exit_code = output.status.code();
                if exit_code != Some(TEST_EXIT_CODE.into()) {
                    // Dump child output before panicking to aid debugging.
                    eprintln!("=== Guard Page test '{}' failed ===", test);
                    eprintln!("Exit code: {:?} (expected {})", exit_code, TEST_EXIT_CODE);
                    eprintln!("=== STDOUT ===");
                    eprintln!("{}", String::from_utf8_lossy(&output.stdout));
                    eprintln!("=== STDERR ===");
                    eprintln!("{}", String::from_utf8_lossy(&output.stderr));
                    panic!(
                        "Guard Page test failed: {} (exit code {:?}, expected {})",
                        test, exit_code, TEST_EXIT_CODE
                    );
                }
            }
        }
    }
2002}
2003
/// A read-only handle to a host memory mapping.
///
/// Cloning is cheap: clones share the same underlying [`HostMapping`] via the
/// `Arc`, and equality between handles compares mapping pointers (see the
/// `PartialEq` impl).
#[derive(Clone, Debug)]
pub struct ReadonlySharedMemory {
    // Shared, reference-counted host mapping backing this view.
    region: Arc<HostMapping>,
    // Size the region occupies when mapped into the guest; `None` means
    // "use the full host memory size" (see `guest_mapped_size()`).
    #[cfg_attr(unshared_snapshot_mem, allow(dead_code))]
    guest_mapped_size: Option<usize>,
}
// SAFETY: NOTE(review) — asserted by the original author; soundness relies on
// HostMapping being safe to access from multiple threads for read-only use.
// Confirm against HostMapping's invariants.
unsafe impl Send for ReadonlySharedMemory {}
// SAFETY: see the Send impl above — same reasoning, shared (&self) access only.
unsafe impl Sync for ReadonlySharedMemory {}
2028
2029impl ReadonlySharedMemory {
2030 pub(crate) fn from_bytes(contents: &[u8]) -> Result<Self> {
2031 let mut anon = ExclusiveSharedMemory::new(contents.len())?;
2032 anon.copy_from_slice(contents, 0)?;
2033 Ok(ReadonlySharedMemory {
2034 region: anon.region,
2035 guest_mapped_size: None,
2036 })
2037 }
2038
2039 pub(crate) fn from_bytes_with_mapped_size(
2040 contents: &[u8],
2041 guest_mapped_size: usize,
2042 ) -> Result<Self> {
2043 let mut anon = ExclusiveSharedMemory::new(contents.len())?;
2044 anon.copy_from_slice(contents, 0)?;
2045 Ok(ReadonlySharedMemory {
2046 region: anon.region,
2047 guest_mapped_size: Some(guest_mapped_size),
2048 })
2049 }
2050
2051 #[cfg(not(unshared_snapshot_mem))]
2054 pub(crate) fn guest_mapped_size(&self) -> usize {
2055 self.guest_mapped_size.unwrap_or_else(|| self.mem_size())
2056 }
2057
2058 pub(crate) fn as_slice(&self) -> &[u8] {
2059 unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
2060 }
2061
2062 #[cfg(unshared_snapshot_mem)]
2063 pub(crate) fn copy_to_writable(&self) -> Result<ExclusiveSharedMemory> {
2064 let mut writable = ExclusiveSharedMemory::new(self.mem_size())?;
2065 writable.copy_from_slice(self.as_slice(), 0)?;
2066 Ok(writable)
2067 }
2068
2069 #[cfg(not(unshared_snapshot_mem))]
2070 pub(crate) fn build(self) -> (Self, Self) {
2071 (self.clone(), self)
2072 }
2073
2074 #[cfg(not(unshared_snapshot_mem))]
2075 pub(crate) fn mapping_at(
2076 &self,
2077 guest_base: u64,
2078 region_type: MemoryRegionType,
2079 ) -> MemoryRegion {
2080 #[allow(clippy::panic)]
2081 if region_type != MemoryRegionType::Snapshot {
2085 panic!("ReadonlySharedMemory::mapping_at should only be used for Snapshot regions");
2086 }
2087 mapping_at(
2088 self,
2089 guest_base,
2090 self.guest_mapped_size(),
2091 region_type,
2092 MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
2093 )
2094 }
2095}
2096
impl SharedMemory for ReadonlySharedMemory {
    /// Returns the underlying host mapping backing this region.
    fn region(&self) -> &HostMapping {
        &self.region
    }
    /// Always fails: a read-only region can never grant exclusive (mutable)
    /// access, so the closure is discarded without being called.
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        _: F,
    ) -> Result<T> {
        Err(new_error!(
            "Cannot take exclusive access to a ReadonlySharedMemory"
        ))
    }
    /// Runs `f` over the region's full contents as an immutable byte slice.
    fn with_contents<T, F: FnOnce(&[u8]) -> T>(&mut self, f: F) -> Result<T> {
        Ok(f(self.as_slice()))
    }
}
2116
/// Equality against any `SharedMemory` is identity of the mapping, not of
/// the contents: two handles compare equal iff their `raw_ptr()` values match.
impl<S: SharedMemory> PartialEq<S> for ReadonlySharedMemory {
    fn eq(&self, other: &S) -> bool {
        self.raw_ptr() == other.raw_ptr()
    }
}