hyperlight_host/mem/shared_mem.rs

/*
Copyright 2025  The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
#[cfg(target_os = "linux")]
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};

use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{Span, instrument};
#[cfg(target_os = "windows")]
use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::PAGE_READWRITE;
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{
    CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
    PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
};
#[cfg(target_os = "windows")]
use windows::core::PCSTR;

#[cfg(target_os = "windows")]
use crate::HyperlightError::MemoryAllocationFailed;
#[cfg(target_os = "windows")]
use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
use crate::{Result, log_then_return, new_error};

/// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset + $size > $mem_size {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}

/// generates a reader function for the given type
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        /// Read a value of type `$ty` from the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}

/// generates a writer function for the given type
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        /// Write a value of type `$ty` to the memory at the given offset.
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
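
// For illustration, an invocation such as `generate_reader!(read_u64, u64)`
// inside `impl ExclusiveSharedMemory` expands to roughly the following
// hand-written method (tracing/lint attributes elided):
//
//     pub(crate) fn read_u64(&self, offset: usize) -> Result<u64> {
//         let data = self.as_slice();
//         bounds_check!(offset, std::mem::size_of::<u64>(), data.len());
//         Ok(u64::from_le_bytes(
//             data[offset..offset + std::mem::size_of::<u64>()].try_into()?,
//         ))
//     }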

/// A representation of a host mapping of a shared memory region,
/// which will be released when this structure is Drop'd. This is not
/// individually Clone (since it holds ownership of the mapping), or
/// Send or Sync, since it doesn't ensure any particular synchronization.
#[derive(Debug)]
pub struct HostMapping {
    ptr: *mut u8,
    size: usize,
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}

impl Drop for HostMapping {
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}

/// These three structures represent various phases of the lifecycle of
/// a memory buffer that is shared with the guest. An
/// ExclusiveSharedMemory is used for certain operations that
/// unrestrictedly write to the shared memory, including setting it up
/// and taking snapshots.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
unsafe impl Send for ExclusiveSharedMemory {}

/// A GuestSharedMemory is used to represent
/// the reference to all-of-memory that is taken by the virtual cpu.
/// Because of the memory model limitations that affect
/// HostSharedMemory, it is likely fairly important (to ensure that
/// our UB remains limited to interaction with an external compilation
/// unit that likely can't be discovered by the compiler) that _rust_
/// users do not perform racy accesses to the guest communication
/// buffers that are also accessed by HostSharedMemory.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// The lock that indicates this shared memory is being used by non-Rust code
    ///
    /// This lock _must_ be held whenever the guest is executing,
    /// because it prevents the host from converting its
    /// HostSharedMemory to an ExclusiveSharedMemory. Since the guest
    /// may arbitrarily mutate the shared memory, only synchronized
    /// accesses from Rust should be allowed!
    ///
    /// We cannot enforce this in the type system, because the memory
    /// is mapped in to the VM at VM creation time.
    pub lock: Arc<RwLock<()>>,
}
unsafe impl Send for GuestSharedMemory {}

/// A HostSharedMemory allows synchronized accesses to guest
/// communication buffers, allowing it to be used concurrently with a
/// GuestSharedMemory.
///
/// Given future requirements for asynchronous I/O with a minimum
/// amount of copying (e.g. WASIp3 streams), we would like it to be
/// possible to safely access these buffers concurrently with the
/// guest, ensuring that (1) data is read appropriately if the guest
/// is well-behaved; and (2) the host's behaviour is defined
/// regardless of whether or not the guest is well-behaved.
///
/// The ideal (future) flow for a guest->host message is something like
///   - Guest writes (unordered) bytes describing a work item into a buffer
///   - Guest reveals buffer via a release-store of a pointer into an
///     MMIO ring-buffer
///   - Host acquire-loads the buffer pointer from the "MMIO" ring
///     buffer
///   - Host (unordered) reads the bytes from the buffer
///   - Host performs validation of those bytes and uses them
///
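/// If the guest were simply another Rust thread, the host side of that
/// flow could be written with ordinary atomics, roughly as in the
/// sketch below (the `slot` and `msg_len` names are hypothetical and
/// do not correspond to anything in this crate):
///
/// ```ignore
/// use std::sync::atomic::{AtomicU64, Ordering};
///
/// // Hypothetical: one ring-buffer slot shared with the guest.
/// fn host_poll(slot: &AtomicU64, mem: &[u8], msg_len: usize) -> Option<Vec<u8>> {
///     // Acquire-load the offset that the guest release-stored.
///     let offset = slot.load(Ordering::Acquire) as usize;
///     if offset == 0 {
///         return None;
///     }
///     // Plain (unordered) reads of the bytes the guest wrote before its store.
///     Some(mem[offset..offset + msg_len].to_vec())
/// }
/// ```
///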
/// Unfortunately, there appears to be no way to do this with defined
/// behaviour in present Rust (see
/// e.g. <https://github.com/rust-lang/unsafe-code-guidelines/issues/152>).
/// Rust does not yet have its own defined memory model, but in the
/// interim, it is widely treated as inheriting the current C/C++
/// memory models.  The most immediate problem is that regardless of
/// anything else, under those memory models \[1, p. 17-18; 2, p. 88\],
///
///   > The execution of a program contains a _data race_ if it
///   > contains two [C++23: "potentially concurrent"] conflicting
///   > actions [C23: "in different threads"], at least one of which
///   > is not atomic, and neither happens before the other [C++23: ",
///   > except for the special case for signal handlers described
///   > below"].  Any such data race results in undefined behavior.
///
/// Consequently, if a misbehaving guest fails to correctly
/// synchronize its stores with the host, the host's innocent loads
/// will trigger undefined behaviour for the entire program, including
/// the host.  Note that this also applies if the guest makes an
/// unsynchronized read of a location that the host is writing!
///
/// Despite Rust's de jure inheritance of the C memory model at the
/// present time, the compiler in many cases de facto adheres to LLVM
/// semantics, so it is worthwhile to consider what LLVM does in this
/// case as well.  According to the LangRef \[3\] memory model,
/// loads which are involved in a race that includes at least one
/// non-atomic access (whether the load or a store) return `undef`,
/// making them roughly equivalent to reading uninitialized
/// memory. While this is much better, it is still bad.
///
/// Considering a different direction, recent C++ papers have seemed
/// to lean towards using `volatile` for similar use cases. For
/// example, in P1152R0 \[4\], JF Bastien notes that
///
///   > We’ve shown that volatile is purposely defined to denote
///   > external modifications. This happens for:
///   >   - Shared memory with untrusted code, where volatile is the
///   >     right way to avoid time-of-check time-of-use (ToCToU)
///   >     races which lead to security bugs such as \[PWN2OWN\] and
///   >     \[XENXSA155\].
///
/// Unfortunately, although this paper was adopted for C++20 (and,
/// sadly, mostly un-adopted for C++23, although that does not concern
/// us), the paper did not actually redefine volatile accesses or data
/// races to prevent volatile accesses from racing with other accesses
/// and causing undefined behaviour.  P1382R1 \[5\] would have amended
/// the wording of the data race definition to specifically exclude
/// volatile, but, unfortunately, despite receiving a
/// generally-positive reception at its first WG21 meeting more than
/// five years ago, it has not progressed.
///
/// Separately from the data race issue, there is also a concern that
/// according to the various memory models in use, there may be ways
/// in which the guest can semantically obtain uninitialized memory
/// and write it into the shared buffer, which may also result in
/// undefined behaviour on reads.  The degree to which this is a
/// concern is unclear, however, since it is unclear to what degree
/// the Rust abstract machine's conception of uninitialized memory
/// applies to the sandbox.  Returning briefly to the LLVM level
/// rather than the Rust level: because racing loads in LLVM return
/// `undef`, as discussed above, we would ideally `llvm.freeze` the
/// result of any load out of the sandbox.
///
/// It would furthermore be ideal if we could run the flatbuffers
/// parsing code directly on the guest memory, in order to avoid
/// unnecessary copies.  That is unfortunately probably not viable at
/// the present time: because the generated flatbuffers parsing code
/// doesn't use atomic or volatile accesses, it is likely to introduce
/// double-read vulnerabilities.
///
/// In short, none of the Rust-level operations available to us do the
/// right thing, at the Rust spec level or the LLVM spec level. Our
/// major remaining options are therefore:
///   - Choose one of the options that is available to us, and accept
///     that we are doing something unsound according to the spec, but
///     hope that no reasonable compiler could possibly notice.
///   - Use inline assembly per architecture, for which we would only
///     need to worry about the _architecture_'s memory model (which
///     is far less demanding).
///
/// The leading candidate for the first option would seem to be to
/// simply use volatile accesses; there seems to be wide agreement
/// that this _should_ be a valid use case for them (even if it isn't
/// now), and projects like Linux and rust-vmm already use C11
/// `volatile` for this purpose.  It is also worth noting that because
/// we still do need to synchronize with the guest when it _is_ being
/// well-behaved, we would ideally use volatile acquire loads and
/// volatile release stores for interacting with the stack pointer in
/// the guest in this case.  Unfortunately, while those operations are
/// defined in LLVM, they are not presently exposed to Rust. While
/// atomic fences that are not associated with memory accesses
/// ([`std::sync::atomic::fence`]) might at first glance seem to help with
/// this problem, they unfortunately do not \[6\]:
///
///    > A fence ‘A’ which has (at least) Release ordering semantics,
///    > synchronizes with a fence ‘B’ with (at least) Acquire
///    > semantics, if and only if there exist operations X and Y,
///    > both operating on some atomic object ‘M’ such that A is
///    > sequenced before X, Y is sequenced before B and Y observes
///    > the change to M. This provides a happens-before dependence
///    > between A and B.
///
/// Note that the X and Y must be to an _atomic_ object.
///
/// We consequently assume that there has been a strong architectural
/// fence on a vmenter/vmexit between data being read and written.
/// This is unsafe (not guaranteed in the type system)!
///
/// \[1\] N3047 C23 Working Draft. <https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3047.pdf>
/// \[2\] N4950 C++23 Working Draft. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/n4950.pdf>
/// \[3\] LLVM Language Reference Manual, Memory Model for Concurrent Operations. <https://llvm.org/docs/LangRef.html#memmodel>
/// \[4\] P1152R0: Deprecating `volatile`. JF Bastien. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1152r0.html>
/// \[5\] P1382R1: `volatile_load<T>` and `volatile_store<T>`. JF Bastien, Paul McKenney, Jeffrey Yasskin, and the indefatigable TBD. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1382r1.pdf>
/// \[6\] Documentation for std::sync::atomic::fence. <https://doc.rust-lang.org/std/sync/atomic/fn.fence.html>
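///
/// # Example
///
/// A minimal usage sketch (marked `ignore` because it assumes this
/// module is reachable as `hyperlight_host::mem::shared_mem` and that
/// offset 0 is meaningful for the sandbox layout in use):
///
/// ```ignore
/// use hyperlight_host::mem::shared_mem::ExclusiveSharedMemory;
///
/// let excl = ExclusiveSharedMemory::new(0x4000)?;
/// let (host, _guest) = excl.build();
/// host.write::<u32>(0, 0xdead_beef)?;
/// assert_eq!(host.read::<u32>(0)?, 0xdead_beef);
/// ```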
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    lock: Arc<RwLock<()>>,
}
unsafe impl Send for HostSharedMemory {}

impl ExclusiveSharedMemory {
    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
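    ///
    /// A usage sketch (any non-zero multiple of the page size is
    /// accepted as the minimum size; 0x1000 is used here as one example):
    ///
    /// ```ignore
    /// let excl = ExclusiveSharedMemory::new(0x1000)?;
    /// assert_eq!(excl.mem_size(), 0x1000);
    /// ```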
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE, MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
            c_int, mmap, mprotect, off_t, size_t,
        };

        use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE) // guard page around the memory
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        // allocate the memory
        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
        }

        // protect the guard pages

        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }
        let res = unsafe {
            mprotect(
                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                PAGE_SIZE_USIZE,
                PROT_NONE,
            )
        };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }
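
    // The mapping created above is laid out as follows, where
    // total_size = min_size_bytes + 2 * PAGE_SIZE_USIZE:
    //
    //     addr                                          addr + total_size
    //     |  guard page |     usable memory          |  guard page |
    //     |  PROT_NONE  |  PROT_READ | PROT_WRITE    |  PROT_NONE  |
    //
    // `base_addr()` points at the start of the usable region (one page past
    // `addr`), and `mem_size()` reports `total_size - 2 * PAGE_SIZE_USIZE`.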

    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }
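
        // For example, a total_size of 0x1_0000_2000 bytes is passed to
        // CreateFileMappingA as dwmaximumsizehigh = 0x1 and
        // dwmaximumsizelow = 0x0000_2000.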

        // Allocate the memory using CreateFileMapping instead of VirtualAlloc.
        // This allows us to map the memory into the surrogate process using MapViewOfFile2.

        let flags = PAGE_READWRITE;

        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        let file_map = FILE_MAP_ALL_ACCESS;
        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Set the first and last pages to be guard pages

        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // If the following calls to VirtualProtect are changed make sure to update the calls to VirtualProtectEx in surrogate_process_manager.rs

        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    /// Internal helper method to get the backing memory as a mutable slice.
    ///
    /// # Safety
    /// As per std::slice::from_raw_parts_mut:
    /// - self.base_addr() must be valid for both reads and writes for
    ///   self.mem_size() * mem::size_of::<u8>() many bytes, and it
    ///   must be properly aligned.
    ///
    ///   The rules on validity are still somewhat unspecified, but we
    ///   assume that the result of our calls to mmap/CreateFileMappings may
    ///   be considered a single "allocated object". The use of
    ///   non-atomic accesses is alright from a Safe Rust standpoint,
    ///   because ExclusiveSharedMemory is not Sync.
    /// - self.base_addr() must point to self.mem_size() consecutive
    ///   properly initialized values of type u8
    ///
    ///   Again, the exact provenance restrictions on what is
    ///   considered to be initialized values are unclear, but we make
    ///   sure to use mmap(MAP_ANONYMOUS) and
    ///   CreateFileMapping(SEC_COMMIT), so the pages in question are
    ///   zero-initialized, which we hope counts for u8.
    /// - The memory referenced by the returned slice must not be
    ///   accessed through any other pointer (not derived from the
    ///   return value) for the duration of the lifetime 'a. Both read
    ///   and write accesses are forbidden.
    ///
    ///   Accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a mutable borrow of self.
    /// - The total size self.mem_size() * mem::size_of::<u8>() of the
    ///   slice must be no larger than isize::MAX, and adding that
    ///   size to data must not "wrap around" the address space. See
    ///   the safety documentation of pointer::offset.
    ///
    ///   This is ensured by a check in ::new()
    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Internal helper method to get the backing memory as a slice.
    ///
    /// # Safety
    /// See the discussion on as_mut_slice, with the third point
    /// replaced by:
    /// - The memory referenced by the returned slice must not be
    ///   mutated for the duration of lifetime 'a, except inside an
    ///   UnsafeCell.
    ///
    ///   Host accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a borrow of self, preventing mutations via other methods.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copy the entire contents of `self` into a `Vec<u8>`, then return it
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies all bytes from `src` to `self` starting at offset
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Convert the ExclusiveSharedMemory, which may be freely
    /// modified, into a GuestSharedMemory, which may be somewhat
    /// freely modified (mostly by the guest), and a HostSharedMemory,
    /// which may only make certain kinds of accesses that do not race
    /// in the presence of malicious code inside the guest mutating
    /// the GuestSharedMemory.
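    ///
    /// A usage sketch (assuming a freshly created region):
    ///
    /// ```ignore
    /// let excl = ExclusiveSharedMemory::new(0x1000)?;
    /// let (host, guest) = excl.build();
    /// // `guest` is what gets mapped into the VM; `host` is used for
    /// // synchronized I/O with the guest's communication buffers.
    /// host.write::<u64>(0, 42)?;
    /// ```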
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        (
            HostSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
            GuestSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
        )
    }

    /// Gets the file handle of the shared memory region for this Sandbox
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }
}

/// A trait that abstracts over the particular kind of SharedMemory,
/// used when invoking operations from Rust that absolutely must have
/// exclusive control over the shared memory for correctness +
/// performance, like snapshotting.
pub trait SharedMemory {
    /// Return a readonly reference to the host mapping backing this SharedMemory
    fn region(&self) -> &HostMapping;

    /// Return the base address of the host mapping of this
    /// region. Following the general Rust philosophy, this does not
    /// need to be marked as `unsafe` because doing anything with this
    /// pointer itself requires `unsafe`.
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Return the base address of the host mapping of this region as
    /// a pointer. Following the general Rust philosophy, this does
    /// not need to be marked as `unsafe` because doing anything with
    /// this pointer itself requires `unsafe`.
    fn base_ptr(&self) -> *mut u8 {
        self.base_addr() as *mut u8
    }

    /// Return the length of usable memory contained in `self`.
    /// The returned size does not include the size of the surrounding
    /// guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }
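
    // Concretely, with 4 KiB pages, a region created with
    // `ExclusiveSharedMemory::new(0x2000)` has a raw mapping of 0x4000
    // bytes, `base_addr()` equal to `raw_ptr() as usize + 0x1000`, and
    // `mem_size()` equal to 0x2000.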

    /// Return the raw base address of the host mapping, including the
    /// guard pages.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Return the raw size of the host mapping, including the guard
    /// pages.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Run some code with exclusive access to the SharedMemory
    /// underlying this.  If the SharedMemory is not an
    /// ExclusiveSharedMemory, any concurrent accesses to the relevant
    /// HostSharedMemory/GuestSharedMemory may make this fail, or be
    /// made to fail by this, and should be avoided.
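    ///
    /// A usage sketch (assuming `hshm` is a mutable `HostSharedMemory`
    /// and the guest is not currently executing):
    ///
    /// ```ignore
    /// let snapshot: Vec<u8> = hshm.with_exclusivity(|excl| excl.as_slice().to_vec())?;
    /// ```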
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
}

impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}

impl SharedMemory for GuestSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

/// An unsafe marker trait for types for which all bit patterns are valid.
/// This is required in order for it to be safe to read a value of a particular
/// type out of the sandbox from the HostSharedMemory.
///
/// # Safety
/// This must only be implemented for types for which all bit patterns
/// are valid. It requires that any (non-undef/poison) value of the
/// correct size can be transmuted to the type.
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}
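
// For illustration, a hypothetical guest-shared struct could opt in the same
// way, provided it is `#[repr(C)]`, free of padding, and every bit pattern of
// every field is valid:
//
//     #[repr(C)]
//     struct GuestCounters {
//         calls: u64,
//         errors: u64,
//     }
//     unsafe impl AllValid for GuestCounters {}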

impl HostSharedMemory {
    /// Read a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        }
    }

    /// Write a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy the contents of the sandbox at the specified offset into
    /// the given slice
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter_mut().enumerate() {
            unsafe {
                *b = base.wrapping_add(i).read_volatile();
            }
        }
        drop(guard);
        Ok(())
    }

    /// Copy the contents of the given slice into the sandbox at the
    /// specified offset
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter().enumerate() {
            unsafe {
                base.wrapping_add(i).write_volatile(*b);
            }
        }
        drop(guard);
        Ok(())
    }

    /// Fill the memory in the range `[offset, offset + len)` with `value`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for i in 0..len {
            unsafe { base.wrapping_add(i).write_volatile(value) };
        }
        drop(guard);
        Ok(())
    }

    /// Pushes the given data onto the stack buffer in shared memory that starts at the given offset.
    /// NOTE! buffer_start_offset must point to the beginning of the buffer
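    ///
    /// The buffer is laid out as a simple stack (all offsets below are
    /// relative to `buffer_start_offset`):
    ///
    /// ```text
    /// [0..8)        u64 stack pointer: offset of the next free byte (initially 8)
    /// [8..8+N)      bytes of the first pushed element (N = data.len())
    /// [8+N..16+N)   u64: offset at which that element starts (i.e. 8)
    /// ...           further elements follow the same pattern
    /// ```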
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        // get absolute
        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // write the actual data to the top of stack
        self.copy_from_slice(data, stack_pointer_abs)?;

        // write the offset to the newly written data, to the top of stack.
        // this is used when popping the stack, to know how far back to jump
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // update stack pointer to point to the next free address
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the given buffer into a `T` and returns it.
    /// NOTE! the data must be a size-prefixed flatbuffer, and
    /// buffer_start_offset must point to the beginning of the buffer
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        // get the stackpointer
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        // make it absolute
        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // go back 8 bytes to get offset to element on top of stack
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // make it absolute
        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Get the size of the flatbuffer buffer from memory
        let fb_buffer_size = {
            let size_i32 = self.read::<u32>(last_element_offset_abs)? + 4;
            // ^^^ flatbuffer byte arrays are prefixed by 4 bytes
            // indicating its size, so, to get the actual size, we need
            // to add 4.
            usize::try_from(size_i32)
        }?;

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // update the stack pointer to point to the element we just popped off since that is now free
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // zero out the memory we just popped off
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}

impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use proptest::prelude::*;

    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
    use crate::Result;
    use crate::mem::shared_mem_tests::read_write_test_suite;

    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();

        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();

        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();

        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));

        hshm.fill(5, 0, 4096).unwrap();

        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));

        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }

    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // write the value to the memory at the beginning.
        hshm.copy_from_slice(&vec, 0)?;

        let mut vec2 = vec![0; vec_len];
        // read the value back from the memory at the beginning.
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);

        let offset = mem_size - vec.len();
        // write the value to the memory at the end.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec3 = vec![0; vec_len];
        // read the value back from the memory at the end.
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);

        let offset = mem_size / 2;
        // write the value to the memory at the middle.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec4 = vec![0; vec_len];
        // read the value back from the memory at the middle.
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);

        // try and read a value from an offset that is beyond the end of the memory.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());

        // try and write a value to an offset that is beyond the end of the memory.
        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());

        // try and read a value from an offset that is too large.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());

        // try and write a value to an offset that is too large.
        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());

        // try and read a value that is too large.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());

        // try and write a value that is too large.
        assert!(hshm.copy_from_slice(&vec7, 0).is_err());

        Ok(())
    }

    proptest! {
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }

    #[test]
    fn alloc_fail() {
        let gm = ExclusiveSharedMemory::new(0);
        assert!(gm.is_err());
        let gm = ExclusiveSharedMemory::new(usize::MAX);
        assert!(gm.is_err());
    }

    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();

        // after hshm1 is cloned, hshm1 and hshm2 should have identical
        // memory sizes and pointers.
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());

        // we should be able to copy a byte array into both hshm1 and hshm2,
        // and have both changes be reflected in all clones
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();

        // at this point, both hshm1 and hshm2 should have
        // offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }

        // after we drop hshm1, hshm2 should still exist, be valid,
        // and have all contents from before hshm1 was dropped
        drop(hshm1);

        // at this point, hshm2 should still have offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }

    #[test]
    fn copy_all_to_vec() {
        let mut data = vec![b'a', b'b', b'c'];
        data.resize(4096, 0);
        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
        let ret_vec = eshm.copy_all_to_vec().unwrap();
        assert_eq!(data, ret_vec);
    }

    /// A test to ensure that, if a `SharedMem` instance is cloned
    /// and _all_ clones are dropped, the memory region will no longer
    /// be valid.
    ///
    /// This test is ignored because it is incompatible with other tests as
    /// they may be allocating memory at the same time.
    ///
    /// Marking this test as ignored means that running `cargo test` will not
    /// run it. This feature will allow a developer who runs that command
    /// from their workstation to be successful without needing to know about
    /// test interdependencies. This test will, however, be run explicitly as a
    /// part of the CI pipeline.
    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_drop() {
        use proc_maps::maps_contain_addr;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();
        let addr = hshm1.raw_ptr() as usize;

        // ensure the address is in the process's virtual memory
        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            maps_contain_addr(addr, &maps_before_drop),
            "shared memory address {:#x} was not found in process map, but should be",
            addr,
        );
        // drop both shared memory instances, which should result
        // in freeing the memory region
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        // now, ensure the address is not in the process's virtual memory
        assert!(
            !maps_contain_addr(addr, &maps_after_drop),
            "shared memory address {:#x} was found in the process map, but shouldn't be",
            addr
        );
    }

    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

        const TEST_EXIT_CODE: u8 = 211; // an uncommon exit code, used for testing purposes

        /// hook sigsegv to exit with status code, to make it testable, rather than have it exit from a signal
        /// NOTE: We CANNOT panic!() in the handler, and make the tests #[should_panic], because
        ///     the test harness process will crash anyway after the test passes
        fn setup_signal_handler() {
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn read() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn write() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn exec() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }

        // provides a way for running the above tests in a separate process since they expect to crash
        #[test]
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];
            for test in tests {
                let triple = std::env::var("TARGET_TRIPLE").ok();
                let target_args = if let Some(triple) = triple.filter(|t| !t.is_empty()) {
                    vec!["--target".to_string(), triple.to_string()]
                } else {
                    vec![]
                };
                let status = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host"])
                    .args(target_args)
                    .args(["--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .status()
                    .expect("Unable to launch tests");
                assert_eq!(
                    status.code(),
                    Some(TEST_EXIT_CODE.into()),
                    "Guard Page test failed: {}",
                    test
                );
            }
        }
    }
}