hyperlight_host/mem/shared_mem.rs

/*
Copyright 2025 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
#[cfg(target_os = "linux")]
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};

use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{Span, instrument};
#[cfg(target_os = "windows")]
use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::PAGE_READWRITE;
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{
    CreateFileMappingA, FILE_MAP_ALL_ACCESS, MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFile,
    PAGE_EXECUTE_READWRITE, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, UnmapViewOfFile, VirtualProtect,
};
#[cfg(target_os = "windows")]
use windows::core::PCSTR;

#[cfg(target_os = "windows")]
use crate::HyperlightError::MemoryAllocationFailed;
#[cfg(target_os = "windows")]
use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
use crate::{Result, log_then_return, new_error};

/// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset + $size > $mem_size {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}

/// Generates a reader function for the given type
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        /// Read a value of type `$ty` from the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}

/// Generates a writer function for the given type
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        /// Write a value of type `$ty` to the memory at the given offset.
        #[allow(dead_code)]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
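
// For illustration, `generate_reader!(read_u64, u64)` expands to roughly the
// following method (a sketch; the real expansion also carries the
// `#[instrument]` attribute shown above):
//
//     pub(crate) fn read_u64(&self, offset: usize) -> Result<u64> {
//         let data = self.as_slice();
//         bounds_check!(offset, std::mem::size_of::<u64>(), data.len());
//         Ok(u64::from_le_bytes(
//             data[offset..offset + std::mem::size_of::<u64>()].try_into()?,
//         ))
//     }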

/// A representation of a host mapping of a shared memory region,
/// which will be released when this structure is Drop'd. This is not
/// individually Clone (since it holds ownership of the mapping), or
/// Send or Sync, since it doesn't ensure any particular synchronization.
#[derive(Debug)]
pub struct HostMapping {
    ptr: *mut u8,
    size: usize,
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}

impl Drop for HostMapping {
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}

/// These three structures represent various phases of the lifecycle of
/// a memory buffer that is shared with the guest. An
/// ExclusiveSharedMemory is used for certain operations that
/// unrestrictedly write to the shared memory, including setting it up
/// and taking snapshots.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
unsafe impl Send for ExclusiveSharedMemory {}

/// A GuestSharedMemory is used to represent
/// the reference to all-of-memory that is taken by the virtual CPU.
/// Because of the memory model limitations that affect
/// HostSharedMemory, it is likely fairly important (to ensure that
/// our UB remains limited to interaction with an external compilation
/// unit that likely can't be discovered by the compiler) that _Rust_
/// users do not perform racy accesses to the guest communication
/// buffers that are also accessed by HostSharedMemory.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// The lock that indicates this shared memory is being used by non-Rust code
    ///
    /// This lock _must_ be held whenever the guest is executing,
    /// because it prevents the host from converting its
    /// HostSharedMemory to an ExclusiveSharedMemory. Since the guest
    /// may arbitrarily mutate the shared memory, only synchronized
    /// accesses from Rust should be allowed!
    ///
    /// We cannot enforce this in the type system, because the memory
    /// is mapped into the VM at VM creation time.
    pub lock: Arc<RwLock<()>>,
}
unsafe impl Send for GuestSharedMemory {}

/// A HostSharedMemory allows synchronized accesses to guest
/// communication buffers, allowing it to be used concurrently with a
/// GuestSharedMemory.
///
/// Given future requirements for asynchronous I/O with a minimum
/// amount of copying (e.g. WASIp3 streams), we would like it to be
/// possible to safely access these buffers concurrently with the
/// guest, ensuring that (1) data is read appropriately if the guest
/// is well-behaved; and (2) the host's behaviour is defined
/// regardless of whether or not the guest is well-behaved.
///
/// The ideal (future) flow for a guest->host message is something like
///   - Guest writes (unordered) bytes describing a work item into a buffer
///   - Guest reveals buffer via a release-store of a pointer into an
///     MMIO ring-buffer
///   - Host acquire-loads the buffer pointer from the "MMIO" ring
///     buffer
///   - Host (unordered) reads the bytes from the buffer
///   - Host performs validation of those bytes and uses them
///
/// Unfortunately, there appears to be no way to do this with defined
/// behaviour in present Rust (see
/// e.g. <https://github.com/rust-lang/unsafe-code-guidelines/issues/152>).
/// Rust does not yet have its own defined memory model, but in the
/// interim, it is widely treated as inheriting the current C/C++
/// memory models.  The most immediate problem is that regardless of
/// anything else, under those memory models \[1, p. 17-18; 2, p. 88\],
///
///   > The execution of a program contains a _data race_ if it
///   > contains two [C++23: "potentially concurrent"] conflicting
///   > actions [C23: "in different threads"], at least one of which
///   > is not atomic, and neither happens before the other [C++23: ",
///   > except for the special case for signal handlers described
///   > below"].  Any such data race results in undefined behavior.
///
/// Consequently, if a misbehaving guest fails to correctly
/// synchronize its stores with the host, the host's innocent loads
/// will trigger undefined behaviour for the entire program, including
/// the host.  Note that this also applies if the guest makes an
/// unsynchronized read of a location that the host is writing!
///
/// Despite Rust's de jure inheritance of the C memory model at the
/// present time, the compiler in many cases de facto adheres to LLVM
/// semantics, so it is worthwhile to consider what LLVM does in this
/// case as well.  According to the LangRef \[3\] memory model,
/// loads which are involved in a race that includes at least one
/// non-atomic access (whether the load or a store) return `undef`,
/// making them roughly equivalent to reading uninitialized
/// memory. While this is much better, it is still bad.
///
/// Considering a different direction, recent C++ papers have seemed
/// to lean towards using `volatile` for similar use cases. For
/// example, in P1152R0 \[4\], JF Bastien notes that
///
///   > We’ve shown that volatile is purposely defined to denote
///   > external modifications. This happens for:
///   >   - Shared memory with untrusted code, where volatile is the
///   >     right way to avoid time-of-check time-of-use (ToCToU)
///   >     races which lead to security bugs such as \[PWN2OWN\] and
///   >     \[XENXSA155\].
///
/// Unfortunately, although this paper was adopted for C++20 (and,
/// sadly, mostly un-adopted for C++23, although that does not concern
/// us), the paper did not actually redefine volatile accesses or data
/// races to prevent volatile accesses from racing with other accesses
/// and causing undefined behaviour.  P1382R1 \[5\] would have amended
/// the wording of the data race definition to specifically exclude
/// volatile, but, unfortunately, despite receiving a
/// generally-positive reception at its first WG21 meeting more than
/// five years ago, it has not progressed.
///
/// Separately from the data race issue, there is also a concern that
/// according to the various memory models in use, there may be ways
/// in which the guest can semantically obtain uninitialized memory
/// and write it into the shared buffer, which may also result in
/// undefined behaviour on reads.  The degree to which this is a
/// concern is unclear, however, since it is unclear to what degree
/// the Rust abstract machine's conception of uninitialized memory
/// applies to the sandbox.  Returning briefly to the LLVM level,
/// rather than the Rust level, this concern, combined with the fact
/// that racing loads in LLVM return `undef` (as discussed above),
/// means that we would ideally `llvm.freeze` the result of any load
/// out of the sandbox.
///
/// It would furthermore be ideal if we could run the flatbuffers
/// parsing code directly on the guest memory, in order to avoid
/// unnecessary copies.  That is unfortunately probably not viable at
/// the present time: because the generated flatbuffers parsing code
/// doesn't use atomic or volatile accesses, it is likely to introduce
/// double-read vulnerabilities.
///
/// In short, none of the Rust-level operations available to us do the
/// right thing, at the Rust spec level or the LLVM spec level. Our
/// major remaining options are therefore:
///   - Choose one of the options that is available to us, and accept
///     that we are doing something unsound according to the spec, but
///     hope that no reasonable compiler could possibly notice.
///   - Use inline assembly per architecture, for which we would only
///     need to worry about the _architecture_'s memory model (which
///     is far less demanding).
///
/// The leading candidate for the first option would seem to be to
/// simply use volatile accesses; there seems to be wide agreement
/// that this _should_ be a valid use case for them (even if it isn't
/// now), and projects like Linux and rust-vmm already use C11
/// `volatile` for this purpose.  It is also worth noting that because
/// we still do need to synchronize with the guest when it _is_ being
/// well-behaved, we would ideally use volatile acquire loads and
/// volatile release stores for interacting with the stack pointer in
/// the guest in this case.  Unfortunately, while those operations are
/// defined in LLVM, they are not presently exposed to Rust. While
/// atomic fences that are not associated with memory accesses
/// ([`std::sync::atomic::fence`]) might at first glance seem to help with
/// this problem, they unfortunately do not \[6\]:
///
///    > A fence ‘A’ which has (at least) Release ordering semantics,
///    > synchronizes with a fence ‘B’ with (at least) Acquire
///    > semantics, if and only if there exist operations X and Y,
///    > both operating on some atomic object ‘M’ such that A is
///    > sequenced before X, Y is sequenced before B and Y observes
///    > the change to M. This provides a happens-before dependence
///    > between A and B.
///
/// Note that the X and Y must be to an _atomic_ object.
///
/// We consequently assume that there has been a strong architectural
/// fence on a vmenter/vmexit between data being read and written.
/// This is unsafe (not guaranteed in the type system)!
///
/// \[1\] N3047 C23 Working Draft. <https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3047.pdf>
/// \[2\] N4950 C++23 Working Draft. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/n4950.pdf>
/// \[3\] LLVM Language Reference Manual, Memory Model for Concurrent Operations. <https://llvm.org/docs/LangRef.html#memmodel>
/// \[4\] P1152R0: Deprecating `volatile`. JF Bastien. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1152r0.html>
/// \[5\] P1382R1: `volatile_load<T>` and `volatile_store<T>`. JF Bastien, Paul McKenney, Jeffrey Yasskin, and the indefatigable TBD. <https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1382r1.pdf>
/// \[6\] Documentation for std::sync::atomic::fence. <https://doc.rust-lang.org/std/sync/atomic/fn.fence.html>
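///
/// # Example
///
/// A minimal sketch of the intended host-side access discipline
/// (illustrative only, hence not compiled; offsets and values are made up):
/// guest-visible memory is touched only through the synchronized, volatile
/// accessors on this type, never through a plain slice over the mapping.
///
/// ```ignore
/// let excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
/// let (host, guest) = excl.build();
/// // `guest` is what gets mapped into the VM; `host` stays on the host side.
/// host.write::<u64>(0x0, 42)?;          // volatile store, shared lock held
/// let v: u64 = host.read::<u64>(0x0)?;  // volatile load, shared lock held
/// assert_eq!(v, 42);
/// ```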
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    lock: Arc<RwLock<()>>,
}
unsafe impl Send for HostSharedMemory {}

impl ExclusiveSharedMemory {
    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
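    ///
    /// Layout sketch of the resulting host mapping (illustrative):
    ///
    /// ```text
    /// | guard page (no access) | min_size_bytes usable memory | guard page (no access) |
    /// ^ raw_ptr()              ^ base_ptr() / base_addr()
    /// ```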
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE, MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
            c_int, mmap, mprotect, off_t, size_t,
        };

        use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE) // guard pages around the memory
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        // allocate the memory
        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
        }

        // protect the guard pages

        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }
        let res = unsafe {
            mprotect(
                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                PAGE_SIZE_USIZE,
                PROT_NONE,
            )
        };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        // Allocate the memory using CreateFileMapping instead of VirtualAlloc.
        // This allows us to map the memory into the surrogate process using MapViewOfFile2.

        let flags = PAGE_READWRITE;

        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        let file_map = FILE_MAP_ALL_ACCESS;
        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Set the first and last pages to be guard pages

        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // If the following calls to VirtualProtect are changed, make sure to update
        // the calls to VirtualProtectEx in surrogate_process_manager.rs

        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    #[allow(dead_code)]
    pub(super) fn make_memory_executable(&self) -> Result<()> {
        #[cfg(target_os = "windows")]
        {
            let mut _old_flags = PAGE_PROTECTION_FLAGS::default();
            if let Err(e) = unsafe {
                VirtualProtect(
                    self.region.ptr as *const c_void,
                    self.region.size,
                    PAGE_EXECUTE_READWRITE,
                    &mut _old_flags as *mut PAGE_PROTECTION_FLAGS,
                )
            } {
                log_then_return!(WindowsAPIError(e.clone()));
            }
        }

        // make the memory executable on Linux
        #[cfg(target_os = "linux")]
        {
            use libc::{PROT_EXEC, PROT_READ, PROT_WRITE, mprotect};

            let res = unsafe {
                mprotect(
                    self.region.ptr as *mut c_void,
                    self.region.size,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                )
            };

            if res != 0 {
                return Err(new_error!(
                    "Failed to make memory executable: {:#?}",
                    Error::last_os_error().raw_os_error()
                ));
            }
        }
        Ok(())
    }

    /// Internal helper method to get the backing memory as a mutable slice.
    ///
    /// # Safety
    /// As per std::slice::from_raw_parts_mut:
    /// - self.base_addr() must be valid for both reads and writes for
    ///   self.mem_size() * mem::size_of::<u8>() many bytes, and it
    ///   must be properly aligned.
    ///
    ///   The rules on validity are still somewhat unspecified, but we
    ///   assume that the result of our calls to mmap/CreateFileMapping may
    ///   be considered a single "allocated object". The use of
    ///   non-atomic accesses is alright from a Safe Rust standpoint,
    ///   because ExclusiveSharedMemory is not Sync.
    /// - self.base_addr() must point to self.mem_size() consecutive
    ///   properly initialized values of type u8
    ///
    ///   Again, the exact provenance restrictions on what is
    ///   considered to be initialized values are unclear, but we make
    ///   sure to use mmap(MAP_ANONYMOUS) and
    ///   CreateFileMapping(SEC_COMMIT), so the pages in question are
    ///   zero-initialized, which we hope counts for u8.
    /// - The memory referenced by the returned slice must not be
    ///   accessed through any other pointer (not derived from the
    ///   return value) for the duration of the lifetime 'a. Both read
    ///   and write accesses are forbidden.
    ///
    ///   Accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a mutable borrow of self.
    /// - The total size self.mem_size() * mem::size_of::<u8>() of the
    ///   slice must be no larger than isize::MAX, and adding that
    ///   size to data must not "wrap around" the address space. See
    ///   the safety documentation of pointer::offset.
    ///
    ///   This is ensured by a check in ::new()
    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Internal helper method to get the backing memory as a slice.
    ///
    /// # Safety
    /// See the discussion on as_mut_slice, with the third point
    /// replaced by:
    /// - The memory referenced by the returned slice must not be
    ///   mutated for the duration of lifetime 'a, except inside an
    ///   UnsafeCell.
    ///
    ///   Host accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a borrow of self, preventing mutations via other methods.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copy the entire contents of `self` into a `Vec<u8>`, then return it
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies all bytes from `src` into `self`, starting at the given offset
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    /// Return the address of memory at an offset to this `SharedMemory` checking
    /// that the memory is within the bounds of the `SharedMemory`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    #[allow(dead_code)]
    pub(crate) fn calculate_address(&self, offset: usize) -> Result<usize> {
        bounds_check!(offset, 0, self.mem_size());
        Ok(self.base_addr() + offset)
    }

    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Convert the ExclusiveSharedMemory, which may be freely
    /// modified, into a GuestSharedMemory, which may be somewhat
    /// freely modified (mostly by the guest), and a HostSharedMemory,
    /// which may only make certain kinds of accesses that do not race
    /// in the presence of malicious code inside the guest mutating
    /// the GuestSharedMemory.
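    ///
    /// # Example
    ///
    /// A minimal sketch of the intended lifecycle (not compiled here, since
    /// these types are crate-internal):
    ///
    /// ```ignore
    /// let mut excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
    /// excl.write_u64(0, 0xdead_beef)?;   // unrestricted setup writes
    /// let (host, guest) = excl.build();  // split into the two runtime views
    /// // `guest` is mapped into the VM; `host` is used for synchronized access.
    /// ```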
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        (
            HostSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
            GuestSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
        )
    }

    /// Gets the file handle of the shared memory region for this Sandbox
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }
}

/// A trait that abstracts over the particular kind of SharedMemory,
/// used when invoking operations from Rust that absolutely must have
/// exclusive control over the shared memory for correctness +
/// performance, like snapshotting.
pub trait SharedMemory {
    /// Return a readonly reference to the host mapping backing this SharedMemory
    fn region(&self) -> &HostMapping;

    /// Return the base address of the host mapping of this
    /// region. Following the general Rust philosophy, this does not
    /// need to be marked as `unsafe` because doing anything with this
    /// pointer itself requires `unsafe`.
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Return the base address of the host mapping of this region as
    /// a pointer. Following the general Rust philosophy, this does
    /// not need to be marked as `unsafe` because doing anything with
    /// this pointer itself requires `unsafe`.
    fn base_ptr(&self) -> *mut u8 {
        self.base_addr() as *mut u8
    }

    /// Return the length of usable memory contained in `self`.
    /// The returned size does not include the size of the surrounding
    /// guard pages.
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Return the raw base address of the host mapping, including the
    /// guard pages.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Return the raw size of the host mapping, including the guard
    /// pages.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Run some code with exclusive access to the SharedMemory
    /// underlying this.  If the SharedMemory is not an
    /// ExclusiveSharedMemory, any concurrent accesses to the relevant
    /// HostSharedMemory/GuestSharedMemory may make this fail, or be
    /// made to fail by this, and should be avoided.
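    ///
    /// # Example
    ///
    /// Illustrative sketch (not compiled here): take a snapshot of a
    /// HostSharedMemory by temporarily claiming exclusivity. Note the double
    /// `?`: one for the locking, one for `copy_all_to_vec` itself.
    ///
    /// ```ignore
    /// let snapshot: Vec<u8> = host_shared_memory
    ///     .with_exclusivity(|excl| excl.copy_all_to_vec())??;
    /// ```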
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
}

impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}

impl SharedMemory for GuestSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

/// An unsafe marker trait for types for which all bit patterns are valid.
/// This is required in order for it to be safe to read a value of a particular
/// type out of the sandbox from the HostSharedMemory.
///
/// # Safety
/// This must only be implemented for types for which all bit patterns
/// are valid. It requires that any (non-undef/poison) value of the
/// correct size can be transmuted to the type.
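///
/// For example, `u64` qualifies (any 8 bytes form a valid `u64`), whereas
/// `bool` must not implement this trait, because only the bit patterns 0 and 1
/// are valid for it.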
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}

impl HostSharedMemory {
    /// Read a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        }
    }

    /// Write a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy the contents of the sandbox at the specified offset into
    /// the given slice
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter_mut().enumerate() {
            unsafe {
                *b = base.wrapping_add(i).read_volatile();
            }
        }
        drop(guard);
        Ok(())
    }

    /// Copy the contents of the given slice into the sandbox at the
    /// specified offset
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter().enumerate() {
            unsafe {
                base.wrapping_add(i).write_volatile(*b);
            }
        }
        drop(guard);
        Ok(())
    }

    /// Fill the memory in the range `[offset, offset + len)` with `value`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for i in 0..len {
            unsafe { base.wrapping_add(i).write_volatile(value) };
        }
        drop(guard);
        Ok(())
    }

    /// Pushes the given data onto the buffer in shared memory that starts at the given offset.
    /// NOTE! buffer_start_offset must point to the beginning of the buffer
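    ///
    /// Layout sketch (illustrative): the buffer is treated as a stack growing
    /// upward from its start, with its first 8 bytes holding the relative
    /// stack pointer. After a push, the buffer looks roughly like:
    ///
    /// ```text
    /// | stack ptr (u64) | ... earlier items ... | data | rel. offset of data (u64) |
    /// ^ buffer_start_offset                                                        ^ new stack ptr
    /// ```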
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        // get absolute
        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // write the actual data to the top of stack
        self.copy_from_slice(data, stack_pointer_abs)?;

        // write the offset to the newly written data, to the top of stack.
        // this is used when popping the stack, to know how far back to jump
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // update stack pointer to point to the next free address
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the top element off the given buffer into a `T` and returns it.
    /// NOTE! the data must be a size-prefixed flatbuffer, and
    /// buffer_start_offset must point to the beginning of the buffer
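    ///
    /// Illustrative sketch (not compiled here). It assumes a buffer whose
    /// stack pointer (its first 8 bytes) has already been initialized to 8 by
    /// the sandbox memory layout, and uses hypothetical `buf_offset` /
    /// `buf_size` values; `host` is a mutable HostSharedMemory:
    ///
    /// ```ignore
    /// // A size-prefixed payload: 4-byte little-endian length, then the bytes.
    /// let payload = [3u8, 0, 0, 0, b'a', b'b', b'c'];
    /// host.push_buffer(buf_offset, buf_size, &payload)?;
    /// let popped: Vec<u8> = host.try_pop_buffer_into(buf_offset, buf_size)?;
    /// assert_eq!(popped, payload.to_vec());
    /// ```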
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        // get the stack pointer
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        // make it absolute
        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // go back 8 bytes to get offset to element on top of stack
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // make it absolute
        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Get the size of the flatbuffer buffer from memory
        let fb_buffer_size = {
            let size_u32 = self.read::<u32>(last_element_offset_abs)? + 4;
            // ^^^ flatbuffer byte arrays are prefixed by 4 bytes
            // indicating their size, so, to get the actual size, we need
            // to add 4.
            usize::try_from(size_u32)
        }?;

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // update the stack pointer to point to the element we just popped off since that is now free
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // zero out the memory we just popped off
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}

impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use proptest::prelude::*;

    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
    use crate::Result;
    use crate::mem::shared_mem_tests::read_write_test_suite;

    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();

        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();

        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();

        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));

        hshm.fill(5, 0, 4096).unwrap();

        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));

        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }

    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // write the value to the memory at the beginning.
        hshm.copy_from_slice(&vec, 0)?;

        let mut vec2 = vec![0; vec_len];
        // read the value back from the memory at the beginning.
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);

        let offset = mem_size - vec.len();
        // write the value to the memory at the end.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec3 = vec![0; vec_len];
        // read the value back from the memory at the end.
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);

        let offset = mem_size / 2;
        // write the value to the memory at the middle.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec4 = vec![0; vec_len];
        // read the value back from the memory at the middle.
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);

        // try and read a value from an offset that is beyond the end of the memory.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());

        // try and write a value to an offset that is beyond the end of the memory.
        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());

        // try and read a value from an offset that is too large.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());

        // try and write a value to an offset that is too large.
        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());

        // try and read a value that is too large.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());

        // try and write a value that is too large.
        assert!(hshm.copy_from_slice(&vec7, 0).is_err());

        Ok(())
    }

    proptest! {
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }

    #[test]
    fn alloc_fail() {
        let gm = ExclusiveSharedMemory::new(0);
        assert!(gm.is_err());
        let gm = ExclusiveSharedMemory::new(usize::MAX);
        assert!(gm.is_err());
    }

    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();

        // after hshm1 is cloned, hshm1 and hshm2 should have identical
        // memory sizes and pointers.
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());

        // we should be able to copy a byte array into both hshm1 and hshm2,
        // and have both changes be reflected in all clones
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();

        // at this point, both hshm1 and hshm2 should have
        // offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }

        // after we drop hshm1, hshm2 should still exist, be valid,
        // and have all contents from before hshm1 was dropped
        drop(hshm1);

        // at this point, hshm2 should still have offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }

    #[test]
    fn copy_all_to_vec() {
        let mut data = vec![b'a', b'b', b'c'];
        data.resize(4096, 0);
        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
        let ret_vec = eshm.copy_all_to_vec().unwrap();
        assert_eq!(data, ret_vec);
    }

    /// A test to ensure that, if a shared memory instance is cloned
    /// and _all_ clones are dropped, the memory region will no longer
    /// be valid.
    ///
    /// This test is ignored because it is incompatible with other tests, as
    /// they may be allocating memory at the same time.
    ///
    /// Marking this test as ignored means that running `cargo test` will not
    /// run it. This allows a developer who runs that command from their
    /// workstation to be successful without needing to know about test
    /// interdependencies. This test will, however, be run explicitly as a
    /// part of the CI pipeline.
    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_drop() {
        use proc_maps::maps_contain_addr;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();
        let addr = hshm1.raw_ptr() as usize;

        // ensure the address is in the process's virtual memory
        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            maps_contain_addr(addr, &maps_before_drop),
            "shared memory address {:#x} was not found in process map, but should be",
            addr,
        );
        // drop both shared memory instances, which should result
        // in freeing the memory region
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        // now, ensure the address is not in the process's virtual memory
        assert!(
            !maps_contain_addr(addr, &maps_after_drop),
            "shared memory address {:#x} was found in the process map, but shouldn't be",
            addr
        );
    }

    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

        const TEST_EXIT_CODE: u8 = 211; // an uncommon exit code, used for testing purposes

        /// hook sigsegv to exit with status code, to make it testable, rather than have it exit from a signal
        /// NOTE: We CANNOT panic!() in the handler, and make the tests #[should_panic], because
        ///     the test harness process will crash anyway after the test passes
        fn setup_signal_handler() {
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn read() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn write() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn exec() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }

        // provides a way for running the above tests in a separate process since they expect to crash
        #[test]
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];

            for test in tests {
                let status = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host", "--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .status()
                    .expect("Unable to launch tests");
                assert_eq!(
                    status.code(),
                    Some(TEST_EXIT_CODE.into()),
                    "Guard Page test failed: {}",
                    test
                );
            }
        }
    }
}