hyperlight_host/mem/shared_mem.rs

/*
Copyright 2024 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
#[cfg(target_os = "linux")]
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};

use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{instrument, Span};
#[cfg(target_os = "windows")]
use windows::core::PCSTR;
#[cfg(target_os = "windows")]
use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
#[cfg(all(target_os = "windows", inprocess))]
use windows::Win32::System::Memory::FILE_MAP_EXECUTE;
#[cfg(all(target_os = "windows", not(inprocess)))]
use windows::Win32::System::Memory::PAGE_READWRITE;
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{
    CreateFileMappingA, MapViewOfFile, UnmapViewOfFile, VirtualProtect, FILE_MAP_ALL_ACCESS,
    MEMORY_MAPPED_VIEW_ADDRESS, PAGE_EXECUTE_READWRITE, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS,
};

#[cfg(target_os = "windows")]
use crate::HyperlightError::MemoryAllocationFailed;
#[cfg(target_os = "windows")]
use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
use crate::{log_then_return, new_error, Result};

/// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset + $size > $mem_size {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}

/// generates a reader function for the given type
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        /// Read a value of type `$ty` from the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}

/// generates a writer function for the given type
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        /// Write a value of type `$ty` to the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
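
// For illustration, a call such as `generate_reader!(read_u64, u64)` expands to
// (roughly) the following method on the surrounding impl block; this is a sketch
// of the expansion, not additional generated code:
//
//     pub(crate) fn read_u64(&self, offset: usize) -> Result<u64> {
//         let data = self.as_slice();
//         bounds_check!(offset, std::mem::size_of::<u64>(), data.len());
//         Ok(u64::from_le_bytes(
//             data[offset..offset + std::mem::size_of::<u64>()].try_into()?,
//         ))
//     }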

/// A representation of a host mapping of a shared memory region,
/// which will be released when this structure is Drop'd. This is not
/// individually Clone (since it holds ownership of the mapping), nor
/// Send or Sync, since it doesn't ensure any particular synchronization.
#[derive(Debug)]
pub struct HostMapping {
    ptr: *mut u8,
    size: usize,
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}

impl Drop for HostMapping {
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}

/// These three structures represent various phases of the lifecycle of
/// a memory buffer that is shared with the guest. An
/// ExclusiveSharedMemory is used for certain operations that
/// unrestrictedly write to the shared memory, including setting it up
/// and taking snapshots.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
unsafe impl Send for ExclusiveSharedMemory {}

/// A GuestSharedMemory is used by the hypervisor handler to represent
/// the reference to all-of-memory that is taken by the virtual cpu.
/// Because of the memory model limitations that affect
/// HostSharedMemory, it is likely fairly important (to ensure that
/// our UB remains limited to interaction with an external compilation
/// unit that likely can't be discovered by the compiler) that _rust_
/// users do not perform racy accesses to the guest communication
/// buffers that are also accessed by HostSharedMemory.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// The lock that indicates this shared memory is being used by non-Rust code
    ///
    /// This lock _must_ be held whenever the guest is executing,
    /// because it prevents the host from converting its
    /// HostSharedMemory to an ExclusiveSharedMemory. Since the guest
    /// may arbitrarily mutate the shared memory, only synchronized
    /// accesses from Rust should be allowed!
    ///
    /// We cannot enforce this in the type system, because the memory
    /// is mapped in to the VM at VM creation time.
    pub lock: Arc<RwLock<()>>,
}
unsafe impl Send for GuestSharedMemory {}
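
// A sketch of how the hypervisor-handler side is expected to use `lock`
// (identifiers such as `guest_mem` and `run_vcpu` are illustrative, not
// real APIs defined in this file): hold a read guard for the whole time
// the guest is executing, so that `with_exclusivity` on the host side
// cannot acquire the write lock concurrently.
//
//     let _running = guest_mem.lock.try_read()
//         .map_err(|e| new_error!("guest memory lock unavailable: {}", e))?;
//     run_vcpu()?; // guest may mutate shared memory while this guard is held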

/// A HostSharedMemory allows synchronized accesses to guest
/// communication buffers, allowing it to be used concurrently with a
/// GuestSharedMemory.
///
/// Given future requirements for asynchronous I/O with a minimum
/// amount of copying (e.g. WASIp3 streams), we would like it to be
/// possible to safely access these buffers concurrently with the
/// guest, ensuring that (1) data is read appropriately if the guest
/// is well-behaved; and (2) the host's behaviour is defined
/// regardless of whether or not the guest is well-behaved.
///
/// The ideal (future) flow for a guest->host message is something like
///   - Guest writes (unordered) bytes describing a work item into a buffer
///   - Guest reveals buffer via a release-store of a pointer into an
///     MMIO ring-buffer
///   - Host acquire-loads the buffer pointer from the "MMIO" ring
///     buffer
///   - Host (unordered) reads the bytes from the buffer
///   - Host performs validation of those bytes and uses them
///
/// Unfortunately, there appears to be no way to do this with defined
/// behaviour in present Rust (see
/// e.g. https://github.com/rust-lang/unsafe-code-guidelines/issues/152).
/// Rust does not yet have its own defined memory model, but in the
/// interim, it is widely treated as inheriting the current C/C++
/// memory models.  The most immediate problem is that regardless of
/// anything else, under those memory models [1, p. 17-18; 2, p. 88],
///
///   > The execution of a program contains a _data race_ if it
///   > contains two [C++23: "potentially concurrent"] conflicting
///   > actions [C23: "in different threads"], at least one of which
///   > is not atomic, and neither happens before the other [C++23: ",
///   > except for the special case for signal handlers described
///   > below"].  Any such data race results in undefined behavior.
///
/// Consequently, if a misbehaving guest fails to correctly
/// synchronize its stores with the host, the host's innocent loads
/// will trigger undefined behaviour for the entire program, including
/// the host.  Note that this also applies if the guest makes an
/// unsynchronized read of a location that the host is writing!
///
/// Despite Rust's de jure inheritance of the C memory model at the
/// present time, the compiler in many cases de facto adheres to LLVM
/// semantics, so it is worthwhile to consider what LLVM does in this
/// case as well.  According to the LangRef [3] memory model,
/// loads which are involved in a race that includes at least one
/// non-atomic access (whether the load or a store) return `undef`,
/// making them roughly equivalent to reading uninitialized
/// memory. While this is much better, it is still bad.
///
/// Considering a different direction, recent C++ papers have seemed
/// to lean towards using `volatile` for similar use cases. For
/// example, in P1152R0 [4], JF Bastien notes that
///
///   > We’ve shown that volatile is purposely defined to denote
///   > external modifications. This happens for:
///   >   - Shared memory with untrusted code, where volatile is the
///   >     right way to avoid time-of-check time-of-use (ToCToU)
///   >     races which lead to security bugs such as [PWN2OWN] and
///   >     [XENXSA155].
///
/// Unfortunately, although this paper was adopted for C++20 (and,
/// sadly, mostly un-adopted for C++23, although that does not concern
/// us), the paper did not actually redefine volatile accesses or data
/// races to prevent volatile accesses from racing with other accesses
/// and causing undefined behaviour.  P1382R1 [5] would have amended
/// the wording of the data race definition to specifically exclude
/// volatile, but, unfortunately, despite receiving a
/// generally-positive reception at its first WG21 meeting more than
/// five years ago, it has not progressed.
///
/// Separately from the data race issue, there is also a concern that
/// according to the various memory models in use, there may be ways
/// in which the guest can semantically obtain uninitialized memory
/// and write it into the shared buffer, which may also result in
/// undefined behaviour on reads.  The degree to which this is a
/// concern is unclear, however, since it is unclear to what degree
/// the Rust abstract machine's conception of uninitialized memory
/// applies to the sandbox.  Returning briefly to the LLVM level
/// rather than the Rust level: this, combined with the fact that
/// racing loads in LLVM return `undef`, as discussed above, means we
/// would ideally `llvm.freeze` the result of any load out of the sandbox.
///
/// It would furthermore be ideal if we could run the flatbuffers
/// parsing code directly on the guest memory, in order to avoid
/// unnecessary copies.  That is unfortunately probably not viable at
/// the present time: because the generated flatbuffers parsing code
/// doesn't use atomic or volatile accesses, it is likely to introduce
/// double-read vulnerabilities.
///
/// In short, none of the Rust-level operations available to us do the
/// right thing, at the Rust spec level or the LLVM spec level. Our
/// major remaining options are therefore:
///   - Choose one of the options that is available to us, and accept
///     that we are doing something unsound according to the spec, but
///     hope that no reasonable compiler could possibly notice.
///   - Use inline assembly per architecture, for which we would only
///     need to worry about the _architecture_'s memory model (which
///     is far less demanding).
///
/// The leading candidate for the first option would seem to be to
/// simply use volatile accesses; there seems to be wide agreement
/// that this _should_ be a valid use case for them (even if it isn't
/// now), and projects like Linux and rust-vmm already use C11
/// `volatile` for this purpose.  It is also worth noting that because
/// we still do need to synchronize with the guest when it _is_ being
/// well-behaved, we would ideally use volatile acquire loads and
/// volatile release stores for interacting with the stack pointer in
/// the guest in this case.  Unfortunately, while those operations are
/// defined in LLVM, they are not presently exposed to Rust. While
/// atomic fences that are not associated with memory accesses
/// (std::sync::atomic::fence) might at first glance seem to help with
/// this problem, they unfortunately do not [6]:
///
///    > A fence ‘A’ which has (at least) Release ordering semantics,
///    > synchronizes with a fence ‘B’ with (at least) Acquire
///    > semantics, if and only if there exist operations X and Y,
///    > both operating on some atomic object ‘M’ such that A is
///    > sequenced before X, Y is sequenced before B and Y observes
///    > the change to M. This provides a happens-before dependence
///    > between A and B.
///
/// Note that the X and Y must be to an _atomic_ object.
///
/// We consequently assume that there has been a strong architectural
/// fence on a vmenter/vmexit between data being read and written.
/// This is unsafe (not guaranteed in the type system)!
///
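/// As a stopgap, the accessors on this type (`read`, `write`,
/// `copy_to_slice`, `copy_from_slice`, `fill`) therefore perform
/// per-byte volatile accesses through raw pointers while holding the
/// shared lock. A minimal sketch of the read path (mirroring
/// `copy_to_slice` below; `base` is a pointer into the shared region
/// and `dst` a host-side byte slice):
///
/// ```rust,ignore
/// for (i, b) in dst.iter_mut().enumerate() {
///     unsafe { *b = base.wrapping_add(i).read_volatile() };
/// }
/// ```
///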
/// [1] N3047 C23 Working Draft. https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3047.pdf
/// [2] N4950 C++23 Working Draft. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/n4950.pdf
/// [3] LLVM Language Reference Manual, Memory Model for Concurrent Operations. https://llvm.org/docs/LangRef.html#memmodel
/// [4] P1152R0: Deprecating `volatile`. JF Bastien. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1152r0.html
/// [5] P1382R1: `volatile_load<T>` and `volatile_store<T>`. JF Bastien, Paul McKenney, Jeffrey Yasskin, and the indefatigable TBD. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1382r1.pdf
/// [6] Documentation for std::sync::atomic::fence. https://doc.rust-lang.org/std/sync/atomic/fn.fence.html
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    lock: Arc<RwLock<()>>,
}
unsafe impl Send for HostSharedMemory {}

impl ExclusiveSharedMemory {
    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
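    ///
    /// The usable region sits between the two guard pages, so the host
    /// layout is: `[guard page][min_size_bytes usable][guard page]`.
    /// A minimal usage sketch (illustrative; assumes `SharedMemory` is
    /// in scope so `mem_size` is available):
    ///
    /// ```rust,ignore
    /// let excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
    /// assert_eq!(excl.mem_size(), PAGE_SIZE_USIZE);
    /// ```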
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            c_int, mmap, mprotect, off_t, size_t, MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE,
            MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
        };

        use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE) // guard page around the memory
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        // allocate the memory
        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
        }

        // protect the guard pages

        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }
        let res = unsafe {
            mprotect(
                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                PAGE_SIZE_USIZE,
                PROT_NONE,
            )
        };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        // Allocate the memory using CreateFileMapping instead of VirtualAlloc.
        // This allows us to map the memory into the surrogate process using MapViewOfFile2.

        #[cfg(not(inprocess))]
        let flags = PAGE_READWRITE;
        #[cfg(inprocess)]
        let flags = PAGE_EXECUTE_READWRITE;

        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        #[cfg(not(inprocess))]
        let file_map = FILE_MAP_ALL_ACCESS;
        #[cfg(inprocess)]
        let file_map = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;

        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Set the first and last pages to be guard pages

        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // If the following calls to VirtualProtect are changed, make sure to update the calls to VirtualProtectEx in surrogate_process_manager.rs

        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

    pub(super) fn make_memory_executable(&self) -> Result<()> {
        #[cfg(target_os = "windows")]
        {
            let mut _old_flags = PAGE_PROTECTION_FLAGS::default();
            if let Err(e) = unsafe {
                VirtualProtect(
                    self.region.ptr as *const c_void,
                    self.region.size,
                    PAGE_EXECUTE_READWRITE,
                    &mut _old_flags as *mut PAGE_PROTECTION_FLAGS,
                )
            } {
                log_then_return!(WindowsAPIError(e.clone()));
            }
        }

        // make the memory executable on Linux
        #[cfg(target_os = "linux")]
        {
            use libc::{mprotect, PROT_EXEC, PROT_READ, PROT_WRITE};

            let res = unsafe {
                mprotect(
                    self.region.ptr as *mut c_void,
                    self.region.size,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                )
            };

            if res != 0 {
                return Err(new_error!(
                    "Failed to make memory executable: {:#?}",
                    Error::last_os_error().raw_os_error()
                ));
            }
        }
        Ok(())
    }

    /// Internal helper method to get the backing memory as a mutable slice.
    ///
    /// # Safety
    /// As per std::slice::from_raw_parts_mut:
    /// - self.base_addr() must be valid for both reads and writes for
    ///   self.mem_size() * mem::size_of::<u8>() many bytes, and it
    ///   must be properly aligned.
    ///
    ///   The rules on validity are still somewhat unspecified, but we
    ///   assume that the result of our calls to mmap/CreateFileMapping may
    ///   be considered a single "allocated object". The use of
    ///   non-atomic accesses is alright from a Safe Rust standpoint,
    ///   because SharedMemoryBuilder is not Sync.
    /// - self.base_addr() must point to self.mem_size() consecutive
    ///   properly initialized values of type u8
    ///
    ///   Again, the exact provenance restrictions on what is
    ///   considered to be initialized values are unclear, but we make
    ///   sure to use mmap(MAP_ANONYMOUS) and
    ///   CreateFileMapping(SEC_COMMIT), so the pages in question are
    ///   zero-initialized, which we hope counts for u8.
    /// - The memory referenced by the returned slice must not be
    ///   accessed through any other pointer (not derived from the
    ///   return value) for the duration of the lifetime 'a. Both read
    ///   and write accesses are forbidden.
    ///
    ///   Accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a mutable borrow of self.
    /// - The total size self.mem_size() * mem::size_of::<u8>() of the
    ///   slice must be no larger than isize::MAX, and adding that
    ///   size to data must not "wrap around" the address space. See
    ///   the safety documentation of pointer::offset.
    ///
    ///   This is ensured by a check in ::new()
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn as_mut_slice<'a>(&'a mut self) -> &'a mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Internal helper method to get the backing memory as a slice.
    ///
    /// # Safety
    /// See the discussion on as_mut_slice, with the third point
    /// replaced by:
    /// - The memory referenced by the returned slice must not be
    ///   mutated for the duration of lifetime 'a, except inside an
    ///   UnsafeCell.
    ///
    ///   Host accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a borrow of self, preventing mutations via other methods.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copy the entire contents of `self` into a `Vec<u8>`, then return it
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies all bytes from `src` to `self` starting at offset
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    /// Return the address of memory at an offset to this `SharedMemory` checking
    /// that the memory is within the bounds of the `SharedMemory`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn calculate_address(&self, offset: usize) -> Result<usize> {
        bounds_check!(offset, 0, self.mem_size());
        Ok(self.base_addr() + offset)
    }

    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Convert the ExclusiveSharedMemory, which may be freely
    /// modified, into a GuestSharedMemory, which may be somewhat
    /// freely modified (mostly by the guest), and a HostSharedMemory,
    /// which may only make certain kinds of accesses that do not race
    /// in the presence of malicious code inside the guest mutating
    /// the GuestSharedMemory.
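    ///
    /// A minimal usage sketch (mirroring the tests at the bottom of
    /// this file):
    ///
    /// ```rust,ignore
    /// let excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
    /// let (host, guest) = excl.build();
    /// // `guest` is what gets mapped into the VM; `host` is used for
    /// // synchronized access to the same region from the host side.
    /// ```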
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        (
            HostSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
            GuestSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
        )
    }

    /// Gets the file handle of the shared memory region for this Sandbox
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }
}

/// A trait that abstracts over the particular kind of SharedMemory,
/// used when invoking operations from Rust that absolutely must have
/// exclusive control over the shared memory for correctness +
/// performance, like snapshotting.
pub trait SharedMemory {
    /// Return a readonly reference to the host mapping backing this SharedMemory
    fn region(&self) -> &HostMapping;

    /// Return the base address of the host mapping of this
    /// region. Following the general Rust philosophy, this does not
    /// need to be marked as `unsafe` because doing anything with this
    /// pointer itself requires `unsafe`.
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Return the base address of the host mapping of this region as
    /// a pointer. Following the general Rust philosophy, this does
    /// not need to be marked as `unsafe` because doing anything with
    /// this pointer itself requires `unsafe`.
    fn base_ptr(&self) -> *mut u8 {
        self.base_addr() as *mut u8
    }

    /// Return the length of usable memory contained in `self`.
    /// The returned size does not include the size of the surrounding
    /// guard pages.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Return the raw base address of the host mapping, including the
    /// guard pages.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Return the raw size of the host mapping, including the guard
    /// pages.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Run some code with exclusive access to the SharedMemory
    /// underlying this.  If the SharedMemory is not an
    /// ExclusiveSharedMemory, any concurrent accesses to the relevant
    /// HostSharedMemory/GuestSharedMemory may make this fail, or be
    /// made to fail by this, and should be avoided.
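    ///
    /// A short usage sketch (mirroring the `fill` test below), where
    /// the closure is handed a temporary `ExclusiveSharedMemory` view:
    ///
    /// ```rust,ignore
    /// let contents = host_shared_memory
    ///     .with_exclusivity(|excl| excl.copy_all_to_vec())??;
    /// ```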
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
}

impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}

impl SharedMemory for GuestSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

/// An unsafe marker trait for types for which all bit patterns are valid.
/// This is required in order for it to be safe to read a value of a particular
/// type out of the sandbox from the HostSharedMemory.
///
/// # Safety
/// This must only be implemented for types for which all bit patterns
/// are valid. It requires that any (non-undef/poison) value of the
/// correct size can be transmuted to the type.
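///
/// For illustration only (this type does not exist in the crate): a
/// `#[repr(transparent)]` wrapper around a fixed-size byte array could
/// soundly implement this, since every bit pattern of its bytes is a
/// valid value:
///
/// ```rust,ignore
/// #[repr(transparent)]
/// struct Cookie([u8; 16]);
/// unsafe impl AllValid for Cookie {}
/// ```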
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}

impl HostSharedMemory {
    /// Read a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
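    ///
    /// A short sketch (the offset name is illustrative; it must be in
    /// bounds for the read to succeed):
    ///
    /// ```rust,ignore
    /// let stack_pointer: u64 = host_shared_memory.read::<u64>(buffer_start_offset)?;
    /// ```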
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        let ret = unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        };
        ret
    }

    /// Write a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
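    ///
    /// A short sketch (offset and value are illustrative):
    ///
    /// ```rust,ignore
    /// host_shared_memory.write::<u64>(buffer_start_offset, 8u64)?;
    /// ```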
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy the contents of the sandbox at the specified offset into
    /// the slice
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter_mut().enumerate() {
            unsafe {
                *b = base.wrapping_add(i).read_volatile();
            }
        }
        drop(guard);
        Ok(())
    }

    /// Copy the contents of the slice into the sandbox at the
    /// specified offset
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter().enumerate() {
            unsafe {
                base.wrapping_add(i).write_volatile(*b);
            }
        }
        drop(guard);
        Ok(())
    }

    /// Fill the memory in the range `[offset, offset + len)` with `value`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for i in 0..len {
            unsafe { base.wrapping_add(i).write_volatile(value) };
        }
        drop(guard);
        Ok(())
    }

    /// Pushes the given data onto the shared-memory buffer at the given offset.
    /// NOTE! buffer_start_offset must point to the beginning of the buffer
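    ///
    /// The first 8 bytes of the buffer hold a `u64` "stack pointer"
    /// relative to `buffer_start_offset`; each pushed element is
    /// followed by an 8-byte back-pointer recording where that element
    /// starts. A usage sketch (the offset/size names are illustrative,
    /// coming from the sandbox's memory layout rather than defined here):
    ///
    /// ```rust,ignore
    /// host_shared_memory.push_buffer(input_buffer_offset, input_buffer_size, &payload)?;
    /// ```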
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        // get absolute
        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // write the actual data to the top of stack
        self.copy_from_slice(data, stack_pointer_abs)?;

        // write the offset to the newly written data, to the top of stack.
        // this is used when popping the stack, to know how far back to jump
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // update stack pointer to point to the next free address
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the top element of the given buffer into a `T` and returns it.
    /// NOTE! the data must be a size-prefixed flatbuffer, and
    /// buffer_start_offset must point to the beginning of the buffer
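    ///
    /// A usage sketch (`FunctionCall` stands in for any type
    /// implementing `TryFrom<&[u8]>`; the offset/size names are
    /// illustrative):
    ///
    /// ```rust,ignore
    /// let call: FunctionCall =
    ///     host_shared_memory.try_pop_buffer_into(output_buffer_offset, output_buffer_size)?;
    /// ```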
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        // get the stackpointer
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        // make it absolute
        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // go back 8 bytes to get offset to element on top of stack
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // make it absolute
        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Get the size of the flatbuffer buffer from memory
        let fb_buffer_size = {
            let size_i32 = self.read::<u32>(last_element_offset_abs)? + 4;
            // ^^^ flatbuffer byte arrays are prefixed by 4 bytes
            // indicating its size, so, to get the actual size, we need
            // to add 4.
            usize::try_from(size_i32)
        }?;

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // update the stack pointer to point to the element we just popped off since that is now free
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // zero out the memory we just popped off
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}

impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use proptest::prelude::*;

    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
    use crate::mem::shared_mem_tests::read_write_test_suite;
    use crate::Result;

    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();

        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();

        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();

        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));

        hshm.fill(5, 0, 4096).unwrap();

        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));

        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }

    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // write the value to the memory at the beginning.
        hshm.copy_from_slice(&vec, 0)?;

        let mut vec2 = vec![0; vec_len];
        // read the value back from the memory at the beginning.
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);

        let offset = mem_size - vec.len();
        // write the value to the memory at the end.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec3 = vec![0; vec_len];
        // read the value back from the memory at the end.
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);

        let offset = mem_size / 2;
        // write the value to the memory at the middle.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec4 = vec![0; vec_len];
        // read the value back from the memory at the middle.
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);

        // try and read a value from an offset that is beyond the end of the memory.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());

        // try and write a value to an offset that is beyond the end of the memory.
        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());

        // try and read a value from an offset that is too large.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());

        // try and write a value to an offset that is too large.
        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());

        // try and read a value that is too large.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());

        // try and write a value that is too large.
        assert!(hshm.copy_from_slice(&vec7, 0).is_err());

        Ok(())
    }

    proptest! {
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }

    #[test]
    fn alloc_fail() {
        let gm = ExclusiveSharedMemory::new(0);
        assert!(gm.is_err());
        let gm = ExclusiveSharedMemory::new(usize::MAX);
        assert!(gm.is_err());
    }

    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();

        // after hshm1 is cloned, hshm1 and hshm2 should have identical
        // memory sizes and pointers.
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());

        // we should be able to copy a byte array into both hshm1 and hshm2,
        // and have both changes be reflected in all clones
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();

        // at this point, both hshm1 and hshm2 should have
        // offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }

        // after we drop hshm1, hshm2 should still exist, be valid,
        // and have all contents from before hshm1 was dropped
        drop(hshm1);

        // at this point, hshm2 should still have offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }

    #[test]
    fn copy_all_to_vec() {
        let mut data = vec![b'a', b'b', b'c'];
        data.resize(4096, 0);
        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
        let ret_vec = eshm.copy_all_to_vec().unwrap();
        assert_eq!(data, ret_vec);
    }

    /// A test to ensure that, if a `SharedMem` instance is cloned
    /// and _all_ clones are dropped, the memory region will no longer
    /// be valid.
    ///
    /// This test is ignored because it is incompatible with other tests as
    /// they may be allocating memory at the same time.
    ///
    /// Marking this test as ignored means that running `cargo test` will not
    /// run it. This feature will allow a developer who runs that command
    /// from their workstation to be successful without needing to know about
    /// test interdependencies. This test will, however, be run explicitly as a
    /// part of the CI pipeline.
    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_drop() {
        use proc_maps::maps_contain_addr;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();
        let addr = hshm1.raw_ptr() as usize;

        // ensure the address is in the process's virtual memory
        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            maps_contain_addr(addr, &maps_before_drop),
            "shared memory address {:#x} was not found in process map, but should be",
            addr,
        );
        // drop both shared memory instances, which should result
        // in freeing the memory region
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        // now, ensure the address is not in the process's virtual memory
        assert!(
            !maps_contain_addr(addr, &maps_after_drop),
            "shared memory address {:#x} was found in the process map, but shouldn't be",
            addr
        );
    }

    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

        const TEST_EXIT_CODE: u8 = 211; // an uncommon exit code, used for testing purposes

        /// hook sigsegv to exit with status code, to make it testable, rather than have it exit from a signal
        /// NOTE: We CANNOT panic!() in the handler, and make the tests #[should_panic], because
        ///     the test harness process will crash anyway after the test passes
        fn setup_signal_handler() {
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn read() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn write() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn exec() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }

        // provides a way for running the above tests in a separate process since they expect to crash
        #[test]
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];

            for test in tests {
                let status = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host", "--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .status()
                    .expect("Unable to launch tests");
                assert_eq!(
                    status.code(),
                    Some(TEST_EXIT_CODE.into()),
                    "Guard Page test failed: {}",
                    test
                );
            }
        }
    }
}