hyperlight_host/mem/shared_mem.rs

1/*
2Copyright 2024 The Hyperlight Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17use std::any::type_name;
18use std::ffi::c_void;
19use std::io::Error;
20#[cfg(target_os = "linux")]
21use std::ptr::null_mut;
22use std::sync::{Arc, RwLock};
23
24use hyperlight_common::mem::PAGE_SIZE_USIZE;
25use tracing::{instrument, Span};
26#[cfg(target_os = "windows")]
27use windows::core::PCSTR;
28#[cfg(target_os = "windows")]
29use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
30#[cfg(all(target_os = "windows", inprocess))]
31use windows::Win32::System::Memory::FILE_MAP_EXECUTE;
32#[cfg(all(target_os = "windows", not(inprocess)))]
33use windows::Win32::System::Memory::PAGE_READWRITE;
34#[cfg(target_os = "windows")]
35use windows::Win32::System::Memory::{
36    CreateFileMappingA, MapViewOfFile, UnmapViewOfFile, VirtualProtect, FILE_MAP_ALL_ACCESS,
37    MEMORY_MAPPED_VIEW_ADDRESS, PAGE_EXECUTE_READWRITE, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS,
38};
39
40#[cfg(target_os = "windows")]
41use crate::HyperlightError::MemoryAllocationFailed;
42#[cfg(target_os = "windows")]
43use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
44use crate::{log_then_return, new_error, Result};
45
46/// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
47macro_rules! bounds_check {
48    ($offset:expr, $size:expr, $mem_size:expr) => {
49        if $offset + $size > $mem_size {
50            return Err(new_error!(
51                "Cannot read value from offset {} with size {} in memory of size {}",
52                $offset,
53                $size,
54                $mem_size
55            ));
56        }
57    };
58}
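// For example, with a memory of size 16: `bounds_check!(4, 8, 16)` passes
// (4 + 8 <= 16), while `bounds_check!(12, 8, 16)` makes the surrounding
// function return an error (12 + 8 > 16).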
59
60/// Generates a reader function for the given type
61macro_rules! generate_reader {
62    ($fname:ident, $ty:ty) => {
63        /// Read a value of type `$ty` from the memory at the given offset.
64        #[allow(dead_code)]
65        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
66        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
67            let data = self.as_slice();
68            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
69            Ok(<$ty>::from_le_bytes(
70                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
71            ))
72        }
73    };
74}
75
76/// Generates a writer function for the given type
77macro_rules! generate_writer {
78    ($fname:ident, $ty:ty) => {
79        /// Write a value of type `$ty` to the memory at the given offset.
80        #[allow(dead_code)]
81        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
82            let data = self.as_mut_slice();
83            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
84            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
85            Ok(())
86        }
87    };
88}
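// A brief sketch of how these macros are used further below (the expansion
// shown in the comment is illustrative, not the literal macro output):
//
//     generate_reader!(read_u64, u64);
//     // expands to roughly:
//     // pub(crate) fn read_u64(&self, offset: usize) -> Result<u64> {
//     //     let data = self.as_slice();
//     //     bounds_check!(offset, std::mem::size_of::<u64>(), data.len());
//     //     Ok(u64::from_le_bytes(data[offset..offset + 8].try_into()?))
//     // }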
89
90/// A representation of a host mapping of a shared memory region,
91/// which will be released when this structure is Drop'd. It is not
92/// individually Clone (since it holds ownership of the mapping), nor
93/// Send or Sync (since it does not ensure any particular synchronization).
94#[derive(Debug)]
95pub struct HostMapping {
96    ptr: *mut u8,
97    size: usize,
98    #[cfg(target_os = "windows")]
99    handle: HANDLE,
100}
101
102impl Drop for HostMapping {
103    #[cfg(target_os = "linux")]
104    fn drop(&mut self) {
105        use libc::munmap;
106
107        unsafe {
108            munmap(self.ptr as *mut c_void, self.size);
109        }
110    }
111    #[cfg(target_os = "windows")]
112    fn drop(&mut self) {
113        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
114            Value: self.ptr as *mut c_void,
115        };
116        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
117            tracing::error!(
118                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
119                e
120            );
121        }
122
123        let file_handle: HANDLE = self.handle;
124        if let Err(e) = unsafe { CloseHandle(file_handle) } {
125            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
126        }
127    }
128}
129
130/// These three structures represent various phases of the lifecycle of
131/// a memory buffer that is shared with the guest. An
132/// ExclusiveSharedMemory is used for certain operations that
133/// unrestrictedly write to the shared memory, including setting it up
134/// and taking snapshots.
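/// A minimal lifecycle sketch (size and data are illustrative; the tests at
/// the bottom of this file exercise the same flow):
///
/// ```ignore
/// let mut excl = ExclusiveSharedMemory::new(0x4000)?; // setup phase
/// excl.copy_from_slice(b"hello", 0)?;                 // unrestricted writes
/// let (host, guest) = excl.build();                   // split into the two views
/// // `guest` is mapped into the VM; `host` performs synchronized accesses.
/// ```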
135#[derive(Debug)]
136pub struct ExclusiveSharedMemory {
137    region: Arc<HostMapping>,
138}
139unsafe impl Send for ExclusiveSharedMemory {}
140
141/// A GuestSharedMemory is used by the hypervisor handler to represent
142/// the reference to all-of-memory that is taken by the virtual cpu.
143/// Because of the memory model limitations that affect
144/// HostSharedMemory, it is likely fairly important (to ensure that
145/// our UB remains limited to interaction with an external compilation
146/// unit that likely can't be discovered by the compiler) that _rust_
147/// users do not perform racy accesses to the guest communication
148/// buffers that are also accessed by HostSharedMemory.
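/// A hedged sketch of the intended locking discipline (the vCPU handle and
/// its `run` method here are illustrative, not APIs of this module):
///
/// ```ignore
/// let _guard = guest_mem.lock.read().unwrap(); // held for the whole vCPU run
/// vcpu.run()?;                                 // guest may mutate shared memory
/// // dropping the guard allows the host to regain exclusivity again
/// // (e.g. via SharedMemory::with_exclusivity).
/// ```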
149#[derive(Debug)]
150pub struct GuestSharedMemory {
151    region: Arc<HostMapping>,
152    /// The lock that indicates this shared memory is being used by non-Rust code
153    ///
154    /// This lock _must_ be held whenever the guest is executing,
155    /// because it prevents the host from converting its
156    /// HostSharedMemory to an ExclusiveSharedMemory. Since the guest
157    /// may arbitrarily mutate the shared memory, only synchronized
158    /// accesses from Rust should be allowed!
159    ///
160    /// We cannot enforce this in the type system, because the memory
161    /// is mapped in to the VM at VM creation time.
162    pub lock: Arc<RwLock<()>>,
163}
164unsafe impl Send for GuestSharedMemory {}
165
166/// A HostSharedMemory allows synchronized accesses to guest
167/// communication buffers, allowing it to be used concurrently with a
168/// GuestSharedMemory.
169///
170/// Given future requirements for asynchronous I/O with a minimum
171/// amount of copying (e.g. WASIp3 streams), we would like it to be
172/// possible to safely access these buffers concurrently with the
173/// guest, ensuring that (1) data is read appropriately if the guest
174/// is well-behaved; and (2) the host's behaviour is defined
175/// regardless of whether or not the guest is well-behaved.
176///
177/// The ideal (future) flow for a guest->host message is something like
178///   - Guest writes (unordered) bytes describing a work item into a buffer
179///   - Guest reveals buffer via a release-store of a pointer into an
180///     MMIO ring-buffer
181///   - Host acquire-loads the buffer pointer from the "MMIO" ring
182///     buffer
183///   - Host (unordered) reads the bytes from the buffer
184///   - Host performs validation of those bytes and uses them
185///
186/// Unfortunately, there appears to be no way to do this with defined
187/// behaviour in present Rust (see
188/// e.g. https://github.com/rust-lang/unsafe-code-guidelines/issues/152).
189/// Rust does not yet have its own defined memory model, but in the
190/// interim, it is widely treated as inheriting the current C/C++
191/// memory models.  The most immediate problem is that regardless of
192/// anything else, under those memory models [1, p. 17-18; 2, p. 88],
193///
194///   > The execution of a program contains a _data race_ if it
195///   > contains two [C++23: "potentially concurrent"] conflicting
196///   > actions [C23: "in different threads"], at least one of which
197///   > is not atomic, and neither happens before the other [C++23: ",
198///   > except for the special case for signal handlers described
199///   > below"].  Any such data race results in undefined behavior.
200///
201/// Consequently, if a misbehaving guest fails to correctly
202/// synchronize its stores with the host, the host's innocent loads
203/// will trigger undefined behaviour for the entire program, including
204/// the host.  Note that this also applies if the guest makes an
205/// unsynchronized read of a location that the host is writing!
206///
207/// Despite Rust's de jure inheritance of the C memory model at the
208/// present time, the compiler in many cases de facto adheres to LLVM
209/// semantics, so it is worthwhile to consider what LLVM does in this
210/// case as well.  According to the LangRef [3] memory model,
211/// loads which are involved in a race that includes at least one
212/// non-atomic access (whether the load or a store) return `undef`,
213/// making them roughly equivalent to reading uninitialized
214/// memory. While this is much better, it is still bad.
215///
216/// Considering a different direction, recent C++ papers have seemed
217/// to lean towards using `volatile` for similar use cases. For
218/// example, in P1152R0 [4], JF Bastien notes that
219///
220///   > We’ve shown that volatile is purposely defined to denote
221///   > external modifications. This happens for:
222///   >   - Shared memory with untrusted code, where volatile is the
223///   >     right way to avoid time-of-check time-of-use (ToCToU)
224///   >     races which lead to security bugs such as [PWN2OWN] and
225///   >     [XENXSA155].
226///
227/// Unfortunately, although this paper was adopted for C++20 (and,
228/// sadly, mostly un-adopted for C++23, although that does not concern
229/// us), the paper did not actually redefine volatile accesses or data
230/// races to prevent volatile accesses from racing with other accesses
231/// and causing undefined behaviour.  P1382R1 [5] would have amended
232/// the wording of the data race definition to specifically exclude
233/// volatile, but, unfortunately, despite receiving a
234/// generally-positive reception at its first WG21 meeting more than
235/// five years ago, it has not progressed.
236///
237/// Separately from the data race issue, there is also a concern that
238/// according to the various memory models in use, there may be ways
239/// in which the guest can semantically obtain uninitialized memory
240/// and write it into the shared buffer, which may also result in
241/// undefined behaviour on reads.  The degree to which this is a
242/// concern is unclear, however, since it is unclear to what degree
243/// the Rust abstract machine's conception of uninitialized memory
244/// applies to the sandbox.  Returning briefly to the LLVM level rather
245/// than the Rust level: this concern, combined with the racing loads
246/// returning `undef` discussed above, means we would ideally
247/// `llvm.freeze` the result of any load out of the sandbox.
248///
249/// It would furthermore be ideal if we could run the flatbuffers
250/// parsing code directly on the guest memory, in order to avoid
251/// unnecessary copies.  That is unfortunately probably not viable at
252/// the present time: because the generated flatbuffers parsing code
253/// doesn't use atomic or volatile accesses, it is likely to introduce
254/// double-read vulnerabilities.
255///
256/// In short, none of the Rust-level operations available to us do the
257/// right thing, at the Rust spec level or the LLVM spec level. Our
258/// major remaining options are therefore:
259///   - Choose one of the options that is available to us, and accept
260///     that we are doing something unsound according to the spec, but
261///     hope that no reasonable compiler could possibly notice.
262///   - Use inline assembly per architecture, for which we would only
263///     need to worry about the _architecture_'s memory model (which
264///     is far less demanding).
265///
266/// The leading candidate for the first option would seem to be to
267/// simply use volatile accesses; there seems to be wide agreement
268/// that this _should_ be a valid use case for them (even if it isn't
269/// now), and projects like Linux and rust-vmm already use C11
270/// `volatile` for this purpose.  It is also worth noting that because
271/// we still do need to synchronize with the guest when it _is_ being
272/// well-behaved, we would ideally use volatile acquire loads and
273/// volatile release stores for interacting with the stack pointer in
274/// the guest in this case.  Unfortunately, while those operations are
275/// defined in LLVM, they are not presently exposed to Rust. While
276/// atomic fences that are not associated with memory accesses
277/// (std::sync::atomic::fence) might at first glance seem to help with
278/// this problem, they unfortunately do not [6]:
279///
280///    > A fence ‘A’ which has (at least) Release ordering semantics,
281///    > synchronizes with a fence ‘B’ with (at least) Acquire
282///    > semantics, if and only if there exist operations X and Y,
283///    > both operating on some atomic object ‘M’ such that A is
284///    > sequenced before X, Y is sequenced before B and Y observes
285///    > the change to M. This provides a happens-before dependence
286///    > between A and B.
287///
288/// Note that the X and Y must be to an _atomic_ object.
289///
290/// We consequently assume that there has been a strong architectural
291/// fence on a vmenter/vmexit between data being read and written.
292/// This is unsafe (not guaranteed in the type system)!
293///
294/// [1] N3047 C23 Working Draft. https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3047.pdf
295/// [2] N4950 C++23 Working Draft. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/n4950.pdf
296/// [3] LLVM Language Reference Manual, Memory Model for Concurrent Operations. https://llvm.org/docs/LangRef.html#memmodel
297/// [4] P1152R0: Deprecating `volatile`. JF Bastien. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1152r0.html
298/// [5] P1382R1: `volatile_load<T>` and `volatile_store<T>`. JF Bastien, Paul McKenney, Jeffrey Yasskin, and the indefatigable TBD. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1382r1.pdf
299/// [6] Documentation for std::sync::atomic::fence. https://doc.rust-lang.org/std/sync/atomic/fn.fence.html
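///
/// A short sketch of the synchronized accessors this type provides (the
/// offsets and values are illustrative):
///
/// ```ignore
/// let (host, _guest) = ExclusiveSharedMemory::new(0x4000)?.build();
/// host.write::<u64>(0, 0xdead_beef)?;  // lock-guarded volatile store
/// let v: u64 = host.read::<u64>(0)?;   // lock-guarded volatile load
/// let mut buf = [0u8; 8];
/// host.copy_to_slice(&mut buf, 0)?;    // byte-wise volatile copy out of the sandbox
/// ```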
300#[derive(Clone, Debug)]
301pub struct HostSharedMemory {
302    region: Arc<HostMapping>,
303    lock: Arc<RwLock<()>>,
304}
305unsafe impl Send for HostSharedMemory {}
306
307impl ExclusiveSharedMemory {
308    /// Create a new region of shared memory with the given minimum
309    /// size in bytes. The region will be surrounded by guard pages.
310    ///
311    /// Return `Err` if shared memory could not be allocated.
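    ///
    /// The resulting mapping is laid out as follows (a sketch derived from
    /// the code below; `raw_ptr()`, `base_addr()` and `mem_size()` are the
    /// accessors on the `SharedMemory` trait):
    ///
    /// ```text
    /// | guard page (PROT_NONE) | min_size_bytes of usable memory | guard page (PROT_NONE) |
    /// ^ raw_ptr()              ^ base_addr(), mem_size() bytes long
    /// ```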
312    #[cfg(target_os = "linux")]
313    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
314    pub fn new(min_size_bytes: usize) -> Result<Self> {
315        use libc::{
316            c_int, mmap, mprotect, off_t, size_t, MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE,
317            MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
318        };
319
320        use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};
321
322        if min_size_bytes == 0 {
323            return Err(new_error!("Cannot create shared memory with size 0"));
324        }
325
326        let total_size = min_size_bytes
327            .checked_add(2 * PAGE_SIZE_USIZE) // guard page around the memory
328            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;
329
330        if total_size % PAGE_SIZE_USIZE != 0 {
331            return Err(new_error!(
332                "shared memory must be a multiple of {}",
333                PAGE_SIZE_USIZE
334            ));
335        }
336
337        // usize and isize are guaranteed to be the same size, and
338        // isize::MAX is positive, so this cast is safe.
339        if total_size > isize::MAX as usize {
340            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
341        }
342
343        // allocate the memory
344        let addr = unsafe {
345            mmap(
346                null_mut(),
347                total_size as size_t,
348                PROT_READ | PROT_WRITE,
349                MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
350                -1 as c_int,
351                0 as off_t,
352            )
353        };
354        if addr == MAP_FAILED {
355            log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
356        }
357
358        // protect the guard pages
359
360        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
361        if res != 0 {
362            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
363        }
364        let res = unsafe {
365            mprotect(
366                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
367                PAGE_SIZE_USIZE,
368                PROT_NONE,
369            )
370        };
371        if res != 0 {
372            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
373        }
374
375        Ok(Self {
376            // HostMapping is only non-Send/Sync because raw pointers
377            // are not ("as a lint", as the Rust docs say). We don't
378            // want to mark HostMapping Send/Sync immediately, because
379            // that could socially imply that it's "safe" to use
380            // unsafe accesses from multiple threads at once. Instead, we
381            // directly impl Send and Sync on this type. Since this
382            // type does have Send and Sync manually impl'd, the Arc
383            // is not pointless as the lint suggests.
384            #[allow(clippy::arc_with_non_send_sync)]
385            region: Arc::new(HostMapping {
386                ptr: addr as *mut u8,
387                size: total_size,
388            }),
389        })
390    }
391
392    /// Create a new region of shared memory with the given minimum
393    /// size in bytes. The region will be surrounded by guard pages.
394    ///
395    /// Return `Err` if shared memory could not be allocated.
396    #[cfg(target_os = "windows")]
397    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
398    pub fn new(min_size_bytes: usize) -> Result<Self> {
399        if min_size_bytes == 0 {
400            return Err(new_error!("Cannot create shared memory with size 0"));
401        }
402
403        let total_size = min_size_bytes
404            .checked_add(2 * PAGE_SIZE_USIZE)
405            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;
406
407        if total_size % PAGE_SIZE_USIZE != 0 {
408            return Err(new_error!(
409                "shared memory must be a multiple of {}",
410                PAGE_SIZE_USIZE
411            ));
412        }
413
414        // usize and isize are guaranteed to be the same size, and
415        // isize::MAX is positive, so this cast is safe.
416        if total_size > isize::MAX as usize {
417            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
418        }
419
420        let mut dwmaximumsizehigh = 0;
421        let mut dwmaximumsizelow = 0;
422
423        if std::mem::size_of::<usize>() == 8 {
424            dwmaximumsizehigh = (total_size >> 32) as u32;
425            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
426        }
427
428        // Allocate the memory using CreateFileMapping instead of VirtualAlloc.
429        // This allows us to map the memory into the surrogate process using MapViewOfFile2
430
431        #[cfg(not(inprocess))]
432        let flags = PAGE_READWRITE;
433        #[cfg(inprocess)]
434        let flags = PAGE_EXECUTE_READWRITE;
435
436        let handle = unsafe {
437            CreateFileMappingA(
438                INVALID_HANDLE_VALUE,
439                None,
440                flags,
441                dwmaximumsizehigh,
442                dwmaximumsizelow,
443                PCSTR::null(),
444            )?
445        };
446
447        if handle.is_invalid() {
448            log_then_return!(MemoryAllocationFailed(
449                Error::last_os_error().raw_os_error()
450            ));
451        }
452
453        #[cfg(not(inprocess))]
454        let file_map = FILE_MAP_ALL_ACCESS;
455        #[cfg(inprocess)]
456        let file_map = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;
457
458        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };
459
460        if addr.Value.is_null() {
461            log_then_return!(MemoryAllocationFailed(
462                Error::last_os_error().raw_os_error()
463            ));
464        }
465
466        // Set the first and last pages to be guard pages
467
468        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);
469
470        // If the following calls to VirtualProtect are changed make sure to update the calls to VirtualProtectEx in surrogate_process_manager.rs
471
472        let first_guard_page_start = addr.Value;
473        if let Err(e) = unsafe {
474            VirtualProtect(
475                first_guard_page_start,
476                PAGE_SIZE_USIZE,
477                PAGE_NOACCESS,
478                &mut unused_out_old_prot_flags,
479            )
480        } {
481            log_then_return!(WindowsAPIError(e.clone()));
482        }
483
484        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
485        if let Err(e) = unsafe {
486            VirtualProtect(
487                last_guard_page_start,
488                PAGE_SIZE_USIZE,
489                PAGE_NOACCESS,
490                &mut unused_out_old_prot_flags,
491            )
492        } {
493            log_then_return!(WindowsAPIError(e.clone()));
494        }
495
496        Ok(Self {
497            // HostMapping is only non-Send/Sync because raw pointers
498            // are not ("as a lint", as the Rust docs say). We don't
499            // want to mark HostMapping Send/Sync immediately, because
500            // that could socially imply that it's "safe" to use
501            // unsafe accesses from multiple threads at once. Instead, we
502            // directly impl Send and Sync on this type. Since this
503            // type does have Send and Sync manually impl'd, the Arc
504            // is not pointless as the lint suggests.
505            #[allow(clippy::arc_with_non_send_sync)]
506            region: Arc::new(HostMapping {
507                ptr: addr.Value as *mut u8,
508                size: total_size,
509                handle,
510            }),
511        })
512    }
513
514    pub(super) fn make_memory_executable(&self) -> Result<()> {
515        #[cfg(target_os = "windows")]
516        {
517            let mut _old_flags = PAGE_PROTECTION_FLAGS::default();
518            if let Err(e) = unsafe {
519                VirtualProtect(
520                    self.region.ptr as *const c_void,
521                    self.region.size,
522                    PAGE_EXECUTE_READWRITE,
523                    &mut _old_flags as *mut PAGE_PROTECTION_FLAGS,
524                )
525            } {
526                log_then_return!(WindowsAPIError(e.clone()));
527            }
528        }
529
530        // make the memory executable on Linux
531        #[cfg(target_os = "linux")]
532        {
533            use libc::{mprotect, PROT_EXEC, PROT_READ, PROT_WRITE};
534
535            let res = unsafe {
536                mprotect(
537                    self.region.ptr as *mut c_void,
538                    self.region.size,
539                    PROT_READ | PROT_WRITE | PROT_EXEC,
540                )
541            };
542
543            if res != 0 {
544                return Err(new_error!(
545                    "Failed to make memory executable: {:#?}",
546                    Error::last_os_error().raw_os_error()
547                ));
548            }
549        }
550        Ok(())
551    }
552
553    /// Internal helper method to get the backing memory as a mutable slice.
554    ///
555    /// # Safety
556    /// As per std::slice::from_raw_parts_mut:
557    /// - self.base_addr() must be valid for both reads and writes for
558    ///   self.mem_size() * mem::size_of::<u8>() many bytes, and it
559    ///   must be properly aligned.
560    ///
561    ///   The rules on validity are still somewhat unspecified, but we
562    ///   assume that the result of our calls to mmap/CreateFileMapping may
563    ///   be considered a single "allocated object". The use of
564    ///   non-atomic accesses is alright from a Safe Rust standpoint,
565    ///   because ExclusiveSharedMemory is not Sync.
566    /// - self.base_addr() must point to self.mem_size() consecutive
567    ///   properly initialized values of type u8
568    ///
569    ///   Again, the exact provenance restrictions on what is
570    ///   considered to be initialized values are unclear, but we make
571    ///   sure to use mmap(MAP_ANONYMOUS) and
572    ///   CreateFileMapping(SEC_COMMIT), so the pages in question are
573    ///   zero-initialized, which we hope counts for u8.
574    /// - The memory referenced by the returned slice must not be
575    ///   accessed through any other pointer (not derived from the
576    ///   return value) for the duration of the lifetime 'a. Both read
577    ///   and write accesses are forbidden.
578    ///
579    ///   Accesses from Safe Rust necessarily follow this rule,
580    ///   because the returned slice's lifetime is the same as that of
581    ///   a mutable borrow of self.
582    /// - The total size self.mem_size() * mem::size_of::<u8>() of the
583    ///   slice must be no larger than isize::MAX, and adding that
584    ///   size to data must not "wrap around" the address space. See
585    ///   the safety documentation of pointer::offset.
586    ///
587    ///   This is ensured by a check in ::new()
588    pub(super) fn as_mut_slice(&mut self) -> &mut [u8] {
589        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
590    }
591
592    /// Internal helper method to get the backing memory as a slice.
593    ///
594    /// # Safety
595    /// See the discussion on as_mut_slice, with the third point
596    /// replaced by:
597    /// - The memory referenced by the returned slice must not be
598    ///   mutated for the duration of lifetime 'a, except inside an
599    ///   UnsafeCell.
600    ///
601    ///   Host accesses from Safe Rust necessarily follow this rule,
602    ///   because the returned slice's lifetime is the same as that of
603    ///   a borrow of self, preventing mutations via other methods.
604    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
605    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
606        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
607    }
608
609    /// Copy the entire contents of `self` into a `Vec<u8>`, then return it
610    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
611    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
612        let data = self.as_slice();
613        Ok(data.to_vec())
614    }
615
616    /// Copies all bytes from `src` to `self` starting at offset
617    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
618    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
619        let data = self.as_mut_slice();
620        bounds_check!(offset, src.len(), data.len());
621        data[offset..offset + src.len()].copy_from_slice(src);
622        Ok(())
623    }
624
625    /// Return the address of memory at an offset to this `SharedMemory` checking
626    /// that the memory is within the bounds of the `SharedMemory`.
627    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
628    pub(crate) fn calculate_address(&self, offset: usize) -> Result<usize> {
629        bounds_check!(offset, 0, self.mem_size());
630        Ok(self.base_addr() + offset)
631    }
632
633    generate_reader!(read_u8, u8);
634    generate_reader!(read_i8, i8);
635    generate_reader!(read_u16, u16);
636    generate_reader!(read_i16, i16);
637    generate_reader!(read_u32, u32);
638    generate_reader!(read_i32, i32);
639    generate_reader!(read_u64, u64);
640    generate_reader!(read_i64, i64);
641    generate_reader!(read_usize, usize);
642    generate_reader!(read_isize, isize);
643
644    generate_writer!(write_u8, u8);
645    generate_writer!(write_i8, i8);
646    generate_writer!(write_u16, u16);
647    generate_writer!(write_i16, i16);
648    generate_writer!(write_u32, u32);
649    generate_writer!(write_i32, i32);
650    generate_writer!(write_u64, u64);
651    generate_writer!(write_i64, i64);
652    generate_writer!(write_usize, usize);
653    generate_writer!(write_isize, isize);
654
655    /// Convert the ExclusiveSharedMemory, which may be freely
656    /// modified, into a GuestSharedMemory, which may be somewhat
657    /// freely modified (mostly by the guest), and a HostSharedMemory,
658    /// which may only make certain kinds of accesses that do not race
659    /// in the presence of malicious code inside the guest mutating
660    /// the GuestSharedMemory.
661    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
662        let lock = Arc::new(RwLock::new(()));
663        (
664            HostSharedMemory {
665                region: self.region.clone(),
666                lock: lock.clone(),
667            },
668            GuestSharedMemory {
669                region: self.region.clone(),
670                lock: lock.clone(),
671            },
672        )
673    }
674
675    /// Gets the file handle of the shared memory region for this Sandbox
676    #[cfg(target_os = "windows")]
677    pub fn get_mmap_file_handle(&self) -> HANDLE {
678        self.region.handle
679    }
680}
681
682/// A trait that abstracts over the particular kind of SharedMemory,
683/// used when invoking operations from Rust that absolutely must have
684/// exclusive control over the shared memory for correctness +
685/// performance, like snapshotting.
686pub trait SharedMemory {
687    /// Return a read-only reference to the host mapping backing this SharedMemory
688    fn region(&self) -> &HostMapping;
689
690    /// Return the base address of the host mapping of this
691    /// region. Following the general Rust philosophy, this does not
692    /// need to be marked as `unsafe` because doing anything with this
693    /// pointer itself requires `unsafe`.
694    fn base_addr(&self) -> usize {
695        self.region().ptr as usize + PAGE_SIZE_USIZE
696    }
697
698    /// Return the base address of the host mapping of this region as
699    /// a pointer. Following the general Rust philosophy, this does
700    /// not need to be marked as `unsafe` because doing anything with
701    /// this pointer itself requires `unsafe`.
702    fn base_ptr(&self) -> *mut u8 {
703        self.base_addr() as *mut u8
704    }
705
706    /// Return the length of usable memory contained in `self`.
707    /// The returned size does not include the size of the surrounding
708    /// guard pages.
709    fn mem_size(&self) -> usize {
710        self.region().size - 2 * PAGE_SIZE_USIZE
711    }
712
713    /// Return the raw base address of the host mapping, including the
714    /// guard pages.
715    fn raw_ptr(&self) -> *mut u8 {
716        self.region().ptr
717    }
718
719    /// Return the raw size of the host mapping, including the guard
720    /// pages.
721    fn raw_mem_size(&self) -> usize {
722        self.region().size
723    }
724
725    /// Run some code with exclusive access to the SharedMemory
726    /// underlying this.  If the SharedMemory is not an
727    /// ExclusiveSharedMemory, any concurrent accesses to the relevant
728    /// HostSharedMemory/GuestSharedMemory may make this fail, or be
729    /// made to fail by this, and should be avoided.
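    ///
    /// A usage sketch, mirroring the pattern used in the tests below
    /// (`shared` stands for any value implementing this trait):
    ///
    /// ```ignore
    /// let snapshot: Vec<u8> = shared.with_exclusivity(|excl| excl.copy_all_to_vec())??;
    /// ```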
730    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
731        &mut self,
732        f: F,
733    ) -> Result<T>;
734}
735
736impl SharedMemory for ExclusiveSharedMemory {
737    fn region(&self) -> &HostMapping {
738        &self.region
739    }
740    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
741        &mut self,
742        f: F,
743    ) -> Result<T> {
744        Ok(f(self))
745    }
746}
747
748impl SharedMemory for GuestSharedMemory {
749    fn region(&self) -> &HostMapping {
750        &self.region
751    }
752    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
753        &mut self,
754        f: F,
755    ) -> Result<T> {
756        let guard = self
757            .lock
758            .try_write()
759            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
760        let mut excl = ExclusiveSharedMemory {
761            region: self.region.clone(),
762        };
763        let ret = f(&mut excl);
764        drop(excl);
765        drop(guard);
766        Ok(ret)
767    }
768}
769
770/// An unsafe marker trait for types for which all bit patterns are valid.
771/// This is required in order for it to be safe to read a value of a particular
772/// type out of the sandbox from the HostSharedMemory.
773///
774/// # Safety
775/// This must only be implemented for types for which all bit patterns
776/// are valid. It requires that any (non-undef/poison) value of the
777/// correct size can be transmuted to the type.
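///
/// For illustration only (these types are hypothetical, not part of this
/// module): a `#[repr(C)]` struct made solely of `u32` fields has no invalid
/// bit patterns, whereas `bool` does and must never be `AllValid`:
///
/// ```ignore
/// #[repr(C)]
/// struct GuestHeader { magic: u32, len: u32 }
/// unsafe impl AllValid for GuestHeader {} // every bit pattern is a valid value
/// // unsafe impl AllValid for bool {}     // unsound: only 0 and 1 are valid
/// ```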
778pub unsafe trait AllValid {}
779unsafe impl AllValid for u8 {}
780unsafe impl AllValid for u16 {}
781unsafe impl AllValid for u32 {}
782unsafe impl AllValid for u64 {}
783unsafe impl AllValid for i8 {}
784unsafe impl AllValid for i16 {}
785unsafe impl AllValid for i32 {}
786unsafe impl AllValid for i64 {}
787unsafe impl AllValid for [u8; 16] {}
788
789impl HostSharedMemory {
790    /// Read a value of type T, whose representation is the same
791    /// between the sandbox and the host, and which has no invalid bit
792    /// patterns
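    ///
    /// For example (the offset is illustrative), reading the stack pointer
    /// of a guest buffer as done by `push_buffer` below:
    ///
    /// ```ignore
    /// let stack_pointer: u64 = host.read::<u64>(buffer_start_offset)?;
    /// ```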
793    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
794        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
795        let ret = unsafe {
796            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
797            {
798                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
799                    ret.as_mut_ptr() as *mut u8,
800                    std::mem::size_of::<T>(),
801                );
802                self.copy_to_slice(slice, offset)?;
803            }
804            Ok(ret.assume_init())
805        };
806        ret
807    }
808
809    /// Write a value of type T, whose representation is the same
810    /// between the sandbox and the host, and which has no invalid bit
811    /// patterns
812    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
813        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
814        unsafe {
815            let slice: &[u8] = core::slice::from_raw_parts(
816                core::ptr::addr_of!(data) as *const u8,
817                std::mem::size_of::<T>(),
818            );
819            self.copy_from_slice(slice, offset)?;
820        }
821        Ok(())
822    }
823
824    /// Copy the contents of the sandbox at the specified offset into
825    /// the given slice
826    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
827        bounds_check!(offset, slice.len(), self.mem_size());
828        let base = self.base_ptr().wrapping_add(offset);
829        let guard = self
830            .lock
831            .try_read()
832            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
833        // todo: replace with something a bit more optimized + correct
834        for (i, b) in slice.iter_mut().enumerate() {
835            unsafe {
836                *b = base.wrapping_add(i).read_volatile();
837            }
838        }
839        drop(guard);
840        Ok(())
841    }
842
843    /// Copy the contents of the given slice into the sandbox at the
844    /// specified offset
845    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
846        bounds_check!(offset, slice.len(), self.mem_size());
847        let base = self.base_ptr().wrapping_add(offset);
848        let guard = self
849            .lock
850            .try_read()
851            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
852        // todo: replace with something a bit more optimized + correct
853        for (i, b) in slice.iter().enumerate() {
854            unsafe {
855                base.wrapping_add(i).write_volatile(*b);
856            }
857        }
858        drop(guard);
859        Ok(())
860    }
861
862    /// Fill the memory in the range `[offset, offset + len)` with `value`
863    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
864    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
865        bounds_check!(offset, len, self.mem_size());
866        let base = self.base_ptr().wrapping_add(offset);
867        let guard = self
868            .lock
869            .try_read()
870            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
871        // todo: replace with something a bit more optimized + correct
872        for i in 0..len {
873            unsafe { base.wrapping_add(i).write_volatile(value) };
874        }
875        drop(guard);
876        Ok(())
877    }
878
879    /// Pushes the given data onto the shared-memory buffer at the given offset.
880    /// NOTE! buffer_start_offset must point to the beginning of the buffer
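    ///
    /// Buffer layout, as implied by the push/pop logic below: the first 8
    /// bytes of the buffer hold a little-endian `u64` "stack pointer", the
    /// buffer-relative offset of the next free byte. Each pushed element is
    /// its raw data followed by an 8-byte `u64` recording the relative
    /// offset at which that data starts, which is how `try_pop_buffer_into`
    /// walks back to the previous element:
    ///
    /// ```text
    /// | stack ptr (u64) | elem 0 | off(elem 0) | elem 1 | off(elem 1) | free ...
    /// 0                 8                                             ^ stack ptr
    /// ```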
881    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
882    pub fn push_buffer(
883        &mut self,
884        buffer_start_offset: usize,
885        buffer_size: usize,
886        data: &[u8],
887    ) -> Result<()> {
888        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
889        let buffer_size_u64: u64 = buffer_size.try_into()?;
890
891        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
892            return Err(new_error!(
893                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
894                stack_pointer_rel,
895                buffer_size_u64
896            ));
897        }
898
899        let size_required = data.len() + 8;
900        let size_available = buffer_size - stack_pointer_rel;
901
902        if size_required > size_available {
903            return Err(new_error!(
904                "Not enough space in buffer to push data. Required: {}, Available: {}",
905                size_required,
906                size_available
907            ));
908        }
909
910        // get absolute
911        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;
912
913        // write the actual data to the top of stack
914        self.copy_from_slice(data, stack_pointer_abs)?;
915
916        // write the offset to the newly written data, to the top of stack.
917        // this is used when popping the stack, to know how far back to jump
918        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;
919
920        // update stack pointer to point to the next free address
921        self.write::<u64>(
922            buffer_start_offset,
923            (stack_pointer_rel + data.len() + 8) as u64,
924        )?;
925        Ok(())
926    }
927
928    /// Pops the most recently pushed element off the given buffer into a `T` and returns it.
929    /// NOTE! the data must be a size-prefixed flatbuffer, and
930    /// buffer_start_offset must point to the beginning of the buffer
931    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
932    pub fn try_pop_buffer_into<T>(
933        &mut self,
934        buffer_start_offset: usize,
935        buffer_size: usize,
936    ) -> Result<T>
937    where
938        T: for<'b> TryFrom<&'b [u8]>,
939    {
940        // get the stackpointer
941        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
942
943        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
944            return Err(new_error!(
945                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
946                stack_pointer_rel,
947                buffer_size
948            ));
949        }
950
951        // make it absolute
952        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;
953
954        // go back 8 bytes to get offset to element on top of stack
955        let last_element_offset_rel: usize =
956            self.read::<u64>(last_element_offset_abs - 8)? as usize;
957
958        // make it absolute
959        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;
960
961        // Get the size of the flatbuffer buffer from memory
962        let fb_buffer_size = {
963            let size_i32 = self.read::<u32>(last_element_offset_abs)? + 4;
964            // ^^^ flatbuffer byte arrays are prefixed by 4 bytes
965            // indicating their size, so, to get the actual size, we need
966            // to add 4.
967            usize::try_from(size_i32)
968        }?;
969
970        let mut result_buffer = vec![0; fb_buffer_size];
971
972        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
973        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
974            new_error!(
975                "pop_buffer_into: failed to convert buffer to {}",
976                type_name::<T>()
977            )
978        })?;
979
980        // update the stack pointer to point to the element we just popped off since that is now free
981        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;
982
983        // zero out the memory we just popped off
984        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
985        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;
986
987        Ok(to_return)
988    }
989}
990
991impl SharedMemory for HostSharedMemory {
992    fn region(&self) -> &HostMapping {
993        &self.region
994    }
995    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
996        &mut self,
997        f: F,
998    ) -> Result<T> {
999        let guard = self
1000            .lock
1001            .try_write()
1002            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
1003        let mut excl = ExclusiveSharedMemory {
1004            region: self.region.clone(),
1005        };
1006        let ret = f(&mut excl);
1007        drop(excl);
1008        drop(guard);
1009        Ok(ret)
1010    }
1011}
1012
1013#[cfg(test)]
1014mod tests {
1015    use hyperlight_common::mem::PAGE_SIZE_USIZE;
1016    use proptest::prelude::*;
1017
1018    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
1019    use crate::mem::shared_mem_tests::read_write_test_suite;
1020    use crate::Result;
1021
1022    #[test]
1023    fn fill() {
1024        let mem_size: usize = 4096;
1025        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
1026        let (mut hshm, _) = eshm.build();
1027
1028        hshm.fill(1, 0, 1024).unwrap();
1029        hshm.fill(2, 1024, 1024).unwrap();
1030        hshm.fill(3, 2048, 1024).unwrap();
1031        hshm.fill(4, 3072, 1024).unwrap();
1032
1033        let vec = hshm
1034            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1035            .unwrap();
1036
1037        assert!(vec[0..1024].iter().all(|&x| x == 1));
1038        assert!(vec[1024..2048].iter().all(|&x| x == 2));
1039        assert!(vec[2048..3072].iter().all(|&x| x == 3));
1040        assert!(vec[3072..4096].iter().all(|&x| x == 4));
1041
1042        hshm.fill(5, 0, 4096).unwrap();
1043
1044        let vec2 = hshm
1045            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
1046            .unwrap();
1047        assert!(vec2.iter().all(|&x| x == 5));
1048
1049        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
1050        assert!(hshm.fill(0, mem_size, 1).is_err());
1051    }
1052
1053    #[test]
1054    fn copy_into_from() -> Result<()> {
1055        let mem_size: usize = 4096;
1056        let vec_len = 10;
1057        let eshm = ExclusiveSharedMemory::new(mem_size)?;
1058        let (hshm, _) = eshm.build();
1059        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
1060        // write the value to the memory at the beginning.
1061        hshm.copy_from_slice(&vec, 0)?;
1062
1063        let mut vec2 = vec![0; vec_len];
1064        // read the value back from the memory at the beginning.
1065        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
1066        assert_eq!(vec, vec2);
1067
1068        let offset = mem_size - vec.len();
1069        // write the value to the memory at the end.
1070        hshm.copy_from_slice(&vec, offset)?;
1071
1072        let mut vec3 = vec![0; vec_len];
1073        // read the value back from the memory at the end.
1074        hshm.copy_to_slice(&mut vec3, offset)?;
1075        assert_eq!(vec, vec3);
1076
1077        let offset = mem_size / 2;
1078        // write the value to the memory at the middle.
1079        hshm.copy_from_slice(&vec, offset)?;
1080
1081        let mut vec4 = vec![0; vec_len];
1082        // read the value back from the memory at the middle.
1083        hshm.copy_to_slice(&mut vec4, offset)?;
1084        assert_eq!(vec, vec4);
1085
1086        // try and read a value from an offset that is beyond the end of the memory.
1087        let mut vec5 = vec![0; vec_len];
1088        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());
1089
1090        // try and write a value to an offset that is beyond the end of the memory.
1091        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());
1092
1093        // try and read a value from an offset that is too large.
1094        let mut vec6 = vec![0; vec_len];
1095        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());
1096
1097        // try and write a value to an offset that is too large.
1098        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());
1099
1100        // try and read a value that is too large.
1101        let mut vec7 = vec![0; mem_size * 2];
1102        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());
1103
1104        // try and write a value that is too large.
1105        assert!(hshm.copy_from_slice(&vec7, 0).is_err());
1106
1107        Ok(())
1108    }
1109
1110    proptest! {
1111        #[test]
1112        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
1113            read_write_test_suite(
1114                val,
1115                ExclusiveSharedMemory::new,
1116                Box::new(ExclusiveSharedMemory::read_i32),
1117                Box::new(ExclusiveSharedMemory::write_i32),
1118            )
1119            .unwrap();
1120            read_write_test_suite(
1121                val,
1122                |s| {
1123                    let e = ExclusiveSharedMemory::new(s)?;
1124                    let (h, _) = e.build();
1125                    Ok(h)
1126                },
1127                Box::new(HostSharedMemory::read::<i32>),
1128                Box::new(|h, o, v| h.write::<i32>(o, v)),
1129            )
1130            .unwrap();
1131        }
1132    }
1133
1134    #[test]
1135    fn alloc_fail() {
1136        let gm = ExclusiveSharedMemory::new(0);
1137        assert!(gm.is_err());
1138        let gm = ExclusiveSharedMemory::new(usize::MAX);
1139        assert!(gm.is_err());
1140    }
1141
1142    #[test]
1143    fn clone() {
1144        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
1145        let (hshm1, _) = eshm.build();
1146        let hshm2 = hshm1.clone();
1147
1148        // after hshm1 is cloned, hshm1 and hshm2 should have identical
1149        // memory sizes and pointers.
1150        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
1151        assert_eq!(hshm1.base_addr(), hshm2.base_addr());
1152
1153        // we should be able to copy a byte array into both hshm1 and hshm2,
1154        // and have both changes be reflected in all clones
1155        hshm1.copy_from_slice(b"a", 0).unwrap();
1156        hshm2.copy_from_slice(b"b", 1).unwrap();
1157
1158        // at this point, both hshm1 and hshm2 should have
1159        // offset 0 = 'a', offset 1 = 'b'
1160        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1161            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
1162            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1163        }
1164
1165        // after we drop hshm1, hshm2 should still exist, be valid,
1166        // and have all contents from before hshm1 was dropped
1167        drop(hshm1);
1168
1169        // at this point, hshm2 should still have offset 0 = 'a', offset 1 = 'b'
1170        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
1171            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
1172        }
1173        hshm2.copy_from_slice(b"c", 2).unwrap();
1174        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
1175        drop(hshm2);
1176    }
1177
1178    #[test]
1179    fn copy_all_to_vec() {
1180        let mut data = vec![b'a', b'b', b'c'];
1181        data.resize(4096, 0);
1182        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
1183        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
1184        let ret_vec = eshm.copy_all_to_vec().unwrap();
1185        assert_eq!(data, ret_vec);
1186    }
1187
1188    /// A test to ensure that, if a `SharedMem` instance is cloned
1189    /// and _all_ clones are dropped, the memory region will no longer
1190    /// be valid.
1191    ///
1192    /// This test is ignored because it is incompatible with other tests as
1193    /// they may be allocating memory at the same time.
1194    ///
1195    /// Marking this test as ignored means that running `cargo test` will not
1196    /// run it. This feature will allow a developer who runs that command
1197    /// from their workstation to be successful without needing to know about
1198    /// test interdependencies. This test will, however, be run explicitly as a
1199    /// part of the CI pipeline.
1200    #[test]
1201    #[ignore]
1202    #[cfg(target_os = "linux")]
1203    fn test_drop() {
1204        use proc_maps::maps_contain_addr;
1205
1206        let pid = std::process::id();
1207
1208        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
1209        let (hshm1, gshm) = eshm.build();
1210        let hshm2 = hshm1.clone();
1211        let addr = hshm1.raw_ptr() as usize;
1212
1213        // ensure the address is in the process's virtual memory
1214        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
1215        assert!(
1216            maps_contain_addr(addr, &maps_before_drop),
1217            "shared memory address {:#x} was not found in process map, but should be",
1218            addr,
1219        );
1220        // drop both shared memory instances, which should result
1221        // in freeing the memory region
1222        drop(hshm1);
1223        drop(hshm2);
1224        drop(gshm);
1225
1226        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
1227        // now, ensure the address is not in the process's virtual memory
1228        assert!(
1229            !maps_contain_addr(addr, &maps_after_drop),
1230            "shared memory address {:#x} was found in the process map, but shouldn't be",
1231            addr
1232        );
1233    }
1234
1235    #[cfg(target_os = "linux")]
1236    mod guard_page_crash_test {
1237        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
1238
1239        const TEST_EXIT_CODE: u8 = 211; // an uncommon exit code, used for testing purposes
1240
1241        /// hook sigsegv to exit with status code, to make it testable, rather than have it exit from a signal
1242        /// NOTE: We CANNOT panic!() in the handler, and make the tests #[should_panic], because
1243        ///     the test harness process will crash anyway after the test passes
1244        fn setup_signal_handler() {
1245            unsafe {
1246                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
1247                    std::process::exit(TEST_EXIT_CODE.into());
1248                })
1249                .unwrap();
1250            }
1251        }
1252
1253        #[test]
1254        #[ignore] // this test is ignored because it will crash the running process
1255        fn read() {
1256            setup_signal_handler();
1257
1258            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1259            let (hshm, _) = eshm.build();
1260            let guard_page_ptr = hshm.raw_ptr();
1261            unsafe { std::ptr::read_volatile(guard_page_ptr) };
1262        }
1263
1264        #[test]
1265        #[ignore] // this test is ignored because it will crash the running process
1266        fn write() {
1267            setup_signal_handler();
1268
1269            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1270            let (hshm, _) = eshm.build();
1271            let guard_page_ptr = hshm.raw_ptr();
1272            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
1273        }
1274
1275        #[test]
1276        #[ignore] // this test is ignored because it will crash the running process
1277        fn exec() {
1278            setup_signal_handler();
1279
1280            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
1281            let (hshm, _) = eshm.build();
1282            let guard_page_ptr = hshm.raw_ptr();
1283            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
1284            func();
1285        }
1286
1287        // provides a way for running the above tests in a separate process since they expect to crash
1288        #[test]
1289        fn guard_page_testing_shim() {
1290            let tests = vec!["read", "write", "exec"];
1291
1292            for test in tests {
1293                let status = std::process::Command::new("cargo")
1294                    .args(["test", "-p", "hyperlight-host", "--", "--ignored", test])
1295                    .stdin(std::process::Stdio::null())
1296                    .stdout(std::process::Stdio::null())
1297                    .stderr(std::process::Stdio::null())
1298                    .status()
1299                    .expect("Unable to launch tests");
1300                assert_eq!(
1301                    status.code(),
1302                    Some(TEST_EXIT_CODE.into()),
1303                    "Guard Page test failed: {}",
1304                    test
1305                );
1306            }
1307        }
1308    }
1309}