hyperlight_host/mem/shared_mem.rs

/*
Copyright 2024 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::any::type_name;
use std::ffi::c_void;
use std::io::Error;
#[cfg(target_os = "linux")]
use std::ptr::null_mut;
use std::sync::{Arc, RwLock};

use hyperlight_common::mem::PAGE_SIZE_USIZE;
use tracing::{instrument, Span};
#[cfg(target_os = "windows")]
use windows::core::PCSTR;
#[cfg(target_os = "windows")]
use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE};
#[cfg(all(target_os = "windows", inprocess))]
use windows::Win32::System::Memory::FILE_MAP_EXECUTE;
#[cfg(all(target_os = "windows", not(inprocess)))]
use windows::Win32::System::Memory::PAGE_READWRITE;
#[cfg(target_os = "windows")]
use windows::Win32::System::Memory::{
    CreateFileMappingA, MapViewOfFile, UnmapViewOfFile, VirtualProtect, FILE_MAP_ALL_ACCESS,
    MEMORY_MAPPED_VIEW_ADDRESS, PAGE_EXECUTE_READWRITE, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS,
};

#[cfg(target_os = "windows")]
use crate::HyperlightError::MemoryAllocationFailed;
#[cfg(target_os = "windows")]
use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
use crate::{log_then_return, new_error, Result};

/// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
macro_rules! bounds_check {
    ($offset:expr, $size:expr, $mem_size:expr) => {
        if $offset + $size > $mem_size {
            return Err(new_error!(
                "Cannot read value from offset {} with size {} in memory of size {}",
                $offset,
                $size,
                $mem_size
            ));
        }
    };
}

/// generates a reader function for the given type
macro_rules! generate_reader {
    ($fname:ident, $ty:ty) => {
        /// Read a value of type `$ty` from the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&self, offset: usize) -> Result<$ty> {
            let data = self.as_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            Ok(<$ty>::from_le_bytes(
                data[offset..offset + std::mem::size_of::<$ty>()].try_into()?,
            ))
        }
    };
}

/// generates a writer function for the given type
macro_rules! generate_writer {
    ($fname:ident, $ty:ty) => {
        /// Write a value of type `$ty` to the memory at the given offset.
        #[allow(dead_code)]
        #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
        pub(crate) fn $fname(&mut self, offset: usize, value: $ty) -> Result<()> {
            let data = self.as_mut_slice();
            bounds_check!(offset, std::mem::size_of::<$ty>(), data.len());
            data[offset..offset + std::mem::size_of::<$ty>()].copy_from_slice(&value.to_le_bytes());
            Ok(())
        }
    };
}
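// As an illustration, an invocation such as `generate_reader!(read_u32, u32)`
// produces, roughly, the following method (a sketch only; the real expansion
// also carries the `#[allow(dead_code)]` and `#[instrument]` attributes shown
// above):
//
//     pub(crate) fn read_u32(&self, offset: usize) -> Result<u32> {
//         let data = self.as_slice();
//         bounds_check!(offset, std::mem::size_of::<u32>(), data.len());
//         Ok(u32::from_le_bytes(
//             data[offset..offset + std::mem::size_of::<u32>()].try_into()?,
//         ))
//     }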

/// A representation of a host mapping of a shared memory region,
/// which will be released when this structure is Drop'd. It is not
/// Clone (since it holds ownership of the mapping), nor Send or Sync,
/// since it doesn't ensure any particular synchronization.
#[derive(Debug)]
pub struct HostMapping {
    ptr: *mut u8,
    size: usize,
    #[cfg(target_os = "windows")]
    handle: HANDLE,
}

impl Drop for HostMapping {
    #[cfg(target_os = "linux")]
    fn drop(&mut self) {
        use libc::munmap;

        unsafe {
            munmap(self.ptr as *mut c_void, self.size);
        }
    }
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        let mem_mapped_address = MEMORY_MAPPED_VIEW_ADDRESS {
            Value: self.ptr as *mut c_void,
        };
        if let Err(e) = unsafe { UnmapViewOfFile(mem_mapped_address) } {
            tracing::error!(
                "Failed to drop HostMapping (UnmapViewOfFile failed): {:?}",
                e
            );
        }

        let file_handle: HANDLE = self.handle;
        if let Err(e) = unsafe { CloseHandle(file_handle) } {
            tracing::error!("Failed to drop HostMapping (CloseHandle failed): {:?}", e);
        }
    }
}

/// These three structures represent various phases of the lifecycle of
/// a memory buffer that is shared with the guest. An
/// ExclusiveSharedMemory is used for certain operations that
/// unrestrictedly write to the shared memory, including setting it up
/// and taking snapshots.
#[derive(Debug)]
pub struct ExclusiveSharedMemory {
    region: Arc<HostMapping>,
}
unsafe impl Send for ExclusiveSharedMemory {}

/// A GuestSharedMemory is used by the hypervisor handler to represent
/// the reference to all-of-memory that is taken by the virtual cpu.
/// Because of the memory model limitations that affect
/// HostSharedMemory, it is likely fairly important (to ensure that
/// our UB remains limited to interaction with an external compilation
/// unit that likely can't be discovered by the compiler) that _rust_
/// users do not perform racy accesses to the guest communication
/// buffers that are also accessed by HostSharedMemory.
#[derive(Debug)]
pub struct GuestSharedMemory {
    region: Arc<HostMapping>,
    /// The lock that indicates this shared memory is being used by non-Rust code
    ///
    /// This lock _must_ be held whenever the guest is executing,
    /// because it prevents the host from converting its
    /// HostSharedMemory to an ExclusiveSharedMemory. Since the guest
    /// may arbitrarily mutate the shared memory, only synchronized
    /// accesses from Rust should be allowed!
    ///
    /// We cannot enforce this in the type system, because the memory
    /// is mapped in to the VM at VM creation time.
    pub lock: Arc<RwLock<()>>,
}
unsafe impl Send for GuestSharedMemory {}

/// A HostSharedMemory allows synchronized accesses to guest
/// communication buffers, allowing it to be used concurrently with a
/// GuestSharedMemory.
///
/// Given future requirements for asynchronous I/O with a minimum
/// amount of copying (e.g. WASIp3 streams), we would like it to be
/// possible to safely access these buffers concurrently with the
/// guest, ensuring that (1) data is read appropriately if the guest
/// is well-behaved; and (2) the host's behaviour is defined
/// regardless of whether or not the guest is well-behaved.
///
/// The ideal (future) flow for a guest->host message is something like
///   - Guest writes (unordered) bytes describing a work item into a buffer
///   - Guest reveals buffer via a release-store of a pointer into an
///     MMIO ring-buffer
///   - Host acquire-loads the buffer pointer from the "MMIO" ring
///     buffer
///   - Host (unordered) reads the bytes from the buffer
///   - Host performs validation of those bytes and uses them
///
/// Unfortunately, there appears to be no way to do this with defined
/// behaviour in present Rust (see
/// e.g. https://github.com/rust-lang/unsafe-code-guidelines/issues/152).
/// Rust does not yet have its own defined memory model, but in the
/// interim, it is widely treated as inheriting the current C/C++
/// memory models.  The most immediate problem is that regardless of
/// anything else, under those memory models [1, p. 17-18; 2, p. 88],
///
///   > The execution of a program contains a _data race_ if it
///   > contains two [C++23: "potentially concurrent"] conflicting
///   > actions [C23: "in different threads"], at least one of which
///   > is not atomic, and neither happens before the other [C++23: ",
///   > except for the special case for signal handlers described
///   > below"].  Any such data race results in undefined behavior.
///
/// Consequently, if a misbehaving guest fails to correctly
/// synchronize its stores with the host, the host's innocent loads
/// will trigger undefined behaviour for the entire program, including
/// the host.  Note that this also applies if the guest makes an
/// unsynchronized read of a location that the host is writing!
///
/// Despite Rust's de jure inheritance of the C memory model at the
/// present time, the compiler in many cases de facto adheres to LLVM
/// semantics, so it is worthwhile to consider what LLVM does in this
/// case as well.  According to the LangRef [3] memory model,
/// loads which are involved in a race that includes at least one
/// non-atomic access (whether the load or a store) return `undef`,
/// making them roughly equivalent to reading uninitialized
/// memory. While this is much better, it is still bad.
///
/// Considering a different direction, recent C++ papers have seemed
/// to lean towards using `volatile` for similar use cases. For
/// example, in P1152R0 [4], JF Bastien notes that
///
///   > We’ve shown that volatile is purposely defined to denote
///   > external modifications. This happens for:
///   >   - Shared memory with untrusted code, where volatile is the
///   >     right way to avoid time-of-check time-of-use (ToCToU)
///   >     races which lead to security bugs such as [PWN2OWN] and
///   >     [XENXSA155].
///
/// Unfortunately, although this paper was adopted for C++20 (and,
/// sadly, mostly un-adopted for C++23, although that does not concern
/// us), the paper did not actually redefine volatile accesses or data
/// races to prevent volatile accesses from racing with other accesses
/// and causing undefined behaviour.  P1382R1 [5] would have amended
/// the wording of the data race definition to specifically exclude
/// volatile, but, unfortunately, despite receiving a
/// generally-positive reception at its first WG21 meeting more than
/// five years ago, it has not progressed.
///
/// Separately from the data race issue, there is also a concern that
/// according to the various memory models in use, there may be ways
/// in which the guest can semantically obtain uninitialized memory
/// and write it into the shared buffer, which may also result in
/// undefined behaviour on reads.  The degree to which this is a
/// concern is unclear, however, since it is unclear to what degree
/// the Rust abstract machine's conception of uninitialized memory
/// applies to the sandbox.  Returning briefly to the LLVM level
/// rather than the Rust level: this concern, combined with the fact
/// that racing loads in LLVM return `undef` (as discussed above),
/// means we would ideally `llvm.freeze` the result of any load out of
/// the sandbox.
///
/// It would furthermore be ideal if we could run the flatbuffers
/// parsing code directly on the guest memory, in order to avoid
/// unnecessary copies.  That is unfortunately probably not viable at
/// the present time: because the generated flatbuffers parsing code
/// doesn't use atomic or volatile accesses, it is likely to introduce
/// double-read vulnerabilities.
///
/// In short, none of the Rust-level operations available to us do the
/// right thing, at the Rust spec level or the LLVM spec level. Our
/// major remaining options are therefore:
///   - Choose one of the options that is available to us, and accept
///     that we are doing something unsound according to the spec, but
///     hope that no reasonable compiler could possibly notice.
///   - Use inline assembly per architecture, for which we would only
///     need to worry about the _architecture_'s memory model (which
///     is far less demanding).
///
/// The leading candidate for the first option would seem to be to
/// simply use volatile accesses; there seems to be wide agreement
/// that this _should_ be a valid use case for them (even if it isn't
/// now), and projects like Linux and rust-vmm already use C11
/// `volatile` for this purpose.  It is also worth noting that because
/// we still do need to synchronize with the guest when it _is_ being
/// well-behaved, we would ideally use volatile acquire loads and
/// volatile release stores for interacting with the stack pointer in
/// the guest in this case.  Unfortunately, while those operations are
/// defined in LLVM, they are not presently exposed to Rust. While
/// atomic fences that are not associated with memory accesses
/// (std::sync::atomic::fence) might at first glance seem to help with
/// this problem, they unfortunately do not [6]:
///
///    > A fence ‘A’ which has (at least) Release ordering semantics,
///    > synchronizes with a fence ‘B’ with (at least) Acquire
///    > semantics, if and only if there exist operations X and Y,
///    > both operating on some atomic object ‘M’ such that A is
///    > sequenced before X, Y is sequenced before B and Y observes
///    > the change to M. This provides a happens-before dependence
///    > between A and B.
///
/// Note that the X and Y must be to an _atomic_ object.
///
/// We consequently assume that there has been a strong architectural
/// fence on a vmenter/vmexit between data being read and written.
/// This is unsafe (not guaranteed in the type system)!
///
/// [1] N3047 C23 Working Draft. https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3047.pdf
/// [2] N4950 C++23 Working Draft. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/n4950.pdf
/// [3] LLVM Language Reference Manual, Memory Model for Concurrent Operations. https://llvm.org/docs/LangRef.html#memmodel
/// [4] P1152R0: Deprecating `volatile`. JF Bastien. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1152r0.html
/// [5] P1382R1: `volatile_load<T>` and `volatile_store<T>`. JF Bastien, Paul McKenney, Jeffrey Yasskin, and the indefatigable TBD. https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1382r1.pdf
/// [6] Documentation for std::sync::atomic::fence. https://doc.rust-lang.org/std/sync/atomic/fn.fence.html
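///
/// A usage sketch (illustrative only, not compiled as a doctest; it assumes
/// the caller propagates errors with `?` from a function returning `Result`):
///
/// ```ignore
/// let excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
/// let (host, _guest) = excl.build();
/// // Reads and writes go through the volatile, lock-guarded helpers:
/// host.write::<u64>(0, 42)?;
/// assert_eq!(host.read::<u64>(0)?, 42);
/// ```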
#[derive(Clone, Debug)]
pub struct HostSharedMemory {
    region: Arc<HostMapping>,
    lock: Arc<RwLock<()>>,
}
unsafe impl Send for HostSharedMemory {}

impl ExclusiveSharedMemory {
    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
    #[cfg(target_os = "linux")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        use libc::{
            c_int, mmap, mprotect, off_t, size_t, MAP_ANONYMOUS, MAP_FAILED, MAP_NORESERVE,
            MAP_SHARED, PROT_NONE, PROT_READ, PROT_WRITE,
        };

        use crate::error::HyperlightError::{MemoryRequestTooBig, MmapFailed, MprotectFailed};

        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE) // guard page around the memory
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;
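        // The resulting mapping is laid out roughly as follows:
        //
        //   [ guard page | usable memory (min_size_bytes) | guard page ]
        //   ^ raw_ptr()    ^ base_ptr() / base_addr()
        //
        // so the usable size reported by mem_size() excludes the two
        // PAGE_SIZE_USIZE guard pages at either end.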

        assert!(
            total_size % PAGE_SIZE_USIZE == 0,
            "shared memory must be a multiple of 4096"
        );
        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        // allocate the memory
        let addr = unsafe {
            mmap(
                null_mut(),
                total_size as size_t,
                PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE,
                -1 as c_int,
                0 as off_t,
            )
        };
        if addr == MAP_FAILED {
            log_then_return!(MmapFailed(Error::last_os_error().raw_os_error()));
        }

        // protect the guard pages

        let res = unsafe { mprotect(addr, PAGE_SIZE_USIZE, PROT_NONE) };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }
        let res = unsafe {
            mprotect(
                (addr as *const u8).add(total_size - PAGE_SIZE_USIZE) as *mut c_void,
                PAGE_SIZE_USIZE,
                PROT_NONE,
            )
        };
        if res != 0 {
            return Err(MprotectFailed(Error::last_os_error().raw_os_error()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr as *mut u8,
                size: total_size,
            }),
        })
    }

    /// Create a new region of shared memory with the given minimum
    /// size in bytes. The region will be surrounded by guard pages.
    ///
    /// Return `Err` if shared memory could not be allocated.
    #[cfg(target_os = "windows")]
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn new(min_size_bytes: usize) -> Result<Self> {
        if min_size_bytes == 0 {
            return Err(new_error!("Cannot create shared memory with size 0"));
        }

        let total_size = min_size_bytes
            .checked_add(2 * PAGE_SIZE_USIZE)
            .ok_or_else(|| new_error!("Memory required for sandbox exceeded {}", usize::MAX))?;

        if total_size % PAGE_SIZE_USIZE != 0 {
            return Err(new_error!(
                "shared memory must be a multiple of {}",
                PAGE_SIZE_USIZE
            ));
        }

        // usize and isize are guaranteed to be the same size, and
        // isize::MAX should be positive, so this cast should be safe.
        if total_size > isize::MAX as usize {
            return Err(MemoryRequestTooBig(total_size, isize::MAX as usize));
        }

        let mut dwmaximumsizehigh = 0;
        let mut dwmaximumsizelow = 0;

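        // CreateFileMappingA takes the maximum mapping size as two 32-bit
        // values (dwMaximumSizeHigh and dwMaximumSizeLow), so on 64-bit
        // targets the total size is split into its high and low words below.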
        if std::mem::size_of::<usize>() == 8 {
            dwmaximumsizehigh = (total_size >> 32) as u32;
            dwmaximumsizelow = (total_size & 0xFFFFFFFF) as u32;
        }

        // Allocate the memory using CreateFileMapping instead of VirtualAlloc
        // This allows us to map the memory into the surrogate process using MapViewOfFile2

        #[cfg(not(inprocess))]
        let flags = PAGE_READWRITE;
        #[cfg(inprocess)]
        let flags = PAGE_EXECUTE_READWRITE;

        let handle = unsafe {
            CreateFileMappingA(
                INVALID_HANDLE_VALUE,
                None,
                flags,
                dwmaximumsizehigh,
                dwmaximumsizelow,
                PCSTR::null(),
            )?
        };

        if handle.is_invalid() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        #[cfg(not(inprocess))]
        let file_map = FILE_MAP_ALL_ACCESS;
        #[cfg(inprocess)]
        let file_map = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;

        let addr = unsafe { MapViewOfFile(handle, file_map, 0, 0, 0) };

        if addr.Value.is_null() {
            log_then_return!(MemoryAllocationFailed(
                Error::last_os_error().raw_os_error()
            ));
        }

        // Set the first and last pages to be guard pages

        let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);

        // If the following calls to VirtualProtect are changed make sure to update the calls to VirtualProtectEx in surrogate_process_manager.rs

        let first_guard_page_start = addr.Value;
        if let Err(e) = unsafe {
            VirtualProtect(
                first_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        let last_guard_page_start = unsafe { addr.Value.add(total_size - PAGE_SIZE_USIZE) };
        if let Err(e) = unsafe {
            VirtualProtect(
                last_guard_page_start,
                PAGE_SIZE_USIZE,
                PAGE_NOACCESS,
                &mut unused_out_old_prot_flags,
            )
        } {
            log_then_return!(WindowsAPIError(e.clone()));
        }

        Ok(Self {
            // HostMapping is only non-Send/Sync because raw pointers
            // are not ("as a lint", as the Rust docs say). We don't
            // want to mark HostMapping Send/Sync immediately, because
            // that could socially imply that it's "safe" to use
            // unsafe accesses from multiple threads at once. Instead, we
            // directly impl Send and Sync on this type. Since this
            // type does have Send and Sync manually impl'd, the Arc
            // is not pointless as the lint suggests.
            #[allow(clippy::arc_with_non_send_sync)]
            region: Arc::new(HostMapping {
                ptr: addr.Value as *mut u8,
                size: total_size,
                handle,
            }),
        })
    }

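    /// Make the underlying host mapping executable (in addition to readable
    /// and writable), using `VirtualProtect` on Windows and `mprotect` on
    /// Linux.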
    pub(super) fn make_memory_executable(&self) -> Result<()> {
        #[cfg(target_os = "windows")]
        {
            let mut _old_flags = PAGE_PROTECTION_FLAGS::default();
            if let Err(e) = unsafe {
                VirtualProtect(
                    self.region.ptr as *const c_void,
                    self.region.size,
                    PAGE_EXECUTE_READWRITE,
                    &mut _old_flags as *mut PAGE_PROTECTION_FLAGS,
                )
            } {
                log_then_return!(WindowsAPIError(e.clone()));
            }
        }

        // make the memory executable on Linux
        #[cfg(target_os = "linux")]
        {
            use libc::{mprotect, PROT_EXEC, PROT_READ, PROT_WRITE};

            let res = unsafe {
                mprotect(
                    self.region.ptr as *mut c_void,
                    self.region.size,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                )
            };

            if res != 0 {
                return Err(new_error!(
                    "Failed to make memory executable: {:#?}",
                    Error::last_os_error().raw_os_error()
                ));
            }
        }
        Ok(())
    }

    /// Internal helper method to get the backing memory as a mutable slice.
    ///
    /// # Safety
    /// As per std::slice::from_raw_parts_mut:
    /// - self.base_addr() must be valid for both reads and writes for
    ///   self.mem_size() * mem::size_of::<u8>() many bytes, and it
    ///   must be properly aligned.
    ///
    ///   The rules on validity are still somewhat unspecified, but we
    ///   assume that the result of our calls to mmap/CreateFileMappings may
    ///   be considered a single "allocated object". The use of
    ///   non-atomic accesses is alright from a Safe Rust standpoint,
    ///   because SharedMemoryBuilder is not Sync.
    /// - self.base_addr() must point to self.mem_size() consecutive
    ///   properly initialized values of type u8
    ///
    ///   Again, the exact provenance restrictions on what is
    ///   considered to be initialized values are unclear, but we make
    ///   sure to use mmap(MAP_ANONYMOUS) and
    ///   CreateFileMapping(SEC_COMMIT), so the pages in question are
    ///   zero-initialized, which we hope counts for u8.
    /// - The memory referenced by the returned slice must not be
    ///   accessed through any other pointer (not derived from the
    ///   return value) for the duration of the lifetime 'a. Both read
    ///   and write accesses are forbidden.
    ///
    ///   Accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a mutable borrow of self.
    /// - The total size self.mem_size() * mem::size_of::<u8>() of the
    ///   slice must be no larger than isize::MAX, and adding that
    ///   size to data must not "wrap around" the address space. See
    ///   the safety documentation of pointer::offset.
    ///
    ///   This is ensured by a check in ::new()
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn as_mut_slice<'a>(&'a mut self) -> &'a mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base_ptr(), self.mem_size()) }
    }

    /// Internal helper method to get the backing memory as a slice.
    ///
    /// # Safety
    /// See the discussion on as_mut_slice, with the third point
    /// replaced by:
    /// - The memory referenced by the returned slice must not be
    ///   mutated for the duration of lifetime 'a, except inside an
    ///   UnsafeCell.
    ///
    ///   Host accesses from Safe Rust necessarily follow this rule,
    ///   because the returned slice's lifetime is the same as that of
    ///   a borrow of self, preventing mutations via other methods.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub fn as_slice<'a>(&'a self) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(self.base_ptr(), self.mem_size()) }
    }

    /// Copy the entire contents of `self` into a `Vec<u8>`, then return it
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn copy_all_to_vec(&self) -> Result<Vec<u8>> {
        let data = self.as_slice();
        Ok(data.to_vec())
    }

    /// Copies all bytes from `src` to `self` starting at offset
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn copy_from_slice(&mut self, src: &[u8], offset: usize) -> Result<()> {
        let data = self.as_mut_slice();
        bounds_check!(offset, src.len(), data.len());
        data[offset..offset + src.len()].copy_from_slice(src);
        Ok(())
    }

    /// Return the address of memory at an offset to this `SharedMemory` checking
    /// that the memory is within the bounds of the `SharedMemory`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn calculate_address(&self, offset: usize) -> Result<usize> {
        bounds_check!(offset, 0, self.mem_size());
        Ok(self.base_addr() + offset)
    }

    generate_reader!(read_u8, u8);
    generate_reader!(read_i8, i8);
    generate_reader!(read_u16, u16);
    generate_reader!(read_i16, i16);
    generate_reader!(read_u32, u32);
    generate_reader!(read_i32, i32);
    generate_reader!(read_u64, u64);
    generate_reader!(read_i64, i64);
    generate_reader!(read_usize, usize);
    generate_reader!(read_isize, isize);

    generate_writer!(write_u8, u8);
    generate_writer!(write_i8, i8);
    generate_writer!(write_u16, u16);
    generate_writer!(write_i16, i16);
    generate_writer!(write_u32, u32);
    generate_writer!(write_i32, i32);
    generate_writer!(write_u64, u64);
    generate_writer!(write_i64, i64);
    generate_writer!(write_usize, usize);
    generate_writer!(write_isize, isize);

    /// Convert the ExclusiveSharedMemory, which may be freely
    /// modified, into a GuestSharedMemory, which may be somewhat
    /// freely modified (mostly by the guest), and a HostSharedMemory,
    /// which may only make certain kinds of accesses that do not race
    /// in the presence of malicious code inside the guest mutating
    /// the GuestSharedMemory.
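    ///
    /// A usage sketch (illustrative only, not compiled as a doctest):
    ///
    /// ```ignore
    /// let excl = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE)?;
    /// let (hshm, gshm) = excl.build();
    /// // Both halves share the same underlying mapping and lock: `gshm` is
    /// // handed to the vCPU, while `hshm` performs synchronized accesses.
    /// ```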
    pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) {
        let lock = Arc::new(RwLock::new(()));
        (
            HostSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
            GuestSharedMemory {
                region: self.region.clone(),
                lock: lock.clone(),
            },
        )
    }

    /// Gets the file handle of the shared memory region for this Sandbox
    #[cfg(target_os = "windows")]
    pub fn get_mmap_file_handle(&self) -> HANDLE {
        self.region.handle
    }
}

/// A trait that abstracts over the particular kind of SharedMemory,
/// used when invoking operations from Rust that absolutely must have
/// exclusive control over the shared memory for correctness +
/// performance, like snapshotting.
pub trait SharedMemory {
    /// Return a readonly reference to the host mapping backing this SharedMemory
    fn region(&self) -> &HostMapping;

    /// Return the base address of the host mapping of this
    /// region. Following the general Rust philosophy, this does not
    /// need to be marked as `unsafe` because doing anything with this
    /// pointer itself requires `unsafe`.
    fn base_addr(&self) -> usize {
        self.region().ptr as usize + PAGE_SIZE_USIZE
    }

    /// Return the base address of the host mapping of this region as
    /// a pointer. Following the general Rust philosophy, this does
    /// not need to be marked as `unsafe` because doing anything with
    /// this pointer itself requires `unsafe`.
    fn base_ptr(&self) -> *mut u8 {
        self.base_addr() as *mut u8
    }

    /// Return the length of usable memory contained in `self`.
    /// The returned size does not include the size of the surrounding
    /// guard pages.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn mem_size(&self) -> usize {
        self.region().size - 2 * PAGE_SIZE_USIZE
    }

    /// Return the raw base address of the host mapping, including the
    /// guard pages.
    fn raw_ptr(&self) -> *mut u8 {
        self.region().ptr
    }

    /// Return the raw size of the host mapping, including the guard
    /// pages.
    fn raw_mem_size(&self) -> usize {
        self.region().size
    }

    /// Run some code with exclusive access to the SharedMemory
    /// underlying this.  If the SharedMemory is not an
    /// ExclusiveSharedMemory, any concurrent accesses to the relevant
    /// HostSharedMemory/GuestSharedMemory may make this fail, or be
    /// made to fail by this, and should be avoided.
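    ///
    /// A usage sketch (illustrative only, not compiled as a doctest; note
    /// that the closure's own result is wrapped in the outer `Result`):
    ///
    /// ```ignore
    /// let snapshot = shared_mem.with_exclusivity(|excl| excl.copy_all_to_vec())??;
    /// ```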
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T>;
}

impl SharedMemory for ExclusiveSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        Ok(f(self))
    }
}

impl SharedMemory for GuestSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

/// An unsafe marker trait for types for which all bit patterns are valid.
/// This is required in order for it to be safe to read a value of a particular
/// type out of the sandbox from the HostSharedMemory.
///
/// # Safety
/// This must only be implemented for types for which all bit patterns
/// are valid. It requires that any (non-undef/poison) value of the
/// correct size can be transmuted to the type.
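/// For example, every bit pattern of a fixed-width integer type is a valid
/// value, so the impls below are sound; by contrast, a type such as `bool`
/// (for which only the bit patterns 0 and 1 are valid) must never implement
/// this trait.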
pub unsafe trait AllValid {}
unsafe impl AllValid for u8 {}
unsafe impl AllValid for u16 {}
unsafe impl AllValid for u32 {}
unsafe impl AllValid for u64 {}
unsafe impl AllValid for i8 {}
unsafe impl AllValid for i16 {}
unsafe impl AllValid for i32 {}
unsafe impl AllValid for i64 {}
unsafe impl AllValid for [u8; 16] {}

impl HostSharedMemory {
    /// Read a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
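    ///
    /// A usage sketch (illustrative only, not compiled as a doctest; the
    /// `host_shared_mem` and `offset` names are assumed):
    ///
    /// ```ignore
    /// // Read a u32 placed by the guest at `offset`, interpreted in the
    /// // host's native representation.
    /// let value: u32 = host_shared_mem.read::<u32>(offset)?;
    /// ```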
    pub fn read<T: AllValid>(&self, offset: usize) -> Result<T> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        let ret = unsafe {
            let mut ret: core::mem::MaybeUninit<T> = core::mem::MaybeUninit::uninit();
            {
                let slice: &mut [u8] = core::slice::from_raw_parts_mut(
                    ret.as_mut_ptr() as *mut u8,
                    std::mem::size_of::<T>(),
                );
                self.copy_to_slice(slice, offset)?;
            }
            Ok(ret.assume_init())
        };
        ret
    }

    /// Write a value of type T, whose representation is the same
    /// between the sandbox and the host, and which has no invalid bit
    /// patterns
    pub fn write<T: AllValid>(&self, offset: usize, data: T) -> Result<()> {
        bounds_check!(offset, std::mem::size_of::<T>(), self.mem_size());
        unsafe {
            let slice: &[u8] = core::slice::from_raw_parts(
                core::ptr::addr_of!(data) as *const u8,
                std::mem::size_of::<T>(),
            );
            self.copy_from_slice(slice, offset)?;
        }
        Ok(())
    }

    /// Copy the contents of the sandbox at the specified offset into
    /// the given slice
    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter_mut().enumerate() {
            unsafe {
                *b = base.wrapping_add(i).read_volatile();
            }
        }
        drop(guard);
        Ok(())
    }

    /// Copy the contents of the given slice into the sandbox at the
    /// specified offset
    pub fn copy_from_slice(&self, slice: &[u8], offset: usize) -> Result<()> {
        bounds_check!(offset, slice.len(), self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for (i, b) in slice.iter().enumerate() {
            unsafe {
                base.wrapping_add(i).write_volatile(*b);
            }
        }
        drop(guard);
        Ok(())
    }

    /// Fill the memory in the range `[offset, offset + len)` with `value`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn fill(&mut self, value: u8, offset: usize, len: usize) -> Result<()> {
        bounds_check!(offset, len, self.mem_size());
        let base = self.base_ptr().wrapping_add(offset);
        let guard = self
            .lock
            .try_read()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        // todo: replace with something a bit more optimized + correct
        for i in 0..len {
            unsafe { base.wrapping_add(i).write_volatile(value) };
        }
        drop(guard);
        Ok(())
    }

    /// Pushes the given data onto the shared-memory buffer at the given offset.
    /// NOTE! buffer_start_offset must point to the beginning of the buffer
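    ///
    /// The buffer is treated as a simple stack. A rough sketch of the layout,
    /// with all offsets relative to `buffer_start_offset`:
    ///
    /// ```text
    /// [ stack pointer: u64 | element | element's offset: u64 | ... | free space ]
    ///   ^ offset 0                                                  ^ stack pointer
    /// ```
    ///
    /// Each pushed element is followed by an 8-byte copy of the relative
    /// offset at which it starts; `try_pop_buffer_into` uses that value to
    /// locate the most recently pushed element.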
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn push_buffer(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
        data: &[u8],
    ) -> Result<()> {
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
        let buffer_size_u64: u64 = buffer_size.try_into()?;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
            return Err(new_error!(
                "Unable to push data to buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size_u64
            ));
        }

        let size_required = data.len() + 8;
        let size_available = buffer_size - stack_pointer_rel;

        if size_required > size_available {
            return Err(new_error!(
                "Not enough space in buffer to push data. Required: {}, Available: {}",
                size_required,
                size_available
            ));
        }

        // get absolute
        let stack_pointer_abs = stack_pointer_rel + buffer_start_offset;

        // write the actual data to the top of stack
        self.copy_from_slice(data, stack_pointer_abs)?;

        // write the offset to the newly written data, to the top of stack.
        // this is used when popping the stack, to know how far back to jump
        self.write::<u64>(stack_pointer_abs + data.len(), stack_pointer_rel as u64)?;

        // update stack pointer to point to the next free address
        self.write::<u64>(
            buffer_start_offset,
            (stack_pointer_rel + data.len() + 8) as u64,
        )?;
        Ok(())
    }

    /// Pops the given buffer into a `T` and returns it.
    /// NOTE! the data must be a size-prefixed flatbuffer, and
    /// buffer_start_offset must point to the beginning of the buffer
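    ///
    /// This works by reading the 8-byte relative offset stored just below the
    /// current stack pointer (written by `push_buffer`), locating the most
    /// recently pushed element, parsing it into a `T`, and then rewinding the
    /// stack pointer and zeroing the popped bytes.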
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn try_pop_buffer_into<T>(
        &mut self,
        buffer_start_offset: usize,
        buffer_size: usize,
    ) -> Result<T>
    where
        T: for<'b> TryFrom<&'b [u8]>,
    {
        // get the stackpointer
        let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;

        if stack_pointer_rel > buffer_size || stack_pointer_rel < 16 {
            return Err(new_error!(
                "Unable to pop data from buffer: Stack pointer is out of bounds. Stack pointer: {}, Buffer size: {}",
                stack_pointer_rel,
                buffer_size
            ));
        }

        // make it absolute
        let last_element_offset_abs = stack_pointer_rel + buffer_start_offset;

        // go back 8 bytes to get offset to element on top of stack
        let last_element_offset_rel: usize =
            self.read::<u64>(last_element_offset_abs - 8)? as usize;

        // make it absolute
        let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;

        // Get the size of the flatbuffer buffer from memory
        let fb_buffer_size = {
            let size_i32 = self.read::<u32>(last_element_offset_abs)? + 4;
            // ^^^ flatbuffer byte arrays are prefixed by 4 bytes
            // indicating its size, so, to get the actual size, we need
            // to add 4.
            usize::try_from(size_i32)
        }?;

        let mut result_buffer = vec![0; fb_buffer_size];

        self.copy_to_slice(&mut result_buffer, last_element_offset_abs)?;
        let to_return = T::try_from(result_buffer.as_slice()).map_err(|_e| {
            new_error!(
                "pop_buffer_into: failed to convert buffer to {}",
                type_name::<T>()
            )
        })?;

        // update the stack pointer to point to the element we just popped off since that is now free
        self.write::<u64>(buffer_start_offset, last_element_offset_rel as u64)?;

        // zero out the memory we just popped off
        let num_bytes_to_zero = stack_pointer_rel - last_element_offset_rel;
        self.fill(0, last_element_offset_abs, num_bytes_to_zero)?;

        Ok(to_return)
    }
}

impl SharedMemory for HostSharedMemory {
    fn region(&self) -> &HostMapping {
        &self.region
    }
    fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
        &mut self,
        f: F,
    ) -> Result<T> {
        let guard = self
            .lock
            .try_write()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let mut excl = ExclusiveSharedMemory {
            region: self.region.clone(),
        };
        let ret = f(&mut excl);
        drop(excl);
        drop(guard);
        Ok(ret)
    }
}

#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    use proptest::prelude::*;

    use super::{ExclusiveSharedMemory, HostSharedMemory, SharedMemory};
    use crate::mem::shared_mem_tests::read_write_test_suite;
    use crate::Result;

    #[test]
    fn fill() {
        let mem_size: usize = 4096;
        let eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        let (mut hshm, _) = eshm.build();

        hshm.fill(1, 0, 1024).unwrap();
        hshm.fill(2, 1024, 1024).unwrap();
        hshm.fill(3, 2048, 1024).unwrap();
        hshm.fill(4, 3072, 1024).unwrap();

        let vec = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();

        assert!(vec[0..1024].iter().all(|&x| x == 1));
        assert!(vec[1024..2048].iter().all(|&x| x == 2));
        assert!(vec[2048..3072].iter().all(|&x| x == 3));
        assert!(vec[3072..4096].iter().all(|&x| x == 4));

        hshm.fill(5, 0, 4096).unwrap();

        let vec2 = hshm
            .with_exclusivity(|e| e.copy_all_to_vec().unwrap())
            .unwrap();
        assert!(vec2.iter().all(|&x| x == 5));

        assert!(hshm.fill(0, 0, mem_size + 1).is_err());
        assert!(hshm.fill(0, mem_size, 1).is_err());
    }

    #[test]
    fn copy_into_from() -> Result<()> {
        let mem_size: usize = 4096;
        let vec_len = 10;
        let eshm = ExclusiveSharedMemory::new(mem_size)?;
        let (hshm, _) = eshm.build();
        let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        // write the value to the memory at the beginning.
        hshm.copy_from_slice(&vec, 0)?;

        let mut vec2 = vec![0; vec_len];
        // read the value back from the memory at the beginning.
        hshm.copy_to_slice(vec2.as_mut_slice(), 0)?;
        assert_eq!(vec, vec2);

        let offset = mem_size - vec.len();
        // write the value to the memory at the end.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec3 = vec![0; vec_len];
        // read the value back from the memory at the end.
        hshm.copy_to_slice(&mut vec3, offset)?;
        assert_eq!(vec, vec3);

        let offset = mem_size / 2;
        // write the value to the memory at the middle.
        hshm.copy_from_slice(&vec, offset)?;

        let mut vec4 = vec![0; vec_len];
        // read the value back from the memory at the middle.
        hshm.copy_to_slice(&mut vec4, offset)?;
        assert_eq!(vec, vec4);

        // try and read a value from an offset that is beyond the end of the memory.
        let mut vec5 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec5, mem_size).is_err());

        // try and write a value to an offset that is beyond the end of the memory.
        assert!(hshm.copy_from_slice(&vec5, mem_size).is_err());

        // try and read a value from an offset that is too large.
        let mut vec6 = vec![0; vec_len];
        assert!(hshm.copy_to_slice(&mut vec6, mem_size * 2).is_err());

        // try and write a value to an offset that is too large.
        assert!(hshm.copy_from_slice(&vec6, mem_size * 2).is_err());

        // try and read a value that is too large.
        let mut vec7 = vec![0; mem_size * 2];
        assert!(hshm.copy_to_slice(&mut vec7, 0).is_err());

        // try and write a value that is too large.
        assert!(hshm.copy_from_slice(&vec7, 0).is_err());

        Ok(())
    }

    proptest! {
        #[test]
        fn read_write_i32(val in -0x1000_i32..0x1000_i32) {
            read_write_test_suite(
                val,
                ExclusiveSharedMemory::new,
                Box::new(ExclusiveSharedMemory::read_i32),
                Box::new(ExclusiveSharedMemory::write_i32),
            )
            .unwrap();
            read_write_test_suite(
                val,
                |s| {
                    let e = ExclusiveSharedMemory::new(s)?;
                    let (h, _) = e.build();
                    Ok(h)
                },
                Box::new(HostSharedMemory::read::<i32>),
                Box::new(|h, o, v| h.write::<i32>(o, v)),
            )
            .unwrap();
        }
    }

    #[test]
    fn alloc_fail() {
        let gm = ExclusiveSharedMemory::new(0);
        assert!(gm.is_err());
        let gm = ExclusiveSharedMemory::new(usize::MAX);
        assert!(gm.is_err());
    }

    #[test]
    fn clone() {
        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, _) = eshm.build();
        let hshm2 = hshm1.clone();

        // after hshm1 is cloned, hshm1 and hshm2 should have identical
        // memory sizes and pointers.
        assert_eq!(hshm1.mem_size(), hshm2.mem_size());
        assert_eq!(hshm1.base_addr(), hshm2.base_addr());

        // we should be able to copy a byte array into both hshm1 and hshm2,
        // and have both changes be reflected in all clones
        hshm1.copy_from_slice(b"a", 0).unwrap();
        hshm2.copy_from_slice(b"b", 1).unwrap();

        // at this point, both hshm1 and hshm2 should have
        // offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm1.read::<u8>(*raw_offset).unwrap(), *expected);
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }

        // after we drop hshm1, hshm2 should still exist, be valid,
        // and have all contents from before hshm1 was dropped
        drop(hshm1);

        // at this point, hshm2 should still have offset 0 = 'a', offset 1 = 'b'
        for (raw_offset, expected) in &[(0, b'a'), (1, b'b')] {
            assert_eq!(hshm2.read::<u8>(*raw_offset).unwrap(), *expected);
        }
        hshm2.copy_from_slice(b"c", 2).unwrap();
        assert_eq!(hshm2.read::<u8>(2).unwrap(), b'c');
        drop(hshm2);
    }

    #[test]
    fn copy_all_to_vec() {
        let mut data = vec![b'a', b'b', b'c'];
        data.resize(4096, 0);
        let mut eshm = ExclusiveSharedMemory::new(data.len()).unwrap();
        eshm.copy_from_slice(data.as_slice(), 0).unwrap();
        let ret_vec = eshm.copy_all_to_vec().unwrap();
        assert_eq!(data, ret_vec);
    }

    /// A test to ensure that, if a `SharedMem` instance is cloned
    /// and _all_ clones are dropped, the memory region will no longer
    /// be valid.
    ///
    /// This test is ignored because it is incompatible with other tests as
    /// they may be allocating memory at the same time.
    ///
    /// Marking this test as ignored means that running `cargo test` will not
    /// run it. This feature will allow a developer who runs that command
    /// from their workstation to be successful without needing to know about
    /// test interdependencies. This test will, however, be run explicitly as a
    /// part of the CI pipeline.
    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_drop() {
        use proc_maps::maps_contain_addr;

        let pid = std::process::id();

        let eshm = ExclusiveSharedMemory::new(PAGE_SIZE_USIZE).unwrap();
        let (hshm1, gshm) = eshm.build();
        let hshm2 = hshm1.clone();
        let addr = hshm1.raw_ptr() as usize;

        // ensure the address is in the process's virtual memory
        let maps_before_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        assert!(
            maps_contain_addr(addr, &maps_before_drop),
            "shared memory address {:#x} was not found in process map, but should be",
            addr,
        );
        // drop both shared memory instances, which should result
        // in freeing the memory region
        drop(hshm1);
        drop(hshm2);
        drop(gshm);

        let maps_after_drop = proc_maps::get_process_maps(pid.try_into().unwrap()).unwrap();
        // now, ensure the address is not in the process's virtual memory
        assert!(
            !maps_contain_addr(addr, &maps_after_drop),
            "shared memory address {:#x} was found in the process map, but shouldn't be",
            addr
        );
    }

    #[cfg(target_os = "linux")]
    mod guard_page_crash_test {
        use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

        const TEST_EXIT_CODE: u8 = 211; // an uncommon exit code, used for testing purposes

        /// hook sigsegv to exit with status code, to make it testable, rather than have it exit from a signal
        /// NOTE: We CANNOT panic!() in the handler, and make the tests #[should_panic], because
        ///     the test harness process will crash anyway after the test passes
        fn setup_signal_handler() {
            unsafe {
                signal_hook_registry::register_signal_unchecked(libc::SIGSEGV, || {
                    std::process::exit(TEST_EXIT_CODE.into());
                })
                .unwrap();
            }
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn read() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::read_volatile(guard_page_ptr) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn write() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            unsafe { std::ptr::write_volatile(guard_page_ptr, 0u8) };
        }

        #[test]
        #[ignore] // this test is ignored because it will crash the running process
        fn exec() {
            setup_signal_handler();

            let eshm = ExclusiveSharedMemory::new(4096).unwrap();
            let (hshm, _) = eshm.build();
            let guard_page_ptr = hshm.raw_ptr();
            let func: fn() = unsafe { std::mem::transmute(guard_page_ptr) };
            func();
        }

        // provides a way for running the above tests in a separate process since they expect to crash
        #[test]
        fn guard_page_testing_shim() {
            let tests = vec!["read", "write", "exec"];

            for test in tests {
                let status = std::process::Command::new("cargo")
                    .args(["test", "-p", "hyperlight-host", "--", "--ignored", test])
                    .stdin(std::process::Stdio::null())
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .status()
                    .expect("Unable to launch tests");
                assert_eq!(
                    status.code(),
                    Some(TEST_EXIT_CODE.into()),
                    "Guard Page test failed: {}",
                    test
                );
            }
        }
    }
}