hyperlight_host/mem/mgr.rs

/*
Copyright 2024 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use core::mem::size_of;
use std::cmp::Ordering;
use std::str::from_utf8;
use std::sync::{Arc, Mutex};

use hyperlight_common::flatbuffer_wrappers::function_call::{
    validate_guest_function_call_buffer, FunctionCall,
};
use hyperlight_common::flatbuffer_wrappers::function_types::ReturnValue;
use hyperlight_common::flatbuffer_wrappers::guest_error::{ErrorCode, GuestError};
use hyperlight_common::flatbuffer_wrappers::guest_log_data::GuestLogData;
use hyperlight_common::flatbuffer_wrappers::host_function_details::HostFunctionDetails;
use serde_json::from_str;
use tracing::{instrument, Span};

use super::exe::ExeInfo;
use super::layout::SandboxMemoryLayout;
#[cfg(target_os = "windows")]
use super::loaded_lib::LoadedLib;
use super::memory_region::{MemoryRegion, MemoryRegionType};
use super::ptr::{GuestPtr, RawPtr};
use super::ptr_offset::Offset;
use super::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, HostSharedMemory, SharedMemory};
use super::shared_mem_snapshot::SharedMemorySnapshot;
use crate::error::HyperlightError::{
    ExceptionDataLengthIncorrect, ExceptionMessageTooBig, JsonConversionFailure, NoMemorySnapshot,
    UTF8SliceConversionFailure,
};
use crate::error::HyperlightHostError;
use crate::sandbox::SandboxConfiguration;
use crate::{log_then_return, new_error, HyperlightError, Result};

/// Paging Flags
///
/// See the following links explaining paging; also see paging-development-notes.md in docs:
///
/// * Very basic description: https://stackoverflow.com/a/26945892
/// * More in-depth descriptions: https://wiki.osdev.org/Paging
const PAGE_PRESENT: u64 = 1; // Page is Present
const PAGE_RW: u64 = 1 << 1; // Page is Read/Write (if not set, the page is read-only as long as the WP bit in CR0 is set to 1 - which it is in Hyperlight)
const PAGE_USER: u64 = 1 << 2; // User/Supervisor (if this bit is set then the page is accessible by user-mode code)
const PAGE_NX: u64 = 1 << 63; // Execute Disable (if this bit is set then data in the page cannot be executed)
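// For illustration only (not a constant used below): a writable data page that
// must never be executed combines to PAGE_PRESENT | PAGE_RW | PAGE_NX, while a
// read-only, non-executable page is just PAGE_PRESENT | PAGE_NX.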

// The amount of memory that can be mapped per page table
pub(super) const AMOUNT_OF_MEMORY_PER_PT: usize = 0x200000;
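// (one page table holds 512 PTEs, each mapping a 4 KiB page: 512 * 4096 = 0x200000 bytes = 2 MiB)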
/// The size of stack guard cookies
pub(crate) const STACK_COOKIE_LEN: usize = 16;

/// A struct that is responsible for laying out and managing the memory
/// for a given `Sandbox`.
#[derive(Clone)]
pub(crate) struct SandboxMemoryManager<S> {
    /// Shared memory for the Sandbox
    pub(crate) shared_mem: S,
    /// The memory layout of the underlying shared memory
    pub(crate) layout: SandboxMemoryLayout,
    /// Whether the sandbox is running in-process
    inprocess: bool,
    /// Pointer to where to load memory from
    pub(crate) load_addr: RawPtr,
    /// Offset for the execution entrypoint from `load_addr`
    pub(crate) entrypoint_offset: Offset,
    /// A vector of memory snapshots that can be used to save and restore the state of the memory.
    /// This is used by the Rust Sandbox implementation to save and restore sandbox memory state.
    snapshots: Arc<Mutex<Vec<SharedMemorySnapshot>>>,
    /// This field must be present, even though it's not read,
    /// so that its underlying resources are properly dropped at
    /// the right time.
    #[cfg(target_os = "windows")]
    _lib: Option<LoadedLib>,
}

impl<S> SandboxMemoryManager<S>
where
    S: SharedMemory,
{
    /// Create a new `SandboxMemoryManager` with the given parameters
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn new(
        layout: SandboxMemoryLayout,
        shared_mem: S,
        inprocess: bool,
        load_addr: RawPtr,
        entrypoint_offset: Offset,
        #[cfg(target_os = "windows")] lib: Option<LoadedLib>,
    ) -> Self {
        Self {
            layout,
            shared_mem,
            inprocess,
            load_addr,
            entrypoint_offset,
            snapshots: Arc::new(Mutex::new(Vec::new())),
            #[cfg(target_os = "windows")]
            _lib: lib,
        }
    }

    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn is_in_process(&self) -> bool {
        self.inprocess
    }

    /// Get `SharedMemory` in `self` as a mutable reference
    pub(crate) fn get_shared_mem_mut(&mut self) -> &mut S {
        &mut self.shared_mem
    }

    /// Set up the hypervisor partition in the given `SharedMemory` parameter
    /// `shared_mem`, with the given memory size `mem_size`
    // TODO: This should perhaps happen earlier and use an
    // ExclusiveSharedMemory from the beginning.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_up_shared_memory(
        &mut self,
        mem_size: u64,
        regions: &mut [MemoryRegion],
    ) -> Result<u64> {
        // Add 0x200000 because that's the start of mapped memory
        // For MSVC, move rsp down by 0x28. This gives the called 'main'
        // function the appearance that rsp was 16-byte aligned before
        // the 'call' that calls main (note we don't really have a return value
        // on the stack, but some assembly instructions expect rsp to have
        // started 0x8 bytes off of 16-byte alignment when 'main' is invoked).
        // We do 0x28 instead of 0x8 because MSVC can expect that there are
        // 0x20 bytes of shadow space for the called function to write to.
        // I am not sure if this happens with the 'main' method, but we do this
        // just in case.
        //
        // NOTE: We also do this for GCC freestanding binaries because we
        // specify __attribute__((ms_abi)) on the start method.
        let rsp: u64 = self.layout.get_top_of_user_stack_offset() as u64
            + SandboxMemoryLayout::BASE_ADDRESS as u64
            + self.layout.stack_size as u64
            - 0x28;
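        // Illustrative check of the arithmetic above: if the aligned top of
        // the stack were 0x29FFF0 (a multiple of 16), then rsp would be
        // 0x29FFF0 - 0x28 = 0x29FFC8, and 0x29FFC8 % 16 == 8, which matches
        // the state rsp is normally in right after a 'call' has pushed the
        // 8-byte return address.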

        self.shared_mem.with_exclusivity(|shared_mem| {
            // Create PML4 table with only 1 PML4E
            shared_mem.write_u64(
                SandboxMemoryLayout::PML4_OFFSET,
                SandboxMemoryLayout::PDPT_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

            // Create PDPT with only 1 PDPTE
            shared_mem.write_u64(
                SandboxMemoryLayout::PDPT_OFFSET,
                SandboxMemoryLayout::PD_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

            for i in 0..512 {
                let offset = SandboxMemoryLayout::PD_OFFSET + (i * 8);
                let val_to_write: u64 = (SandboxMemoryLayout::PT_GUEST_ADDRESS as u64
                    + (i * 4096) as u64)
                    | PAGE_PRESENT
                    | PAGE_RW;
                shared_mem.write_u64(offset, val_to_write)?;
            }

            // We only need to create enough PTEs to map the amount of memory we have
            // We need one PT for every 2 MB of memory that is mapped
            // We can use the memory size to calculate the number of PTs we need
            // We round up mem_size / 2 MB and then add 1, as we start our memory mapping at 0x200000

            let mem_size = usize::try_from(mem_size)?;

            let num_pages: usize =
                ((mem_size + AMOUNT_OF_MEMORY_PER_PT - 1) / AMOUNT_OF_MEMORY_PER_PT) + 1;
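            // Worked example: for mem_size = 5 MiB, (5 MiB + 2 MiB - 1) / 2 MiB = 3,
            // plus 1 for the 0x200000 base offset, so num_pages = 4 page tables.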

            // Create num_pages page tables, each with 512 PTEs
            for p in 0..num_pages {
                for i in 0..512 {
                    let offset = SandboxMemoryLayout::PT_OFFSET + (p * 4096) + (i * 8);
                    // Each PTE maps a 4KB page
                    let val_to_write = if p == 0 {
                        (p << 21) as u64 | (i << 12) as u64
                    } else {
                        let flags = match Self::get_page_flags(p, i, regions) {
                            Ok(region_type) => match region_type {
                                // TODO: parse and load the exe according to its sections and then
                                // set the correct flags per section rather than just marking the entire binary as executable
                                MemoryRegionType::Code => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                                MemoryRegionType::Stack => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                #[cfg(feature = "executable_heap")]
                                MemoryRegionType::Heap => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                                #[cfg(not(feature = "executable_heap"))]
                                MemoryRegionType::Heap => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                // The guard page is marked RW and User so that if it gets written to we can detect it in the host.
                                // If/when we implement an interrupt handler for page faults in the guest, we can remove this access and handle things properly there.
                                MemoryRegionType::GuardPage => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                MemoryRegionType::InputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::OutputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::Peb => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                // Host Function Definitions are read-only in the guest
                                MemoryRegionType::HostFunctionDefinitions => PAGE_PRESENT | PAGE_NX,
                                MemoryRegionType::PanicContext => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::GuestErrorData => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_NX
                                }
                                // Host Exception Data is read-only in the guest
                                MemoryRegionType::HostExceptionData => PAGE_PRESENT | PAGE_NX,
                                MemoryRegionType::PageTables => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::KernelStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::BootStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                            },
                            // If there is an error then the address isn't mapped, so mark it as not present
                            Err(_) => 0,
                        };
                        ((p << 21) as u64 | (i << 12) as u64) | flags
                    };
                    shared_mem.write_u64(offset, val_to_write)?;
                }
            }
            Ok::<(), HyperlightError>(())
        })??;

        Ok(rsp)
    }

    fn get_page_flags(
        p: usize,
        i: usize,
        regions: &mut [MemoryRegion],
    ) -> Result<MemoryRegionType> {
        let addr = (p << 21) + (i << 12);
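        // For example, page table p = 1, entry i = 3 corresponds to guest address
        // (1 << 21) + (3 << 12) = 0x200000 + 0x3000 = 0x203000.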

        let idx = regions.binary_search_by(|region| {
            if region.guest_region.contains(&addr) {
                std::cmp::Ordering::Equal
            } else if region.guest_region.start > addr {
                std::cmp::Ordering::Greater
            } else {
                std::cmp::Ordering::Less
            }
        });

        match idx {
            Ok(index) => Ok(regions[index].region_type),
            Err(_) => Err(new_error!("Could not find region for address: {}", addr)),
        }
    }

    /// Get the process environment block (PEB) address assuming `start_addr`
    /// is the address of the start of memory, using the given
    /// `SandboxMemoryLayout` to calculate the address.
    ///
    /// For more details on PEBs, please see the following link:
    ///
    /// https://en.wikipedia.org/wiki/Process_Environment_Block
    #[cfg(inprocess)]
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_in_process_peb_address(&self, start_addr: u64) -> Result<u64> {
        Ok(start_addr + self.layout.get_in_process_peb_offset() as u64)
    }

    /// This function creates a memory snapshot and pushes it onto the stack of snapshots.
    /// It should be used when you want to save the state of the memory, for example, when evolving a sandbox to a new state.
    pub(crate) fn push_state(&mut self) -> Result<()> {
        let snapshot = SharedMemorySnapshot::new(&mut self.shared_mem)?;
        self.snapshots
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?
            .push(snapshot);
        Ok(())
    }

    /// This function restores memory from the last snapshot on the stack but does not pop the snapshot
    /// off the stack.
    /// It should be used when you want to restore the state of the memory to a previous state while still
    /// retaining that state, for example after calling a function in the guest.
    pub(crate) fn restore_state_from_last_snapshot(&mut self) -> Result<()> {
        let mut snapshots = self
            .snapshots
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let last = snapshots.last_mut();
        if last.is_none() {
            log_then_return!(NoMemorySnapshot);
        }
        let snapshot = last.unwrap();
        snapshot.restore_from_snapshot(&mut self.shared_mem)
    }

    /// This function pops the last snapshot off the stack and restores the memory to the previous state.
    /// It should be used when you want to restore the state of the memory to a previous state and do not need to retain that state,
    /// for example when devolving a sandbox to a previous state.
    pub(crate) fn pop_and_restore_state_from_snapshot(&mut self) -> Result<()> {
        let last = self
            .snapshots
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?
            .pop();
        if last.is_none() {
            log_then_return!(NoMemorySnapshot);
        }
        self.restore_state_from_last_snapshot()
    }
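
    // Illustrative sketch (not part of the build) of how the snapshot stack is
    // intended to be used by a hypothetical caller; `mgr` stands in for a
    // `SandboxMemoryManager` over writable shared memory:
    //
    //     mgr.push_state()?;                          // snapshot stack: [S0]
    //     // ... evolve the sandbox, then snapshot again ...
    //     mgr.push_state()?;                          // snapshot stack: [S0, S1]
    //     // ... a guest call mutates memory ...
    //     mgr.restore_state_from_last_snapshot()?;    // memory back to S1; stack unchanged
    //     mgr.pop_and_restore_state_from_snapshot()?; // pop S1, memory restored to S0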

    /// Sets `addr` to the correct offset in the memory referenced by
    /// `shared_mem` to indicate the address of the outb pointer and context
    /// for calling outb function
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_outb_address_and_context(&mut self, addr: u64, context: u64) -> Result<()> {
        let pointer_offset = self.layout.get_outb_pointer_offset();
        let context_offset = self.layout.get_outb_context_offset();
        self.shared_mem.with_exclusivity(|excl| -> Result<()> {
            excl.write_u64(pointer_offset, addr)?;
            excl.write_u64(context_offset, context)?;
            Ok(())
        })?
    }
}

/// Common setup functionality for the
/// `load_guest_binary_{into_memory, using_load_library}` functions
///
/// Returns the newly created `SandboxMemoryLayout`, newly created
/// `SharedMemory`, load address as calculated by `load_addr_fn`,
/// and calculated entrypoint offset, in order.
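///
/// A minimal usage sketch (illustrative only, not compiled; assumes a `cfg`
/// and `exe_info` are already in hand and glosses over error handling):
///
/// ```ignore
/// let (layout, shared_mem, load_addr, entrypoint_offset) =
///     load_guest_binary_common(cfg, &exe_info, |shared_mem, layout| {
///         // e.g. load at the guest code address within the sandbox layout
///         RawPtr::try_from(layout.get_guest_code_address())
///     })?;
/// ```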
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
fn load_guest_binary_common<F>(
    cfg: SandboxConfiguration,
    exe_info: &ExeInfo,
    load_addr_fn: F,
) -> Result<(SandboxMemoryLayout, ExclusiveSharedMemory, RawPtr, Offset)>
where
    F: FnOnce(&ExclusiveSharedMemory, &SandboxMemoryLayout) -> Result<RawPtr>,
{
    let layout = SandboxMemoryLayout::new(
        cfg,
        exe_info.loaded_size(),
        usize::try_from(cfg.get_stack_size(exe_info))?,
        usize::try_from(cfg.get_heap_size(exe_info))?,
    )?;
    let mut shared_mem = ExclusiveSharedMemory::new(layout.get_memory_size()?)?;

    let load_addr: RawPtr = load_addr_fn(&shared_mem, &layout)?;

    let entrypoint_offset = exe_info.entrypoint();

    let offset = layout.get_code_pointer_offset();

    {
        // write the code pointer to shared memory
        let load_addr_u64: u64 = load_addr.clone().into();
        shared_mem.write_u64(offset, load_addr_u64)?;
    }
    Ok((layout, shared_mem, load_addr, entrypoint_offset))
}

impl SandboxMemoryManager<ExclusiveSharedMemory> {
    /// Load the binary represented by `exe_info` into memory, ensuring
    /// all necessary relocations are made prior to completing the load
    /// operation, then create a new `SharedMemory` to store the new PE
    /// file and a `SandboxMemoryLayout` to describe the layout of that
    /// new `SharedMemory`.
    ///
    /// Returns the following:
    ///
    /// - The newly-created `SharedMemory`
    /// - The `SandboxMemoryLayout` describing that `SharedMemory`
    /// - The offset to the entrypoint. This value means something different
    ///   depending on whether we're using in-process mode or not:
    ///     - If we're using in-process mode, this value will be an offset
    ///       into host memory
    ///     - If we're not using in-process mode, this value will be an offset
    ///       into guest memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn load_guest_binary_into_memory(
        cfg: SandboxConfiguration,
        exe_info: &mut ExeInfo,
        inprocess: bool,
    ) -> Result<Self> {
        let (layout, mut shared_mem, load_addr, entrypoint_offset) = load_guest_binary_common(
            cfg,
            exe_info,
            |shared_mem: &ExclusiveSharedMemory, layout: &SandboxMemoryLayout| {
                let addr_usize = if inprocess {
                    // if we're running in-process, load_addr is the absolute
                    // address of the start of shared memory, plus the offset to
                    // code

                    // We also need to make the memory executable

                    shared_mem.make_memory_executable()?;
                    shared_mem.base_addr() + layout.get_guest_code_offset()
                } else {
                    // otherwise, we're running in a VM, so load_addr
                    // is the base address in a VM plus the code
                    // offset
                    layout.get_guest_code_address()
                };
                RawPtr::try_from(addr_usize)
            },
        )?;

        exe_info.load(
            load_addr.clone().try_into()?,
            &mut shared_mem.as_mut_slice()[layout.get_guest_code_offset()..],
        )?;

        Ok(Self::new(
            layout,
            shared_mem,
            inprocess,
            load_addr,
            entrypoint_offset,
            #[cfg(target_os = "windows")]
            None,
        ))
    }

    /// Similar to load_guest_binary_into_memory, except only works on Windows
    /// and uses the
    /// [`LoadLibraryA`](https://learn.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibrarya)
    /// function.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn load_guest_binary_using_load_library(
        cfg: SandboxConfiguration,
        guest_bin_path: &str,
        exe_info: &mut ExeInfo,
    ) -> Result<Self> {
        #[cfg(target_os = "windows")]
        {
            if !matches!(exe_info, ExeInfo::PE(_)) {
                log_then_return!("LoadLibrary can only be used with PE files");
            }

            let lib = LoadedLib::load(guest_bin_path)?;
            let (layout, shared_mem, load_addr, entrypoint_offset) =
                load_guest_binary_common(cfg, exe_info, |_, _| Ok(lib.base_addr()))?;

            // make the memory executable when running in-process
            shared_mem.make_memory_executable()?;

            Ok(Self::new(
                layout,
                shared_mem,
                true,
                load_addr,
                entrypoint_offset,
                Some(lib),
            ))
        }
        #[cfg(target_os = "linux")]
        {
            let _ = (cfg, guest_bin_path, exe_info);
            log_then_return!("load_guest_binary_using_load_library is only available on Windows");
        }
    }

    /// Writes host function details to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_buffer_host_function_details(&mut self, buffer: &[u8]) -> Result<()> {
        let host_function_details = HostFunctionDetails::try_from(buffer).map_err(|e| {
            new_error!(
                "write_buffer_host_function_details: failed to convert buffer to HostFunctionDetails: {}",
                e
            )
        })?;

        let host_function_call_buffer: Vec<u8> = (&host_function_details).try_into().map_err(|_| {
            new_error!(
                "write_buffer_host_function_details: failed to convert HostFunctionDetails to Vec<u8>"
            )
        })?;

        let buffer_size = {
            let size_u64 = self
                .shared_mem
                .read_u64(self.layout.get_host_function_definitions_size_offset())?;
            usize::try_from(size_u64)
        }?;

        if host_function_call_buffer.len() > buffer_size {
            log_then_return!(
                "Host Function Details buffer is too big for the host_function_definitions buffer"
            );
        }

        self.shared_mem.copy_from_slice(
            host_function_call_buffer.as_slice(),
            self.layout.host_function_definitions_buffer_offset,
        )?;
        Ok(())
    }

    /// Set the stack guard to `cookie` using `layout` to calculate
    /// its location and `shared_mem` to write it.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_stack_guard(&mut self, cookie: &[u8; STACK_COOKIE_LEN]) -> Result<()> {
        let stack_offset = self.layout.get_top_of_user_stack_offset();
        self.shared_mem.copy_from_slice(cookie, stack_offset)
    }

    /// Wraps ExclusiveSharedMemory::build
    pub fn build(
        self,
    ) -> (
        SandboxMemoryManager<HostSharedMemory>,
        SandboxMemoryManager<GuestSharedMemory>,
    ) {
        let (hshm, gshm) = self.shared_mem.build();
        (
            SandboxMemoryManager {
                shared_mem: hshm,
                layout: self.layout,
                inprocess: self.inprocess,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                snapshots: Arc::new(Mutex::new(Vec::new())),
                #[cfg(target_os = "windows")]
                _lib: self._lib,
            },
            SandboxMemoryManager {
                shared_mem: gshm,
                layout: self.layout,
                inprocess: self.inprocess,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                snapshots: Arc::new(Mutex::new(Vec::new())),
                #[cfg(target_os = "windows")]
                _lib: None,
            },
        )
    }
}

impl SandboxMemoryManager<HostSharedMemory> {
    /// Check the stack guard of the memory in `shared_mem`, using
    /// `layout` to calculate its location.
    ///
    /// Return `true` if `shared_mem` could be accessed properly and the
    /// guard matches `cookie`. If it could be accessed properly and the
    /// guard doesn't match `cookie`, return `false`. Otherwise, return
    /// a descriptive error.
    ///
    /// This method could be an associated function instead. See the
    /// documentation at the bottom of `set_stack_guard` for a description
    /// of why it isn't.
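    ///
    /// An illustrative (not compiled) sketch of how the set/check pair is
    /// intended to be used, assuming `emgr` is a manager over exclusive
    /// memory and `hmgr` is the host-side manager produced by `build()`:
    ///
    /// ```ignore
    /// let cookie = [0xAAu8; STACK_COOKIE_LEN];
    /// emgr.set_stack_guard(&cookie)?;
    /// let (hmgr, _gmgr) = emgr.build();
    /// assert!(hmgr.check_stack_guard(cookie)?);
    /// ```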
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn check_stack_guard(&self, cookie: [u8; STACK_COOKIE_LEN]) -> Result<bool> {
        let offset = self.layout.get_top_of_user_stack_offset();
        let test_cookie: [u8; STACK_COOKIE_LEN] = self.shared_mem.read(offset)?;
        let cmp_res = cookie.iter().cmp(test_cookie.iter());
        Ok(cmp_res == Ordering::Equal)
    }

    /// Get the address of the dispatch function in memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pointer_to_dispatch_function(&self) -> Result<u64> {
        let guest_dispatch_function_ptr = self
            .shared_mem
            .read::<u64>(self.layout.get_dispatch_function_pointer_offset())?;

        // This pointer is written by the guest library but is accessible to
        // the guest engine so we should bounds check it before we return it.
        //
        // When executing with in-hypervisor mode, there is no danger from
        // the guest manipulating this memory location because the only
        // addresses that are valid are in its own address space.
        //
        // When executing in-process, manipulating this pointer could cause the
        // host to execute arbitrary functions.
        let guest_ptr = GuestPtr::try_from(RawPtr::from(guest_dispatch_function_ptr))?;
        guest_ptr.absolute()
    }

    /// Reads a host function call from memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_host_function_call(&mut self) -> Result<FunctionCall> {
        self.shared_mem.try_pop_buffer_into::<FunctionCall>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Writes a function call result to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_response_from_host_method_call(&mut self, res: &ReturnValue) -> Result<()> {
        let function_call_ret_val_buffer = Vec::<u8>::try_from(res).map_err(|_| {
            new_error!(
                "write_response_from_host_method_call: failed to convert ReturnValue to Vec<u8>"
            )
        })?;
        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            function_call_ret_val_buffer.as_slice(),
        )
    }

    /// Writes a guest function call to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_guest_function_call(&mut self, buffer: &[u8]) -> Result<()> {
        validate_guest_function_call_buffer(buffer).map_err(|e| {
            new_error!(
                "Guest function call buffer validation failed: {}",
                e.to_string()
            )
        })?;

        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            buffer,
        )
    }

    /// Reads a function call result from memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_function_call_result(&mut self) -> Result<ReturnValue> {
        self.shared_mem.try_pop_buffer_into::<ReturnValue>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Read guest log data from the `SharedMemory` contained within `self`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn read_guest_log_data(&mut self) -> Result<GuestLogData> {
        self.shared_mem.try_pop_buffer_into::<GuestLogData>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Get the length of the host exception
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn get_host_error_length(&self) -> Result<i32> {
        let offset = self.layout.get_host_exception_offset();
        // The host exception field is expected to contain a 32-bit length followed by the exception data.
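        // e.g. (illustrative values): [ len: i32 = 5 ][ 5 bytes of exception data ]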
        self.shared_mem.read::<i32>(offset)
    }

    /// Get a bool indicating if there is a host error
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn has_host_error(&self) -> Result<bool> {
        let offset = self.layout.get_host_exception_offset();
        // The host exception field is expected to contain a 32-bit length followed by the exception data.
        let len = self.shared_mem.read::<i32>(offset)?;
        Ok(len != 0)
    }

    /// Get the error data that was written by the Hyperlight host.
    /// Writes the exception data into the provided `exception_data_slc` buffer and
    /// returns a `Result` containing `()` on success, or an error.
    ///
    /// TODO: have this function return a Vec<u8> instead of requiring
    /// the user to pass in a slice of the same length as returned by
    /// self.get_host_error_length()
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn get_host_error_data(&self, exception_data_slc: &mut [u8]) -> Result<()> {
        let offset = self.layout.get_host_exception_offset();
        let len = self.get_host_error_length()?;

        let exception_data_slc_len = exception_data_slc.len();
        if exception_data_slc_len != len as usize {
            log_then_return!(ExceptionDataLengthIncorrect(len, exception_data_slc_len));
        }
        // The host exception field is expected to contain a 32-bit length followed by the exception data.
        self.shared_mem
            .copy_to_slice(exception_data_slc, offset + size_of::<i32>())?;
        Ok(())
    }

    /// Look for a `HyperlightError` generated by the host, and return
    /// an `Ok(Some(the_error))` if we succeeded in looking for one, and
    /// it was found. Return `Ok(None)` if we succeeded in looking for
    /// one and it wasn't found. Return an `Err` if we did not succeed
    /// in looking for one.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_host_error(&self) -> Result<Option<HyperlightHostError>> {
        if self.has_host_error()? {
            let host_err_len = {
                let len_i32 = self.get_host_error_length()?;
                usize::try_from(len_i32)
            }?;
            // create a Vec<u8> of length host_err_len.
            // it's important we set the length, rather than just
            // the capacity, because self.get_host_error_data requires
            // that the length of the vec matches the return value of
            // self.get_host_error_length()
            let mut host_err_data: Vec<u8> = vec![0; host_err_len];
            self.get_host_error_data(&mut host_err_data)?;
            let host_err_json = from_utf8(&host_err_data).map_err(UTF8SliceConversionFailure)?;
            let host_err: HyperlightHostError =
                from_str(host_err_json).map_err(JsonConversionFailure)?;
            Ok(Some(host_err))
        } else {
            Ok(None)
        }
    }

    /// Get the guest error data
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_error(&self) -> Result<GuestError> {
        // get memory buffer max size
        let err_buffer_size_offset = self.layout.get_guest_error_buffer_size_offset();
        let max_err_buffer_size = self.shared_mem.read::<u64>(err_buffer_size_offset)?;

        // get guest error from layout and shared mem
        let mut guest_error_buffer = vec![b'0'; usize::try_from(max_err_buffer_size)?];
        let err_msg_offset = self.layout.guest_error_buffer_offset;
        self.shared_mem
            .copy_to_slice(guest_error_buffer.as_mut_slice(), err_msg_offset)?;
        GuestError::try_from(guest_error_buffer.as_slice()).map_err(|e| {
            new_error!(
                "get_guest_error: failed to convert buffer to GuestError: {}",
                e
            )
        })
    }

    /// This function writes an error to guest memory and is intended to be
    /// used when the host's outb handler code raises an error.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn write_outb_error(
        &mut self,
        guest_error_msg: &[u8],
        host_exception_data: &[u8],
    ) -> Result<()> {
        let message = String::from_utf8(guest_error_msg.to_owned())?;
        let ge = GuestError::new(ErrorCode::OutbError, message);

        let guest_error_buffer: Vec<u8> = (&ge)
            .try_into()
            .map_err(|_| new_error!("write_outb_error: failed to convert GuestError to Vec<u8>"))?;

        let err_buffer_size_offset = self.layout.get_guest_error_buffer_size_offset();
        let max_err_buffer_size = self.shared_mem.read::<u64>(err_buffer_size_offset)?;

        if guest_error_buffer.len() as u64 > max_err_buffer_size {
            log_then_return!("The guest error message is too large to fit in the shared memory");
        }
        self.shared_mem.copy_from_slice(
            guest_error_buffer.as_slice(),
            self.layout.guest_error_buffer_offset,
        )?;

        let host_exception_offset = self.layout.get_host_exception_offset();
        let host_exception_size_offset = self.layout.get_host_exception_size_offset();
        let max_host_exception_size = {
            let size_u64 = self.shared_mem.read::<u64>(host_exception_size_offset)?;
            usize::try_from(size_u64)
        }?;

        // First four bytes of host exception are length

        if host_exception_data.len() > max_host_exception_size - size_of::<i32>() {
            log_then_return!(ExceptionMessageTooBig(
                host_exception_data.len(),
                max_host_exception_size - size_of::<i32>()
            ));
        }

        self.shared_mem
            .write::<i32>(host_exception_offset, host_exception_data.len() as i32)?;
        self.shared_mem.copy_from_slice(
            host_exception_data,
            host_exception_offset + size_of::<i32>(),
        )?;

        Ok(())
    }

    /// Read guest panic data from the `SharedMemory` contained within `self`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn read_guest_panic_context_data(&self) -> Result<Vec<u8>> {
        let offset = self.layout.get_guest_panic_context_buffer_offset();
        let buffer_size = {
            let size_u64 = self
                .shared_mem
                .read::<u64>(self.layout.get_guest_panic_context_size_offset())?;
            usize::try_from(size_u64)
        }?;
        let mut vec_out = vec![0; buffer_size];
        self.shared_mem
            .copy_to_slice(vec_out.as_mut_slice(), offset)?;
        Ok(vec_out)
    }
}

#[cfg(test)]
mod tests {
    use hyperlight_testing::rust_guest_as_pathbuf;
    use serde_json::to_string;
    #[cfg(all(target_os = "windows", inprocess))]
    use serial_test::serial;

    use super::SandboxMemoryManager;
    use crate::error::HyperlightHostError;
    use crate::mem::exe::ExeInfo;
    use crate::mem::layout::SandboxMemoryLayout;
    use crate::mem::ptr::RawPtr;
    use crate::mem::ptr_offset::Offset;
    use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
    use crate::sandbox::SandboxConfiguration;
    use crate::testing::bytes_for_path;

    #[test]
    fn load_guest_binary_common() {
        let guests = vec![
            rust_guest_as_pathbuf("simpleguest"),
            rust_guest_as_pathbuf("callbackguest"),
        ];
        for guest in guests {
            let guest_bytes = bytes_for_path(guest).unwrap();
            let exe_info = ExeInfo::from_buf(guest_bytes.as_slice()).unwrap();
            let stack_size_override = 0x3000;
            let heap_size_override = 0x10000;
            let mut cfg = SandboxConfiguration::default();
            cfg.set_stack_size(stack_size_override);
            cfg.set_heap_size(heap_size_override);
            let (layout, shared_mem, _, _) =
                super::load_guest_binary_common(cfg, &exe_info, |_, _| Ok(RawPtr::from(100)))
                    .unwrap();
            assert_eq!(
                stack_size_override,
                u64::try_from(layout.stack_size).unwrap()
            );
            assert_eq!(heap_size_override, u64::try_from(layout.heap_size).unwrap());
            assert_eq!(layout.get_memory_size().unwrap(), shared_mem.mem_size());
        }
    }

    #[cfg(all(target_os = "windows", inprocess))]
    #[test]
    #[serial]
    fn load_guest_binary_using_load_library() {
        use hyperlight_testing::rust_guest_as_pathbuf;

        use crate::mem::mgr::SandboxMemoryManager;

        let cfg = SandboxConfiguration::default();
        let guest_pe_path = rust_guest_as_pathbuf("simpleguest.exe");
        let guest_pe_bytes = bytes_for_path(guest_pe_path.clone()).unwrap();
        let mut pe_info = ExeInfo::from_buf(guest_pe_bytes.as_slice()).unwrap();
        let _ = SandboxMemoryManager::load_guest_binary_using_load_library(
            cfg,
            guest_pe_path.to_str().unwrap(),
            &mut pe_info,
        )
        .unwrap();

        let guest_elf_path = rust_guest_as_pathbuf("simpleguest");
        let guest_elf_bytes = bytes_for_path(guest_elf_path.clone()).unwrap();
        let mut elf_info = ExeInfo::from_buf(guest_elf_bytes.as_slice()).unwrap();

        let res = SandboxMemoryManager::load_guest_binary_using_load_library(
            cfg,
            guest_elf_path.to_str().unwrap(),
            &mut elf_info,
        );

        match res {
            Ok(_) => {
                panic!("loadlib with elf should fail");
            }
            Err(err) => {
                assert!(err
                    .to_string()
                    .contains("LoadLibrary can only be used with PE files"));
            }
        }
    }

    /// Don't write a host error, try to read it back, and verify we
    /// successfully do the read but get no error back
    #[test]
    fn get_host_error_none() {
        let cfg = SandboxConfiguration::default();
        let layout = SandboxMemoryLayout::new(cfg, 0x10000, 0x10000, 0x10000).unwrap();
        let mut eshm = ExclusiveSharedMemory::new(layout.get_memory_size().unwrap()).unwrap();
        let mem_size = eshm.mem_size();
        layout
            .write(
                &mut eshm,
                SandboxMemoryLayout::BASE_ADDRESS,
                mem_size,
                false,
            )
            .unwrap();
        let emgr = SandboxMemoryManager::new(
            layout,
            eshm,
            false,
            RawPtr::from(0),
            Offset::from(0),
            #[cfg(target_os = "windows")]
            None,
        );
        let (hmgr, _) = emgr.build();
        assert_eq!(None, hmgr.get_host_error().unwrap());
    }

    /// write a host error to shared memory, then try to read it back out
    #[test]
    fn round_trip_host_error() {
        let cfg = SandboxConfiguration::default();
        let layout = SandboxMemoryLayout::new(cfg, 0x10000, 0x10000, 0x10000).unwrap();
        let mem_size = layout.get_memory_size().unwrap();
        // write a host error and then try to read it back
        let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        layout
            .write(
                &mut eshm,
                SandboxMemoryLayout::BASE_ADDRESS,
                mem_size,
                false,
            )
            .unwrap();
        let emgr = SandboxMemoryManager::new(
            layout,
            eshm,
            false,
            RawPtr::from(0),
            Offset::from(0),
            #[cfg(target_os = "windows")]
            None,
        );
        let (mut hmgr, _) = emgr.build();
        let err = HyperlightHostError {
            message: "test message".to_string(),
            source: "rust test".to_string(),
        };
        let err_json_bytes = {
            let str = to_string(&err).unwrap();
            str.into_bytes()
        };
        let err_json_msg = "test error message".to_string().into_bytes();
        hmgr.write_outb_error(&err_json_msg, &err_json_bytes)
            .unwrap();

        let host_err_opt = hmgr
            .get_host_error()
            .expect("get_host_err should return an Ok");
        assert!(host_err_opt.is_some());
        assert_eq!(err, host_err_opt.unwrap());
    }
}