1use core::mem::size_of;
18use std::cmp::Ordering;
19use std::str::from_utf8;
20use std::sync::{Arc, Mutex};
21
22use hyperlight_common::flatbuffer_wrappers::function_call::{
23 validate_guest_function_call_buffer, FunctionCall,
24};
25use hyperlight_common::flatbuffer_wrappers::function_types::ReturnValue;
26use hyperlight_common::flatbuffer_wrappers::guest_error::{ErrorCode, GuestError};
27use hyperlight_common::flatbuffer_wrappers::guest_log_data::GuestLogData;
28use hyperlight_common::flatbuffer_wrappers::host_function_details::HostFunctionDetails;
29use serde_json::from_str;
30use tracing::{instrument, Span};
31
32use super::exe::ExeInfo;
33use super::layout::SandboxMemoryLayout;
34#[cfg(target_os = "windows")]
35use super::loaded_lib::LoadedLib;
36use super::memory_region::{MemoryRegion, MemoryRegionType};
37use super::ptr::{GuestPtr, RawPtr};
38use super::ptr_offset::Offset;
39use super::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, HostSharedMemory, SharedMemory};
40use super::shared_mem_snapshot::SharedMemorySnapshot;
41use crate::error::HyperlightError::{
42 ExceptionDataLengthIncorrect, ExceptionMessageTooBig, JsonConversionFailure, NoMemorySnapshot,
43 UTF8SliceConversionFailure,
44};
45use crate::error::HyperlightHostError;
46use crate::sandbox::SandboxConfiguration;
47use crate::{log_then_return, new_error, HyperlightError, Result};
48
49const PAGE_PRESENT: u64 = 1; const PAGE_RW: u64 = 1 << 1; const PAGE_USER: u64 = 1 << 2; const PAGE_NX: u64 = 1 << 63; pub(super) const AMOUNT_OF_MEMORY_PER_PT: usize = 0x200000;
62pub(crate) const STACK_COOKIE_LEN: usize = 16;
66
/// Manages the shared memory of a single sandbox: its layout, the location
/// of the loaded guest binary, and a stack of memory snapshots that can be
/// used to save and restore sandbox state.
#[derive(Clone)]
pub(crate) struct SandboxMemoryManager<S> {
    /// The view over the sandbox's shared memory
    /// (exclusive, host, or guest — see the `S: SharedMemory` impls below).
    pub(crate) shared_mem: S,
    /// The memory layout describing where each region lives in `shared_mem`.
    pub(crate) layout: SandboxMemoryLayout,
    /// Whether this sandbox runs in-process (see `is_in_process`).
    inprocess: bool,
    /// The address at which the guest binary was loaded.
    pub(crate) load_addr: RawPtr,
    /// Offset of the guest binary's entrypoint, relative to `load_addr`.
    pub(crate) entrypoint_offset: Offset,
    /// Stack of shared-memory snapshots; pushed by `push_state` and
    /// consumed by the restore methods.
    snapshots: Arc<Mutex<Vec<SharedMemorySnapshot>>>,
    /// (Windows only) keeps a `LoadLibrary`-loaded guest binary alive for
    /// the lifetime of this manager; never read directly.
    #[cfg(target_os = "windows")]
    _lib: Option<LoadedLib>,
}
90
impl<S> SandboxMemoryManager<S>
where
    S: SharedMemory,
{
    /// Create a new manager over `shared_mem` with the given layout and
    /// load information, and an initially-empty snapshot stack.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn new(
        layout: SandboxMemoryLayout,
        shared_mem: S,
        inprocess: bool,
        load_addr: RawPtr,
        entrypoint_offset: Offset,
        #[cfg(target_os = "windows")] lib: Option<LoadedLib>,
    ) -> Self {
        Self {
            layout,
            shared_mem,
            inprocess,
            load_addr,
            entrypoint_offset,
            snapshots: Arc::new(Mutex::new(Vec::new())),
            #[cfg(target_os = "windows")]
            _lib: lib,
        }
    }

    /// Whether this manager was created for an in-process sandbox.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn is_in_process(&self) -> bool {
        self.inprocess
    }

    /// Mutable access to the underlying shared memory.
    pub(crate) fn get_shared_mem_mut(&mut self) -> &mut S {
        &mut self.shared_mem
    }

    /// Write the x86-64 4-level page tables (PML4 -> PDPT -> PD -> PTs) into
    /// shared memory, mapping `mem_size` bytes of guest memory with
    /// permissions derived from `regions`, and return the initial RSP value
    /// for the guest.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_up_shared_memory(
        &mut self,
        mem_size: u64,
        regions: &mut [MemoryRegion],
    ) -> Result<u64> {
        // Initial stack pointer: guest address of the top of the user stack
        // plus the stack size, minus 0x28 — presumably to satisfy ABI
        // alignment/call-frame expectations at entry (TODO confirm).
        let rsp: u64 = self.layout.get_top_of_user_stack_offset() as u64
            + SandboxMemoryLayout::BASE_ADDRESS as u64
            + self.layout.stack_size as u64
            - 0x28;

        self.shared_mem.with_exclusivity(|shared_mem| {
            // Single PML4 entry pointing at the PDPT.
            shared_mem.write_u64(
                SandboxMemoryLayout::PML4_OFFSET,
                SandboxMemoryLayout::PDPT_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

            // Single PDPT entry pointing at the page directory.
            shared_mem.write_u64(
                SandboxMemoryLayout::PDPT_OFFSET,
                SandboxMemoryLayout::PD_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

            // 512 page-directory entries, each pointing at one of the 4 KiB
            // page tables laid out contiguously from PT_GUEST_ADDRESS.
            for i in 0..512 {
                let offset = SandboxMemoryLayout::PD_OFFSET + (i * 8);
                let val_to_write: u64 = (SandboxMemoryLayout::PT_GUEST_ADDRESS as u64
                    + (i * 4096) as u64)
                    | PAGE_PRESENT
                    | PAGE_RW;
                shared_mem.write_u64(offset, val_to_write)?;
            }

            let mem_size = usize::try_from(mem_size)?;

            // One page table covers 2 MiB; round up, plus one extra table.
            let num_pages: usize =
                ((mem_size + AMOUNT_OF_MEMORY_PER_PT - 1) / AMOUNT_OF_MEMORY_PER_PT) + 1;

            // Fill each page-table entry; entry (p, i) maps guest address
            // (p << 21) + (i << 12).
            for p in 0..num_pages {
                for i in 0..512 {
                    let offset = SandboxMemoryLayout::PT_OFFSET + (p * 4096) + (i * 8);
                    let val_to_write = if p == 0 {
                        // First 2 MiB: address bits only, with no permission
                        // flags set (entries not marked present).
                        // NOTE(review): presumably intentional so that
                        // accesses to the lowest region fault — confirm.
                        (p << 21) as u64 | (i << 12) as u64
                    } else {
                        // Flags come from the region containing this page;
                        // addresses outside every region get no flags
                        // (entry not present).
                        let flags = match Self::get_page_flags(p, i, regions) {
                            Ok(region_type) => match region_type {
                                MemoryRegionType::Code => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                                MemoryRegionType::Stack => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                // Heap is executable only when the
                                // `executable_heap` feature is enabled.
                                #[cfg(feature = "executable_heap")]
                                MemoryRegionType::Heap => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                                #[cfg(not(feature = "executable_heap"))]
                                MemoryRegionType::Heap => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                MemoryRegionType::GuardPage => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                                }
                                MemoryRegionType::InputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::OutputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::Peb => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                // Read-only to the guest (no PAGE_RW).
                                MemoryRegionType::HostFunctionDefinitions => PAGE_PRESENT | PAGE_NX,
                                MemoryRegionType::PanicContext => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::GuestErrorData => {
                                    PAGE_PRESENT | PAGE_RW | PAGE_NX
                                }
                                // Read-only to the guest (no PAGE_RW).
                                MemoryRegionType::HostExceptionData => PAGE_PRESENT | PAGE_NX,
                                MemoryRegionType::PageTables => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::KernelStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                                MemoryRegionType::BootStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                            },
                            Err(_) => 0,
                        };
                        ((p << 21) as u64 | (i << 12) as u64) | flags
                    };
                    shared_mem.write_u64(offset, val_to_write)?;
                }
            }
            // Turbofish pins the closure's error type so `??` above works.
            Ok::<(), HyperlightError>(())
        })??;

        Ok(rsp)
    }

    /// Find the `MemoryRegionType` containing the guest address mapped by
    /// page-table entry (`p`, `i`), i.e. `(p << 21) + (i << 12)`.
    ///
    /// `regions` is assumed sorted by guest address — required for the
    /// binary search below to be correct.
    fn get_page_flags(
        p: usize,
        i: usize,
        regions: &mut [MemoryRegion],
    ) -> Result<MemoryRegionType> {
        let addr = (p << 21) + (i << 12);

        let idx = regions.binary_search_by(|region| {
            if region.guest_region.contains(&addr) {
                std::cmp::Ordering::Equal
            } else if region.guest_region.start > addr {
                std::cmp::Ordering::Greater
            } else {
                std::cmp::Ordering::Less
            }
        });

        match idx {
            Ok(index) => Ok(regions[index].region_type),
            Err(_) => Err(new_error!("Could not find region for address: {}", addr)),
        }
    }

    /// (In-process builds only) the address of the PEB given the start
    /// address of the sandbox memory.
    #[cfg(inprocess)]
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_in_process_peb_address(&self, start_addr: u64) -> Result<u64> {
        Ok(start_addr + self.layout.get_in_process_peb_offset() as u64)
    }

    /// Snapshot the current shared-memory contents and push the snapshot
    /// onto the snapshot stack.
    pub(crate) fn push_state(&mut self) -> Result<()> {
        let snapshot = SharedMemorySnapshot::new(&mut self.shared_mem)?;
        self.snapshots
            // try_lock: fails immediately rather than blocking if the lock
            // is contended or poisoned.
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?
            .push(snapshot);
        Ok(())
    }

    /// Restore shared memory from the most recent snapshot, without popping
    /// it; errors with `NoMemorySnapshot` if the stack is empty.
    pub(crate) fn restore_state_from_last_snapshot(&mut self) -> Result<()> {
        let mut snapshots = self
            .snapshots
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?;
        let last = snapshots.last_mut();
        if last.is_none() {
            log_then_return!(NoMemorySnapshot);
        }
        // Safe: `None` was handled (and returned from) above.
        let snapshot = last.unwrap();
        snapshot.restore_from_snapshot(&mut self.shared_mem)
    }

    /// Pop (discard) the most recent snapshot, then restore from the new
    /// top of the stack; errors with `NoMemorySnapshot` if the stack is
    /// empty at either step.
    pub(crate) fn pop_and_restore_state_from_snapshot(&mut self) -> Result<()> {
        let last = self
            .snapshots
            .try_lock()
            .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?
            .pop();
        if last.is_none() {
            log_then_return!(NoMemorySnapshot);
        }
        // Lock guard is dropped above, so this re-lock cannot deadlock.
        self.restore_state_from_last_snapshot()
    }

    /// Write the outb-handler pointer and its context value into the slots
    /// the layout reserves for them.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_outb_address_and_context(&mut self, addr: u64, context: u64) -> Result<()> {
        let pointer_offset = self.layout.get_outb_pointer_offset();
        let context_offset = self.layout.get_outb_context_offset();
        self.shared_mem.with_exclusivity(|excl| -> Result<()> {
            excl.write_u64(pointer_offset, addr)?;
            excl.write_u64(context_offset, context)?;
            Ok(())
        })?
    }
}
336
337#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
344fn load_guest_binary_common<F>(
345 cfg: SandboxConfiguration,
346 exe_info: &ExeInfo,
347 load_addr_fn: F,
348) -> Result<(SandboxMemoryLayout, ExclusiveSharedMemory, RawPtr, Offset)>
349where
350 F: FnOnce(&ExclusiveSharedMemory, &SandboxMemoryLayout) -> Result<RawPtr>,
351{
352 let layout = SandboxMemoryLayout::new(
353 cfg,
354 exe_info.loaded_size(),
355 usize::try_from(cfg.get_stack_size(exe_info))?,
356 usize::try_from(cfg.get_heap_size(exe_info))?,
357 )?;
358 let mut shared_mem = ExclusiveSharedMemory::new(layout.get_memory_size()?)?;
359
360 let load_addr: RawPtr = load_addr_fn(&shared_mem, &layout)?;
361
362 let entrypoint_offset = exe_info.entrypoint();
363
364 let offset = layout.get_code_pointer_offset();
365
366 {
367 let load_addr_u64: u64 = load_addr.clone().into();
369 shared_mem.write_u64(offset, load_addr_u64)?;
370 }
371 Ok((layout, shared_mem, load_addr, entrypoint_offset))
372}
373
impl SandboxMemoryManager<ExclusiveSharedMemory> {
    /// Allocate shared memory per `cfg` and load the binary described by
    /// `exe_info` into it.
    ///
    /// If `inprocess` is true, the shared memory is made executable and the
    /// code address is the host-side address of the guest-code region;
    /// otherwise the layout's guest-side code address is used.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn load_guest_binary_into_memory(
        cfg: SandboxConfiguration,
        exe_info: &mut ExeInfo,
        inprocess: bool,
    ) -> Result<Self> {
        let (layout, mut shared_mem, load_addr, entrypoint_offset) = load_guest_binary_common(
            cfg,
            exe_info,
            |shared_mem: &ExclusiveSharedMemory, layout: &SandboxMemoryLayout| {
                let addr_usize = if inprocess {
                    // In-process execution runs guest code straight from the
                    // host mapping, so it must be executable here.
                    shared_mem.make_memory_executable()?;
                    shared_mem.base_addr() + layout.get_guest_code_offset()
                } else {
                    layout.get_guest_code_address()
                };
                RawPtr::try_from(addr_usize)
            },
        )?;

        // Write the binary's bytes into the guest-code region.
        exe_info.load(
            load_addr.clone().try_into()?,
            &mut shared_mem.as_mut_slice()[layout.get_guest_code_offset()..],
        )?;

        Ok(Self::new(
            layout,
            shared_mem,
            inprocess,
            load_addr,
            entrypoint_offset,
            #[cfg(target_os = "windows")]
            None,
        ))
    }

    /// Load a PE guest binary via Windows `LoadLibrary` (implies
    /// in-process execution).
    ///
    /// Fails for non-PE binaries; always fails on Linux.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn load_guest_binary_using_load_library(
        cfg: SandboxConfiguration,
        guest_bin_path: &str,
        exe_info: &mut ExeInfo,
    ) -> Result<Self> {
        #[cfg(target_os = "windows")]
        {
            if !matches!(exe_info, ExeInfo::PE(_)) {
                log_then_return!("LoadLibrary can only be used with PE files");
            }

            let lib = LoadedLib::load(guest_bin_path)?;
            // The code is already mapped by LoadLibrary; hand its base
            // address back as the load address.
            let (layout, shared_mem, load_addr, entrypoint_offset) =
                load_guest_binary_common(cfg, exe_info, |_, _| Ok(lib.base_addr()))?;

            shared_mem.make_memory_executable()?;

            Ok(Self::new(
                layout,
                shared_mem,
                true,
                load_addr,
                entrypoint_offset,
                Some(lib),
            ))
        }
        #[cfg(target_os = "linux")]
        {
            // Consume the parameters so they aren't flagged as unused.
            let _ = (cfg, guest_bin_path, exe_info);
            log_then_return!("load_guest_binary_using_load_library is only available on Windows");
        }
    }

    /// Validate that `buffer` decodes as `HostFunctionDetails`, then write
    /// the re-serialized bytes into the host-function-definitions region,
    /// erroring if they do not fit.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_buffer_host_function_details(&mut self, buffer: &[u8]) -> Result<()> {
        let host_function_details = HostFunctionDetails::try_from(buffer).map_err(|e| {
            new_error!(
                "write_buffer_host_function_details: failed to convert buffer to HostFunctionDetails: {}",
                e
            )
        })?;

        let host_function_call_buffer: Vec<u8> = (&host_function_details).try_into().map_err(|_| {
            new_error!(
                "write_buffer_host_function_details: failed to convert HostFunctionDetails to Vec<u8>"
            )
        })?;

        // Capacity of the region, as recorded in shared memory.
        let buffer_size = {
            let size_u64 = self
                .shared_mem
                .read_u64(self.layout.get_host_function_definitions_size_offset())?;
            usize::try_from(size_u64)
        }?;

        if host_function_call_buffer.len() > buffer_size {
            log_then_return!(
                "Host Function Details buffer is too big for the host_function_definitions buffer"
            );
        }

        self.shared_mem.copy_from_slice(
            host_function_call_buffer.as_slice(),
            self.layout.host_function_definitions_buffer_offset,
        )?;
        Ok(())
    }

    /// Write the stack-guard `cookie` at the top of the user stack.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_stack_guard(&mut self, cookie: &[u8; STACK_COOKIE_LEN]) -> Result<()> {
        let stack_offset = self.layout.get_top_of_user_stack_offset();
        self.shared_mem.copy_from_slice(cookie, stack_offset)
    }

    /// Split this exclusive manager into a host-view manager and a
    /// guest-view manager over the same shared memory.
    ///
    /// NOTE(review): each resulting manager gets its own, empty snapshot
    /// stack — they are not shared with each other (or with this manager's
    /// existing snapshots); confirm that is intended.
    pub fn build(
        self,
    ) -> (
        SandboxMemoryManager<HostSharedMemory>,
        SandboxMemoryManager<GuestSharedMemory>,
    ) {
        let (hshm, gshm) = self.shared_mem.build();
        (
            SandboxMemoryManager {
                shared_mem: hshm,
                layout: self.layout,
                inprocess: self.inprocess,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                snapshots: Arc::new(Mutex::new(Vec::new())),
                // The loaded library (if any) stays with the host view.
                #[cfg(target_os = "windows")]
                _lib: self._lib,
            },
            SandboxMemoryManager {
                shared_mem: gshm,
                layout: self.layout,
                inprocess: self.inprocess,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                snapshots: Arc::new(Mutex::new(Vec::new())),
                #[cfg(target_os = "windows")]
                _lib: None,
            },
        )
    }
}
551
impl SandboxMemoryManager<HostSharedMemory> {
    /// Compare the stack-guard cookie in shared memory against `cookie`;
    /// `true` means the guard is intact.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn check_stack_guard(&self, cookie: [u8; STACK_COOKIE_LEN]) -> Result<bool> {
        let offset = self.layout.get_top_of_user_stack_offset();
        let test_cookie: [u8; STACK_COOKIE_LEN] = self.shared_mem.read(offset)?;
        let cmp_res = cookie.iter().cmp(test_cookie.iter());
        Ok(cmp_res == Ordering::Equal)
    }

    /// Read the guest's dispatch-function pointer from shared memory and
    /// convert it into an absolute address.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pointer_to_dispatch_function(&self) -> Result<u64> {
        let guest_dispatch_function_ptr = self
            .shared_mem
            .read::<u64>(self.layout.get_dispatch_function_pointer_offset())?;

        // GuestPtr::try_from validates the raw value before we trust it.
        let guest_ptr = GuestPtr::try_from(RawPtr::from(guest_dispatch_function_ptr))?;
        guest_ptr.absolute()
    }

    /// Pop the next host-function call the guest pushed into the output
    /// buffer.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_host_function_call(&mut self) -> Result<FunctionCall> {
        self.shared_mem.try_pop_buffer_into::<FunctionCall>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Push the host's return value into the input buffer for the guest to
    /// read.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_response_from_host_method_call(&mut self, res: &ReturnValue) -> Result<()> {
        let function_call_ret_val_buffer = Vec::<u8>::try_from(res).map_err(|_| {
            new_error!(
                "write_response_from_host_method_call: failed to convert ReturnValue to Vec<u8>"
            )
        })?;
        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            function_call_ret_val_buffer.as_slice(),
        )
    }

    /// Validate `buffer` as a guest-function-call flatbuffer, then push it
    /// into the input buffer.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_guest_function_call(&mut self, buffer: &[u8]) -> Result<()> {
        validate_guest_function_call_buffer(buffer).map_err(|e| {
            new_error!(
                "Guest function call buffer validation failed: {}",
                e.to_string()
            )
        })?;

        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            buffer,
        )
    }

    /// Pop a guest function call's result from the output buffer.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_function_call_result(&mut self) -> Result<ReturnValue> {
        self.shared_mem.try_pop_buffer_into::<ReturnValue>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Pop guest log data from the output buffer.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn read_guest_log_data(&mut self) -> Result<GuestLogData> {
        self.shared_mem.try_pop_buffer_into::<GuestLogData>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Length, in bytes, of the recorded host error — the `i32` prefix at
    /// the start of the host-exception region.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn get_host_error_length(&self) -> Result<i32> {
        let offset = self.layout.get_host_exception_offset();
        self.shared_mem.read::<i32>(offset)
    }

    /// Whether a host error is currently recorded (non-zero length prefix).
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn has_host_error(&self) -> Result<bool> {
        let offset = self.layout.get_host_exception_offset();
        let len = self.shared_mem.read::<i32>(offset)?;
        Ok(len != 0)
    }

    /// Copy the recorded host error bytes into `exception_data_slc`, whose
    /// length must exactly match the recorded length.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn get_host_error_data(&self, exception_data_slc: &mut [u8]) -> Result<()> {
        let offset = self.layout.get_host_exception_offset();
        let len = self.get_host_error_length()?;

        let exception_data_slc_len = exception_data_slc.len();
        if exception_data_slc_len != len as usize {
            log_then_return!(ExceptionDataLengthIncorrect(len, exception_data_slc_len));
        }
        // Payload starts immediately after the i32 length prefix.
        self.shared_mem
            .copy_to_slice(exception_data_slc, offset + size_of::<i32>())?;
        Ok(())
    }

    /// Read the recorded host error, if any, decoding its bytes as UTF-8
    /// JSON into a `HyperlightHostError`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_host_error(&self) -> Result<Option<HyperlightHostError>> {
        if self.has_host_error()? {
            let host_err_len = {
                let len_i32 = self.get_host_error_length()?;
                usize::try_from(len_i32)
            }?;
            let mut host_err_data: Vec<u8> = vec![0; host_err_len];
            self.get_host_error_data(&mut host_err_data)?;
            let host_err_json = from_utf8(&host_err_data).map_err(UTF8SliceConversionFailure)?;
            let host_err: HyperlightHostError =
                from_str(host_err_json).map_err(JsonConversionFailure)?;
            Ok(Some(host_err))
        } else {
            Ok(None)
        }
    }

    /// Read the full guest-error region and decode it into a `GuestError`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_error(&self) -> Result<GuestError> {
        let err_buffer_size_offset = self.layout.get_guest_error_buffer_size_offset();
        let max_err_buffer_size = self.shared_mem.read::<u64>(err_buffer_size_offset)?;

        // Buffer is pre-filled with ASCII '0' bytes and the whole region is
        // copied, not just the used portion.
        let mut guest_error_buffer = vec![b'0'; usize::try_from(max_err_buffer_size)?];
        let err_msg_offset = self.layout.guest_error_buffer_offset;
        self.shared_mem
            .copy_to_slice(guest_error_buffer.as_mut_slice(), err_msg_offset)?;
        GuestError::try_from(guest_error_buffer.as_slice()).map_err(|e| {
            new_error!(
                "get_guest_error: failed to convert buffer to GuestError: {}",
                e
            )
        })
    }

    /// Record an outb error: write `guest_error_msg` as a `GuestError` into
    /// the guest-error region and `host_exception_data` (i32 length prefix
    /// followed by the bytes) into the host-exception region, erroring if
    /// either would overflow its region.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    fn write_outb_error(
        &mut self,
        guest_error_msg: &[u8],
        host_exception_data: &[u8],
    ) -> Result<()> {
        let message = String::from_utf8(guest_error_msg.to_owned())?;
        let ge = GuestError::new(ErrorCode::OutbError, message);

        let guest_error_buffer: Vec<u8> = (&ge)
            .try_into()
            .map_err(|_| new_error!("write_outb_error: failed to convert GuestError to Vec<u8>"))?;

        let err_buffer_size_offset = self.layout.get_guest_error_buffer_size_offset();
        let max_err_buffer_size = self.shared_mem.read::<u64>(err_buffer_size_offset)?;

        if guest_error_buffer.len() as u64 > max_err_buffer_size {
            log_then_return!("The guest error message is too large to fit in the shared memory");
        }
        self.shared_mem.copy_from_slice(
            guest_error_buffer.as_slice(),
            self.layout.guest_error_buffer_offset,
        )?;

        let host_exception_offset = self.layout.get_host_exception_offset();
        let host_exception_size_offset = self.layout.get_host_exception_size_offset();
        let max_host_exception_size = {
            let size_u64 = self.shared_mem.read::<u64>(host_exception_size_offset)?;
            usize::try_from(size_u64)
        }?;

        // Reserve room for the i32 length prefix.
        // NOTE(review): this subtraction would underflow (panic in debug) if
        // the recorded region size were smaller than size_of::<i32>();
        // presumably the layout guarantees it is larger — confirm.
        if host_exception_data.len() > max_host_exception_size - size_of::<i32>() {
            log_then_return!(ExceptionMessageTooBig(
                host_exception_data.len(),
                max_host_exception_size - size_of::<i32>()
            ));
        }

        self.shared_mem
            .write::<i32>(host_exception_offset, host_exception_data.len() as i32)?;
        self.shared_mem.copy_from_slice(
            host_exception_data,
            host_exception_offset + size_of::<i32>(),
        )?;

        Ok(())
    }

    /// Read the entire guest panic-context buffer (sized per the value
    /// recorded in shared memory).
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub fn read_guest_panic_context_data(&self) -> Result<Vec<u8>> {
        let offset = self.layout.get_guest_panic_context_buffer_offset();
        let buffer_size = {
            let size_u64 = self
                .shared_mem
                .read::<u64>(self.layout.get_guest_panic_context_size_offset())?;
            usize::try_from(size_u64)
        }?;
        let mut vec_out = vec![0; buffer_size];
        self.shared_mem
            .copy_to_slice(vec_out.as_mut_slice(), offset)?;
        Ok(vec_out)
    }
}
807
#[cfg(test)]
mod tests {
    use hyperlight_testing::rust_guest_as_pathbuf;
    use serde_json::to_string;
    #[cfg(all(target_os = "windows", inprocess))]
    use serial_test::serial;

    use super::SandboxMemoryManager;
    use crate::error::HyperlightHostError;
    use crate::mem::exe::ExeInfo;
    use crate::mem::layout::SandboxMemoryLayout;
    use crate::mem::ptr::RawPtr;
    use crate::mem::ptr_offset::Offset;
    use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
    use crate::sandbox::SandboxConfiguration;
    use crate::testing::bytes_for_path;

    /// `load_guest_binary_common` should honor the configured stack/heap
    /// size overrides and allocate exactly the memory the layout requires.
    #[test]
    fn load_guest_binary_common() {
        let guests = vec![
            rust_guest_as_pathbuf("simpleguest"),
            rust_guest_as_pathbuf("callbackguest"),
        ];
        for guest in guests {
            let guest_bytes = bytes_for_path(guest).unwrap();
            let exe_info = ExeInfo::from_buf(guest_bytes.as_slice()).unwrap();
            let stack_size_override = 0x3000;
            let heap_size_override = 0x10000;
            let mut cfg = SandboxConfiguration::default();
            cfg.set_stack_size(stack_size_override);
            cfg.set_heap_size(heap_size_override);
            let (layout, shared_mem, _, _) =
                super::load_guest_binary_common(cfg, &exe_info, |_, _| Ok(RawPtr::from(100)))
                    .unwrap();
            assert_eq!(
                stack_size_override,
                u64::try_from(layout.stack_size).unwrap()
            );
            assert_eq!(heap_size_override, u64::try_from(layout.heap_size).unwrap());
            assert_eq!(layout.get_memory_size().unwrap(), shared_mem.mem_size());
        }
    }

    /// LoadLibrary loading should succeed for a PE guest and be rejected
    /// (with the expected message) for an ELF guest.
    /// Windows + inprocess only; `#[serial]` because LoadLibrary mutates
    /// process-global state.
    #[cfg(all(target_os = "windows", inprocess))]
    #[test]
    #[serial]
    fn load_guest_binary_using_load_library() {
        use hyperlight_testing::rust_guest_as_pathbuf;

        use crate::mem::mgr::SandboxMemoryManager;

        let cfg = SandboxConfiguration::default();
        let guest_pe_path = rust_guest_as_pathbuf("simpleguest.exe");
        let guest_pe_bytes = bytes_for_path(guest_pe_path.clone()).unwrap();
        let mut pe_info = ExeInfo::from_buf(guest_pe_bytes.as_slice()).unwrap();
        let _ = SandboxMemoryManager::load_guest_binary_using_load_library(
            cfg,
            guest_pe_path.to_str().unwrap(),
            &mut pe_info,
        )
        .unwrap();

        let guest_elf_path = rust_guest_as_pathbuf("simpleguest");
        let guest_elf_bytes = bytes_for_path(guest_elf_path.clone()).unwrap();
        let mut elf_info = ExeInfo::from_buf(guest_elf_bytes.as_slice()).unwrap();

        let res = SandboxMemoryManager::load_guest_binary_using_load_library(
            cfg,
            guest_elf_path.to_str().unwrap(),
            &mut elf_info,
        );

        match res {
            Ok(_) => {
                panic!("loadlib with elf should fail");
            }
            Err(err) => {
                assert!(err
                    .to_string()
                    .contains("LoadLibrary can only be used with PE files"));
            }
        }
    }

    /// With no error written, `get_host_error` should return `None`.
    #[test]
    fn get_host_error_none() {
        let cfg = SandboxConfiguration::default();
        let layout = SandboxMemoryLayout::new(cfg, 0x10000, 0x10000, 0x10000).unwrap();
        let mut eshm = ExclusiveSharedMemory::new(layout.get_memory_size().unwrap()).unwrap();
        let mem_size = eshm.mem_size();
        layout
            .write(
                &mut eshm,
                SandboxMemoryLayout::BASE_ADDRESS,
                mem_size,
                false,
            )
            .unwrap();
        let emgr = SandboxMemoryManager::new(
            layout,
            eshm,
            false,
            RawPtr::from(0),
            Offset::from(0),
            #[cfg(target_os = "windows")]
            None,
        );
        let (hmgr, _) = emgr.build();
        assert_eq!(None, hmgr.get_host_error().unwrap());
    }

    /// An error written with `write_outb_error` should round-trip through
    /// `get_host_error` unchanged.
    #[test]
    fn round_trip_host_error() {
        let cfg = SandboxConfiguration::default();
        let layout = SandboxMemoryLayout::new(cfg, 0x10000, 0x10000, 0x10000).unwrap();
        let mem_size = layout.get_memory_size().unwrap();
        let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
        layout
            .write(
                &mut eshm,
                SandboxMemoryLayout::BASE_ADDRESS,
                mem_size,
                false,
            )
            .unwrap();
        let emgr = SandboxMemoryManager::new(
            layout,
            eshm,
            false,
            RawPtr::from(0),
            Offset::from(0),
            #[cfg(target_os = "windows")]
            None,
        );
        let (mut hmgr, _) = emgr.build();
        let err = HyperlightHostError {
            message: "test message".to_string(),
            source: "rust test".to_string(),
        };
        let err_json_bytes = {
            let str = to_string(&err).unwrap();
            str.into_bytes()
        };
        let err_json_msg = "test error message".to_string().into_bytes();
        hmgr.write_outb_error(&err_json_msg, &err_json_bytes)
            .unwrap();

        let host_err_opt = hmgr
            .get_host_error()
            .expect("get_host_err should return an Ok");
        assert!(host_err_opt.is_some());
        assert_eq!(err, host_err_opt.unwrap());
    }
}
965}