1use std::collections::HashSet;
18use std::path::Path;
19use std::sync::atomic::Ordering;
20use std::sync::{Arc, Mutex};
21
22use flatbuffers::FlatBufferBuilder;
23use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
24use hyperlight_common::flatbuffer_wrappers::function_types::{
25 ParameterValue, ReturnType, ReturnValue,
26};
27use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
28use tracing::{Span, instrument};
29
30use super::Callable;
31use super::file_mapping::prepare_file_cow;
32use super::host_funcs::FunctionRegistry;
33use super::snapshot::Snapshot;
34use crate::HyperlightError::{self, SnapshotSandboxMismatch};
35use crate::Result;
36use crate::func::{ParameterTuple, SupportedReturnType};
37use crate::hypervisor::InterruptHandle;
38use crate::hypervisor::hyperlight_vm::{HyperlightVm, HyperlightVmError};
39#[cfg(target_os = "linux")]
40use crate::log_then_return;
41use crate::mem::memory_region::MemoryRegion;
42#[cfg(target_os = "linux")]
43use crate::mem::memory_region::MemoryRegionFlags;
44use crate::mem::mgr::SandboxMemoryManager;
45use crate::mem::shared_mem::{HostSharedMemory, SharedMemory as _};
46use crate::metrics::{
47 METRIC_GUEST_ERROR, METRIC_GUEST_ERROR_LABEL_CODE, maybe_time_and_emit_guest_call,
48};
49
/// A sandbox that can run multiple guest function calls, with support for
/// taking snapshots of guest state and restoring them later.
pub struct MultiUseSandbox {
    // Process-unique id (allocated from SANDBOX_CONFIGURATION_COUNTER);
    // `restore` checks it so a snapshot is only restored into the sandbox
    // it was taken from.
    id: u64,
    // Set when a failed guest call or restore leaves the sandbox in an
    // unknown state; public operations refuse to run while this is true.
    poisoned: bool,
    pub(super) host_funcs: Arc<Mutex<FunctionRegistry>>,
    pub(crate) mem_mgr: SandboxMemoryManager<HostSharedMemory>,
    vm: HyperlightVm,
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    // Cached snapshot of the current state; set back to None by every
    // operation that mutates guest state (calls, new mappings).
    snapshot: Option<Arc<Snapshot>>,
}
102
103impl MultiUseSandbox {
104 #[instrument(skip_all, parent = Span::current(), level = "Trace")]
110 pub(super) fn from_uninit(
111 host_funcs: Arc<Mutex<FunctionRegistry>>,
112 mgr: SandboxMemoryManager<HostSharedMemory>,
113 vm: HyperlightVm,
114 #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
115 ) -> MultiUseSandbox {
116 Self {
117 id: super::snapshot::SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
118 poisoned: false,
119 host_funcs,
120 mem_mgr: mgr,
121 vm,
122 #[cfg(gdb)]
123 dbg_mem_access_fn,
124 snapshot: None,
125 }
126 }
127
128 #[instrument(err(Debug), skip_all, parent = Span::current())]
157 pub fn snapshot(&mut self) -> Result<Arc<Snapshot>> {
158 if self.poisoned {
159 return Err(crate::HyperlightError::PoisonedSandbox);
160 }
161
162 if let Some(snapshot) = &self.snapshot {
163 return Ok(snapshot.clone());
164 }
165 let mapped_regions_iter = self.vm.get_mapped_regions();
166 let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
167 let root_pt_gpa = self
168 .vm
169 .get_root_pt()
170 .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
171 let stack_top_gpa = self.vm.get_stack_top();
172 let sregs = self
173 .vm
174 .get_snapshot_sregs()
175 .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
176 let entrypoint = self.vm.get_entrypoint();
177 let memory_snapshot = self.mem_mgr.snapshot(
178 self.id,
179 mapped_regions_vec,
180 root_pt_gpa,
181 stack_top_gpa,
182 sregs,
183 entrypoint,
184 )?;
185 let snapshot = Arc::new(memory_snapshot);
186 self.snapshot = Some(snapshot.clone());
187 Ok(snapshot)
188 }
189
190 #[instrument(err(Debug), skip_all, parent = Span::current())]
266 pub fn restore(&mut self, snapshot: Arc<Snapshot>) -> Result<()> {
267 if self.id != snapshot.sandbox_id() {
291 return Err(SnapshotSandboxMismatch);
292 }
293
294 let (gsnapshot, gscratch) = self.mem_mgr.restore_snapshot(&snapshot)?;
295 if let Some(gsnapshot) = gsnapshot {
296 self.vm
297 .update_snapshot_mapping(gsnapshot)
298 .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
299 }
300 if let Some(gscratch) = gscratch {
301 self.vm
302 .update_scratch_mapping(gscratch)
303 .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
304 }
305
306 let sregs = snapshot.sregs().ok_or_else(|| {
307 HyperlightError::Error("snapshot from running sandbox should have sregs".to_string())
308 })?;
309 self.vm
312 .reset_vcpu(snapshot.root_pt_gpa(), sregs)
313 .map_err(|e| {
314 self.poisoned = true;
315 HyperlightVmError::Restore(e)
316 })?;
317
318 self.vm.set_stack_top(snapshot.stack_top_gva());
319 self.vm.set_entrypoint(snapshot.entrypoint());
320
321 let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
322 let snapshot_regions: HashSet<_> = snapshot.regions().iter().cloned().collect();
323
324 let regions_to_unmap = current_regions.difference(&snapshot_regions);
325 let regions_to_map = snapshot_regions.difference(¤t_regions);
326
327 for region in regions_to_unmap {
328 self.vm
329 .unmap_region(region)
330 .map_err(HyperlightVmError::UnmapRegion)?;
331 }
332
333 for region in regions_to_map {
334 unsafe { self.vm.map_region(region) }.map_err(HyperlightVmError::MapRegion)?;
337 }
338
339 self.snapshot = Some(snapshot.clone());
341
342 self.poisoned = false;
352
353 Ok(())
354 }
355
356 #[doc(hidden)]
393 #[deprecated(
394 since = "0.8.0",
395 note = "Deprecated in favour of call and snapshot/restore."
396 )]
397 #[instrument(err(Debug), skip(self, args), parent = Span::current())]
398 pub fn call_guest_function_by_name<Output: SupportedReturnType>(
399 &mut self,
400 func_name: &str,
401 args: impl ParameterTuple,
402 ) -> Result<Output> {
403 if self.poisoned {
404 return Err(crate::HyperlightError::PoisonedSandbox);
405 }
406 let snapshot = self.snapshot()?;
407 let res = self.call(func_name, args);
408 self.restore(snapshot)?;
409 res
410 }
411
412 #[instrument(err(Debug), skip(self, args), parent = Span::current())]
489 pub fn call<Output: SupportedReturnType>(
490 &mut self,
491 func_name: &str,
492 args: impl ParameterTuple,
493 ) -> Result<Output> {
494 if self.poisoned {
495 return Err(crate::HyperlightError::PoisonedSandbox);
496 }
497 self.snapshot = None;
499 maybe_time_and_emit_guest_call(func_name, || {
500 let ret = self.call_guest_function_by_name_no_reset(
501 func_name,
502 Output::TYPE,
503 args.into_value(),
504 );
505 let ret = Output::from_value(ret?)?;
508 Ok(ret)
509 })
510 }
511
    /// Maps a host memory region into the guest's address space.
    ///
    /// Only non-writable mappings are accepted for now. Mapping invalidates
    /// any cached snapshot.
    ///
    /// # Safety
    /// The caller must uphold the requirements of `HyperlightVm::map_region`;
    /// presumably the host memory backing `rgn` must remain valid while it is
    /// mapped into the guest — confirm against the VM implementation.
    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
    #[cfg(target_os = "linux")]
    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // Writable mappings are explicitly rejected until supported.
        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
            log_then_return!("TODO: Writable mappings not yet supported");
        }
        // The guest address space changes, so any cached snapshot is stale.
        self.snapshot = None;
        unsafe { self.vm.map_region(rgn) }.map_err(HyperlightVmError::MapRegion)?;
        self.mem_mgr.mapped_rgns += 1;
        Ok(())
    }
545
546 #[instrument(err(Debug), skip(self, file_path, guest_base, label), parent = Span::current())]
558 pub fn map_file_cow(
559 &mut self,
560 file_path: &Path,
561 guest_base: u64,
562 label: Option<&str>,
563 ) -> Result<u64> {
564 if self.poisoned {
565 return Err(crate::HyperlightError::PoisonedSandbox);
566 }
567
568 let current_count = self
571 .mem_mgr
572 .shared_mem
573 .read::<u64>(self.mem_mgr.layout.get_file_mappings_size_offset())?
574 as usize;
575 if current_count >= hyperlight_common::mem::MAX_FILE_MAPPINGS {
576 return Err(crate::HyperlightError::Error(format!(
577 "map_file_cow: file mapping limit reached ({} of {})",
578 current_count,
579 hyperlight_common::mem::MAX_FILE_MAPPINGS,
580 )));
581 }
582
583 let mut prepared = prepare_file_cow(file_path, guest_base, label)?;
585
586 let shared_size = self.mem_mgr.shared_mem.mem_size() as u64;
589 let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
590 let shared_end = base_addr.checked_add(shared_size).ok_or_else(|| {
591 crate::HyperlightError::Error("shared memory end overflow".to_string())
592 })?;
593 let mapping_end = guest_base
594 .checked_add(prepared.size as u64)
595 .ok_or_else(|| {
596 crate::HyperlightError::Error(format!(
597 "map_file_cow: guest address overflow: {:#x} + {:#x}",
598 guest_base, prepared.size
599 ))
600 })?;
601 if guest_base < shared_end && mapping_end > base_addr {
602 return Err(crate::HyperlightError::Error(format!(
603 "map_file_cow: mapping [{:#x}..{:#x}) overlaps sandbox shared memory [{:#x}..{:#x})",
604 guest_base, mapping_end, base_addr, shared_end,
605 )));
606 }
607
608 let region = prepared.to_memory_region()?;
610
611 for existing_region in self.vm.get_mapped_regions() {
613 let ex_start = existing_region.guest_region.start as u64;
614 let ex_end = existing_region.guest_region.end as u64;
615 if guest_base < ex_end && mapping_end > ex_start {
616 return Err(crate::HyperlightError::Error(format!(
617 "map_file_cow: mapping [{:#x}..{:#x}) overlaps existing mapping [{:#x}..{:#x})",
618 guest_base, mapping_end, ex_start, ex_end,
619 )));
620 }
621 }
622
623 self.snapshot = None;
625
626 unsafe { self.vm.map_region(®ion) }
627 .map_err(HyperlightVmError::MapRegion)
628 .map_err(crate::HyperlightError::HyperlightVmError)?;
629
630 let size = prepared.size as u64;
631
632 prepared.mark_consumed();
640 self.mem_mgr.mapped_rgns += 1;
641
642 self.mem_mgr
647 .write_file_mapping_entry(prepared.guest_base, size, &prepared.label)?;
648
649 Ok(size)
650 }
651
    /// Calls a guest function with a type-erased return type and argument
    /// list. Only compiled in for fuzzing builds.
    #[cfg(feature = "fuzzing")]
    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
    pub fn call_type_erased_guest_function_by_name(
        &mut self,
        func_name: &str,
        ret_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // Guest execution mutates state; drop the cached snapshot.
        self.snapshot = None;
        maybe_time_and_emit_guest_call(func_name, || {
            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
        })
    }
677
    /// Dispatches a guest function call without restoring state afterwards.
    ///
    /// On any error the sandbox's I/O buffers are cleared and, when the
    /// error is classified as a poison error, the sandbox is marked
    /// poisoned.
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // Clear any cancellation state left over from a previous call.
        self.vm.clear_cancel();

        // The closure isolates the fallible call path so the cleanup below
        // runs exactly once on any error.
        let res = (|| {
            // Size the flatbuffer builder up front to avoid regrowth.
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            self.mem_mgr.write_guest_function_call(buffer)?;

            let dispatch_res = self.vm.dispatch_call_from_host(
                &mut self.mem_mgr,
                &self.host_funcs,
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            );

            if let Err(e) = dispatch_res {
                // Promote the dispatch error; poison the sandbox when the
                // promotion says the failure is unrecoverable.
                let (error, should_poison) = e.promote();
                self.poisoned |= should_poison;
                return Err(error);
            }

            let guest_result = self.mem_mgr.get_guest_function_call_result()?.into_inner();

            match guest_result {
                Ok(val) => Ok(val),
                Err(guest_error) => {
                    // Count guest-reported errors, labelled by error code.
                    metrics::counter!(
                        METRIC_GUEST_ERROR,
                        METRIC_GUEST_ERROR_LABEL_CODE => (guest_error.code as u64).to_string()
                    )
                    .increment(1);

                    Err(HyperlightError::GuestError(
                        guest_error.code,
                        guest_error.message,
                    ))
                }
            }
        })();

        if let Err(e) = &res {
            // Leave the I/O buffers clean for the next call.
            self.mem_mgr.clear_io_buffers();

            self.poisoned |= e.is_poison_error();
        }

        res
    }
757
    /// Returns the VM's interrupt handle, which can be used to interrupt
    /// guest execution in this sandbox.
    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
        self.vm.interrupt_handle()
    }
790
    /// Generates a crashdump of the current sandbox state to the default
    /// location (no directory override).
    #[cfg(crashdump)]
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn generate_crashdump(&mut self) -> Result<()> {
        crate::hypervisor::crashdump::generate_crashdump(&self.vm, &mut self.mem_mgr, None)
    }
831
832 #[cfg(crashdump)]
839 #[instrument(err(Debug), skip_all, parent = Span::current())]
840 pub fn generate_crashdump_to_dir(&mut self, dir: impl Into<String>) -> Result<()> {
841 crate::hypervisor::crashdump::generate_crashdump(
842 &self.vm,
843 &mut self.mem_mgr,
844 Some(dir.into()),
845 )
846 }
847
    /// Whether this sandbox is poisoned. A poisoned sandbox rejects further
    /// operations until a successful `restore` clears the flag.
    pub fn poisoned(&self) -> bool {
        self.poisoned
    }
887}
888
/// Lets a `MultiUseSandbox` be used wherever a generic `Callable` is
/// expected, delegating to the inherent `call` method.
impl Callable for MultiUseSandbox {
    fn call<Output: SupportedReturnType>(
        &mut self,
        func_name: &str,
        args: impl ParameterTuple,
    ) -> Result<Output> {
        // Note: the inherent `call` performs this same poison check again;
        // this early return simply fails fast.
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        self.call(func_name, args)
    }
}
901
902impl std::fmt::Debug for MultiUseSandbox {
903 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
904 f.debug_struct("MultiUseSandbox").finish()
905 }
906}
907
908#[cfg(test)]
909mod tests {
910 use std::sync::{Arc, Barrier};
911 use std::thread;
912
913 use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
914 use hyperlight_testing::sandbox_sizes::{LARGE_HEAP_SIZE, MEDIUM_HEAP_SIZE, SMALL_HEAP_SIZE};
915 use hyperlight_testing::simple_guest_as_string;
916
917 #[cfg(target_os = "linux")]
918 use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
919 #[cfg(target_os = "linux")]
920 use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
921 use crate::sandbox::SandboxConfiguration;
922 use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};
923
924 #[test]
925 fn poison() {
926 let mut sbox: MultiUseSandbox = {
927 let path = simple_guest_as_string().unwrap();
928 let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
929 u_sbox.evolve()
930 }
931 .unwrap();
932 let snapshot = sbox.snapshot().unwrap();
933
934 let res = sbox
936 .call::<()>("guest_panic", "hello".to_string())
937 .unwrap_err();
938 assert!(
939 matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
940 );
941 assert!(sbox.poisoned());
942
943 let res = sbox
945 .call::<()>("guest_panic", "hello2".to_string())
946 .unwrap_err();
947 assert!(matches!(res, HyperlightError::PoisonedSandbox));
948
949 if let Err(e) = sbox.snapshot() {
951 assert!(sbox.poisoned());
952 assert!(matches!(e, HyperlightError::PoisonedSandbox));
953 } else {
954 panic!("Snapshot should fail");
955 }
956
957 #[cfg(target_os = "linux")]
959 {
960 let map_mem = allocate_guest_memory();
961 let guest_base = 0x0;
962 let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
963 let res = unsafe { sbox.map_region(®ion) }.unwrap_err();
964 assert!(matches!(res, HyperlightError::PoisonedSandbox));
965 }
966
967 #[cfg(target_os = "linux")]
969 {
970 let temp_file = std::env::temp_dir().join("test_poison_map_file.bin");
971 let res = sbox.map_file_cow(&temp_file, 0x0, None).unwrap_err();
972 assert!(matches!(res, HyperlightError::PoisonedSandbox));
973 std::fs::remove_file(&temp_file).ok(); }
975
976 #[allow(deprecated)]
978 let res = sbox
979 .call_guest_function_by_name::<String>("Echo", "test".to_string())
980 .unwrap_err();
981 assert!(matches!(res, HyperlightError::PoisonedSandbox));
982
983 sbox.restore(snapshot.clone()).unwrap();
985 assert!(!sbox.poisoned());
986
987 let res = sbox.call::<String>("Echo", "hello2".to_string()).unwrap();
989 assert_eq!(res, "hello2".to_string());
990 assert!(!sbox.poisoned());
991
992 let res = sbox
994 .call::<()>("guest_panic", "hello".to_string())
995 .unwrap_err();
996 assert!(
997 matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
998 );
999 assert!(sbox.poisoned());
1000
1001 sbox.restore(snapshot.clone()).unwrap();
1003 assert!(!sbox.poisoned());
1004
1005 let res = sbox.call::<String>("Echo", "hello3".to_string()).unwrap();
1007 assert_eq!(res, "hello3".to_string());
1008 assert!(!sbox.poisoned());
1009
1010 let _ = sbox.snapshot().unwrap();
1012 }
1013
    // A host function that returns an error must surface as
    // GuestError(HostFunctionError, ..) on every call, without breaking the
    // sandbox across repeated invocations.
    #[test]
    fn host_func_error() {
        let path = simple_guest_as_string().unwrap();
        let mut sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
        sandbox
            .register("HostError", || -> Result<()> {
                Err(HyperlightError::Error("hi".to_string()))
            })
            .unwrap();
        let mut sandbox = sandbox.evolve().unwrap();

        // Repeat to verify the error path is stable across many calls.
        for _ in 0..1000 {
            let result = sandbox
                .call::<i64>(
                    "CallGivenParamlessHostFuncThatReturnsI64",
                    "HostError".to_string(),
                )
                .unwrap_err();

            assert!(
                matches!(result, HyperlightError::GuestError(code, msg) if code == ErrorCode::HostFunctionError && msg == "hi"),
            );
        }
    }
1040
1041 #[test]
1042 fn call_host_func_expect_error() {
1043 let path = simple_guest_as_string().unwrap();
1044 let sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1045 let mut sandbox = sandbox.evolve().unwrap();
1046 sandbox
1047 .call::<()>("CallHostExpectError", "SomeUnknownHostFunc".to_string())
1048 .unwrap();
1049 }
1050
    // With deliberately small I/O buffers, alternating successful and
    // failing calls many times must keep working — i.e. the buffers are
    // reset between calls.
    #[test]
    fn io_buffer_reset() {
        let mut cfg = SandboxConfiguration::default();
        cfg.set_input_data_size(4096);
        cfg.set_output_data_size(4096);
        let path = simple_guest_as_string().unwrap();
        let mut sandbox =
            UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
        sandbox.register("HostAdd", |a: i32, b: i32| a + b).unwrap();
        let mut sandbox = sandbox.evolve().unwrap();

        for _ in 0..1000 {
            let result = sandbox.call::<i32>("Add", (5i32, 10i32)).unwrap();
            assert_eq!(result, 15);
            let result = sandbox.call::<i32>("AddToStaticAndFail", ()).unwrap_err();
            assert!(
                matches!(result, HyperlightError::GuestError (code, msg ) if code == ErrorCode::GuestError && msg == "Crash on purpose")
            );
        }
    }
1073
    // The deprecated call_guest_function_by_name must roll guest state back
    // after each call, unlike the plain `call` method.
    #[test]
    fn test_call_guest_function_by_name() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        // Plain `call` leaves the guest static mutated.
        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        sbox.restore(snapshot).unwrap();
        // The deprecated wrapper restores state, so the static stays 0.
        #[allow(deprecated)]
        let _ = sbox
            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
            .unwrap();
        #[allow(deprecated)]
        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }
1099
    // Sandboxes configured with a small heap and near-minimal scratch space
    // must still survive many guest calls.
    #[test]
    fn test_with_small_stack_and_heap() {
        let mut cfg = SandboxConfiguration::default();
        cfg.set_heap_size(20 * 1024);
        // Scratch must cover at least the configured I/O buffer sizes.
        let min_scratch = hyperlight_common::layout::min_scratch_size(
            cfg.get_input_data_size(),
            cfg.get_output_data_size(),
        );
        // Add 2 x 0x10000 of headroom on top of the minimum.
        cfg.set_scratch_size(min_scratch + 0x10000 + 0x10000);

        let mut sbox1: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        for _ in 0..1000 {
            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
        }

        let mut sbox2: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        for i in 0..1000 {
            sbox2
                .call::<i32>(
                    "PrintUsingPrintf",
                    format!("Hello World {}\n", i).to_string(),
                )
                .unwrap();
        }
    }
1143
    // Basic snapshot/restore: guest static state mutated after a snapshot
    // must be rolled back by restoring that snapshot.
    #[test]
    fn snapshot_evolve_restore_handles_state_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        // Mutate guest state after the snapshot was taken.
        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();

        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        // Restore must roll the static back to its snapshotted value.
        sbox.restore(snapshot).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }
1166
1167 #[test]
1168 fn test_trigger_exception_on_guest() {
1169 let usbox = UninitializedSandbox::new(
1170 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1171 None,
1172 )
1173 .unwrap();
1174
1175 let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
1176
1177 let res: Result<()> = multi_use_sandbox.call("TriggerException", ());
1178
1179 assert!(res.is_err());
1180
1181 match res.unwrap_err() {
1182 HyperlightError::GuestAborted(_, msg) => {
1183 assert!(msg.contains("InvalidOpcode"));
1185 }
1186 e => panic!(
1187 "Expected HyperlightError::GuestExecutionError but got {:?}",
1188 e
1189 ),
1190 }
1191 }
1192
    // Stress test: 10 threads each create and exercise 20 sandboxes
    // concurrently (200 total), all released at once by a barrier.
    #[test]
    fn create_200_sandboxes() {
        const NUM_THREADS: usize = 10;
        const SANDBOXES_PER_THREAD: usize = 20;

        // +1 so the main thread participates in the barrier release.
        let start_barrier = Arc::new(Barrier::new(NUM_THREADS + 1));
        let mut thread_handles = vec![];

        for _ in 0..NUM_THREADS {
            let barrier = start_barrier.clone();

            let handle = thread::spawn(move || {
                // Wait until all threads are ready, to maximize contention.
                barrier.wait();

                for _ in 0..SANDBOXES_PER_THREAD {
                    let guest_path = simple_guest_as_string().expect("Guest Binary Missing");
                    let uninit =
                        UninitializedSandbox::new(GuestBinary::FilePath(guest_path), None).unwrap();

                    let mut sandbox: MultiUseSandbox = uninit.evolve().unwrap();

                    let result: i32 = sandbox.call("GetStatic", ()).unwrap();
                    assert_eq!(result, 0);
                }
            });

            thread_handles.push(handle);
        }

        start_barrier.wait();

        for handle in thread_handles {
            handle.join().unwrap();
        }
    }
1229
1230 #[cfg(target_os = "linux")]
1231 #[test]
1232 fn test_mmap() {
1233 let mut sbox = UninitializedSandbox::new(
1234 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1235 None,
1236 )
1237 .unwrap()
1238 .evolve()
1239 .unwrap();
1240
1241 let expected = b"hello world";
1242 let map_mem = page_aligned_memory(expected);
1243 let guest_base = 0x1_0000_0000; unsafe {
1246 sbox.map_region(®ion_for_memory(
1247 &map_mem,
1248 guest_base,
1249 MemoryRegionFlags::READ,
1250 ))
1251 .unwrap();
1252 }
1253
1254 let _guard = map_mem.lock.try_read().unwrap();
1255 let actual: Vec<u8> = sbox
1256 .call(
1257 "ReadMappedBuffer",
1258 (guest_base as u64, expected.len() as u64, true),
1259 )
1260 .unwrap();
1261
1262 assert_eq!(actual, expected);
1263 }
1264
1265 #[cfg(target_os = "linux")]
1267 #[test]
1268 fn test_mmap_write_exec() {
1269 let mut sbox = UninitializedSandbox::new(
1270 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1271 None,
1272 )
1273 .unwrap()
1274 .evolve()
1275 .unwrap();
1276
1277 let expected = &[0x90, 0x90, 0x90, 0xC3]; let map_mem = page_aligned_memory(expected);
1279 let guest_base = 0x1_0000_0000; unsafe {
1282 sbox.map_region(®ion_for_memory(
1283 &map_mem,
1284 guest_base,
1285 MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
1286 ))
1287 .unwrap();
1288 }
1289
1290 let _guard = map_mem.lock.try_read().unwrap();
1291
1292 let succeed = sbox
1294 .call::<bool>(
1295 "ExecMappedBuffer",
1296 (guest_base as u64, expected.len() as u64),
1297 )
1298 .unwrap();
1299 assert!(succeed, "Expected execution of mapped buffer to succeed");
1300
1301 let err = sbox
1303 .call::<bool>(
1304 "WriteMappedBuffer",
1305 (guest_base as u64, expected.len() as u64),
1306 )
1307 .unwrap_err();
1308
1309 match err {
1310 HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
1311 _ => panic!("Expected MemoryAccessViolation error"),
1312 };
1313 }
1314
1315 #[cfg(target_os = "linux")]
1316 fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
1317 use hyperlight_common::mem::PAGE_SIZE_USIZE;
1318
1319 let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
1320
1321 let mut mem = ExclusiveSharedMemory::new(len).unwrap();
1322 mem.copy_from_slice(src, 0).unwrap();
1323
1324 let (_, guest_mem) = mem.build();
1325
1326 guest_mem
1327 }
1328
1329 #[cfg(target_os = "linux")]
1330 fn region_for_memory(
1331 mem: &GuestSharedMemory,
1332 guest_base: usize,
1333 flags: MemoryRegionFlags,
1334 ) -> MemoryRegion {
1335 let ptr = mem.base_addr();
1336 let len = mem.mem_size();
1337 MemoryRegion {
1338 host_region: ptr..(ptr + len),
1339 guest_region: guest_base..(guest_base + len),
1340 flags,
1341 region_type: MemoryRegionType::Heap,
1342 }
1343 }
1344
    // Convenience: page-aligned guest memory prefilled with fixed test data.
    #[cfg(target_os = "linux")]
    fn allocate_guest_memory() -> GuestSharedMemory {
        page_aligned_memory(b"test data for snapshot")
    }
1349
1350 #[test]
1351 #[cfg(target_os = "linux")]
1352 fn snapshot_restore_handles_remapping_correctly() {
1353 let mut sbox: MultiUseSandbox = {
1354 let path = simple_guest_as_string().unwrap();
1355 let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1356 u_sbox.evolve().unwrap()
1357 };
1358
1359 let snapshot1 = sbox.snapshot().unwrap();
1361 assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
1362
1363 let map_mem = allocate_guest_memory();
1365 let guest_base = 0x200000000_usize;
1366 let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
1367
1368 unsafe { sbox.map_region(®ion).unwrap() };
1369 assert_eq!(sbox.vm.get_mapped_regions().count(), 1);
1370 let orig_read = sbox
1371 .call::<Vec<u8>>(
1372 "ReadMappedBuffer",
1373 (
1374 guest_base as u64,
1375 hyperlight_common::vmem::PAGE_SIZE as u64,
1376 true,
1377 ),
1378 )
1379 .unwrap();
1380
1381 let snapshot2 = sbox.snapshot().unwrap();
1383 assert_eq!(sbox.vm.get_mapped_regions().count(), 1);
1384
1385 sbox.restore(snapshot1.clone()).unwrap();
1387 assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
1388 let is_mapped = sbox
1389 .call::<bool>("CheckMapped", (guest_base as u64,))
1390 .unwrap();
1391 assert!(!is_mapped);
1392
1393 sbox.restore(snapshot2.clone()).unwrap();
1396 assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
1397 let is_mapped = sbox
1398 .call::<bool>("CheckMapped", (guest_base as u64,))
1399 .unwrap();
1400 assert!(is_mapped);
1401
1402 let new_read = sbox
1404 .call::<Vec<u8>>(
1405 "ReadMappedBuffer",
1406 (
1407 guest_base as u64,
1408 hyperlight_common::vmem::PAGE_SIZE as u64,
1409 false,
1410 ),
1411 )
1412 .unwrap();
1413 assert_eq!(new_read, orig_read);
1414 }
1415
    // A snapshot is tied to the sandbox it was taken from: restoring it into
    // another sandbox must fail, and sandbox ids are never reused.
    #[test]
    fn snapshot_different_sandbox() {
        let mut sandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        let mut sandbox2 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox.id, sandbox2.id);

        // Restoring sandbox 1's snapshot into sandbox 2 must be rejected.
        let snapshot = sandbox.snapshot().unwrap();
        let err = sandbox2.restore(snapshot.clone());
        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));

        // Even after dropping everything, a new sandbox gets a fresh id.
        let sandbox_id = sandbox.id;
        drop(sandbox);
        drop(sandbox2);
        drop(snapshot);

        let sandbox3 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox3.id, sandbox_id);
    }
1447
    // Restoring a snapshot must reset vCPU debug registers: a DR0 value set
    // by the guest after the snapshot must be gone after restore.
    #[test]
    fn snapshot_restore_resets_debug_registers() {
        let mut sandbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        let snapshot = sandbox.snapshot().unwrap();

        let dr0_initial: u64 = sandbox.call("GetDr0", ()).unwrap();
        assert_eq!(dr0_initial, 0, "DR0 should initially be 0");

        // Dirty DR0 with a recognizable value.
        const DIRTY_VALUE: u64 = 0xDEAD_BEEF_CAFE_BABE;
        sandbox.call::<()>("SetDr0", DIRTY_VALUE).unwrap();
        let dr0_dirty: u64 = sandbox.call("GetDr0", ()).unwrap();
        assert_eq!(
            dr0_dirty, DIRTY_VALUE,
            "DR0 should be dirty after SetDr0 call"
        );

        sandbox.restore(snapshot).unwrap();

        let dr0_after_restore: u64 = sandbox.call("GetDr0", ()).unwrap();
        assert_eq!(
            dr0_after_restore, 0,
            "DR0 should be 0 after restore (reset_vcpu should have been called)"
        );
    }
1482
    // Sandboxes must be creatable across a range of heap sizes, from small
    // to large.
    #[test]
    fn test_sandbox_creation_various_sizes() {
        let test_cases: [(&str, u64); 3] = [
            ("small (8MB heap)", SMALL_HEAP_SIZE),
            ("medium (64MB heap)", MEDIUM_HEAP_SIZE),
            ("large (256MB heap)", LARGE_HEAP_SIZE),
        ];

        for (name, heap_size) in test_cases {
            let mut cfg = SandboxConfiguration::default();
            cfg.set_heap_size(heap_size);
            cfg.set_scratch_size(0x100000);

            let path = simple_guest_as_string().unwrap();
            let sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg))
                .unwrap_or_else(|e| panic!("Failed to create {} sandbox: {}", name, e))
                .evolve()
                .unwrap_or_else(|e| panic!("Failed to evolve {} sandbox: {}", name, e));

            drop(sbox);
        }
    }
1506
1507 #[cfg(feature = "trace_guest")]
1509 fn sandbox_for_gva_tests() -> MultiUseSandbox {
1510 let path = simple_guest_as_string().unwrap();
1511 UninitializedSandbox::new(GuestBinary::FilePath(path), None)
1512 .unwrap()
1513 .evolve()
1514 .unwrap()
1515 }
1516
    // Reads `len` bytes at `gva` both via the guest (ReadMappedBuffer) and
    // via the host-side read_guest_memory_by_gva, and asserts they agree.
    #[cfg(feature = "trace_guest")]
    fn assert_gva_read_matches(sbox: &mut MultiUseSandbox, gva: u64, len: usize) {
        let expected: Vec<u8> = sbox
            .call("ReadMappedBuffer", (gva, len as u64, true))
            .unwrap();
        assert_eq!(expected.len(), len);

        let root_pt = sbox.vm.get_root_pt().unwrap();
        let actual = sbox
            .mem_mgr
            .read_guest_memory_by_gva(gva, len, root_pt)
            .unwrap();

        assert_eq!(
            actual, expected,
            "read_guest_memory_by_gva at GVA {:#x} (len {}) differs from guest ReadMappedBuffer",
            gva, len,
        );
    }
1541
    // Partial read that stays within a single page.
    #[test]
    #[cfg(feature = "trace_guest")]
    fn read_guest_memory_by_gva_single_page() {
        let mut sbox = sandbox_for_gva_tests();
        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
        assert_gva_read_matches(&mut sbox, code_gva, 128);
    }
1551
    // Read of exactly one full page.
    #[test]
    #[cfg(feature = "trace_guest")]
    fn read_guest_memory_by_gva_full_page() {
        let mut sbox = sandbox_for_gva_tests();
        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
        assert_gva_read_matches(&mut sbox, code_gva, 4096);
    }
1561
    // Unaligned read starting one byte before a page boundary and spanning
    // a full page plus one byte (4097 bytes total).
    #[test]
    #[cfg(feature = "trace_guest")]
    fn read_guest_memory_by_gva_unaligned_cross_page() {
        let mut sbox = sandbox_for_gva_tests();
        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
        let start = code_gva + 4096 - 1;
        println!(
            "Testing unaligned cross-page read starting at {:#x} spanning 4097 bytes",
            start
        );
        assert_gva_read_matches(&mut sbox, start, 4097);
    }
1578
    // Read spanning exactly two full pages.
    #[test]
    #[cfg(feature = "trace_guest")]
    fn read_guest_memory_by_gva_two_full_pages() {
        let mut sbox = sandbox_for_gva_tests();
        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
        assert_gva_read_matches(&mut sbox, code_gva, 4096 * 2);
    }
1587
    // Read straddling a page boundary (100 bytes on each side).
    #[test]
    #[cfg(feature = "trace_guest")]
    fn read_guest_memory_by_gva_cross_page_boundary() {
        let mut sbox = sandbox_for_gva_tests();
        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
        let start = code_gva + 4096 - 100;
        assert_gva_read_matches(&mut sbox, start, 200);
    }
1600
1601 fn create_test_file(name: &str, content: &[u8]) -> (std::path::PathBuf, Vec<u8>) {
1605 use std::io::Write;
1606
1607 let page_size = page_size::get();
1608 let padded_len = content.len().max(page_size).div_ceil(page_size) * page_size;
1609 let mut padded = vec![0u8; padded_len];
1610 padded[..content.len()].copy_from_slice(content);
1611
1612 let temp_dir = std::env::temp_dir();
1613 let path = temp_dir.join(name);
1614 let _ = std::fs::remove_file(&path); let mut f = std::fs::File::create(&path).unwrap();
1616 f.write_all(&padded).unwrap();
1617 (path, content.to_vec())
1618 }
1619
1620 #[test]
1623 fn test_map_file_cow_basic() {
1624 let expected = b"hello world from map_file_cow";
1625 let (path, expected_bytes) =
1626 create_test_file("hyperlight_test_map_file_cow_basic.bin", expected);
1627
1628 let mut sbox = UninitializedSandbox::new(
1629 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1630 None,
1631 )
1632 .unwrap()
1633 .evolve()
1634 .unwrap();
1635
1636 let guest_base: u64 = 0x1_0000_0000;
1637 let mapped_size = sbox.map_file_cow(&path, guest_base, None).unwrap();
1638 assert!(mapped_size > 0, "mapped_size should be positive");
1639 assert!(
1640 mapped_size >= expected.len() as u64,
1641 "mapped_size should be >= file content length"
1642 );
1643
1644 let actual: Vec<u8> = sbox
1646 .call(
1647 "ReadMappedBuffer",
1648 (guest_base, expected_bytes.len() as u64, true),
1649 )
1650 .unwrap();
1651
1652 assert_eq!(
1653 actual, expected_bytes,
1654 "Guest should read back the exact file content"
1655 );
1656
1657 let _ = std::fs::remove_file(&path);
1659 }
1660
1661 #[test]
1664 fn test_map_file_cow_read_only_enforcement() {
1665 let content = &[0xBB; 4096];
1666 let (path, _) = create_test_file("hyperlight_test_map_file_cow_readonly.bin", content);
1667
1668 let mut sbox = UninitializedSandbox::new(
1669 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1670 None,
1671 )
1672 .unwrap()
1673 .evolve()
1674 .unwrap();
1675
1676 let guest_base: u64 = 0x1_0000_0000;
1677 sbox.map_file_cow(&path, guest_base, None).unwrap();
1678
1679 let err = sbox
1681 .call::<bool>("WriteMappedBuffer", (guest_base, content.len() as u64))
1682 .unwrap_err();
1683
1684 match err {
1685 HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base => {}
1686 _ => panic!(
1687 "Expected MemoryAccessViolation at guest_base, got: {:?}",
1688 err
1689 ),
1690 };
1691
1692 let _ = std::fs::remove_file(&path);
1694 }
1695
1696 #[test]
1699 fn test_map_file_cow_poisoned() {
1700 let (path, _) = create_test_file("hyperlight_test_map_file_cow_poison.bin", &[0xCC; 4096]);
1701
1702 let mut sbox: MultiUseSandbox = {
1703 let path = simple_guest_as_string().unwrap();
1704 let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1705 u_sbox.evolve()
1706 }
1707 .unwrap();
1708 let snapshot = sbox.snapshot().unwrap();
1709
1710 let _ = sbox
1712 .call::<()>("guest_panic", "hello".to_string())
1713 .unwrap_err();
1714 assert!(sbox.poisoned());
1715
1716 let err = sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap_err();
1718 assert!(matches!(err, HyperlightError::PoisonedSandbox));
1719
1720 sbox.restore(snapshot).unwrap();
1722 assert!(!sbox.poisoned());
1723 let result = sbox.map_file_cow(&path, 0x1_0000_0000, None);
1724 assert!(result.is_ok());
1725
1726 let _ = std::fs::remove_file(&path);
1727 }
1728
1729 #[test]
1732 fn test_map_file_cow_multi_vm_same_file() {
1733 let expected = b"shared file content across VMs";
1734 let (path, expected_bytes) =
1735 create_test_file("hyperlight_test_map_file_cow_multi_vm.bin", expected);
1736
1737 let guest_base: u64 = 0x1_0000_0000;
1738
1739 let mut sbox1 = UninitializedSandbox::new(
1740 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1741 None,
1742 )
1743 .unwrap()
1744 .evolve()
1745 .unwrap();
1746
1747 let mut sbox2 = UninitializedSandbox::new(
1748 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1749 None,
1750 )
1751 .unwrap()
1752 .evolve()
1753 .unwrap();
1754
1755 sbox1.map_file_cow(&path, guest_base, None).unwrap();
1757 sbox2.map_file_cow(&path, guest_base, None).unwrap();
1758
1759 let actual1: Vec<u8> = sbox1
1761 .call(
1762 "ReadMappedBuffer",
1763 (guest_base, expected_bytes.len() as u64, true),
1764 )
1765 .unwrap();
1766 let actual2: Vec<u8> = sbox2
1767 .call(
1768 "ReadMappedBuffer",
1769 (guest_base, expected_bytes.len() as u64, true),
1770 )
1771 .unwrap();
1772
1773 assert_eq!(
1774 actual1, expected_bytes,
1775 "Sandbox 1 should read correct content"
1776 );
1777 assert_eq!(
1778 actual2, expected_bytes,
1779 "Sandbox 2 should read correct content"
1780 );
1781
1782 let _ = std::fs::remove_file(&path);
1783 }
1784
1785 #[test]
1788 fn test_map_file_cow_multi_vm_threaded() {
1789 let expected = b"threaded file mapping test data";
1790 let (path, expected_bytes) =
1791 create_test_file("hyperlight_test_map_file_cow_threaded.bin", expected);
1792
1793 const NUM_THREADS: usize = 5;
1794 let path = Arc::new(path);
1795 let expected_bytes = Arc::new(expected_bytes);
1796 let barrier = Arc::new(Barrier::new(NUM_THREADS));
1797 let mut handles = vec![];
1798
1799 for _ in 0..NUM_THREADS {
1800 let path = path.clone();
1801 let expected_bytes = expected_bytes.clone();
1802 let barrier = barrier.clone();
1803
1804 handles.push(thread::spawn(move || {
1805 barrier.wait();
1806
1807 let mut sbox = UninitializedSandbox::new(
1808 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1809 None,
1810 )
1811 .unwrap()
1812 .evolve()
1813 .unwrap();
1814
1815 let guest_base: u64 = 0x1_0000_0000;
1816 sbox.map_file_cow(&path, guest_base, None).unwrap();
1817
1818 let actual: Vec<u8> = sbox
1819 .call(
1820 "ReadMappedBuffer",
1821 (guest_base, expected_bytes.len() as u64, true),
1822 )
1823 .unwrap();
1824
1825 assert_eq!(actual, *expected_bytes);
1826 }));
1827 }
1828
1829 for h in handles {
1830 h.join().unwrap();
1831 }
1832
1833 let _ = std::fs::remove_file(&*path);
1834 }
1835
1836 #[test]
1839 #[cfg(target_os = "windows")]
1840 fn test_map_file_cow_cleanup_no_handle_leak() {
1841 let (path, _) = create_test_file("hyperlight_test_map_file_cow_cleanup.bin", &[0xDD; 4096]);
1842
1843 {
1844 let mut sbox = UninitializedSandbox::new(
1845 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1846 None,
1847 )
1848 .unwrap()
1849 .evolve()
1850 .unwrap();
1851
1852 sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap();
1853 }
1855
1856 std::fs::remove_file(&path)
1857 .expect("File should be deletable after sandbox with map_file_cow is dropped");
1858 }
1859
1860 #[test]
1864 fn test_map_file_cow_snapshot_remapping_cycle() {
1865 let expected = b"snapshot remapping cycle test!";
1866 let (path, expected_bytes) =
1867 create_test_file("hyperlight_test_map_file_cow_snapshot_remap.bin", expected);
1868
1869 let mut sbox = UninitializedSandbox::new(
1870 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1871 None,
1872 )
1873 .unwrap()
1874 .evolve()
1875 .unwrap();
1876
1877 let guest_base: u64 = 0x1_0000_0000;
1878
1879 let snapshot1 = sbox.snapshot().unwrap();
1881
1882 sbox.map_file_cow(&path, guest_base, None).unwrap();
1884
1885 let actual: Vec<u8> = sbox
1887 .call(
1888 "ReadMappedBuffer",
1889 (guest_base, expected_bytes.len() as u64, true),
1890 )
1891 .unwrap();
1892 assert_eq!(actual, expected_bytes);
1893
1894 let snapshot2 = sbox.snapshot().unwrap();
1896
1897 sbox.restore(snapshot1.clone()).unwrap();
1899 let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1900 assert!(
1901 !is_mapped,
1902 "Region should be unmapped after restoring to snapshot₁"
1903 );
1904
1905 sbox.restore(snapshot2).unwrap();
1908 let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1909 assert!(
1910 is_mapped,
1911 "Region should be mapped after restoring to snapshot₂"
1912 );
1913 let actual2: Vec<u8> = sbox
1914 .call(
1915 "ReadMappedBuffer",
1916 (guest_base, expected_bytes.len() as u64, false),
1917 )
1918 .unwrap();
1919 assert_eq!(
1920 actual2, expected_bytes,
1921 "Data should be intact after snapshot₂ restore"
1922 );
1923
1924 let _ = std::fs::remove_file(&path);
1925 }
1926
1927 #[test]
1930 fn test_map_file_cow_snapshot_restore() {
1931 let expected = b"snapshot restore basic test!!";
1932 let (path, expected_bytes) =
1933 create_test_file("hyperlight_test_map_file_cow_snap_restore.bin", expected);
1934
1935 let mut sbox = UninitializedSandbox::new(
1936 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1937 None,
1938 )
1939 .unwrap()
1940 .evolve()
1941 .unwrap();
1942
1943 let guest_base: u64 = 0x1_0000_0000;
1944 sbox.map_file_cow(&path, guest_base, None).unwrap();
1945
1946 let actual: Vec<u8> = sbox
1948 .call(
1949 "ReadMappedBuffer",
1950 (guest_base, expected_bytes.len() as u64, true),
1951 )
1952 .unwrap();
1953 assert_eq!(actual, expected_bytes);
1954
1955 let snapshot = sbox.snapshot().unwrap();
1957
1958 sbox.restore(snapshot).unwrap();
1960
1961 let actual2: Vec<u8> = sbox
1963 .call(
1964 "ReadMappedBuffer",
1965 (guest_base, expected_bytes.len() as u64, false),
1966 )
1967 .unwrap();
1968 assert_eq!(
1969 actual2, expected_bytes,
1970 "Data should be readable after restore from snapshot"
1971 );
1972
1973 let _ = std::fs::remove_file(&path);
1974 }
1975
1976 #[test]
1980 fn test_map_file_cow_deferred_basic() {
1981 let expected = b"deferred map_file_cow test data";
1982 let (path, expected_bytes) =
1983 create_test_file("hyperlight_test_map_file_cow_deferred.bin", expected);
1984
1985 let guest_base: u64 = 0x1_0000_0000;
1986
1987 let mut u_sbox = UninitializedSandbox::new(
1988 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1989 None,
1990 )
1991 .unwrap();
1992
1993 let mapped_size = u_sbox.map_file_cow(&path, guest_base, None).unwrap();
1995 assert!(mapped_size > 0, "mapped_size should be positive");
1996 assert!(
1997 mapped_size >= expected.len() as u64,
1998 "mapped_size should be >= file content length"
1999 );
2000
2001 let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2003
2004 let actual: Vec<u8> = sbox
2006 .call(
2007 "ReadMappedBuffer",
2008 (guest_base, expected_bytes.len() as u64, true),
2009 )
2010 .unwrap();
2011
2012 assert_eq!(
2013 actual, expected_bytes,
2014 "Guest should read back the exact file content after deferred mapping"
2015 );
2016
2017 let _ = std::fs::remove_file(&path);
2018 }
2019
2020 #[test]
2024 fn test_map_file_cow_deferred_drop_without_evolve() {
2025 let (path, _) = create_test_file(
2026 "hyperlight_test_map_file_cow_deferred_drop.bin",
2027 &[0xAA; 4096],
2028 );
2029
2030 let guest_base: u64 = 0x1_0000_0000;
2031
2032 {
2033 let mut u_sbox = UninitializedSandbox::new(
2034 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2035 None,
2036 )
2037 .unwrap();
2038
2039 u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2040 }
2043
2044 #[cfg(target_os = "windows")]
2047 std::fs::remove_file(&path)
2048 .expect("File should be deletable after dropping UninitializedSandbox");
2049 #[cfg(not(target_os = "windows"))]
2050 let _ = std::fs::remove_file(&path);
2051 }
2052
2053 #[test]
2056 fn test_map_file_cow_unaligned_guest_base() {
2057 let (path, _) =
2058 create_test_file("hyperlight_test_map_file_cow_unaligned.bin", &[0xBB; 4096]);
2059
2060 let mut u_sbox = UninitializedSandbox::new(
2061 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2062 None,
2063 )
2064 .unwrap();
2065
2066 let unaligned_base: u64 = (page_size::get() + 1) as u64;
2068 let result = u_sbox.map_file_cow(&path, unaligned_base, None);
2069 assert!(
2070 result.is_err(),
2071 "map_file_cow should reject unaligned guest_base"
2072 );
2073
2074 let _ = std::fs::remove_file(&path);
2075 }
2076
2077 #[test]
2079 fn test_map_file_cow_empty_file() {
2080 let temp_dir = std::env::temp_dir();
2081 let path = temp_dir.join("hyperlight_test_map_file_cow_empty.bin");
2082 let _ = std::fs::remove_file(&path);
2083 std::fs::File::create(&path).unwrap(); let mut u_sbox = UninitializedSandbox::new(
2086 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2087 None,
2088 )
2089 .unwrap();
2090
2091 let guest_base: u64 = 0x1_0000_0000;
2092 let result = u_sbox.map_file_cow(&path, guest_base, None);
2093 assert!(result.is_err(), "map_file_cow should reject empty files");
2094
2095 let _ = std::fs::remove_file(&path);
2096 }
2097
2098 #[test]
2100 fn test_map_file_cow_custom_label() {
2101 let (path, _) = create_test_file("hyperlight_test_map_file_cow_label.bin", &[0xDD; 4096]);
2102
2103 let mut sbox = UninitializedSandbox::new(
2104 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2105 None,
2106 )
2107 .unwrap()
2108 .evolve()
2109 .unwrap();
2110
2111 let result = sbox.map_file_cow(&path, 0x1_0000_0000, Some("my_ramfs"));
2112 assert!(
2113 result.is_ok(),
2114 "map_file_cow with custom label should succeed"
2115 );
2116
2117 let _ = std::fs::remove_file(&path);
2118 }
2119
2120 #[test]
2124 fn test_map_file_cow_peb_entry_multiuse() {
2125 use std::mem::offset_of;
2126
2127 use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};
2128
2129 let (path, _) = create_test_file("hyperlight_test_peb_entry_multiuse.bin", &[0xDD; 4096]);
2130
2131 let guest_base: u64 = 0x1_0000_0000;
2132 let label = "my_ramfs";
2133
2134 let mut sbox = UninitializedSandbox::new(
2135 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2136 None,
2137 )
2138 .unwrap()
2139 .evolve()
2140 .unwrap();
2141
2142 let mapped_size = sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();
2144
2145 let count = sbox
2147 .mem_mgr
2148 .shared_mem
2149 .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
2150 .unwrap();
2151 assert_eq!(
2152 count, 1,
2153 "PEB file_mappings count should be 1 after one mapping"
2154 );
2155
2156 let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();
2158
2159 let stored_addr = sbox
2160 .mem_mgr
2161 .shared_mem
2162 .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
2163 .unwrap();
2164 assert_eq!(stored_addr, guest_base, "PEB entry guest_addr should match");
2165
2166 let stored_size = sbox
2167 .mem_mgr
2168 .shared_mem
2169 .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
2170 .unwrap();
2171 assert_eq!(
2172 stored_size, mapped_size,
2173 "PEB entry size should match mapped_size"
2174 );
2175
2176 let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
2178 let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
2179 for (i, byte) in label_buf.iter_mut().enumerate() {
2180 *byte = sbox
2181 .mem_mgr
2182 .shared_mem
2183 .read::<u8>(label_offset + i)
2184 .unwrap();
2185 }
2186 let label_len = label_buf
2187 .iter()
2188 .position(|&b| b == 0)
2189 .unwrap_or(label_buf.len());
2190 let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
2191 assert_eq!(stored_label, label, "PEB entry label should match");
2192
2193 let _ = std::fs::remove_file(&path);
2194 }
2195
2196 #[test]
2199 fn test_map_file_cow_peb_entry_deferred() {
2200 use std::mem::offset_of;
2201
2202 use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};
2203
2204 let (path, _) = create_test_file("hyperlight_test_peb_entry_deferred.bin", &[0xEE; 4096]);
2205
2206 let guest_base: u64 = 0x1_0000_0000;
2207 let label = "deferred_fs";
2208
2209 let mut u_sbox = UninitializedSandbox::new(
2210 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2211 None,
2212 )
2213 .unwrap();
2214
2215 let mapped_size = u_sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();
2216
2217 let sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2219
2220 let count = sbox
2222 .mem_mgr
2223 .shared_mem
2224 .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
2225 .unwrap();
2226 assert_eq!(count, 1, "PEB file_mappings count should be 1 after evolve");
2227
2228 let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();
2230
2231 let stored_addr = sbox
2232 .mem_mgr
2233 .shared_mem
2234 .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
2235 .unwrap();
2236 assert_eq!(stored_addr, guest_base);
2237
2238 let stored_size = sbox
2239 .mem_mgr
2240 .shared_mem
2241 .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
2242 .unwrap();
2243 assert_eq!(stored_size, mapped_size);
2244
2245 let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
2247 let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
2248 for (i, byte) in label_buf.iter_mut().enumerate() {
2249 *byte = sbox
2250 .mem_mgr
2251 .shared_mem
2252 .read::<u8>(label_offset + i)
2253 .unwrap();
2254 }
2255 let label_len = label_buf
2256 .iter()
2257 .position(|&b| b == 0)
2258 .unwrap_or(label_buf.len());
2259 let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
2260 assert_eq!(
2261 stored_label, label,
2262 "PEB entry label should match after evolve"
2263 );
2264
2265 let _ = std::fs::remove_file(&path);
2266 }
2267
    /// Mixes deferred (pre-evolve) and direct (post-evolve) mappings and
    /// verifies every PEB file_mappings entry — guest_addr, size, and label
    /// — matches what was registered, in registration order.
    #[test]
    fn test_map_file_cow_peb_multiple_entries() {
        use std::mem::{offset_of, size_of};

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        // First DEFERRED_COUNT files are mapped before evolve, the rest after.
        const NUM_FILES: usize = 5;
        const DEFERRED_COUNT: usize = 3;

        // Create NUM_FILES one-page files with distinct fill bytes and labels.
        let mut paths = Vec::new();
        let mut labels: Vec<String> = Vec::new();
        for i in 0..NUM_FILES {
            let name = format!("hyperlight_test_peb_multi_{}.bin", i);
            let content = vec![i as u8 + 0xA0; 4096];
            let (path, _) = create_test_file(&name, &content);
            paths.push(path);
            labels.push(format!("file_{}", i));
        }

        // Consecutive page-aligned guest bases, one page apart.
        let page_size = page_size::get() as u64;
        let base: u64 = 0x1_0000_0000;
        let guest_bases: Vec<u64> = (0..NUM_FILES as u64)
            .map(|i| base + i * page_size)
            .collect();

        let mut u_sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap();

        // Deferred mappings: registered on the uninitialized sandbox.
        let mut mapped_sizes = Vec::new();
        for i in 0..DEFERRED_COUNT {
            let size = u_sbox
                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
                .unwrap();
            mapped_sizes.push(size);
        }

        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();

        // Direct mappings: registered on the evolved sandbox.
        for i in DEFERRED_COUNT..NUM_FILES {
            let size = sbox
                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
                .unwrap();
            mapped_sizes.push(size);
        }

        // The PEB must report all NUM_FILES entries.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(
            count, NUM_FILES as u64,
            "PEB should have {NUM_FILES} entries"
        );

        // Walk the entry array: entry i starts i * size_of::<FileMappingInfo>()
        // past the array base.
        let array_base = sbox.mem_mgr.layout.get_file_mappings_array_offset();
        for i in 0..NUM_FILES {
            let entry_offset = array_base + i * size_of::<FileMappingInfo>();

            let stored_addr = sbox
                .mem_mgr
                .shared_mem
                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
                .unwrap();
            assert_eq!(
                stored_addr, guest_bases[i],
                "Entry {i}: guest_addr mismatch"
            );

            let stored_size = sbox
                .mem_mgr
                .shared_mem
                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
                .unwrap();
            assert_eq!(stored_size, mapped_sizes[i], "Entry {i}: size mismatch");

            // Labels are stored NUL-terminated in a fixed-size field; read
            // the whole buffer and cut at the first zero byte.
            let label_base = entry_offset + offset_of!(FileMappingInfo, label);
            let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
            for (j, byte) in label_buf.iter_mut().enumerate() {
                *byte = sbox.mem_mgr.shared_mem.read::<u8>(label_base + j).unwrap();
            }
            let label_len = label_buf
                .iter()
                .position(|&b| b == 0)
                .unwrap_or(label_buf.len());
            let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
            assert_eq!(stored_label, labels[i], "Entry {i}: label mismatch");
        }

        for path in &paths {
            let _ = std::fs::remove_file(path);
        }
    }
2377
2378 #[test]
2380 fn test_map_file_cow_label_too_long() {
2381 let (path, _) =
2382 create_test_file("hyperlight_test_map_file_cow_long_label.bin", &[0xEE; 4096]);
2383
2384 let guest_base: u64 = 0x1_0000_0000;
2385
2386 let mut u_sbox = UninitializedSandbox::new(
2387 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2388 None,
2389 )
2390 .unwrap();
2391
2392 let long_label = "A".repeat(64);
2394 let result = u_sbox.map_file_cow(&path, guest_base, Some(&long_label));
2395 assert!(
2396 result.is_err(),
2397 "map_file_cow should reject labels longer than 63 bytes"
2398 );
2399
2400 let ok_label = "B".repeat(63);
2402 let result = u_sbox.map_file_cow(&path, guest_base, Some(&ok_label));
2403 assert!(
2404 result.is_ok(),
2405 "map_file_cow should accept labels of exactly 63 bytes"
2406 );
2407
2408 let _ = std::fs::remove_file(&path);
2409 }
2410
2411 #[test]
2413 fn test_map_file_cow_label_null_byte() {
2414 let (path, _) =
2415 create_test_file("hyperlight_test_map_file_cow_null_label.bin", &[0xFF; 4096]);
2416
2417 let guest_base: u64 = 0x1_0000_0000;
2418
2419 let mut u_sbox = UninitializedSandbox::new(
2420 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2421 None,
2422 )
2423 .unwrap();
2424
2425 let result = u_sbox.map_file_cow(&path, guest_base, Some("has\0null"));
2426 assert!(
2427 result.is_err(),
2428 "map_file_cow should reject labels containing null bytes"
2429 );
2430
2431 let _ = std::fs::remove_file(&path);
2432 }
2433
2434 #[test]
2436 fn test_map_file_cow_overlapping_mappings() {
2437 let (path1, _) =
2438 create_test_file("hyperlight_test_map_file_cow_overlap1.bin", &[0xAA; 4096]);
2439 let (path2, _) =
2440 create_test_file("hyperlight_test_map_file_cow_overlap2.bin", &[0xBB; 4096]);
2441
2442 let guest_base: u64 = 0x1_0000_0000;
2443
2444 let mut u_sbox = UninitializedSandbox::new(
2445 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2446 None,
2447 )
2448 .unwrap();
2449
2450 u_sbox.map_file_cow(&path1, guest_base, None).unwrap();
2452
2453 let result = u_sbox.map_file_cow(&path2, guest_base, None);
2455 assert!(
2456 result.is_err(),
2457 "map_file_cow should reject overlapping guest address ranges"
2458 );
2459
2460 let _ = std::fs::remove_file(&path1);
2461 let _ = std::fs::remove_file(&path2);
2462 }
2463
2464 #[test]
2467 fn test_map_file_cow_shared_mem_overlap() {
2468 let (path, _) = create_test_file(
2469 "hyperlight_test_map_file_cow_overlap_shm.bin",
2470 &[0xCC; 4096],
2471 );
2472
2473 let mut u_sbox = UninitializedSandbox::new(
2474 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2475 None,
2476 )
2477 .unwrap();
2478
2479 let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
2481 let result = u_sbox.map_file_cow(&path, base_addr, None);
2483 assert!(
2484 result.is_err(),
2485 "map_file_cow should reject guest_base inside shared memory"
2486 );
2487
2488 let _ = std::fs::remove_file(&path);
2489 }
2490
2491 #[test]
2494 fn test_map_file_cow_max_limit() {
2495 use hyperlight_common::mem::MAX_FILE_MAPPINGS;
2496
2497 let mut u_sbox = UninitializedSandbox::new(
2498 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2499 None,
2500 )
2501 .unwrap();
2502
2503 let page_size = page_size::get() as u64;
2504 let base: u64 = 0x1_0000_0000;
2506
2507 let mut paths = Vec::new();
2510 for i in 0..MAX_FILE_MAPPINGS {
2511 let name = format!("hyperlight_test_max_limit_{}.bin", i);
2512 let (path, _) = create_test_file(&name, &[0xAA; 4096]);
2513 let guest_base = base + (i as u64) * page_size;
2514 u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2515 paths.push(path);
2516 }
2517
2518 let name = format!("hyperlight_test_max_limit_{}.bin", MAX_FILE_MAPPINGS);
2520 let (path, _) = create_test_file(&name, &[0xBB; 4096]);
2521 let guest_base = base + (MAX_FILE_MAPPINGS as u64) * page_size;
2522 let result = u_sbox.map_file_cow(&path, guest_base, None);
2523 assert!(
2524 result.is_err(),
2525 "map_file_cow should reject after MAX_FILE_MAPPINGS registrations"
2526 );
2527
2528 for p in &paths {
2530 let _ = std::fs::remove_file(p);
2531 }
2532 let _ = std::fs::remove_file(&path);
2533 }
2534}