use std::collections::HashSet;
#[cfg(unix)]
use std::os::fd::AsRawFd;
#[cfg(unix)]
use std::os::linux::fs::MetadataExt;
use std::path::Path;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};

use flatbuffers::FlatBufferBuilder;
use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
use hyperlight_common::flatbuffer_wrappers::function_types::{
    ParameterValue, ReturnType, ReturnValue,
};
use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
use tracing::{Span, instrument};

use super::host_funcs::FunctionRegistry;
use super::snapshot::Snapshot;
use super::{Callable, WrapperGetter};
use crate::HyperlightError::SnapshotSandboxMismatch;
use crate::func::guest_err::check_for_guest_error;
use crate::func::{ParameterTuple, SupportedReturnType};
use crate::hypervisor::{Hypervisor, InterruptHandle};
#[cfg(unix)]
use crate::mem::memory_region::MemoryRegionType;
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
use crate::mem::ptr::RawPtr;
use crate::mem::shared_mem::HostSharedMemory;
use crate::metrics::maybe_time_and_emit_guest_call;
use crate::sandbox::mem_mgr::MemMgrWrapper;
use crate::{Result, log_then_return};

static SANDBOX_ID_COUNTER: AtomicU64 = AtomicU64::new(0);

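/// A sandbox which can be used to make multiple guest function calls and whose
/// guest memory state can be captured with [`snapshot`](Self::snapshot) and
/// rolled back with [`restore`](Self::restore).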
pub struct MultiUseSandbox {
    id: u64,
    pub(super) _host_funcs: Arc<Mutex<FunctionRegistry>>,
    pub(crate) mem_mgr: MemMgrWrapper<HostSharedMemory>,
    vm: Box<dyn Hypervisor>,
    dispatch_ptr: RawPtr,
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<MemMgrWrapper<HostSharedMemory>>>,
    snapshot: Option<Snapshot>,
}

impl MultiUseSandbox {
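    /// Builds a `MultiUseSandbox` from its constituent parts, assigning it a
    /// fresh id from the global sandbox id counter.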
    #[instrument(skip_all, parent = Span::current(), level = "Trace")]
    pub(super) fn from_uninit(
        host_funcs: Arc<Mutex<FunctionRegistry>>,
        mgr: MemMgrWrapper<HostSharedMemory>,
        vm: Box<dyn Hypervisor>,
        dispatch_ptr: RawPtr,
        #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<MemMgrWrapper<HostSharedMemory>>>,
    ) -> MultiUseSandbox {
        Self {
            id: SANDBOX_ID_COUNTER.fetch_add(1, Ordering::Relaxed),
            _host_funcs: host_funcs,
            mem_mgr: mgr,
            vm,
            dispatch_ptr,
            #[cfg(gdb)]
            dbg_mem_access_fn,
            snapshot: None,
        }
    }

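    /// Captures a snapshot of the sandbox's current guest memory, including any
    /// additionally mapped regions. The snapshot is cached on the sandbox and
    /// reused until guest state changes (a guest call or a new mapping).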
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn snapshot(&mut self) -> Result<Snapshot> {
        if let Some(snapshot) = &self.snapshot {
            return Ok(snapshot.clone());
        }
        let mapped_regions_iter = self.vm.get_mapped_regions();
        let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
        let memory_snapshot = self
            .mem_mgr
            .unwrap_mgr_mut()
            .snapshot(self.id, mapped_regions_vec)?;
        let inner = Arc::new(memory_snapshot);
        let snapshot = Snapshot { inner };
        self.snapshot = Some(snapshot.clone());
        Ok(snapshot)
    }

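    /// Restores the sandbox to the state captured in `snapshot`.
    ///
    /// Returns `SnapshotSandboxMismatch` if the snapshot was taken from a
    /// different sandbox. Regions mapped since the snapshot was taken are
    /// unmapped, and regions recorded in the snapshot but not currently mapped
    /// are mapped again.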
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn restore(&mut self, snapshot: &Snapshot) -> Result<()> {
        if let Some(snap) = &self.snapshot {
            if Arc::ptr_eq(&snap.inner, &snapshot.inner) {
                return Ok(());
            }
        }

        if self.id != snapshot.inner.sandbox_id() {
            return Err(SnapshotSandboxMismatch);
        }

        self.mem_mgr
            .unwrap_mgr_mut()
            .restore_snapshot(&snapshot.inner)?;

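        // Reconcile the VM's currently mapped regions with the set recorded in
        // the snapshot: unmap what is extra, map what is missing.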
        let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
        let snapshot_regions: HashSet<_> = snapshot.inner.regions().iter().cloned().collect();

        let regions_to_unmap = current_regions.difference(&snapshot_regions);
        let regions_to_map = snapshot_regions.difference(&current_regions);

        for region in regions_to_unmap {
            unsafe { self.vm.unmap_region(region)? };
        }

        for region in regions_to_map {
            unsafe { self.vm.map_region(region)? };
        }

        self.snapshot = Some(snapshot.clone());

        Ok(())
    }

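    /// Calls a guest function and then restores the sandbox to the state it was
    /// in before the call, by snapshotting first and restoring afterwards.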
    #[doc(hidden)]
    #[deprecated(
        since = "0.8.0",
        note = "Deprecated in favour of call and snapshot/restore."
    )]
    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
    pub fn call_guest_function_by_name<Output: SupportedReturnType>(
        &mut self,
        func_name: &str,
        args: impl ParameterTuple,
    ) -> Result<Output> {
        let snapshot = self.snapshot()?;
        let res = self.call(func_name, args);
        self.restore(&snapshot)?;
        res
    }

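    /// Calls the guest function `func_name` with the given arguments and returns
    /// its result. Any cached snapshot is invalidated, since the call may mutate
    /// guest state.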
    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
    pub fn call<Output: SupportedReturnType>(
        &mut self,
        func_name: &str,
        args: impl ParameterTuple,
    ) -> Result<Output> {
        self.snapshot = None;
        maybe_time_and_emit_guest_call(func_name, || {
            let ret = self.call_guest_function_by_name_no_reset(
                func_name,
                Output::TYPE,
                args.into_value(),
            );
            Output::from_value(ret?)
        })
    }

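    /// Maps a region of host memory into the guest address space. Stack-guard
    /// regions and writable mappings are rejected, and any cached snapshot is
    /// invalidated.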
    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
        if rgn.flags.contains(MemoryRegionFlags::STACK_GUARD) {
            log_then_return!("Cannot map host memory as a stack guard page");
        }
        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
            log_then_return!("TODO: Writable mappings not yet supported");
        }
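        // The guest address space is about to change, so drop any cached snapshot.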
        self.snapshot = None;
        unsafe { self.vm.map_region(rgn) }?;
        self.mem_mgr.unwrap_mgr_mut().mapped_rgns += 1;
        Ok(())
    }

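    /// Memory-maps the file at `_fp` into the guest at `_guest_base` as a
    /// private (copy-on-write) read/execute mapping and returns the mapped size
    /// in bytes, rounded up to a whole number of pages. Not supported on Windows.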
    #[allow(dead_code)]
    #[instrument(err(Debug), skip(self, _fp, _guest_base), parent = Span::current())]
    pub fn map_file_cow(&mut self, _fp: &Path, _guest_base: u64) -> Result<u64> {
        #[cfg(windows)]
        log_then_return!("mmap'ing a file into the guest is not yet supported on Windows");
        #[cfg(unix)]
        unsafe {
            let file = std::fs::File::options().read(true).write(true).open(_fp)?;
            let file_size = file.metadata()?.st_size();
            let page_size = page_size::get();
            let size = (file_size as usize).div_ceil(page_size) * page_size;
            let base = libc::mmap(
                std::ptr::null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC,
                libc::MAP_PRIVATE,
                file.as_raw_fd(),
                0,
            );
            if base == libc::MAP_FAILED {
                log_then_return!("mmap error: {:?}", std::io::Error::last_os_error());
            }

            if let Err(err) = self.map_region(&MemoryRegion {
                host_region: base as usize..base.wrapping_add(size) as usize,
                guest_region: _guest_base as usize.._guest_base as usize + size,
                flags: MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
                region_type: MemoryRegionType::Heap,
            }) {
                libc::munmap(base, size);
                return Err(err);
            };

            Ok(size as u64)
        }
    }

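    /// Calls a guest function using type-erased parameters and return value.
    /// Only compiled when the `fuzzing` feature is enabled.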
    #[cfg(feature = "fuzzing")]
    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
    pub fn call_type_erased_guest_function_by_name(
        &mut self,
        func_name: &str,
        ret_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        self.snapshot = None;
        maybe_time_and_emit_guest_call(func_name, || {
            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
        })
    }

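    /// Serializes a guest function call into guest memory, dispatches it via the
    /// hypervisor, checks the stack guard and guest error state, and reads the
    /// result back. Does not touch the cached snapshot.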
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        let res = (|| {
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            self.get_mgr_wrapper_mut()
                .as_mut()
                .write_guest_function_call(buffer)?;

            self.vm.dispatch_call_from_host(
                self.dispatch_ptr.clone(),
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            )?;

            self.mem_mgr.check_stack_guard()?;
            check_for_guest_error(self.get_mgr_wrapper_mut())?;

            self.get_mgr_wrapper_mut()
                .as_mut()
                .get_guest_function_call_result()
        })();

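        // Clear any leftover IO buffers regardless of whether the call succeeded.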
        self.get_mgr_wrapper_mut().as_mut().clear_io_buffers();

        res
    }

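    /// Returns a handle that can be used to interrupt guest execution in this
    /// sandbox.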
    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
        self.vm.interrupt_handle()
    }
}

impl Callable for MultiUseSandbox {
    fn call<Output: SupportedReturnType>(
        &mut self,
        func_name: &str,
        args: impl ParameterTuple,
    ) -> Result<Output> {
        self.call(func_name, args)
    }
}

impl WrapperGetter for MultiUseSandbox {
    fn get_mgr_wrapper(&self) -> &MemMgrWrapper<HostSharedMemory> {
        &self.mem_mgr
    }
    fn get_mgr_wrapper_mut(&mut self) -> &mut MemMgrWrapper<HostSharedMemory> {
        &mut self.mem_mgr
    }
}

impl std::fmt::Debug for MultiUseSandbox {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MultiUseSandbox")
            .field("stack_guard", &self.mem_mgr.get_stack_cookie())
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::{Arc, Barrier};
    use std::thread;

    use hyperlight_testing::simple_guest_as_string;

    #[cfg(target_os = "linux")]
    use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
    #[cfg(target_os = "linux")]
    use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
    use crate::sandbox::SandboxConfiguration;
    use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};

    #[test]
    fn test_call_guest_function_by_name() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        sbox.restore(&snapshot).unwrap();
        #[allow(deprecated)]
        let _ = sbox
            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
            .unwrap();
        #[allow(deprecated)]
        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }

    #[test]
    fn test_with_small_stack_and_heap() {
        let mut cfg = SandboxConfiguration::default();
        cfg.set_heap_size(20 * 1024);
        cfg.set_stack_size(16 * 1024);

        let mut sbox1: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        for _ in 0..1000 {
            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
        }

        let mut sbox2: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        for i in 0..1000 {
            sbox2
                .call::<i32>(
                    "PrintUsingPrintf",
                    format!("Hello World {}\n", i).to_string(),
                )
                .unwrap();
        }
    }

    #[test]
    fn snapshot_evolve_restore_handles_state_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();

        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        sbox.restore(&snapshot).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }

    #[test]
    #[ignore]
    #[cfg(target_os = "linux")]
    fn test_violate_seccomp_filters() -> Result<()> {
        fn make_get_pid_syscall() -> Result<u64> {
            let pid = unsafe { libc::syscall(libc::SYS_getpid) };
            Ok(pid as u64)
        }

        {
            let mut usbox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();

            usbox.register("MakeGetpidSyscall", make_get_pid_syscall)?;

            let mut sbox: MultiUseSandbox = usbox.evolve()?;

            let res: Result<u64> = sbox.call("ViolateSeccompFilters", ());

            #[cfg(feature = "seccomp")]
            match res {
                Ok(_) => panic!("Expected to fail due to seccomp violation"),
                Err(e) => match e {
                    HyperlightError::DisallowedSyscall => {}
                    _ => panic!("Expected DisallowedSyscall error: {}", e),
                },
            }

            #[cfg(not(feature = "seccomp"))]
            match res {
                Ok(_) => (),
                Err(e) => panic!("Expected to succeed without seccomp: {}", e),
            }
        }

        #[cfg(feature = "seccomp")]
        {
            let mut usbox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();

            usbox.register_with_extra_allowed_syscalls(
                "MakeGetpidSyscall",
                make_get_pid_syscall,
                vec![libc::SYS_getpid],
            )?;
            let mut sbox: MultiUseSandbox = usbox.evolve()?;

            let res: Result<u64> = sbox.call("ViolateSeccompFilters", ());

            match res {
                Ok(_) => {}
                Err(e) => panic!("Expected to succeed since SYS_getpid is allowed: {}", e),
            }
        }

        Ok(())
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn violate_seccomp_filters_openat() -> Result<()> {
        fn make_openat_syscall() -> Result<i64> {
            use std::ffi::CString;

            let path = CString::new("/proc/sys/vm/overcommit_memory").unwrap();

            let fd_or_err = unsafe {
                libc::syscall(
                    libc::SYS_openat,
                    libc::AT_FDCWD,
                    path.as_ptr(),
                    libc::O_RDONLY,
                )
            };

            if fd_or_err == -1 {
                Ok((-std::io::Error::last_os_error().raw_os_error().unwrap()).into())
            } else {
                Ok(fd_or_err)
            }
        }
        {
            let ret = make_openat_syscall()?;
            assert!(
                ret >= 0,
                "Expected openat syscall to succeed, got: {:?}",
                ret
            );

            let mut ubox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();
            ubox.register("Openat_Hostfunc", make_openat_syscall)?;

            let mut sbox = ubox.evolve().unwrap();
            let host_func_result = sbox
                .call::<i64>(
                    "CallGivenParamlessHostFuncThatReturnsI64",
                    "Openat_Hostfunc".to_string(),
                )
                .expect("Expected to call host function that returns i64");

            if cfg!(feature = "seccomp") {
                assert_eq!(host_func_result, -libc::EACCES as i64);
            } else {
                assert!(host_func_result >= 0);
            }
        }

        #[cfg(feature = "seccomp")]
        {
            let mut ubox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();
            ubox.register_with_extra_allowed_syscalls(
                "Openat_Hostfunc",
                make_openat_syscall,
                [libc::SYS_openat],
            )?;
            let mut sbox = ubox.evolve().unwrap();
            let host_func_result: i64 = sbox
                .call::<i64>(
                    "CallGivenParamlessHostFuncThatReturnsI64",
                    "Openat_Hostfunc".to_string(),
                )
                .expect("Expected to call host function that returns i64");

            assert!(host_func_result >= 0);
        }

        Ok(())
    }

    #[test]
    fn test_trigger_exception_on_guest() {
        let usbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap();

        let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();

        let res: Result<()> = multi_use_sandbox.call("TriggerException", ());

        assert!(res.is_err());

        match res.unwrap_err() {
            HyperlightError::GuestAborted(_, msg) => {
                assert!(msg.contains("InvalidOpcode"));
            }
            e => panic!(
                "Expected HyperlightError::GuestAborted but got {:?}",
                e
            ),
        }
    }

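    // Spawns 20 threads that each create 50 sandboxes (1000 total) and make a
    // guest call in each one.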
    #[test]
    #[ignore]
    fn create_1000_sandboxes() {
        let barrier = Arc::new(Barrier::new(21));

        let mut handles = vec![];

        for _ in 0..20 {
            let c = barrier.clone();

            let handle = thread::spawn(move || {
                c.wait();

                for _ in 0..50 {
                    let usbox = UninitializedSandbox::new(
                        GuestBinary::FilePath(
                            simple_guest_as_string().expect("Guest Binary Missing"),
                        ),
                        None,
                    )
                    .unwrap();

                    let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();

                    let res: i32 = multi_use_sandbox.call("GetStatic", ()).unwrap();

                    assert_eq!(res, 0);
                }
            });

            handles.push(handle);
        }

        barrier.wait();

        for handle in handles {
            handle.join().unwrap();
        }
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn test_mmap() {
        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        let expected = b"hello world";
        let map_mem = page_aligned_memory(expected);
        let guest_base = 0x1_0000_0000;
        unsafe {
            sbox.map_region(&region_for_memory(
                &map_mem,
                guest_base,
                MemoryRegionFlags::READ,
            ))
            .unwrap();
        }

        let _guard = map_mem.lock.try_read().unwrap();
        let actual: Vec<u8> = sbox
            .call(
                "ReadMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap();

        assert_eq!(actual, expected);
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn test_mmap_write_exec() {
        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

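        // Three x86-64 NOP instructions followed by RET: a tiny routine the guest can execute.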
        let expected = &[0x90, 0x90, 0x90, 0xC3];
        let map_mem = page_aligned_memory(expected);
        let guest_base = 0x1_0000_0000;
        unsafe {
            sbox.map_region(&region_for_memory(
                &map_mem,
                guest_base,
                MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
            ))
            .unwrap();
        }

        let _guard = map_mem.lock.try_read().unwrap();

        let succeed = sbox
            .call::<bool>(
                "ExecMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap();
        assert!(succeed, "Expected execution of mapped buffer to succeed");

        let err = sbox
            .call::<bool>(
                "WriteMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap_err();

        match err {
            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
            _ => panic!("Expected MemoryAccessViolation error"),
        };
    }

    #[cfg(target_os = "linux")]
    fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
        use hyperlight_common::mem::PAGE_SIZE_USIZE;

        let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;

        let mut mem = ExclusiveSharedMemory::new(len).unwrap();
        mem.copy_from_slice(src, 0).unwrap();

        let (_, guest_mem) = mem.build();

        guest_mem
    }

    #[cfg(target_os = "linux")]
    fn region_for_memory(
        mem: &GuestSharedMemory,
        guest_base: usize,
        flags: MemoryRegionFlags,
    ) -> MemoryRegion {
        let ptr = mem.base_addr();
        let len = mem.mem_size();
        MemoryRegion {
            host_region: ptr..(ptr + len),
            guest_region: guest_base..(guest_base + len),
            flags,
            region_type: MemoryRegionType::Heap,
        }
    }

    #[cfg(target_os = "linux")]
    fn allocate_guest_memory() -> GuestSharedMemory {
        page_aligned_memory(b"test data for snapshot")
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn snapshot_restore_handles_remapping_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        let snapshot1 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().len(), 0);

        let map_mem = allocate_guest_memory();
        let guest_base = 0x200000000_usize;
        let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);

        unsafe { sbox.map_region(&region).unwrap() };
        assert_eq!(sbox.vm.get_mapped_regions().len(), 1);

        let snapshot2 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().len(), 1);

        sbox.restore(&snapshot1).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().len(), 0);

        sbox.restore(&snapshot2).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().len(), 1);

        let mut restored_regions = sbox.vm.get_mapped_regions();
        assert_eq!(*restored_regions.next().unwrap(), region);
        assert!(restored_regions.next().is_none());
        drop(restored_regions);

        let err = unsafe { sbox.map_region(&region) };
        assert!(
            err.is_err(),
            "Expected error when remapping existing region: {:?}",
            err
        );
    }

    #[test]
    fn snapshot_different_sandbox() {
        let mut sandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        let mut sandbox2 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox.id, sandbox2.id);

        let snapshot = sandbox.snapshot().unwrap();
        let err = sandbox2.restore(&snapshot);
        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));

        let sandbox_id = sandbox.id;
        drop(sandbox);
        drop(sandbox2);
        drop(snapshot);

        let sandbox3 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox3.id, sandbox_id);
    }
}