1use std::collections::HashSet;
18#[cfg(unix)]
19use std::os::fd::AsRawFd;
20#[cfg(unix)]
21use std::os::linux::fs::MetadataExt;
22use std::path::Path;
23use std::sync::atomic::{AtomicU64, Ordering};
24use std::sync::{Arc, Mutex};
25
26use flatbuffers::FlatBufferBuilder;
27use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
28use hyperlight_common::flatbuffer_wrappers::function_types::{
29 ParameterValue, ReturnType, ReturnValue,
30};
31use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
32use tracing::{Span, instrument};
33
34use super::Callable;
35use super::host_funcs::FunctionRegistry;
36use super::snapshot::Snapshot;
37use crate::HyperlightError::SnapshotSandboxMismatch;
38use crate::func::guest_err::check_for_guest_error;
39use crate::func::{ParameterTuple, SupportedReturnType};
40use crate::hypervisor::{Hypervisor, InterruptHandle};
41#[cfg(unix)]
42use crate::mem::memory_region::MemoryRegionType;
43use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
44use crate::mem::mgr::SandboxMemoryManager;
45use crate::mem::ptr::RawPtr;
46use crate::mem::shared_mem::HostSharedMemory;
47use crate::metrics::maybe_time_and_emit_guest_call;
48use crate::{Result, log_then_return};
49
/// Monotonic source of unique sandbox ids; each `MultiUseSandbox` takes the
/// next value so a snapshot can later be matched to the sandbox it came from.
static SANDBOX_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
52
/// An initialized sandbox that can repeatedly call guest functions and
/// snapshot/restore its memory state.
pub struct MultiUseSandbox {
    // Unique id assigned at construction; used by `restore` to reject
    // snapshots taken from a different sandbox.
    id: u64,
    // Held to keep registered host functions alive for the sandbox's
    // lifetime; not read directly in this module.
    pub(super) _host_funcs: Arc<Mutex<FunctionRegistry>>,
    // Manager for the host-visible shared memory backing the guest.
    pub(crate) mem_mgr: SandboxMemoryManager<HostSharedMemory>,
    // The underlying virtual machine running the guest.
    vm: Box<dyn Hypervisor>,
    // Guest address of the function-dispatch entry point.
    dispatch_ptr: RawPtr,
    // Memory-manager handle passed to the VM for debugger memory access
    // (gdb builds only).
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    // Most recently taken snapshot, cached while it is still valid; set to
    // `None` whenever guest state may have changed (calls, new mappings).
    snapshot: Option<Snapshot>,
}
71
72impl MultiUseSandbox {
73 #[instrument(skip_all, parent = Span::current(), level = "Trace")]
79 pub(super) fn from_uninit(
80 host_funcs: Arc<Mutex<FunctionRegistry>>,
81 mgr: SandboxMemoryManager<HostSharedMemory>,
82 vm: Box<dyn Hypervisor>,
83 dispatch_ptr: RawPtr,
84 #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
85 ) -> MultiUseSandbox {
86 Self {
87 id: SANDBOX_ID_COUNTER.fetch_add(1, Ordering::Relaxed),
88 _host_funcs: host_funcs,
89 mem_mgr: mgr,
90 vm,
91 dispatch_ptr,
92 #[cfg(gdb)]
93 dbg_mem_access_fn,
94 snapshot: None,
95 }
96 }
97
98 #[instrument(err(Debug), skip_all, parent = Span::current())]
122 pub fn snapshot(&mut self) -> Result<Snapshot> {
123 if let Some(snapshot) = &self.snapshot {
124 return Ok(snapshot.clone());
125 }
126 let mapped_regions_iter = self.vm.get_mapped_regions();
127 let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
128 let memory_snapshot = self.mem_mgr.snapshot(self.id, mapped_regions_vec)?;
129 let inner = Arc::new(memory_snapshot);
130 let snapshot = Snapshot { inner };
131 self.snapshot = Some(snapshot.clone());
132 Ok(snapshot)
133 }
134
135 #[instrument(err(Debug), skip_all, parent = Span::current())]
167 pub fn restore(&mut self, snapshot: &Snapshot) -> Result<()> {
168 if let Some(snap) = &self.snapshot
169 && Arc::ptr_eq(&snap.inner, &snapshot.inner)
170 {
171 return Ok(());
173 }
174
175 if self.id != snapshot.inner.sandbox_id() {
176 return Err(SnapshotSandboxMismatch);
177 }
178
179 self.mem_mgr.restore_snapshot(&snapshot.inner)?;
180
181 let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
182 let snapshot_regions: HashSet<_> = snapshot.inner.regions().iter().cloned().collect();
183
184 let regions_to_unmap = current_regions.difference(&snapshot_regions);
185 let regions_to_map = snapshot_regions.difference(¤t_regions);
186
187 for region in regions_to_unmap {
188 unsafe { self.vm.unmap_region(region)? };
189 }
190
191 for region in regions_to_map {
192 unsafe { self.vm.map_region(region)? };
193 }
194
195 self.snapshot = Some(snapshot.clone());
197
198 Ok(())
199 }
200
201 #[doc(hidden)]
233 #[deprecated(
234 since = "0.8.0",
235 note = "Deprecated in favour of call and snapshot/restore."
236 )]
237 #[instrument(err(Debug), skip(self, args), parent = Span::current())]
238 pub fn call_guest_function_by_name<Output: SupportedReturnType>(
239 &mut self,
240 func_name: &str,
241 args: impl ParameterTuple,
242 ) -> Result<Output> {
243 let snapshot = self.snapshot()?;
244 let res = self.call(func_name, args);
245 self.restore(&snapshot)?;
246 res
247 }
248
249 #[instrument(err(Debug), skip(self, args), parent = Span::current())]
281 pub fn call<Output: SupportedReturnType>(
282 &mut self,
283 func_name: &str,
284 args: impl ParameterTuple,
285 ) -> Result<Output> {
286 self.snapshot = None;
288 maybe_time_and_emit_guest_call(func_name, || {
289 let ret = self.call_guest_function_by_name_no_reset(
290 func_name,
291 Output::TYPE,
292 args.into_value(),
293 );
294 Output::from_value(ret?)
295 })
296 }
297
    /// Maps a host memory region into the guest's address space.
    ///
    /// # Safety
    /// The caller must ensure `rgn` describes valid host memory that remains
    /// alive (and unmodified as appropriate) for the lifetime of the mapping.
    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
        // Stack guard pages are managed internally; caller-supplied mappings
        // must not masquerade as them.
        if rgn.flags.contains(MemoryRegionFlags::STACK_GUARD) {
            log_then_return!("Cannot map host memory as a stack guard page");
        }
        // Only read/execute mappings are currently supported.
        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
            log_then_return!("TODO: Writable mappings not yet supported");
        }
        // The guest memory layout changes, so any cached snapshot is stale.
        self.snapshot = None;
        unsafe { self.vm.map_region(rgn) }?;
        // Track the extra mapping count so snapshots can account for it.
        self.mem_mgr.mapped_rgns += 1;
        Ok(())
    }
327
    /// Maps a file into the guest as a copy-on-write, read+execute region at
    /// guest address `_guest_base`, returning the page-rounded mapped size.
    /// Unsupported on Windows.
    #[instrument(err(Debug), skip(self, _fp, _guest_base), parent = Span::current())]
    pub fn map_file_cow(&mut self, _fp: &Path, _guest_base: u64) -> Result<u64> {
        #[cfg(windows)]
        log_then_return!("mmap'ing a file into the guest is not yet supported on Windows");
        #[cfg(unix)]
        unsafe {
            // NOTE(review): write access is requested here even though
            // MAP_PRIVATE never writes back to the file — confirm whether
            // read-only access would suffice (this currently fails for
            // files the caller cannot open for writing).
            let file = std::fs::File::options().read(true).write(true).open(_fp)?;
            let file_size = file.metadata()?.st_size();
            let page_size = page_size::get();
            // Round the mapping length up to whole pages.
            let size = (file_size as usize).div_ceil(page_size) * page_size;
            // Private (copy-on-write) mapping: guest writes never reach the file.
            let base = libc::mmap(
                std::ptr::null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC,
                libc::MAP_PRIVATE,
                file.as_raw_fd(),
                0,
            );
            if base == libc::MAP_FAILED {
                log_then_return!("mmap error: {:?}", std::io::Error::last_os_error());
            }

            // If mapping into the VM fails, release the host mapping before
            // propagating the error so we don't leak address space.
            if let Err(err) = self.map_region(&MemoryRegion {
                host_region: base as usize..base.wrapping_add(size) as usize,
                guest_region: _guest_base as usize.._guest_base as usize + size,
                flags: MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
                region_type: MemoryRegionType::Heap,
            }) {
                libc::munmap(base, size);
                return Err(err);
            };

            Ok(size as u64)
        }
    }
366
    /// Fuzzing-only entry point: calls a guest function using runtime-typed
    /// arguments and return type instead of the generic typed API.
    #[cfg(feature = "fuzzing")]
    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
    pub fn call_type_erased_guest_function_by_name(
        &mut self,
        func_name: &str,
        ret_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        // The guest may mutate its state, so any cached snapshot is stale.
        self.snapshot = None;
        maybe_time_and_emit_guest_call(func_name, || {
            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
        })
    }
384
    /// Serializes and dispatches a guest function call, returning the raw
    /// result. Does not touch the snapshot cache; callers manage that.
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        // Closure so that any early `?` failure still falls through to the
        // buffer-clearing logic below.
        let res = (|| {
            // Pre-size the flatbuffer builder to avoid growth reallocations.
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            // Place the encoded call where the guest expects it, then transfer
            // control to the guest's dispatch function.
            self.mem_mgr.write_guest_function_call(buffer)?;

            self.vm.dispatch_call_from_host(
                self.dispatch_ptr.clone(),
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            )?;

            // Detect stack overflow and guest-reported errors before trusting
            // the result buffer.
            self.mem_mgr.check_stack_guard()?;
            check_for_guest_error(&mut self.mem_mgr)?;

            self.mem_mgr.get_guest_function_call_result()
        })();

        // On failure the I/O buffers may hold partial data; clear them so the
        // next call starts from a clean state.
        if res.is_err() {
            self.mem_mgr.clear_io_buffers();
        }
        res
    }
428
    /// Returns a handle that can be used to interrupt an in-progress guest
    /// call, e.g. from another thread.
    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
        self.vm.interrupt_handle()
    }
461}
462
463impl Callable for MultiUseSandbox {
464 fn call<Output: SupportedReturnType>(
465 &mut self,
466 func_name: &str,
467 args: impl ParameterTuple,
468 ) -> Result<Output> {
469 self.call(func_name, args)
470 }
471}
472
473impl std::fmt::Debug for MultiUseSandbox {
474 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
475 f.debug_struct("MultiUseSandbox")
476 .field("stack_guard", &self.mem_mgr.get_stack_cookie())
477 .finish()
478 }
479}
480
481#[cfg(test)]
482mod tests {
483 use std::sync::{Arc, Barrier};
484 use std::thread;
485
486 use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
487 use hyperlight_testing::simple_guest_as_string;
488
489 #[cfg(target_os = "linux")]
490 use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
491 #[cfg(target_os = "linux")]
492 use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
493 use crate::sandbox::SandboxConfiguration;
494 use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};
495
    /// A failing guest call must leave the I/O buffers clean so later calls on
    /// the same sandbox still work; iterate to catch any leaked state.
    #[test]
    fn io_buffer_reset() {
        // Small buffers make leftover data from a failed call more likely to
        // break a subsequent call.
        let mut cfg = SandboxConfiguration::default();
        cfg.set_input_data_size(4096);
        cfg.set_output_data_size(4096);
        let path = simple_guest_as_string().unwrap();
        let mut sandbox =
            UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
        sandbox.register("HostAdd", |a: i32, b: i32| a + b).unwrap();
        let mut sandbox = sandbox.evolve().unwrap();

        // Alternate a succeeding call with a deliberately failing one.
        for _ in 0..1000 {
            let result = sandbox.call::<i32>("Add", (5i32, 10i32)).unwrap();
            assert_eq!(result, 15);
            let result = sandbox.call::<i32>("AddToStaticAndFail", ()).unwrap_err();
            assert!(
                matches!(result, HyperlightError::GuestError (code, msg ) if code == ErrorCode::GuestError && msg == "Crash on purpose")
            );
        }
    }
518
    /// The deprecated `call_guest_function_by_name` must restore pre-call
    /// state after each invocation, unlike `call`, whose effects persist.
    #[test]
    fn test_call_guest_function_by_name() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        // Plain `call`: the guest-side static mutation is observable afterwards.
        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        sbox.restore(&snapshot).unwrap();
        // Deprecated API: the sandbox auto-restores, so the static stays 0.
        #[allow(deprecated)]
        let _ = sbox
            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
            .unwrap();
        #[allow(deprecated)]
        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }
544
    /// Sandboxes configured with minimal heap/stack sizes must survive many
    /// repeated guest calls without exhausting guest memory.
    #[test]
    fn test_with_small_stack_and_heap() {
        let mut cfg = SandboxConfiguration::default();
        cfg.set_heap_size(20 * 1024);
        cfg.set_stack_size(16 * 1024);

        let mut sbox1: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        // String round-trips exercise the guest allocator under the small heap.
        for _ in 0..1000 {
            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
        }

        let mut sbox2: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        // printf-style calls exercise a different guest code path.
        for i in 0..1000 {
            sbox2
                .call::<i32>(
                    "PrintUsingPrintf",
                    format!("Hello World {}\n", i).to_string(),
                )
                .unwrap();
        }
    }
580
    /// Restoring a snapshot must roll back guest-visible state mutated by
    /// intervening calls.
    #[test]
    fn snapshot_evolve_restore_handles_state_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();

        let snapshot = sbox.snapshot().unwrap();

        // Mutate guest state after taking the snapshot.
        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();

        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 5);

        // Restore must discard the mutation.
        sbox.restore(&snapshot).unwrap();
        let res: i32 = sbox.call("GetStatic", ()).unwrap();
        assert_eq!(res, 0);
    }
603
604 #[test]
605 #[ignore]
607 #[cfg(target_os = "linux")]
608 fn test_violate_seccomp_filters() -> Result<()> {
609 fn make_get_pid_syscall() -> Result<u64> {
610 let pid = unsafe { libc::syscall(libc::SYS_getpid) };
611 Ok(pid as u64)
612 }
613
614 {
616 let mut usbox = UninitializedSandbox::new(
617 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
618 None,
619 )
620 .unwrap();
621
622 usbox.register("MakeGetpidSyscall", make_get_pid_syscall)?;
623
624 let mut sbox: MultiUseSandbox = usbox.evolve()?;
625
626 let res: Result<u64> = sbox.call("ViolateSeccompFilters", ());
627
628 #[cfg(seccomp)]
629 match res {
630 Ok(_) => panic!("Expected to fail due to seccomp violation"),
631 Err(e) => match e {
632 HyperlightError::DisallowedSyscall => {}
633 _ => panic!("Expected DisallowedSyscall error: {}", e),
634 },
635 }
636
637 #[cfg(not(seccomp))]
638 match res {
639 Ok(_) => (),
640 Err(e) => panic!("Expected to succeed without seccomp: {}", e),
641 }
642 }
643
644 #[cfg(seccomp)]
646 {
647 let mut usbox = UninitializedSandbox::new(
648 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
649 None,
650 )
651 .unwrap();
652
653 usbox.register_with_extra_allowed_syscalls(
654 "MakeGetpidSyscall",
655 make_get_pid_syscall,
656 vec![libc::SYS_getpid],
657 )?;
658 let mut sbox: MultiUseSandbox = usbox.evolve()?;
661
662 let res: Result<u64> = sbox.call("ViolateSeccompFilters", ());
663
664 match res {
665 Ok(_) => {}
666 Err(e) => panic!("Expected to succeed due to seccomp violation: {}", e),
667 }
668 }
669
670 Ok(())
671 }
672
    /// Under seccomp, an unlisted `openat` from a host function must be denied
    /// (surfacing as -EACCES), while allow-listing `SYS_openat` lets it pass.
    #[test]
    #[cfg(target_os = "linux")]
    fn violate_seccomp_filters_openat() -> Result<()> {
        // Returns the fd on success, or the negated errno on failure, so the
        // guest-visible i64 encodes both outcomes.
        fn make_openat_syscall() -> Result<i64> {
            use std::ffi::CString;

            let path = CString::new("/proc/sys/vm/overcommit_memory").unwrap();

            let fd_or_err = unsafe {
                libc::syscall(
                    libc::SYS_openat,
                    libc::AT_FDCWD,
                    path.as_ptr(),
                    libc::O_RDONLY,
                )
            };

            if fd_or_err == -1 {
                Ok((-std::io::Error::last_os_error().raw_os_error().unwrap()).into())
            } else {
                Ok(fd_or_err)
            }
        }
        {
            // Sanity check: outside the sandbox the syscall itself works.
            let ret = make_openat_syscall()?;
            assert!(
                ret >= 0,
                "Expected openat syscall to succeed, got: {:?}",
                ret
            );

            let mut ubox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();
            ubox.register("Openat_Hostfunc", make_openat_syscall)?;

            let mut sbox = ubox.evolve().unwrap();
            let host_func_result = sbox
                .call::<i64>(
                    "CallGivenParamlessHostFuncThatReturnsI64",
                    "Openat_Hostfunc".to_string(),
                )
                .expect("Expected to call host function that returns i64");

            if cfg!(seccomp) {
                // Seccomp blocks the unlisted syscall; the helper reports -EACCES.
                assert_eq!(host_func_result, -libc::EACCES as i64);
            } else {
                // Without seccomp the openat succeeds and returns a valid fd.
                assert!(host_func_result >= 0);
            }
        }

        // With SYS_openat explicitly allow-listed, the call must succeed even
        // under seccomp.
        #[cfg(seccomp)]
        {
            let mut ubox = UninitializedSandbox::new(
                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                None,
            )
            .unwrap();
            ubox.register_with_extra_allowed_syscalls(
                "Openat_Hostfunc",
                make_openat_syscall,
                [libc::SYS_openat],
            )?;
            let mut sbox = ubox.evolve().unwrap();
            let host_func_result: i64 = sbox
                .call::<i64>(
                    "CallGivenParamlessHostFuncThatReturnsI64",
                    "Openat_Hostfunc".to_string(),
                )
                .expect("Expected to call host function that returns i64");

            assert!(host_func_result >= 0);
        }

        Ok(())
    }
758
759 #[test]
760 fn test_trigger_exception_on_guest() {
761 let usbox = UninitializedSandbox::new(
762 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
763 None,
764 )
765 .unwrap();
766
767 let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
768
769 let res: Result<()> = multi_use_sandbox.call("TriggerException", ());
770
771 assert!(res.is_err());
772
773 match res.unwrap_err() {
774 HyperlightError::GuestAborted(_, msg) => {
775 assert!(msg.contains("InvalidOpcode"));
777 }
778 e => panic!(
779 "Expected HyperlightError::GuestExecutionError but got {:?}",
780 e
781 ),
782 }
783 }
784
785 #[test]
786 #[ignore] fn create_1000_sandboxes() {
788 let barrier = Arc::new(Barrier::new(21));
789
790 let mut handles = vec![];
791
792 for _ in 0..20 {
793 let c = barrier.clone();
794
795 let handle = thread::spawn(move || {
796 c.wait();
797
798 for _ in 0..50 {
799 let usbox = UninitializedSandbox::new(
800 GuestBinary::FilePath(
801 simple_guest_as_string().expect("Guest Binary Missing"),
802 ),
803 None,
804 )
805 .unwrap();
806
807 let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
808
809 let res: i32 = multi_use_sandbox.call("GetStatic", ()).unwrap();
810
811 assert_eq!(res, 0);
812 }
813 });
814
815 handles.push(handle);
816 }
817
818 barrier.wait();
819
820 for handle in handles {
821 handle.join().unwrap();
822 }
823 }
824
825 #[cfg(target_os = "linux")]
826 #[test]
827 fn test_mmap() {
828 let mut sbox = UninitializedSandbox::new(
829 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
830 None,
831 )
832 .unwrap()
833 .evolve()
834 .unwrap();
835
836 let expected = b"hello world";
837 let map_mem = page_aligned_memory(expected);
838 let guest_base = 0x1_0000_0000; unsafe {
841 sbox.map_region(®ion_for_memory(
842 &map_mem,
843 guest_base,
844 MemoryRegionFlags::READ,
845 ))
846 .unwrap();
847 }
848
849 let _guard = map_mem.lock.try_read().unwrap();
850 let actual: Vec<u8> = sbox
851 .call(
852 "ReadMappedBuffer",
853 (guest_base as u64, expected.len() as u64),
854 )
855 .unwrap();
856
857 assert_eq!(actual, expected);
858 }
859
860 #[cfg(target_os = "linux")]
862 #[test]
863 fn test_mmap_write_exec() {
864 let mut sbox = UninitializedSandbox::new(
865 GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
866 None,
867 )
868 .unwrap()
869 .evolve()
870 .unwrap();
871
872 let expected = &[0x90, 0x90, 0x90, 0xC3]; let map_mem = page_aligned_memory(expected);
874 let guest_base = 0x1_0000_0000; unsafe {
877 sbox.map_region(®ion_for_memory(
878 &map_mem,
879 guest_base,
880 MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
881 ))
882 .unwrap();
883 }
884
885 let _guard = map_mem.lock.try_read().unwrap();
886
887 let succeed = sbox
889 .call::<bool>(
890 "ExecMappedBuffer",
891 (guest_base as u64, expected.len() as u64),
892 )
893 .unwrap();
894 assert!(succeed, "Expected execution of mapped buffer to succeed");
895
896 let err = sbox
898 .call::<bool>(
899 "WriteMappedBuffer",
900 (guest_base as u64, expected.len() as u64),
901 )
902 .unwrap_err();
903
904 match err {
905 HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
906 _ => panic!("Expected MemoryAccessViolation error"),
907 };
908 }
909
910 #[cfg(target_os = "linux")]
911 fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
912 use hyperlight_common::mem::PAGE_SIZE_USIZE;
913
914 let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
915
916 let mut mem = ExclusiveSharedMemory::new(len).unwrap();
917 mem.copy_from_slice(src, 0).unwrap();
918
919 let (_, guest_mem) = mem.build();
920
921 guest_mem
922 }
923
924 #[cfg(target_os = "linux")]
925 fn region_for_memory(
926 mem: &GuestSharedMemory,
927 guest_base: usize,
928 flags: MemoryRegionFlags,
929 ) -> MemoryRegion {
930 let ptr = mem.base_addr();
931 let len = mem.mem_size();
932 MemoryRegion {
933 host_region: ptr..(ptr + len),
934 guest_region: guest_base..(guest_base + len),
935 flags,
936 region_type: MemoryRegionType::Heap,
937 }
938 }
939
940 #[cfg(target_os = "linux")]
941 fn allocate_guest_memory() -> GuestSharedMemory {
942 page_aligned_memory(b"test data for snapshot")
943 }
944
945 #[test]
946 #[cfg(target_os = "linux")]
947 fn snapshot_restore_handles_remapping_correctly() {
948 let mut sbox: MultiUseSandbox = {
949 let path = simple_guest_as_string().unwrap();
950 let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
951 u_sbox.evolve().unwrap()
952 };
953
954 let snapshot1 = sbox.snapshot().unwrap();
956 assert_eq!(sbox.vm.get_mapped_regions().len(), 0);
957
958 let map_mem = allocate_guest_memory();
960 let guest_base = 0x200000000_usize;
961 let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
962
963 unsafe { sbox.map_region(®ion).unwrap() };
964 assert_eq!(sbox.vm.get_mapped_regions().len(), 1);
965
966 let snapshot2 = sbox.snapshot().unwrap();
968 assert_eq!(sbox.vm.get_mapped_regions().len(), 1);
969
970 sbox.restore(&snapshot1).unwrap();
972 assert_eq!(sbox.vm.get_mapped_regions().len(), 0);
973
974 sbox.restore(&snapshot2).unwrap();
976 assert_eq!(sbox.vm.get_mapped_regions().len(), 1);
977
978 let mut restored_regions = sbox.vm.get_mapped_regions();
980 assert_eq!(*restored_regions.next().unwrap(), region);
981 assert!(restored_regions.next().is_none());
982 drop(restored_regions);
983
984 let err = unsafe { sbox.map_region(®ion) };
986 assert!(
987 err.is_err(),
988 "Expected error when remapping existing region: {:?}",
989 err
990 );
991 }
992
    /// A snapshot must only be restorable into the sandbox that produced it,
    /// and ids must stay unique even after sandboxes are dropped.
    #[test]
    fn snapshot_different_sandbox() {
        let mut sandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        let mut sandbox2 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox.id, sandbox2.id);

        // Restoring sandbox 1's snapshot into sandbox 2 must be rejected.
        let snapshot = sandbox.snapshot().unwrap();
        let err = sandbox2.restore(&snapshot);
        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));

        let sandbox_id = sandbox.id;
        drop(sandbox);
        drop(sandbox2);
        drop(snapshot);

        // Ids come from a monotonic counter, so they are never reused even
        // after the original sandbox is gone.
        let sandbox3 = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };
        assert_ne!(sandbox3.id, sandbox_id);
    }
1024}