// hyperlight_host/src/sandbox/snapshot.rs
/*
Copyright 2025 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
16
use std::sync::atomic::{AtomicU64, Ordering};

use hyperlight_common::layout::{scratch_base_gpa, scratch_base_gva};
use hyperlight_common::vmem::{self, BasicMapping, CowMapping, Mapping, MappingKind, PAGE_SIZE};
use tracing::{Span, instrument};

use crate::HyperlightError::MemoryRegionSizeMismatch;
use crate::Result;
use crate::hypervisor::regs::CommonSpecialRegisters;
use crate::mem::exe::LoadInfo;
use crate::mem::layout::SandboxMemoryLayout;
use crate::mem::memory_region::MemoryRegion;
use crate::mem::mgr::{GuestPageTableBuffer, SnapshotSharedMemory};
use crate::mem::shared_mem::{ReadonlySharedMemory, SharedMemory};
use crate::sandbox::SandboxConfiguration;
use crate::sandbox::uninitialized::{GuestBinary, GuestEnvironment};
33
/// Process-wide counter used to mint a fresh `sandbox_id` for each
/// snapshot created via [`Snapshot::from_env`] (incremented with
/// `fetch_add(1, Ordering::Relaxed)` there).
pub(super) static SANDBOX_CONFIGURATION_COUNTER: AtomicU64 = AtomicU64::new(0);
35
/// Presently, a snapshot can be of a preinitialised sandbox, which
/// still needs an initialise function called in order to determine
/// how to call into it, or of an already-properly-initialised sandbox
/// which can be immediately called into. This keeps track of the
/// difference.
///
/// TODO: this should not necessarily be around in the long term:
/// ideally we would just preinitialise earlier in the snapshot
/// creation process and never need this.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum NextAction {
    /// A sandbox in the preinitialise state still needs to be
    /// initialised by calling the initialise function; the payload is
    /// the guest address of that function (entrypoint).
    Initialise(u64),
    /// A sandbox in the ready state can immediately be called into,
    /// using the dispatch function pointer.
    Call(u64),
    /// Only when compiling for tests: a sandbox that cannot actually
    /// be used
    #[cfg(test)]
    None,
}
58
/// A wrapper around a `SharedMemory` reference and a snapshot
/// of the memory therein
pub struct Snapshot {
    /// Unique ID of the sandbox configuration for sandboxes where
    /// this snapshot may be restored.
    sandbox_id: u64,
    /// Layout object for the sandbox. TODO: get rid of this and
    /// replace with something saner and set up from the guest (early
    /// on?).
    ///
    /// Not checked on restore, since any sandbox with the same
    /// configuration id will share the same layout
    layout: crate::mem::layout::SandboxMemoryLayout,
    /// Memory of the sandbox at the time this snapshot was taken
    memory: ReadonlySharedMemory,
    /// The memory regions that were mapped when this snapshot was
    /// taken (excluding initial sandbox regions)
    regions: Vec<MemoryRegion>,
    /// Extra debug information about the binary in this snapshot,
    /// from when the binary was first loaded into the snapshot.
    ///
    /// This information is provided on a best-effort basis, and there
    /// is a pretty good chance that it does not exist; generally speaking,
    /// things like persisting a snapshot and reloading it are likely
    /// to destroy this information.
    load_info: LoadInfo,
    /// The hash of the other portions of the snapshot. Morally, this
    /// is just a memoization cache for [`hash`], below, but it is not
    /// a [`std::sync::OnceLock`] because it may be persisted to disk
    /// without being recomputed on load.
    ///
    /// It is not a [`blake3::Hash`] because we do not presently
    /// require constant-time equality checking
    hash: [u8; 32],
    /// The address of the top of the guest stack
    stack_top_gva: u64,

    /// Special register state captured from the vCPU during snapshot.
    /// None for snapshots created directly from a binary (before
    /// guest runs).  Some for snapshots taken from a running sandbox.
    /// Note: CR3 in this struct is NOT used on restore, since page
    /// tables are relocated during snapshot.
    sregs: Option<CommonSpecialRegisters>,

    /// The next action that should be performed on this snapshot
    entrypoint: NextAction,
}
// Identity `AsRef` impl so generic code bounded on `AsRef<Snapshot>`
// can accept a `Snapshot` (or `&Snapshot`) directly.
impl core::convert::AsRef<Snapshot> for Snapshot {
    fn as_ref(&self) -> &Self {
        self
    }
}
111impl hyperlight_common::vmem::TableReadOps for Snapshot {
112    type TableAddr = u64;
113    fn entry_addr(addr: u64, offset: u64) -> u64 {
114        addr + offset
115    }
116    unsafe fn read_entry(&self, addr: u64) -> u64 {
117        let addr = addr as usize;
118        let Some(pte_bytes) = self.memory.as_slice().get(addr..addr + 8) else {
119            // Attacker-controlled data pointed out-of-bounds. We'll
120            // default to returning 0 in this case, which, for most
121            // architectures (including x86-64 and arm64, the ones we
122            // care about presently) will be a not-present entry.
123            return 0;
124        };
125        // this is statically the correct size, so using unwrap() here
126        // doesn't make this any more panic-y.
127        #[allow(clippy::unwrap_used)]
128        let n: [u8; 8] = pte_bytes.try_into().unwrap();
129        u64::from_ne_bytes(n)
130    }
131    fn to_phys(addr: u64) -> u64 {
132        addr
133    }
134    fn from_phys(addr: u64) -> u64 {
135        addr
136    }
137    fn root_table(&self) -> u64 {
138        self.root_pt_gpa()
139    }
140}
141
142/// Compute a deterministic hash of a snapshot.
143///
144/// This does not include the load info from the snapshot, because
145/// that is only used for debugging builds.
146fn hash(memory: &[u8], regions: &[MemoryRegion]) -> Result<[u8; 32]> {
147    let mut hasher = blake3::Hasher::new();
148    hasher.update(memory);
149    for rgn in regions {
150        hasher.update(&usize::to_le_bytes(rgn.guest_region.start));
151        let guest_len = rgn.guest_region.end - rgn.guest_region.start;
152        #[allow(clippy::useless_conversion)]
153        let host_start_addr: usize = rgn.host_region.start.into();
154        #[allow(clippy::useless_conversion)]
155        let host_end_addr: usize = rgn.host_region.end.into();
156        hasher.update(&usize::to_le_bytes(host_start_addr));
157        let host_len = host_end_addr - host_start_addr;
158        if guest_len != host_len {
159            return Err(MemoryRegionSizeMismatch(
160                host_len,
161                guest_len,
162                format!("{:?}", rgn),
163            ));
164        }
165        // Ignore [`MemoryRegion::region_type`], since it is extra
166        // information for debugging rather than a core part of the
167        // identity of the snapshot/workload.
168        hasher.update(&usize::to_le_bytes(guest_len));
169        hasher.update(&u32::to_le_bytes(rgn.flags.bits()));
170    }
171    // Ignore [`load_info`], since it is extra information for
172    // debugging rather than a core part of the identity of the
173    // snapshot/workload.
174    Ok(hasher.finalize().into())
175}
176
177pub(crate) fn access_gpa<'a>(
178    snap: &'a [u8],
179    scratch: &'a [u8],
180    layout: SandboxMemoryLayout,
181    gpa: u64,
182) -> Option<(&'a [u8], usize)> {
183    let resolved = layout.resolve_gpa(gpa, &[])?.with_memories(snap, scratch);
184    Some((resolved.base.as_ref(), resolved.offset))
185}
186
/// A read-only view over a sandbox's snapshot and scratch memory
/// slices, together with the layout needed to translate
/// guest-physical addresses and the GPA of a page-table root; used to
/// walk guest page tables via [`hyperlight_common::vmem::TableReadOps`].
pub(crate) struct SharedMemoryPageTableBuffer<'a> {
    // Snapshot memory contents.
    snap: &'a [u8],
    // Scratch memory contents.
    scratch: &'a [u8],
    // Layout used (via `access_gpa`) to resolve GPAs into one of the
    // slices above.
    layout: SandboxMemoryLayout,
    // Guest-physical address of the root page table.
    root: u64,
}
impl<'a> SharedMemoryPageTableBuffer<'a> {
    /// Bundle the memory slices, layout, and page-table root into a
    /// walker. No validation is performed here; bad roots simply
    /// resolve to not-present entries during the walk.
    pub(crate) fn new(
        snap: &'a [u8],
        scratch: &'a [u8],
        layout: SandboxMemoryLayout,
        root: u64,
    ) -> Self {
        Self {
            snap,
            scratch,
            layout,
            root,
        }
    }
}
208impl<'a> hyperlight_common::vmem::TableReadOps for SharedMemoryPageTableBuffer<'a> {
209    type TableAddr = u64;
210    fn entry_addr(addr: u64, offset: u64) -> u64 {
211        addr + offset
212    }
213    unsafe fn read_entry(&self, addr: u64) -> u64 {
214        let memoff = access_gpa(self.snap, self.scratch, self.layout, addr);
215        let Some(pte_bytes) = memoff.and_then(|(mem, off)| mem.get(off..off + 8)) else {
216            // Attacker-controlled data pointed out-of-bounds. We'll
217            // default to returning 0 in this case, which, for most
218            // architectures (including x86-64 and arm64, the ones we
219            // care about presently) will be a not-present entry.
220            return 0;
221        };
222        // this is statically the correct size, so using unwrap() here
223        // doesn't make this any more panic-y.
224        #[allow(clippy::unwrap_used)]
225        let n: [u8; 8] = pte_bytes.try_into().unwrap();
226        u64::from_ne_bytes(n)
227    }
228    fn to_phys(addr: u64) -> u64 {
229        addr
230    }
231    fn from_phys(addr: u64) -> u64 {
232        addr
233    }
234    fn root_table(&self) -> u64 {
235        self.root
236    }
237}
// Identity `AsRef` impl so generic code bounded on
// `AsRef<SharedMemoryPageTableBuffer>` can accept the walker directly.
impl<'a> core::convert::AsRef<SharedMemoryPageTableBuffer<'a>> for SharedMemoryPageTableBuffer<'a> {
    fn as_ref(&self) -> &Self {
        self
    }
}
/// Walk the guest page tables rooted at `root_pt` and collect every
/// mapping that should survive into a new snapshot, paired with the
/// backing page contents. Mappings in the scratch region and (when
/// snapshot page tables are in use) in the snapshot page-table window
/// are excluded; mappings whose physical page cannot be resolved are
/// silently dropped.
fn filtered_mappings<'a>(
    snap: &'a [u8],
    scratch: &'a [u8],
    regions: &[MemoryRegion],
    layout: SandboxMemoryLayout,
    root_pt: u64,
) -> Vec<(Mapping, &'a [u8])> {
    let op = SharedMemoryPageTableBuffer::new(snap, scratch, layout, root_pt);
    // SAFETY: the walk reads page-table memory only through
    // `SharedMemoryPageTableBuffer::read_entry`, which bounds-checks
    // every access and returns 0 (not-present) for unresolvable
    // addresses.
    unsafe {
        hyperlight_common::vmem::virt_to_phys(&op, 0, hyperlight_common::layout::MAX_GVA as u64)
    }
    .filter_map(move |mapping| {
        // the scratch map doesn't count
        if mapping.virt_base >= scratch_base_gva(layout.get_scratch_size()) {
            return None;
        }
        // neither does the mapping of the snapshot's own page tables
        #[cfg(not(feature = "nanvix-unstable"))]
        if mapping.virt_base >= hyperlight_common::layout::SNAPSHOT_PT_GVA_MIN as u64
            && mapping.virt_base <= hyperlight_common::layout::SNAPSHOT_PT_GVA_MAX as u64
        {
            return None;
        }
        // todo: is it useful to warn if we can't resolve this?
        let contents = unsafe { guest_page(snap, scratch, regions, layout, mapping.phys_base) }?;
        Some((mapping, contents))
    })
    .collect()
}
272
273/// Find the contents of the page which starts at gpa in guest physical
274/// memory, taking into account excess host->guest regions
275///
276/// # Safety
277/// The host side of the regions identified by MemoryRegion must be
278/// alive and must not be mutated by any other thread: references to
279/// these regions may be created and live for `'a`.
280unsafe fn guest_page<'a>(
281    snap: &'a [u8],
282    scratch: &'a [u8],
283    regions: &[MemoryRegion],
284    layout: SandboxMemoryLayout,
285    gpa: u64,
286) -> Option<&'a [u8]> {
287    let resolved = layout
288        .resolve_gpa(gpa, regions)?
289        .with_memories(snap, scratch);
290    if resolved.as_ref().len() < PAGE_SIZE {
291        return None;
292    }
293    Some(&resolved.as_ref()[..PAGE_SIZE])
294}
295
296fn map_specials(pt_buf: &GuestPageTableBuffer, scratch_size: usize) {
297    // Map the scratch region
298    let mapping = Mapping {
299        phys_base: scratch_base_gpa(scratch_size),
300        virt_base: scratch_base_gva(scratch_size),
301        len: scratch_size as u64,
302        kind: MappingKind::Basic(BasicMapping {
303            readable: true,
304            writable: true,
305            // assume that the guest will map these pages elsewhere if
306            // it actually needs to execute from them
307            executable: false,
308        }),
309    };
310    unsafe { vmem::map(pt_buf, mapping) };
311}
312
impl Snapshot {
    /// Create a new snapshot from the guest binary identified by `env`. With the configuration
    /// specified in `cfg`.
    ///
    /// Loads the binary into fresh memory, writes the PEB and any
    /// init-data blob, builds initial guest page tables (when
    /// snapshot page tables are enabled), and returns a snapshot in
    /// the [`NextAction::Initialise`] state pointing at the binary's
    /// entrypoint. Errors if the guest binary's recorded version does
    /// not match this host crate's version.
    pub(crate) fn from_env<'a, 'b>(
        env: impl Into<GuestEnvironment<'a, 'b>>,
        cfg: SandboxConfiguration,
    ) -> Result<Self> {
        let env = env.into();
        let mut bin = env.guest_binary;
        bin.canonicalize()?;
        let blob = env.init_data;

        use crate::mem::exe::ExeInfo;
        let exe_info = match bin {
            GuestBinary::FilePath(bin_path_str) => ExeInfo::from_file(&bin_path_str)?,
            GuestBinary::Buffer(buffer) => ExeInfo::from_buf(buffer)?,
        };

        // Check guest/host version compatibility.
        let host_version = env!("CARGO_PKG_VERSION");
        if let Some(v) = exe_info.guest_bin_version()
            && v != host_version
        {
            return Err(crate::HyperlightError::GuestBinVersionMismatch {
                guest_bin_version: v.to_string(),
                host_version: host_version.to_string(),
            });
        }

        let guest_blob_size = blob.as_ref().map(|b| b.data.len()).unwrap_or(0);
        let guest_blob_mem_flags = blob.as_ref().map(|b| b.permissions);

        #[cfg_attr(feature = "nanvix-unstable", allow(unused_mut))]
        let mut layout = crate::mem::layout::SandboxMemoryLayout::new(
            cfg,
            exe_info.loaded_size(),
            guest_blob_size,
            guest_blob_mem_flags,
        )?;

        let load_addr = layout.get_guest_code_address() as u64;
        let entrypoint_offset: u64 = exe_info.entrypoint().into();

        let mut memory = vec![0; layout.get_memory_size()?];

        let load_info = exe_info.load(
            load_addr.try_into()?,
            &mut memory[layout.get_guest_code_offset()..],
        )?;

        layout.write_peb(&mut memory)?;

        blob.map(|x| layout.write_init_data(&mut memory, x.data))
            .transpose()?;

        #[cfg(not(feature = "nanvix-unstable"))]
        {
            // Set up page table entries for the snapshot
            let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);

            use crate::mem::memory_region::{GuestMemoryRegion, MemoryRegionFlags};

            // 1. Map the (ideally readonly) pages of snapshot data.
            //    Writable regions are mapped copy-on-write so the
            //    snapshot itself is never modified.
            for rgn in layout.get_memory_regions_::<GuestMemoryRegion>(())?.iter() {
                let readable = rgn.flags.contains(MemoryRegionFlags::READ);
                let executable = rgn.flags.contains(MemoryRegionFlags::EXECUTE);
                let writable = rgn.flags.contains(MemoryRegionFlags::WRITE);
                let kind = if writable {
                    MappingKind::Cow(CowMapping {
                        readable,
                        executable,
                    })
                } else {
                    MappingKind::Basic(BasicMapping {
                        readable,
                        writable: false,
                        executable,
                    })
                };
                let mapping = Mapping {
                    phys_base: rgn.guest_region.start as u64,
                    virt_base: rgn.guest_region.start as u64,
                    len: rgn.guest_region.len() as u64,
                    kind,
                };
                unsafe { vmem::map(&pt_buf, mapping) };
            }

            // 2. Map the special mappings
            map_specials(&pt_buf, layout.get_scratch_size());

            // Append the serialized page tables after the loaded image.
            let pt_bytes = pt_buf.into_bytes();
            layout.set_pt_size(pt_bytes.len())?;
            memory.extend(&pt_bytes);
        };

        // Initial stack top: just below the exception-stack area at
        // the very top of the guest virtual address space.
        let exn_stack_top_gva = hyperlight_common::layout::MAX_GVA as u64
            - hyperlight_common::layout::SCRATCH_TOP_EXN_STACK_OFFSET
            + 1;

        let extra_regions = Vec::new();
        let hash = hash(&memory, &extra_regions)?;

        Ok(Self {
            sandbox_id: SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
            memory: ReadonlySharedMemory::from_bytes(&memory)?,
            layout,
            regions: extra_regions,
            load_info,
            hash,
            stack_top_gva: exn_stack_top_gva,
            sregs: None,
            entrypoint: NextAction::Initialise(load_addr + entrypoint_offset),
        })
    }

    // It might be nice to consider moving at least stack_top_gva into
    // layout, and sharing (via RwLock or similar) the layout between
    // the (host-side) mem mgr (where it can be passed in here) and
    // the sandbox vm itself (which modifies it as it receives
    // requests from the sandbox).
    #[allow(clippy::too_many_arguments)]
    /// Take a snapshot of the memory in `shared_mem`, then create a new
    /// instance of `Self` with the snapshot stored therein.
    ///
    /// Live pages are discovered by walking the guest page tables
    /// rooted at `root_pt_gpa`, compacted into a fresh contiguous
    /// memory image (deduplicating pages shared by several virtual
    /// addresses), and re-mapped by newly built page tables appended
    /// to that image. Writable mappings are downgraded to
    /// copy-on-write so restores never mutate the snapshot.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn new<S: SharedMemory>(
        shared_mem: &mut SnapshotSharedMemory<S>,
        scratch_mem: &mut S,
        sandbox_id: u64,
        mut layout: SandboxMemoryLayout,
        load_info: LoadInfo,
        regions: Vec<MemoryRegion>,
        root_pt_gpa: u64,
        stack_top_gva: u64,
        sregs: CommonSpecialRegisters,
        entrypoint: NextAction,
    ) -> Result<Self> {
        use std::collections::HashMap;
        // Maps original physical page -> offset of its (single) copy
        // in the new snapshot image, for deduplication.
        let mut phys_seen = HashMap::<u64, usize>::new();
        let memory = shared_mem.with_contents(|snap_c| {
            scratch_mem.with_contents(|scratch_c| {
                // Pass 1: count how many pages need to live
                let live_pages =
                    filtered_mappings(snap_c, scratch_c, &regions, layout, root_pt_gpa);

                // Pass 2: copy them, and map them
                // TODO: Look for opportunities to hugepage map
                let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);
                let mut snapshot_memory: Vec<u8> = Vec::new();
                for (mapping, contents) in live_pages {
                    // Force anything writable to copy-on-write; the
                    // snapshot image itself must stay immutable.
                    let kind = match mapping.kind {
                        MappingKind::Cow(cm) => MappingKind::Cow(cm),
                        MappingKind::Basic(bm) if bm.writable => MappingKind::Cow(CowMapping {
                            readable: bm.readable,
                            executable: bm.executable,
                        }),
                        MappingKind::Basic(bm) => MappingKind::Basic(BasicMapping {
                            readable: bm.readable,
                            writable: false,
                            executable: bm.executable,
                        }),
                        MappingKind::Unmapped => continue,
                    };
                    // Copy the page into the new image only the first
                    // time its physical page is seen.
                    let new_gpa = phys_seen.entry(mapping.phys_base).or_insert_with(|| {
                        let new_offset = snapshot_memory.len();
                        snapshot_memory.extend(contents);
                        new_offset + SandboxMemoryLayout::BASE_ADDRESS
                    });
                    let mapping = Mapping {
                        phys_base: *new_gpa as u64,
                        virt_base: mapping.virt_base,
                        len: PAGE_SIZE as u64,
                        kind,
                    };
                    unsafe { vmem::map(&pt_buf, mapping) };
                }
                // Phase 3: Map the special mappings
                map_specials(&pt_buf, layout.get_scratch_size());
                let pt_bytes = pt_buf.into_bytes();
                layout.set_pt_size(pt_bytes.len())?;
                snapshot_memory.extend(&pt_bytes);
                Ok::<Vec<u8>, crate::HyperlightError>(snapshot_memory)
            })
        })???;
        layout.set_snapshot_size(memory.len());

        // We do not need the original regions anymore, as any uses of
        // them in the guest have been incorporated into the snapshot
        // properly.
        let regions = Vec::new();

        let hash = hash(&memory, &regions)?;
        Ok(Self {
            sandbox_id,
            layout,
            memory: ReadonlySharedMemory::from_bytes(&memory)?,
            regions,
            load_info,
            hash,
            stack_top_gva,
            sregs: Some(sregs),
            entrypoint,
        })
    }

    /// The id of the sandbox this snapshot was taken from.
    pub(crate) fn sandbox_id(&self) -> u64 {
        self.sandbox_id
    }

    /// Get the mapped regions from this snapshot
    pub(crate) fn regions(&self) -> &[MemoryRegion] {
        &self.regions
    }

    /// Return the main memory contents of the snapshot
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn memory(&self) -> &ReadonlySharedMemory {
        &self.memory
    }

    /// Return a copy of the load info for the exe in the snapshot
    pub(crate) fn load_info(&self) -> LoadInfo {
        self.load_info.clone()
    }

    /// Borrow the memory layout this snapshot was created with.
    pub(crate) fn layout(&self) -> &crate::mem::layout::SandboxMemoryLayout {
        &self.layout
    }

    /// Guest-physical address of the snapshot's root page table,
    /// derived from the layout. Use this (not the CR3 in [`Self::sregs`])
    /// when restoring.
    pub(crate) fn root_pt_gpa(&self) -> u64 {
        self.layout.get_pt_base_gpa()
    }

    /// The address of the top of the guest stack.
    pub(crate) fn stack_top_gva(&self) -> u64 {
        self.stack_top_gva
    }

    /// Returns the special registers stored in this snapshot.
    /// Returns None for snapshots created directly from a binary (before preinitialisation).
    /// Returns Some for snapshots taken from a running sandbox.
    /// Note: The CR3 value in the returned struct should NOT be used for restore;
    /// use `root_pt_gpa()` instead since page tables are relocated during snapshot.
    pub(crate) fn sregs(&self) -> Option<&CommonSpecialRegisters> {
        self.sregs.as_ref()
    }

    /// The next action that should be performed on this snapshot.
    pub(crate) fn entrypoint(&self) -> NextAction {
        self.entrypoint
    }
}
564
// Snapshot equality is defined entirely by the precomputed content
// hash; layout, load info, sregs, etc. are not compared.
impl PartialEq for Snapshot {
    fn eq(&self, other: &Snapshot) -> bool {
        self.hash == other.hash
    }
}
570
#[cfg(test)]
mod tests {
    use hyperlight_common::vmem::{self, BasicMapping, Mapping, MappingKind, PAGE_SIZE};

    use crate::hypervisor::regs::CommonSpecialRegisters;
    use crate::mem::exe::LoadInfo;
    use crate::mem::layout::SandboxMemoryLayout;
    use crate::mem::mgr::{GuestPageTableBuffer, SandboxMemoryManager, SnapshotSharedMemory};
    use crate::mem::shared_mem::{
        ExclusiveSharedMemory, HostSharedMemory, ReadonlySharedMemory, SharedMemory,
    };

    fn default_sregs() -> CommonSpecialRegisters {
        CommonSpecialRegisters::default()
    }

    // GPA where the test page tables live: one payload page past the
    // sandbox base address.
    const SIMPLE_PT_BASE: usize = PAGE_SIZE + SandboxMemoryLayout::BASE_ADDRESS;

    /// Build snapshot memory holding one page of `contents` followed
    /// by page tables that identity-map that page (RWX) plus the
    /// special (scratch) mappings.
    fn make_simple_pt_mem(contents: &[u8]) -> SnapshotSharedMemory<ExclusiveSharedMemory> {
        let pt_buf = GuestPageTableBuffer::new(SIMPLE_PT_BASE);
        let mapping = Mapping {
            phys_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
            virt_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: true,
            }),
        };
        unsafe { vmem::map(&pt_buf, mapping) };
        super::map_specials(&pt_buf, PAGE_SIZE);
        let pt_bytes = pt_buf.into_bytes();

        let mut snapshot_mem = vec![0u8; PAGE_SIZE + pt_bytes.len()];
        snapshot_mem[0..PAGE_SIZE].copy_from_slice(contents);
        snapshot_mem[PAGE_SIZE..].copy_from_slice(&pt_bytes);
        ReadonlySharedMemory::from_bytes(&snapshot_mem)
            .unwrap()
            .to_mgr_snapshot_mem()
            .unwrap()
    }

    /// Construct a memory manager over a zero-filled one-page
    /// snapshot, returning it with the GPA of the page-table root.
    fn make_simple_pt_mgr() -> (SandboxMemoryManager<HostSharedMemory>, u64) {
        let cfg = crate::sandbox::SandboxConfiguration::default();
        let scratch_mem = ExclusiveSharedMemory::new(cfg.get_scratch_size()).unwrap();
        let mgr = SandboxMemoryManager::new(
            SandboxMemoryLayout::new(cfg, 4096, 0x3000, None).unwrap(),
            make_simple_pt_mem(&[0u8; PAGE_SIZE]),
            scratch_mem,
            super::NextAction::None,
        );
        let (mgr, _) = mgr.build().unwrap();
        (mgr, SIMPLE_PT_BASE as u64)
    }

    // Two snapshots taken over the same manager must not alias each
    // other: restoring each must reproduce its own page contents.
    #[test]
    fn multiple_snapshots_independent() {
        let (mut mgr, pt_base) = make_simple_pt_mgr();

        // Create first snapshot with pattern A
        let pattern_a = vec![0xAA; PAGE_SIZE];
        let snapshot_a = super::Snapshot::new(
            &mut make_simple_pt_mem(&pattern_a).build().0,
            &mut mgr.scratch_mem,
            1,
            mgr.layout,
            LoadInfo::dummy(),
            Vec::new(),
            pt_base,
            0,
            default_sregs(),
            super::NextAction::None,
        )
        .unwrap();

        // Create second snapshot with pattern B
        let pattern_b = vec![0xBB; PAGE_SIZE];
        let snapshot_b = super::Snapshot::new(
            &mut make_simple_pt_mem(&pattern_b).build().0,
            &mut mgr.scratch_mem,
            2,
            mgr.layout,
            LoadInfo::dummy(),
            Vec::new(),
            pt_base,
            0,
            default_sregs(),
            super::NextAction::None,
        )
        .unwrap();

        // Restore snapshot A
        mgr.restore_snapshot(&snapshot_a).unwrap();
        mgr.shared_mem
            .with_contents(|contents| assert_eq!(&contents[0..pattern_a.len()], &pattern_a[..]))
            .unwrap();

        // Restore snapshot B
        mgr.restore_snapshot(&snapshot_b).unwrap();
        mgr.shared_mem
            .with_contents(|contents| assert_eq!(&contents[0..pattern_b.len()], &pattern_b[..]))
            .unwrap();
    }
}