// hyperlight_host/sandbox/initialized_multi_use.rs

/*
Copyright 2025  The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
16
17use std::collections::HashSet;
18use std::path::Path;
19use std::sync::atomic::Ordering;
20use std::sync::{Arc, Mutex};
21
22use flatbuffers::FlatBufferBuilder;
23use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
24use hyperlight_common::flatbuffer_wrappers::function_types::{
25    ParameterValue, ReturnType, ReturnValue,
26};
27use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
28use tracing::{Span, instrument};
29
30use super::Callable;
31use super::file_mapping::prepare_file_cow;
32use super::host_funcs::FunctionRegistry;
33use super::snapshot::Snapshot;
34use crate::HyperlightError::{self, SnapshotSandboxMismatch};
35use crate::func::{ParameterTuple, SupportedReturnType};
36use crate::hypervisor::InterruptHandle;
37use crate::hypervisor::hyperlight_vm::{HyperlightVm, HyperlightVmError};
38use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
39use crate::mem::mgr::SandboxMemoryManager;
40use crate::mem::shared_mem::{HostSharedMemory, SharedMemory as _};
41use crate::metrics::{
42    METRIC_GUEST_ERROR, METRIC_GUEST_ERROR_LABEL_CODE, maybe_time_and_emit_guest_call,
43};
44use crate::{Result, log_then_return};
45
/// A fully initialized sandbox that can execute guest functions multiple times.
///
/// Guest functions can be called repeatedly while maintaining state between calls.
/// The sandbox supports creating snapshots and restoring to previous states.
///
/// ## Sandbox Poisoning
///
/// The sandbox becomes **poisoned** when the guest is not run to completion, leaving it in
/// an inconsistent state that could compromise memory safety, data integrity, or security.
///
/// ### When Does Poisoning Occur?
///
/// Poisoning happens when guest execution is interrupted before normal completion:
///
/// - **Guest panics or aborts** - When a guest function panics, crashes, or calls `abort()`,
///   the normal cleanup and unwinding process is interrupted
/// - **Invalid memory access** - Attempts to read/write/execute memory outside allowed regions
/// - **Stack overflow** - Guest exhausts its stack space during execution
/// - **Heap exhaustion** - Guest runs out of heap memory
/// - **Host-initiated cancellation** - Calling [`InterruptHandle::kill()`] to forcefully
///   terminate an in-progress guest function
///
/// ### Why This Is Unsafe
///
/// When guest execution doesn't complete normally, critical cleanup operations are skipped:
///
/// - **Memory leaks** - Heap allocations remain unreachable as the call stack is unwound
/// - **Corrupted allocator state** - Memory allocator metadata (free lists, heap headers)
///   left inconsistent
/// - **Locked resources** - Mutexes or other synchronization primitives remain locked
/// - **Partial state updates** - Data structures left half-modified (corrupted linked lists,
///   inconsistent hash tables, etc.)
///
/// ### Recovery
///
/// Use [`restore()`](Self::restore) with a snapshot taken before poisoning occurred.
/// This is the **only safe way** to recover - it completely replaces all memory state,
/// eliminating any inconsistencies. See [`restore()`](Self::restore) for details.
pub struct MultiUseSandbox {
    /// Unique identifier for this sandbox instance; used to tie snapshots
    /// to the sandbox they were taken from.
    id: u64,
    /// Whether this sandbox is poisoned (guest was not run to completion).
    poisoned: bool,
    /// Registry of host functions that the guest may call back into.
    pub(crate) host_funcs: Arc<Mutex<FunctionRegistry>>,
    /// Manager for the sandbox's host-visible shared memory.
    pub(crate) mem_mgr: SandboxMemoryManager<HostSharedMemory>,
    /// The underlying virtual machine that executes guest code.
    vm: HyperlightVm,
    /// Memory-manager handle handed to guest-call dispatch when GDB
    /// debugging support is compiled in.
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    /// If the current state of the sandbox has been captured in a snapshot,
    /// that snapshot is stored here.
    snapshot: Option<Arc<Snapshot>>,
    /// Optional callback to discover page table roots from guest memory.
    /// Given (snapshot_mem, scratch_mem, cr3), returns a list of root GPAs.
    /// If not set, only CR3 is used as the single root.
    pt_root_finder: Option<PtRootFinder>,
}
102
/// Callback for discovering page table roots from guest memory.
///
/// Called during [`MultiUseSandbox::snapshot`] with:
/// - `snapshot_mem` - the sandbox's snapshot (shared) memory as a byte slice
/// - `scratch_mem` - the sandbox's scratch memory as a byte slice
/// - `root_pt_gpa` - the root page table GPA of the currently-executing
///   address space
///
/// Returns a list of root page table GPAs to walk. If the list is
/// empty, only `root_pt_gpa` is used.
///
/// Installed via [`MultiUseSandbox::set_pt_root_finder`]; must be `Send`
/// so the sandbox itself remains `Send`.
pub type PtRootFinder = Box<dyn Fn(&[u8], &[u8], u64) -> Vec<u64> + Send>;
114
115impl MultiUseSandbox {
    /// Move an `UninitializedSandbox` into a new `MultiUseSandbox` instance.
    ///
    /// This function is not equivalent to doing an `evolve` from uninitialized
    /// to initialized, and is purposely not exposed publicly outside the crate
    /// (as a `From` implementation would be)
    ///
    /// Parameters:
    /// - `host_funcs`: registry of host functions callable by the guest
    /// - `mgr`: memory manager for the sandbox's shared memory
    /// - `vm`: the already-created virtual machine for this sandbox
    /// - `dbg_mem_access_fn` (gdb builds only): memory-manager handle for
    ///   debugger memory access
    #[instrument(skip_all, parent = Span::current(), level = "Trace")]
    pub(super) fn from_uninit(
        host_funcs: Arc<Mutex<FunctionRegistry>>,
        mgr: SandboxMemoryManager<HostSharedMemory>,
        vm: HyperlightVm,
        #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    ) -> MultiUseSandbox {
        Self {
            // Draw a fresh, process-unique id from the shared atomic counter;
            // Relaxed ordering suffices since only uniqueness matters.
            id: super::snapshot::SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
            // A freshly-evolved sandbox starts out unpoisoned, with no
            // cached snapshot and no page-table-root finder installed.
            poisoned: false,
            host_funcs,
            mem_mgr: mgr,
            vm,
            #[cfg(gdb)]
            dbg_mem_access_fn,
            snapshot: None,
            pt_root_finder: None,
        }
    }
140
141    /// Set a callback that discovers page table roots from guest memory.
142    /// The callback receives (snapshot_mem, scratch_mem, cr3) and returns
143    /// the list of root GPAs to walk during snapshot creation.
144    pub fn set_pt_root_finder(&mut self, finder: PtRootFinder) {
145        self.pt_root_finder = Some(finder);
146    }
147
    /// Creates a snapshot of the sandbox's current memory state.
    ///
    /// The snapshot is tied to this specific sandbox instance and can only be
    /// restored to the same sandbox it was created from.
    ///
    /// Returns a cached snapshot if the current state was already captured;
    /// otherwise builds a new one and caches it.
    ///
    /// ## Poisoned Sandbox
    ///
    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
    /// is currently poisoned. Snapshots can only be taken from non-poisoned sandboxes.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Modify sandbox state
    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 42)?;
    ///
    /// // Create snapshot belonging to this sandbox
    /// let snapshot = sandbox.snapshot()?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn snapshot(&mut self) -> Result<Arc<Snapshot>> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }

        // Fast path: the current state is already captured; hand out the
        // cached snapshot rather than rebuilding it.
        if let Some(snapshot) = &self.snapshot {
            return Ok(snapshot.clone());
        }
        let mapped_regions_iter = self.vm.get_mapped_regions();
        let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
        // Get CR3 from the vCPU
        let cr3 = self
            .vm
            .get_root_pt()
            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        // Use the callback if set, otherwise just CR3
        let root_pt_gpas = if let Some(finder) = &self.pt_root_finder {
            // Both `with_contents` calls return a Result, hence the `??` to
            // unwrap the nested results.
            let roots = self.mem_mgr.shared_mem.with_contents(|snap| {
                self.mem_mgr
                    .scratch_mem
                    .with_contents(|scratch| finder(snap, scratch, cr3))
            })??;
            // An empty list from the finder means "no extra roots": fall
            // back to CR3 as the single root.
            if roots.is_empty() { vec![cr3] } else { roots }
        } else {
            vec![cr3]
        };

        // Capture the remaining vCPU-derived state needed to later restore
        // execution: stack top, special registers, and the entrypoint.
        let stack_top_gpa = self.vm.get_stack_top();
        let sregs = self
            .vm
            .get_snapshot_sregs()
            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        let entrypoint = self.vm.get_entrypoint();
        let memory_snapshot = self.mem_mgr.snapshot(
            self.id,
            mapped_regions_vec,
            &root_pt_gpas,
            stack_top_gpa,
            sregs,
            entrypoint,
        )?;
        let snapshot = Arc::new(memory_snapshot);
        // Cache so repeated snapshot() calls on unchanged state are cheap.
        self.snapshot = Some(snapshot.clone());
        Ok(snapshot)
    }
222
    /// Restores the sandbox's memory to a previously captured snapshot state.
    ///
    /// The snapshot must have been created from this same sandbox instance.
    /// Attempting to restore a snapshot from a different sandbox will return
    /// a [`SnapshotSandboxMismatch`](crate::HyperlightError::SnapshotSandboxMismatch) error.
    ///
    /// ## Poison State Recovery
    ///
    /// This method automatically clears any poison state when successful. This is safe because:
    /// - Snapshots can only be taken from non-poisoned sandboxes
    /// - Restoration completely replaces all memory state, eliminating any inconsistencies
    ///   caused by incomplete guest execution
    ///
    /// ### What Gets Fixed During Restore
    ///
    /// When a poisoned sandbox is restored, the memory state is completely reset:
    /// - **Leaked heap memory** - All allocations from interrupted execution are discarded
    /// - **Corrupted allocator metadata** - Free lists and heap headers restored to consistent state
    /// - **Locked mutexes** - All lock state is reset
    /// - **Partial updates** - Data structures restored to their pre-execution state
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Take initial snapshot from this sandbox
    /// let snapshot = sandbox.snapshot()?;
    ///
    /// // Modify sandbox state
    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 100)?;
    /// let value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
    /// assert_eq!(value, 100);
    ///
    /// // Restore to previous state (same sandbox)
    /// sandbox.restore(snapshot)?;
    /// let restored_value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
    /// assert_eq!(restored_value, 0); // Back to initial state
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// ## Recovering from Poison
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary, HyperlightError};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Take snapshot before potentially poisoning operation
    /// let snapshot = sandbox.snapshot()?;
    ///
    /// // This might poison the sandbox (guest not run to completion)
    /// let result = sandbox.call::<()>("guest_panic", ());
    /// if result.is_err() {
    ///     if sandbox.poisoned() {
    ///         // Restore from snapshot to clear poison
    ///         sandbox.restore(snapshot.clone())?;
    ///         assert!(!sandbox.poisoned());
    ///
    ///         // Sandbox is now usable again
    ///         sandbox.call::<String>("Echo", "hello".to_string())?;
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn restore(&mut self, snapshot: Arc<Snapshot>) -> Result<()> {
        // Currently, we do not try to optimise restore to the
        // most-current snapshot. This is because the most-current
        // snapshot, while it must have identical virtual memory
        // layout to the current sandbox, does not necessarily have
        // the exact same /physical/ memory contents. It is not
        // entirely inconceivable that this could lead to breakage of
        // cross-request isolation in some way, although it would
        // require some /very/ odd code.  For example, suppose that a
        // service uses Hyperlight to sandbox native code from
        // clients, and promises cross-request isolation. A tenant
        // provides a binary that can process two forms of request,
        // either writing a secret into physical memory, or reading
        // from arbitrary physical memory, assuming that the two kinds
        // of requests can never (dangerously) meet in the same
        // sandbox.
        //
        // It is presently unclear whether this is a sensible threat
        // model, especially since Hyperlight is often used with
        // managed-code runtimes which do not allow even arbitrary
        // access to virtual memory, much less physical memory.
        // However, out of an abundance of caution, the optimisation
        // is presently disabled.

        // Reject snapshots taken from a different sandbox instance.
        if self.id != snapshot.sandbox_id() {
            return Err(SnapshotSandboxMismatch);
        }

        // Restore memory contents; if the snapshot/scratch backing regions
        // were replaced, remap them into the VM.
        let (gsnapshot, gscratch) = self.mem_mgr.restore_snapshot(&snapshot)?;
        if let Some(gsnapshot) = gsnapshot {
            self.vm
                .update_snapshot_mapping(gsnapshot)
                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        }
        if let Some(gscratch) = gscratch {
            self.vm
                .update_scratch_mapping(gscratch)
                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        }

        let sregs = snapshot.sregs().ok_or_else(|| {
            HyperlightError::Error("snapshot from running sandbox should have sregs".to_string())
        })?;
        // TODO (ludfjig): Go through the rest of possible errors in this `MultiUseSandbox::restore` function
        // and determine if they should also poison the sandbox.
        self.vm
            .reset_vcpu(snapshot.root_pt_gpa(), sregs)
            .map_err(|e| {
                // A failed vCPU reset leaves the sandbox in an unknown
                // state, so mark it poisoned before propagating the error.
                self.poisoned = true;
                HyperlightVmError::Restore(e)
            })?;

        self.vm.set_stack_top(snapshot.stack_top_gva());
        self.vm.set_entrypoint(snapshot.entrypoint());

        // Reconcile host-mapped regions: unmap regions not present in the
        // snapshot, then map regions the snapshot has but we currently lack.
        let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
        let snapshot_regions: HashSet<_> = snapshot.regions().iter().cloned().collect();

        let regions_to_unmap = current_regions.difference(&snapshot_regions);
        let regions_to_map = snapshot_regions.difference(&current_regions);

        for region in regions_to_unmap {
            self.vm
                .unmap_region(region)
                .map_err(HyperlightVmError::UnmapRegion)?;
        }

        for region in regions_to_map {
            // Safety: The region has been mapped before, and at that point the caller promised that the memory region is valid
            // in their call to `MultiUseSandbox::map_region`
            unsafe { self.vm.map_region(region) }.map_err(HyperlightVmError::MapRegion)?;
        }

        // The restored snapshot is now our most current snapshot
        self.snapshot = Some(snapshot.clone());

        // Clear poison state when successfully restoring from snapshot.
        //
        // # Safety:
        // This is safe because:
        // 1. Snapshots can only be taken from non-poisoned sandboxes (verified at snapshot creation)
        // 2. Restoration completely replaces all memory state, eliminating:
        //    - All leaked heap allocations (memory is restored to snapshot state)
        //    - All corrupted data structures (overwritten with consistent snapshot data)
        //    - All inconsistent global state (reset to snapshot values)
        self.poisoned = false;

        Ok(())
    }
388
389    /// Calls a guest function by name with the specified arguments.
390    ///
391    /// Changes made to the sandbox during execution are *not* persisted.
392    ///
393    /// ## Poisoned Sandbox
394    ///
395    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
396    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
397    ///
398    /// # Examples
399    ///
400    /// ```no_run
401    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
402    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
403    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
404    ///     GuestBinary::FilePath("guest.bin".into()),
405    ///     None
406    /// )?.evolve()?;
407    ///
408    /// // Call function with no arguments
409    /// let result: i32 = sandbox.call_guest_function_by_name("GetCounter", ())?;
410    ///
411    /// // Call function with single argument
412    /// let doubled: i32 = sandbox.call_guest_function_by_name("Double", 21)?;
413    /// assert_eq!(doubled, 42);
414    ///
415    /// // Call function with multiple arguments
416    /// let sum: i32 = sandbox.call_guest_function_by_name("Add", (10, 32))?;
417    /// assert_eq!(sum, 42);
418    ///
419    /// // Call function returning string
420    /// let message: String = sandbox.call_guest_function_by_name("Echo", "Hello, World!".to_string())?;
421    /// assert_eq!(message, "Hello, World!");
422    /// # Ok(())
423    /// # }
424    /// ```
425    #[doc(hidden)]
426    #[deprecated(
427        since = "0.8.0",
428        note = "Deprecated in favour of call and snapshot/restore."
429    )]
430    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
431    pub fn call_guest_function_by_name<Output: SupportedReturnType>(
432        &mut self,
433        func_name: &str,
434        args: impl ParameterTuple,
435    ) -> Result<Output> {
436        if self.poisoned {
437            return Err(crate::HyperlightError::PoisonedSandbox);
438        }
439        let snapshot = self.snapshot()?;
440        let res = self.call(func_name, args);
441        self.restore(snapshot)?;
442        res
443    }
444
445    /// Calls a guest function by name with the specified arguments.
446    ///
447    /// Changes made to the sandbox during execution are persisted.
448    ///
449    /// ## Poisoned Sandbox
450    ///
451    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
452    /// is already poisoned before the call. Use [`restore()`](Self::restore) to recover from
453    /// a poisoned state.
454    ///
455    /// ## Sandbox Poisoning
456    ///
457    /// If this method returns an error, the sandbox may be poisoned if the guest was not run
458    /// to completion (due to panic, abort, memory violation, stack/heap exhaustion, or forced
459    /// termination). Use [`poisoned()`](Self::poisoned) to check the poison state and
460    /// [`restore()`](Self::restore) to recover if needed.
461    ///
462    /// If this method returns `Ok`, the sandbox is guaranteed to **not** be poisoned - the guest
463    /// function completed successfully and the sandbox state is consistent.
464    ///
465    /// # Examples
466    ///
467    /// ```no_run
468    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
469    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
470    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
471    ///     GuestBinary::FilePath("guest.bin".into()),
472    ///     None
473    /// )?.evolve()?;
474    ///
475    /// // Call function with no arguments
476    /// let result: i32 = sandbox.call("GetCounter", ())?;
477    ///
478    /// // Call function with single argument
479    /// let doubled: i32 = sandbox.call("Double", 21)?;
480    /// assert_eq!(doubled, 42);
481    ///
482    /// // Call function with multiple arguments
483    /// let sum: i32 = sandbox.call("Add", (10, 32))?;
484    /// assert_eq!(sum, 42);
485    ///
486    /// // Call function returning string
487    /// let message: String = sandbox.call("Echo", "Hello, World!".to_string())?;
488    /// assert_eq!(message, "Hello, World!");
489    /// # Ok(())
490    /// # }
491    /// ```
492    ///
493    /// ## Handling Potential Poisoning
494    ///
495    /// ```no_run
496    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
497    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
498    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
499    ///     GuestBinary::FilePath("guest.bin".into()),
500    ///     None
501    /// )?.evolve()?;
502    ///
503    /// // Take snapshot before risky operation
504    /// let snapshot = sandbox.snapshot()?;
505    ///
506    /// // Call potentially unsafe guest function
507    /// let result = sandbox.call::<String>("RiskyOperation", "input".to_string());
508    ///
509    /// // Check if the call failed and poisoned the sandbox
510    /// if let Err(e) = result {
511    ///     eprintln!("Guest function failed: {}", e);
512    ///     
513    ///     if sandbox.poisoned() {
514    ///         eprintln!("Sandbox was poisoned, restoring from snapshot");
515    ///         sandbox.restore(snapshot.clone())?;
516    ///     }
517    /// }
518    /// # Ok(())
519    /// # }
520    /// ```
521    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
522    pub fn call<Output: SupportedReturnType>(
523        &mut self,
524        func_name: &str,
525        args: impl ParameterTuple,
526    ) -> Result<Output> {
527        if self.poisoned {
528            return Err(crate::HyperlightError::PoisonedSandbox);
529        }
530        // Reset snapshot since we are mutating the sandbox state
531        self.snapshot = None;
532        maybe_time_and_emit_guest_call(func_name, || {
533            let ret = self.call_guest_function_by_name_no_reset(
534                func_name,
535                Output::TYPE,
536                args.into_value(),
537            );
538            // Use the ? operator to allow converting any hyperlight_common::func::Error
539            // returned by from_value into a HyperlightError
540            let ret = Output::from_value(ret?)?;
541            Ok(ret)
542        })
543    }
544
545    /// Maps a region of host memory into the sandbox address space.
546    ///
547    /// The base address and length must meet platform alignment requirements
548    /// (typically page-aligned). The `region_type` field is ignored as guest
549    /// page table entries are not created.
550    ///
551    /// ## Poisoned Sandbox
552    ///
553    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
554    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
555    ///
556    /// # Safety
557    ///
558    /// The caller must ensure the host memory region remains valid and unmodified
559    /// for the lifetime of `self`.
560    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
561    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
562        if self.poisoned {
563            return Err(crate::HyperlightError::PoisonedSandbox);
564        }
565        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
566            // TODO: Implement support for writable mappings, which
567            // need to be registered with the memory manager so that
568            // writes can be rolled back when necessary.
569            log_then_return!("TODO: Writable mappings not yet supported");
570        }
571        // Reset snapshot since we are mutating the sandbox state
572        self.snapshot = None;
573        unsafe { self.vm.map_region(rgn) }.map_err(HyperlightVmError::MapRegion)?;
574        self.mem_mgr.mapped_rgns += 1;
575        Ok(())
576    }
577
    /// Map the contents of a file into the guest at a particular address
    ///
    /// An optional `label` identifies this mapping in the PEB's
    /// `FileMappingInfo` array (max 63 bytes, defaults to the file name).
    ///
    /// Returns the length of the mapping in bytes.
    ///
    /// # Errors
    ///
    /// Fails if the sandbox is poisoned, the file-mapping limit is reached
    /// (with the `nanvix-unstable` feature), the mapping would overlap the
    /// sandbox's primary shared memory or an existing mapping, or an address
    /// computation overflows.
    ///
    /// ## Poisoned Sandbox
    ///
    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
    #[instrument(err(Debug), skip(self, file_path, guest_base, label), parent = Span::current())]
    pub fn map_file_cow(
        &mut self,
        file_path: &Path,
        guest_base: u64,
        label: Option<&str>,
    ) -> Result<u64> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }

        // Pre-check the file mapping limit before doing any expensive
        // OS or VM work. The PEB count is the source of truth.
        #[cfg(feature = "nanvix-unstable")]
        let current_count = self
            .mem_mgr
            .shared_mem
            .read::<u64>(self.mem_mgr.layout.get_file_mappings_size_offset())?
            as usize;
        #[cfg(feature = "nanvix-unstable")]
        if current_count >= hyperlight_common::mem::MAX_FILE_MAPPINGS {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: file mapping limit reached ({} of {})",
                current_count,
                hyperlight_common::mem::MAX_FILE_MAPPINGS,
            )));
        }

        // Phase 1: host-side OS work (open file, create mapping)
        let mut prepared = prepare_file_cow(file_path, guest_base, label)?;

        // Validate that the full mapped range doesn't overlap the
        // sandbox's primary shared memory region.
        let shared_size = self.mem_mgr.shared_mem.mem_size() as u64;
        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
        let shared_end = base_addr.checked_add(shared_size).ok_or_else(|| {
            crate::HyperlightError::Error("shared memory end overflow".to_string())
        })?;
        // checked_add guards against guest_base + size wrapping around.
        let mapping_end = guest_base
            .checked_add(prepared.size as u64)
            .ok_or_else(|| {
                crate::HyperlightError::Error(format!(
                    "map_file_cow: guest address overflow: {:#x} + {:#x}",
                    guest_base, prepared.size
                ))
            })?;
        // Standard half-open interval overlap test against shared memory.
        if guest_base < shared_end && mapping_end > base_addr {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: mapping [{:#x}..{:#x}) overlaps sandbox shared memory [{:#x}..{:#x})",
                guest_base, mapping_end, base_addr, shared_end,
            )));
        }

        // Phase 2: VM-side work (map into guest address space)
        let region = prepared.to_memory_region()?;

        // Check for overlaps with existing file mappings in the VM.
        for existing_region in self.vm.get_mapped_regions() {
            let ex_start = existing_region.guest_region.start as u64;
            let ex_end = existing_region.guest_region.end as u64;
            if guest_base < ex_end && mapping_end > ex_start {
                return Err(crate::HyperlightError::Error(format!(
                    "map_file_cow: mapping [{:#x}..{:#x}) overlaps existing mapping [{:#x}..{:#x})",
                    guest_base, mapping_end, ex_start, ex_end,
                )));
            }
        }

        // Reset snapshot since we are mutating the sandbox state
        self.snapshot = None;

        // Safety relies on `prepare_file_cow` having produced a valid
        // host-backed region for the guest address range.
        unsafe { self.vm.map_region(&region) }
            .map_err(HyperlightVmError::MapRegion)
            .map_err(crate::HyperlightError::HyperlightVmError)?;

        let size = prepared.size as u64;

        // Mark consumed immediately after map_region succeeds.
        // On Windows, WhpVm::map_memory copies the file mapping handle
        // into its own `file_mappings` vec for cleanup on drop. If we
        // deferred mark_consumed(), both PreparedFileMapping::drop and
        // WhpVm::drop would release the same handle - a double-close.
        // On Linux the hypervisor holds a reference to the host mmap;
        // freeing it here would leave a dangling backing.
        prepared.mark_consumed();
        self.mem_mgr.mapped_rgns += 1;

        // Record the mapping metadata in the PEB. If this fails the VM
        // still holds a valid mapping but the PEB won't list it - the
        // limit was already pre-checked above so this should not fail
        // in practice.
        #[cfg(feature = "nanvix-unstable")]
        self.mem_mgr
            .write_file_mapping_entry(prepared.guest_base, size, &prepared.label)?;

        Ok(size)
    }
686
687    /// Calls a guest function with type-erased parameters and return values.
688    ///
689    /// This function is used for fuzz testing parameter and return type handling.
690    ///
691    /// ## Poisoned Sandbox
692    ///
693    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
694    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
695    #[cfg(feature = "fuzzing")]
696    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
697    pub fn call_type_erased_guest_function_by_name(
698        &mut self,
699        func_name: &str,
700        ret_type: ReturnType,
701        args: Vec<ParameterValue>,
702    ) -> Result<ReturnValue> {
703        if self.poisoned {
704            return Err(crate::HyperlightError::PoisonedSandbox);
705        }
706        // Reset snapshot since we are mutating the sandbox state
707        self.snapshot = None;
708        maybe_time_and_emit_guest_call(func_name, || {
709            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
710        })
711    }
712
    /// Dispatches a single guest function call without restoring sandbox
    /// state afterwards.
    ///
    /// Serializes `function_name`/`args` as a FlatBuffer `FunctionCall`,
    /// writes it into guest memory, runs the vCPU dispatch, and reads back
    /// the guest's result. Dispatch failures and guest-reported errors are
    /// converted to [`HyperlightError`]; either may mark the sandbox as
    /// poisoned depending on the error kind.
    ///
    /// Returns [`crate::HyperlightError::PoisonedSandbox`] immediately if
    /// the sandbox is already poisoned.
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // ===== KILL() TIMING POINT 1 =====
        // Clear any stale cancellation from a previous guest function call or if kill() was called too early.
        // Any kill() that completed (even partially) BEFORE this line has NO effect on this call.
        self.vm.clear_cancel();

        // The closure isolates the fallible dispatch path so the cleanup
        // below (abort buffer, io buffers, poisoning) runs on every exit.
        let res = (|| {
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            self.mem_mgr.write_guest_function_call(buffer)?;

            let dispatch_res = self.vm.dispatch_call_from_host(
                &mut self.mem_mgr,
                &self.host_funcs,
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            );

            // Convert dispatch errors to HyperlightErrors to maintain backwards compatibility
            // but first determine if sandbox should be poisoned
            if let Err(e) = dispatch_res {
                let (error, should_poison) = e.promote();
                self.poisoned |= should_poison;
                return Err(error);
            }

            let guest_result = self.mem_mgr.get_guest_function_call_result()?.into_inner();

            match guest_result {
                Ok(val) => Ok(val),
                Err(guest_error) => {
                    // Count guest-reported failures by error code before
                    // converting them into a HyperlightError.
                    metrics::counter!(
                        METRIC_GUEST_ERROR,
                        METRIC_GUEST_ERROR_LABEL_CODE => (guest_error.code as u64).to_string()
                    )
                    .increment(1);

                    Err(HyperlightError::GuestError(
                        guest_error.code,
                        guest_error.message,
                    ))
                }
            }
        })();

        // Clear partial abort bytes so they don't leak across calls.
        self.mem_mgr.abort_buffer.clear();

        // In the happy path we do not need to clear io-buffers from the host because:
        // - the serialized guest function call is zeroed out by the guest during deserialization, see call to `try_pop_shared_input_data_into::<FunctionCall>()`
        // - the serialized guest function result is zeroed out by us (the host) during deserialization, see `get_guest_function_call_result`
        // - any serialized host function call are zeroed out by us (the host) during deserialization, see `get_host_function_call`
        // - any serialized host function result is zeroed out by the guest during deserialization, see `get_host_return_value`
        if let Err(e) = &res {
            self.mem_mgr.clear_io_buffers();

            // Determine if we should poison the sandbox.
            self.poisoned |= e.is_poison_error();
        }

        // NOTE(review): there is no `_guard` binding in this function; the
        // comment below looks stale (possibly left over from a removed
        // call-active guard) — confirm and delete if so.
        // Note: clear_call_active() is automatically called when _guard is dropped here

        res
    }
795
    /// Returns a handle for interrupting guest execution.
    ///
    /// The returned handle is reference-counted (`Arc`) and can be cloned
    /// and moved to another thread to interrupt a call in progress, as in
    /// the example below.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # use std::thread;
    /// # use std::time::Duration;
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Get interrupt handle before starting long-running operation
    /// let interrupt_handle = sandbox.interrupt_handle();
    ///
    /// // Spawn thread to interrupt after timeout
    /// let handle_clone = interrupt_handle.clone();
    /// thread::spawn(move || {
    ///     thread::sleep(Duration::from_secs(5));
    ///     handle_clone.kill();
    /// });
    ///
    /// // This call may be interrupted by the spawned thread
    /// let result = sandbox.call_guest_function_by_name::<i32>("LongRunningFunction", ());
    /// # Ok(())
    /// # }
    /// ```
    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
        // The underlying VM owns the interrupt plumbing; hand out its handle.
        self.vm.interrupt_handle()
    }
828
    /// Generate a crash dump of the current state of the VM underlying this sandbox.
    ///
    /// Creates an ELF core dump file that can be used for debugging. The dump
    /// captures the current state of the sandbox including registers, memory regions,
    /// and other execution context.
    ///
    /// The location of the core dump file is determined by the `HYPERLIGHT_CORE_DUMP_DIR`
    /// environment variable. If not set, it defaults to the system's temporary directory.
    ///
    /// This is only available when the `crashdump` feature is enabled and then only if the sandbox
    /// is also configured to allow core dumps (which is the default behavior).
    ///
    /// This can be useful for generating a crash dump from gdb when trying to debug issues in the
    /// guest that dont cause crashes (e.g. a guest function that does not return)
    ///
    /// # Examples
    ///
    /// Attach to your running process with gdb and call this function:
    ///
    /// ```shell
    /// sudo gdb -p <pid_of_your_process>
    /// (gdb) info threads
    /// # find the thread that is running the guest function you want to debug
    /// (gdb) thread <thread_number>
    /// # switch to the frame where you have access to your MultiUseSandbox instance
    /// (gdb) backtrace
    /// (gdb) frame <frame_number>
    /// # get the pointer to your MultiUseSandbox instance
    /// # Get the sandbox pointer
    /// (gdb) print sandbox
    /// # Call the crashdump function
    /// call sandbox.generate_crashdump()
    /// ```
    /// The crashdump should be available in crash dump directory (see `HYPERLIGHT_CORE_DUMP_DIR` env var).
    ///
    #[cfg(crashdump)]
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn generate_crashdump(&mut self) -> Result<()> {
        // `None` ⇒ output directory is chosen from HYPERLIGHT_CORE_DUMP_DIR
        // (or the system temp dir); see `generate_crashdump_to_dir` for the
        // explicit-directory variant.
        crate::hypervisor::crashdump::generate_crashdump(&self.vm, &mut self.mem_mgr, None)
    }
869
870    /// Generate a crash dump of the current state of the VM, writing to `dir`.
871    ///
872    /// Like [`generate_crashdump`](Self::generate_crashdump), but the core dump
873    /// file is placed in `dir` instead of consulting the `HYPERLIGHT_CORE_DUMP_DIR`
874    /// environment variable.  This avoids the need for callers to use
875    /// `unsafe { std::env::set_var(...) }`.
876    #[cfg(crashdump)]
877    #[instrument(err(Debug), skip_all, parent = Span::current())]
878    pub fn generate_crashdump_to_dir(&mut self, dir: impl Into<String>) -> Result<()> {
879        crate::hypervisor::crashdump::generate_crashdump(
880            &self.vm,
881            &mut self.mem_mgr,
882            Some(dir.into()),
883        )
884    }
885
    /// Returns whether the sandbox is currently poisoned.
    ///
    /// A poisoned sandbox is in an inconsistent state due to the guest not running to completion.
    /// All operations will be rejected until the sandbox is restored from a non-poisoned snapshot.
    ///
    /// ## Causes of Poisoning
    ///
    /// The sandbox becomes poisoned when guest execution is interrupted:
    /// - **Panics/Aborts** - Guest code panics or calls `abort()`
    /// - **Invalid Memory Access** - Read/write/execute violations
    /// - **Stack Overflow** - Guest exhausts stack space
    /// - **Heap Exhaustion** - Guest runs out of heap memory
    /// - **Forced Termination** - [`InterruptHandle::kill()`] called during execution
    ///
    /// ## Recovery
    ///
    /// To clear the poison state, use [`restore()`](Self::restore) with a snapshot
    /// that was taken before the sandbox became poisoned.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Check if sandbox is poisoned
    /// if sandbox.poisoned() {
    ///     println!("Sandbox is poisoned and needs attention");
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn poisoned(&self) -> bool {
        // Cheap flag read; the flag is set on dispatch/guest errors and
        // cleared only by restoring a non-poisoned snapshot.
        self.poisoned
    }
925}
926
927impl Callable for MultiUseSandbox {
928    fn call<Output: SupportedReturnType>(
929        &mut self,
930        func_name: &str,
931        args: impl ParameterTuple,
932    ) -> Result<Output> {
933        if self.poisoned {
934            return Err(crate::HyperlightError::PoisonedSandbox);
935        }
936        self.call(func_name, args)
937    }
938}
939
940impl std::fmt::Debug for MultiUseSandbox {
941    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
942        f.debug_struct("MultiUseSandbox").finish()
943    }
944}
945
946#[cfg(test)]
947mod tests {
948    use std::sync::{Arc, Barrier};
949    use std::thread;
950
951    use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
952    use hyperlight_testing::sandbox_sizes::{LARGE_HEAP_SIZE, MEDIUM_HEAP_SIZE, SMALL_HEAP_SIZE};
953    use hyperlight_testing::simple_guest_as_string;
954
955    use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
956    use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
957    use crate::sandbox::SandboxConfiguration;
958    use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};
959
    /// Exercises the full poisoning lifecycle: a guest panic poisons the
    /// sandbox, every sandbox API rejects operations while poisoned, and
    /// restoring a snapshot taken before the poisoning clears it — twice,
    /// proving the cycle is repeatable.
    #[test]
    fn poison() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();
        // Taken while healthy — used below to clear the poison.
        let snapshot = sbox.snapshot().unwrap();

        // poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // guest calls should fail when poisoned
        let res = sbox
            .call::<()>("guest_panic", "hello2".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // snapshot should fail when poisoned
        if let Err(e) = sbox.snapshot() {
            assert!(sbox.poisoned());
            assert!(matches!(e, HyperlightError::PoisonedSandbox));
        } else {
            panic!("Snapshot should fail");
        }

        // map_region should fail when poisoned
        {
            let map_mem = allocate_guest_memory();
            let guest_base = 0x0;
            let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
            let res = unsafe { sbox.map_region(&region) }.unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
        }

        // map_file_cow should fail when poisoned
        {
            let temp_file = std::env::temp_dir().join("test_poison_map_file.bin");
            let res = sbox.map_file_cow(&temp_file, 0x0, None).unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
            std::fs::remove_file(&temp_file).ok(); // Clean up
        }

        // call_guest_function_by_name (deprecated) should fail when poisoned
        #[allow(deprecated)]
        let res = sbox
            .call_guest_function_by_name::<String>("Echo", "test".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // restore to non-poisoned snapshot should work and clear poison
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again after restore
        let res = sbox.call::<String>("Echo", "hello2".to_string()).unwrap();
        assert_eq!(res, "hello2".to_string());
        assert!(!sbox.poisoned());

        // re-poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // restore to non-poisoned snapshot should work again
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again
        let res = sbox.call::<String>("Echo", "hello3".to_string()).unwrap();
        assert_eq!(res, "hello3".to_string());
        assert!(!sbox.poisoned());

        // snapshot should work again
        let _ = sbox.snapshot().unwrap();
    }
1047
1048    /// Make sure input/output buffers are properly reset after guest call (with host call)
1049    #[test]
1050    fn host_func_error() {
1051        let path = simple_guest_as_string().unwrap();
1052        let mut sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1053        sandbox
1054            .register("HostError", || -> Result<()> {
1055                Err(HyperlightError::Error("hi".to_string()))
1056            })
1057            .unwrap();
1058        let mut sandbox = sandbox.evolve().unwrap();
1059
1060        // will exhaust io if leaky
1061        for _ in 0..1000 {
1062            let result = sandbox
1063                .call::<i64>(
1064                    "CallGivenParamlessHostFuncThatReturnsI64",
1065                    "HostError".to_string(),
1066                )
1067                .unwrap_err();
1068
1069            assert!(
1070                matches!(result, HyperlightError::GuestError(code, msg) if code == ErrorCode::HostFunctionError && msg == "hi"),
1071            );
1072        }
1073    }
1074
1075    #[test]
1076    fn call_host_func_expect_error() {
1077        let path = simple_guest_as_string().unwrap();
1078        let sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1079        let mut sandbox = sandbox.evolve().unwrap();
1080        sandbox
1081            .call::<()>("CallHostExpectError", "SomeUnknownHostFunc".to_string())
1082            .unwrap();
1083    }
1084
1085    /// Make sure input/output buffers are properly reset after guest call (with host call)
1086    #[test]
1087    fn io_buffer_reset() {
1088        let mut cfg = SandboxConfiguration::default();
1089        cfg.set_input_data_size(4096);
1090        cfg.set_output_data_size(4096);
1091        let path = simple_guest_as_string().unwrap();
1092        let mut sandbox =
1093            UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1094        sandbox.register("HostAdd", |a: i32, b: i32| a + b).unwrap();
1095        let mut sandbox = sandbox.evolve().unwrap();
1096
1097        // will exhaust io if leaky. Tests both success and error paths
1098        for _ in 0..1000 {
1099            let result = sandbox.call::<i32>("Add", (5i32, 10i32)).unwrap();
1100            assert_eq!(result, 15);
1101            let result = sandbox.call::<i32>("AddToStaticAndFail", ()).unwrap_err();
1102            assert!(
1103                matches!(result, HyperlightError::GuestError (code, msg ) if code == ErrorCode::GuestError && msg == "Crash on purpose")
1104            );
1105        }
1106    }
1107
1108    /// Tests that call_guest_function_by_name restores the state correctly
1109    #[test]
1110    fn test_call_guest_function_by_name() {
1111        let mut sbox: MultiUseSandbox = {
1112            let path = simple_guest_as_string().unwrap();
1113            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1114            u_sbox.evolve()
1115        }
1116        .unwrap();
1117
1118        let snapshot = sbox.snapshot().unwrap();
1119
1120        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1121        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1122        assert_eq!(res, 5);
1123
1124        sbox.restore(snapshot).unwrap();
1125        #[allow(deprecated)]
1126        let _ = sbox
1127            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
1128            .unwrap();
1129        #[allow(deprecated)]
1130        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
1131        assert_eq!(res, 0);
1132    }
1133
1134    // Tests to ensure that many (1000) function calls can be made in a call context with a small stack (24K) and heap(20K).
1135    // This test effectively ensures that the stack is being properly reset after each call and we are not leaking memory in the Guest.
1136    #[test]
1137    fn test_with_small_stack_and_heap() {
1138        let mut cfg = SandboxConfiguration::default();
1139        cfg.set_heap_size(20 * 1024);
1140        // min_scratch_size already includes 1 page (4k on most
1141        // platforms) of guest stack, so add 20k more to get 24k
1142        // total, and then add some more for the eagerly-copied page
1143        // tables on amd64
1144        let min_scratch = hyperlight_common::layout::min_scratch_size(
1145            cfg.get_input_data_size(),
1146            cfg.get_output_data_size(),
1147        );
1148        cfg.set_scratch_size(min_scratch + 0x10000 + 0x10000);
1149
1150        let mut sbox1: MultiUseSandbox = {
1151            let path = simple_guest_as_string().unwrap();
1152            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1153            u_sbox.evolve()
1154        }
1155        .unwrap();
1156
1157        for _ in 0..1000 {
1158            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
1159        }
1160
1161        let mut sbox2: MultiUseSandbox = {
1162            let path = simple_guest_as_string().unwrap();
1163            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1164            u_sbox.evolve()
1165        }
1166        .unwrap();
1167
1168        for i in 0..1000 {
1169            sbox2
1170                .call::<i32>(
1171                    "PrintUsingPrintf",
1172                    format!("Hello World {}\n", i).to_string(),
1173                )
1174                .unwrap();
1175        }
1176    }
1177
1178    /// Tests that evolving from MultiUseSandbox to MultiUseSandbox creates a new state
1179    /// and restoring a snapshot from before evolving restores the previous state
1180    #[test]
1181    fn snapshot_evolve_restore_handles_state_correctly() {
1182        let mut sbox: MultiUseSandbox = {
1183            let path = simple_guest_as_string().unwrap();
1184            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1185            u_sbox.evolve()
1186        }
1187        .unwrap();
1188
1189        let snapshot = sbox.snapshot().unwrap();
1190
1191        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1192
1193        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1194        assert_eq!(res, 5);
1195
1196        sbox.restore(snapshot).unwrap();
1197        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1198        assert_eq!(res, 0);
1199    }
1200
1201    #[test]
1202    fn test_trigger_exception_on_guest() {
1203        let usbox = UninitializedSandbox::new(
1204            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1205            None,
1206        )
1207        .unwrap();
1208
1209        let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
1210
1211        let res: Result<()> = multi_use_sandbox.call("TriggerException", ());
1212
1213        assert!(res.is_err());
1214
1215        match res.unwrap_err() {
1216            HyperlightError::GuestAborted(_, msg) => {
1217                // msg should indicate we got an invalid opcode exception
1218                assert!(msg.contains("InvalidOpcode"));
1219            }
1220            e => panic!(
1221                "Expected HyperlightError::GuestExecutionError but got {:?}",
1222                e
1223            ),
1224        }
1225    }
1226
1227    #[test]
1228    fn create_200_sandboxes() {
1229        const NUM_THREADS: usize = 10;
1230        const SANDBOXES_PER_THREAD: usize = 20;
1231
1232        // barrier to make sure all threads start their work simultaneously
1233        let start_barrier = Arc::new(Barrier::new(NUM_THREADS + 1));
1234        let mut thread_handles = vec![];
1235
1236        for _ in 0..NUM_THREADS {
1237            let barrier = start_barrier.clone();
1238
1239            let handle = thread::spawn(move || {
1240                barrier.wait();
1241
1242                for _ in 0..SANDBOXES_PER_THREAD {
1243                    let guest_path = simple_guest_as_string().expect("Guest Binary Missing");
1244                    let uninit =
1245                        UninitializedSandbox::new(GuestBinary::FilePath(guest_path), None).unwrap();
1246
1247                    let mut sandbox: MultiUseSandbox = uninit.evolve().unwrap();
1248
1249                    let result: i32 = sandbox.call("GetStatic", ()).unwrap();
1250                    assert_eq!(result, 0);
1251                }
1252            });
1253
1254            thread_handles.push(handle);
1255        }
1256
1257        start_barrier.wait();
1258
1259        for handle in thread_handles {
1260            handle.join().unwrap();
1261        }
1262    }
1263
1264    #[test]
1265    fn test_mmap() {
1266        let mut sbox = UninitializedSandbox::new(
1267            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1268            None,
1269        )
1270        .unwrap()
1271        .evolve()
1272        .unwrap();
1273
1274        let expected = b"hello world";
1275        let map_mem = page_aligned_memory(expected);
1276        let guest_base = 0x1_0000_0000; // Arbitrary guest base address
1277
1278        unsafe {
1279            sbox.map_region(&region_for_memory(
1280                &map_mem,
1281                guest_base,
1282                MemoryRegionFlags::READ,
1283            ))
1284            .unwrap();
1285        }
1286
1287        let _guard = map_mem.lock.try_read().unwrap();
1288        let actual: Vec<u8> = sbox
1289            .call(
1290                "ReadMappedBuffer",
1291                (guest_base as u64, expected.len() as u64, true),
1292            )
1293            .unwrap();
1294
1295        assert_eq!(actual, expected);
1296    }
1297
    // Makes sure a region mapped with MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE
    // is executable but not writable.
    #[test]
    fn test_mmap_write_exec() {
        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        let expected = &[0x90, 0x90, 0x90, 0xC3]; // NOOP slide to RET
        let map_mem = page_aligned_memory(expected);
        let guest_base = 0x1_0000_0000; // Arbitrary guest base address

        // Map the buffer readable + executable, but NOT writable.
        unsafe {
            sbox.map_region(&region_for_memory(
                &map_mem,
                guest_base,
                MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
            ))
            .unwrap();
        }

        // Hold a read lock on the backing memory while the guest uses it.
        let _guard = map_mem.lock.try_read().unwrap();

        // Execute should pass since memory is executable
        let succeed = sbox
            .call::<bool>(
                "ExecMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap();
        assert!(succeed, "Expected execution of mapped buffer to succeed");

        // write should fail because the memory is mapped as read-only
        let err = sbox
            .call::<bool>(
                "WriteMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap_err();

        // The reported faulting address should be the start of the region.
        match err {
            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
            _ => panic!("Expected MemoryAccessViolation error"),
        };
    }
1346
1347    fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
1348        use hyperlight_common::mem::PAGE_SIZE_USIZE;
1349
1350        let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
1351
1352        let mut mem = ExclusiveSharedMemory::new(len).unwrap();
1353        mem.copy_from_slice(src, 0).unwrap();
1354
1355        let (_, guest_mem) = mem.build();
1356
1357        guest_mem
1358    }
1359
1360    fn region_for_memory(
1361        mem: &GuestSharedMemory,
1362        guest_base: usize,
1363        flags: MemoryRegionFlags,
1364    ) -> MemoryRegion {
1365        let len = mem.mem_size();
1366        MemoryRegion {
1367            host_region: mem.host_region_base()..mem.host_region_end(),
1368            guest_region: guest_base..(guest_base + len),
1369            flags,
1370            region_type: MemoryRegionType::Heap,
1371        }
1372    }
1373
1374    fn allocate_guest_memory() -> GuestSharedMemory {
1375        page_aligned_memory(b"test data for snapshot")
1376    }
1377
    /// Snapshots must capture which extra regions are mapped: restoring
    /// backwards unmaps a region that was mapped after the snapshot, and
    /// restoring forwards re-establishes it (folded into the snapshot).
    #[test]
    fn snapshot_restore_handles_remapping_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        // 1. Take snapshot 1 with no additional regions mapped
        let snapshot1 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);

        // 2. Map a memory region
        let map_mem = allocate_guest_memory();
        let guest_base = 0x200000000_usize;
        let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);

        unsafe { sbox.map_region(&region).unwrap() };
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);
        let orig_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    true,
                ),
            )
            .unwrap();

        // 3. Take snapshot 2 with 1 region mapped
        let snapshot2 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);

        // 4. Restore to snapshot 1 (should unmap the region)
        sbox.restore(snapshot1.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(!is_mapped);

        // 5. Restore forward to snapshot 2 (should have folded the
        //    region into the snapshot — hence 0 *extra* mapped regions
        //    even though the guest still sees the mapping)
        sbox.restore(snapshot2.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(is_mapped);

        // Verify the region is the same
        let new_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    false,
                ),
            )
            .unwrap();
        assert_eq!(new_read, orig_read);
    }
1442
1443    #[test]
1444    fn snapshot_different_sandbox() {
1445        let mut sandbox = {
1446            let path = simple_guest_as_string().unwrap();
1447            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1448            u_sbox.evolve().unwrap()
1449        };
1450
1451        let mut sandbox2 = {
1452            let path = simple_guest_as_string().unwrap();
1453            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1454            u_sbox.evolve().unwrap()
1455        };
1456        assert_ne!(sandbox.id, sandbox2.id);
1457
1458        let snapshot = sandbox.snapshot().unwrap();
1459        let err = sandbox2.restore(snapshot.clone());
1460        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));
1461
1462        let sandbox_id = sandbox.id;
1463        drop(sandbox);
1464        drop(sandbox2);
1465        drop(snapshot);
1466
1467        let sandbox3 = {
1468            let path = simple_guest_as_string().unwrap();
1469            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1470            u_sbox.evolve().unwrap()
1471        };
1472        assert_ne!(sandbox3.id, sandbox_id);
1473    }
1474
1475    /// Test that snapshot restore properly resets vCPU debug registers. This test verifies
1476    /// that restore() calls reset_vcpu().
1477    #[test]
1478    fn snapshot_restore_resets_debug_registers() {
1479        let mut sandbox: MultiUseSandbox = {
1480            let path = simple_guest_as_string().unwrap();
1481            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1482            u_sbox.evolve().unwrap()
1483        };
1484
1485        let snapshot = sandbox.snapshot().unwrap();
1486
1487        // Verify DR0 is initially 0 (clean state)
1488        let dr0_initial: u64 = sandbox.call("GetDr0", ()).unwrap();
1489        assert_eq!(dr0_initial, 0, "DR0 should initially be 0");
1490
1491        // Dirty DR0 by setting it to a known non-zero value
1492        const DIRTY_VALUE: u64 = 0xDEAD_BEEF_CAFE_BABE;
1493        sandbox.call::<()>("SetDr0", DIRTY_VALUE).unwrap();
1494        let dr0_dirty: u64 = sandbox.call("GetDr0", ()).unwrap();
1495        assert_eq!(
1496            dr0_dirty, DIRTY_VALUE,
1497            "DR0 should be dirty after SetDr0 call"
1498        );
1499
1500        // Restore to the snapshot - this should reset vCPU state including debug registers
1501        sandbox.restore(snapshot).unwrap();
1502
1503        let dr0_after_restore: u64 = sandbox.call("GetDr0", ()).unwrap();
1504        assert_eq!(
1505            dr0_after_restore, 0,
1506            "DR0 should be 0 after restore (reset_vcpu should have been called)"
1507        );
1508    }
1509
1510    /// Test that stale abort buffer bytes from a previous call don't
1511    /// leak into the next call.
1512    #[test]
1513    fn stale_abort_buffer_does_not_leak_across_calls() {
1514        let mut sbox: MultiUseSandbox = {
1515            let path = simple_guest_as_string().unwrap();
1516            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1517            u_sbox.evolve().unwrap()
1518        };
1519
1520        // Simulate a partial abort
1521        sbox.mem_mgr.abort_buffer.extend_from_slice(&[0xAA; 1020]);
1522
1523        let res = sbox.call::<String>("Echo", "hello".to_string());
1524        assert!(
1525            res.is_ok(),
1526            "Expected Ok after stale abort buffer, got: {:?}",
1527            res.unwrap_err()
1528        );
1529
1530        // The buffer should be empty after the call.
1531        assert!(
1532            sbox.mem_mgr.abort_buffer.is_empty(),
1533            "abort_buffer should be empty after a guest call"
1534        );
1535    }
1536
1537    /// Test that sandboxes can be created and evolved with different heap sizes
1538    #[test]
1539    fn test_sandbox_creation_various_sizes() {
1540        let test_cases: [(&str, u64); 3] = [
1541            ("small (8MB heap)", SMALL_HEAP_SIZE),
1542            ("medium (64MB heap)", MEDIUM_HEAP_SIZE),
1543            ("large (256MB heap)", LARGE_HEAP_SIZE),
1544        ];
1545
1546        for (name, heap_size) in test_cases {
1547            let mut cfg = SandboxConfiguration::default();
1548            cfg.set_heap_size(heap_size);
1549            cfg.set_scratch_size(0x100000);
1550
1551            let path = simple_guest_as_string().unwrap();
1552            let sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg))
1553                .unwrap_or_else(|e| panic!("Failed to create {} sandbox: {}", name, e))
1554                .evolve()
1555                .unwrap_or_else(|e| panic!("Failed to evolve {} sandbox: {}", name, e));
1556
1557            drop(sbox);
1558        }
1559    }
1560
1561    /// Helper: create a MultiUseSandbox from the simple guest with default config.
1562    #[cfg(feature = "trace_guest")]
1563    fn sandbox_for_gva_tests() -> MultiUseSandbox {
1564        let path = simple_guest_as_string().unwrap();
1565        UninitializedSandbox::new(GuestBinary::FilePath(path), None)
1566            .unwrap()
1567            .evolve()
1568            .unwrap()
1569    }
1570
    /// Helper: read memory at `gva` of length `len` from the guest side via
    /// `ReadMappedBuffer(gva, len, true)` and from the host side via
    /// `read_guest_memory_by_gva`, then assert both views are identical.
    ///
    /// NOTE(review): the third `ReadMappedBuffer` argument is `true` in the
    /// code (the previous doc text said `false`); its exact semantics are
    /// defined by the guest — confirm against the guest implementation.
    #[cfg(feature = "trace_guest")]
    fn assert_gva_read_matches(sbox: &mut MultiUseSandbox, gva: u64, len: usize) {
        // Guest reads via its own page tables
        let expected: Vec<u8> = sbox
            .call("ReadMappedBuffer", (gva, len as u64, true))
            .unwrap();
        assert_eq!(expected.len(), len);

        // Host reads by walking the same page tables, starting from the
        // guest's root page table pointer.
        let root_pt = sbox.vm.get_root_pt().unwrap();
        let actual = sbox
            .mem_mgr
            .read_guest_memory_by_gva(gva, len, root_pt)
            .unwrap();

        assert_eq!(
            actual, expected,
            "read_guest_memory_by_gva at GVA {:#x} (len {}) differs from guest ReadMappedBuffer",
            gva, len,
        );
    }
1595
1596    /// Test reading a small buffer (< 1 page) from guest memory via GVA.
1597    /// Uses the guest code section which is already identity-mapped.
1598    #[test]
1599    #[cfg(feature = "trace_guest")]
1600    fn read_guest_memory_by_gva_single_page() {
1601        let mut sbox = sandbox_for_gva_tests();
1602        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1603        assert_gva_read_matches(&mut sbox, code_gva, 128);
1604    }
1605
1606    /// Test reading exactly one full page (4096 bytes) from guest memory.
1607    /// Uses the guest code section
1608    #[test]
1609    #[cfg(feature = "trace_guest")]
1610    fn read_guest_memory_by_gva_full_page() {
1611        let mut sbox = sandbox_for_gva_tests();
1612        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1613        assert_gva_read_matches(&mut sbox, code_gva, 4096);
1614    }
1615
1616    /// Test that a read starting at an odd (non-page-aligned) address and
1617    /// spanning two page boundaries returns correct data.
1618    #[test]
1619    #[cfg(feature = "trace_guest")]
1620    fn read_guest_memory_by_gva_unaligned_cross_page() {
1621        let mut sbox = sandbox_for_gva_tests();
1622        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1623        // Start 1 byte before the second page boundary and read 4097 bytes
1624        // (spans 2 full page boundaries).
1625        let start = code_gva + 4096 - 1;
1626        println!(
1627            "Testing unaligned cross-page read starting at {:#x} spanning 4097 bytes",
1628            start
1629        );
1630        assert_gva_read_matches(&mut sbox, start, 4097);
1631    }
1632
1633    /// Test reading exactly two full pages (8192 bytes) from guest memory.
1634    #[test]
1635    #[cfg(feature = "trace_guest")]
1636    fn read_guest_memory_by_gva_two_full_pages() {
1637        let mut sbox = sandbox_for_gva_tests();
1638        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1639        assert_gva_read_matches(&mut sbox, code_gva, 4096 * 2);
1640    }
1641
1642    /// Test reading a region that spans across a page boundary: starts
1643    /// 100 bytes before the end of the first page and reads 200 bytes
1644    /// into the second page.
1645    #[test]
1646    #[cfg(feature = "trace_guest")]
1647    fn read_guest_memory_by_gva_cross_page_boundary() {
1648        let mut sbox = sandbox_for_gva_tests();
1649        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1650        // Start 100 bytes before the first page boundary, read across it.
1651        let start = code_gva + 4096 - 100;
1652        assert_gva_read_matches(&mut sbox, start, 200);
1653    }
1654
1655    /// Helper: create a temp file with known content, padded to be
1656    /// at least page-aligned (4096 bytes). Returns the path and the
1657    /// *original* content bytes (before padding).
1658    fn create_test_file(name: &str, content: &[u8]) -> (std::path::PathBuf, Vec<u8>) {
1659        use std::io::Write;
1660
1661        let page_size = page_size::get();
1662        let padded_len = content.len().max(page_size).div_ceil(page_size) * page_size;
1663        let mut padded = vec![0u8; padded_len];
1664        padded[..content.len()].copy_from_slice(content);
1665
1666        let temp_dir = std::env::temp_dir();
1667        let path = temp_dir.join(name);
1668        let _ = std::fs::remove_file(&path); // clean up from previous runs
1669        let mut f = std::fs::File::create(&path).unwrap();
1670        f.write_all(&padded).unwrap();
1671        (path, content.to_vec())
1672    }
1673
1674    /// Tests the basic `map_file_cow` flow: map a file, read its content
1675    /// from the guest, and verify it matches.
1676    #[test]
1677    fn test_map_file_cow_basic() {
1678        let expected = b"hello world from map_file_cow";
1679        let (path, expected_bytes) =
1680            create_test_file("hyperlight_test_map_file_cow_basic.bin", expected);
1681
1682        let mut sbox = UninitializedSandbox::new(
1683            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1684            None,
1685        )
1686        .unwrap()
1687        .evolve()
1688        .unwrap();
1689
1690        let guest_base: u64 = 0x1_0000_0000;
1691        let mapped_size = sbox.map_file_cow(&path, guest_base, None).unwrap();
1692        assert!(mapped_size > 0, "mapped_size should be positive");
1693        assert!(
1694            mapped_size >= expected.len() as u64,
1695            "mapped_size should be >= file content length"
1696        );
1697
1698        // Read the content back from the guest
1699        let actual: Vec<u8> = sbox
1700            .call(
1701                "ReadMappedBuffer",
1702                (guest_base, expected_bytes.len() as u64, true),
1703            )
1704            .unwrap();
1705
1706        assert_eq!(
1707            actual, expected_bytes,
1708            "Guest should read back the exact file content"
1709        );
1710
1711        // Clean up
1712        let _ = std::fs::remove_file(&path);
1713    }
1714
1715    /// Tests that `map_file_cow` enforces read-only access: writing to
1716    /// the mapped region from the guest should cause a MemoryAccessViolation.
1717    #[test]
1718    fn test_map_file_cow_read_only_enforcement() {
1719        let content = &[0xBB; 4096];
1720        let (path, _) = create_test_file("hyperlight_test_map_file_cow_readonly.bin", content);
1721
1722        let mut sbox = UninitializedSandbox::new(
1723            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1724            None,
1725        )
1726        .unwrap()
1727        .evolve()
1728        .unwrap();
1729
1730        let guest_base: u64 = 0x1_0000_0000;
1731        sbox.map_file_cow(&path, guest_base, None).unwrap();
1732
1733        // Writing to the mapped region should fail with MemoryAccessViolation
1734        let err = sbox
1735            .call::<bool>("WriteMappedBuffer", (guest_base, content.len() as u64))
1736            .unwrap_err();
1737
1738        match err {
1739            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base => {}
1740            _ => panic!(
1741                "Expected MemoryAccessViolation at guest_base, got: {:?}",
1742                err
1743            ),
1744        };
1745
1746        // Clean up
1747        let _ = std::fs::remove_file(&path);
1748    }
1749
1750    /// Tests that `map_file_cow` returns `PoisonedSandbox` when the
1751    /// sandbox is poisoned.
1752    #[test]
1753    fn test_map_file_cow_poisoned() {
1754        let (path, _) = create_test_file("hyperlight_test_map_file_cow_poison.bin", &[0xCC; 4096]);
1755
1756        let mut sbox: MultiUseSandbox = {
1757            let path = simple_guest_as_string().unwrap();
1758            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1759            u_sbox.evolve()
1760        }
1761        .unwrap();
1762        let snapshot = sbox.snapshot().unwrap();
1763
1764        // Poison the sandbox
1765        let _ = sbox
1766            .call::<()>("guest_panic", "hello".to_string())
1767            .unwrap_err();
1768        assert!(sbox.poisoned());
1769
1770        // map_file_cow should fail with PoisonedSandbox
1771        let err = sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap_err();
1772        assert!(matches!(err, HyperlightError::PoisonedSandbox));
1773
1774        // Restore and verify map_file_cow works again
1775        sbox.restore(snapshot).unwrap();
1776        assert!(!sbox.poisoned());
1777        let result = sbox.map_file_cow(&path, 0x1_0000_0000, None);
1778        assert!(result.is_ok());
1779
1780        let _ = std::fs::remove_file(&path);
1781    }
1782
1783    /// Tests that two separate sandboxes can map the same file
1784    /// simultaneously and both read it correctly.
1785    #[test]
1786    fn test_map_file_cow_multi_vm_same_file() {
1787        let expected = b"shared file content across VMs";
1788        let (path, expected_bytes) =
1789            create_test_file("hyperlight_test_map_file_cow_multi_vm.bin", expected);
1790
1791        let guest_base: u64 = 0x1_0000_0000;
1792
1793        let mut sbox1 = UninitializedSandbox::new(
1794            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1795            None,
1796        )
1797        .unwrap()
1798        .evolve()
1799        .unwrap();
1800
1801        let mut sbox2 = UninitializedSandbox::new(
1802            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1803            None,
1804        )
1805        .unwrap()
1806        .evolve()
1807        .unwrap();
1808
1809        // Map the same file into both sandboxes
1810        sbox1.map_file_cow(&path, guest_base, None).unwrap();
1811        sbox2.map_file_cow(&path, guest_base, None).unwrap();
1812
1813        // Both should read the correct content
1814        let actual1: Vec<u8> = sbox1
1815            .call(
1816                "ReadMappedBuffer",
1817                (guest_base, expected_bytes.len() as u64, true),
1818            )
1819            .unwrap();
1820        let actual2: Vec<u8> = sbox2
1821            .call(
1822                "ReadMappedBuffer",
1823                (guest_base, expected_bytes.len() as u64, true),
1824            )
1825            .unwrap();
1826
1827        assert_eq!(
1828            actual1, expected_bytes,
1829            "Sandbox 1 should read correct content"
1830        );
1831        assert_eq!(
1832            actual2, expected_bytes,
1833            "Sandbox 2 should read correct content"
1834        );
1835
1836        let _ = std::fs::remove_file(&path);
1837    }
1838
    /// Tests that multiple threads can each create a sandbox, map the
    /// same file, read it, and drop without errors.
    #[test]
    fn test_map_file_cow_multi_vm_threaded() {
        let expected = b"threaded file mapping test data";
        let (path, expected_bytes) =
            create_test_file("hyperlight_test_map_file_cow_threaded.bin", expected);

        const NUM_THREADS: usize = 5;
        // Shared read-only state for all worker threads.
        let path = Arc::new(path);
        let expected_bytes = Arc::new(expected_bytes);
        // Barrier releases all threads at once so sandbox creation and file
        // mapping happen concurrently, maximizing the chance of exposing races.
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        let mut handles = vec![];

        for _ in 0..NUM_THREADS {
            let path = path.clone();
            let expected_bytes = expected_bytes.clone();
            let barrier = barrier.clone();

            handles.push(thread::spawn(move || {
                barrier.wait();

                let mut sbox = UninitializedSandbox::new(
                    GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
                    None,
                )
                .unwrap()
                .evolve()
                .unwrap();

                let guest_base: u64 = 0x1_0000_0000;
                sbox.map_file_cow(&path, guest_base, None).unwrap();

                // Each thread's guest must see the original file content.
                let actual: Vec<u8> = sbox
                    .call(
                        "ReadMappedBuffer",
                        (guest_base, expected_bytes.len() as u64, true),
                    )
                    .unwrap();

                assert_eq!(actual, *expected_bytes);
            }));
        }

        // Join every worker; a panic in any thread fails the test here.
        for h in handles {
            h.join().unwrap();
        }

        let _ = std::fs::remove_file(&*path);
    }
1889
1890    /// Tests that file cleanup works after dropping a sandbox that used
1891    /// `map_file_cow` — the file should be deletable (no leaked handles).
1892    #[test]
1893    #[cfg(target_os = "windows")]
1894    fn test_map_file_cow_cleanup_no_handle_leak() {
1895        let (path, _) = create_test_file("hyperlight_test_map_file_cow_cleanup.bin", &[0xDD; 4096]);
1896
1897        {
1898            let mut sbox = UninitializedSandbox::new(
1899                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1900                None,
1901            )
1902            .unwrap()
1903            .evolve()
1904            .unwrap();
1905
1906            sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap();
1907            // sandbox dropped here
1908        }
1909
1910        std::fs::remove_file(&path)
1911            .expect("File should be deletable after sandbox with map_file_cow is dropped");
1912    }
1913
    /// Tests snapshot/restore cycle with map_file_cow:
    /// snapshot₁ (no file) → map file → snapshot₂ → restore₁ (unmapped)
    /// → restore₂ (data folded into snapshot).
    #[test]
    fn test_map_file_cow_snapshot_remapping_cycle() {
        let expected = b"snapshot remapping cycle test!";
        let (path, expected_bytes) =
            create_test_file("hyperlight_test_map_file_cow_snapshot_remap.bin", expected);

        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        let guest_base: u64 = 0x1_0000_0000;

        // 1. snapshot₁ — no file mapped
        let snapshot1 = sbox.snapshot().unwrap();

        // 2. Map the file
        sbox.map_file_cow(&path, guest_base, None).unwrap();

        // Verify we can read it while the live file mapping is in place.
        // NOTE(review): the final `true`/`false` argument's meaning is defined
        // by the guest's ReadMappedBuffer — confirm against the guest source.
        let actual: Vec<u8> = sbox
            .call(
                "ReadMappedBuffer",
                (guest_base, expected_bytes.len() as u64, true),
            )
            .unwrap();
        assert_eq!(actual, expected_bytes);

        // 3. snapshot₂ — file mapped (data folded into snapshot)
        let snapshot2 = sbox.snapshot().unwrap();

        // 4. Restore to snapshot₁ — file should be unmapped
        //    (snapshot1 is cloned because restore consumes its argument)
        sbox.restore(snapshot1.clone()).unwrap();
        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
        assert!(
            !is_mapped,
            "Region should be unmapped after restoring to snapshot₁"
        );

        // 5. Restore to snapshot₂ — data should still be readable
        //    (folded into snapshot memory, not the original file mapping)
        sbox.restore(snapshot2).unwrap();
        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
        assert!(
            is_mapped,
            "Region should be mapped after restoring to snapshot₂"
        );
        let actual2: Vec<u8> = sbox
            .call(
                "ReadMappedBuffer",
                (guest_base, expected_bytes.len() as u64, false),
            )
            .unwrap();
        assert_eq!(
            actual2, expected_bytes,
            "Data should be intact after snapshot₂ restore"
        );

        let _ = std::fs::remove_file(&path);
    }
1980
1981    /// Tests that snapshot correctly captures map_file_cow data and
1982    /// restore brings it back.
1983    #[test]
1984    fn test_map_file_cow_snapshot_restore() {
1985        let expected = b"snapshot restore basic test!!";
1986        let (path, expected_bytes) =
1987            create_test_file("hyperlight_test_map_file_cow_snap_restore.bin", expected);
1988
1989        let mut sbox = UninitializedSandbox::new(
1990            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1991            None,
1992        )
1993        .unwrap()
1994        .evolve()
1995        .unwrap();
1996
1997        let guest_base: u64 = 0x1_0000_0000;
1998        sbox.map_file_cow(&path, guest_base, None).unwrap();
1999
2000        // Read the content to verify mapping works
2001        let actual: Vec<u8> = sbox
2002            .call(
2003                "ReadMappedBuffer",
2004                (guest_base, expected_bytes.len() as u64, true),
2005            )
2006            .unwrap();
2007        assert_eq!(actual, expected_bytes);
2008
2009        // Take snapshot — folds file data into snapshot memory
2010        let snapshot = sbox.snapshot().unwrap();
2011
2012        // Restore — the file-backed region is unmapped but data is in snapshot
2013        sbox.restore(snapshot).unwrap();
2014
2015        // Data should still be readable from snapshot memory
2016        let actual2: Vec<u8> = sbox
2017            .call(
2018                "ReadMappedBuffer",
2019                (guest_base, expected_bytes.len() as u64, false),
2020            )
2021            .unwrap();
2022        assert_eq!(
2023            actual2, expected_bytes,
2024            "Data should be readable after restore from snapshot"
2025        );
2026
2027        let _ = std::fs::remove_file(&path);
2028    }
2029
2030    /// Tests the deferred `map_file_cow` flow: map a file on
2031    /// `UninitializedSandbox` (before evolve), then evolve and verify
2032    /// the guest can read the mapped content.
2033    #[test]
2034    fn test_map_file_cow_deferred_basic() {
2035        let expected = b"deferred map_file_cow test data";
2036        let (path, expected_bytes) =
2037            create_test_file("hyperlight_test_map_file_cow_deferred.bin", expected);
2038
2039        let guest_base: u64 = 0x1_0000_0000;
2040
2041        let mut u_sbox = UninitializedSandbox::new(
2042            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2043            None,
2044        )
2045        .unwrap();
2046
2047        // Map the file before evolving — this defers the VM-side work.
2048        let mapped_size = u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2049        assert!(mapped_size > 0, "mapped_size should be positive");
2050        assert!(
2051            mapped_size >= expected.len() as u64,
2052            "mapped_size should be >= file content length"
2053        );
2054
2055        // Evolve — deferred mappings are applied during this step.
2056        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2057
2058        // Verify the guest can read the mapped content.
2059        let actual: Vec<u8> = sbox
2060            .call(
2061                "ReadMappedBuffer",
2062                (guest_base, expected_bytes.len() as u64, true),
2063            )
2064            .unwrap();
2065
2066        assert_eq!(
2067            actual, expected_bytes,
2068            "Guest should read back the exact file content after deferred mapping"
2069        );
2070
2071        let _ = std::fs::remove_file(&path);
2072    }
2073
2074    /// Tests that dropping an `UninitializedSandbox` with pending
2075    /// deferred file mappings does not leak or crash — the
2076    /// `PreparedFileMapping::Drop` should clean up host resources.
2077    #[test]
2078    fn test_map_file_cow_deferred_drop_without_evolve() {
2079        let (path, _) = create_test_file(
2080            "hyperlight_test_map_file_cow_deferred_drop.bin",
2081            &[0xAA; 4096],
2082        );
2083
2084        let guest_base: u64 = 0x1_0000_0000;
2085
2086        {
2087            let mut u_sbox = UninitializedSandbox::new(
2088                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2089                None,
2090            )
2091            .unwrap();
2092
2093            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2094            // u_sbox dropped here without evolving — PreparedFileMapping::drop
2095            // should clean up host-side OS resources.
2096        }
2097
2098        // If we get here without a crash/hang, cleanup worked.
2099        // On Windows, also verify the file handle was released.
2100        #[cfg(target_os = "windows")]
2101        std::fs::remove_file(&path)
2102            .expect("File should be deletable after dropping UninitializedSandbox");
2103        #[cfg(not(target_os = "windows"))]
2104        let _ = std::fs::remove_file(&path);
2105    }
2106
2107    /// Tests that `prepare_file_cow` rejects unaligned `guest_base`
2108    /// addresses eagerly, before allocating any OS resources.
2109    #[test]
2110    fn test_map_file_cow_unaligned_guest_base() {
2111        let (path, _) =
2112            create_test_file("hyperlight_test_map_file_cow_unaligned.bin", &[0xBB; 4096]);
2113
2114        let mut u_sbox = UninitializedSandbox::new(
2115            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2116            None,
2117        )
2118        .unwrap();
2119
2120        // Use an intentionally unaligned address (page_size + 1).
2121        let unaligned_base: u64 = (page_size::get() + 1) as u64;
2122        let result = u_sbox.map_file_cow(&path, unaligned_base, None);
2123        assert!(
2124            result.is_err(),
2125            "map_file_cow should reject unaligned guest_base"
2126        );
2127
2128        let _ = std::fs::remove_file(&path);
2129    }
2130
2131    /// Tests that `prepare_file_cow` rejects empty files.
2132    #[test]
2133    fn test_map_file_cow_empty_file() {
2134        let temp_dir = std::env::temp_dir();
2135        let path = temp_dir.join("hyperlight_test_map_file_cow_empty.bin");
2136        let _ = std::fs::remove_file(&path);
2137        std::fs::File::create(&path).unwrap(); // create empty file
2138
2139        let mut u_sbox = UninitializedSandbox::new(
2140            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2141            None,
2142        )
2143        .unwrap();
2144
2145        let guest_base: u64 = 0x1_0000_0000;
2146        let result = u_sbox.map_file_cow(&path, guest_base, None);
2147        assert!(result.is_err(), "map_file_cow should reject empty files");
2148
2149        let _ = std::fs::remove_file(&path);
2150    }
2151
2152    /// Tests that `map_file_cow` with a custom label succeeds.
2153    #[test]
2154    fn test_map_file_cow_custom_label() {
2155        let (path, _) = create_test_file("hyperlight_test_map_file_cow_label.bin", &[0xDD; 4096]);
2156
2157        let mut sbox = UninitializedSandbox::new(
2158            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2159            None,
2160        )
2161        .unwrap()
2162        .evolve()
2163        .unwrap();
2164
2165        let result = sbox.map_file_cow(&path, 0x1_0000_0000, Some("my_ramfs"));
2166        assert!(
2167            result.is_ok(),
2168            "map_file_cow with custom label should succeed"
2169        );
2170
2171        let _ = std::fs::remove_file(&path);
2172    }
2173
    /// Tests that `map_file_cow` on a MultiUseSandbox correctly writes
    /// the FileMappingInfo entry (count, guest_addr, size, label) into
    /// the PEB.
    #[test]
    #[cfg(feature = "nanvix-unstable")]
    fn test_map_file_cow_peb_entry_multiuse() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_multiuse.bin", &[0xDD; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "my_ramfs";

        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        // Map with an explicit label.
        let mapped_size = sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Read back the PEB file_mappings count directly from shared memory.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(
            count, 1,
            "PEB file_mappings count should be 1 after one mapping"
        );

        // Read back the first FileMappingInfo entry. Each field is located
        // via offset_of! relative to the start of the mappings array, so the
        // test stays in sync with the struct layout.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base, "PEB entry guest_addr should match");

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(
            stored_size, mapped_size,
            "PEB entry size should match mapped_size"
        );

        // Read back the label bytes and verify. The buffer is sized
        // FILE_MAPPING_LABEL_MAX_LEN + 1 to include the NUL terminator.
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        // Truncate at the first NUL (or take the whole buffer if none found).
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(stored_label, label, "PEB entry label should match");

        let _ = std::fs::remove_file(&path);
    }
2250
    /// Tests that deferred `map_file_cow` (before evolve) correctly
    /// writes FileMappingInfo entries into the PEB during evolve.
    #[test]
    #[cfg(feature = "nanvix-unstable")]
    fn test_map_file_cow_peb_entry_deferred() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_deferred.bin", &[0xEE; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "deferred_fs";

        let mut u_sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap();

        // Map before evolve — only records the deferred request.
        let mapped_size = u_sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Evolve — PEB entries should be written during this step.
        let sbox: MultiUseSandbox = u_sbox.evolve().unwrap();

        // Read back count from the PEB in shared memory.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(count, 1, "PEB file_mappings count should be 1 after evolve");

        // Read back the entry, locating each field with offset_of! so the
        // test stays in sync with the FileMappingInfo struct layout.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base);

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(stored_size, mapped_size);

        // Verify the label. The buffer is sized FILE_MAPPING_LABEL_MAX_LEN + 1
        // to include the NUL terminator; bytes are read one at a time.
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        // Truncate at the first NUL (or take the whole buffer if none found).
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(
            stored_label, label,
            "PEB entry label should match after evolve"
        );

        let _ = std::fs::remove_file(&path);
    }
2323
2324    /// Tests that mapping 5 files (3 deferred + 2 post-evolve) correctly
2325    /// populates all PEB FileMappingInfo slots with the right guest_addr,
2326    /// size, and label for each entry.
2327    #[test]
2328    #[cfg(feature = "nanvix-unstable")]
2329    fn test_map_file_cow_peb_multiple_entries() {
2330        use std::mem::{offset_of, size_of};
2331
2332        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};
2333
2334        const NUM_FILES: usize = 5;
2335        const DEFERRED_COUNT: usize = 3;
2336
2337        // Create 5 test files with distinct content.
2338        let mut paths = Vec::new();
2339        let mut labels: Vec<String> = Vec::new();
2340        for i in 0..NUM_FILES {
2341            let name = format!("hyperlight_test_peb_multi_{}.bin", i);
2342            let content = vec![i as u8 + 0xA0; 4096];
2343            let (path, _) = create_test_file(&name, &content);
2344            paths.push(path);
2345            labels.push(format!("file_{}", i));
2346        }
2347
2348        // Each file gets a unique guest base, spaced 1 page apart
2349        // (well outside the shared memory region).
2350        let page_size = page_size::get() as u64;
2351        let base: u64 = 0x1_0000_0000;
2352        let guest_bases: Vec<u64> = (0..NUM_FILES as u64)
2353            .map(|i| base + i * page_size)
2354            .collect();
2355
2356        let mut u_sbox = UninitializedSandbox::new(
2357            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2358            None,
2359        )
2360        .unwrap();
2361
2362        // Map 3 files before evolve (deferred path).
2363        let mut mapped_sizes = Vec::new();
2364        for i in 0..DEFERRED_COUNT {
2365            let size = u_sbox
2366                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2367                .unwrap();
2368            mapped_sizes.push(size);
2369        }
2370
2371        // Evolve — deferred mappings applied + PEB entries written.
2372        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2373
2374        // Map 2 more files post-evolve (MultiUseSandbox path).
2375        for i in DEFERRED_COUNT..NUM_FILES {
2376            let size = sbox
2377                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2378                .unwrap();
2379            mapped_sizes.push(size);
2380        }
2381
2382        // Verify PEB count equals 5.
2383        let count = sbox
2384            .mem_mgr
2385            .shared_mem
2386            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
2387            .unwrap();
2388        assert_eq!(
2389            count, NUM_FILES as u64,
2390            "PEB should have {NUM_FILES} entries"
2391        );
2392
2393        // Verify each entry's guest_addr, size, and label.
2394        let array_base = sbox.mem_mgr.layout.get_file_mappings_array_offset();
2395        for i in 0..NUM_FILES {
2396            let entry_offset = array_base + i * size_of::<FileMappingInfo>();
2397
2398            let stored_addr = sbox
2399                .mem_mgr
2400                .shared_mem
2401                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
2402                .unwrap();
2403            assert_eq!(
2404                stored_addr, guest_bases[i],
2405                "Entry {i}: guest_addr mismatch"
2406            );
2407
2408            let stored_size = sbox
2409                .mem_mgr
2410                .shared_mem
2411                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
2412                .unwrap();
2413            assert_eq!(stored_size, mapped_sizes[i], "Entry {i}: size mismatch");
2414
2415            // Read and verify the label.
2416            let label_base = entry_offset + offset_of!(FileMappingInfo, label);
2417            let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
2418            for (j, byte) in label_buf.iter_mut().enumerate() {
2419                *byte = sbox.mem_mgr.shared_mem.read::<u8>(label_base + j).unwrap();
2420            }
2421            let label_len = label_buf
2422                .iter()
2423                .position(|&b| b == 0)
2424                .unwrap_or(label_buf.len());
2425            let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
2426            assert_eq!(stored_label, labels[i], "Entry {i}: label mismatch");
2427        }
2428
2429        // Clean up.
2430        for path in &paths {
2431            let _ = std::fs::remove_file(path);
2432        }
2433    }
2434
2435    /// Tests that an explicitly provided label exceeding 63 bytes is rejected.
2436    #[test]
2437    fn test_map_file_cow_label_too_long() {
2438        let (path, _) =
2439            create_test_file("hyperlight_test_map_file_cow_long_label.bin", &[0xEE; 4096]);
2440
2441        let guest_base: u64 = 0x1_0000_0000;
2442
2443        let mut u_sbox = UninitializedSandbox::new(
2444            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2445            None,
2446        )
2447        .unwrap();
2448
2449        // A label of exactly 64 bytes exceeds the 63-byte max.
2450        let long_label = "A".repeat(64);
2451        let result = u_sbox.map_file_cow(&path, guest_base, Some(&long_label));
2452        assert!(
2453            result.is_err(),
2454            "map_file_cow should reject labels longer than 63 bytes"
2455        );
2456
2457        // Labels at exactly 63 bytes should be fine.
2458        let ok_label = "B".repeat(63);
2459        let result = u_sbox.map_file_cow(&path, guest_base, Some(&ok_label));
2460        assert!(
2461            result.is_ok(),
2462            "map_file_cow should accept labels of exactly 63 bytes"
2463        );
2464
2465        let _ = std::fs::remove_file(&path);
2466    }
2467
2468    /// Tests that labels containing null bytes are rejected.
2469    #[test]
2470    fn test_map_file_cow_label_null_byte() {
2471        let (path, _) =
2472            create_test_file("hyperlight_test_map_file_cow_null_label.bin", &[0xFF; 4096]);
2473
2474        let guest_base: u64 = 0x1_0000_0000;
2475
2476        let mut u_sbox = UninitializedSandbox::new(
2477            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2478            None,
2479        )
2480        .unwrap();
2481
2482        let result = u_sbox.map_file_cow(&path, guest_base, Some("has\0null"));
2483        assert!(
2484            result.is_err(),
2485            "map_file_cow should reject labels containing null bytes"
2486        );
2487
2488        let _ = std::fs::remove_file(&path);
2489    }
2490
2491    /// Tests that mapping two files to overlapping GPA ranges is rejected.
2492    #[test]
2493    fn test_map_file_cow_overlapping_mappings() {
2494        let (path1, _) =
2495            create_test_file("hyperlight_test_map_file_cow_overlap1.bin", &[0xAA; 4096]);
2496        let (path2, _) =
2497            create_test_file("hyperlight_test_map_file_cow_overlap2.bin", &[0xBB; 4096]);
2498
2499        let guest_base: u64 = 0x1_0000_0000;
2500
2501        let mut u_sbox = UninitializedSandbox::new(
2502            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2503            None,
2504        )
2505        .unwrap();
2506
2507        // First mapping should succeed.
2508        u_sbox.map_file_cow(&path1, guest_base, None).unwrap();
2509
2510        // Second mapping at the same address should fail (overlap).
2511        let result = u_sbox.map_file_cow(&path2, guest_base, None);
2512        assert!(
2513            result.is_err(),
2514            "map_file_cow should reject overlapping guest address ranges"
2515        );
2516
2517        let _ = std::fs::remove_file(&path1);
2518        let _ = std::fs::remove_file(&path2);
2519    }
2520
2521    /// Tests that `map_file_cow` rejects a guest_base that overlaps
2522    /// the sandbox's shared memory region.
2523    #[test]
2524    fn test_map_file_cow_shared_mem_overlap() {
2525        let (path, _) = create_test_file(
2526            "hyperlight_test_map_file_cow_overlap_shm.bin",
2527            &[0xCC; 4096],
2528        );
2529
2530        let mut u_sbox = UninitializedSandbox::new(
2531            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2532            None,
2533        )
2534        .unwrap();
2535
2536        // Use BASE_ADDRESS itself — smack in the middle of shared memory.
2537        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
2538        // page-align it (BASE_ADDRESS is 0x1000, already page-aligned)
2539        let result = u_sbox.map_file_cow(&path, base_addr, None);
2540        assert!(
2541            result.is_err(),
2542            "map_file_cow should reject guest_base inside shared memory"
2543        );
2544
2545        let _ = std::fs::remove_file(&path);
2546    }
2547
2548    /// Tests that exceeding MAX_FILE_MAPPINGS on UninitializedSandbox
2549    /// is rejected at registration time.
2550    #[test]
2551    fn test_map_file_cow_max_limit() {
2552        use hyperlight_common::mem::MAX_FILE_MAPPINGS;
2553
2554        let mut u_sbox = UninitializedSandbox::new(
2555            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2556            None,
2557        )
2558        .unwrap();
2559
2560        let page_size = page_size::get() as u64;
2561        // Base well outside shared memory.
2562        let base: u64 = 0x1_0000_0000;
2563
2564        // Register MAX_FILE_MAPPINGS files — each needs a distinct file
2565        // and a non-overlapping GPA.
2566        let mut paths = Vec::new();
2567        for i in 0..MAX_FILE_MAPPINGS {
2568            let name = format!("hyperlight_test_max_limit_{}.bin", i);
2569            let (path, _) = create_test_file(&name, &[0xAA; 4096]);
2570            let guest_base = base + (i as u64) * page_size;
2571            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2572            paths.push(path);
2573        }
2574
2575        // The (MAX_FILE_MAPPINGS + 1)th should fail.
2576        let name = format!("hyperlight_test_max_limit_{}.bin", MAX_FILE_MAPPINGS);
2577        let (path, _) = create_test_file(&name, &[0xBB; 4096]);
2578        let guest_base = base + (MAX_FILE_MAPPINGS as u64) * page_size;
2579        let result = u_sbox.map_file_cow(&path, guest_base, None);
2580        assert!(
2581            result.is_err(),
2582            "map_file_cow should reject after MAX_FILE_MAPPINGS registrations"
2583        );
2584
2585        // Clean up.
2586        for p in &paths {
2587            let _ = std::fs::remove_file(p);
2588        }
2589        let _ = std::fs::remove_file(&path);
2590    }
2591}