// hyperlight_host/sandbox/initialized_multi_use.rs
/*
Copyright 2025  The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
16
17use std::collections::HashSet;
18use std::path::Path;
19use std::sync::atomic::Ordering;
20use std::sync::{Arc, Mutex};
21
22use flatbuffers::FlatBufferBuilder;
23use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
24use hyperlight_common::flatbuffer_wrappers::function_types::{
25    ParameterValue, ReturnType, ReturnValue,
26};
27use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
28use tracing::{Span, instrument};
29
30use super::Callable;
31use super::file_mapping::prepare_file_cow;
32use super::host_funcs::FunctionRegistry;
33use super::snapshot::Snapshot;
34use crate::HyperlightError::{self, SnapshotSandboxMismatch};
35use crate::func::{ParameterTuple, SupportedReturnType};
36use crate::hypervisor::InterruptHandle;
37use crate::hypervisor::hyperlight_vm::{HyperlightVm, HyperlightVmError};
38use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
39use crate::mem::mgr::SandboxMemoryManager;
40use crate::mem::shared_mem::{HostSharedMemory, SharedMemory as _};
41use crate::metrics::{
42    METRIC_GUEST_ERROR, METRIC_GUEST_ERROR_LABEL_CODE, maybe_time_and_emit_guest_call,
43};
44use crate::{Result, log_then_return};
45
/// A fully initialized sandbox that can execute guest functions multiple times.
///
/// Guest functions can be called repeatedly while maintaining state between calls.
/// The sandbox supports creating snapshots and restoring to previous states.
///
/// ## Sandbox Poisoning
///
/// The sandbox becomes **poisoned** when the guest is not run to completion, leaving it in
/// an inconsistent state that could compromise memory safety, data integrity, or security.
///
/// ### When Does Poisoning Occur?
///
/// Poisoning happens when guest execution is interrupted before normal completion:
///
/// - **Guest panics or aborts** - When a guest function panics, crashes, or calls `abort()`,
///   the normal cleanup and unwinding process is interrupted
/// - **Invalid memory access** - Attempts to read/write/execute memory outside allowed regions
/// - **Stack overflow** - Guest exhausts its stack space during execution
/// - **Heap exhaustion** - Guest runs out of heap memory
/// - **Host-initiated cancellation** - Calling [`InterruptHandle::kill()`] to forcefully
///   terminate an in-progress guest function
///
/// ### Why This Is Unsafe
///
/// When guest execution doesn't complete normally, critical cleanup operations are skipped:
///
/// - **Memory leaks** - Heap allocations remain unreachable as the call stack is unwound
/// - **Corrupted allocator state** - Memory allocator metadata (free lists, heap headers)
///   left inconsistent
/// - **Locked resources** - Mutexes or other synchronization primitives remain locked
/// - **Partial state updates** - Data structures left half-modified (corrupted linked lists,
///   inconsistent hash tables, etc.)
///
/// ### Recovery
///
/// Use [`restore()`](Self::restore) with a snapshot taken before poisoning occurred.
/// This is the **only safe way** to recover - it completely replaces all memory state,
/// eliminating any inconsistencies. See [`restore()`](Self::restore) for details.
pub struct MultiUseSandbox {
    /// Unique identifier for this sandbox instance; snapshots record this id
    /// so they can only be restored to the sandbox they were taken from.
    id: u64,
    /// Whether this sandbox is poisoned (guest was not run to completion).
    /// Cleared only by a successful [`Self::restore`].
    poisoned: bool,
    /// Registry of host functions that the guest is allowed to call.
    pub(super) host_funcs: Arc<Mutex<FunctionRegistry>>,
    /// Manager for the sandbox's host-visible shared memory (I/O buffers,
    /// guest function call serialization, snapshots).
    pub(crate) mem_mgr: SandboxMemoryManager<HostSharedMemory>,
    /// The underlying virtual machine that executes guest code.
    vm: HyperlightVm,
    /// Shared handle to the memory manager used by the gdb debugger to
    /// service guest-memory access requests.
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    /// If the current state of the sandbox has been captured in a snapshot,
    /// that snapshot is stored here.
    snapshot: Option<Arc<Snapshot>>,
}
98
99impl MultiUseSandbox {
100    /// Move an `UninitializedSandbox` into a new `MultiUseSandbox` instance.
101    ///
102    /// This function is not equivalent to doing an `evolve` from uninitialized
103    /// to initialized, and is purposely not exposed publicly outside the crate
104    /// (as a `From` implementation would be)
105    #[instrument(skip_all, parent = Span::current(), level = "Trace")]
106    pub(super) fn from_uninit(
107        host_funcs: Arc<Mutex<FunctionRegistry>>,
108        mgr: SandboxMemoryManager<HostSharedMemory>,
109        vm: HyperlightVm,
110        #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
111    ) -> MultiUseSandbox {
112        Self {
113            id: super::snapshot::SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
114            poisoned: false,
115            host_funcs,
116            mem_mgr: mgr,
117            vm,
118            #[cfg(gdb)]
119            dbg_mem_access_fn,
120            snapshot: None,
121        }
122    }
123
124    /// Creates a snapshot of the sandbox's current memory state.
125    ///
126    /// The snapshot is tied to this specific sandbox instance and can only be
127    /// restored to the same sandbox it was created from.
128    ///
129    /// ## Poisoned Sandbox
130    ///
131    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
132    /// is currently poisoned. Snapshots can only be taken from non-poisoned sandboxes.
133    ///
134    /// # Examples
135    ///
136    /// ```no_run
137    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
138    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
139    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
140    ///     GuestBinary::FilePath("guest.bin".into()),
141    ///     None
142    /// )?.evolve()?;
143    ///
144    /// // Modify sandbox state
145    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 42)?;
146    ///
147    /// // Create snapshot belonging to this sandbox
148    /// let snapshot = sandbox.snapshot()?;
149    /// # Ok(())
150    /// # }
151    /// ```
152    #[instrument(err(Debug), skip_all, parent = Span::current())]
153    pub fn snapshot(&mut self) -> Result<Arc<Snapshot>> {
154        if self.poisoned {
155            return Err(crate::HyperlightError::PoisonedSandbox);
156        }
157
158        if let Some(snapshot) = &self.snapshot {
159            return Ok(snapshot.clone());
160        }
161        let mapped_regions_iter = self.vm.get_mapped_regions();
162        let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
163        let root_pt_gpa = self
164            .vm
165            .get_root_pt()
166            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
167        let stack_top_gpa = self.vm.get_stack_top();
168        let sregs = self
169            .vm
170            .get_snapshot_sregs()
171            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
172        let entrypoint = self.vm.get_entrypoint();
173        let memory_snapshot = self.mem_mgr.snapshot(
174            self.id,
175            mapped_regions_vec,
176            root_pt_gpa,
177            stack_top_gpa,
178            sregs,
179            entrypoint,
180        )?;
181        let snapshot = Arc::new(memory_snapshot);
182        self.snapshot = Some(snapshot.clone());
183        Ok(snapshot)
184    }
185
186    /// Restores the sandbox's memory to a previously captured snapshot state.
187    ///
188    /// The snapshot must have been created from this same sandbox instance.
189    /// Attempting to restore a snapshot from a different sandbox will return
190    /// a [`SnapshotSandboxMismatch`](crate::HyperlightError::SnapshotSandboxMismatch) error.
191    ///
192    /// ## Poison State Recovery
193    ///
194    /// This method automatically clears any poison state when successful. This is safe because:
195    /// - Snapshots can only be taken from non-poisoned sandboxes
196    /// - Restoration completely replaces all memory state, eliminating any inconsistencies
197    ///   caused by incomplete guest execution
198    ///
199    /// ### What Gets Fixed During Restore
200    ///
201    /// When a poisoned sandbox is restored, the memory state is completely reset:
202    /// - **Leaked heap memory** - All allocations from interrupted execution are discarded
203    /// - **Corrupted allocator metadata** - Free lists and heap headers restored to consistent state
204    /// - **Locked mutexes** - All lock state is reset
205    /// - **Partial updates** - Data structures restored to their pre-execution state
206    ///
207    /// # Examples
208    ///
209    /// ```no_run
210    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
211    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
212    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
213    ///     GuestBinary::FilePath("guest.bin".into()),
214    ///     None
215    /// )?.evolve()?;
216    ///
217    /// // Take initial snapshot from this sandbox
218    /// let snapshot = sandbox.snapshot()?;
219    ///
220    /// // Modify sandbox state
221    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 100)?;
222    /// let value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
223    /// assert_eq!(value, 100);
224    ///
225    /// // Restore to previous state (same sandbox)
226    /// sandbox.restore(snapshot)?;
227    /// let restored_value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
228    /// assert_eq!(restored_value, 0); // Back to initial state
229    /// # Ok(())
230    /// # }
231    /// ```
232    ///
233    /// ## Recovering from Poison
234    ///
235    /// ```no_run
236    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary, HyperlightError};
237    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
238    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
239    ///     GuestBinary::FilePath("guest.bin".into()),
240    ///     None
241    /// )?.evolve()?;
242    ///
243    /// // Take snapshot before potentially poisoning operation
244    /// let snapshot = sandbox.snapshot()?;
245    ///
246    /// // This might poison the sandbox (guest not run to completion)
247    /// let result = sandbox.call::<()>("guest_panic", ());
248    /// if result.is_err() {
249    ///     if sandbox.poisoned() {
250    ///         // Restore from snapshot to clear poison
251    ///         sandbox.restore(snapshot.clone())?;
252    ///         assert!(!sandbox.poisoned());
253    ///         
254    ///         // Sandbox is now usable again
255    ///         sandbox.call::<String>("Echo", "hello".to_string())?;
256    ///     }
257    /// }
258    /// # Ok(())
259    /// # }
260    /// ```
261    #[instrument(err(Debug), skip_all, parent = Span::current())]
262    pub fn restore(&mut self, snapshot: Arc<Snapshot>) -> Result<()> {
263        // Currently, we do not try to optimise restore to the
264        // most-current snapshot. This is because the most-current
265        // snapshot, while it must have identical virtual memory
266        // layout to the current sandbox, does not necessarily have
267        // the exact same /physical/ memory contents. It is not
268        // entirely inconceivable that this could lead to breakage of
269        // cross-request isolation in some way, although it would
270        // require some /very/ odd code.  For example, suppose that a
271        // service uses Hyperlight to sandbox native code from
272        // clients, and promises cross-request isolation. A tenant
273        // provides a binary that can process two forms of request,
274        // either writing a secret into physical memory, or reading
275        // from arbitrary physical memory, assuming that the two kinds
276        // of requests can never (dangerously) meet in the same
277        // sandbox.
278        //
279        // It is presently unclear whether this is a sensible threat
280        // model, especially since Hyperlight is often used with
281        // managed-code runtimes which do not allow even arbitrary
282        // access to virtual memory, much less physical memory.
283        // However, out of an abundance of caution, the optimisation
284        // is presently disabled.
285
286        if self.id != snapshot.sandbox_id() {
287            return Err(SnapshotSandboxMismatch);
288        }
289
290        let (gsnapshot, gscratch) = self.mem_mgr.restore_snapshot(&snapshot)?;
291        if let Some(gsnapshot) = gsnapshot {
292            self.vm
293                .update_snapshot_mapping(gsnapshot)
294                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
295        }
296        if let Some(gscratch) = gscratch {
297            self.vm
298                .update_scratch_mapping(gscratch)
299                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
300        }
301
302        let sregs = snapshot.sregs().ok_or_else(|| {
303            HyperlightError::Error("snapshot from running sandbox should have sregs".to_string())
304        })?;
305        // TODO (ludfjig): Go through the rest of possible errors in this `MultiUseSandbox::restore` function
306        // and determine if they should also poison the sandbox.
307        self.vm
308            .reset_vcpu(snapshot.root_pt_gpa(), sregs)
309            .map_err(|e| {
310                self.poisoned = true;
311                HyperlightVmError::Restore(e)
312            })?;
313
314        self.vm.set_stack_top(snapshot.stack_top_gva());
315        self.vm.set_entrypoint(snapshot.entrypoint());
316
317        let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
318        let snapshot_regions: HashSet<_> = snapshot.regions().iter().cloned().collect();
319
320        let regions_to_unmap = current_regions.difference(&snapshot_regions);
321        let regions_to_map = snapshot_regions.difference(&current_regions);
322
323        for region in regions_to_unmap {
324            self.vm
325                .unmap_region(region)
326                .map_err(HyperlightVmError::UnmapRegion)?;
327        }
328
329        for region in regions_to_map {
330            // Safety: The region has been mapped before, and at that point the caller promised that the memory region is valid
331            // in their call to `MultiUseSandbox::map_region`
332            unsafe { self.vm.map_region(region) }.map_err(HyperlightVmError::MapRegion)?;
333        }
334
335        // The restored snapshot is now our most current snapshot
336        self.snapshot = Some(snapshot.clone());
337
338        // Clear poison state when successfully restoring from snapshot.
339        //
340        // # Safety:
341        // This is safe because:
342        // 1. Snapshots can only be taken from non-poisoned sandboxes (verified at snapshot creation)
343        // 2. Restoration completely replaces all memory state, eliminating:
344        //    - All leaked heap allocations (memory is restored to snapshot state)
345        //    - All corrupted data structures (overwritten with consistent snapshot data)
346        //    - All inconsistent global state (reset to snapshot values)
347        self.poisoned = false;
348
349        Ok(())
350    }
351
352    /// Calls a guest function by name with the specified arguments.
353    ///
354    /// Changes made to the sandbox during execution are *not* persisted.
355    ///
356    /// ## Poisoned Sandbox
357    ///
358    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
359    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
360    ///
361    /// # Examples
362    ///
363    /// ```no_run
364    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
365    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
366    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
367    ///     GuestBinary::FilePath("guest.bin".into()),
368    ///     None
369    /// )?.evolve()?;
370    ///
371    /// // Call function with no arguments
372    /// let result: i32 = sandbox.call_guest_function_by_name("GetCounter", ())?;
373    ///
374    /// // Call function with single argument
375    /// let doubled: i32 = sandbox.call_guest_function_by_name("Double", 21)?;
376    /// assert_eq!(doubled, 42);
377    ///
378    /// // Call function with multiple arguments
379    /// let sum: i32 = sandbox.call_guest_function_by_name("Add", (10, 32))?;
380    /// assert_eq!(sum, 42);
381    ///
382    /// // Call function returning string
383    /// let message: String = sandbox.call_guest_function_by_name("Echo", "Hello, World!".to_string())?;
384    /// assert_eq!(message, "Hello, World!");
385    /// # Ok(())
386    /// # }
387    /// ```
388    #[doc(hidden)]
389    #[deprecated(
390        since = "0.8.0",
391        note = "Deprecated in favour of call and snapshot/restore."
392    )]
393    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
394    pub fn call_guest_function_by_name<Output: SupportedReturnType>(
395        &mut self,
396        func_name: &str,
397        args: impl ParameterTuple,
398    ) -> Result<Output> {
399        if self.poisoned {
400            return Err(crate::HyperlightError::PoisonedSandbox);
401        }
402        let snapshot = self.snapshot()?;
403        let res = self.call(func_name, args);
404        self.restore(snapshot)?;
405        res
406    }
407
408    /// Calls a guest function by name with the specified arguments.
409    ///
410    /// Changes made to the sandbox during execution are persisted.
411    ///
412    /// ## Poisoned Sandbox
413    ///
414    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
415    /// is already poisoned before the call. Use [`restore()`](Self::restore) to recover from
416    /// a poisoned state.
417    ///
418    /// ## Sandbox Poisoning
419    ///
420    /// If this method returns an error, the sandbox may be poisoned if the guest was not run
421    /// to completion (due to panic, abort, memory violation, stack/heap exhaustion, or forced
422    /// termination). Use [`poisoned()`](Self::poisoned) to check the poison state and
423    /// [`restore()`](Self::restore) to recover if needed.
424    ///
425    /// If this method returns `Ok`, the sandbox is guaranteed to **not** be poisoned - the guest
426    /// function completed successfully and the sandbox state is consistent.
427    ///
428    /// # Examples
429    ///
430    /// ```no_run
431    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
432    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
433    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
434    ///     GuestBinary::FilePath("guest.bin".into()),
435    ///     None
436    /// )?.evolve()?;
437    ///
438    /// // Call function with no arguments
439    /// let result: i32 = sandbox.call("GetCounter", ())?;
440    ///
441    /// // Call function with single argument
442    /// let doubled: i32 = sandbox.call("Double", 21)?;
443    /// assert_eq!(doubled, 42);
444    ///
445    /// // Call function with multiple arguments
446    /// let sum: i32 = sandbox.call("Add", (10, 32))?;
447    /// assert_eq!(sum, 42);
448    ///
449    /// // Call function returning string
450    /// let message: String = sandbox.call("Echo", "Hello, World!".to_string())?;
451    /// assert_eq!(message, "Hello, World!");
452    /// # Ok(())
453    /// # }
454    /// ```
455    ///
456    /// ## Handling Potential Poisoning
457    ///
458    /// ```no_run
459    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
460    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
461    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
462    ///     GuestBinary::FilePath("guest.bin".into()),
463    ///     None
464    /// )?.evolve()?;
465    ///
466    /// // Take snapshot before risky operation
467    /// let snapshot = sandbox.snapshot()?;
468    ///
469    /// // Call potentially unsafe guest function
470    /// let result = sandbox.call::<String>("RiskyOperation", "input".to_string());
471    ///
472    /// // Check if the call failed and poisoned the sandbox
473    /// if let Err(e) = result {
474    ///     eprintln!("Guest function failed: {}", e);
475    ///     
476    ///     if sandbox.poisoned() {
477    ///         eprintln!("Sandbox was poisoned, restoring from snapshot");
478    ///         sandbox.restore(snapshot.clone())?;
479    ///     }
480    /// }
481    /// # Ok(())
482    /// # }
483    /// ```
484    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
485    pub fn call<Output: SupportedReturnType>(
486        &mut self,
487        func_name: &str,
488        args: impl ParameterTuple,
489    ) -> Result<Output> {
490        if self.poisoned {
491            return Err(crate::HyperlightError::PoisonedSandbox);
492        }
493        // Reset snapshot since we are mutating the sandbox state
494        self.snapshot = None;
495        maybe_time_and_emit_guest_call(func_name, || {
496            let ret = self.call_guest_function_by_name_no_reset(
497                func_name,
498                Output::TYPE,
499                args.into_value(),
500            );
501            // Use the ? operator to allow converting any hyperlight_common::func::Error
502            // returned by from_value into a HyperlightError
503            let ret = Output::from_value(ret?)?;
504            Ok(ret)
505        })
506    }
507
508    /// Maps a region of host memory into the sandbox address space.
509    ///
510    /// The base address and length must meet platform alignment requirements
511    /// (typically page-aligned). The `region_type` field is ignored as guest
512    /// page table entries are not created.
513    ///
514    /// ## Poisoned Sandbox
515    ///
516    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
517    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
518    ///
519    /// # Safety
520    ///
521    /// The caller must ensure the host memory region remains valid and unmodified
522    /// for the lifetime of `self`.
523    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
524    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
525        if self.poisoned {
526            return Err(crate::HyperlightError::PoisonedSandbox);
527        }
528        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
529            // TODO: Implement support for writable mappings, which
530            // need to be registered with the memory manager so that
531            // writes can be rolled back when necessary.
532            log_then_return!("TODO: Writable mappings not yet supported");
533        }
534        // Reset snapshot since we are mutating the sandbox state
535        self.snapshot = None;
536        unsafe { self.vm.map_region(rgn) }.map_err(HyperlightVmError::MapRegion)?;
537        self.mem_mgr.mapped_rgns += 1;
538        Ok(())
539    }
540
    /// Map the contents of a file into the guest at a particular address
    /// (copy-on-write).
    ///
    /// An optional `label` identifies this mapping in the PEB's
    /// `FileMappingInfo` array (max 63 bytes, defaults to the file name).
    ///
    /// Returns the length of the mapping in bytes.
    ///
    /// ## Poisoned Sandbox
    ///
    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
    #[instrument(err(Debug), skip(self, file_path, guest_base, label), parent = Span::current())]
    pub fn map_file_cow(
        &mut self,
        file_path: &Path,
        guest_base: u64,
        label: Option<&str>,
    ) -> Result<u64> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }

        // Pre-check the file mapping limit before doing any expensive
        // OS or VM work. The PEB count is the source of truth.
        #[cfg(feature = "nanvix-unstable")]
        let current_count = self
            .mem_mgr
            .shared_mem
            .read::<u64>(self.mem_mgr.layout.get_file_mappings_size_offset())?
            as usize;
        #[cfg(feature = "nanvix-unstable")]
        if current_count >= hyperlight_common::mem::MAX_FILE_MAPPINGS {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: file mapping limit reached ({} of {})",
                current_count,
                hyperlight_common::mem::MAX_FILE_MAPPINGS,
            )));
        }

        // Phase 1: host-side OS work (open file, create mapping).
        // `prepared` owns the OS resources until mark_consumed() below.
        let mut prepared = prepare_file_cow(file_path, guest_base, label)?;

        // Validate that the full mapped range doesn't overlap the
        // sandbox's primary shared memory region.
        let shared_size = self.mem_mgr.shared_mem.mem_size() as u64;
        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
        let shared_end = base_addr.checked_add(shared_size).ok_or_else(|| {
            crate::HyperlightError::Error("shared memory end overflow".to_string())
        })?;
        let mapping_end = guest_base
            .checked_add(prepared.size as u64)
            .ok_or_else(|| {
                crate::HyperlightError::Error(format!(
                    "map_file_cow: guest address overflow: {:#x} + {:#x}",
                    guest_base, prepared.size
                ))
            })?;
        // Standard half-open interval overlap test: [guest_base, mapping_end)
        // vs [base_addr, shared_end).
        if guest_base < shared_end && mapping_end > base_addr {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: mapping [{:#x}..{:#x}) overlaps sandbox shared memory [{:#x}..{:#x})",
                guest_base, mapping_end, base_addr, shared_end,
            )));
        }

        // Phase 2: VM-side work (map into guest address space)
        let region = prepared.to_memory_region()?;

        // Check for overlaps with existing file mappings in the VM.
        for existing_region in self.vm.get_mapped_regions() {
            let ex_start = existing_region.guest_region.start as u64;
            let ex_end = existing_region.guest_region.end as u64;
            if guest_base < ex_end && mapping_end > ex_start {
                return Err(crate::HyperlightError::Error(format!(
                    "map_file_cow: mapping [{:#x}..{:#x}) overlaps existing mapping [{:#x}..{:#x})",
                    guest_base, mapping_end, ex_start, ex_end,
                )));
            }
        }

        // Reset snapshot since we are mutating the sandbox state
        self.snapshot = None;

        // Safety-relevant: after this succeeds the VM owns/references the
        // backing mapping (see the mark_consumed() note below).
        unsafe { self.vm.map_region(&region) }
            .map_err(HyperlightVmError::MapRegion)
            .map_err(crate::HyperlightError::HyperlightVmError)?;

        let size = prepared.size as u64;

        // Mark consumed immediately after map_region succeeds.
        // On Windows, WhpVm::map_memory copies the file mapping handle
        // into its own `file_mappings` vec for cleanup on drop. If we
        // deferred mark_consumed(), both PreparedFileMapping::drop and
        // WhpVm::drop would release the same handle — a double-close.
        // On Linux the hypervisor holds a reference to the host mmap;
        // freeing it here would leave a dangling backing.
        prepared.mark_consumed();
        self.mem_mgr.mapped_rgns += 1;

        // Record the mapping metadata in the PEB. If this fails the VM
        // still holds a valid mapping but the PEB won't list it — the
        // limit was already pre-checked above so this should not fail
        // in practice.
        #[cfg(feature = "nanvix-unstable")]
        self.mem_mgr
            .write_file_mapping_entry(prepared.guest_base, size, &prepared.label)?;

        Ok(size)
    }
649
650    /// Calls a guest function with type-erased parameters and return values.
651    ///
652    /// This function is used for fuzz testing parameter and return type handling.
653    ///
654    /// ## Poisoned Sandbox
655    ///
656    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
657    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
658    #[cfg(feature = "fuzzing")]
659    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
660    pub fn call_type_erased_guest_function_by_name(
661        &mut self,
662        func_name: &str,
663        ret_type: ReturnType,
664        args: Vec<ParameterValue>,
665    ) -> Result<ReturnValue> {
666        if self.poisoned {
667            return Err(crate::HyperlightError::PoisonedSandbox);
668        }
669        // Reset snapshot since we are mutating the sandbox state
670        self.snapshot = None;
671        maybe_time_and_emit_guest_call(func_name, || {
672            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
673        })
674    }
675
    /// Core dispatch path shared by `call` and the type-erased entry points:
    /// serializes the function call, dispatches it to the guest, and decodes
    /// the result.
    ///
    /// Does NOT reset the cached snapshot — callers (`call`,
    /// `call_type_erased_guest_function_by_name`) clear `self.snapshot`
    /// themselves before invoking this (hence `_no_reset`).
    ///
    /// Sets `self.poisoned` when either the dispatch layer or the returned
    /// error indicates the guest did not run to completion.
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // ===== KILL() TIMING POINT 1 =====
        // Clear any stale cancellation from a previous guest function call or if kill() was called too early.
        // Any kill() that completed (even partially) BEFORE this line has NO effect on this call.
        self.vm.clear_cancel();

        // The whole call is wrapped in an immediately-invoked closure so that
        // the cleanup below runs regardless of which step errored out.
        let res = (|| {
            // Pre-size the flatbuffer to avoid growth reallocations.
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            // Place the serialized call where the guest expects to find it.
            self.mem_mgr.write_guest_function_call(buffer)?;

            let dispatch_res = self.vm.dispatch_call_from_host(
                &mut self.mem_mgr,
                &self.host_funcs,
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            );

            // Convert dispatch errors to HyperlightErrors to maintain backwards compatibility
            // but first determine if sandbox should be poisoned
            if let Err(e) = dispatch_res {
                let (error, should_poison) = e.promote();
                self.poisoned |= should_poison;
                return Err(error);
            }

            let guest_result = self.mem_mgr.get_guest_function_call_result()?.into_inner();

            match guest_result {
                Ok(val) => Ok(val),
                Err(guest_error) => {
                    // Count guest-reported errors, labelled by error code.
                    metrics::counter!(
                        METRIC_GUEST_ERROR,
                        METRIC_GUEST_ERROR_LABEL_CODE => (guest_error.code as u64).to_string()
                    )
                    .increment(1);

                    Err(HyperlightError::GuestError(
                        guest_error.code,
                        guest_error.message,
                    ))
                }
            }
        })();

        // Clear partial abort bytes so they don't leak across calls.
        self.mem_mgr.abort_buffer.clear();

        // In the happy path we do not need to clear io-buffers from the host because:
        // - the serialized guest function call is zeroed out by the guest during deserialization, see call to `try_pop_shared_input_data_into::<FunctionCall>()`
        // - the serialized guest function result is zeroed out by us (the host) during deserialization, see `get_guest_function_call_result`
        // - any serialized host function call are zeroed out by us (the host) during deserialization, see `get_host_function_call`
        // - any serialized host function result is zeroed out by the guest during deserialization, see `get_host_return_value`
        if let Err(e) = &res {
            self.mem_mgr.clear_io_buffers();

            // Determine if we should poison the sandbox.
            self.poisoned |= e.is_poison_error();
        }

        // NOTE(review): there is no `_guard` binding in this function, so the
        // claim "clear_call_active() is automatically called when _guard is
        // dropped here" looks stale — confirm where call-active state is
        // actually cleared.

        res
    }
758
    /// Returns a handle for interrupting guest execution.
    ///
    /// The handle can be cloned and moved to another thread (as in the example
    /// below) to call `kill()` while a guest function is running on this thread.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # use std::thread;
    /// # use std::time::Duration;
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Get interrupt handle before starting long-running operation
    /// let interrupt_handle = sandbox.interrupt_handle();
    ///
    /// // Spawn thread to interrupt after timeout
    /// let handle_clone = interrupt_handle.clone();
    /// thread::spawn(move || {
    ///     thread::sleep(Duration::from_secs(5));
    ///     handle_clone.kill();
    /// });
    ///
    /// // This call may be interrupted by the spawned thread
    /// let result = sandbox.call_guest_function_by_name::<i32>("LongRunningFunction", ());
    /// # Ok(())
    /// # }
    /// ```
    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
        self.vm.interrupt_handle()
    }
791
792    /// Generate a crash dump of the current state of the VM underlying this sandbox.
793    ///
794    /// Creates an ELF core dump file that can be used for debugging. The dump
795    /// captures the current state of the sandbox including registers, memory regions,
796    /// and other execution context.
797    ///
798    /// The location of the core dump file is determined by the `HYPERLIGHT_CORE_DUMP_DIR`
799    /// environment variable. If not set, it defaults to the system's temporary directory.
800    ///
801    /// This is only available when the `crashdump` feature is enabled and then only if the sandbox
802    /// is also configured to allow core dumps (which is the default behavior).
803    ///
804    /// This can be useful for generating a crash dump from gdb when trying to debug issues in the
805    /// guest that dont cause crashes (e.g. a guest function that does not return)
806    ///
807    /// # Examples
808    ///
809    /// Attach to your running process with gdb and call this function:
810    ///
811    /// ```shell
812    /// sudo gdb -p <pid_of_your_process>
813    /// (gdb) info threads
814    /// # find the thread that is running the guest function you want to debug
815    /// (gdb) thread <thread_number>
816    /// # switch to the frame where you have access to your MultiUseSandbox instance
817    /// (gdb) backtrace
818    /// (gdb) frame <frame_number>
819    /// # get the pointer to your MultiUseSandbox instance
820    /// # Get the sandbox pointer
821    /// (gdb) print sandbox
822    /// # Call the crashdump function
823    /// call sandbox.generate_crashdump()
824    /// ```
825    /// The crashdump should be available in crash dump directory (see `HYPERLIGHT_CORE_DUMP_DIR` env var).
826    ///
827    #[cfg(crashdump)]
828    #[instrument(err(Debug), skip_all, parent = Span::current())]
829    pub fn generate_crashdump(&mut self) -> Result<()> {
830        crate::hypervisor::crashdump::generate_crashdump(&self.vm, &mut self.mem_mgr, None)
831    }
832
833    /// Generate a crash dump of the current state of the VM, writing to `dir`.
834    ///
835    /// Like [`generate_crashdump`](Self::generate_crashdump), but the core dump
836    /// file is placed in `dir` instead of consulting the `HYPERLIGHT_CORE_DUMP_DIR`
837    /// environment variable.  This avoids the need for callers to use
838    /// `unsafe { std::env::set_var(...) }`.
839    #[cfg(crashdump)]
840    #[instrument(err(Debug), skip_all, parent = Span::current())]
841    pub fn generate_crashdump_to_dir(&mut self, dir: impl Into<String>) -> Result<()> {
842        crate::hypervisor::crashdump::generate_crashdump(
843            &self.vm,
844            &mut self.mem_mgr,
845            Some(dir.into()),
846        )
847    }
848
    /// Returns whether the sandbox is currently poisoned.
    ///
    /// A poisoned sandbox is in an inconsistent state due to the guest not running to completion.
    /// All operations will be rejected with [`HyperlightError::PoisonedSandbox`] until the
    /// sandbox is restored from a non-poisoned snapshot.
    ///
    /// ## Causes of Poisoning
    ///
    /// The sandbox becomes poisoned when guest execution is interrupted:
    /// - **Panics/Aborts** - Guest code panics or calls `abort()`
    /// - **Invalid Memory Access** - Read/write/execute violations
    /// - **Stack Overflow** - Guest exhausts stack space
    /// - **Heap Exhaustion** - Guest runs out of heap memory
    /// - **Forced Termination** - [`InterruptHandle::kill()`] called during execution
    ///
    /// ## Recovery
    ///
    /// To clear the poison state, use [`restore()`](Self::restore) with a snapshot
    /// that was taken before the sandbox became poisoned.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Check if sandbox is poisoned
    /// if sandbox.poisoned() {
    ///     println!("Sandbox is poisoned and needs attention");
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn poisoned(&self) -> bool {
        self.poisoned
    }
888}
889
890impl Callable for MultiUseSandbox {
891    fn call<Output: SupportedReturnType>(
892        &mut self,
893        func_name: &str,
894        args: impl ParameterTuple,
895    ) -> Result<Output> {
896        if self.poisoned {
897            return Err(crate::HyperlightError::PoisonedSandbox);
898        }
899        self.call(func_name, args)
900    }
901}
902
903impl std::fmt::Debug for MultiUseSandbox {
904    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
905        f.debug_struct("MultiUseSandbox").finish()
906    }
907}
908
909#[cfg(test)]
910mod tests {
911    use std::sync::{Arc, Barrier};
912    use std::thread;
913
914    use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
915    use hyperlight_testing::sandbox_sizes::{LARGE_HEAP_SIZE, MEDIUM_HEAP_SIZE, SMALL_HEAP_SIZE};
916    use hyperlight_testing::simple_guest_as_string;
917
918    use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
919    use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
920    use crate::sandbox::SandboxConfiguration;
921    use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};
922
    /// End-to-end poison lifecycle: once poisoned, every operation on the
    /// sandbox is rejected until it is restored from a pre-poison snapshot,
    /// and the cycle is repeatable.
    #[test]
    fn poison() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();
        // Taken while healthy — used later to clear the poison.
        let snapshot = sbox.snapshot().unwrap();

        // poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // guest calls should fail when poisoned
        let res = sbox
            .call::<()>("guest_panic", "hello2".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // snapshot should fail when poisoned
        if let Err(e) = sbox.snapshot() {
            assert!(sbox.poisoned());
            assert!(matches!(e, HyperlightError::PoisonedSandbox));
        } else {
            panic!("Snapshot should fail");
        }

        // map_region should fail when poisoned
        {
            let map_mem = allocate_guest_memory();
            let guest_base = 0x0;
            let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
            let res = unsafe { sbox.map_region(&region) }.unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
        }

        // map_file_cow should fail when poisoned
        {
            let temp_file = std::env::temp_dir().join("test_poison_map_file.bin");
            let res = sbox.map_file_cow(&temp_file, 0x0, None).unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
            std::fs::remove_file(&temp_file).ok(); // Clean up
        }

        // call_guest_function_by_name (deprecated) should fail when poisoned
        #[allow(deprecated)]
        let res = sbox
            .call_guest_function_by_name::<String>("Echo", "test".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // restore to non-poisoned snapshot should work and clear poison
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again after restore
        let res = sbox.call::<String>("Echo", "hello2".to_string()).unwrap();
        assert_eq!(res, "hello2".to_string());
        assert!(!sbox.poisoned());

        // re-poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // restore to non-poisoned snapshot should work again
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again
        let res = sbox.call::<String>("Echo", "hello3".to_string()).unwrap();
        assert_eq!(res, "hello3".to_string());
        assert!(!sbox.poisoned());

        // snapshot should work again
        let _ = sbox.snapshot().unwrap();
    }
1010
1011    /// Make sure input/output buffers are properly reset after guest call (with host call)
1012    #[test]
1013    fn host_func_error() {
1014        let path = simple_guest_as_string().unwrap();
1015        let mut sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1016        sandbox
1017            .register("HostError", || -> Result<()> {
1018                Err(HyperlightError::Error("hi".to_string()))
1019            })
1020            .unwrap();
1021        let mut sandbox = sandbox.evolve().unwrap();
1022
1023        // will exhaust io if leaky
1024        for _ in 0..1000 {
1025            let result = sandbox
1026                .call::<i64>(
1027                    "CallGivenParamlessHostFuncThatReturnsI64",
1028                    "HostError".to_string(),
1029                )
1030                .unwrap_err();
1031
1032            assert!(
1033                matches!(result, HyperlightError::GuestError(code, msg) if code == ErrorCode::HostFunctionError && msg == "hi"),
1034            );
1035        }
1036    }
1037
1038    #[test]
1039    fn call_host_func_expect_error() {
1040        let path = simple_guest_as_string().unwrap();
1041        let sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1042        let mut sandbox = sandbox.evolve().unwrap();
1043        sandbox
1044            .call::<()>("CallHostExpectError", "SomeUnknownHostFunc".to_string())
1045            .unwrap();
1046    }
1047
1048    /// Make sure input/output buffers are properly reset after guest call (with host call)
1049    #[test]
1050    fn io_buffer_reset() {
1051        let mut cfg = SandboxConfiguration::default();
1052        cfg.set_input_data_size(4096);
1053        cfg.set_output_data_size(4096);
1054        let path = simple_guest_as_string().unwrap();
1055        let mut sandbox =
1056            UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1057        sandbox.register("HostAdd", |a: i32, b: i32| a + b).unwrap();
1058        let mut sandbox = sandbox.evolve().unwrap();
1059
1060        // will exhaust io if leaky. Tests both success and error paths
1061        for _ in 0..1000 {
1062            let result = sandbox.call::<i32>("Add", (5i32, 10i32)).unwrap();
1063            assert_eq!(result, 15);
1064            let result = sandbox.call::<i32>("AddToStaticAndFail", ()).unwrap_err();
1065            assert!(
1066                matches!(result, HyperlightError::GuestError (code, msg ) if code == ErrorCode::GuestError && msg == "Crash on purpose")
1067            );
1068        }
1069    }
1070
1071    /// Tests that call_guest_function_by_name restores the state correctly
1072    #[test]
1073    fn test_call_guest_function_by_name() {
1074        let mut sbox: MultiUseSandbox = {
1075            let path = simple_guest_as_string().unwrap();
1076            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1077            u_sbox.evolve()
1078        }
1079        .unwrap();
1080
1081        let snapshot = sbox.snapshot().unwrap();
1082
1083        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1084        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1085        assert_eq!(res, 5);
1086
1087        sbox.restore(snapshot).unwrap();
1088        #[allow(deprecated)]
1089        let _ = sbox
1090            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
1091            .unwrap();
1092        #[allow(deprecated)]
1093        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
1094        assert_eq!(res, 0);
1095    }
1096
1097    // Tests to ensure that many (1000) function calls can be made in a call context with a small stack (24K) and heap(20K).
1098    // This test effectively ensures that the stack is being properly reset after each call and we are not leaking memory in the Guest.
1099    #[test]
1100    fn test_with_small_stack_and_heap() {
1101        let mut cfg = SandboxConfiguration::default();
1102        cfg.set_heap_size(20 * 1024);
1103        // min_scratch_size already includes 1 page (4k on most
1104        // platforms) of guest stack, so add 20k more to get 24k
1105        // total, and then add some more for the eagerly-copied page
1106        // tables on amd64
1107        let min_scratch = hyperlight_common::layout::min_scratch_size(
1108            cfg.get_input_data_size(),
1109            cfg.get_output_data_size(),
1110        );
1111        cfg.set_scratch_size(min_scratch + 0x10000 + 0x10000);
1112
1113        let mut sbox1: MultiUseSandbox = {
1114            let path = simple_guest_as_string().unwrap();
1115            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1116            u_sbox.evolve()
1117        }
1118        .unwrap();
1119
1120        for _ in 0..1000 {
1121            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
1122        }
1123
1124        let mut sbox2: MultiUseSandbox = {
1125            let path = simple_guest_as_string().unwrap();
1126            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1127            u_sbox.evolve()
1128        }
1129        .unwrap();
1130
1131        for i in 0..1000 {
1132            sbox2
1133                .call::<i32>(
1134                    "PrintUsingPrintf",
1135                    format!("Hello World {}\n", i).to_string(),
1136                )
1137                .unwrap();
1138        }
1139    }
1140
1141    /// Tests that evolving from MultiUseSandbox to MultiUseSandbox creates a new state
1142    /// and restoring a snapshot from before evolving restores the previous state
1143    #[test]
1144    fn snapshot_evolve_restore_handles_state_correctly() {
1145        let mut sbox: MultiUseSandbox = {
1146            let path = simple_guest_as_string().unwrap();
1147            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1148            u_sbox.evolve()
1149        }
1150        .unwrap();
1151
1152        let snapshot = sbox.snapshot().unwrap();
1153
1154        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1155
1156        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1157        assert_eq!(res, 5);
1158
1159        sbox.restore(snapshot).unwrap();
1160        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1161        assert_eq!(res, 0);
1162    }
1163
1164    #[test]
1165    fn test_trigger_exception_on_guest() {
1166        let usbox = UninitializedSandbox::new(
1167            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1168            None,
1169        )
1170        .unwrap();
1171
1172        let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
1173
1174        let res: Result<()> = multi_use_sandbox.call("TriggerException", ());
1175
1176        assert!(res.is_err());
1177
1178        match res.unwrap_err() {
1179            HyperlightError::GuestAborted(_, msg) => {
1180                // msg should indicate we got an invalid opcode exception
1181                assert!(msg.contains("InvalidOpcode"));
1182            }
1183            e => panic!(
1184                "Expected HyperlightError::GuestExecutionError but got {:?}",
1185                e
1186            ),
1187        }
1188    }
1189
1190    #[test]
1191    fn create_200_sandboxes() {
1192        const NUM_THREADS: usize = 10;
1193        const SANDBOXES_PER_THREAD: usize = 20;
1194
1195        // barrier to make sure all threads start their work simultaneously
1196        let start_barrier = Arc::new(Barrier::new(NUM_THREADS + 1));
1197        let mut thread_handles = vec![];
1198
1199        for _ in 0..NUM_THREADS {
1200            let barrier = start_barrier.clone();
1201
1202            let handle = thread::spawn(move || {
1203                barrier.wait();
1204
1205                for _ in 0..SANDBOXES_PER_THREAD {
1206                    let guest_path = simple_guest_as_string().expect("Guest Binary Missing");
1207                    let uninit =
1208                        UninitializedSandbox::new(GuestBinary::FilePath(guest_path), None).unwrap();
1209
1210                    let mut sandbox: MultiUseSandbox = uninit.evolve().unwrap();
1211
1212                    let result: i32 = sandbox.call("GetStatic", ()).unwrap();
1213                    assert_eq!(result, 0);
1214                }
1215            });
1216
1217            thread_handles.push(handle);
1218        }
1219
1220        start_barrier.wait();
1221
1222        for handle in thread_handles {
1223            handle.join().unwrap();
1224        }
1225    }
1226
1227    #[test]
1228    fn test_mmap() {
1229        let mut sbox = UninitializedSandbox::new(
1230            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1231            None,
1232        )
1233        .unwrap()
1234        .evolve()
1235        .unwrap();
1236
1237        let expected = b"hello world";
1238        let map_mem = page_aligned_memory(expected);
1239        let guest_base = 0x1_0000_0000; // Arbitrary guest base address
1240
1241        unsafe {
1242            sbox.map_region(&region_for_memory(
1243                &map_mem,
1244                guest_base,
1245                MemoryRegionFlags::READ,
1246            ))
1247            .unwrap();
1248        }
1249
1250        let _guard = map_mem.lock.try_read().unwrap();
1251        let actual: Vec<u8> = sbox
1252            .call(
1253                "ReadMappedBuffer",
1254                (guest_base as u64, expected.len() as u64, true),
1255            )
1256            .unwrap();
1257
1258        assert_eq!(actual, expected);
1259    }
1260
1261    // Makes sure MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE executable but not writable
1262    #[test]
1263    fn test_mmap_write_exec() {
1264        let mut sbox = UninitializedSandbox::new(
1265            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1266            None,
1267        )
1268        .unwrap()
1269        .evolve()
1270        .unwrap();
1271
1272        let expected = &[0x90, 0x90, 0x90, 0xC3]; // NOOP slide to RET
1273        let map_mem = page_aligned_memory(expected);
1274        let guest_base = 0x1_0000_0000; // Arbitrary guest base address
1275
1276        unsafe {
1277            sbox.map_region(&region_for_memory(
1278                &map_mem,
1279                guest_base,
1280                MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
1281            ))
1282            .unwrap();
1283        }
1284
1285        let _guard = map_mem.lock.try_read().unwrap();
1286
1287        // Execute should pass since memory is executable
1288        let succeed = sbox
1289            .call::<bool>(
1290                "ExecMappedBuffer",
1291                (guest_base as u64, expected.len() as u64),
1292            )
1293            .unwrap();
1294        assert!(succeed, "Expected execution of mapped buffer to succeed");
1295
1296        // write should fail because the memory is mapped as read-only
1297        let err = sbox
1298            .call::<bool>(
1299                "WriteMappedBuffer",
1300                (guest_base as u64, expected.len() as u64),
1301            )
1302            .unwrap_err();
1303
1304        match err {
1305            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
1306            _ => panic!("Expected MemoryAccessViolation error"),
1307        };
1308    }
1309
1310    fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
1311        use hyperlight_common::mem::PAGE_SIZE_USIZE;
1312
1313        let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
1314
1315        let mut mem = ExclusiveSharedMemory::new(len).unwrap();
1316        mem.copy_from_slice(src, 0).unwrap();
1317
1318        let (_, guest_mem) = mem.build();
1319
1320        guest_mem
1321    }
1322
1323    fn region_for_memory(
1324        mem: &GuestSharedMemory,
1325        guest_base: usize,
1326        flags: MemoryRegionFlags,
1327    ) -> MemoryRegion {
1328        let len = mem.mem_size();
1329        MemoryRegion {
1330            host_region: mem.host_region_base()..mem.host_region_end(),
1331            guest_region: guest_base..(guest_base + len),
1332            flags,
1333            region_type: MemoryRegionType::Heap,
1334        }
1335    }
1336
1337    fn allocate_guest_memory() -> GuestSharedMemory {
1338        page_aligned_memory(b"test data for snapshot")
1339    }
1340
    /// Verifies that snapshots capture the set of mapped regions: restoring
    /// backward unmaps regions mapped after the snapshot, and restoring
    /// forward folds previously-mapped regions into the restored state.
    #[test]
    fn snapshot_restore_handles_remapping_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        // 1. Take snapshot 1 with no additional regions mapped
        let snapshot1 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);

        // 2. Map a memory region
        let map_mem = allocate_guest_memory();
        let guest_base = 0x200000000_usize;
        let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);

        unsafe { sbox.map_region(&region).unwrap() };
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);
        let orig_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    true,
                ),
            )
            .unwrap();

        // 3. Take snapshot 2 with 1 region mapped
        let snapshot2 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);

        // 4. Restore to snapshot 1 (should unmap the region)
        sbox.restore(snapshot1.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(!is_mapped);

        // 5. Restore forward to snapshot 2 (should have folded the
        //    region into the snapshot)
        sbox.restore(snapshot2.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(is_mapped);

        // Verify the region is the same
        let new_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    false,
                ),
            )
            .unwrap();
        assert_eq!(new_read, orig_read);
    }
1405
1406    #[test]
1407    fn snapshot_different_sandbox() {
1408        let mut sandbox = {
1409            let path = simple_guest_as_string().unwrap();
1410            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1411            u_sbox.evolve().unwrap()
1412        };
1413
1414        let mut sandbox2 = {
1415            let path = simple_guest_as_string().unwrap();
1416            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1417            u_sbox.evolve().unwrap()
1418        };
1419        assert_ne!(sandbox.id, sandbox2.id);
1420
1421        let snapshot = sandbox.snapshot().unwrap();
1422        let err = sandbox2.restore(snapshot.clone());
1423        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));
1424
1425        let sandbox_id = sandbox.id;
1426        drop(sandbox);
1427        drop(sandbox2);
1428        drop(snapshot);
1429
1430        let sandbox3 = {
1431            let path = simple_guest_as_string().unwrap();
1432            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1433            u_sbox.evolve().unwrap()
1434        };
1435        assert_ne!(sandbox3.id, sandbox_id);
1436    }
1437
1438    /// Test that snapshot restore properly resets vCPU debug registers. This test verifies
1439    /// that restore() calls reset_vcpu().
1440    #[test]
1441    fn snapshot_restore_resets_debug_registers() {
1442        let mut sandbox: MultiUseSandbox = {
1443            let path = simple_guest_as_string().unwrap();
1444            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1445            u_sbox.evolve().unwrap()
1446        };
1447
1448        let snapshot = sandbox.snapshot().unwrap();
1449
1450        // Verify DR0 is initially 0 (clean state)
1451        let dr0_initial: u64 = sandbox.call("GetDr0", ()).unwrap();
1452        assert_eq!(dr0_initial, 0, "DR0 should initially be 0");
1453
1454        // Dirty DR0 by setting it to a known non-zero value
1455        const DIRTY_VALUE: u64 = 0xDEAD_BEEF_CAFE_BABE;
1456        sandbox.call::<()>("SetDr0", DIRTY_VALUE).unwrap();
1457        let dr0_dirty: u64 = sandbox.call("GetDr0", ()).unwrap();
1458        assert_eq!(
1459            dr0_dirty, DIRTY_VALUE,
1460            "DR0 should be dirty after SetDr0 call"
1461        );
1462
1463        // Restore to the snapshot - this should reset vCPU state including debug registers
1464        sandbox.restore(snapshot).unwrap();
1465
1466        let dr0_after_restore: u64 = sandbox.call("GetDr0", ()).unwrap();
1467        assert_eq!(
1468            dr0_after_restore, 0,
1469            "DR0 should be 0 after restore (reset_vcpu should have been called)"
1470        );
1471    }
1472
1473    /// Test that stale abort buffer bytes from a previous call don't
1474    /// leak into the next call.
1475    #[test]
1476    fn stale_abort_buffer_does_not_leak_across_calls() {
1477        let mut sbox: MultiUseSandbox = {
1478            let path = simple_guest_as_string().unwrap();
1479            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1480            u_sbox.evolve().unwrap()
1481        };
1482
1483        // Simulate a partial abort
1484        sbox.mem_mgr.abort_buffer.extend_from_slice(&[0xAA; 1020]);
1485
1486        let res = sbox.call::<String>("Echo", "hello".to_string());
1487        assert!(
1488            res.is_ok(),
1489            "Expected Ok after stale abort buffer, got: {:?}",
1490            res.unwrap_err()
1491        );
1492
1493        // The buffer should be empty after the call.
1494        assert!(
1495            sbox.mem_mgr.abort_buffer.is_empty(),
1496            "abort_buffer should be empty after a guest call"
1497        );
1498    }
1499
1500    /// Test that sandboxes can be created and evolved with different heap sizes
1501    #[test]
1502    fn test_sandbox_creation_various_sizes() {
1503        let test_cases: [(&str, u64); 3] = [
1504            ("small (8MB heap)", SMALL_HEAP_SIZE),
1505            ("medium (64MB heap)", MEDIUM_HEAP_SIZE),
1506            ("large (256MB heap)", LARGE_HEAP_SIZE),
1507        ];
1508
1509        for (name, heap_size) in test_cases {
1510            let mut cfg = SandboxConfiguration::default();
1511            cfg.set_heap_size(heap_size);
1512            cfg.set_scratch_size(0x100000);
1513
1514            let path = simple_guest_as_string().unwrap();
1515            let sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg))
1516                .unwrap_or_else(|e| panic!("Failed to create {} sandbox: {}", name, e))
1517                .evolve()
1518                .unwrap_or_else(|e| panic!("Failed to evolve {} sandbox: {}", name, e));
1519
1520            drop(sbox);
1521        }
1522    }
1523
1524    /// Helper: create a MultiUseSandbox from the simple guest with default config.
1525    #[cfg(feature = "trace_guest")]
1526    fn sandbox_for_gva_tests() -> MultiUseSandbox {
1527        let path = simple_guest_as_string().unwrap();
1528        UninitializedSandbox::new(GuestBinary::FilePath(path), None)
1529            .unwrap()
1530            .evolve()
1531            .unwrap()
1532    }
1533
    /// Helper: read memory at `gva` of length `len` from the guest side via
    /// `ReadMappedBuffer(gva, len, true)` and from the host side via
    /// `read_guest_memory_by_gva`, then assert both views are identical.
    #[cfg(feature = "trace_guest")]
    fn assert_gva_read_matches(sbox: &mut MultiUseSandbox, gva: u64, len: usize) {
        // Guest reads via its own page tables
        let expected: Vec<u8> = sbox
            .call("ReadMappedBuffer", (gva, len as u64, true))
            .unwrap();
        assert_eq!(expected.len(), len);

        // Host reads by walking the same page tables, starting from the
        // guest's root page table fetched from the VM.
        let root_pt = sbox.vm.get_root_pt().unwrap();
        let actual = sbox
            .mem_mgr
            .read_guest_memory_by_gva(gva, len, root_pt)
            .unwrap();

        // Both views must agree byte-for-byte.
        assert_eq!(
            actual, expected,
            "read_guest_memory_by_gva at GVA {:#x} (len {}) differs from guest ReadMappedBuffer",
            gva, len,
        );
    }
1558
1559    /// Test reading a small buffer (< 1 page) from guest memory via GVA.
1560    /// Uses the guest code section which is already identity-mapped.
1561    #[test]
1562    #[cfg(feature = "trace_guest")]
1563    fn read_guest_memory_by_gva_single_page() {
1564        let mut sbox = sandbox_for_gva_tests();
1565        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1566        assert_gva_read_matches(&mut sbox, code_gva, 128);
1567    }
1568
1569    /// Test reading exactly one full page (4096 bytes) from guest memory.
1570    /// Uses the guest code section
1571    #[test]
1572    #[cfg(feature = "trace_guest")]
1573    fn read_guest_memory_by_gva_full_page() {
1574        let mut sbox = sandbox_for_gva_tests();
1575        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1576        assert_gva_read_matches(&mut sbox, code_gva, 4096);
1577    }
1578
1579    /// Test that a read starting at an odd (non-page-aligned) address and
1580    /// spanning two page boundaries returns correct data.
1581    #[test]
1582    #[cfg(feature = "trace_guest")]
1583    fn read_guest_memory_by_gva_unaligned_cross_page() {
1584        let mut sbox = sandbox_for_gva_tests();
1585        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1586        // Start 1 byte before the second page boundary and read 4097 bytes
1587        // (spans 2 full page boundaries).
1588        let start = code_gva + 4096 - 1;
1589        println!(
1590            "Testing unaligned cross-page read starting at {:#x} spanning 4097 bytes",
1591            start
1592        );
1593        assert_gva_read_matches(&mut sbox, start, 4097);
1594    }
1595
1596    /// Test reading exactly two full pages (8192 bytes) from guest memory.
1597    #[test]
1598    #[cfg(feature = "trace_guest")]
1599    fn read_guest_memory_by_gva_two_full_pages() {
1600        let mut sbox = sandbox_for_gva_tests();
1601        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1602        assert_gva_read_matches(&mut sbox, code_gva, 4096 * 2);
1603    }
1604
1605    /// Test reading a region that spans across a page boundary: starts
1606    /// 100 bytes before the end of the first page and reads 200 bytes
1607    /// into the second page.
1608    #[test]
1609    #[cfg(feature = "trace_guest")]
1610    fn read_guest_memory_by_gva_cross_page_boundary() {
1611        let mut sbox = sandbox_for_gva_tests();
1612        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1613        // Start 100 bytes before the first page boundary, read across it.
1614        let start = code_gva + 4096 - 100;
1615        assert_gva_read_matches(&mut sbox, start, 200);
1616    }
1617
1618    /// Helper: create a temp file with known content, padded to be
1619    /// at least page-aligned (4096 bytes). Returns the path and the
1620    /// *original* content bytes (before padding).
1621    fn create_test_file(name: &str, content: &[u8]) -> (std::path::PathBuf, Vec<u8>) {
1622        use std::io::Write;
1623
1624        let page_size = page_size::get();
1625        let padded_len = content.len().max(page_size).div_ceil(page_size) * page_size;
1626        let mut padded = vec![0u8; padded_len];
1627        padded[..content.len()].copy_from_slice(content);
1628
1629        let temp_dir = std::env::temp_dir();
1630        let path = temp_dir.join(name);
1631        let _ = std::fs::remove_file(&path); // clean up from previous runs
1632        let mut f = std::fs::File::create(&path).unwrap();
1633        f.write_all(&padded).unwrap();
1634        (path, content.to_vec())
1635    }
1636
1637    /// Tests the basic `map_file_cow` flow: map a file, read its content
1638    /// from the guest, and verify it matches.
1639    #[test]
1640    fn test_map_file_cow_basic() {
1641        let expected = b"hello world from map_file_cow";
1642        let (path, expected_bytes) =
1643            create_test_file("hyperlight_test_map_file_cow_basic.bin", expected);
1644
1645        let mut sbox = UninitializedSandbox::new(
1646            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1647            None,
1648        )
1649        .unwrap()
1650        .evolve()
1651        .unwrap();
1652
1653        let guest_base: u64 = 0x1_0000_0000;
1654        let mapped_size = sbox.map_file_cow(&path, guest_base, None).unwrap();
1655        assert!(mapped_size > 0, "mapped_size should be positive");
1656        assert!(
1657            mapped_size >= expected.len() as u64,
1658            "mapped_size should be >= file content length"
1659        );
1660
1661        // Read the content back from the guest
1662        let actual: Vec<u8> = sbox
1663            .call(
1664                "ReadMappedBuffer",
1665                (guest_base, expected_bytes.len() as u64, true),
1666            )
1667            .unwrap();
1668
1669        assert_eq!(
1670            actual, expected_bytes,
1671            "Guest should read back the exact file content"
1672        );
1673
1674        // Clean up
1675        let _ = std::fs::remove_file(&path);
1676    }
1677
1678    /// Tests that `map_file_cow` enforces read-only access: writing to
1679    /// the mapped region from the guest should cause a MemoryAccessViolation.
1680    #[test]
1681    fn test_map_file_cow_read_only_enforcement() {
1682        let content = &[0xBB; 4096];
1683        let (path, _) = create_test_file("hyperlight_test_map_file_cow_readonly.bin", content);
1684
1685        let mut sbox = UninitializedSandbox::new(
1686            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1687            None,
1688        )
1689        .unwrap()
1690        .evolve()
1691        .unwrap();
1692
1693        let guest_base: u64 = 0x1_0000_0000;
1694        sbox.map_file_cow(&path, guest_base, None).unwrap();
1695
1696        // Writing to the mapped region should fail with MemoryAccessViolation
1697        let err = sbox
1698            .call::<bool>("WriteMappedBuffer", (guest_base, content.len() as u64))
1699            .unwrap_err();
1700
1701        match err {
1702            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base => {}
1703            _ => panic!(
1704                "Expected MemoryAccessViolation at guest_base, got: {:?}",
1705                err
1706            ),
1707        };
1708
1709        // Clean up
1710        let _ = std::fs::remove_file(&path);
1711    }
1712
1713    /// Tests that `map_file_cow` returns `PoisonedSandbox` when the
1714    /// sandbox is poisoned.
1715    #[test]
1716    fn test_map_file_cow_poisoned() {
1717        let (path, _) = create_test_file("hyperlight_test_map_file_cow_poison.bin", &[0xCC; 4096]);
1718
1719        let mut sbox: MultiUseSandbox = {
1720            let path = simple_guest_as_string().unwrap();
1721            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1722            u_sbox.evolve()
1723        }
1724        .unwrap();
1725        let snapshot = sbox.snapshot().unwrap();
1726
1727        // Poison the sandbox
1728        let _ = sbox
1729            .call::<()>("guest_panic", "hello".to_string())
1730            .unwrap_err();
1731        assert!(sbox.poisoned());
1732
1733        // map_file_cow should fail with PoisonedSandbox
1734        let err = sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap_err();
1735        assert!(matches!(err, HyperlightError::PoisonedSandbox));
1736
1737        // Restore and verify map_file_cow works again
1738        sbox.restore(snapshot).unwrap();
1739        assert!(!sbox.poisoned());
1740        let result = sbox.map_file_cow(&path, 0x1_0000_0000, None);
1741        assert!(result.is_ok());
1742
1743        let _ = std::fs::remove_file(&path);
1744    }
1745
1746    /// Tests that two separate sandboxes can map the same file
1747    /// simultaneously and both read it correctly.
1748    #[test]
1749    fn test_map_file_cow_multi_vm_same_file() {
1750        let expected = b"shared file content across VMs";
1751        let (path, expected_bytes) =
1752            create_test_file("hyperlight_test_map_file_cow_multi_vm.bin", expected);
1753
1754        let guest_base: u64 = 0x1_0000_0000;
1755
1756        let mut sbox1 = UninitializedSandbox::new(
1757            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1758            None,
1759        )
1760        .unwrap()
1761        .evolve()
1762        .unwrap();
1763
1764        let mut sbox2 = UninitializedSandbox::new(
1765            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1766            None,
1767        )
1768        .unwrap()
1769        .evolve()
1770        .unwrap();
1771
1772        // Map the same file into both sandboxes
1773        sbox1.map_file_cow(&path, guest_base, None).unwrap();
1774        sbox2.map_file_cow(&path, guest_base, None).unwrap();
1775
1776        // Both should read the correct content
1777        let actual1: Vec<u8> = sbox1
1778            .call(
1779                "ReadMappedBuffer",
1780                (guest_base, expected_bytes.len() as u64, true),
1781            )
1782            .unwrap();
1783        let actual2: Vec<u8> = sbox2
1784            .call(
1785                "ReadMappedBuffer",
1786                (guest_base, expected_bytes.len() as u64, true),
1787            )
1788            .unwrap();
1789
1790        assert_eq!(
1791            actual1, expected_bytes,
1792            "Sandbox 1 should read correct content"
1793        );
1794        assert_eq!(
1795            actual2, expected_bytes,
1796            "Sandbox 2 should read correct content"
1797        );
1798
1799        let _ = std::fs::remove_file(&path);
1800    }
1801
1802    /// Tests that multiple threads can each create a sandbox, map the
1803    /// same file, read it, and drop without errors.
1804    #[test]
1805    fn test_map_file_cow_multi_vm_threaded() {
1806        let expected = b"threaded file mapping test data";
1807        let (path, expected_bytes) =
1808            create_test_file("hyperlight_test_map_file_cow_threaded.bin", expected);
1809
1810        const NUM_THREADS: usize = 5;
1811        let path = Arc::new(path);
1812        let expected_bytes = Arc::new(expected_bytes);
1813        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1814        let mut handles = vec![];
1815
1816        for _ in 0..NUM_THREADS {
1817            let path = path.clone();
1818            let expected_bytes = expected_bytes.clone();
1819            let barrier = barrier.clone();
1820
1821            handles.push(thread::spawn(move || {
1822                barrier.wait();
1823
1824                let mut sbox = UninitializedSandbox::new(
1825                    GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1826                    None,
1827                )
1828                .unwrap()
1829                .evolve()
1830                .unwrap();
1831
1832                let guest_base: u64 = 0x1_0000_0000;
1833                sbox.map_file_cow(&path, guest_base, None).unwrap();
1834
1835                let actual: Vec<u8> = sbox
1836                    .call(
1837                        "ReadMappedBuffer",
1838                        (guest_base, expected_bytes.len() as u64, true),
1839                    )
1840                    .unwrap();
1841
1842                assert_eq!(actual, *expected_bytes);
1843            }));
1844        }
1845
1846        for h in handles {
1847            h.join().unwrap();
1848        }
1849
1850        let _ = std::fs::remove_file(&*path);
1851    }
1852
1853    /// Tests that file cleanup works after dropping a sandbox that used
1854    /// `map_file_cow` — the file should be deletable (no leaked handles).
1855    #[test]
1856    #[cfg(target_os = "windows")]
1857    fn test_map_file_cow_cleanup_no_handle_leak() {
1858        let (path, _) = create_test_file("hyperlight_test_map_file_cow_cleanup.bin", &[0xDD; 4096]);
1859
1860        {
1861            let mut sbox = UninitializedSandbox::new(
1862                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1863                None,
1864            )
1865            .unwrap()
1866            .evolve()
1867            .unwrap();
1868
1869            sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap();
1870            // sandbox dropped here
1871        }
1872
1873        std::fs::remove_file(&path)
1874            .expect("File should be deletable after sandbox with map_file_cow is dropped");
1875    }
1876
1877    /// Tests snapshot/restore cycle with map_file_cow:
1878    /// snapshot₁ (no file) → map file → snapshot₂ → restore₁ (unmapped)
1879    /// → restore₂ (data folded into snapshot).
1880    #[test]
1881    fn test_map_file_cow_snapshot_remapping_cycle() {
1882        let expected = b"snapshot remapping cycle test!";
1883        let (path, expected_bytes) =
1884            create_test_file("hyperlight_test_map_file_cow_snapshot_remap.bin", expected);
1885
1886        let mut sbox = UninitializedSandbox::new(
1887            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1888            None,
1889        )
1890        .unwrap()
1891        .evolve()
1892        .unwrap();
1893
1894        let guest_base: u64 = 0x1_0000_0000;
1895
1896        // 1. snapshot₁ — no file mapped
1897        let snapshot1 = sbox.snapshot().unwrap();
1898
1899        // 2. Map the file
1900        sbox.map_file_cow(&path, guest_base, None).unwrap();
1901
1902        // Verify we can read it
1903        let actual: Vec<u8> = sbox
1904            .call(
1905                "ReadMappedBuffer",
1906                (guest_base, expected_bytes.len() as u64, true),
1907            )
1908            .unwrap();
1909        assert_eq!(actual, expected_bytes);
1910
1911        // 3. snapshot₂ — file mapped (data folded into snapshot)
1912        let snapshot2 = sbox.snapshot().unwrap();
1913
1914        // 4. Restore to snapshot₁ — file should be unmapped
1915        sbox.restore(snapshot1.clone()).unwrap();
1916        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1917        assert!(
1918            !is_mapped,
1919            "Region should be unmapped after restoring to snapshot₁"
1920        );
1921
1922        // 5. Restore to snapshot₂ — data should still be readable
1923        //    (folded into snapshot memory, not the original file mapping)
1924        sbox.restore(snapshot2).unwrap();
1925        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1926        assert!(
1927            is_mapped,
1928            "Region should be mapped after restoring to snapshot₂"
1929        );
1930        let actual2: Vec<u8> = sbox
1931            .call(
1932                "ReadMappedBuffer",
1933                (guest_base, expected_bytes.len() as u64, false),
1934            )
1935            .unwrap();
1936        assert_eq!(
1937            actual2, expected_bytes,
1938            "Data should be intact after snapshot₂ restore"
1939        );
1940
1941        let _ = std::fs::remove_file(&path);
1942    }
1943
1944    /// Tests that snapshot correctly captures map_file_cow data and
1945    /// restore brings it back.
1946    #[test]
1947    fn test_map_file_cow_snapshot_restore() {
1948        let expected = b"snapshot restore basic test!!";
1949        let (path, expected_bytes) =
1950            create_test_file("hyperlight_test_map_file_cow_snap_restore.bin", expected);
1951
1952        let mut sbox = UninitializedSandbox::new(
1953            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1954            None,
1955        )
1956        .unwrap()
1957        .evolve()
1958        .unwrap();
1959
1960        let guest_base: u64 = 0x1_0000_0000;
1961        sbox.map_file_cow(&path, guest_base, None).unwrap();
1962
1963        // Read the content to verify mapping works
1964        let actual: Vec<u8> = sbox
1965            .call(
1966                "ReadMappedBuffer",
1967                (guest_base, expected_bytes.len() as u64, true),
1968            )
1969            .unwrap();
1970        assert_eq!(actual, expected_bytes);
1971
1972        // Take snapshot — folds file data into snapshot memory
1973        let snapshot = sbox.snapshot().unwrap();
1974
1975        // Restore — the file-backed region is unmapped but data is in snapshot
1976        sbox.restore(snapshot).unwrap();
1977
1978        // Data should still be readable from snapshot memory
1979        let actual2: Vec<u8> = sbox
1980            .call(
1981                "ReadMappedBuffer",
1982                (guest_base, expected_bytes.len() as u64, false),
1983            )
1984            .unwrap();
1985        assert_eq!(
1986            actual2, expected_bytes,
1987            "Data should be readable after restore from snapshot"
1988        );
1989
1990        let _ = std::fs::remove_file(&path);
1991    }
1992
1993    /// Tests the deferred `map_file_cow` flow: map a file on
1994    /// `UninitializedSandbox` (before evolve), then evolve and verify
1995    /// the guest can read the mapped content.
1996    #[test]
1997    fn test_map_file_cow_deferred_basic() {
1998        let expected = b"deferred map_file_cow test data";
1999        let (path, expected_bytes) =
2000            create_test_file("hyperlight_test_map_file_cow_deferred.bin", expected);
2001
2002        let guest_base: u64 = 0x1_0000_0000;
2003
2004        let mut u_sbox = UninitializedSandbox::new(
2005            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2006            None,
2007        )
2008        .unwrap();
2009
2010        // Map the file before evolving — this defers the VM-side work.
2011        let mapped_size = u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2012        assert!(mapped_size > 0, "mapped_size should be positive");
2013        assert!(
2014            mapped_size >= expected.len() as u64,
2015            "mapped_size should be >= file content length"
2016        );
2017
2018        // Evolve — deferred mappings are applied during this step.
2019        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2020
2021        // Verify the guest can read the mapped content.
2022        let actual: Vec<u8> = sbox
2023            .call(
2024                "ReadMappedBuffer",
2025                (guest_base, expected_bytes.len() as u64, true),
2026            )
2027            .unwrap();
2028
2029        assert_eq!(
2030            actual, expected_bytes,
2031            "Guest should read back the exact file content after deferred mapping"
2032        );
2033
2034        let _ = std::fs::remove_file(&path);
2035    }
2036
2037    /// Tests that dropping an `UninitializedSandbox` with pending
2038    /// deferred file mappings does not leak or crash — the
2039    /// `PreparedFileMapping::Drop` should clean up host resources.
2040    #[test]
2041    fn test_map_file_cow_deferred_drop_without_evolve() {
2042        let (path, _) = create_test_file(
2043            "hyperlight_test_map_file_cow_deferred_drop.bin",
2044            &[0xAA; 4096],
2045        );
2046
2047        let guest_base: u64 = 0x1_0000_0000;
2048
2049        {
2050            let mut u_sbox = UninitializedSandbox::new(
2051                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2052                None,
2053            )
2054            .unwrap();
2055
2056            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2057            // u_sbox dropped here without evolving — PreparedFileMapping::drop
2058            // should clean up host-side OS resources.
2059        }
2060
2061        // If we get here without a crash/hang, cleanup worked.
2062        // On Windows, also verify the file handle was released.
2063        #[cfg(target_os = "windows")]
2064        std::fs::remove_file(&path)
2065            .expect("File should be deletable after dropping UninitializedSandbox");
2066        #[cfg(not(target_os = "windows"))]
2067        let _ = std::fs::remove_file(&path);
2068    }
2069
2070    /// Tests that `prepare_file_cow` rejects unaligned `guest_base`
2071    /// addresses eagerly, before allocating any OS resources.
2072    #[test]
2073    fn test_map_file_cow_unaligned_guest_base() {
2074        let (path, _) =
2075            create_test_file("hyperlight_test_map_file_cow_unaligned.bin", &[0xBB; 4096]);
2076
2077        let mut u_sbox = UninitializedSandbox::new(
2078            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2079            None,
2080        )
2081        .unwrap();
2082
2083        // Use an intentionally unaligned address (page_size + 1).
2084        let unaligned_base: u64 = (page_size::get() + 1) as u64;
2085        let result = u_sbox.map_file_cow(&path, unaligned_base, None);
2086        assert!(
2087            result.is_err(),
2088            "map_file_cow should reject unaligned guest_base"
2089        );
2090
2091        let _ = std::fs::remove_file(&path);
2092    }
2093
2094    /// Tests that `prepare_file_cow` rejects empty files.
2095    #[test]
2096    fn test_map_file_cow_empty_file() {
2097        let temp_dir = std::env::temp_dir();
2098        let path = temp_dir.join("hyperlight_test_map_file_cow_empty.bin");
2099        let _ = std::fs::remove_file(&path);
2100        std::fs::File::create(&path).unwrap(); // create empty file
2101
2102        let mut u_sbox = UninitializedSandbox::new(
2103            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2104            None,
2105        )
2106        .unwrap();
2107
2108        let guest_base: u64 = 0x1_0000_0000;
2109        let result = u_sbox.map_file_cow(&path, guest_base, None);
2110        assert!(result.is_err(), "map_file_cow should reject empty files");
2111
2112        let _ = std::fs::remove_file(&path);
2113    }
2114
2115    /// Tests that `map_file_cow` with a custom label succeeds.
2116    #[test]
2117    fn test_map_file_cow_custom_label() {
2118        let (path, _) = create_test_file("hyperlight_test_map_file_cow_label.bin", &[0xDD; 4096]);
2119
2120        let mut sbox = UninitializedSandbox::new(
2121            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2122            None,
2123        )
2124        .unwrap()
2125        .evolve()
2126        .unwrap();
2127
2128        let result = sbox.map_file_cow(&path, 0x1_0000_0000, Some("my_ramfs"));
2129        assert!(
2130            result.is_ok(),
2131            "map_file_cow with custom label should succeed"
2132        );
2133
2134        let _ = std::fs::remove_file(&path);
2135    }
2136
    /// Tests that `map_file_cow` on a MultiUseSandbox correctly writes
    /// the FileMappingInfo entry (count, guest_addr, size, label) into
    /// the PEB.
    #[test]
    #[cfg(feature = "nanvix-unstable")]
    fn test_map_file_cow_peb_entry_multiuse() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_multiuse.bin", &[0xDD; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "my_ramfs";

        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        // Map with an explicit label.
        let mapped_size = sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Read back the PEB file_mappings count.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(
            count, 1,
            "PEB file_mappings count should be 1 after one mapping"
        );

        // Read back the first FileMappingInfo entry. Fields are located
        // via offset_of! so the test stays correct if the struct layout moves.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base, "PEB entry guest_addr should match");

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(
            stored_size, mapped_size,
            "PEB entry size should match mapped_size"
        );

        // Read back the label bytes (one byte at a time via shared_mem)
        // and verify.
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        // The stored label is terminated by the first 0 byte (if any).
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(stored_label, label, "PEB entry label should match");

        let _ = std::fs::remove_file(&path);
    }
2213
    /// Tests that deferred `map_file_cow` (before evolve) correctly
    /// writes FileMappingInfo entries into the PEB during evolve.
    #[test]
    #[cfg(feature = "nanvix-unstable")]
    fn test_map_file_cow_peb_entry_deferred() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_deferred.bin", &[0xEE; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "deferred_fs";

        let mut u_sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap();

        // Map before evolve — PEB writes are deferred until evolve().
        let mapped_size = u_sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Evolve — PEB entries should be written during this step.
        let sbox: MultiUseSandbox = u_sbox.evolve().unwrap();

        // Read back count.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(count, 1, "PEB file_mappings count should be 1 after evolve");

        // Read back the entry. Fields are located via offset_of! so the
        // test stays correct if the struct layout moves.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base);

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(stored_size, mapped_size);

        // Verify the label (read byte-by-byte from shared memory; it is
        // terminated by the first 0 byte, if any).
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(
            stored_label, label,
            "PEB entry label should match after evolve"
        );

        let _ = std::fs::remove_file(&path);
    }
2286
2287    /// Tests that mapping 5 files (3 deferred + 2 post-evolve) correctly
2288    /// populates all PEB FileMappingInfo slots with the right guest_addr,
2289    /// size, and label for each entry.
2290    #[test]
2291    #[cfg(feature = "nanvix-unstable")]
2292    fn test_map_file_cow_peb_multiple_entries() {
2293        use std::mem::{offset_of, size_of};
2294
2295        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};
2296
2297        const NUM_FILES: usize = 5;
2298        const DEFERRED_COUNT: usize = 3;
2299
2300        // Create 5 test files with distinct content.
2301        let mut paths = Vec::new();
2302        let mut labels: Vec<String> = Vec::new();
2303        for i in 0..NUM_FILES {
2304            let name = format!("hyperlight_test_peb_multi_{}.bin", i);
2305            let content = vec![i as u8 + 0xA0; 4096];
2306            let (path, _) = create_test_file(&name, &content);
2307            paths.push(path);
2308            labels.push(format!("file_{}", i));
2309        }
2310
2311        // Each file gets a unique guest base, spaced 1 page apart
2312        // (well outside the shared memory region).
2313        let page_size = page_size::get() as u64;
2314        let base: u64 = 0x1_0000_0000;
2315        let guest_bases: Vec<u64> = (0..NUM_FILES as u64)
2316            .map(|i| base + i * page_size)
2317            .collect();
2318
2319        let mut u_sbox = UninitializedSandbox::new(
2320            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2321            None,
2322        )
2323        .unwrap();
2324
2325        // Map 3 files before evolve (deferred path).
2326        let mut mapped_sizes = Vec::new();
2327        for i in 0..DEFERRED_COUNT {
2328            let size = u_sbox
2329                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2330                .unwrap();
2331            mapped_sizes.push(size);
2332        }
2333
2334        // Evolve — deferred mappings applied + PEB entries written.
2335        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2336
2337        // Map 2 more files post-evolve (MultiUseSandbox path).
2338        for i in DEFERRED_COUNT..NUM_FILES {
2339            let size = sbox
2340                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2341                .unwrap();
2342            mapped_sizes.push(size);
2343        }
2344
2345        // Verify PEB count equals 5.
2346        let count = sbox
2347            .mem_mgr
2348            .shared_mem
2349            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
2350            .unwrap();
2351        assert_eq!(
2352            count, NUM_FILES as u64,
2353            "PEB should have {NUM_FILES} entries"
2354        );
2355
2356        // Verify each entry's guest_addr, size, and label.
2357        let array_base = sbox.mem_mgr.layout.get_file_mappings_array_offset();
2358        for i in 0..NUM_FILES {
2359            let entry_offset = array_base + i * size_of::<FileMappingInfo>();
2360
2361            let stored_addr = sbox
2362                .mem_mgr
2363                .shared_mem
2364                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
2365                .unwrap();
2366            assert_eq!(
2367                stored_addr, guest_bases[i],
2368                "Entry {i}: guest_addr mismatch"
2369            );
2370
2371            let stored_size = sbox
2372                .mem_mgr
2373                .shared_mem
2374                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
2375                .unwrap();
2376            assert_eq!(stored_size, mapped_sizes[i], "Entry {i}: size mismatch");
2377
2378            // Read and verify the label.
2379            let label_base = entry_offset + offset_of!(FileMappingInfo, label);
2380            let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
2381            for (j, byte) in label_buf.iter_mut().enumerate() {
2382                *byte = sbox.mem_mgr.shared_mem.read::<u8>(label_base + j).unwrap();
2383            }
2384            let label_len = label_buf
2385                .iter()
2386                .position(|&b| b == 0)
2387                .unwrap_or(label_buf.len());
2388            let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
2389            assert_eq!(stored_label, labels[i], "Entry {i}: label mismatch");
2390        }
2391
2392        // Clean up.
2393        for path in &paths {
2394            let _ = std::fs::remove_file(path);
2395        }
2396    }
2397
2398    /// Tests that an explicitly provided label exceeding 63 bytes is rejected.
2399    #[test]
2400    fn test_map_file_cow_label_too_long() {
2401        let (path, _) =
2402            create_test_file("hyperlight_test_map_file_cow_long_label.bin", &[0xEE; 4096]);
2403
2404        let guest_base: u64 = 0x1_0000_0000;
2405
2406        let mut u_sbox = UninitializedSandbox::new(
2407            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2408            None,
2409        )
2410        .unwrap();
2411
2412        // A label of exactly 64 bytes exceeds the 63-byte max.
2413        let long_label = "A".repeat(64);
2414        let result = u_sbox.map_file_cow(&path, guest_base, Some(&long_label));
2415        assert!(
2416            result.is_err(),
2417            "map_file_cow should reject labels longer than 63 bytes"
2418        );
2419
2420        // Labels at exactly 63 bytes should be fine.
2421        let ok_label = "B".repeat(63);
2422        let result = u_sbox.map_file_cow(&path, guest_base, Some(&ok_label));
2423        assert!(
2424            result.is_ok(),
2425            "map_file_cow should accept labels of exactly 63 bytes"
2426        );
2427
2428        let _ = std::fs::remove_file(&path);
2429    }
2430
2431    /// Tests that labels containing null bytes are rejected.
2432    #[test]
2433    fn test_map_file_cow_label_null_byte() {
2434        let (path, _) =
2435            create_test_file("hyperlight_test_map_file_cow_null_label.bin", &[0xFF; 4096]);
2436
2437        let guest_base: u64 = 0x1_0000_0000;
2438
2439        let mut u_sbox = UninitializedSandbox::new(
2440            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2441            None,
2442        )
2443        .unwrap();
2444
2445        let result = u_sbox.map_file_cow(&path, guest_base, Some("has\0null"));
2446        assert!(
2447            result.is_err(),
2448            "map_file_cow should reject labels containing null bytes"
2449        );
2450
2451        let _ = std::fs::remove_file(&path);
2452    }
2453
2454    /// Tests that mapping two files to overlapping GPA ranges is rejected.
2455    #[test]
2456    fn test_map_file_cow_overlapping_mappings() {
2457        let (path1, _) =
2458            create_test_file("hyperlight_test_map_file_cow_overlap1.bin", &[0xAA; 4096]);
2459        let (path2, _) =
2460            create_test_file("hyperlight_test_map_file_cow_overlap2.bin", &[0xBB; 4096]);
2461
2462        let guest_base: u64 = 0x1_0000_0000;
2463
2464        let mut u_sbox = UninitializedSandbox::new(
2465            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2466            None,
2467        )
2468        .unwrap();
2469
2470        // First mapping should succeed.
2471        u_sbox.map_file_cow(&path1, guest_base, None).unwrap();
2472
2473        // Second mapping at the same address should fail (overlap).
2474        let result = u_sbox.map_file_cow(&path2, guest_base, None);
2475        assert!(
2476            result.is_err(),
2477            "map_file_cow should reject overlapping guest address ranges"
2478        );
2479
2480        let _ = std::fs::remove_file(&path1);
2481        let _ = std::fs::remove_file(&path2);
2482    }
2483
2484    /// Tests that `map_file_cow` rejects a guest_base that overlaps
2485    /// the sandbox's shared memory region.
2486    #[test]
2487    fn test_map_file_cow_shared_mem_overlap() {
2488        let (path, _) = create_test_file(
2489            "hyperlight_test_map_file_cow_overlap_shm.bin",
2490            &[0xCC; 4096],
2491        );
2492
2493        let mut u_sbox = UninitializedSandbox::new(
2494            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2495            None,
2496        )
2497        .unwrap();
2498
2499        // Use BASE_ADDRESS itself — smack in the middle of shared memory.
2500        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
2501        // page-align it (BASE_ADDRESS is 0x1000, already page-aligned)
2502        let result = u_sbox.map_file_cow(&path, base_addr, None);
2503        assert!(
2504            result.is_err(),
2505            "map_file_cow should reject guest_base inside shared memory"
2506        );
2507
2508        let _ = std::fs::remove_file(&path);
2509    }
2510
2511    /// Tests that exceeding MAX_FILE_MAPPINGS on UninitializedSandbox
2512    /// is rejected at registration time.
2513    #[test]
2514    fn test_map_file_cow_max_limit() {
2515        use hyperlight_common::mem::MAX_FILE_MAPPINGS;
2516
2517        let mut u_sbox = UninitializedSandbox::new(
2518            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2519            None,
2520        )
2521        .unwrap();
2522
2523        let page_size = page_size::get() as u64;
2524        // Base well outside shared memory.
2525        let base: u64 = 0x1_0000_0000;
2526
2527        // Register MAX_FILE_MAPPINGS files — each needs a distinct file
2528        // and a non-overlapping GPA.
2529        let mut paths = Vec::new();
2530        for i in 0..MAX_FILE_MAPPINGS {
2531            let name = format!("hyperlight_test_max_limit_{}.bin", i);
2532            let (path, _) = create_test_file(&name, &[0xAA; 4096]);
2533            let guest_base = base + (i as u64) * page_size;
2534            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2535            paths.push(path);
2536        }
2537
2538        // The (MAX_FILE_MAPPINGS + 1)th should fail.
2539        let name = format!("hyperlight_test_max_limit_{}.bin", MAX_FILE_MAPPINGS);
2540        let (path, _) = create_test_file(&name, &[0xBB; 4096]);
2541        let guest_base = base + (MAX_FILE_MAPPINGS as u64) * page_size;
2542        let result = u_sbox.map_file_cow(&path, guest_base, None);
2543        assert!(
2544            result.is_err(),
2545            "map_file_cow should reject after MAX_FILE_MAPPINGS registrations"
2546        );
2547
2548        // Clean up.
2549        for p in &paths {
2550            let _ = std::fs::remove_file(p);
2551        }
2552        let _ = std::fs::remove_file(&path);
2553    }
2554}