// hyperlight_host/src/sandbox/initialized_multi_use.rs

1/*
2Copyright 2025  The Hyperlight Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17use std::collections::HashSet;
18use std::path::Path;
19use std::sync::atomic::Ordering;
20use std::sync::{Arc, Mutex};
21
22use flatbuffers::FlatBufferBuilder;
23use hyperlight_common::flatbuffer_wrappers::function_call::{FunctionCall, FunctionCallType};
24use hyperlight_common::flatbuffer_wrappers::function_types::{
25    ParameterValue, ReturnType, ReturnValue,
26};
27use hyperlight_common::flatbuffer_wrappers::util::estimate_flatbuffer_capacity;
28use tracing::{Span, instrument};
29
30use super::Callable;
31use super::file_mapping::prepare_file_cow;
32use super::host_funcs::FunctionRegistry;
33use super::snapshot::Snapshot;
34use crate::HyperlightError::{self, SnapshotSandboxMismatch};
35use crate::Result;
36use crate::func::{ParameterTuple, SupportedReturnType};
37use crate::hypervisor::InterruptHandle;
38use crate::hypervisor::hyperlight_vm::{HyperlightVm, HyperlightVmError};
39#[cfg(target_os = "linux")]
40use crate::log_then_return;
41use crate::mem::memory_region::MemoryRegion;
42#[cfg(target_os = "linux")]
43use crate::mem::memory_region::MemoryRegionFlags;
44use crate::mem::mgr::SandboxMemoryManager;
45use crate::mem::shared_mem::{HostSharedMemory, SharedMemory as _};
46use crate::metrics::{
47    METRIC_GUEST_ERROR, METRIC_GUEST_ERROR_LABEL_CODE, maybe_time_and_emit_guest_call,
48};
49
/// A fully initialized sandbox that can execute guest functions multiple times.
///
/// Guest functions can be called repeatedly while maintaining state between calls.
/// The sandbox supports creating snapshots and restoring to previous states.
///
/// ## Sandbox Poisoning
///
/// The sandbox becomes **poisoned** when the guest is not run to completion, leaving it in
/// an inconsistent state that could compromise memory safety, data integrity, or security.
///
/// ### When Does Poisoning Occur?
///
/// Poisoning happens when guest execution is interrupted before normal completion:
///
/// - **Guest panics or aborts** - When a guest function panics, crashes, or calls `abort()`,
///   the normal cleanup and unwinding process is interrupted
/// - **Invalid memory access** - Attempts to read/write/execute memory outside allowed regions
/// - **Stack overflow** - Guest exhausts its stack space during execution
/// - **Heap exhaustion** - Guest runs out of heap memory
/// - **Host-initiated cancellation** - Calling [`InterruptHandle::kill()`] to forcefully
///   terminate an in-progress guest function
///
/// ### Why This Is Unsafe
///
/// When guest execution doesn't complete normally, critical cleanup operations are skipped:
///
/// - **Memory leaks** - Heap allocations remain unreachable as the call stack is unwound
/// - **Corrupted allocator state** - Memory allocator metadata (free lists, heap headers)
///   left inconsistent
/// - **Locked resources** - Mutexes or other synchronization primitives remain locked
/// - **Partial state updates** - Data structures left half-modified (corrupted linked lists,
///   inconsistent hash tables, etc.)
///
/// ### Recovery
///
/// Use [`restore()`](Self::restore) with a snapshot taken before poisoning occurred.
/// This is the **only safe way** to recover - it completely replaces all memory state,
/// eliminating any inconsistencies. See [`restore()`](Self::restore) for details.
pub struct MultiUseSandbox {
    /// Unique identifier for this sandbox instance
    id: u64,
    /// Whether this sandbox is poisoned
    poisoned: bool,
    /// Registry of host functions that the guest may call back into.
    pub(super) host_funcs: Arc<Mutex<FunctionRegistry>>,
    /// Manager for the host-side view of the sandbox's shared memory.
    pub(crate) mem_mgr: SandboxMemoryManager<HostSharedMemory>,
    /// The underlying virtual machine that executes guest code.
    vm: HyperlightVm,
    /// Memory-manager handle used to service debugger memory accesses
    /// (present only in gdb-enabled builds).
    #[cfg(gdb)]
    dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
    /// If the current state of the sandbox has been captured in a snapshot,
    /// that snapshot is stored here.
    snapshot: Option<Arc<Snapshot>>,
}
102
103impl MultiUseSandbox {
104    /// Move an `UninitializedSandbox` into a new `MultiUseSandbox` instance.
105    ///
106    /// This function is not equivalent to doing an `evolve` from uninitialized
107    /// to initialized, and is purposely not exposed publicly outside the crate
108    /// (as a `From` implementation would be)
109    #[instrument(skip_all, parent = Span::current(), level = "Trace")]
110    pub(super) fn from_uninit(
111        host_funcs: Arc<Mutex<FunctionRegistry>>,
112        mgr: SandboxMemoryManager<HostSharedMemory>,
113        vm: HyperlightVm,
114        #[cfg(gdb)] dbg_mem_access_fn: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
115    ) -> MultiUseSandbox {
116        Self {
117            id: super::snapshot::SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
118            poisoned: false,
119            host_funcs,
120            mem_mgr: mgr,
121            vm,
122            #[cfg(gdb)]
123            dbg_mem_access_fn,
124            snapshot: None,
125        }
126    }
127
128    /// Creates a snapshot of the sandbox's current memory state.
129    ///
130    /// The snapshot is tied to this specific sandbox instance and can only be
131    /// restored to the same sandbox it was created from.
132    ///
133    /// ## Poisoned Sandbox
134    ///
135    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
136    /// is currently poisoned. Snapshots can only be taken from non-poisoned sandboxes.
137    ///
138    /// # Examples
139    ///
140    /// ```no_run
141    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
142    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
143    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
144    ///     GuestBinary::FilePath("guest.bin".into()),
145    ///     None
146    /// )?.evolve()?;
147    ///
148    /// // Modify sandbox state
149    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 42)?;
150    ///
151    /// // Create snapshot belonging to this sandbox
152    /// let snapshot = sandbox.snapshot()?;
153    /// # Ok(())
154    /// # }
155    /// ```
156    #[instrument(err(Debug), skip_all, parent = Span::current())]
157    pub fn snapshot(&mut self) -> Result<Arc<Snapshot>> {
158        if self.poisoned {
159            return Err(crate::HyperlightError::PoisonedSandbox);
160        }
161
162        if let Some(snapshot) = &self.snapshot {
163            return Ok(snapshot.clone());
164        }
165        let mapped_regions_iter = self.vm.get_mapped_regions();
166        let mapped_regions_vec: Vec<MemoryRegion> = mapped_regions_iter.cloned().collect();
167        let root_pt_gpa = self
168            .vm
169            .get_root_pt()
170            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
171        let stack_top_gpa = self.vm.get_stack_top();
172        let sregs = self
173            .vm
174            .get_snapshot_sregs()
175            .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
176        let entrypoint = self.vm.get_entrypoint();
177        let memory_snapshot = self.mem_mgr.snapshot(
178            self.id,
179            mapped_regions_vec,
180            root_pt_gpa,
181            stack_top_gpa,
182            sregs,
183            entrypoint,
184        )?;
185        let snapshot = Arc::new(memory_snapshot);
186        self.snapshot = Some(snapshot.clone());
187        Ok(snapshot)
188    }
189
    /// Restores the sandbox's memory to a previously captured snapshot state.
    ///
    /// The snapshot must have been created from this same sandbox instance.
    /// Attempting to restore a snapshot from a different sandbox will return
    /// a [`SnapshotSandboxMismatch`](crate::HyperlightError::SnapshotSandboxMismatch) error.
    ///
    /// ## Poison State Recovery
    ///
    /// This method automatically clears any poison state when successful. This is safe because:
    /// - Snapshots can only be taken from non-poisoned sandboxes
    /// - Restoration completely replaces all memory state, eliminating any inconsistencies
    ///   caused by incomplete guest execution
    ///
    /// ### What Gets Fixed During Restore
    ///
    /// When a poisoned sandbox is restored, the memory state is completely reset:
    /// - **Leaked heap memory** - All allocations from interrupted execution are discarded
    /// - **Corrupted allocator metadata** - Free lists and heap headers restored to consistent state
    /// - **Locked mutexes** - All lock state is reset
    /// - **Partial updates** - Data structures restored to their pre-execution state
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Take initial snapshot from this sandbox
    /// let snapshot = sandbox.snapshot()?;
    ///
    /// // Modify sandbox state
    /// sandbox.call_guest_function_by_name::<i32>("SetValue", 100)?;
    /// let value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
    /// assert_eq!(value, 100);
    ///
    /// // Restore to previous state (same sandbox)
    /// sandbox.restore(snapshot)?;
    /// let restored_value: i32 = sandbox.call_guest_function_by_name("GetValue", ())?;
    /// assert_eq!(restored_value, 0); // Back to initial state
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// ## Recovering from Poison
    ///
    /// ```no_run
    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary, HyperlightError};
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
    ///     GuestBinary::FilePath("guest.bin".into()),
    ///     None
    /// )?.evolve()?;
    ///
    /// // Take snapshot before potentially poisoning operation
    /// let snapshot = sandbox.snapshot()?;
    ///
    /// // This might poison the sandbox (guest not run to completion)
    /// let result = sandbox.call::<()>("guest_panic", ());
    /// if result.is_err() {
    ///     if sandbox.poisoned() {
    ///         // Restore from snapshot to clear poison
    ///         sandbox.restore(snapshot.clone())?;
    ///         assert!(!sandbox.poisoned());
    ///
    ///         // Sandbox is now usable again
    ///         sandbox.call::<String>("Echo", "hello".to_string())?;
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn restore(&mut self, snapshot: Arc<Snapshot>) -> Result<()> {
        // Currently, we do not try to optimise restore to the
        // most-current snapshot. This is because the most-current
        // snapshot, while it must have identical virtual memory
        // layout to the current sandbox, does not necessarily have
        // the exact same /physical/ memory contents. It is not
        // entirely inconceivable that this could lead to breakage of
        // cross-request isolation in some way, although it would
        // require some /very/ odd code.  For example, suppose that a
        // service uses Hyperlight to sandbox native code from
        // clients, and promises cross-request isolation. A tenant
        // provides a binary that can process two forms of request,
        // either writing a secret into physical memory, or reading
        // from arbitrary physical memory, assuming that the two kinds
        // of requests can never (dangerously) meet in the same
        // sandbox.
        //
        // It is presently unclear whether this is a sensible threat
        // model, especially since Hyperlight is often used with
        // managed-code runtimes which do not allow even arbitrary
        // access to virtual memory, much less physical memory.
        // However, out of an abundance of caution, the optimisation
        // is presently disabled.

        // Reject snapshots taken from a different sandbox instance.
        if self.id != snapshot.sandbox_id() {
            return Err(SnapshotSandboxMismatch);
        }

        // Restore memory contents first; this may hand back fresh guest
        // snapshot/scratch mappings that must be re-registered with the VM.
        let (gsnapshot, gscratch) = self.mem_mgr.restore_snapshot(&snapshot)?;
        if let Some(gsnapshot) = gsnapshot {
            self.vm
                .update_snapshot_mapping(gsnapshot)
                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        }
        if let Some(gscratch) = gscratch {
            self.vm
                .update_scratch_mapping(gscratch)
                .map_err(|e| HyperlightError::HyperlightVmError(e.into()))?;
        }

        let sregs = snapshot.sregs().ok_or_else(|| {
            HyperlightError::Error("snapshot from running sandbox should have sregs".to_string())
        })?;
        // TODO (ludfjig): Go through the rest of possible errors in this `MultiUseSandbox::restore` function
        // and determine if they should also poison the sandbox.
        self.vm
            .reset_vcpu(snapshot.root_pt_gpa(), sregs)
            .map_err(|e| {
                // A failed vcpu reset leaves the VM in an unknown state, so
                // mark the sandbox poisoned before surfacing the error.
                self.poisoned = true;
                HyperlightVmError::Restore(e)
            })?;

        self.vm.set_stack_top(snapshot.stack_top_gva());
        self.vm.set_entrypoint(snapshot.entrypoint());

        // Reconcile host-memory mappings: compute the set differences between
        // what the VM currently maps and what the snapshot expects, then
        // unmap the extras and re-map the missing regions.
        let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect();
        let snapshot_regions: HashSet<_> = snapshot.regions().iter().cloned().collect();

        let regions_to_unmap = current_regions.difference(&snapshot_regions);
        let regions_to_map = snapshot_regions.difference(&current_regions);

        for region in regions_to_unmap {
            self.vm
                .unmap_region(region)
                .map_err(HyperlightVmError::UnmapRegion)?;
        }

        for region in regions_to_map {
            // Safety: The region has been mapped before, and at that point the caller promised that the memory region is valid
            // in their call to `MultiUseSandbox::map_region`
            unsafe { self.vm.map_region(region) }.map_err(HyperlightVmError::MapRegion)?;
        }

        // The restored snapshot is now our most current snapshot
        self.snapshot = Some(snapshot.clone());

        // Clear poison state when successfully restoring from snapshot.
        //
        // # Safety:
        // This is safe because:
        // 1. Snapshots can only be taken from non-poisoned sandboxes (verified at snapshot creation)
        // 2. Restoration completely replaces all memory state, eliminating:
        //    - All leaked heap allocations (memory is restored to snapshot state)
        //    - All corrupted data structures (overwritten with consistent snapshot data)
        //    - All inconsistent global state (reset to snapshot values)
        self.poisoned = false;

        Ok(())
    }
355
356    /// Calls a guest function by name with the specified arguments.
357    ///
358    /// Changes made to the sandbox during execution are *not* persisted.
359    ///
360    /// ## Poisoned Sandbox
361    ///
362    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
363    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
364    ///
365    /// # Examples
366    ///
367    /// ```no_run
368    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
369    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
370    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
371    ///     GuestBinary::FilePath("guest.bin".into()),
372    ///     None
373    /// )?.evolve()?;
374    ///
375    /// // Call function with no arguments
376    /// let result: i32 = sandbox.call_guest_function_by_name("GetCounter", ())?;
377    ///
378    /// // Call function with single argument
379    /// let doubled: i32 = sandbox.call_guest_function_by_name("Double", 21)?;
380    /// assert_eq!(doubled, 42);
381    ///
382    /// // Call function with multiple arguments
383    /// let sum: i32 = sandbox.call_guest_function_by_name("Add", (10, 32))?;
384    /// assert_eq!(sum, 42);
385    ///
386    /// // Call function returning string
387    /// let message: String = sandbox.call_guest_function_by_name("Echo", "Hello, World!".to_string())?;
388    /// assert_eq!(message, "Hello, World!");
389    /// # Ok(())
390    /// # }
391    /// ```
392    #[doc(hidden)]
393    #[deprecated(
394        since = "0.8.0",
395        note = "Deprecated in favour of call and snapshot/restore."
396    )]
397    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
398    pub fn call_guest_function_by_name<Output: SupportedReturnType>(
399        &mut self,
400        func_name: &str,
401        args: impl ParameterTuple,
402    ) -> Result<Output> {
403        if self.poisoned {
404            return Err(crate::HyperlightError::PoisonedSandbox);
405        }
406        let snapshot = self.snapshot()?;
407        let res = self.call(func_name, args);
408        self.restore(snapshot)?;
409        res
410    }
411
412    /// Calls a guest function by name with the specified arguments.
413    ///
414    /// Changes made to the sandbox during execution are persisted.
415    ///
416    /// ## Poisoned Sandbox
417    ///
418    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
419    /// is already poisoned before the call. Use [`restore()`](Self::restore) to recover from
420    /// a poisoned state.
421    ///
422    /// ## Sandbox Poisoning
423    ///
424    /// If this method returns an error, the sandbox may be poisoned if the guest was not run
425    /// to completion (due to panic, abort, memory violation, stack/heap exhaustion, or forced
426    /// termination). Use [`poisoned()`](Self::poisoned) to check the poison state and
427    /// [`restore()`](Self::restore) to recover if needed.
428    ///
429    /// If this method returns `Ok`, the sandbox is guaranteed to **not** be poisoned - the guest
430    /// function completed successfully and the sandbox state is consistent.
431    ///
432    /// # Examples
433    ///
434    /// ```no_run
435    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
436    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
437    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
438    ///     GuestBinary::FilePath("guest.bin".into()),
439    ///     None
440    /// )?.evolve()?;
441    ///
442    /// // Call function with no arguments
443    /// let result: i32 = sandbox.call("GetCounter", ())?;
444    ///
445    /// // Call function with single argument
446    /// let doubled: i32 = sandbox.call("Double", 21)?;
447    /// assert_eq!(doubled, 42);
448    ///
449    /// // Call function with multiple arguments
450    /// let sum: i32 = sandbox.call("Add", (10, 32))?;
451    /// assert_eq!(sum, 42);
452    ///
453    /// // Call function returning string
454    /// let message: String = sandbox.call("Echo", "Hello, World!".to_string())?;
455    /// assert_eq!(message, "Hello, World!");
456    /// # Ok(())
457    /// # }
458    /// ```
459    ///
460    /// ## Handling Potential Poisoning
461    ///
462    /// ```no_run
463    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
464    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
465    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
466    ///     GuestBinary::FilePath("guest.bin".into()),
467    ///     None
468    /// )?.evolve()?;
469    ///
470    /// // Take snapshot before risky operation
471    /// let snapshot = sandbox.snapshot()?;
472    ///
473    /// // Call potentially unsafe guest function
474    /// let result = sandbox.call::<String>("RiskyOperation", "input".to_string());
475    ///
476    /// // Check if the call failed and poisoned the sandbox
477    /// if let Err(e) = result {
478    ///     eprintln!("Guest function failed: {}", e);
479    ///     
480    ///     if sandbox.poisoned() {
481    ///         eprintln!("Sandbox was poisoned, restoring from snapshot");
482    ///         sandbox.restore(snapshot.clone())?;
483    ///     }
484    /// }
485    /// # Ok(())
486    /// # }
487    /// ```
488    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
489    pub fn call<Output: SupportedReturnType>(
490        &mut self,
491        func_name: &str,
492        args: impl ParameterTuple,
493    ) -> Result<Output> {
494        if self.poisoned {
495            return Err(crate::HyperlightError::PoisonedSandbox);
496        }
497        // Reset snapshot since we are mutating the sandbox state
498        self.snapshot = None;
499        maybe_time_and_emit_guest_call(func_name, || {
500            let ret = self.call_guest_function_by_name_no_reset(
501                func_name,
502                Output::TYPE,
503                args.into_value(),
504            );
505            // Use the ? operator to allow converting any hyperlight_common::func::Error
506            // returned by from_value into a HyperlightError
507            let ret = Output::from_value(ret?)?;
508            Ok(ret)
509        })
510    }
511
512    /// Maps a region of host memory into the sandbox address space.
513    ///
514    /// The base address and length must meet platform alignment requirements
515    /// (typically page-aligned). The `region_type` field is ignored as guest
516    /// page table entries are not created.
517    ///
518    /// ## Poisoned Sandbox
519    ///
520    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
521    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
522    ///
523    /// # Safety
524    ///
525    /// The caller must ensure the host memory region remains valid and unmodified
526    /// for the lifetime of `self`.
527    #[instrument(err(Debug), skip(self, rgn), parent = Span::current())]
528    #[cfg(target_os = "linux")]
529    pub unsafe fn map_region(&mut self, rgn: &MemoryRegion) -> Result<()> {
530        if self.poisoned {
531            return Err(crate::HyperlightError::PoisonedSandbox);
532        }
533        if rgn.flags.contains(MemoryRegionFlags::WRITE) {
534            // TODO: Implement support for writable mappings, which
535            // need to be registered with the memory manager so that
536            // writes can be rolled back when necessary.
537            log_then_return!("TODO: Writable mappings not yet supported");
538        }
539        // Reset snapshot since we are mutating the sandbox state
540        self.snapshot = None;
541        unsafe { self.vm.map_region(rgn) }.map_err(HyperlightVmError::MapRegion)?;
542        self.mem_mgr.mapped_rgns += 1;
543        Ok(())
544    }
545
    /// Map the contents of a file into the guest at a particular address
    ///
    /// An optional `label` identifies this mapping in the PEB's
    /// `FileMappingInfo` array (max 63 bytes, defaults to the file name).
    ///
    /// Returns the length of the mapping in bytes.
    ///
    /// ## Poisoned Sandbox
    ///
    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
    #[instrument(err(Debug), skip(self, file_path, guest_base, label), parent = Span::current())]
    pub fn map_file_cow(
        &mut self,
        file_path: &Path,
        guest_base: u64,
        label: Option<&str>,
    ) -> Result<u64> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }

        // Pre-check the file mapping limit before doing any expensive
        // OS or VM work. The PEB count is the source of truth.
        let current_count = self
            .mem_mgr
            .shared_mem
            .read::<u64>(self.mem_mgr.layout.get_file_mappings_size_offset())?
            as usize;
        if current_count >= hyperlight_common::mem::MAX_FILE_MAPPINGS {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: file mapping limit reached ({} of {})",
                current_count,
                hyperlight_common::mem::MAX_FILE_MAPPINGS,
            )));
        }

        // Phase 1: host-side OS work (open file, create mapping)
        let mut prepared = prepare_file_cow(file_path, guest_base, label)?;

        // Validate that the full mapped range doesn't overlap the
        // sandbox's primary shared memory region.
        let shared_size = self.mem_mgr.shared_mem.mem_size() as u64;
        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
        let shared_end = base_addr.checked_add(shared_size).ok_or_else(|| {
            crate::HyperlightError::Error("shared memory end overflow".to_string())
        })?;
        let mapping_end = guest_base
            .checked_add(prepared.size as u64)
            .ok_or_else(|| {
                crate::HyperlightError::Error(format!(
                    "map_file_cow: guest address overflow: {:#x} + {:#x}",
                    guest_base, prepared.size
                ))
            })?;
        // Half-open interval intersection test:
        // [guest_base, mapping_end) vs [base_addr, shared_end).
        if guest_base < shared_end && mapping_end > base_addr {
            return Err(crate::HyperlightError::Error(format!(
                "map_file_cow: mapping [{:#x}..{:#x}) overlaps sandbox shared memory [{:#x}..{:#x})",
                guest_base, mapping_end, base_addr, shared_end,
            )));
        }

        // Phase 2: VM-side work (map into guest address space)
        let region = prepared.to_memory_region()?;

        // Check for overlaps with existing file mappings in the VM.
        for existing_region in self.vm.get_mapped_regions() {
            let ex_start = existing_region.guest_region.start as u64;
            let ex_end = existing_region.guest_region.end as u64;
            if guest_base < ex_end && mapping_end > ex_start {
                return Err(crate::HyperlightError::Error(format!(
                    "map_file_cow: mapping [{:#x}..{:#x}) overlaps existing mapping [{:#x}..{:#x})",
                    guest_base, mapping_end, ex_start, ex_end,
                )));
            }
        }

        // Reset snapshot since we are mutating the sandbox state
        self.snapshot = None;

        // NOTE(review): safety of this call rests on `to_memory_region()`
        // producing a region backed by the still-live `prepared` mapping.
        unsafe { self.vm.map_region(&region) }
            .map_err(HyperlightVmError::MapRegion)
            .map_err(crate::HyperlightError::HyperlightVmError)?;

        let size = prepared.size as u64;

        // Mark consumed immediately after map_region succeeds.
        // On Windows, WhpVm::map_memory copies the file mapping handle
        // into its own `file_mappings` vec for cleanup on drop. If we
        // deferred mark_consumed(), both PreparedFileMapping::drop and
        // WhpVm::drop would release the same handle — a double-close.
        // On Linux the hypervisor holds a reference to the host mmap;
        // freeing it here would leave a dangling backing.
        prepared.mark_consumed();
        self.mem_mgr.mapped_rgns += 1;

        // Record the mapping metadata in the PEB. If this fails the VM
        // still holds a valid mapping but the PEB won't list it — the
        // limit was already pre-checked above so this should not fail
        // in practice.
        self.mem_mgr
            .write_file_mapping_entry(prepared.guest_base, size, &prepared.label)?;

        Ok(size)
    }
651
652    /// Calls a guest function with type-erased parameters and return values.
653    ///
654    /// This function is used for fuzz testing parameter and return type handling.
655    ///
656    /// ## Poisoned Sandbox
657    ///
658    /// This method will return [`crate::HyperlightError::PoisonedSandbox`] if the sandbox
659    /// is currently poisoned. Use [`restore()`](Self::restore) to recover from a poisoned state.
660    #[cfg(feature = "fuzzing")]
661    #[instrument(err(Debug), skip(self, args), parent = Span::current())]
662    pub fn call_type_erased_guest_function_by_name(
663        &mut self,
664        func_name: &str,
665        ret_type: ReturnType,
666        args: Vec<ParameterValue>,
667    ) -> Result<ReturnValue> {
668        if self.poisoned {
669            return Err(crate::HyperlightError::PoisonedSandbox);
670        }
671        // Reset snapshot since we are mutating the sandbox state
672        self.snapshot = None;
673        maybe_time_and_emit_guest_call(func_name, || {
674            self.call_guest_function_by_name_no_reset(func_name, ret_type, args)
675        })
676    }
677
    /// Dispatches a single guest function call without resetting sandbox
    /// state afterwards.
    ///
    /// Serializes the call into guest memory, runs the VM dispatch (which may
    /// service host function calls along the way), then deserializes the
    /// guest's result. On error, the sandbox is marked poisoned when either
    /// the dispatch error or the returned error is classified as a poison
    /// error, and the shared io-buffers are cleared.
    fn call_guest_function_by_name_no_reset(
        &mut self,
        function_name: &str,
        return_type: ReturnType,
        args: Vec<ParameterValue>,
    ) -> Result<ReturnValue> {
        if self.poisoned {
            return Err(crate::HyperlightError::PoisonedSandbox);
        }
        // ===== KILL() TIMING POINT 1 =====
        // Clear any stale cancellation from a previous guest function call or if kill() was called too early.
        // Any kill() that completed (even partially) BEFORE this line has NO effect on this call.
        self.vm.clear_cancel();

        // Run the whole call inside a closure so that the error-path cleanup
        // below applies uniformly to every failure point.
        let res = (|| {
            // Pre-size the flatbuffer to avoid repeated growth while encoding.
            let estimated_capacity = estimate_flatbuffer_capacity(function_name, &args);

            let fc = FunctionCall::new(
                function_name.to_string(),
                Some(args),
                FunctionCallType::Guest,
                return_type,
            );

            let mut builder = FlatBufferBuilder::with_capacity(estimated_capacity);
            let buffer = fc.encode(&mut builder);

            self.mem_mgr.write_guest_function_call(buffer)?;

            let dispatch_res = self.vm.dispatch_call_from_host(
                &mut self.mem_mgr,
                &self.host_funcs,
                #[cfg(gdb)]
                self.dbg_mem_access_fn.clone(),
            );

            // Convert dispatch errors to HyperlightErrors to maintain backwards compatibility
            // but first determine if sandbox should be poisoned
            if let Err(e) = dispatch_res {
                let (error, should_poison) = e.promote();
                self.poisoned |= should_poison;
                return Err(error);
            }

            let guest_result = self.mem_mgr.get_guest_function_call_result()?.into_inner();

            match guest_result {
                Ok(val) => Ok(val),
                Err(guest_error) => {
                    // Count guest-reported failures, labelled by error code.
                    metrics::counter!(
                        METRIC_GUEST_ERROR,
                        METRIC_GUEST_ERROR_LABEL_CODE => (guest_error.code as u64).to_string()
                    )
                    .increment(1);

                    Err(HyperlightError::GuestError(
                        guest_error.code,
                        guest_error.message,
                    ))
                }
            }
        })();

        // In the happy path we do not need to clear io-buffers from the host because:
        // - the serialized guest function call is zeroed out by the guest during deserialization, see call to `try_pop_shared_input_data_into::<FunctionCall>()`
        // - the serialized guest function result is zeroed out by us (the host) during deserialization, see `get_guest_function_call_result`
        // - any serialized host function call are zeroed out by us (the host) during deserialization, see `get_host_function_call`
        // - any serialized host function result is zeroed out by the guest during deserialization, see `get_host_return_value`
        if let Err(e) = &res {
            self.mem_mgr.clear_io_buffers();

            // Determine if we should poison the sandbox.
            self.poisoned |= e.is_poison_error();
        }

        // Note: clear_call_active() is automatically called when _guard is dropped here
        // NOTE(review): no `_guard` binding exists in this function body as
        // written — this comment appears stale or refers to code outside this
        // view; confirm against the guard's actual definition.

        res
    }
757
758    /// Returns a handle for interrupting guest execution.
759    ///
760    /// # Examples
761    ///
762    /// ```no_run
763    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
764    /// # use std::thread;
765    /// # use std::time::Duration;
766    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
767    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
768    ///     GuestBinary::FilePath("guest.bin".into()),
769    ///     None
770    /// )?.evolve()?;
771    ///
772    /// // Get interrupt handle before starting long-running operation
773    /// let interrupt_handle = sandbox.interrupt_handle();
774    ///
775    /// // Spawn thread to interrupt after timeout
776    /// let handle_clone = interrupt_handle.clone();
777    /// thread::spawn(move || {
778    ///     thread::sleep(Duration::from_secs(5));
779    ///     handle_clone.kill();
780    /// });
781    ///
782    /// // This call may be interrupted by the spawned thread
783    /// let result = sandbox.call_guest_function_by_name::<i32>("LongRunningFunction", ());
784    /// # Ok(())
785    /// # }
786    /// ```
787    pub fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
788        self.vm.interrupt_handle()
789    }
790
    /// Generate a crash dump of the current state of the VM underlying this sandbox.
    ///
    /// Creates an ELF core dump file that can be used for debugging. The dump
    /// captures the current state of the sandbox including registers, memory regions,
    /// and other execution context.
    ///
    /// The location of the core dump file is determined by the `HYPERLIGHT_CORE_DUMP_DIR`
    /// environment variable. If not set, it defaults to the system's temporary directory.
    ///
    /// This is only available when the `crashdump` feature is enabled and then only if the sandbox
    /// is also configured to allow core dumps (which is the default behavior).
    ///
    /// This can be useful for generating a crash dump from gdb when trying to debug issues in the
    /// guest that don't cause crashes (e.g. a guest function that does not return)
    ///
    /// # Examples
    ///
    /// Attach to your running process with gdb and call this function:
    ///
    /// ```shell
    /// sudo gdb -p <pid_of_your_process>
    /// (gdb) info threads
    /// # find the thread that is running the guest function you want to debug
    /// (gdb) thread <thread_number>
    /// # switch to the frame where you have access to your MultiUseSandbox instance
    /// (gdb) backtrace
    /// (gdb) frame <frame_number>
    /// # get the pointer to your MultiUseSandbox instance
    /// # Get the sandbox pointer
    /// (gdb) print sandbox
    /// # Call the crashdump function
    /// call sandbox.generate_crashdump()
    /// ```
    /// The crashdump should be available in crash dump directory (see `HYPERLIGHT_CORE_DUMP_DIR` env var).
    ///
    #[cfg(crashdump)]
    #[instrument(err(Debug), skip_all, parent = Span::current())]
    pub fn generate_crashdump(&mut self) -> Result<()> {
        // `None` directory: the shared implementation resolves the output
        // location from HYPERLIGHT_CORE_DUMP_DIR (see doc above); use
        // `generate_crashdump_to_dir` to pass a directory explicitly.
        crate::hypervisor::crashdump::generate_crashdump(&self.vm, &mut self.mem_mgr, None)
    }
831
832    /// Generate a crash dump of the current state of the VM, writing to `dir`.
833    ///
834    /// Like [`generate_crashdump`](Self::generate_crashdump), but the core dump
835    /// file is placed in `dir` instead of consulting the `HYPERLIGHT_CORE_DUMP_DIR`
836    /// environment variable.  This avoids the need for callers to use
837    /// `unsafe { std::env::set_var(...) }`.
838    #[cfg(crashdump)]
839    #[instrument(err(Debug), skip_all, parent = Span::current())]
840    pub fn generate_crashdump_to_dir(&mut self, dir: impl Into<String>) -> Result<()> {
841        crate::hypervisor::crashdump::generate_crashdump(
842            &self.vm,
843            &mut self.mem_mgr,
844            Some(dir.into()),
845        )
846    }
847
848    /// Returns whether the sandbox is currently poisoned.
849    ///
850    /// A poisoned sandbox is in an inconsistent state due to the guest not running to completion.
851    /// All operations will be rejected until the sandbox is restored from a non-poisoned snapshot.
852    ///
853    /// ## Causes of Poisoning
854    ///
855    /// The sandbox becomes poisoned when guest execution is interrupted:
856    /// - **Panics/Aborts** - Guest code panics or calls `abort()`
857    /// - **Invalid Memory Access** - Read/write/execute violations  
858    /// - **Stack Overflow** - Guest exhausts stack space
859    /// - **Heap Exhaustion** - Guest runs out of heap memory
860    /// - **Forced Termination** - [`InterruptHandle::kill()`] called during execution
861    ///
862    /// ## Recovery
863    ///
864    /// To clear the poison state, use [`restore()`](Self::restore) with a snapshot
865    /// that was taken before the sandbox became poisoned.
866    ///
867    /// # Examples
868    ///
869    /// ```no_run
870    /// # use hyperlight_host::{MultiUseSandbox, UninitializedSandbox, GuestBinary};
871    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
872    /// let mut sandbox: MultiUseSandbox = UninitializedSandbox::new(
873    ///     GuestBinary::FilePath("guest.bin".into()),
874    ///     None
875    /// )?.evolve()?;
876    ///
877    /// // Check if sandbox is poisoned
878    /// if sandbox.poisoned() {
879    ///     println!("Sandbox is poisoned and needs attention");
880    /// }
881    /// # Ok(())
882    /// # }
883    /// ```
884    pub fn poisoned(&self) -> bool {
885        self.poisoned
886    }
887}
888
889impl Callable for MultiUseSandbox {
890    fn call<Output: SupportedReturnType>(
891        &mut self,
892        func_name: &str,
893        args: impl ParameterTuple,
894    ) -> Result<Output> {
895        if self.poisoned {
896            return Err(crate::HyperlightError::PoisonedSandbox);
897        }
898        self.call(func_name, args)
899    }
900}
901
902impl std::fmt::Debug for MultiUseSandbox {
903    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
904        f.debug_struct("MultiUseSandbox").finish()
905    }
906}
907
908#[cfg(test)]
909mod tests {
910    use std::sync::{Arc, Barrier};
911    use std::thread;
912
913    use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
914    use hyperlight_testing::sandbox_sizes::{LARGE_HEAP_SIZE, MEDIUM_HEAP_SIZE, SMALL_HEAP_SIZE};
915    use hyperlight_testing::simple_guest_as_string;
916
917    #[cfg(target_os = "linux")]
918    use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
919    #[cfg(target_os = "linux")]
920    use crate::mem::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, SharedMemory as _};
921    use crate::sandbox::SandboxConfiguration;
922    use crate::{GuestBinary, HyperlightError, MultiUseSandbox, Result, UninitializedSandbox};
923
    /// End-to-end coverage of the poisoning lifecycle: a guest panic poisons
    /// the sandbox; while poisoned every operation (calls, snapshot,
    /// map_region, map_file_cow, deprecated call API) is rejected; restoring a
    /// pre-poison snapshot clears the poison and everything works again.
    #[test]
    fn poison() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve()
        }
        .unwrap();
        // Taken while healthy — used later to recover from poisoning.
        let snapshot = sbox.snapshot().unwrap();

        // poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // guest calls should fail when poisoned
        let res = sbox
            .call::<()>("guest_panic", "hello2".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // snapshot should fail when poisoned
        if let Err(e) = sbox.snapshot() {
            assert!(sbox.poisoned());
            assert!(matches!(e, HyperlightError::PoisonedSandbox));
        } else {
            panic!("Snapshot should fail");
        }

        // map_region should fail when poisoned
        #[cfg(target_os = "linux")]
        {
            let map_mem = allocate_guest_memory();
            let guest_base = 0x0;
            let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);
            let res = unsafe { sbox.map_region(&region) }.unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
        }

        // map_file_cow should fail when poisoned
        #[cfg(target_os = "linux")]
        {
            let temp_file = std::env::temp_dir().join("test_poison_map_file.bin");
            let res = sbox.map_file_cow(&temp_file, 0x0, None).unwrap_err();
            assert!(matches!(res, HyperlightError::PoisonedSandbox));
            std::fs::remove_file(&temp_file).ok(); // Clean up
        }

        // call_guest_function_by_name (deprecated) should fail when poisoned
        #[allow(deprecated)]
        let res = sbox
            .call_guest_function_by_name::<String>("Echo", "test".to_string())
            .unwrap_err();
        assert!(matches!(res, HyperlightError::PoisonedSandbox));

        // restore to non-poisoned snapshot should work and clear poison
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again after restore
        let res = sbox.call::<String>("Echo", "hello2".to_string()).unwrap();
        assert_eq!(res, "hello2".to_string());
        assert!(!sbox.poisoned());

        // re-poison on purpose
        let res = sbox
            .call::<()>("guest_panic", "hello".to_string())
            .unwrap_err();
        assert!(
            matches!(res, HyperlightError::GuestAborted(code, context) if code == ErrorCode::UnknownError as u8 && context.contains("hello"))
        );
        assert!(sbox.poisoned());

        // restore to non-poisoned snapshot should work again
        sbox.restore(snapshot.clone()).unwrap();
        assert!(!sbox.poisoned());

        // guest calls should work again
        let res = sbox.call::<String>("Echo", "hello3".to_string()).unwrap();
        assert_eq!(res, "hello3".to_string());
        assert!(!sbox.poisoned());

        // snapshot should work again
        let _ = sbox.snapshot().unwrap();
    }
1013
1014    /// Make sure input/output buffers are properly reset after guest call (with host call)
1015    #[test]
1016    fn host_func_error() {
1017        let path = simple_guest_as_string().unwrap();
1018        let mut sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1019        sandbox
1020            .register("HostError", || -> Result<()> {
1021                Err(HyperlightError::Error("hi".to_string()))
1022            })
1023            .unwrap();
1024        let mut sandbox = sandbox.evolve().unwrap();
1025
1026        // will exhaust io if leaky
1027        for _ in 0..1000 {
1028            let result = sandbox
1029                .call::<i64>(
1030                    "CallGivenParamlessHostFuncThatReturnsI64",
1031                    "HostError".to_string(),
1032                )
1033                .unwrap_err();
1034
1035            assert!(
1036                matches!(result, HyperlightError::GuestError(code, msg) if code == ErrorCode::HostFunctionError && msg == "hi"),
1037            );
1038        }
1039    }
1040
1041    #[test]
1042    fn call_host_func_expect_error() {
1043        let path = simple_guest_as_string().unwrap();
1044        let sandbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1045        let mut sandbox = sandbox.evolve().unwrap();
1046        sandbox
1047            .call::<()>("CallHostExpectError", "SomeUnknownHostFunc".to_string())
1048            .unwrap();
1049    }
1050
1051    /// Make sure input/output buffers are properly reset after guest call (with host call)
1052    #[test]
1053    fn io_buffer_reset() {
1054        let mut cfg = SandboxConfiguration::default();
1055        cfg.set_input_data_size(4096);
1056        cfg.set_output_data_size(4096);
1057        let path = simple_guest_as_string().unwrap();
1058        let mut sandbox =
1059            UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1060        sandbox.register("HostAdd", |a: i32, b: i32| a + b).unwrap();
1061        let mut sandbox = sandbox.evolve().unwrap();
1062
1063        // will exhaust io if leaky. Tests both success and error paths
1064        for _ in 0..1000 {
1065            let result = sandbox.call::<i32>("Add", (5i32, 10i32)).unwrap();
1066            assert_eq!(result, 15);
1067            let result = sandbox.call::<i32>("AddToStaticAndFail", ()).unwrap_err();
1068            assert!(
1069                matches!(result, HyperlightError::GuestError (code, msg ) if code == ErrorCode::GuestError && msg == "Crash on purpose")
1070            );
1071        }
1072    }
1073
1074    /// Tests that call_guest_function_by_name restores the state correctly
1075    #[test]
1076    fn test_call_guest_function_by_name() {
1077        let mut sbox: MultiUseSandbox = {
1078            let path = simple_guest_as_string().unwrap();
1079            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1080            u_sbox.evolve()
1081        }
1082        .unwrap();
1083
1084        let snapshot = sbox.snapshot().unwrap();
1085
1086        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1087        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1088        assert_eq!(res, 5);
1089
1090        sbox.restore(snapshot).unwrap();
1091        #[allow(deprecated)]
1092        let _ = sbox
1093            .call_guest_function_by_name::<i32>("AddToStatic", 5i32)
1094            .unwrap();
1095        #[allow(deprecated)]
1096        let res: i32 = sbox.call_guest_function_by_name("GetStatic", ()).unwrap();
1097        assert_eq!(res, 0);
1098    }
1099
1100    // Tests to ensure that many (1000) function calls can be made in a call context with a small stack (24K) and heap(20K).
1101    // This test effectively ensures that the stack is being properly reset after each call and we are not leaking memory in the Guest.
1102    #[test]
1103    fn test_with_small_stack_and_heap() {
1104        let mut cfg = SandboxConfiguration::default();
1105        cfg.set_heap_size(20 * 1024);
1106        // min_scratch_size already includes 1 page (4k on most
1107        // platforms) of guest stack, so add 20k more to get 24k
1108        // total, and then add some more for the eagerly-copied page
1109        // tables on amd64
1110        let min_scratch = hyperlight_common::layout::min_scratch_size(
1111            cfg.get_input_data_size(),
1112            cfg.get_output_data_size(),
1113        );
1114        cfg.set_scratch_size(min_scratch + 0x10000 + 0x10000);
1115
1116        let mut sbox1: MultiUseSandbox = {
1117            let path = simple_guest_as_string().unwrap();
1118            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1119            u_sbox.evolve()
1120        }
1121        .unwrap();
1122
1123        for _ in 0..1000 {
1124            sbox1.call::<String>("Echo", "hello".to_string()).unwrap();
1125        }
1126
1127        let mut sbox2: MultiUseSandbox = {
1128            let path = simple_guest_as_string().unwrap();
1129            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg)).unwrap();
1130            u_sbox.evolve()
1131        }
1132        .unwrap();
1133
1134        for i in 0..1000 {
1135            sbox2
1136                .call::<i32>(
1137                    "PrintUsingPrintf",
1138                    format!("Hello World {}\n", i).to_string(),
1139                )
1140                .unwrap();
1141        }
1142    }
1143
1144    /// Tests that evolving from MultiUseSandbox to MultiUseSandbox creates a new state
1145    /// and restoring a snapshot from before evolving restores the previous state
1146    #[test]
1147    fn snapshot_evolve_restore_handles_state_correctly() {
1148        let mut sbox: MultiUseSandbox = {
1149            let path = simple_guest_as_string().unwrap();
1150            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1151            u_sbox.evolve()
1152        }
1153        .unwrap();
1154
1155        let snapshot = sbox.snapshot().unwrap();
1156
1157        let _ = sbox.call::<i32>("AddToStatic", 5i32).unwrap();
1158
1159        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1160        assert_eq!(res, 5);
1161
1162        sbox.restore(snapshot).unwrap();
1163        let res: i32 = sbox.call("GetStatic", ()).unwrap();
1164        assert_eq!(res, 0);
1165    }
1166
1167    #[test]
1168    fn test_trigger_exception_on_guest() {
1169        let usbox = UninitializedSandbox::new(
1170            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1171            None,
1172        )
1173        .unwrap();
1174
1175        let mut multi_use_sandbox: MultiUseSandbox = usbox.evolve().unwrap();
1176
1177        let res: Result<()> = multi_use_sandbox.call("TriggerException", ());
1178
1179        assert!(res.is_err());
1180
1181        match res.unwrap_err() {
1182            HyperlightError::GuestAborted(_, msg) => {
1183                // msg should indicate we got an invalid opcode exception
1184                assert!(msg.contains("InvalidOpcode"));
1185            }
1186            e => panic!(
1187                "Expected HyperlightError::GuestExecutionError but got {:?}",
1188                e
1189            ),
1190        }
1191    }
1192
1193    #[test]
1194    fn create_200_sandboxes() {
1195        const NUM_THREADS: usize = 10;
1196        const SANDBOXES_PER_THREAD: usize = 20;
1197
1198        // barrier to make sure all threads start their work simultaneously
1199        let start_barrier = Arc::new(Barrier::new(NUM_THREADS + 1));
1200        let mut thread_handles = vec![];
1201
1202        for _ in 0..NUM_THREADS {
1203            let barrier = start_barrier.clone();
1204
1205            let handle = thread::spawn(move || {
1206                barrier.wait();
1207
1208                for _ in 0..SANDBOXES_PER_THREAD {
1209                    let guest_path = simple_guest_as_string().expect("Guest Binary Missing");
1210                    let uninit =
1211                        UninitializedSandbox::new(GuestBinary::FilePath(guest_path), None).unwrap();
1212
1213                    let mut sandbox: MultiUseSandbox = uninit.evolve().unwrap();
1214
1215                    let result: i32 = sandbox.call("GetStatic", ()).unwrap();
1216                    assert_eq!(result, 0);
1217                }
1218            });
1219
1220            thread_handles.push(handle);
1221        }
1222
1223        start_barrier.wait();
1224
1225        for handle in thread_handles {
1226            handle.join().unwrap();
1227        }
1228    }
1229
1230    #[cfg(target_os = "linux")]
1231    #[test]
1232    fn test_mmap() {
1233        let mut sbox = UninitializedSandbox::new(
1234            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1235            None,
1236        )
1237        .unwrap()
1238        .evolve()
1239        .unwrap();
1240
1241        let expected = b"hello world";
1242        let map_mem = page_aligned_memory(expected);
1243        let guest_base = 0x1_0000_0000; // Arbitrary guest base address
1244
1245        unsafe {
1246            sbox.map_region(&region_for_memory(
1247                &map_mem,
1248                guest_base,
1249                MemoryRegionFlags::READ,
1250            ))
1251            .unwrap();
1252        }
1253
1254        let _guard = map_mem.lock.try_read().unwrap();
1255        let actual: Vec<u8> = sbox
1256            .call(
1257                "ReadMappedBuffer",
1258                (guest_base as u64, expected.len() as u64, true),
1259            )
1260            .unwrap();
1261
1262        assert_eq!(actual, expected);
1263    }
1264
    // Makes sure MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE executable but not writable
    #[cfg(target_os = "linux")]
    #[test]
    fn test_mmap_write_exec() {
        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        let expected = &[0x90, 0x90, 0x90, 0xC3]; // NOOP slide to RET
        let map_mem = page_aligned_memory(expected);
        let guest_base = 0x1_0000_0000; // Arbitrary guest base address

        // Map the buffer read+execute (deliberately NOT writable).
        unsafe {
            sbox.map_region(&region_for_memory(
                &map_mem,
                guest_base,
                MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE,
            ))
            .unwrap();
        }

        // Hold a read guard on the mapped memory while the guest uses it.
        let _guard = map_mem.lock.try_read().unwrap();

        // Execute should pass since memory is executable
        let succeed = sbox
            .call::<bool>(
                "ExecMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap();
        assert!(succeed, "Expected execution of mapped buffer to succeed");

        // write should fail because the memory is mapped as read-only
        let err = sbox
            .call::<bool>(
                "WriteMappedBuffer",
                (guest_base as u64, expected.len() as u64),
            )
            .unwrap_err();

        // The faulting address reported must be the mapped region's base.
        match err {
            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base as u64 => {}
            _ => panic!("Expected MemoryAccessViolation error"),
        };
    }
1314
1315    #[cfg(target_os = "linux")]
1316    fn page_aligned_memory(src: &[u8]) -> GuestSharedMemory {
1317        use hyperlight_common::mem::PAGE_SIZE_USIZE;
1318
1319        let len = src.len().div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
1320
1321        let mut mem = ExclusiveSharedMemory::new(len).unwrap();
1322        mem.copy_from_slice(src, 0).unwrap();
1323
1324        let (_, guest_mem) = mem.build();
1325
1326        guest_mem
1327    }
1328
1329    #[cfg(target_os = "linux")]
1330    fn region_for_memory(
1331        mem: &GuestSharedMemory,
1332        guest_base: usize,
1333        flags: MemoryRegionFlags,
1334    ) -> MemoryRegion {
1335        let ptr = mem.base_addr();
1336        let len = mem.mem_size();
1337        MemoryRegion {
1338            host_region: ptr..(ptr + len),
1339            guest_region: guest_base..(guest_base + len),
1340            flags,
1341            region_type: MemoryRegionType::Heap,
1342        }
1343    }
1344
1345    #[cfg(target_os = "linux")]
1346    fn allocate_guest_memory() -> GuestSharedMemory {
1347        page_aligned_memory(b"test data for snapshot")
1348    }
1349
    /// Verifies that snapshot/restore handles dynamically mapped regions:
    /// restoring to a snapshot taken before a mapping removes the region,
    /// while restoring forward to a snapshot taken after the mapping keeps
    /// the region's contents (folded into the snapshot itself).
    #[test]
    #[cfg(target_os = "linux")]
    fn snapshot_restore_handles_remapping_correctly() {
        let mut sbox: MultiUseSandbox = {
            let path = simple_guest_as_string().unwrap();
            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
            u_sbox.evolve().unwrap()
        };

        // 1. Take snapshot 1 with no additional regions mapped
        let snapshot1 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);

        // 2. Map a memory region
        let map_mem = allocate_guest_memory();
        let guest_base = 0x200000000_usize;
        let region = region_for_memory(&map_mem, guest_base, MemoryRegionFlags::READ);

        unsafe { sbox.map_region(&region).unwrap() };
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);
        let orig_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    true,
                ),
            )
            .unwrap();

        // 3. Take snapshot 2 with 1 region mapped
        let snapshot2 = sbox.snapshot().unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 1);

        // 4. Restore to snapshot 1 (should unmap the region)
        sbox.restore(snapshot1.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(!is_mapped);

        // 5. Restore forward to snapshot 2 (should have folded the
        //    region into the snapshot)
        // Note: the mapped-region count stays 0 because the region's contents
        // now live inside the snapshot rather than as a separate mapping,
        // yet the guest still sees the address as mapped.
        sbox.restore(snapshot2.clone()).unwrap();
        assert_eq!(sbox.vm.get_mapped_regions().count(), 0);
        let is_mapped = sbox
            .call::<bool>("CheckMapped", (guest_base as u64,))
            .unwrap();
        assert!(is_mapped);

        // Verify the region is the same
        let new_read = sbox
            .call::<Vec<u8>>(
                "ReadMappedBuffer",
                (
                    guest_base as u64,
                    hyperlight_common::vmem::PAGE_SIZE as u64,
                    false,
                ),
            )
            .unwrap();
        assert_eq!(new_read, orig_read);
    }
1415
1416    #[test]
1417    fn snapshot_different_sandbox() {
1418        let mut sandbox = {
1419            let path = simple_guest_as_string().unwrap();
1420            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1421            u_sbox.evolve().unwrap()
1422        };
1423
1424        let mut sandbox2 = {
1425            let path = simple_guest_as_string().unwrap();
1426            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1427            u_sbox.evolve().unwrap()
1428        };
1429        assert_ne!(sandbox.id, sandbox2.id);
1430
1431        let snapshot = sandbox.snapshot().unwrap();
1432        let err = sandbox2.restore(snapshot.clone());
1433        assert!(matches!(err, Err(HyperlightError::SnapshotSandboxMismatch)));
1434
1435        let sandbox_id = sandbox.id;
1436        drop(sandbox);
1437        drop(sandbox2);
1438        drop(snapshot);
1439
1440        let sandbox3 = {
1441            let path = simple_guest_as_string().unwrap();
1442            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1443            u_sbox.evolve().unwrap()
1444        };
1445        assert_ne!(sandbox3.id, sandbox_id);
1446    }
1447
1448    /// Test that snapshot restore properly resets vCPU debug registers. This test verifies
1449    /// that restore() calls reset_vcpu().
1450    #[test]
1451    fn snapshot_restore_resets_debug_registers() {
1452        let mut sandbox: MultiUseSandbox = {
1453            let path = simple_guest_as_string().unwrap();
1454            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1455            u_sbox.evolve().unwrap()
1456        };
1457
1458        let snapshot = sandbox.snapshot().unwrap();
1459
1460        // Verify DR0 is initially 0 (clean state)
1461        let dr0_initial: u64 = sandbox.call("GetDr0", ()).unwrap();
1462        assert_eq!(dr0_initial, 0, "DR0 should initially be 0");
1463
1464        // Dirty DR0 by setting it to a known non-zero value
1465        const DIRTY_VALUE: u64 = 0xDEAD_BEEF_CAFE_BABE;
1466        sandbox.call::<()>("SetDr0", DIRTY_VALUE).unwrap();
1467        let dr0_dirty: u64 = sandbox.call("GetDr0", ()).unwrap();
1468        assert_eq!(
1469            dr0_dirty, DIRTY_VALUE,
1470            "DR0 should be dirty after SetDr0 call"
1471        );
1472
1473        // Restore to the snapshot - this should reset vCPU state including debug registers
1474        sandbox.restore(snapshot).unwrap();
1475
1476        let dr0_after_restore: u64 = sandbox.call("GetDr0", ()).unwrap();
1477        assert_eq!(
1478            dr0_after_restore, 0,
1479            "DR0 should be 0 after restore (reset_vcpu should have been called)"
1480        );
1481    }
1482
1483    /// Test that sandboxes can be created and evolved with different heap sizes
1484    #[test]
1485    fn test_sandbox_creation_various_sizes() {
1486        let test_cases: [(&str, u64); 3] = [
1487            ("small (8MB heap)", SMALL_HEAP_SIZE),
1488            ("medium (64MB heap)", MEDIUM_HEAP_SIZE),
1489            ("large (256MB heap)", LARGE_HEAP_SIZE),
1490        ];
1491
1492        for (name, heap_size) in test_cases {
1493            let mut cfg = SandboxConfiguration::default();
1494            cfg.set_heap_size(heap_size);
1495            cfg.set_scratch_size(0x100000);
1496
1497            let path = simple_guest_as_string().unwrap();
1498            let sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), Some(cfg))
1499                .unwrap_or_else(|e| panic!("Failed to create {} sandbox: {}", name, e))
1500                .evolve()
1501                .unwrap_or_else(|e| panic!("Failed to evolve {} sandbox: {}", name, e));
1502
1503            drop(sbox);
1504        }
1505    }
1506
1507    /// Helper: create a MultiUseSandbox from the simple guest with default config.
1508    #[cfg(feature = "trace_guest")]
1509    fn sandbox_for_gva_tests() -> MultiUseSandbox {
1510        let path = simple_guest_as_string().unwrap();
1511        UninitializedSandbox::new(GuestBinary::FilePath(path), None)
1512            .unwrap()
1513            .evolve()
1514            .unwrap()
1515    }
1516
1517    /// Helper: read memory at `gva` of length `len` from the guest side via
1518    /// `ReadMappedBuffer(gva, len, false)` and from the host side via
1519    /// `read_guest_memory_by_gva`, then assert both views are identical.
1520    #[cfg(feature = "trace_guest")]
1521    fn assert_gva_read_matches(sbox: &mut MultiUseSandbox, gva: u64, len: usize) {
1522        // Guest reads via its own page tables
1523        let expected: Vec<u8> = sbox
1524            .call("ReadMappedBuffer", (gva, len as u64, true))
1525            .unwrap();
1526        assert_eq!(expected.len(), len);
1527
1528        // Host reads by walking the same page tables
1529        let root_pt = sbox.vm.get_root_pt().unwrap();
1530        let actual = sbox
1531            .mem_mgr
1532            .read_guest_memory_by_gva(gva, len, root_pt)
1533            .unwrap();
1534
1535        assert_eq!(
1536            actual, expected,
1537            "read_guest_memory_by_gva at GVA {:#x} (len {}) differs from guest ReadMappedBuffer",
1538            gva, len,
1539        );
1540    }
1541
1542    /// Test reading a small buffer (< 1 page) from guest memory via GVA.
1543    /// Uses the guest code section which is already identity-mapped.
1544    #[test]
1545    #[cfg(feature = "trace_guest")]
1546    fn read_guest_memory_by_gva_single_page() {
1547        let mut sbox = sandbox_for_gva_tests();
1548        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1549        assert_gva_read_matches(&mut sbox, code_gva, 128);
1550    }
1551
1552    /// Test reading exactly one full page (4096 bytes) from guest memory.
1553    /// Uses the guest code section
1554    #[test]
1555    #[cfg(feature = "trace_guest")]
1556    fn read_guest_memory_by_gva_full_page() {
1557        let mut sbox = sandbox_for_gva_tests();
1558        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1559        assert_gva_read_matches(&mut sbox, code_gva, 4096);
1560    }
1561
1562    /// Test that a read starting at an odd (non-page-aligned) address and
1563    /// spanning two page boundaries returns correct data.
1564    #[test]
1565    #[cfg(feature = "trace_guest")]
1566    fn read_guest_memory_by_gva_unaligned_cross_page() {
1567        let mut sbox = sandbox_for_gva_tests();
1568        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1569        // Start 1 byte before the second page boundary and read 4097 bytes
1570        // (spans 2 full page boundaries).
1571        let start = code_gva + 4096 - 1;
1572        println!(
1573            "Testing unaligned cross-page read starting at {:#x} spanning 4097 bytes",
1574            start
1575        );
1576        assert_gva_read_matches(&mut sbox, start, 4097);
1577    }
1578
1579    /// Test reading exactly two full pages (8192 bytes) from guest memory.
1580    #[test]
1581    #[cfg(feature = "trace_guest")]
1582    fn read_guest_memory_by_gva_two_full_pages() {
1583        let mut sbox = sandbox_for_gva_tests();
1584        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1585        assert_gva_read_matches(&mut sbox, code_gva, 4096 * 2);
1586    }
1587
1588    /// Test reading a region that spans across a page boundary: starts
1589    /// 100 bytes before the end of the first page and reads 200 bytes
1590    /// into the second page.
1591    #[test]
1592    #[cfg(feature = "trace_guest")]
1593    fn read_guest_memory_by_gva_cross_page_boundary() {
1594        let mut sbox = sandbox_for_gva_tests();
1595        let code_gva = sbox.mem_mgr.layout.get_guest_code_address() as u64;
1596        // Start 100 bytes before the first page boundary, read across it.
1597        let start = code_gva + 4096 - 100;
1598        assert_gva_read_matches(&mut sbox, start, 200);
1599    }
1600
1601    /// Helper: create a temp file with known content, padded to be
1602    /// at least page-aligned (4096 bytes). Returns the path and the
1603    /// *original* content bytes (before padding).
1604    fn create_test_file(name: &str, content: &[u8]) -> (std::path::PathBuf, Vec<u8>) {
1605        use std::io::Write;
1606
1607        let page_size = page_size::get();
1608        let padded_len = content.len().max(page_size).div_ceil(page_size) * page_size;
1609        let mut padded = vec![0u8; padded_len];
1610        padded[..content.len()].copy_from_slice(content);
1611
1612        let temp_dir = std::env::temp_dir();
1613        let path = temp_dir.join(name);
1614        let _ = std::fs::remove_file(&path); // clean up from previous runs
1615        let mut f = std::fs::File::create(&path).unwrap();
1616        f.write_all(&padded).unwrap();
1617        (path, content.to_vec())
1618    }
1619
1620    /// Tests the basic `map_file_cow` flow: map a file, read its content
1621    /// from the guest, and verify it matches.
1622    #[test]
1623    fn test_map_file_cow_basic() {
1624        let expected = b"hello world from map_file_cow";
1625        let (path, expected_bytes) =
1626            create_test_file("hyperlight_test_map_file_cow_basic.bin", expected);
1627
1628        let mut sbox = UninitializedSandbox::new(
1629            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1630            None,
1631        )
1632        .unwrap()
1633        .evolve()
1634        .unwrap();
1635
1636        let guest_base: u64 = 0x1_0000_0000;
1637        let mapped_size = sbox.map_file_cow(&path, guest_base, None).unwrap();
1638        assert!(mapped_size > 0, "mapped_size should be positive");
1639        assert!(
1640            mapped_size >= expected.len() as u64,
1641            "mapped_size should be >= file content length"
1642        );
1643
1644        // Read the content back from the guest
1645        let actual: Vec<u8> = sbox
1646            .call(
1647                "ReadMappedBuffer",
1648                (guest_base, expected_bytes.len() as u64, true),
1649            )
1650            .unwrap();
1651
1652        assert_eq!(
1653            actual, expected_bytes,
1654            "Guest should read back the exact file content"
1655        );
1656
1657        // Clean up
1658        let _ = std::fs::remove_file(&path);
1659    }
1660
1661    /// Tests that `map_file_cow` enforces read-only access: writing to
1662    /// the mapped region from the guest should cause a MemoryAccessViolation.
1663    #[test]
1664    fn test_map_file_cow_read_only_enforcement() {
1665        let content = &[0xBB; 4096];
1666        let (path, _) = create_test_file("hyperlight_test_map_file_cow_readonly.bin", content);
1667
1668        let mut sbox = UninitializedSandbox::new(
1669            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1670            None,
1671        )
1672        .unwrap()
1673        .evolve()
1674        .unwrap();
1675
1676        let guest_base: u64 = 0x1_0000_0000;
1677        sbox.map_file_cow(&path, guest_base, None).unwrap();
1678
1679        // Writing to the mapped region should fail with MemoryAccessViolation
1680        let err = sbox
1681            .call::<bool>("WriteMappedBuffer", (guest_base, content.len() as u64))
1682            .unwrap_err();
1683
1684        match err {
1685            HyperlightError::MemoryAccessViolation(addr, ..) if addr == guest_base => {}
1686            _ => panic!(
1687                "Expected MemoryAccessViolation at guest_base, got: {:?}",
1688                err
1689            ),
1690        };
1691
1692        // Clean up
1693        let _ = std::fs::remove_file(&path);
1694    }
1695
1696    /// Tests that `map_file_cow` returns `PoisonedSandbox` when the
1697    /// sandbox is poisoned.
1698    #[test]
1699    fn test_map_file_cow_poisoned() {
1700        let (path, _) = create_test_file("hyperlight_test_map_file_cow_poison.bin", &[0xCC; 4096]);
1701
1702        let mut sbox: MultiUseSandbox = {
1703            let path = simple_guest_as_string().unwrap();
1704            let u_sbox = UninitializedSandbox::new(GuestBinary::FilePath(path), None).unwrap();
1705            u_sbox.evolve()
1706        }
1707        .unwrap();
1708        let snapshot = sbox.snapshot().unwrap();
1709
1710        // Poison the sandbox
1711        let _ = sbox
1712            .call::<()>("guest_panic", "hello".to_string())
1713            .unwrap_err();
1714        assert!(sbox.poisoned());
1715
1716        // map_file_cow should fail with PoisonedSandbox
1717        let err = sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap_err();
1718        assert!(matches!(err, HyperlightError::PoisonedSandbox));
1719
1720        // Restore and verify map_file_cow works again
1721        sbox.restore(snapshot).unwrap();
1722        assert!(!sbox.poisoned());
1723        let result = sbox.map_file_cow(&path, 0x1_0000_0000, None);
1724        assert!(result.is_ok());
1725
1726        let _ = std::fs::remove_file(&path);
1727    }
1728
1729    /// Tests that two separate sandboxes can map the same file
1730    /// simultaneously and both read it correctly.
1731    #[test]
1732    fn test_map_file_cow_multi_vm_same_file() {
1733        let expected = b"shared file content across VMs";
1734        let (path, expected_bytes) =
1735            create_test_file("hyperlight_test_map_file_cow_multi_vm.bin", expected);
1736
1737        let guest_base: u64 = 0x1_0000_0000;
1738
1739        let mut sbox1 = UninitializedSandbox::new(
1740            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1741            None,
1742        )
1743        .unwrap()
1744        .evolve()
1745        .unwrap();
1746
1747        let mut sbox2 = UninitializedSandbox::new(
1748            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1749            None,
1750        )
1751        .unwrap()
1752        .evolve()
1753        .unwrap();
1754
1755        // Map the same file into both sandboxes
1756        sbox1.map_file_cow(&path, guest_base, None).unwrap();
1757        sbox2.map_file_cow(&path, guest_base, None).unwrap();
1758
1759        // Both should read the correct content
1760        let actual1: Vec<u8> = sbox1
1761            .call(
1762                "ReadMappedBuffer",
1763                (guest_base, expected_bytes.len() as u64, true),
1764            )
1765            .unwrap();
1766        let actual2: Vec<u8> = sbox2
1767            .call(
1768                "ReadMappedBuffer",
1769                (guest_base, expected_bytes.len() as u64, true),
1770            )
1771            .unwrap();
1772
1773        assert_eq!(
1774            actual1, expected_bytes,
1775            "Sandbox 1 should read correct content"
1776        );
1777        assert_eq!(
1778            actual2, expected_bytes,
1779            "Sandbox 2 should read correct content"
1780        );
1781
1782        let _ = std::fs::remove_file(&path);
1783    }
1784
1785    /// Tests that multiple threads can each create a sandbox, map the
1786    /// same file, read it, and drop without errors.
1787    #[test]
1788    fn test_map_file_cow_multi_vm_threaded() {
1789        let expected = b"threaded file mapping test data";
1790        let (path, expected_bytes) =
1791            create_test_file("hyperlight_test_map_file_cow_threaded.bin", expected);
1792
1793        const NUM_THREADS: usize = 5;
1794        let path = Arc::new(path);
1795        let expected_bytes = Arc::new(expected_bytes);
1796        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1797        let mut handles = vec![];
1798
1799        for _ in 0..NUM_THREADS {
1800            let path = path.clone();
1801            let expected_bytes = expected_bytes.clone();
1802            let barrier = barrier.clone();
1803
1804            handles.push(thread::spawn(move || {
1805                barrier.wait();
1806
1807                let mut sbox = UninitializedSandbox::new(
1808                    GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1809                    None,
1810                )
1811                .unwrap()
1812                .evolve()
1813                .unwrap();
1814
1815                let guest_base: u64 = 0x1_0000_0000;
1816                sbox.map_file_cow(&path, guest_base, None).unwrap();
1817
1818                let actual: Vec<u8> = sbox
1819                    .call(
1820                        "ReadMappedBuffer",
1821                        (guest_base, expected_bytes.len() as u64, true),
1822                    )
1823                    .unwrap();
1824
1825                assert_eq!(actual, *expected_bytes);
1826            }));
1827        }
1828
1829        for h in handles {
1830            h.join().unwrap();
1831        }
1832
1833        let _ = std::fs::remove_file(&*path);
1834    }
1835
1836    /// Tests that file cleanup works after dropping a sandbox that used
1837    /// `map_file_cow` — the file should be deletable (no leaked handles).
1838    #[test]
1839    #[cfg(target_os = "windows")]
1840    fn test_map_file_cow_cleanup_no_handle_leak() {
1841        let (path, _) = create_test_file("hyperlight_test_map_file_cow_cleanup.bin", &[0xDD; 4096]);
1842
1843        {
1844            let mut sbox = UninitializedSandbox::new(
1845                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1846                None,
1847            )
1848            .unwrap()
1849            .evolve()
1850            .unwrap();
1851
1852            sbox.map_file_cow(&path, 0x1_0000_0000, None).unwrap();
1853            // sandbox dropped here
1854        }
1855
1856        std::fs::remove_file(&path)
1857            .expect("File should be deletable after sandbox with map_file_cow is dropped");
1858    }
1859
1860    /// Tests snapshot/restore cycle with map_file_cow:
1861    /// snapshot₁ (no file) → map file → snapshot₂ → restore₁ (unmapped)
1862    /// → restore₂ (data folded into snapshot).
1863    #[test]
1864    fn test_map_file_cow_snapshot_remapping_cycle() {
1865        let expected = b"snapshot remapping cycle test!";
1866        let (path, expected_bytes) =
1867            create_test_file("hyperlight_test_map_file_cow_snapshot_remap.bin", expected);
1868
1869        let mut sbox = UninitializedSandbox::new(
1870            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1871            None,
1872        )
1873        .unwrap()
1874        .evolve()
1875        .unwrap();
1876
1877        let guest_base: u64 = 0x1_0000_0000;
1878
1879        // 1. snapshot₁ — no file mapped
1880        let snapshot1 = sbox.snapshot().unwrap();
1881
1882        // 2. Map the file
1883        sbox.map_file_cow(&path, guest_base, None).unwrap();
1884
1885        // Verify we can read it
1886        let actual: Vec<u8> = sbox
1887            .call(
1888                "ReadMappedBuffer",
1889                (guest_base, expected_bytes.len() as u64, true),
1890            )
1891            .unwrap();
1892        assert_eq!(actual, expected_bytes);
1893
1894        // 3. snapshot₂ — file mapped (data folded into snapshot)
1895        let snapshot2 = sbox.snapshot().unwrap();
1896
1897        // 4. Restore to snapshot₁ — file should be unmapped
1898        sbox.restore(snapshot1.clone()).unwrap();
1899        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1900        assert!(
1901            !is_mapped,
1902            "Region should be unmapped after restoring to snapshot₁"
1903        );
1904
1905        // 5. Restore to snapshot₂ — data should still be readable
1906        //    (folded into snapshot memory, not the original file mapping)
1907        sbox.restore(snapshot2).unwrap();
1908        let is_mapped: bool = sbox.call("CheckMapped", (guest_base,)).unwrap();
1909        assert!(
1910            is_mapped,
1911            "Region should be mapped after restoring to snapshot₂"
1912        );
1913        let actual2: Vec<u8> = sbox
1914            .call(
1915                "ReadMappedBuffer",
1916                (guest_base, expected_bytes.len() as u64, false),
1917            )
1918            .unwrap();
1919        assert_eq!(
1920            actual2, expected_bytes,
1921            "Data should be intact after snapshot₂ restore"
1922        );
1923
1924        let _ = std::fs::remove_file(&path);
1925    }
1926
1927    /// Tests that snapshot correctly captures map_file_cow data and
1928    /// restore brings it back.
1929    #[test]
1930    fn test_map_file_cow_snapshot_restore() {
1931        let expected = b"snapshot restore basic test!!";
1932        let (path, expected_bytes) =
1933            create_test_file("hyperlight_test_map_file_cow_snap_restore.bin", expected);
1934
1935        let mut sbox = UninitializedSandbox::new(
1936            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1937            None,
1938        )
1939        .unwrap()
1940        .evolve()
1941        .unwrap();
1942
1943        let guest_base: u64 = 0x1_0000_0000;
1944        sbox.map_file_cow(&path, guest_base, None).unwrap();
1945
1946        // Read the content to verify mapping works
1947        let actual: Vec<u8> = sbox
1948            .call(
1949                "ReadMappedBuffer",
1950                (guest_base, expected_bytes.len() as u64, true),
1951            )
1952            .unwrap();
1953        assert_eq!(actual, expected_bytes);
1954
1955        // Take snapshot — folds file data into snapshot memory
1956        let snapshot = sbox.snapshot().unwrap();
1957
1958        // Restore — the file-backed region is unmapped but data is in snapshot
1959        sbox.restore(snapshot).unwrap();
1960
1961        // Data should still be readable from snapshot memory
1962        let actual2: Vec<u8> = sbox
1963            .call(
1964                "ReadMappedBuffer",
1965                (guest_base, expected_bytes.len() as u64, false),
1966            )
1967            .unwrap();
1968        assert_eq!(
1969            actual2, expected_bytes,
1970            "Data should be readable after restore from snapshot"
1971        );
1972
1973        let _ = std::fs::remove_file(&path);
1974    }
1975
1976    /// Tests the deferred `map_file_cow` flow: map a file on
1977    /// `UninitializedSandbox` (before evolve), then evolve and verify
1978    /// the guest can read the mapped content.
1979    #[test]
1980    fn test_map_file_cow_deferred_basic() {
1981        let expected = b"deferred map_file_cow test data";
1982        let (path, expected_bytes) =
1983            create_test_file("hyperlight_test_map_file_cow_deferred.bin", expected);
1984
1985        let guest_base: u64 = 0x1_0000_0000;
1986
1987        let mut u_sbox = UninitializedSandbox::new(
1988            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
1989            None,
1990        )
1991        .unwrap();
1992
1993        // Map the file before evolving — this defers the VM-side work.
1994        let mapped_size = u_sbox.map_file_cow(&path, guest_base, None).unwrap();
1995        assert!(mapped_size > 0, "mapped_size should be positive");
1996        assert!(
1997            mapped_size >= expected.len() as u64,
1998            "mapped_size should be >= file content length"
1999        );
2000
2001        // Evolve — deferred mappings are applied during this step.
2002        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2003
2004        // Verify the guest can read the mapped content.
2005        let actual: Vec<u8> = sbox
2006            .call(
2007                "ReadMappedBuffer",
2008                (guest_base, expected_bytes.len() as u64, true),
2009            )
2010            .unwrap();
2011
2012        assert_eq!(
2013            actual, expected_bytes,
2014            "Guest should read back the exact file content after deferred mapping"
2015        );
2016
2017        let _ = std::fs::remove_file(&path);
2018    }
2019
2020    /// Tests that dropping an `UninitializedSandbox` with pending
2021    /// deferred file mappings does not leak or crash — the
2022    /// `PreparedFileMapping::Drop` should clean up host resources.
2023    #[test]
2024    fn test_map_file_cow_deferred_drop_without_evolve() {
2025        let (path, _) = create_test_file(
2026            "hyperlight_test_map_file_cow_deferred_drop.bin",
2027            &[0xAA; 4096],
2028        );
2029
2030        let guest_base: u64 = 0x1_0000_0000;
2031
2032        {
2033            let mut u_sbox = UninitializedSandbox::new(
2034                GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2035                None,
2036            )
2037            .unwrap();
2038
2039            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2040            // u_sbox dropped here without evolving — PreparedFileMapping::drop
2041            // should clean up host-side OS resources.
2042        }
2043
2044        // If we get here without a crash/hang, cleanup worked.
2045        // On Windows, also verify the file handle was released.
2046        #[cfg(target_os = "windows")]
2047        std::fs::remove_file(&path)
2048            .expect("File should be deletable after dropping UninitializedSandbox");
2049        #[cfg(not(target_os = "windows"))]
2050        let _ = std::fs::remove_file(&path);
2051    }
2052
2053    /// Tests that `prepare_file_cow` rejects unaligned `guest_base`
2054    /// addresses eagerly, before allocating any OS resources.
2055    #[test]
2056    fn test_map_file_cow_unaligned_guest_base() {
2057        let (path, _) =
2058            create_test_file("hyperlight_test_map_file_cow_unaligned.bin", &[0xBB; 4096]);
2059
2060        let mut u_sbox = UninitializedSandbox::new(
2061            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2062            None,
2063        )
2064        .unwrap();
2065
2066        // Use an intentionally unaligned address (page_size + 1).
2067        let unaligned_base: u64 = (page_size::get() + 1) as u64;
2068        let result = u_sbox.map_file_cow(&path, unaligned_base, None);
2069        assert!(
2070            result.is_err(),
2071            "map_file_cow should reject unaligned guest_base"
2072        );
2073
2074        let _ = std::fs::remove_file(&path);
2075    }
2076
2077    /// Tests that `prepare_file_cow` rejects empty files.
2078    #[test]
2079    fn test_map_file_cow_empty_file() {
2080        let temp_dir = std::env::temp_dir();
2081        let path = temp_dir.join("hyperlight_test_map_file_cow_empty.bin");
2082        let _ = std::fs::remove_file(&path);
2083        std::fs::File::create(&path).unwrap(); // create empty file
2084
2085        let mut u_sbox = UninitializedSandbox::new(
2086            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2087            None,
2088        )
2089        .unwrap();
2090
2091        let guest_base: u64 = 0x1_0000_0000;
2092        let result = u_sbox.map_file_cow(&path, guest_base, None);
2093        assert!(result.is_err(), "map_file_cow should reject empty files");
2094
2095        let _ = std::fs::remove_file(&path);
2096    }
2097
2098    /// Tests that `map_file_cow` with a custom label succeeds.
2099    #[test]
2100    fn test_map_file_cow_custom_label() {
2101        let (path, _) = create_test_file("hyperlight_test_map_file_cow_label.bin", &[0xDD; 4096]);
2102
2103        let mut sbox = UninitializedSandbox::new(
2104            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2105            None,
2106        )
2107        .unwrap()
2108        .evolve()
2109        .unwrap();
2110
2111        let result = sbox.map_file_cow(&path, 0x1_0000_0000, Some("my_ramfs"));
2112        assert!(
2113            result.is_ok(),
2114            "map_file_cow with custom label should succeed"
2115        );
2116
2117        let _ = std::fs::remove_file(&path);
2118    }
2119
    /// Tests that `map_file_cow` on a MultiUseSandbox correctly writes
    /// the FileMappingInfo entry (count, guest_addr, size, label) into
    /// the PEB.
    ///
    /// The test bypasses the guest entirely and inspects shared memory
    /// directly, computing field positions with `offset_of!` so it stays
    /// in sync with the `FileMappingInfo` struct layout.
    #[test]
    fn test_map_file_cow_peb_entry_multiuse() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_multiuse.bin", &[0xDD; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "my_ramfs";

        let mut sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap()
        .evolve()
        .unwrap();

        // Map with an explicit label.
        let mapped_size = sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Read back the PEB file_mappings count.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(
            count, 1,
            "PEB file_mappings count should be 1 after one mapping"
        );

        // Read back the first FileMappingInfo entry. The array base comes
        // from the memory layout; each field is read at base + offset_of!.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base, "PEB entry guest_addr should match");

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(
            stored_size, mapped_size,
            "PEB entry size should match mapped_size"
        );

        // Read back the label bytes and verify. The label field holds up to
        // FILE_MAPPING_LABEL_MAX_LEN bytes plus room for a terminator; it is
        // read byte-by-byte since shared_mem reads are typed scalars.
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        // Truncate at the first NUL byte (C-string convention, presumably);
        // if no NUL is found the whole buffer is compared.
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(stored_label, label, "PEB entry label should match");

        let _ = std::fs::remove_file(&path);
    }
2195
    /// Tests that deferred `map_file_cow` (before evolve) correctly
    /// writes FileMappingInfo entries into the PEB during evolve.
    ///
    /// Mirrors `test_map_file_cow_peb_entry_multiuse`, but maps the file
    /// on the `UninitializedSandbox` so the PEB entry is only written
    /// when `evolve()` applies the deferred mapping.
    #[test]
    fn test_map_file_cow_peb_entry_deferred() {
        use std::mem::offset_of;

        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};

        let (path, _) = create_test_file("hyperlight_test_peb_entry_deferred.bin", &[0xEE; 4096]);

        let guest_base: u64 = 0x1_0000_0000;
        let label = "deferred_fs";

        let mut u_sbox = UninitializedSandbox::new(
            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
            None,
        )
        .unwrap();

        // Map before evolve: the mapping (and its PEB entry) is deferred.
        let mapped_size = u_sbox.map_file_cow(&path, guest_base, Some(label)).unwrap();

        // Evolve — PEB entries should be written during this step.
        let sbox: MultiUseSandbox = u_sbox.evolve().unwrap();

        // Read back count.
        let count = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
            .unwrap();
        assert_eq!(count, 1, "PEB file_mappings count should be 1 after evolve");

        // Read back the entry; field positions are computed with offset_of!
        // so the test tracks the FileMappingInfo struct layout.
        let entry_offset = sbox.mem_mgr.layout.get_file_mappings_array_offset();

        let stored_addr = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
            .unwrap();
        assert_eq!(stored_addr, guest_base);

        let stored_size = sbox
            .mem_mgr
            .shared_mem
            .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
            .unwrap();
        assert_eq!(stored_size, mapped_size);

        // Verify the label, read byte-by-byte from shared memory.
        let label_offset = entry_offset + offset_of!(FileMappingInfo, label);
        let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
        for (i, byte) in label_buf.iter_mut().enumerate() {
            *byte = sbox
                .mem_mgr
                .shared_mem
                .read::<u8>(label_offset + i)
                .unwrap();
        }
        // Truncate at the first NUL byte (C-string convention, presumably);
        // if no NUL is found the whole buffer is compared.
        let label_len = label_buf
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(label_buf.len());
        let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
        assert_eq!(
            stored_label, label,
            "PEB entry label should match after evolve"
        );

        let _ = std::fs::remove_file(&path);
    }
2267
2268    /// Tests that mapping 5 files (3 deferred + 2 post-evolve) correctly
2269    /// populates all PEB FileMappingInfo slots with the right guest_addr,
2270    /// size, and label for each entry.
2271    #[test]
2272    fn test_map_file_cow_peb_multiple_entries() {
2273        use std::mem::{offset_of, size_of};
2274
2275        use hyperlight_common::mem::{FILE_MAPPING_LABEL_MAX_LEN, FileMappingInfo};
2276
2277        const NUM_FILES: usize = 5;
2278        const DEFERRED_COUNT: usize = 3;
2279
2280        // Create 5 test files with distinct content.
2281        let mut paths = Vec::new();
2282        let mut labels: Vec<String> = Vec::new();
2283        for i in 0..NUM_FILES {
2284            let name = format!("hyperlight_test_peb_multi_{}.bin", i);
2285            let content = vec![i as u8 + 0xA0; 4096];
2286            let (path, _) = create_test_file(&name, &content);
2287            paths.push(path);
2288            labels.push(format!("file_{}", i));
2289        }
2290
2291        // Each file gets a unique guest base, spaced 1 page apart
2292        // (well outside the shared memory region).
2293        let page_size = page_size::get() as u64;
2294        let base: u64 = 0x1_0000_0000;
2295        let guest_bases: Vec<u64> = (0..NUM_FILES as u64)
2296            .map(|i| base + i * page_size)
2297            .collect();
2298
2299        let mut u_sbox = UninitializedSandbox::new(
2300            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2301            None,
2302        )
2303        .unwrap();
2304
2305        // Map 3 files before evolve (deferred path).
2306        let mut mapped_sizes = Vec::new();
2307        for i in 0..DEFERRED_COUNT {
2308            let size = u_sbox
2309                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2310                .unwrap();
2311            mapped_sizes.push(size);
2312        }
2313
2314        // Evolve — deferred mappings applied + PEB entries written.
2315        let mut sbox: MultiUseSandbox = u_sbox.evolve().unwrap();
2316
2317        // Map 2 more files post-evolve (MultiUseSandbox path).
2318        for i in DEFERRED_COUNT..NUM_FILES {
2319            let size = sbox
2320                .map_file_cow(&paths[i], guest_bases[i], Some(&labels[i]))
2321                .unwrap();
2322            mapped_sizes.push(size);
2323        }
2324
2325        // Verify PEB count equals 5.
2326        let count = sbox
2327            .mem_mgr
2328            .shared_mem
2329            .read::<u64>(sbox.mem_mgr.layout.get_file_mappings_size_offset())
2330            .unwrap();
2331        assert_eq!(
2332            count, NUM_FILES as u64,
2333            "PEB should have {NUM_FILES} entries"
2334        );
2335
2336        // Verify each entry's guest_addr, size, and label.
2337        let array_base = sbox.mem_mgr.layout.get_file_mappings_array_offset();
2338        for i in 0..NUM_FILES {
2339            let entry_offset = array_base + i * size_of::<FileMappingInfo>();
2340
2341            let stored_addr = sbox
2342                .mem_mgr
2343                .shared_mem
2344                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, guest_addr))
2345                .unwrap();
2346            assert_eq!(
2347                stored_addr, guest_bases[i],
2348                "Entry {i}: guest_addr mismatch"
2349            );
2350
2351            let stored_size = sbox
2352                .mem_mgr
2353                .shared_mem
2354                .read::<u64>(entry_offset + offset_of!(FileMappingInfo, size))
2355                .unwrap();
2356            assert_eq!(stored_size, mapped_sizes[i], "Entry {i}: size mismatch");
2357
2358            // Read and verify the label.
2359            let label_base = entry_offset + offset_of!(FileMappingInfo, label);
2360            let mut label_buf = [0u8; FILE_MAPPING_LABEL_MAX_LEN + 1];
2361            for (j, byte) in label_buf.iter_mut().enumerate() {
2362                *byte = sbox.mem_mgr.shared_mem.read::<u8>(label_base + j).unwrap();
2363            }
2364            let label_len = label_buf
2365                .iter()
2366                .position(|&b| b == 0)
2367                .unwrap_or(label_buf.len());
2368            let stored_label = std::str::from_utf8(&label_buf[..label_len]).unwrap();
2369            assert_eq!(stored_label, labels[i], "Entry {i}: label mismatch");
2370        }
2371
2372        // Clean up.
2373        for path in &paths {
2374            let _ = std::fs::remove_file(path);
2375        }
2376    }
2377
2378    /// Tests that an explicitly provided label exceeding 63 bytes is rejected.
2379    #[test]
2380    fn test_map_file_cow_label_too_long() {
2381        let (path, _) =
2382            create_test_file("hyperlight_test_map_file_cow_long_label.bin", &[0xEE; 4096]);
2383
2384        let guest_base: u64 = 0x1_0000_0000;
2385
2386        let mut u_sbox = UninitializedSandbox::new(
2387            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2388            None,
2389        )
2390        .unwrap();
2391
2392        // A label of exactly 64 bytes exceeds the 63-byte max.
2393        let long_label = "A".repeat(64);
2394        let result = u_sbox.map_file_cow(&path, guest_base, Some(&long_label));
2395        assert!(
2396            result.is_err(),
2397            "map_file_cow should reject labels longer than 63 bytes"
2398        );
2399
2400        // Labels at exactly 63 bytes should be fine.
2401        let ok_label = "B".repeat(63);
2402        let result = u_sbox.map_file_cow(&path, guest_base, Some(&ok_label));
2403        assert!(
2404            result.is_ok(),
2405            "map_file_cow should accept labels of exactly 63 bytes"
2406        );
2407
2408        let _ = std::fs::remove_file(&path);
2409    }
2410
2411    /// Tests that labels containing null bytes are rejected.
2412    #[test]
2413    fn test_map_file_cow_label_null_byte() {
2414        let (path, _) =
2415            create_test_file("hyperlight_test_map_file_cow_null_label.bin", &[0xFF; 4096]);
2416
2417        let guest_base: u64 = 0x1_0000_0000;
2418
2419        let mut u_sbox = UninitializedSandbox::new(
2420            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2421            None,
2422        )
2423        .unwrap();
2424
2425        let result = u_sbox.map_file_cow(&path, guest_base, Some("has\0null"));
2426        assert!(
2427            result.is_err(),
2428            "map_file_cow should reject labels containing null bytes"
2429        );
2430
2431        let _ = std::fs::remove_file(&path);
2432    }
2433
2434    /// Tests that mapping two files to overlapping GPA ranges is rejected.
2435    #[test]
2436    fn test_map_file_cow_overlapping_mappings() {
2437        let (path1, _) =
2438            create_test_file("hyperlight_test_map_file_cow_overlap1.bin", &[0xAA; 4096]);
2439        let (path2, _) =
2440            create_test_file("hyperlight_test_map_file_cow_overlap2.bin", &[0xBB; 4096]);
2441
2442        let guest_base: u64 = 0x1_0000_0000;
2443
2444        let mut u_sbox = UninitializedSandbox::new(
2445            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2446            None,
2447        )
2448        .unwrap();
2449
2450        // First mapping should succeed.
2451        u_sbox.map_file_cow(&path1, guest_base, None).unwrap();
2452
2453        // Second mapping at the same address should fail (overlap).
2454        let result = u_sbox.map_file_cow(&path2, guest_base, None);
2455        assert!(
2456            result.is_err(),
2457            "map_file_cow should reject overlapping guest address ranges"
2458        );
2459
2460        let _ = std::fs::remove_file(&path1);
2461        let _ = std::fs::remove_file(&path2);
2462    }
2463
2464    /// Tests that `map_file_cow` rejects a guest_base that overlaps
2465    /// the sandbox's shared memory region.
2466    #[test]
2467    fn test_map_file_cow_shared_mem_overlap() {
2468        let (path, _) = create_test_file(
2469            "hyperlight_test_map_file_cow_overlap_shm.bin",
2470            &[0xCC; 4096],
2471        );
2472
2473        let mut u_sbox = UninitializedSandbox::new(
2474            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2475            None,
2476        )
2477        .unwrap();
2478
2479        // Use BASE_ADDRESS itself — smack in the middle of shared memory.
2480        let base_addr = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS as u64;
2481        // page-align it (BASE_ADDRESS is 0x1000, already page-aligned)
2482        let result = u_sbox.map_file_cow(&path, base_addr, None);
2483        assert!(
2484            result.is_err(),
2485            "map_file_cow should reject guest_base inside shared memory"
2486        );
2487
2488        let _ = std::fs::remove_file(&path);
2489    }
2490
2491    /// Tests that exceeding MAX_FILE_MAPPINGS on UninitializedSandbox
2492    /// is rejected at registration time.
2493    #[test]
2494    fn test_map_file_cow_max_limit() {
2495        use hyperlight_common::mem::MAX_FILE_MAPPINGS;
2496
2497        let mut u_sbox = UninitializedSandbox::new(
2498            GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")),
2499            None,
2500        )
2501        .unwrap();
2502
2503        let page_size = page_size::get() as u64;
2504        // Base well outside shared memory.
2505        let base: u64 = 0x1_0000_0000;
2506
2507        // Register MAX_FILE_MAPPINGS files — each needs a distinct file
2508        // and a non-overlapping GPA.
2509        let mut paths = Vec::new();
2510        for i in 0..MAX_FILE_MAPPINGS {
2511            let name = format!("hyperlight_test_max_limit_{}.bin", i);
2512            let (path, _) = create_test_file(&name, &[0xAA; 4096]);
2513            let guest_base = base + (i as u64) * page_size;
2514            u_sbox.map_file_cow(&path, guest_base, None).unwrap();
2515            paths.push(path);
2516        }
2517
2518        // The (MAX_FILE_MAPPINGS + 1)th should fail.
2519        let name = format!("hyperlight_test_max_limit_{}.bin", MAX_FILE_MAPPINGS);
2520        let (path, _) = create_test_file(&name, &[0xBB; 4096]);
2521        let guest_base = base + (MAX_FILE_MAPPINGS as u64) * page_size;
2522        let result = u_sbox.map_file_cow(&path, guest_base, None);
2523        assert!(
2524            result.is_err(),
2525            "map_file_cow should reject after MAX_FILE_MAPPINGS registrations"
2526        );
2527
2528        // Clean up.
2529        for p in &paths {
2530            let _ = std::fs::remove_file(p);
2531        }
2532        let _ = std::fs::remove_file(&path);
2533    }
2534}