// hyperlight_host/mem/layout.rs
1/*
2Copyright 2025  The Hyperlight Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15 */
16//! This module describes the virtual and physical addresses of a
17//! number of special regions in the hyperlight VM, although we hope
18//! to reduce the number of these over time.
19//!
20//! A snapshot freshly created from an empty VM will result in roughly
21//! the following physical layout:
22//!
23//! +-------------------------------------------+
24//! |             Guest Page Tables             |
25//! +-------------------------------------------+
26//! |              Init Data                    | (GuestBlob size)
27//! +-------------------------------------------+
28//! |             Guest Heap                    |
29//! +-------------------------------------------+
30//! |                PEB Struct                 | (HyperlightPEB size)
31//! +-------------------------------------------+
32//! |               Guest Code                  |
33//! +-------------------------------------------+ 0x1_000
34//! |              NULL guard page              |
35//! +-------------------------------------------+ 0x0_000
36//!
37//! Everything except for the guest page tables is currently
38//! identity-mapped; the guest page tables themselves are mapped at
39//! [`hyperlight_common::layout::SNAPSHOT_PT_GVA`] =
40//! 0xffff_8000_0000_0000.
41//!
42//! - `InitData` - some extra data that can be loaded onto the sandbox during
43//!   initialization.
44//!
45//! - `GuestHeap` - this is a buffer that is used for heap data in the guest. the length
46//!   of this field is returned by the `heap_size()` method of this struct
47//!
48//! There is also a scratch region at the top of physical memory,
49//! which is mostly laid out as a large undifferentiated blob of
50//! memory, although at present the snapshot process specially
51//! privileges the statically allocated input and output data regions:
52//!
53//! +-------------------------------------------+ (top of physical memory)
54//! |         Exception Stack, Metadata         |
55//! +-------------------------------------------+ (1 page below)
56//! |              Scratch Memory               |
57//! +-------------------------------------------+
58//! |                Output Data                |
59//! +-------------------------------------------+
60//! |                Input Data                 |
61//! +-------------------------------------------+ (scratch size)
62
63use std::fmt::Debug;
64use std::mem::{offset_of, size_of};
65
66use hyperlight_common::mem::{HyperlightPEB, PAGE_SIZE_USIZE};
67use tracing::{Span, instrument};
68
69use super::memory_region::MemoryRegionType::{Code, Heap, InitData, Peb};
70use super::memory_region::{
71    DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegion, MemoryRegion_, MemoryRegionFlags, MemoryRegionKind,
72    MemoryRegionVecBuilder,
73};
74#[cfg(any(gdb, feature = "mem_profile"))]
75use super::shared_mem::HostSharedMemory;
76use super::shared_mem::{ExclusiveSharedMemory, ReadonlySharedMemory};
77use crate::error::HyperlightError::{MemoryRequestTooBig, MemoryRequestTooSmall};
78use crate::sandbox::SandboxConfiguration;
79use crate::{Result, new_error};
80
/// The base memory region into which a guest physical address has
/// been resolved: the snapshot region, the scratch region, or a
/// host-mapped (`Mmap`) region.
///
/// `Sn`/`Sc` are generic so the same shape can carry either handles
/// or byte slices for the snapshot and scratch memories.
pub(crate) enum BaseGpaRegion<Sn, Sc> {
    /// The address falls within the snapshot region.
    Snapshot(Sn),
    /// The address falls within the scratch region.
    Scratch(Sc),
    /// The address falls within a host-mapped region described by the
    /// contained `MemoryRegion`.
    Mmap(MemoryRegion),
}
86
// It's an invariant of this type, checked on creation, that the
// offset is in bounds for the base region.
/// A guest physical address resolved into a byte offset within one of
/// the sandbox's base regions (see [`BaseGpaRegion`]).
pub(crate) struct ResolvedGpa<Sn, Sc> {
    /// Byte offset into `base` (invariant: in bounds for the region).
    pub(crate) offset: usize,
    /// The region containing the resolved address.
    pub(crate) base: BaseGpaRegion<Sn, Sc>,
}
93
94impl AsRef<[u8]> for ExclusiveSharedMemory {
95    fn as_ref(&self) -> &[u8] {
96        self.as_slice()
97    }
98}
/// Allow a read-only shared-memory handle to be viewed as a plain
/// byte slice, for use with the generic GPA-resolution code below.
impl AsRef<[u8]> for ReadonlySharedMemory {
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}
104
105impl<Sn, Sc> ResolvedGpa<Sn, Sc> {
106    pub(crate) fn with_memories<Sn2, Sc2>(self, sn: Sn2, sc: Sc2) -> ResolvedGpa<Sn2, Sc2> {
107        ResolvedGpa {
108            offset: self.offset,
109            base: match self.base {
110                BaseGpaRegion::Snapshot(_) => BaseGpaRegion::Snapshot(sn),
111                BaseGpaRegion::Scratch(_) => BaseGpaRegion::Scratch(sc),
112                BaseGpaRegion::Mmap(r) => BaseGpaRegion::Mmap(r),
113            },
114        }
115    }
116}
impl<'a> BaseGpaRegion<&'a [u8], &'a [u8]> {
    /// View the whole base region as a byte slice.
    ///
    /// Note the returned slice borrows for `'a` (the lifetime of the
    /// snapshot/scratch slices), not for the borrow of `self`.
    pub(crate) fn as_ref<'b>(&'b self) -> &'a [u8] {
        match self {
            BaseGpaRegion::Snapshot(sn) => sn,
            BaseGpaRegion::Scratch(sc) => sc,
            BaseGpaRegion::Mmap(r) => unsafe {
                #[allow(clippy::useless_conversion)]
                let host_region_base: usize = r.host_region.start.into();
                #[allow(clippy::useless_conversion)]
                let host_region_end: usize = r.host_region.end.into();
                let len = host_region_end - host_region_base;
                // SAFETY: per the `MemoryRegion` invariant documented
                // at the parallel `copy_to_slice` impl below, mapped
                // host memory stays alive as long as the sandbox does.
                // NOTE(review): the returned lifetime 'a is unrelated
                // to the mmap'd region's actual lifetime — confirm
                // callers cannot outlive the sandbox here.
                std::slice::from_raw_parts(host_region_base as *const u8, len)
            },
        }
    }
}
133impl<'a> ResolvedGpa<&'a [u8], &'a [u8]> {
134    pub(crate) fn as_ref<'b>(&'b self) -> &'a [u8] {
135        let base = self.base.as_ref();
136        if self.offset > base.len() {
137            return &[];
138        }
139        &self.base.as_ref()[self.offset..]
140    }
141}
/// Abstraction over memory that can be read at a byte offset, so the
/// GPA-resolved `copy_to_slice` below works uniformly across the
/// different shared-memory wrapper types.
#[cfg(any(gdb, feature = "mem_profile"))]
#[allow(unused)] // may be unused when nanvix-unstable is also enabled
pub(crate) trait ReadableSharedMemory {
    /// Copy `slice.len()` bytes starting at `offset` into `slice`.
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()>;
}
#[cfg(any(gdb, feature = "mem_profile"))]
impl ReadableSharedMemory for &HostSharedMemory {
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        // Delegate via the fully-qualified inherent method so this
        // call does not recurse into the trait impl itself.
        HostSharedMemory::copy_to_slice(self, slice, offset)
    }
}
#[cfg(any(gdb, feature = "mem_profile"))]
mod coherence_hack {
    use super::{ExclusiveSharedMemory, ReadonlySharedMemory};
    // Sealed marker restricting the blanket `ReadableSharedMemory`
    // impl below to exactly these `AsRef<[u8]>` types, so the blanket
    // impl cannot overlap the dedicated `&HostSharedMemory` impl
    // (coherence would otherwise reject a blanket over all AsRef types).
    #[allow(unused)] // it actually is; see the impl below
    pub(super) trait SharedMemoryAsRefMarker: AsRef<[u8]> {}
    impl SharedMemoryAsRefMarker for ExclusiveSharedMemory {}
    impl SharedMemoryAsRefMarker for &ExclusiveSharedMemory {}
    impl SharedMemoryAsRefMarker for ReadonlySharedMemory {}
    impl SharedMemoryAsRefMarker for &ReadonlySharedMemory {}
}
#[cfg(any(gdb, feature = "mem_profile"))]
impl<T: coherence_hack::SharedMemoryAsRefMarker> ReadableSharedMemory for T {
    /// Copy `slice.len()` bytes starting at `offset` out of this
    /// memory into `slice`, erroring (rather than panicking) when the
    /// requested range does not fit.
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        let ss: &[u8] = self.as_ref();
        // checked_add guards against `offset + slice.len()` wrapping,
        // which would otherwise panic in debug builds.
        let end = offset.checked_add(slice.len()).ok_or_else(|| {
            new_error!("Overflow computing end of read at offset {}", offset)
        })?;
        if end > ss.len() {
            return Err(new_error!(
                "Attempt to read up to {} in memory of size {}",
                // same values as before, without re-resolving as_ref()
                end,
                ss.len()
            ));
        }
        slice.copy_from_slice(&ss[offset..end]);
        Ok(())
    }
}
#[cfg(any(gdb, feature = "mem_profile"))]
impl<Sn: ReadableSharedMemory, Sc: ReadableSharedMemory> ResolvedGpa<Sn, Sc> {
    /// Copy `slice.len()` bytes out of the resolved location into
    /// `slice`, dispatching to whichever base region this GPA
    /// resolved into; errors if the read would run past the region.
    #[allow(unused)] // may be unused when nanvix-unstable is also enabled
    pub(crate) fn copy_to_slice(&self, slice: &mut [u8]) -> Result<()> {
        match &self.base {
            BaseGpaRegion::Snapshot(sn) => sn.copy_to_slice(slice, self.offset),
            BaseGpaRegion::Scratch(sc) => sc.copy_to_slice(slice, self.offset),
            BaseGpaRegion::Mmap(r) => unsafe {
                #[allow(clippy::useless_conversion)]
                let host_region_base: usize = r.host_region.start.into();
                #[allow(clippy::useless_conversion)]
                let host_region_end: usize = r.host_region.end.into();
                let len = host_region_end - host_region_base;
                // Safety: it's a documented invariant of MemoryRegion
                // that the memory must remain alive as long as the
                // sandbox is alive, and the way this code is used,
                // the lifetimes of the snapshot and scratch memories
                // ensure that the sandbox is still alive. This could
                // perhaps be cleaned up/improved/made harder to
                // misuse significantly, but it would require a much
                // larger rework.
                let ss = std::slice::from_raw_parts(host_region_base as *const u8, len);
                let end = self.offset + slice.len();
                if end > ss.len() {
                    return Err(new_error!(
                        "Attempt to read up to {} in memory of size {}",
                        self.offset + slice.len(),
                        ss.len()
                    ));
                }
                slice.copy_from_slice(&ss[self.offset..end]);
                Ok(())
            },
        }
    }
}
215
/// Describes the layout of sandbox guest memory: the sizes and
/// offsets of the code, PEB, heap, init-data, scratch and snapshot
/// regions (see the module-level docs for the overall picture).
#[derive(Copy, Clone)]
pub(crate) struct SandboxMemoryLayout {
    pub(super) sandbox_memory_config: SandboxConfiguration,
    /// The heap size of this sandbox.
    pub(super) heap_size: usize,
    // Size in bytes of the init-data blob loaded at initialization
    // (may be 0, in which case no InitData region is emitted).
    init_data_size: usize,

    /// The following fields are offsets to the actual PEB struct fields.
    /// They are used when writing the PEB struct itself
    peb_offset: usize,
    peb_input_data_offset: usize,
    peb_output_data_offset: usize,
    peb_init_data_offset: usize,
    peb_heap_data_offset: usize,
    #[cfg(feature = "nanvix-unstable")]
    peb_file_mappings_offset: usize,

    // Page-aligned offset of the guest heap buffer (after the PEB).
    guest_heap_buffer_offset: usize,
    // Page-aligned offset of the init-data blob (after the heap).
    init_data_offset: usize,
    // Size of the page-table area, once known (set via `set_pt_size`;
    // `None` until then, treated as 0).
    pt_size: Option<usize>,

    // other
    pub(crate) peb_address: usize,
    code_size: usize,
    // The offset in the sandbox memory where the code starts
    guest_code_offset: usize,
    #[cfg_attr(feature = "nanvix-unstable", allow(unused))]
    pub(crate) init_data_permissions: Option<MemoryRegionFlags>,

    // The size of the scratch region in physical memory; note that
    // this will appear under the top of physical memory.
    scratch_size: usize,
    // The size of the snapshot region in physical memory; note that
    // this will appear somewhere near the base of physical memory.
    snapshot_size: usize,
}
252
253impl Debug for SandboxMemoryLayout {
254    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
255        let mut ff = f.debug_struct("SandboxMemoryLayout");
256        ff.field(
257            "Total Memory Size",
258            &format_args!("{:#x}", self.get_memory_size().unwrap_or(0)),
259        )
260        .field("Heap Size", &format_args!("{:#x}", self.heap_size))
261        .field(
262            "Init Data Size",
263            &format_args!("{:#x}", self.init_data_size),
264        )
265        .field("PEB Address", &format_args!("{:#x}", self.peb_address))
266        .field("PEB Offset", &format_args!("{:#x}", self.peb_offset))
267        .field("Code Size", &format_args!("{:#x}", self.code_size))
268        .field(
269            "Input Data Offset",
270            &format_args!("{:#x}", self.peb_input_data_offset),
271        )
272        .field(
273            "Output Data Offset",
274            &format_args!("{:#x}", self.peb_output_data_offset),
275        )
276        .field(
277            "Init Data Offset",
278            &format_args!("{:#x}", self.peb_init_data_offset),
279        )
280        .field(
281            "Guest Heap Offset",
282            &format_args!("{:#x}", self.peb_heap_data_offset),
283        );
284        #[cfg(feature = "nanvix-unstable")]
285        ff.field(
286            "File Mappings Offset",
287            &format_args!("{:#x}", self.peb_file_mappings_offset),
288        );
289        ff.field(
290            "Guest Heap Buffer Offset",
291            &format_args!("{:#x}", self.guest_heap_buffer_offset),
292        )
293        .field(
294            "Init Data Offset",
295            &format_args!("{:#x}", self.init_data_offset),
296        )
297        .field("PT Size", &format_args!("{:#x}", self.pt_size.unwrap_or(0)))
298        .field(
299            "Guest Code Offset",
300            &format_args!("{:#x}", self.guest_code_offset),
301        )
302        .field(
303            "Scratch region size",
304            &format_args!("{:#x}", self.scratch_size),
305        )
306        .finish()
307    }
308}
309
310impl SandboxMemoryLayout {
    /// The maximum amount of memory a single sandbox will be allowed.
    ///
    /// Both the scratch region and the snapshot region are bounded by
    /// this size. The value is arbitrary but chosen to be large enough
    /// for most workloads while preventing accidental resource exhaustion.
    const MAX_MEMORY_SIZE: usize = (16 * 1024 * 1024 * 1024) - Self::BASE_ADDRESS; // 16 GiB - BASE_ADDRESS

    /// The base address of the sandbox's memory.
    ///
    /// In the default layout the first page is a NULL guard page (see
    /// the module docs), so guest memory starts at 0x1000; under the
    /// nanvix-unstable feature it starts at physical address 0.
    #[cfg(not(feature = "nanvix-unstable"))]
    pub(crate) const BASE_ADDRESS: usize = 0x1000;
    #[cfg(feature = "nanvix-unstable")]
    pub(crate) const BASE_ADDRESS: usize = 0x0;

    // the offset into a sandbox's input/output buffer where the stack starts
    pub(crate) const STACK_POINTER_SIZE_BYTES: u64 = 8;
326
    /// Create a new `SandboxMemoryLayout` with the given
    /// `SandboxConfiguration`, code size and stack/heap size.
    ///
    /// # Errors
    ///
    /// Returns `MemoryRequestTooBig` / `MemoryRequestTooSmall` when
    /// the configured scratch size is out of range, and propagates
    /// any error from computing the total memory size.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn new(
        cfg: SandboxConfiguration,
        code_size: usize,
        init_data_size: usize,
        init_data_permissions: Option<MemoryRegionFlags>,
    ) -> Result<Self> {
        let heap_size = usize::try_from(cfg.get_heap_size())?;
        let scratch_size = cfg.get_scratch_size();
        if scratch_size > Self::MAX_MEMORY_SIZE {
            return Err(MemoryRequestTooBig(scratch_size, Self::MAX_MEMORY_SIZE));
        }
        // The scratch region must at least hold the statically
        // allocated input and output data buffers.
        let min_scratch_size = hyperlight_common::layout::min_scratch_size(
            cfg.get_input_data_size(),
            cfg.get_output_data_size(),
        );
        if scratch_size < min_scratch_size {
            return Err(MemoryRequestTooSmall(scratch_size, min_scratch_size));
        }

        // Code sits at the very bottom of the snapshot region.
        let guest_code_offset = 0;
        // The following offsets are to the fields of the PEB struct itself!
        let peb_offset = code_size.next_multiple_of(PAGE_SIZE_USIZE);
        let peb_input_data_offset = peb_offset + offset_of!(HyperlightPEB, input_stack);
        let peb_output_data_offset = peb_offset + offset_of!(HyperlightPEB, output_stack);
        let peb_init_data_offset = peb_offset + offset_of!(HyperlightPEB, init_data);
        let peb_heap_data_offset = peb_offset + offset_of!(HyperlightPEB, guest_heap);
        #[cfg(feature = "nanvix-unstable")]
        let peb_file_mappings_offset = peb_offset + offset_of!(HyperlightPEB, file_mappings);

        // The following offsets are the actual values that relate to memory layout,
        // which are written to PEB struct
        let peb_address = Self::BASE_ADDRESS + peb_offset;
        // make sure heap buffer starts at 4K boundary.
        // The FileMappingInfo array is stored immediately after the PEB struct.
        // We statically reserve space for MAX_FILE_MAPPINGS entries so that
        // the heap never overlaps the array, even when all slots are used.
        // The host writes file mapping metadata here via write_file_mapping_entry;
        // the guest only reads the entries. We don't know at layout time how
        // many file mappings the host will register, so we reserve space for
        // the maximum number.
        // The heap starts at the next page boundary after this reserved area.
        #[cfg(feature = "nanvix-unstable")]
        let file_mappings_array_end = peb_offset
            + size_of::<HyperlightPEB>()
            + hyperlight_common::mem::MAX_FILE_MAPPINGS
                * size_of::<hyperlight_common::mem::FileMappingInfo>();
        #[cfg(feature = "nanvix-unstable")]
        let guest_heap_buffer_offset = file_mappings_array_end.next_multiple_of(PAGE_SIZE_USIZE);
        #[cfg(not(feature = "nanvix-unstable"))]
        let guest_heap_buffer_offset =
            (peb_offset + size_of::<HyperlightPEB>()).next_multiple_of(PAGE_SIZE_USIZE);

        // make sure init data starts at 4K boundary
        let init_data_offset =
            (guest_heap_buffer_offset + heap_size).next_multiple_of(PAGE_SIZE_USIZE);
        let mut ret = Self {
            peb_offset,
            heap_size,
            peb_input_data_offset,
            peb_output_data_offset,
            peb_init_data_offset,
            peb_heap_data_offset,
            #[cfg(feature = "nanvix-unstable")]
            peb_file_mappings_offset,
            sandbox_memory_config: cfg,
            code_size,
            guest_heap_buffer_offset,
            peb_address,
            guest_code_offset,
            init_data_offset,
            init_data_size,
            init_data_permissions,
            pt_size: None,
            scratch_size,
            snapshot_size: 0,
        };
        // The snapshot initially covers exactly the laid-out guest
        // memory; `set_pt_size` later adds the page-table area on top.
        ret.set_snapshot_size(ret.get_memory_size()?);
        Ok(ret)
    }
409
    /// Get the offset in guest memory to the output data size
    /// (the `output_stack` `GuestMemoryRegion` field of the PEB).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_output_data_size_offset(&self) -> usize {
        // The size field is the first field in the `OutputData` struct
        self.peb_output_data_offset
    }
416
    /// Get the offset in guest memory to the init data size
    /// (the `init_data` `GuestMemoryRegion` field of the PEB).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_init_data_size_offset(&self) -> usize {
        // The init data size is the first field in the `GuestMemoryRegion` struct
        self.peb_init_data_offset
    }
423
    /// Get the size in bytes of the scratch region.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_scratch_size(&self) -> usize {
        self.scratch_size
    }
428
429    /// Get the offset in guest memory to the output data pointer.
430    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
431    fn get_output_data_pointer_offset(&self) -> usize {
432        // This field is immediately after the output data size field,
433        // which is a `u64`.
434        self.get_output_data_size_offset() + size_of::<u64>()
435    }
436
437    /// Get the offset in guest memory to the init data pointer.
438    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
439    pub(super) fn get_init_data_pointer_offset(&self) -> usize {
440        // The init data pointer is immediately after the init data size field,
441        // which is a `u64`.
442        self.get_init_data_size_offset() + size_of::<u64>()
443    }
444
    /// Get the guest virtual address of the start of output data.
    ///
    /// The output buffer immediately follows the input buffer at the
    /// base of the scratch region.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_output_data_buffer_gva(&self) -> u64 {
        hyperlight_common::layout::scratch_base_gva(self.scratch_size)
            + self.sandbox_memory_config.get_input_data_size() as u64
    }
451
    /// Get the offset into the host scratch buffer of the start of
    /// the output data.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_output_data_buffer_scratch_host_offset(&self) -> usize {
        // Output data starts right after the input buffer, which sits
        // at offset 0 of the scratch region.
        self.sandbox_memory_config.get_input_data_size()
    }
458
    /// Get the offset in guest memory to the input data size
    /// (the `input_stack` `GuestMemoryRegion` field of the PEB).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_input_data_size_offset(&self) -> usize {
        // The input data size is the first field in the input stack's `GuestMemoryRegion` struct
        self.peb_input_data_offset
    }
465
466    /// Get the offset in guest memory to the input data pointer.
467    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
468    fn get_input_data_pointer_offset(&self) -> usize {
469        // The input data pointer is immediately after the input
470        // data size field in the input data `GuestMemoryRegion` struct which is a `u64`.
471        self.get_input_data_size_offset() + size_of::<u64>()
472    }
473
    /// Get the guest virtual address of the start of input data.
    ///
    /// Input data sits at the very base of the scratch region.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_input_data_buffer_gva(&self) -> u64 {
        hyperlight_common::layout::scratch_base_gva(self.scratch_size)
    }
479
    /// Get the offset into the host scratch buffer of the start of
    /// the input data
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_input_data_buffer_scratch_host_offset(&self) -> usize {
        // Input data is the first thing in the scratch region.
        0
    }
486
487    /// Get the offset from the beginning of the scratch region to the
488    /// location where page tables will be eagerly copied on restore
489    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
490    pub(crate) fn get_pt_base_scratch_offset(&self) -> usize {
491        (self.sandbox_memory_config.get_input_data_size()
492            + self.sandbox_memory_config.get_output_data_size())
493        .next_multiple_of(hyperlight_common::vmem::PAGE_SIZE)
494    }
495
496    /// Get the base GPA to which the page tables will be eagerly
497    /// copied on restore
498    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
499    pub(crate) fn get_pt_base_gpa(&self) -> u64 {
500        hyperlight_common::layout::scratch_base_gpa(self.scratch_size)
501            + self.get_pt_base_scratch_offset() as u64
502    }
503
504    /// Get the first GPA of the scratch region that the host hasn't
505    /// used for something else
506    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
507    pub(crate) fn get_first_free_scratch_gpa(&self) -> u64 {
508        self.get_pt_base_gpa() + self.pt_size.unwrap_or(0) as u64
509    }
510
    /// Get the offset in guest memory to the heap size
    /// (the `guest_heap` `GuestMemoryRegion` field of the PEB).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_heap_size_offset(&self) -> usize {
        self.peb_heap_data_offset
    }
516
    /// Get the offset in guest memory to the file_mappings count field
    /// (the `size` field of the `GuestMemoryRegion` in the PEB).
    /// Only available under the nanvix-unstable feature.
    #[cfg(feature = "nanvix-unstable")]
    pub(crate) fn get_file_mappings_size_offset(&self) -> usize {
        self.peb_file_mappings_offset
    }
523
524    /// Get the offset in guest memory to the file_mappings pointer field.
525    #[cfg(feature = "nanvix-unstable")]
526    fn get_file_mappings_pointer_offset(&self) -> usize {
527        self.get_file_mappings_size_offset() + size_of::<u64>()
528    }
529
    /// Get the offset in snapshot memory where the FileMappingInfo array starts
    /// (immediately after the PEB struct, within the same page).
    #[cfg(feature = "nanvix-unstable")]
    pub(crate) fn get_file_mappings_array_offset(&self) -> usize {
        self.peb_offset + size_of::<HyperlightPEB>()
    }
536
    /// Get the guest address of the FileMappingInfo array.
    ///
    /// Snapshot memory is identity-mapped (see module docs), so the
    /// GVA is simply `BASE_ADDRESS` plus the array's offset.
    #[cfg(feature = "nanvix-unstable")]
    fn get_file_mappings_array_gva(&self) -> u64 {
        (Self::BASE_ADDRESS + self.get_file_mappings_array_offset()) as u64
    }
542
543    /// Get the offset of the heap pointer in guest memory,
544    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
545    fn get_heap_pointer_offset(&self) -> usize {
546        // The heap pointer is immediately after the
547        // heap size field in the guest heap's `GuestMemoryRegion` struct which is a `u64`.
548        self.get_heap_size_offset() + size_of::<u64>()
549    }
550
    /// Get the total size of guest memory in `self`'s memory
    /// layout.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_unaligned_memory_size(&self) -> usize {
        // Init data is the last region in the snapshot layout, so its
        // end marks the total (unaligned) size.
        self.init_data_offset + self.init_data_size
    }
557
    /// get the code offset
    /// This is the offset in the sandbox memory where the code starts
    /// (set to 0 by `new`: code sits at the bottom of the snapshot).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_code_offset(&self) -> usize {
        self.guest_code_offset
    }
564
    /// Get the guest address of the code section in the sandbox
    /// (identity mapping: `BASE_ADDRESS` plus the code offset).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_code_address(&self) -> usize {
        Self::BASE_ADDRESS + self.guest_code_offset
    }
570
571    /// Get the total size of guest memory in `self`'s memory
572    /// layout aligned to page size boundaries.
573    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
574    pub(crate) fn get_memory_size(&self) -> Result<usize> {
575        let total_memory = self.get_unaligned_memory_size();
576
577        // Size should be a multiple of page size.
578        let remainder = total_memory % PAGE_SIZE_USIZE;
579        let multiples = total_memory / PAGE_SIZE_USIZE;
580        let size = match remainder {
581            0 => total_memory,
582            _ => (multiples + 1) * PAGE_SIZE_USIZE,
583        };
584
585        if size > Self::MAX_MEMORY_SIZE {
586            Err(MemoryRequestTooBig(size, Self::MAX_MEMORY_SIZE))
587        } else {
588            Ok(size)
589        }
590    }
591
    /// Sets the size of the memory region used for page tables
    ///
    /// Also re-accounts the snapshot size: the previously recorded
    /// page-table size (if any) is replaced by `size`.
    ///
    /// # Errors
    ///
    /// Returns `MemoryRequestTooSmall` if the scratch region cannot
    /// hold the fixed input/output buffers plus `size` bytes of page
    /// tables.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_pt_size(&mut self, size: usize) -> Result<()> {
        let min_fixed_scratch = hyperlight_common::layout::min_scratch_size(
            self.sandbox_memory_config.get_input_data_size(),
            self.sandbox_memory_config.get_output_data_size(),
        );
        let min_scratch = min_fixed_scratch + size;
        if self.scratch_size < min_scratch {
            return Err(MemoryRequestTooSmall(self.scratch_size, min_scratch));
        }
        // Swap out the old page-table contribution for the new one.
        let old_pt_size = self.pt_size.unwrap_or(0);
        self.snapshot_size = self.snapshot_size - old_pt_size + size;
        self.pt_size = Some(size);
        Ok(())
    }
608
    /// Set the size of the snapshot region in physical memory.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_snapshot_size(&mut self, new_size: usize) {
        self.snapshot_size = new_size;
    }
613
    /// Get the size of the memory region used for page tables
    /// (0 if `set_pt_size` has not been called yet).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pt_size(&self) -> usize {
        self.pt_size.unwrap_or(0)
    }
619
    /// Returns the memory regions associated with this memory layout,
    /// suitable for passing to a hypervisor for mapping into memory
    ///
    /// Each offset produced by the builder is cross-checked against
    /// the offset stored in `self`; a mismatch means the builder and
    /// the layout computation have diverged, and yields an error.
    #[cfg_attr(feature = "nanvix-unstable", allow(unused))]
    pub(crate) fn get_memory_regions_<K: MemoryRegionKind>(
        &self,
        host_base: K::HostBaseType,
    ) -> Result<Vec<MemoryRegion_<K>>> {
        let mut builder = MemoryRegionVecBuilder::new(Self::BASE_ADDRESS, host_base);

        // code
        let peb_offset = builder.push_page_aligned(
            self.code_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Code,
        );

        let expected_peb_offset = TryInto::<usize>::try_into(self.peb_offset)?;

        if peb_offset != expected_peb_offset {
            return Err(new_error!(
                "PEB offset does not match expected PEB offset expected:  {}, actual:  {}",
                expected_peb_offset,
                peb_offset
            ));
        }

        // PEB + preallocated FileMappingInfo array
        #[cfg(feature = "nanvix-unstable")]
        let heap_offset = {
            let peb_and_array_size = size_of::<HyperlightPEB>()
                + hyperlight_common::mem::MAX_FILE_MAPPINGS
                    * size_of::<hyperlight_common::mem::FileMappingInfo>();
            builder.push_page_aligned(
                peb_and_array_size,
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
                Peb,
            )
        };
        #[cfg(not(feature = "nanvix-unstable"))]
        let heap_offset =
            builder.push_page_aligned(size_of::<HyperlightPEB>(), MemoryRegionFlags::READ, Peb);

        let expected_heap_offset = TryInto::<usize>::try_into(self.guest_heap_buffer_offset)?;

        if heap_offset != expected_heap_offset {
            return Err(new_error!(
                "Guest Heap offset does not match expected Guest Heap offset expected:  {}, actual:  {}",
                expected_heap_offset,
                heap_offset
            ));
        }

        // heap (executable only when the executable_heap feature is on)
        #[cfg(feature = "executable_heap")]
        let init_data_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Heap,
        );
        #[cfg(not(feature = "executable_heap"))]
        let init_data_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            Heap,
        );

        let expected_init_data_offset = TryInto::<usize>::try_into(self.init_data_offset)?;

        if init_data_offset != expected_init_data_offset {
            return Err(new_error!(
                "Init Data offset does not match expected Init Data offset expected:  {}, actual:  {}",
                expected_init_data_offset,
                init_data_offset
            ));
        }

        // init data (optional; uses the configured permissions or the
        // default guest-blob flags)
        let after_init_offset = if self.init_data_size > 0 {
            let mem_flags = self
                .init_data_permissions
                .unwrap_or(DEFAULT_GUEST_BLOB_MEM_FLAGS);
            builder.push_page_aligned(self.init_data_size, mem_flags, InitData)
        } else {
            init_data_offset
        };

        let final_offset = after_init_offset;

        let expected_final_offset = TryInto::<usize>::try_into(self.get_memory_size()?)?;

        if final_offset != expected_final_offset {
            return Err(new_error!(
                "Final offset does not match expected Final offset expected:  {}, actual:  {}",
                expected_final_offset,
                final_offset
            ));
        }

        Ok(builder.build())
    }
720
721    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
722    pub(crate) fn write_init_data(&self, out: &mut [u8], bytes: &[u8]) -> Result<()> {
723        out[self.init_data_offset..self.init_data_offset + self.init_data_size]
724            .copy_from_slice(bytes);
725        Ok(())
726    }
727
    /// Write the finished memory layout to `mem` and return `Ok` if
    /// successful.
    ///
    /// This populates the fields of the `HyperlightPEB` structure inside
    /// `mem`: the input/output buffer sizes and addresses, the init-data
    /// size and address, the heap size and address, and (when the
    /// `nanvix-unstable` feature is enabled) the file-mappings
    /// descriptor.
    ///
    /// Returns an error if a write would fall outside `mem`, or if an
    /// offset or size does not fit in a `u64`.
    ///
    /// Note: `mem` may have been modified, even if `Err` was returned
    /// from this function.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_peb(&self, mem: &mut [u8]) -> Result<()> {
        // Guest addresses below are formed by offsetting from the
        // snapshot base address.
        let guest_offset = SandboxMemoryLayout::BASE_ADDRESS;

        // Bounds-checked write of `value` as a native-endian u64 into
        // `mem` at byte `offset`.
        fn write_u64(mem: &mut [u8], offset: usize, value: u64) -> Result<()> {
            if offset + 8 > mem.len() {
                return Err(new_error!(
                    "Cannot write to offset {} in slice of len {}",
                    offset,
                    mem.len()
                ));
            }
            mem[offset..offset + 8].copy_from_slice(&u64::to_ne_bytes(value));
            Ok(())
        }

        // Turn a layout-relative field offset (`self.<field>`) into an
        // absolute guest address by adding the base address; errors if
        // the sum does not fit in a u64.
        macro_rules! get_address {
            ($something:ident) => {
                u64::try_from(guest_offset + self.$something)?
            };
        }

        // Start of setting up the PEB. The following are in the order of the PEB fields

        // Set up input buffer pointer
        write_u64(
            mem,
            self.get_input_data_size_offset(),
            self.sandbox_memory_config
                .get_input_data_size()
                .try_into()?,
        )?;
        write_u64(
            mem,
            self.get_input_data_pointer_offset(),
            self.get_input_data_buffer_gva(),
        )?;

        // Set up output buffer pointer
        write_u64(
            mem,
            self.get_output_data_size_offset(),
            self.sandbox_memory_config
                .get_output_data_size()
                .try_into()?,
        )?;
        write_u64(
            mem,
            self.get_output_data_pointer_offset(),
            self.get_output_data_buffer_gva(),
        )?;

        // Set up init data pointer. The recorded size is the span from
        // the init-data offset to the (unaligned) end of the layout.
        write_u64(
            mem,
            self.get_init_data_size_offset(),
            (self.get_unaligned_memory_size() - self.init_data_offset).try_into()?,
        )?;
        let addr = get_address!(init_data_offset);
        write_u64(mem, self.get_init_data_pointer_offset(), addr)?;

        // Set up heap buffer pointer
        let addr = get_address!(guest_heap_buffer_offset);
        write_u64(mem, self.get_heap_size_offset(), self.heap_size.try_into()?)?;
        write_u64(mem, self.get_heap_pointer_offset(), addr)?;

        // Set up the file_mappings descriptor in the PEB.
        // - The `size` field holds the number of valid FileMappingInfo
        //   entries currently written (initially 0 — entries are added
        //   later by map_file_cow / evolve).
        // - The `ptr` field holds the guest address of the preallocated
        //   FileMappingInfo array
        #[cfg(feature = "nanvix-unstable")]
        write_u64(mem, self.get_file_mappings_size_offset(), 0)?;
        #[cfg(feature = "nanvix-unstable")]
        write_u64(
            mem,
            self.get_file_mappings_pointer_offset(),
            self.get_file_mappings_array_gva(),
        )?;

        // End of setting up the PEB

        // The input and output data regions do not have their layout
        // initialised here, because they are in the scratch
        // region---they are instead set in
        // [`SandboxMemoryManager::update_scratch_bookkeeping`].

        Ok(())
    }
823
824    /// Determine what region this gpa is in, and its offset into that region
825    pub(crate) fn resolve_gpa(
826        &self,
827        gpa: u64,
828        mmap_regions: &[MemoryRegion],
829    ) -> Option<ResolvedGpa<(), ()>> {
830        let scratch_base = hyperlight_common::layout::scratch_base_gpa(self.scratch_size);
831        if gpa >= scratch_base && gpa < scratch_base + self.scratch_size as u64 {
832            return Some(ResolvedGpa {
833                offset: (gpa - scratch_base) as usize,
834                base: BaseGpaRegion::Scratch(()),
835            });
836        } else if gpa >= SandboxMemoryLayout::BASE_ADDRESS as u64
837            && gpa < SandboxMemoryLayout::BASE_ADDRESS as u64 + self.snapshot_size as u64
838        {
839            return Some(ResolvedGpa {
840                offset: gpa as usize - SandboxMemoryLayout::BASE_ADDRESS,
841                base: BaseGpaRegion::Snapshot(()),
842            });
843        }
844        for rgn in mmap_regions {
845            if gpa >= rgn.guest_region.start as u64 && gpa < rgn.guest_region.end as u64 {
846                return Some(ResolvedGpa {
847                    offset: gpa as usize - rgn.guest_region.start,
848                    base: BaseGpaRegion::Mmap(rgn.clone()),
849                });
850            }
851        }
852        None
853    }
854}
855
#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;

    use super::*;

    /// Recompute, from first principles, the total size that
    /// `get_memory_size` should report for `layout`.
    fn get_expected_memory_size(layout: &SandboxMemoryLayout) -> usize {
        // Regions are accumulated in the same order as the physical layout.
        let code = layout.code_size;

        // The PEB pages also hold the preallocated FileMappingInfo array
        // when the nanvix-unstable feature is enabled.
        #[cfg(feature = "nanvix-unstable")]
        let peb_and_array = size_of::<HyperlightPEB>()
            + hyperlight_common::mem::MAX_FILE_MAPPINGS
                * size_of::<hyperlight_common::mem::FileMappingInfo>();
        #[cfg(not(feature = "nanvix-unstable"))]
        let peb_and_array = size_of::<HyperlightPEB>();

        // Both the PEB block and the heap are rounded up to a whole page.
        let peb = peb_and_array.next_multiple_of(PAGE_SIZE_USIZE);
        let heap = layout.heap_size.next_multiple_of(PAGE_SIZE_USIZE);

        code + peb + heap
    }

    #[test]
    fn test_get_memory_size() {
        let cfg = SandboxConfiguration::default();
        let layout = SandboxMemoryLayout::new(cfg, 4096, 0, None).unwrap();
        let expected = get_expected_memory_size(&layout);
        assert_eq!(layout.get_memory_size().unwrap(), expected);
    }

    #[test]
    fn test_max_memory_sandbox() {
        let mut cfg = SandboxConfiguration::default();
        // A scratch size over the 16 GiB limit must be rejected.
        cfg.set_scratch_size(17 * 1024 * 1024 * 1024);
        cfg.set_input_data_size(16 * 1024 * 1024 * 1024);
        let err = SandboxMemoryLayout::new(cfg, 4096, 4096, None).unwrap_err();
        assert!(matches!(err, MemoryRequestTooBig(..)));
    }
}