// hyperlight_host/mem/layout.rs
/*
Copyright 2025  The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
 */
//! This module describes the virtual and physical addresses of a
//! number of special regions in the hyperlight VM, although we hope
//! to reduce the number of these over time.
//!
//! A snapshot freshly created from an empty VM will result in roughly
//! the following physical layout:
//!
//! +-------------------------------------------+
//! |             Guest Page Tables             |
//! +-------------------------------------------+
//! |              Init Data                    | (GuestBlob size)
//! +-------------------------------------------+
//! |             Guest Heap                    |
//! +-------------------------------------------+
//! |                PEB Struct                 | (HyperlightPEB size)
//! +-------------------------------------------+
//! |               Guest Code                  |
//! +-------------------------------------------+ 0x1_000
//! |              NULL guard page              |
//! +-------------------------------------------+ 0x0_000
//!
//! Everything except for the guest page tables is currently
//! identity-mapped; the guest page tables themselves are mapped at
//! [`hyperlight_common::layout::SNAPSHOT_PT_GVA`] =
//! 0xffff_8000_0000_0000.
//!
//! - `InitData` - some extra data that can be loaded onto the sandbox during
//!   initialization.
//!
//! - `GuestHeap` - this is a buffer that is used for heap data in the guest. the length
//!   of this field is returned by the `heap_size()` method of this struct
//!
//! There is also a scratch region at the top of physical memory,
//! which is mostly laid out as a large undifferentiated blob of
//! memory, although at present the snapshot process specially
//! privileges the statically allocated input and output data regions:
//!
//! +-------------------------------------------+ (top of physical memory)
//! |         Exception Stack, Metadata         |
//! +-------------------------------------------+ (1 page below)
//! |              Scratch Memory               |
//! +-------------------------------------------+
//! |                Output Data                |
//! +-------------------------------------------+
//! |                Input Data                 |
//! +-------------------------------------------+ (scratch size)

use std::fmt::Debug;
use std::mem::{offset_of, size_of};

use hyperlight_common::mem::{HyperlightPEB, PAGE_SIZE_USIZE};
use tracing::{Span, instrument};

use super::memory_region::MemoryRegionType::{Code, Heap, InitData, Peb};
use super::memory_region::{
    DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegion, MemoryRegion_, MemoryRegionFlags, MemoryRegionKind,
    MemoryRegionVecBuilder,
};
#[cfg(any(gdb, feature = "mem_profile"))]
use super::shared_mem::HostSharedMemory;
use super::shared_mem::{ExclusiveSharedMemory, ReadonlySharedMemory};
use crate::error::HyperlightError::{MemoryRequestTooBig, MemoryRequestTooSmall};
use crate::sandbox::SandboxConfiguration;
use crate::{Result, new_error};
/// The base region of guest physical memory that a resolved guest
/// physical address falls within.
///
/// `Sn` and `Sc` are the representations of the snapshot and scratch
/// memories respectively (e.g. byte slices or shared-memory handles).
pub(crate) enum BaseGpaRegion<Sn, Sc> {
    /// The address falls in the snapshot region.
    Snapshot(Sn),
    /// The address falls in the scratch region (top of physical memory,
    /// per the module-level diagram).
    Scratch(Sc),
    /// The address falls in a host-provided mapped region.
    Mmap(MemoryRegion),
}
86
// It's an invariant of this type, checked on creation, that the
// offset is in bounds for the base region.
/// A guest physical address resolved to an offset within one of the
/// base regions ([`BaseGpaRegion`]).
pub(crate) struct ResolvedGpa<Sn, Sc> {
    /// Offset from the start of `base`; in bounds by construction
    /// (see invariant above).
    pub(crate) offset: usize,
    /// The region in which the resolved address falls.
    pub(crate) base: BaseGpaRegion<Sn, Sc>,
}
93
// Expose the exclusive shared memory as a plain byte-slice view.
impl AsRef<[u8]> for ExclusiveSharedMemory {
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}
// Expose the readonly shared memory as a plain byte-slice view.
impl AsRef<[u8]> for ReadonlySharedMemory {
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}
104
105impl<Sn, Sc> ResolvedGpa<Sn, Sc> {
106    pub(crate) fn with_memories<Sn2, Sc2>(self, sn: Sn2, sc: Sc2) -> ResolvedGpa<Sn2, Sc2> {
107        ResolvedGpa {
108            offset: self.offset,
109            base: match self.base {
110                BaseGpaRegion::Snapshot(_) => BaseGpaRegion::Snapshot(sn),
111                BaseGpaRegion::Scratch(_) => BaseGpaRegion::Scratch(sc),
112                BaseGpaRegion::Mmap(r) => BaseGpaRegion::Mmap(r),
113            },
114        }
115    }
116}
impl<'a> BaseGpaRegion<&'a [u8], &'a [u8]> {
    /// Borrow the full byte contents of this base region.
    ///
    /// Note that the returned slice has lifetime `'a` (that of the
    /// snapshot/scratch slices), not the shorter borrow of `self`.
    pub(crate) fn as_ref<'b>(&'b self) -> &'a [u8] {
        match self {
            BaseGpaRegion::Snapshot(sn) => sn,
            BaseGpaRegion::Scratch(sc) => sc,
            // Safety: relies on the documented invariant of
            // `MemoryRegion` that the mapped memory stays alive as long
            // as the sandbox is alive (see the note on the
            // `copy_to_slice` path further down this file).
            // NOTE(review): the returned lifetime `'a` is not tied to
            // the mapping itself — confirm callers cannot hold the
            // slice past the sandbox's lifetime.
            BaseGpaRegion::Mmap(r) => unsafe {
                #[allow(clippy::useless_conversion)]
                let host_region_base: usize = r.host_region.start.into();
                #[allow(clippy::useless_conversion)]
                let host_region_end: usize = r.host_region.end.into();
                let len = host_region_end - host_region_base;
                std::slice::from_raw_parts(host_region_base as *const u8, len)
            },
        }
    }
}
133impl<'a> ResolvedGpa<&'a [u8], &'a [u8]> {
134    pub(crate) fn as_ref<'b>(&'b self) -> &'a [u8] {
135        let base = self.base.as_ref();
136        if self.offset > base.len() {
137            return &[];
138        }
139        &self.base.as_ref()[self.offset..]
140    }
141}
/// Abstraction over memory that can be read into a caller-provided
/// slice at a given byte offset; used by the gdb / mem_profile paths.
#[cfg(any(gdb, feature = "mem_profile"))]
#[allow(unused)] // may be unused when i686-guest is also enabled
pub(crate) trait ReadableSharedMemory {
    /// Copy `slice.len()` bytes starting at `offset` into `slice`.
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()>;
}
#[cfg(any(gdb, feature = "mem_profile"))]
// Delegate to `HostSharedMemory::copy_to_slice` so that a
// `&HostSharedMemory` can be used wherever the trait is expected.
impl ReadableSharedMemory for &HostSharedMemory {
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        HostSharedMemory::copy_to_slice(self, slice, offset)
    }
}
#[cfg(any(gdb, feature = "mem_profile"))]
mod coherence_hack {
    // This marker trait exists solely so the blanket
    // `ReadableSharedMemory` impl in the parent module does not
    // conflict (under trait-coherence rules) with the dedicated
    // `&HostSharedMemory` impl.
    use super::{ExclusiveSharedMemory, ReadonlySharedMemory};
    #[allow(unused)] // it actually is; see the impl below
    pub(super) trait SharedMemoryAsRefMarker: AsRef<[u8]> {}
    impl SharedMemoryAsRefMarker for ExclusiveSharedMemory {}
    impl SharedMemoryAsRefMarker for &ExclusiveSharedMemory {}
    impl SharedMemoryAsRefMarker for ReadonlySharedMemory {}
    impl SharedMemoryAsRefMarker for &ReadonlySharedMemory {}
}
#[cfg(any(gdb, feature = "mem_profile"))]
impl<T: coherence_hack::SharedMemoryAsRefMarker> ReadableSharedMemory for T {
    /// Copy `slice.len()` bytes starting at `offset` out of this
    /// memory into `slice`.
    ///
    /// Returns an error if the requested range does not fit in the
    /// underlying memory — including when `offset + slice.len()`
    /// would overflow `usize`, which previously could panic (debug)
    /// or wrap past the bounds check (release).
    fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
        let ss: &[u8] = self.as_ref();
        // checked_add: an oversized offset must not be able to wrap
        // around and slip past the bounds check below.
        match offset.checked_add(slice.len()) {
            Some(end) if end <= ss.len() => {
                slice.copy_from_slice(&ss[offset..end]);
                Ok(())
            }
            _ => Err(new_error!(
                "Attempt to read up to {} in memory of size {}",
                offset.saturating_add(slice.len()),
                ss.len()
            )),
        }
    }
}
#[cfg(any(gdb, feature = "mem_profile"))]
impl<Sn: ReadableSharedMemory, Sc: ReadableSharedMemory> ResolvedGpa<Sn, Sc> {
    /// Copy `slice.len()` bytes starting at this resolved address into
    /// `slice`, dispatching on which base region the address fell in.
    /// For mmap regions the bounds check is performed inline here.
    #[allow(unused)] // may be unused when i686-guest is also enabled
    pub(crate) fn copy_to_slice(&self, slice: &mut [u8]) -> Result<()> {
        match &self.base {
            BaseGpaRegion::Snapshot(sn) => sn.copy_to_slice(slice, self.offset),
            BaseGpaRegion::Scratch(sc) => sc.copy_to_slice(slice, self.offset),
            BaseGpaRegion::Mmap(r) => unsafe {
                #[allow(clippy::useless_conversion)]
                let host_region_base: usize = r.host_region.start.into();
                #[allow(clippy::useless_conversion)]
                let host_region_end: usize = r.host_region.end.into();
                let len = host_region_end - host_region_base;
                // Safety: it's a documented invariant of MemoryRegion
                // that the memory must remain alive as long as the
                // sandbox is alive, and the way this code is used,
                // the lifetimes of the snapshot and scratch memories
                // ensure that the sandbox is still alive. This could
                // perhaps be cleaned up/improved/made harder to
                // misuse significantly, but it would require a much
                // larger rework.
                let ss = std::slice::from_raw_parts(host_region_base as *const u8, len);
                let end = self.offset + slice.len();
                if end > ss.len() {
                    return Err(new_error!(
                        "Attempt to read up to {} in memory of size {}",
                        self.offset + slice.len(),
                        ss.len()
                    ));
                }
                slice.copy_from_slice(&ss[self.offset..end]);
                Ok(())
            },
        }
    }
}
215
/// Describes the offsets and sizes of the special regions in a
/// sandbox's guest physical memory (see the module-level diagram).
#[derive(Copy, Clone)]
pub(crate) struct SandboxMemoryLayout {
    pub(super) sandbox_memory_config: SandboxConfiguration,
    /// The heap size of this sandbox.
    pub(super) heap_size: usize,
    // Size in bytes of the init-data blob loaded at initialization.
    init_data_size: usize,

    /// The following fields are offsets to the actual PEB struct fields.
    /// They are used when writing the PEB struct itself
    peb_offset: usize,
    peb_input_data_offset: usize,
    peb_output_data_offset: usize,
    peb_init_data_offset: usize,
    peb_heap_data_offset: usize,
    #[cfg(feature = "nanvix-unstable")]
    peb_file_mappings_offset: usize,

    // Page-aligned offset of the guest heap buffer.
    guest_heap_buffer_offset: usize,
    // Page-aligned offset of the init-data region.
    init_data_offset: usize,
    // Size of the page-table region; None until set_pt_size is called.
    pt_size: Option<usize>,

    // other
    pub(crate) peb_address: usize,
    code_size: usize,
    // The offset in the sandbox memory where the code starts
    guest_code_offset: usize,
    #[cfg_attr(feature = "i686-guest", allow(unused))]
    pub(crate) init_data_permissions: Option<MemoryRegionFlags>,

    // The size of the scratch region in physical memory; note that
    // this will appear under the top of physical memory.
    scratch_size: usize,
    // The guest-visible size of the snapshot region in physical
    // memory. After compaction this may be smaller than the full
    // snapshot blob (which also contains a PT tail that is only
    // host-accessible).
    snapshot_size: usize,
}
254
255impl Debug for SandboxMemoryLayout {
256    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
257        let mut ff = f.debug_struct("SandboxMemoryLayout");
258        ff.field(
259            "Total Memory Size",
260            &format_args!("{:#x}", self.get_memory_size().unwrap_or(0)),
261        )
262        .field("Heap Size", &format_args!("{:#x}", self.heap_size))
263        .field(
264            "Init Data Size",
265            &format_args!("{:#x}", self.init_data_size),
266        )
267        .field("PEB Address", &format_args!("{:#x}", self.peb_address))
268        .field("PEB Offset", &format_args!("{:#x}", self.peb_offset))
269        .field("Code Size", &format_args!("{:#x}", self.code_size))
270        .field(
271            "Input Data Offset",
272            &format_args!("{:#x}", self.peb_input_data_offset),
273        )
274        .field(
275            "Output Data Offset",
276            &format_args!("{:#x}", self.peb_output_data_offset),
277        )
278        .field(
279            "Init Data Offset",
280            &format_args!("{:#x}", self.peb_init_data_offset),
281        )
282        .field(
283            "Guest Heap Offset",
284            &format_args!("{:#x}", self.peb_heap_data_offset),
285        );
286        #[cfg(feature = "nanvix-unstable")]
287        ff.field(
288            "File Mappings Offset",
289            &format_args!("{:#x}", self.peb_file_mappings_offset),
290        );
291        ff.field(
292            "Guest Heap Buffer Offset",
293            &format_args!("{:#x}", self.guest_heap_buffer_offset),
294        )
295        .field(
296            "Init Data Offset",
297            &format_args!("{:#x}", self.init_data_offset),
298        )
299        .field("PT Size", &format_args!("{:#x}", self.pt_size.unwrap_or(0)))
300        .field(
301            "Guest Code Offset",
302            &format_args!("{:#x}", self.guest_code_offset),
303        )
304        .field(
305            "Scratch region size",
306            &format_args!("{:#x}", self.scratch_size),
307        )
308        .finish()
309    }
310}
311
312impl SandboxMemoryLayout {
    /// The maximum amount of memory a single sandbox will be allowed.
    ///
    /// Both the scratch region and the snapshot region are bounded by
    /// this size. The value is arbitrary but chosen to be large enough
    /// for most workloads while preventing accidental resource exhaustion.
    const MAX_MEMORY_SIZE: usize = (16 * 1024 * 1024 * 1024) - Self::BASE_ADDRESS; // 16 GiB - BASE_ADDRESS

    /// The base address of the sandbox's memory.
    /// Guest physical page 0 below this is left as a NULL guard page
    /// (see the module-level layout diagram).
    pub(crate) const BASE_ADDRESS: usize = 0x1000;

    // the offset into a sandbox's input/output buffer where the stack starts
    pub(crate) const STACK_POINTER_SIZE_BYTES: u64 = 8;
325
    /// Create a new `SandboxMemoryLayout` with the given
    /// `SandboxConfiguration`, code size and stack/heap size.
    ///
    /// Fails with `MemoryRequestTooBig` / `MemoryRequestTooSmall` if
    /// the configured scratch size is outside the allowed bounds, or
    /// propagates errors from size conversions.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn new(
        cfg: SandboxConfiguration,
        code_size: usize,
        init_data_size: usize,
        init_data_permissions: Option<MemoryRegionFlags>,
    ) -> Result<Self> {
        let heap_size = usize::try_from(cfg.get_heap_size())?;
        let scratch_size = cfg.get_scratch_size();
        // The scratch region is bounded above by the sandbox-wide cap...
        if scratch_size > Self::MAX_MEMORY_SIZE {
            return Err(MemoryRequestTooBig(scratch_size, Self::MAX_MEMORY_SIZE));
        }
        // ...and below by the space needed for the statically allocated
        // input/output data regions.
        let min_scratch_size = hyperlight_common::layout::min_scratch_size(
            cfg.get_input_data_size(),
            cfg.get_output_data_size(),
        );
        if scratch_size < min_scratch_size {
            return Err(MemoryRequestTooSmall(scratch_size, min_scratch_size));
        }

        let guest_code_offset = 0;
        // The following offsets are to the fields of the PEB struct itself!
        let peb_offset = code_size.next_multiple_of(PAGE_SIZE_USIZE);
        let peb_input_data_offset = peb_offset + offset_of!(HyperlightPEB, input_stack);
        let peb_output_data_offset = peb_offset + offset_of!(HyperlightPEB, output_stack);
        let peb_init_data_offset = peb_offset + offset_of!(HyperlightPEB, init_data);
        let peb_heap_data_offset = peb_offset + offset_of!(HyperlightPEB, guest_heap);
        #[cfg(feature = "nanvix-unstable")]
        let peb_file_mappings_offset = peb_offset + offset_of!(HyperlightPEB, file_mappings);

        // The following offsets are the actual values that relate to memory layout,
        // which are written to PEB struct
        let peb_address = Self::BASE_ADDRESS + peb_offset;
        // make sure heap buffer starts at 4K boundary.
        // The FileMappingInfo array is stored immediately after the PEB struct.
        // We statically reserve space for MAX_FILE_MAPPINGS entries so that
        // the heap never overlaps the array, even when all slots are used.
        // The host writes file mapping metadata here via write_file_mapping_entry;
        // the guest only reads the entries. We don't know at layout time how
        // many file mappings the host will register, so we reserve space for
        // the maximum number.
        // The heap starts at the next page boundary after this reserved area.
        #[cfg(feature = "nanvix-unstable")]
        let file_mappings_array_end = peb_offset
            + size_of::<HyperlightPEB>()
            + hyperlight_common::mem::MAX_FILE_MAPPINGS
                * size_of::<hyperlight_common::mem::FileMappingInfo>();
        #[cfg(feature = "nanvix-unstable")]
        let guest_heap_buffer_offset = file_mappings_array_end.next_multiple_of(PAGE_SIZE_USIZE);
        #[cfg(not(feature = "nanvix-unstable"))]
        let guest_heap_buffer_offset =
            (peb_offset + size_of::<HyperlightPEB>()).next_multiple_of(PAGE_SIZE_USIZE);

        // make sure init data starts at 4K boundary
        let init_data_offset =
            (guest_heap_buffer_offset + heap_size).next_multiple_of(PAGE_SIZE_USIZE);
        let mut ret = Self {
            peb_offset,
            heap_size,
            peb_input_data_offset,
            peb_output_data_offset,
            peb_init_data_offset,
            peb_heap_data_offset,
            #[cfg(feature = "nanvix-unstable")]
            peb_file_mappings_offset,
            sandbox_memory_config: cfg,
            code_size,
            guest_heap_buffer_offset,
            peb_address,
            guest_code_offset,
            init_data_offset,
            init_data_size,
            init_data_permissions,
            pt_size: None,
            scratch_size,
            snapshot_size: 0,
        };
        // The snapshot region initially covers the whole laid-out guest
        // memory; set_pt_size adjusts it later once the page-table size
        // is known.
        ret.set_snapshot_size(ret.get_memory_size()?);
        Ok(ret)
    }
408
    /// Get the offset in guest memory to the output data size
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_output_data_size_offset(&self) -> usize {
        // The size field is the first field in the `OutputData` struct
        self.peb_output_data_offset
    }

    /// Get the offset in guest memory to the init data size
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_init_data_size_offset(&self) -> usize {
        // The init data size is the first field in the `GuestMemoryRegion` struct
        self.peb_init_data_offset
    }

    /// Get the size of the scratch region in physical memory.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_scratch_size(&self) -> usize {
        self.scratch_size
    }

    /// Get the offset in guest memory to the output data pointer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_output_data_pointer_offset(&self) -> usize {
        // This field is immediately after the output data size field,
        // which is a `u64`.
        self.get_output_data_size_offset() + size_of::<u64>()
    }

    /// Get the offset in guest memory to the init data pointer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_init_data_pointer_offset(&self) -> usize {
        // The init data pointer is immediately after the init data size field,
        // which is a `u64`.
        self.get_init_data_size_offset() + size_of::<u64>()
    }
443
    /// Get the guest virtual address of the start of output data.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_output_data_buffer_gva(&self) -> u64 {
        // The output buffer sits immediately after the input buffer at
        // the base of the scratch region.
        hyperlight_common::layout::scratch_base_gva(self.scratch_size)
            + self.sandbox_memory_config.get_input_data_size() as u64
    }

    /// Get the offset into the host scratch buffer of the start of
    /// the output data.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_output_data_buffer_scratch_host_offset(&self) -> usize {
        // Output data follows the input data region.
        self.sandbox_memory_config.get_input_data_size()
    }

    /// Get the offset in guest memory to the input data size.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(super) fn get_input_data_size_offset(&self) -> usize {
        // The input data size is the first field in the input stack's `GuestMemoryRegion` struct
        self.peb_input_data_offset
    }

    /// Get the offset in guest memory to the input data pointer.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_input_data_pointer_offset(&self) -> usize {
        // The input data pointer is immediately after the input
        // data size field in the input data `GuestMemoryRegion` struct which is a `u64`.
        self.get_input_data_size_offset() + size_of::<u64>()
    }

    /// Get the guest virtual address of the start of input data
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_input_data_buffer_gva(&self) -> u64 {
        // Input data starts at the very base of the scratch region.
        hyperlight_common::layout::scratch_base_gva(self.scratch_size)
    }

    /// Get the offset into the host scratch buffer of the start of
    /// the input data
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_input_data_buffer_scratch_host_offset(&self) -> usize {
        // Input data is at the start of the scratch buffer.
        0
    }
485
    /// Get the offset from the beginning of the scratch region to the
    /// location where page tables will be eagerly copied on restore
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pt_base_scratch_offset(&self) -> usize {
        // The page tables go after the input and output data regions,
        // rounded up to the next page boundary.
        (self.sandbox_memory_config.get_input_data_size()
            + self.sandbox_memory_config.get_output_data_size())
        .next_multiple_of(hyperlight_common::vmem::PAGE_SIZE)
    }

    /// Get the base GPA to which the page tables will be eagerly
    /// copied on restore
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pt_base_gpa(&self) -> u64 {
        hyperlight_common::layout::scratch_base_gpa(self.scratch_size)
            + self.get_pt_base_scratch_offset() as u64
    }

    /// Get the first GPA of the scratch region that the host hasn't
    /// used for something else
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_first_free_scratch_gpa(&self) -> u64 {
        // pt_size is None until set_pt_size has been called; treat that
        // as a zero-sized page-table region.
        self.get_pt_base_gpa() + self.pt_size.unwrap_or(0) as u64
    }
509
    /// Get the offset in guest memory to the heap size
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_heap_size_offset(&self) -> usize {
        self.peb_heap_data_offset
    }

    /// Get the offset in guest memory to the file_mappings count field
    /// (the `size` field of the `GuestMemoryRegion` in the PEB).
    #[cfg(feature = "nanvix-unstable")]
    pub(crate) fn get_file_mappings_size_offset(&self) -> usize {
        self.peb_file_mappings_offset
    }

    /// Get the offset in guest memory to the file_mappings pointer field.
    #[cfg(feature = "nanvix-unstable")]
    fn get_file_mappings_pointer_offset(&self) -> usize {
        // The pointer field immediately follows the `u64` size field.
        self.get_file_mappings_size_offset() + size_of::<u64>()
    }

    /// Get the offset in snapshot memory where the FileMappingInfo array starts
    /// (immediately after the PEB struct, within the same page).
    #[cfg(feature = "nanvix-unstable")]
    pub(crate) fn get_file_mappings_array_offset(&self) -> usize {
        self.peb_offset + size_of::<HyperlightPEB>()
    }

    /// Get the guest address of the FileMappingInfo array.
    #[cfg(feature = "nanvix-unstable")]
    fn get_file_mappings_array_gva(&self) -> u64 {
        // This region is identity-mapped (see module docs), so the GVA
        // is just the base address plus the offset.
        (Self::BASE_ADDRESS + self.get_file_mappings_array_offset()) as u64
    }

    /// Get the offset of the heap pointer in guest memory,
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_heap_pointer_offset(&self) -> usize {
        // The heap pointer is immediately after the
        // heap size field in the guest heap's `GuestMemoryRegion` struct which is a `u64`.
        self.get_heap_size_offset() + size_of::<u64>()
    }
549
    /// Get the total size of guest memory in `self`'s memory
    /// layout, not yet rounded up to a page boundary.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    fn get_unaligned_memory_size(&self) -> usize {
        // The init-data region is the last region in the layout.
        self.init_data_offset + self.init_data_size
    }

    /// get the code offset
    /// This is the offset in the sandbox memory where the code starts
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_code_offset(&self) -> usize {
        self.guest_code_offset
    }

    /// Get the guest address of the code section in the sandbox
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_code_address(&self) -> usize {
        Self::BASE_ADDRESS + self.guest_code_offset
    }
569
570    /// Get the total size of guest memory in `self`'s memory
571    /// layout aligned to page size boundaries.
572    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
573    pub(crate) fn get_memory_size(&self) -> Result<usize> {
574        let total_memory = self.get_unaligned_memory_size();
575
576        // Size should be a multiple of page size.
577        let remainder = total_memory % PAGE_SIZE_USIZE;
578        let multiples = total_memory / PAGE_SIZE_USIZE;
579        let size = match remainder {
580            0 => total_memory,
581            _ => (multiples + 1) * PAGE_SIZE_USIZE,
582        };
583
584        if size > Self::MAX_MEMORY_SIZE {
585            Err(MemoryRequestTooBig(size, Self::MAX_MEMORY_SIZE))
586        } else {
587            Ok(size)
588        }
589    }
590
    /// Sets the size of the memory region used for page tables
    ///
    /// Fails with `MemoryRequestTooSmall` if the configured scratch
    /// region cannot hold the input/output regions plus `size` bytes
    /// of page tables.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_pt_size(&mut self, size: usize) -> Result<()> {
        let min_fixed_scratch = hyperlight_common::layout::min_scratch_size(
            self.sandbox_memory_config.get_input_data_size(),
            self.sandbox_memory_config.get_output_data_size(),
        );
        let min_scratch = min_fixed_scratch + size;
        if self.scratch_size < min_scratch {
            return Err(MemoryRequestTooSmall(self.scratch_size, min_scratch));
        }
        // Keep snapshot_size consistent: swap out the previously
        // accounted page-table size (0 if never set) for the new one.
        let old_pt_size = self.pt_size.unwrap_or(0);
        self.snapshot_size = self.snapshot_size - old_pt_size + size;
        self.pt_size = Some(size);
        Ok(())
    }
607
    /// Set the guest-visible size of the snapshot region.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn set_snapshot_size(&mut self, new_size: usize) {
        self.snapshot_size = new_size;
    }

    /// Get the size of the memory region used for page tables
    /// (0 if `set_pt_size` has not been called).
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pt_size(&self) -> usize {
        self.pt_size.unwrap_or(0)
    }
618
    /// Returns the memory regions associated with this memory layout,
    /// suitable for passing to a hypervisor for mapping into memory
    ///
    /// Each region's offset is cross-checked against the offsets
    /// computed in `new()`; a mismatch indicates an internal layout
    /// inconsistency and is reported as an error.
    #[cfg_attr(feature = "i686-guest", allow(unused))]
    pub(crate) fn get_memory_regions_<K: MemoryRegionKind>(
        &self,
        host_base: K::HostBaseType,
    ) -> Result<Vec<MemoryRegion_<K>>> {
        let mut builder = MemoryRegionVecBuilder::new(Self::BASE_ADDRESS, host_base);

        // code
        let peb_offset = builder.push_page_aligned(
            self.code_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Code,
        );

        let expected_peb_offset = TryInto::<usize>::try_into(self.peb_offset)?;

        if peb_offset != expected_peb_offset {
            return Err(new_error!(
                "PEB offset does not match expected PEB offset expected:  {}, actual:  {}",
                expected_peb_offset,
                peb_offset
            ));
        }

        // PEB + preallocated FileMappingInfo array
        #[cfg(feature = "nanvix-unstable")]
        let heap_offset = {
            let peb_and_array_size = size_of::<HyperlightPEB>()
                + hyperlight_common::mem::MAX_FILE_MAPPINGS
                    * size_of::<hyperlight_common::mem::FileMappingInfo>();
            builder.push_page_aligned(
                peb_and_array_size,
                MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
                Peb,
            )
        };
        #[cfg(not(feature = "nanvix-unstable"))]
        let heap_offset =
            builder.push_page_aligned(size_of::<HyperlightPEB>(), MemoryRegionFlags::READ, Peb);

        let expected_heap_offset = TryInto::<usize>::try_into(self.guest_heap_buffer_offset)?;

        if heap_offset != expected_heap_offset {
            return Err(new_error!(
                "Guest Heap offset does not match expected Guest Heap offset expected:  {}, actual:  {}",
                expected_heap_offset,
                heap_offset
            ));
        }

        // heap
        #[cfg(feature = "executable_heap")]
        let init_data_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE | MemoryRegionFlags::EXECUTE,
            Heap,
        );
        #[cfg(not(feature = "executable_heap"))]
        let init_data_offset = builder.push_page_aligned(
            self.heap_size,
            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
            Heap,
        );

        let expected_init_data_offset = TryInto::<usize>::try_into(self.init_data_offset)?;

        if init_data_offset != expected_init_data_offset {
            return Err(new_error!(
                "Init Data offset does not match expected Init Data offset expected:  {}, actual:  {}",
                expected_init_data_offset,
                init_data_offset
            ));
        }

        // init data (omitted entirely when there is no init-data blob)
        let after_init_offset = if self.init_data_size > 0 {
            let mem_flags = self
                .init_data_permissions
                .unwrap_or(DEFAULT_GUEST_BLOB_MEM_FLAGS);
            builder.push_page_aligned(self.init_data_size, mem_flags, InitData)
        } else {
            init_data_offset
        };

        let final_offset = after_init_offset;

        let expected_final_offset = TryInto::<usize>::try_into(self.get_memory_size()?)?;

        if final_offset != expected_final_offset {
            return Err(new_error!(
                "Final offset does not match expected Final offset expected:  {}, actual:  {}",
                expected_final_offset,
                final_offset
            ));
        }

        Ok(builder.build())
    }
719
720    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
721    pub(crate) fn write_init_data(&self, out: &mut [u8], bytes: &[u8]) -> Result<()> {
722        out[self.init_data_offset..self.init_data_offset + self.init_data_size]
723            .copy_from_slice(bytes);
724        Ok(())
725    }
726
    /// Write the finished memory layout to `mem` and return `Ok` if
    /// successful.
    ///
    /// Note: `mem` may have been modified, even if `Err` was returned
    /// from this function.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_peb(&self, mem: &mut [u8]) -> Result<()> {
        // All pointers written into the PEB are guest virtual/physical
        // addresses, so host-side layout offsets are rebased onto the
        // sandbox's guest base address.
        let guest_offset = SandboxMemoryLayout::BASE_ADDRESS;

        // Bounds-checked write of a u64 at `offset` into `mem`.
        // NOTE(review): uses native byte order (`to_ne_bytes`) — assumes
        // guest and host share endianness; confirm for cross-endian hosts.
        fn write_u64(mem: &mut [u8], offset: usize, value: u64) -> Result<()> {
            if offset + 8 > mem.len() {
                return Err(new_error!(
                    "Cannot write to offset {} in slice of len {}",
                    offset,
                    mem.len()
                ));
            }
            mem[offset..offset + 8].copy_from_slice(&u64::to_ne_bytes(value));
            Ok(())
        }

        // Converts one of `self`'s layout-offset fields into a guest
        // address by rebasing it onto `guest_offset`.
        macro_rules! get_address {
            ($something:ident) => {
                u64::try_from(guest_offset + self.$something)?
            };
        }

        // Start of setting up the PEB. The following are in the order of the PEB fields

        // Set up input buffer pointer
        write_u64(
            mem,
            self.get_input_data_size_offset(),
            self.sandbox_memory_config
                .get_input_data_size()
                .try_into()?,
        )?;
        write_u64(
            mem,
            self.get_input_data_pointer_offset(),
            self.get_input_data_buffer_gva(),
        )?;

        // Set up output buffer pointer
        write_u64(
            mem,
            self.get_output_data_size_offset(),
            self.sandbox_memory_config
                .get_output_data_size()
                .try_into()?,
        )?;
        write_u64(
            mem,
            self.get_output_data_pointer_offset(),
            self.get_output_data_buffer_gva(),
        )?;

        // Set up init data pointer
        // NOTE(review): the size written is the distance from the init
        // data offset to the end of the (unaligned) memory, i.e. it
        // presumably assumes init data is the last region before the
        // page tables — confirm against the layout builder.
        write_u64(
            mem,
            self.get_init_data_size_offset(),
            (self.get_unaligned_memory_size() - self.init_data_offset).try_into()?,
        )?;
        let addr = get_address!(init_data_offset);
        write_u64(mem, self.get_init_data_pointer_offset(), addr)?;

        // Set up heap buffer pointer
        let addr = get_address!(guest_heap_buffer_offset);
        write_u64(mem, self.get_heap_size_offset(), self.heap_size.try_into()?)?;
        write_u64(mem, self.get_heap_pointer_offset(), addr)?;

        // Set up the file_mappings descriptor in the PEB.
        // - The `size` field holds the number of valid FileMappingInfo
        //   entries currently written (initially 0 — entries are added
        //   later by map_file_cow / evolve).
        // - The `ptr` field holds the guest address of the preallocated
        //   FileMappingInfo array
        #[cfg(feature = "nanvix-unstable")]
        write_u64(mem, self.get_file_mappings_size_offset(), 0)?;
        #[cfg(feature = "nanvix-unstable")]
        write_u64(
            mem,
            self.get_file_mappings_pointer_offset(),
            self.get_file_mappings_array_gva(),
        )?;

        // End of setting up the PEB

        // The input and output data regions do not have their layout
        // initialised here, because they are in the scratch
        // region---they are instead set in
        // [`SandboxMemoryManager::update_scratch_bookkeeping`].

        Ok(())
    }
822
823    /// Determine what region this gpa is in, and its offset into that region
824    pub(crate) fn resolve_gpa(
825        &self,
826        gpa: u64,
827        mmap_regions: &[MemoryRegion],
828    ) -> Option<ResolvedGpa<(), ()>> {
829        let scratch_base = hyperlight_common::layout::scratch_base_gpa(self.scratch_size);
830        if gpa >= scratch_base && gpa < scratch_base + self.scratch_size as u64 {
831            return Some(ResolvedGpa {
832                offset: (gpa - scratch_base) as usize,
833                base: BaseGpaRegion::Scratch(()),
834            });
835        } else if gpa >= SandboxMemoryLayout::BASE_ADDRESS as u64
836            && gpa < SandboxMemoryLayout::BASE_ADDRESS as u64 + self.snapshot_size as u64
837        {
838            return Some(ResolvedGpa {
839                offset: gpa as usize - SandboxMemoryLayout::BASE_ADDRESS,
840                base: BaseGpaRegion::Snapshot(()),
841            });
842        }
843        for rgn in mmap_regions {
844            if gpa >= rgn.guest_region.start as u64 && gpa < rgn.guest_region.end as u64 {
845                return Some(ResolvedGpa {
846                    offset: gpa as usize - rgn.guest_region.start,
847                    base: BaseGpaRegion::Mmap(rgn.clone()),
848                });
849            }
850        }
851        None
852    }
853}
854
#[cfg(test)]
mod tests {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;

    use super::*;

    /// Independently compute the total size a layout should occupy, in
    /// region order: code, then the page-aligned PEB (plus the
    /// preallocated FileMappingInfo array when enabled), then the
    /// page-aligned heap.
    fn get_expected_memory_size(layout: &SandboxMemoryLayout) -> usize {
        // PEB + preallocated FileMappingInfo array
        #[cfg(feature = "nanvix-unstable")]
        let peb_region = size_of::<HyperlightPEB>()
            + hyperlight_common::mem::MAX_FILE_MAPPINGS
                * size_of::<hyperlight_common::mem::FileMappingInfo>();
        #[cfg(not(feature = "nanvix-unstable"))]
        let peb_region = size_of::<HyperlightPEB>();

        layout.code_size
            + peb_region.next_multiple_of(PAGE_SIZE_USIZE)
            + layout.heap_size.next_multiple_of(PAGE_SIZE_USIZE)
    }

    #[test]
    fn test_get_memory_size() {
        let layout =
            SandboxMemoryLayout::new(SandboxConfiguration::default(), 4096, 0, None).unwrap();
        assert_eq!(
            layout.get_memory_size().unwrap(),
            get_expected_memory_size(&layout)
        );
    }

    #[test]
    fn test_max_memory_sandbox() {
        let mut cfg = SandboxConfiguration::default();
        // A scratch size above the 16 GiB limit must be rejected.
        cfg.set_scratch_size(17 * 1024 * 1024 * 1024);
        cfg.set_input_data_size(16 * 1024 * 1024 * 1024);
        let err = SandboxMemoryLayout::new(cfg, 4096, 4096, None).unwrap_err();
        assert!(matches!(err, MemoryRequestTooBig(..)));
    }
}