hyperlight_host/mem/
memory_region.rs

1/*
2Copyright 2025  The Hyperlight Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17#[cfg(mshv2)]
18extern crate mshv_bindings2 as mshv_bindings;
19#[cfg(mshv2)]
20extern crate mshv_ioctls2 as mshv_ioctls;
21
22#[cfg(mshv3)]
23extern crate mshv_bindings3 as mshv_bindings;
24#[cfg(mshv3)]
25extern crate mshv_ioctls3 as mshv_ioctls;
26
27use std::ops::Range;
28
29use bitflags::bitflags;
30#[cfg(mshv)]
31use hyperlight_common::mem::PAGE_SHIFT;
32use hyperlight_common::mem::PAGE_SIZE_USIZE;
33#[cfg(kvm)]
34use kvm_bindings::{KVM_MEM_READONLY, kvm_userspace_memory_region};
35#[cfg(mshv2)]
36use mshv_bindings::{
37    HV_MAP_GPA_EXECUTABLE, HV_MAP_GPA_PERMISSIONS_NONE, HV_MAP_GPA_READABLE, HV_MAP_GPA_WRITABLE,
38};
39#[cfg(mshv3)]
40use mshv_bindings::{
41    MSHV_SET_MEM_BIT_EXECUTABLE, MSHV_SET_MEM_BIT_UNMAP, MSHV_SET_MEM_BIT_WRITABLE,
42};
43#[cfg(mshv)]
44use mshv_bindings::{hv_x64_memory_intercept_message, mshv_user_mem_region};
45#[cfg(target_os = "windows")]
46use windows::Win32::System::Hypervisor::{self, WHV_MEMORY_ACCESS_TYPE};
47
48#[cfg(feature = "init-paging")]
49use super::mgr::{PAGE_NX, PAGE_PRESENT, PAGE_RW, PAGE_USER};
50
/// Default permissions applied to the guest-blob memory region: read-only.
pub(crate) const DEFAULT_GUEST_BLOB_MEM_FLAGS: MemoryRegionFlags = MemoryRegionFlags::READ;
52
bitflags! {
    /// Flags representing the memory permissions for a memory region.
    ///
    /// These are plain permission bits (READ/WRITE/EXECUTE) plus one
    /// marker bit (STACK_GUARD) identifying the stack guard page.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct MemoryRegionFlags: u32 {
        /// no permissions
        const NONE = 0;
        /// allow guest to read
        const READ = 1;
        /// allow guest to write
        const WRITE = 2;
        /// allow guest to execute
        const EXECUTE = 4;
        /// identifier that this is a stack guard page
        const STACK_GUARD = 8;
    }
}
69
70impl MemoryRegionFlags {
71    #[cfg(feature = "init-paging")]
72    pub(crate) fn translate_flags(&self) -> u64 {
73        let mut page_flags = 0;
74
75        page_flags |= PAGE_PRESENT; // Mark page as present
76
77        if self.contains(MemoryRegionFlags::WRITE) {
78            page_flags |= PAGE_RW; // Allow read/write
79        }
80
81        if self.contains(MemoryRegionFlags::STACK_GUARD) {
82            page_flags |= PAGE_RW; // The guard page is marked RW so that if it gets written to we can detect it in the host
83        }
84
85        if self.contains(MemoryRegionFlags::EXECUTE) {
86            page_flags |= PAGE_USER; // Allow user access
87        } else {
88            page_flags |= PAGE_NX; // Mark as non-executable if EXECUTE is not set
89        }
90
91        page_flags
92    }
93}
94
95impl std::fmt::Display for MemoryRegionFlags {
96    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
97        if self.is_empty() {
98            write!(f, "NONE")
99        } else {
100            let mut first = true;
101            if self.contains(MemoryRegionFlags::READ) {
102                write!(f, "READ")?;
103                first = false;
104            }
105            if self.contains(MemoryRegionFlags::WRITE) {
106                if !first {
107                    write!(f, " | ")?;
108                }
109                write!(f, "WRITE")?;
110                first = false;
111            }
112            if self.contains(MemoryRegionFlags::EXECUTE) {
113                if !first {
114                    write!(f, " | ")?;
115                }
116                write!(f, "EXECUTE")?;
117            }
118            Ok(())
119        }
120    }
121}
122
#[cfg(target_os = "windows")]
impl TryFrom<WHV_MEMORY_ACCESS_TYPE> for MemoryRegionFlags {
    type Error = crate::HyperlightError;

    /// Converts a Windows Hypervisor Platform memory-access type into the
    /// corresponding permission flag; any unrecognized value is an error.
    fn try_from(access: WHV_MEMORY_ACCESS_TYPE) -> crate::Result<Self> {
        if access == Hypervisor::WHvMemoryAccessRead {
            Ok(MemoryRegionFlags::READ)
        } else if access == Hypervisor::WHvMemoryAccessWrite {
            Ok(MemoryRegionFlags::WRITE)
        } else if access == Hypervisor::WHvMemoryAccessExecute {
            Ok(MemoryRegionFlags::EXECUTE)
        } else {
            Err(crate::HyperlightError::Error(
                "unknown memory access type".to_string(),
            ))
        }
    }
}
138
#[cfg(mshv)]
impl TryFrom<hv_x64_memory_intercept_message> for MemoryRegionFlags {
    type Error = crate::HyperlightError;

    /// Maps the intercept access-type code in the message header
    /// (0 = read, 1 = write, 2 = execute) to the matching permission flag;
    /// any other code is an error.
    fn try_from(msg: hv_x64_memory_intercept_message) -> crate::Result<Self> {
        let flag = match msg.header.intercept_access_type {
            0 => Some(MemoryRegionFlags::READ),
            1 => Some(MemoryRegionFlags::WRITE),
            2 => Some(MemoryRegionFlags::EXECUTE),
            _ => None,
        };
        flag.ok_or_else(|| {
            crate::HyperlightError::Error("unknown memory access type".to_string())
        })
    }
}
155
// Only used for debugging; identifies what each region in the sandbox
// memory layout holds.
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
/// The type of memory region
pub enum MemoryRegionType {
    /// The region contains the guest's page tables
    PageTables,
    /// The region contains the guest's code
    Code,
    /// The region contains the guest's init data
    InitData,
    /// The region contains the PEB
    Peb,
    /// The region contains the Host Function Definitions
    HostFunctionDefinitions,
    /// The region contains the Input Data
    InputData,
    /// The region contains the Output Data
    OutputData,
    /// The region contains the Heap
    Heap,
    /// The region contains the Guard Page
    GuardPage,
    /// The region contains the Stack
    Stack,
}
181
/// represents a single memory region inside the guest. All memory within a region has
/// the same memory permissions
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct MemoryRegion {
    /// the range of guest memory addresses (start inclusive, end exclusive)
    pub guest_region: Range<usize>,
    /// the range of host memory addresses (start inclusive, end exclusive)
    pub host_region: Range<usize>,
    /// memory access flags for the given region
    pub flags: MemoryRegionFlags,
    /// the type of memory region
    pub region_type: MemoryRegionType,
}
195
/// Incrementally builds a contiguous vector of [`MemoryRegion`]s: each newly
/// pushed region begins exactly where the previous one ended.
pub(crate) struct MemoryRegionVecBuilder {
    // guest physical address at which the first region starts
    guest_base_phys_addr: usize,
    // host virtual address at which the first region starts
    host_base_virt_addr: usize,
    // regions accumulated so far, in push order
    regions: Vec<MemoryRegion>,
}
201
202impl MemoryRegionVecBuilder {
203    pub(crate) fn new(guest_base_phys_addr: usize, host_base_virt_addr: usize) -> Self {
204        Self {
205            guest_base_phys_addr,
206            host_base_virt_addr,
207            regions: Vec::new(),
208        }
209    }
210
211    fn push(
212        &mut self,
213        size: usize,
214        flags: MemoryRegionFlags,
215        region_type: MemoryRegionType,
216    ) -> usize {
217        if self.regions.is_empty() {
218            let guest_end = self.guest_base_phys_addr + size;
219            let host_end = self.host_base_virt_addr + size;
220            self.regions.push(MemoryRegion {
221                guest_region: self.guest_base_phys_addr..guest_end,
222                host_region: self.host_base_virt_addr..host_end,
223                flags,
224                region_type,
225            });
226            return guest_end - self.guest_base_phys_addr;
227        }
228
229        #[allow(clippy::unwrap_used)]
230        // we know this is safe because we check if the regions are empty above
231        let last_region = self.regions.last().unwrap();
232        let new_region = MemoryRegion {
233            guest_region: last_region.guest_region.end..last_region.guest_region.end + size,
234            host_region: last_region.host_region.end..last_region.host_region.end + size,
235            flags,
236            region_type,
237        };
238        let ret = new_region.guest_region.end;
239        self.regions.push(new_region);
240        ret - self.guest_base_phys_addr
241    }
242
243    /// Pushes a memory region with the given size. Will round up the size to the nearest page.
244    /// Returns the current size of the all memory regions in the builder after adding the given region.
245    /// # Note:
246    /// Memory regions pushed MUST match the guest's memory layout, in SandboxMemoryLayout::new(..)
247    pub(crate) fn push_page_aligned(
248        &mut self,
249        size: usize,
250        flags: MemoryRegionFlags,
251        region_type: MemoryRegionType,
252    ) -> usize {
253        let aligned_size = (size + PAGE_SIZE_USIZE - 1) & !(PAGE_SIZE_USIZE - 1);
254        self.push(aligned_size, flags, region_type)
255    }
256
257    /// Consumes the builder and returns a vec of memory regions. The regions are guaranteed to be a contiguous chunk
258    /// of memory, in other words, there will be any memory gaps between them.
259    pub(crate) fn build(self) -> Vec<MemoryRegion> {
260        self.regions
261    }
262}
263
#[cfg(mshv)]
impl From<MemoryRegion> for mshv_user_mem_region {
    /// Converts a [`MemoryRegion`] into the mshv ioctl representation,
    /// translating the region's permission flags into the hypervisor's
    /// mapping flags. The flag encoding differs between mshv2 and mshv3.
    fn from(region: MemoryRegion) -> Self {
        // Size in bytes, guest page-frame number, and host userspace address
        // of the backing memory.
        let size = (region.guest_region.end - region.guest_region.start) as u64;
        let guest_pfn = region.guest_region.start as u64 >> PAGE_SHIFT;
        let userspace_addr = region.host_region.start as u64;

        #[cfg(mshv2)]
        {
            // OR together the HV_MAP_GPA_* bit for each set permission flag.
            // NOTE(review): bitflags `iter()` yields only the set bits, so an
            // empty flag set yields nothing and the NONE arm appears
            // unreachable — confirm.
            let flags = region.flags.iter().fold(0, |acc, flag| {
                let flag_value = match flag {
                    MemoryRegionFlags::NONE => HV_MAP_GPA_PERMISSIONS_NONE,
                    MemoryRegionFlags::READ => HV_MAP_GPA_READABLE,
                    MemoryRegionFlags::WRITE => HV_MAP_GPA_WRITABLE,
                    MemoryRegionFlags::EXECUTE => HV_MAP_GPA_EXECUTABLE,
                    _ => 0, // ignore any unknown flags
                };
                acc | flag_value
            });
            mshv_user_mem_region {
                guest_pfn,
                size,
                userspace_addr,
                flags,
            }
        }
        #[cfg(mshv3)]
        {
            // mshv3 uses MSHV_SET_MEM_BIT_* bit positions instead; READ maps
            // to no bit (readability is implied by mapping the region).
            // NOTE(review): as above, the NONE/UNMAP arm appears unreachable
            // via `iter()` — confirm.
            let flags: u8 = region.flags.iter().fold(0, |acc, flag| {
                let flag_value = match flag {
                    MemoryRegionFlags::NONE => 1 << MSHV_SET_MEM_BIT_UNMAP,
                    MemoryRegionFlags::READ => 0,
                    MemoryRegionFlags::WRITE => 1 << MSHV_SET_MEM_BIT_WRITABLE,
                    MemoryRegionFlags::EXECUTE => 1 << MSHV_SET_MEM_BIT_EXECUTABLE,
                    _ => 0, // ignore any unknown flags
                };
                acc | flag_value
            });

            mshv_user_mem_region {
                guest_pfn,
                size,
                userspace_addr,
                flags,
                ..Default::default()
            }
        }
    }
}
313
#[cfg(kvm)]
impl From<MemoryRegion> for kvm_bindings::kvm_userspace_memory_region {
    /// Converts a [`MemoryRegion`] into the KVM memory-slot representation.
    /// Writable regions are mapped RWX (flags = 0); non-writable regions are
    /// mapped with `KVM_MEM_READONLY` (which is still executable, i.e. RX).
    fn from(region: MemoryRegion) -> Self {
        let writable = region.flags.contains(MemoryRegionFlags::WRITE);

        kvm_userspace_memory_region {
            slot: 0,
            guest_phys_addr: region.guest_region.start as u64,
            memory_size: (region.guest_region.end - region.guest_region.start) as u64,
            userspace_addr: region.host_region.start as u64,
            flags: if writable {
                // RWX
                0
            } else {
                // Note: KVM_MEM_READONLY is executable, so this is RX
                KVM_MEM_READONLY
            },
        }
    }
}