1use std::ops::Range;
18
19use bitflags::bitflags;
20#[cfg(mshv3)]
21use hyperlight_common::mem::PAGE_SHIFT;
22use hyperlight_common::mem::PAGE_SIZE_USIZE;
23#[cfg(kvm)]
24use kvm_bindings::{KVM_MEM_READONLY, kvm_userspace_memory_region};
25#[cfg(mshv3)]
26use mshv_bindings::{
27 MSHV_SET_MEM_BIT_EXECUTABLE, MSHV_SET_MEM_BIT_UNMAP, MSHV_SET_MEM_BIT_WRITABLE,
28};
29#[cfg(all(mshv3, target_arch = "aarch64"))]
30use mshv_bindings::{hv_arm64_memory_intercept_message, mshv_user_mem_region};
31#[cfg(all(mshv3, target_arch = "x86_64"))]
32use mshv_bindings::{hv_x64_memory_intercept_message, mshv_user_mem_region};
33#[cfg(target_os = "windows")]
34use windows::Win32::System::Hypervisor::{self, WHV_MEMORY_ACCESS_TYPE};
35
36#[cfg(target_os = "windows")]
37use crate::hypervisor::wrappers::HandleWrapper;
38
/// Default memory permissions applied to a user-supplied guest blob: read-only.
pub(crate) const DEFAULT_GUEST_BLOB_MEM_FLAGS: MemoryRegionFlags = MemoryRegionFlags::READ;
40
bitflags! {
    /// Page-permission flags for a guest memory region.
    ///
    /// Hypervisor-agnostic; each backend translates these into its own
    /// representation via the `From`/`TryFrom` impls in this module.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct MemoryRegionFlags: u32 {
        /// No access (the empty flag set).
        const NONE = 0;
        /// Region is readable.
        const READ = 1;
        /// Region is writable.
        const WRITE = 2;
        /// Region is executable.
        const EXECUTE = 4;
    }
}
55
56impl std::fmt::Display for MemoryRegionFlags {
57 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
58 if self.is_empty() {
59 write!(f, "NONE")
60 } else {
61 let mut first = true;
62 if self.contains(MemoryRegionFlags::READ) {
63 write!(f, "READ")?;
64 first = false;
65 }
66 if self.contains(MemoryRegionFlags::WRITE) {
67 if !first {
68 write!(f, " | ")?;
69 }
70 write!(f, "WRITE")?;
71 first = false;
72 }
73 if self.contains(MemoryRegionFlags::EXECUTE) {
74 if !first {
75 write!(f, " | ")?;
76 }
77 write!(f, "EXECUTE")?;
78 }
79 Ok(())
80 }
81 }
82}
83
#[cfg(target_os = "windows")]
impl TryFrom<WHV_MEMORY_ACCESS_TYPE> for MemoryRegionFlags {
    type Error = crate::HyperlightError;

    /// Maps a Windows Hypervisor Platform memory-access type to the
    /// corresponding single permission flag.
    ///
    /// Returns an error for any access type other than read, write, or
    /// execute.
    fn try_from(flags: WHV_MEMORY_ACCESS_TYPE) -> crate::Result<Self> {
        match flags {
            Hypervisor::WHvMemoryAccessRead => Ok(MemoryRegionFlags::READ),
            Hypervisor::WHvMemoryAccessWrite => Ok(MemoryRegionFlags::WRITE),
            Hypervisor::WHvMemoryAccessExecute => Ok(MemoryRegionFlags::EXECUTE),
            _ => Err(crate::HyperlightError::Error(
                "unknown memory access type".to_string(),
            )),
        }
    }
}
99
#[cfg(all(mshv3, target_arch = "x86_64"))]
impl TryFrom<hv_x64_memory_intercept_message> for MemoryRegionFlags {
    type Error = crate::HyperlightError;

    /// Maps an x86-64 MSHV memory-intercept message to the permission flag
    /// for the access that triggered it: access type 0 is read, 1 is write,
    /// 2 is execute; anything else is an error.
    fn try_from(msg: hv_x64_memory_intercept_message) -> crate::Result<Self> {
        match msg.header.intercept_access_type {
            0 => Ok(MemoryRegionFlags::READ),
            1 => Ok(MemoryRegionFlags::WRITE),
            2 => Ok(MemoryRegionFlags::EXECUTE),
            _ => Err(crate::HyperlightError::Error(
                "unknown memory access type".to_string(),
            )),
        }
    }
}
116
#[cfg(all(mshv3, target_arch = "aarch64"))]
impl TryFrom<hv_arm64_memory_intercept_message> for MemoryRegionFlags {
    type Error = crate::HyperlightError;

    /// Not yet implemented on aarch64; always panics via `unimplemented!`.
    fn try_from(_msg: hv_arm64_memory_intercept_message) -> crate::Result<Self> {
        unimplemented!("try_from")
    }
}
125
/// Classifies what a sandbox memory region is used for.
///
/// NOTE(review): variant meanings below are inferred from the names —
/// confirm against the code that constructs each region.
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryRegionType {
    /// Guest executable code.
    Code,
    /// Initial data blob.
    InitData,
    /// PEB (environment block for the guest).
    Peb,
    /// Guest heap.
    Heap,
    /// Scratch / working memory.
    Scratch,
    /// Snapshot storage.
    Snapshot,
    /// A memory-mapped file (backed by a read-only file mapping on Windows;
    /// see `surrogate_mapping`).
    MappedFile,
}
151
#[cfg(target_os = "windows")]
impl MemoryRegionType {
    /// Selects the `SurrogateMapping` used for this region type:
    /// memory-mapped files get a read-only file mapping, every other
    /// region type uses the sandbox memory mapping.
    pub fn surrogate_mapping(&self) -> SurrogateMapping {
        if matches!(self, MemoryRegionType::MappedFile) {
            SurrogateMapping::ReadOnlyFile
        } else {
            SurrogateMapping::SandboxMemory
        }
    }
}
166
/// Abstracts over how the host side of a memory region is addressed, so the
/// same `MemoryRegion_` struct can describe host+guest regions, guest-only
/// regions, and (under `crashdump`) crash-dump regions.
pub trait MemoryRegionKind {
    /// The type used for host-side base addresses in this kind of region.
    type HostBaseType: Copy;

    /// Returns `base` advanced by `size` bytes, in this kind's host
    /// address representation.
    fn add(base: Self::HostBaseType, size: usize) -> Self::HostBaseType;
}
185
/// Marker type for regions that have both a guest address and a host-side
/// base; the host base representation differs per OS (see the
/// platform-specific `MemoryRegionKind` impls).
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub struct HostGuestMemoryRegion {}
196
197#[cfg(not(target_os = "windows"))]
198impl MemoryRegionKind for HostGuestMemoryRegion {
199 type HostBaseType = usize;
200
201 fn add(base: Self::HostBaseType, size: usize) -> Self::HostBaseType {
202 base + size
203 }
204}
/// How a region is backed when exposed through the surrogate process on
/// Windows.
#[cfg(target_os = "windows")]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum SurrogateMapping {
    /// Backed by the sandbox's main memory mapping.
    SandboxMemory,
    /// Backed by a read-only file mapping.
    ReadOnlyFile,
}
220
/// Host-side base of a region on Windows: an offset into a mapping created
/// from a handle, rather than a raw virtual address. The effective address
/// is `handle_base + offset` (see the `From<HostRegionBase> for usize` impl).
#[cfg(target_os = "windows")]
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct HostRegionBase {
    /// The handle the mapping was created from.
    pub from_handle: HandleWrapper,
    /// Base address of the mapping.
    pub handle_base: usize,
    /// Size of the mapping. NOTE(review): not used in the address
    /// computation here — presumably bounds metadata; confirm at call sites.
    pub handle_size: usize,
    /// Offset of this region's base within the mapping.
    pub offset: usize,
}
#[cfg(target_os = "windows")]
impl std::hash::Hash for HostRegionBase {
    /// Manual `Hash` covering every field except `from_handle`.
    ///
    /// NOTE(review): excluding the handle looks deliberate (handle values
    /// are process-local) — confirm. Skipping a field only coarsens the
    /// hash, so this remains consistent with the derived `PartialEq`/`Eq`:
    /// equal values still hash equally.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.handle_base.hash(state);
        self.handle_size.hash(state);
        self.offset.hash(state);
    }
}
#[cfg(target_os = "windows")]
impl From<HostRegionBase> for usize {
    /// The effective host address: mapping base plus region offset.
    fn from(x: HostRegionBase) -> usize {
        x.handle_base + x.offset
    }
}
#[cfg(target_os = "windows")]
impl TryFrom<HostRegionBase> for isize {
    type Error = <isize as TryFrom<usize>>::Error;

    /// Converts the effective host address (`handle_base + offset`) to
    /// `isize`, failing if it does not fit.
    fn try_from(x: HostRegionBase) -> Result<isize, Self::Error> {
        let addr: usize = x.into();
        isize::try_from(addr)
    }
}
#[cfg(target_os = "windows")]
impl MemoryRegionKind for HostGuestMemoryRegion {
    // On Windows a host base is a (handle, offset) pair rather than a raw
    // address.
    type HostBaseType = HostRegionBase;

    /// Advances the region base by `size` bytes by bumping the offset
    /// within the same mapping; all other fields are carried over.
    fn add(base: Self::HostBaseType, size: usize) -> Self::HostBaseType {
        HostRegionBase {
            offset: base.offset + size,
            ..base
        }
    }
}
276
/// Marker type for regions that exist only in the guest's address space
/// (no host-side base; `HostBaseType = ()`).
#[cfg_attr(feature = "nanvix-unstable", allow(dead_code))]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub(crate) struct GuestMemoryRegion {}
282
impl MemoryRegionKind for GuestMemoryRegion {
    // Guest-only regions carry no host address, so the base is the unit type.
    type HostBaseType = ();

    /// Advancing a unit base is a no-op.
    fn add(_base: Self::HostBaseType, _size: usize) -> Self::HostBaseType {}
}
288
/// A contiguous memory region, generic over how (or whether) its host side
/// is addressed.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct MemoryRegion_<K: MemoryRegionKind> {
    /// Guest physical address range covered by this region.
    pub guest_region: Range<usize>,
    /// Host-side range backing this region (representation depends on `K`).
    pub host_region: Range<K::HostBaseType>,
    /// Access permissions for the region.
    pub flags: MemoryRegionFlags,
    /// What the region is used for.
    pub region_type: MemoryRegionType,
}
304
/// The common case: a region with both host and guest addresses.
pub type MemoryRegion = MemoryRegion_<HostGuestMemoryRegion>;
307
/// Marker type for regions as recorded for crash-dump generation.
#[cfg(crashdump)]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub(crate) struct CrashDumpMemoryRegion;

#[cfg(crashdump)]
impl MemoryRegionKind for CrashDumpMemoryRegion {
    // Crash-dump regions address host memory directly by virtual address.
    type HostBaseType = usize;

    /// Advances a host virtual address by `size` bytes.
    fn add(base: Self::HostBaseType, size: usize) -> Self::HostBaseType {
        base + size
    }
}

/// A memory region as captured in a crash dump.
#[cfg(crashdump)]
pub(crate) type CrashDumpRegion = MemoryRegion_<CrashDumpMemoryRegion>;
331
#[cfg(all(crashdump, feature = "nanvix-unstable"))]
impl HostGuestMemoryRegion {
    /// Flattens a host base into a plain address.
    ///
    /// On non-Windows the base already *is* a `usize`; on Windows it is a
    /// `HostRegionBase`, converted via its `From<HostRegionBase> for usize`
    /// impl (`handle_base + offset`).
    pub(crate) fn to_addr(val: <Self as MemoryRegionKind>::HostBaseType) -> usize {
        #[cfg(not(target_os = "windows"))]
        {
            val
        }
        #[cfg(target_os = "windows")]
        {
            val.into()
        }
    }
}
351
/// Incrementally lays out contiguous regions: each pushed region starts
/// exactly where the previous one ended, in both guest and host space.
#[cfg_attr(feature = "nanvix-unstable", allow(unused))]
pub(crate) struct MemoryRegionVecBuilder<K: MemoryRegionKind> {
    // Guest physical address at which the first region starts.
    guest_base_phys_addr: usize,
    // Host base corresponding to `guest_base_phys_addr`.
    host_base_virt_addr: K::HostBaseType,
    // Regions laid out so far, in address order.
    regions: Vec<MemoryRegion_<K>>,
}
358
359impl<K: MemoryRegionKind> MemoryRegionVecBuilder<K> {
360 pub(crate) fn new(guest_base_phys_addr: usize, host_base_virt_addr: K::HostBaseType) -> Self {
361 Self {
362 guest_base_phys_addr,
363 host_base_virt_addr,
364 regions: Vec::new(),
365 }
366 }
367
368 fn push(
369 &mut self,
370 size: usize,
371 flags: MemoryRegionFlags,
372 region_type: MemoryRegionType,
373 ) -> usize {
374 if self.regions.is_empty() {
375 let guest_end = self.guest_base_phys_addr + size;
376 let host_end = <K as MemoryRegionKind>::add(self.host_base_virt_addr, size);
377 self.regions.push(MemoryRegion_ {
378 guest_region: self.guest_base_phys_addr..guest_end,
379 host_region: self.host_base_virt_addr..host_end,
380 flags,
381 region_type,
382 });
383 return guest_end - self.guest_base_phys_addr;
384 }
385
386 #[allow(clippy::unwrap_used)]
387 let last_region = self.regions.last().unwrap();
389 let host_end = <K as MemoryRegionKind>::add(last_region.host_region.end, size);
390 let new_region = MemoryRegion_ {
391 guest_region: last_region.guest_region.end..last_region.guest_region.end + size,
392 host_region: last_region.host_region.end..host_end,
393 flags,
394 region_type,
395 };
396 let ret = new_region.guest_region.end;
397 self.regions.push(new_region);
398 ret - self.guest_base_phys_addr
399 }
400
401 pub(crate) fn push_page_aligned(
406 &mut self,
407 size: usize,
408 flags: MemoryRegionFlags,
409 region_type: MemoryRegionType,
410 ) -> usize {
411 let aligned_size = (size + PAGE_SIZE_USIZE - 1) & !(PAGE_SIZE_USIZE - 1);
412 self.push(aligned_size, flags, region_type)
413 }
414
415 pub(crate) fn build(self) -> Vec<MemoryRegion_<K>> {
418 self.regions
419 }
420}
421
#[cfg(mshv3)]
impl From<&MemoryRegion> for mshv_user_mem_region {
    /// Translates a `MemoryRegion` into an MSHV user memory region: the
    /// guest start becomes a page frame number, and the permission flags
    /// are folded into MSHV's flag byte (READ contributes no bit).
    fn from(region: &MemoryRegion) -> Self {
        let size = (region.guest_region.end - region.guest_region.start) as u64;
        let guest_pfn = region.guest_region.start as u64 >> PAGE_SHIFT;
        let userspace_addr = region.host_region.start as u64;

        // Fold each set flag into the MSHV flag byte.
        //
        // NOTE(review): bitflags iteration yields only *set named* flags,
        // and NONE has value 0, so the NONE arm (the UNMAP bit) appears
        // unreachable — confirm whether UNMAP is meant to be set for
        // permissionless regions, or whether that arm is dead code.
        let flags: u8 = region.flags.iter().fold(0, |acc, flag| {
            let flag_value = match flag {
                MemoryRegionFlags::NONE => 1 << MSHV_SET_MEM_BIT_UNMAP,
                MemoryRegionFlags::READ => 0,
                MemoryRegionFlags::WRITE => 1 << MSHV_SET_MEM_BIT_WRITABLE,
                MemoryRegionFlags::EXECUTE => 1 << MSHV_SET_MEM_BIT_EXECUTABLE,
                // Any other (compound/unknown) flag contributes nothing.
                _ => 0,
            };
            acc | flag_value
        });

        mshv_user_mem_region {
            guest_pfn,
            size,
            userspace_addr,
            flags,
            ..Default::default()
        }
    }
}
449
#[cfg(kvm)]
impl From<&MemoryRegion> for kvm_bindings::kvm_userspace_memory_region {
    /// Translates a `MemoryRegion` into a KVM userspace memory region.
    ///
    /// The only permission expressed here is writable vs. read-only: the
    /// region is writable exactly when its flags include `WRITE`, and
    /// `KVM_MEM_READONLY` is set otherwise. (Intersecting with RWX first,
    /// as a mask, changes nothing — `contains(WRITE)` alone is equivalent.)
    fn from(region: &MemoryRegion) -> Self {
        let writable = region.flags.contains(MemoryRegionFlags::WRITE);

        kvm_userspace_memory_region {
            slot: 0,
            guest_phys_addr: region.guest_region.start as u64,
            memory_size: (region.guest_region.end - region.guest_region.start) as u64,
            userspace_addr: region.host_region.start as u64,
            flags: if writable { 0 } else { KVM_MEM_READONLY },
        }
    }
}