1use std::collections::{BTreeMap, HashMap};
18use std::sync::atomic::{AtomicU64, Ordering};
19
20use hyperlight_common::layout::{scratch_base_gpa, scratch_base_gva};
21use hyperlight_common::vmem;
22use hyperlight_common::vmem::{
23 BasicMapping, CowMapping, Mapping, MappingKind, PAGE_SIZE, SpaceAwareMapping, SpaceId, TableOps,
24};
25use tracing::{Span, instrument};
26
27use crate::HyperlightError::MemoryRegionSizeMismatch;
28use crate::Result;
29use crate::hypervisor::regs::CommonSpecialRegisters;
30use crate::mem::exe::{ExeInfo, LoadInfo};
31use crate::mem::layout::SandboxMemoryLayout;
32use crate::mem::memory_region::{GuestMemoryRegion, MemoryRegion, MemoryRegionFlags};
33use crate::mem::mgr::{GuestPageTableBuffer, SnapshotSharedMemory};
34use crate::mem::shared_mem::{ReadonlySharedMemory, SharedMemory};
35use crate::sandbox::SandboxConfiguration;
36use crate::sandbox::uninitialized::{GuestBinary, GuestEnvironment};
37
/// Monotonic counter used to hand out unique sandbox ids; `Snapshot::from_env`
/// takes the next value with a relaxed `fetch_add`.
pub(super) static SANDBOX_CONFIGURATION_COUNTER: AtomicU64 = AtomicU64::new(0);

/// Size in bytes of a single page-table entry.
const PTE_SIZE: usize = size_of::<vmem::PageTableEntry>();
/// What the VM should do when it is first resumed from a snapshot.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum NextAction {
    /// Run guest initialisation; the payload is the guest address of the
    /// binary's entrypoint (see `Snapshot::from_env`).
    Initialise(u64),
    /// Resume by dispatching a call at the given guest address.
    /// NOTE(review): payload semantics inferred from the variant name —
    /// confirm against the call-dispatch code.
    Call(u64),
    /// No action; only constructed in tests.
    #[cfg(test)]
    None,
}
64
/// An immutable snapshot of a sandbox's guest memory plus the metadata
/// needed to restore and resume a VM from it.
pub struct Snapshot {
    /// Id of the sandbox this snapshot belongs to.
    sandbox_id: u64,
    /// Memory layout describing where code, PEB, page tables etc. live.
    layout: crate::mem::layout::SandboxMemoryLayout,
    /// The snapshot's memory image (read-only once created).
    memory: ReadonlySharedMemory,
    /// Extra host-mapped regions associated with this snapshot; folded into
    /// `hash` by `hash()`.
    regions: Vec<MemoryRegion>,
    /// Loader metadata produced when the guest binary was loaded.
    load_info: LoadInfo,
    /// blake3 digest of `memory` + `regions` metadata; used by `PartialEq`.
    hash: [u8; 32],
    /// Guest virtual address of the top of the (exception) stack.
    stack_top_gva: u64,

    /// Special registers captured at snapshot time; `None` for snapshots
    /// built from a binary via `from_env` (no VM has run yet).
    sregs: Option<CommonSpecialRegisters>,

    /// What to do when a VM is first resumed from this snapshot.
    entrypoint: NextAction,

    /// Generation counter distinguishing successive snapshots of a sandbox.
    snapshot_generation: u64,
}
119impl core::convert::AsRef<Snapshot> for Snapshot {
120 fn as_ref(&self) -> &Self {
121 self
122 }
123}
/// Lets the generic page-table walker in `hyperlight_common::vmem` read
/// page tables directly out of this snapshot's memory image.
impl hyperlight_common::vmem::TableReadOps for Snapshot {
    type TableAddr = u64;
    // Table addresses are plain byte addresses; an entry lives at a simple
    // additive offset from its table.
    fn entry_addr(addr: u64, offset: u64) -> u64 {
        addr + offset
    }
    // Reads one PTE from the snapshot buffer; out-of-range addresses yield 0
    // (a not-present entry) instead of panicking.
    //
    // NOTE(review): `addr` is used directly as a byte offset into
    // `self.memory`, i.e. this assumes the snapshot's GPA space maps 1:1 onto
    // the buffer starting at offset 0 — confirm against SandboxMemoryLayout.
    unsafe fn read_entry(&self, addr: u64) -> vmem::PageTableEntry {
        let addr = addr as usize;
        let Some(pte_bytes) = self.memory.as_slice().get(addr..addr + PTE_SIZE) else {
            return 0;
        };
        // The slice is exactly PTE_SIZE long, so the conversion cannot fail.
        #[allow(clippy::unwrap_used)]
        vmem::PageTableEntry::from_le_bytes(pte_bytes.try_into().unwrap())
    }
    // Table addresses already are physical addresses in this representation;
    // the casts are identity conversions kept for type-level clarity.
    #[allow(clippy::unnecessary_cast)]
    fn to_phys(addr: u64) -> vmem::PhysAddr {
        addr as vmem::PhysAddr
    }
    #[allow(clippy::unnecessary_cast)]
    fn from_phys(addr: vmem::PhysAddr) -> u64 {
        addr as u64
    }
    // Walks start from the snapshot's root page table GPA.
    fn root_table(&self) -> u64 {
        self.root_pt_gpa()
    }
}
154
155fn hash(memory: &[u8], regions: &[MemoryRegion]) -> Result<[u8; 32]> {
160 let mut hasher = blake3::Hasher::new();
161 hasher.update(memory);
162 for rgn in regions {
163 hasher.update(&usize::to_le_bytes(rgn.guest_region.start));
164 let guest_len = rgn.guest_region.end - rgn.guest_region.start;
165 #[allow(clippy::useless_conversion)]
166 let host_start_addr: usize = rgn.host_region.start.into();
167 #[allow(clippy::useless_conversion)]
168 let host_end_addr: usize = rgn.host_region.end.into();
169 hasher.update(&usize::to_le_bytes(host_start_addr));
170 let host_len = host_end_addr - host_start_addr;
171 if guest_len != host_len {
172 return Err(MemoryRegionSizeMismatch(
173 host_len,
174 guest_len,
175 format!("{:?}", rgn),
176 ));
177 }
178 hasher.update(&usize::to_le_bytes(guest_len));
182 hasher.update(&u32::to_le_bytes(rgn.flags.bits()));
183 }
184 Ok(hasher.finalize().into())
188}
189
190pub(crate) fn access_gpa<'a>(
191 snap: &'a [u8],
192 scratch: &'a [u8],
193 layout: SandboxMemoryLayout,
194 gpa: u64,
195) -> Option<(&'a [u8], usize)> {
196 let resolved = layout.resolve_gpa(gpa, &[])?.with_memories(snap, scratch);
197 Some((resolved.base.as_ref(), resolved.offset))
198}
199
/// Read-only view over a live sandbox's guest memory — split into the
/// snapshot image and the scratch region — used to walk its page tables.
pub(crate) struct SharedMemoryPageTableBuffer<'a> {
    /// Bytes of the snapshot image.
    snap: &'a [u8],
    /// Bytes of the scratch region.
    scratch: &'a [u8],
    /// Layout used to translate GPAs into (buffer, offset) pairs.
    layout: SandboxMemoryLayout,
    /// GPA of the root page table walks start from.
    root: u64,
}
206impl<'a> SharedMemoryPageTableBuffer<'a> {
207 pub(crate) fn new(
208 snap: &'a [u8],
209 scratch: &'a [u8],
210 layout: SandboxMemoryLayout,
211 root: u64,
212 ) -> Self {
213 Self {
214 snap,
215 scratch,
216 layout,
217 root,
218 }
219 }
220}
/// Lets the generic page-table walker read page tables out of a live
/// sandbox's memory, resolving each address through the layout.
impl<'a> hyperlight_common::vmem::TableReadOps for SharedMemoryPageTableBuffer<'a> {
    type TableAddr = u64;
    // An entry lives at a simple additive byte offset from its table.
    fn entry_addr(addr: u64, offset: u64) -> u64 {
        addr + offset
    }
    // Reads one PTE at the given GPA; unresolvable or out-of-range addresses
    // yield 0 (a not-present entry) instead of panicking.
    unsafe fn read_entry(&self, addr: u64) -> vmem::PageTableEntry {
        let memoff = access_gpa(self.snap, self.scratch, self.layout, addr);
        let Some(pte_bytes) = memoff.and_then(|(mem, off)| mem.get(off..off + PTE_SIZE)) else {
            return 0;
        };
        // The slice is exactly PTE_SIZE long, so the conversion cannot fail.
        #[allow(clippy::unwrap_used)]
        vmem::PageTableEntry::from_le_bytes(pte_bytes.try_into().unwrap())
    }
    // Table addresses already are physical addresses; identity casts kept
    // for type-level clarity.
    #[allow(clippy::unnecessary_cast)]
    fn to_phys(addr: u64) -> vmem::PhysAddr {
        addr as vmem::PhysAddr
    }
    #[allow(clippy::unnecessary_cast)]
    fn from_phys(addr: vmem::PhysAddr) -> u64 {
        addr as u64
    }
    fn root_table(&self) -> u64 {
        self.root
    }
}
251impl<'a> core::convert::AsRef<SharedMemoryPageTableBuffer<'a>> for SharedMemoryPageTableBuffer<'a> {
252 fn as_ref(&self) -> &Self {
253 self
254 }
255}
256fn skip_virt(virt_base: u64, scratch_gva: u64) -> bool {
261 if virt_base >= scratch_gva {
262 return true;
263 }
264 #[cfg(not(feature = "i686-guest"))]
265 if virt_base >= hyperlight_common::layout::SNAPSHOT_PT_GVA_MIN as u64
266 && virt_base <= hyperlight_common::layout::SNAPSHOT_PT_GVA_MAX as u64
267 {
268 return true;
269 }
270 #[cfg(feature = "i686-guest")]
271 let _ = virt_base;
272 false
273}
274
/// Returns the one page of guest memory backing `gpa`, or `None` if the GPA
/// cannot be resolved or the resolved slice is shorter than a page.
///
/// # Safety
/// NOTE(review): the `unsafe` contract is not stated here; presumably the
/// caller must guarantee the memories are not concurrently mutated — confirm
/// against `resolve_gpa`/`with_memories`.
unsafe fn guest_page<'a>(
    snap: &'a [u8],
    scratch: &'a [u8],
    regions: &[MemoryRegion],
    layout: SandboxMemoryLayout,
    gpa: u64,
) -> Option<&'a [u8]> {
    let resolved = layout
        .resolve_gpa(gpa, regions)?
        .with_memories(snap, scratch);
    // Refuse partial pages rather than returning a short slice.
    if resolved.as_ref().len() < PAGE_SIZE {
        return None;
    }
    Some(&resolved.as_ref()[..PAGE_SIZE])
}
297
298fn map_specials(pt_buf: &GuestPageTableBuffer, scratch_size: usize) {
299 let mapping = Mapping {
301 phys_base: scratch_base_gpa(scratch_size),
302 virt_base: scratch_base_gva(scratch_size),
303 len: scratch_size as u64,
304 kind: MappingKind::Basic(BasicMapping {
305 readable: true,
306 writable: true,
307 executable: false,
310 }),
311 user_accessible: false,
312 };
313 unsafe { vmem::map(pt_buf, mapping) };
314}
315
impl Snapshot {
    /// Builds the initial snapshot for a sandbox from a guest environment
    /// (binary plus optional init-data blob) and a configuration: lays out
    /// memory, loads the binary, writes the PEB and init data, builds page
    /// tables for every layout region, and records the entrypoint.
    ///
    /// # Errors
    /// Fails if the binary cannot be loaded, if the guest binary embeds a
    /// hyperlight version different from the host's, or on any layout error.
    pub(crate) fn from_env<'a, 'b>(
        env: impl Into<GuestEnvironment<'a, 'b>>,
        cfg: SandboxConfiguration,
    ) -> Result<Self> {
        let env = env.into();
        let mut bin = env.guest_binary;
        bin.canonicalize()?;
        let blob = env.init_data;

        let exe_info = match bin {
            GuestBinary::FilePath(bin_path_str) => ExeInfo::from_file(&bin_path_str)?,
            GuestBinary::Buffer(buffer) => ExeInfo::from_buf(buffer)?,
        };

        let host_version = env!("CARGO_PKG_VERSION");
        // Reject guests built against a different hyperlight version, when
        // the binary embeds one.
        if let Some(v) = exe_info.guest_bin_version()
            && v != host_version
        {
            return Err(crate::HyperlightError::GuestBinVersionMismatch {
                guest_bin_version: v.to_string(),
                host_version: host_version.to_string(),
            });
        }

        let guest_blob_size = blob.as_ref().map(|b| b.data.len()).unwrap_or(0);
        let guest_blob_mem_flags = blob.as_ref().map(|b| b.permissions);

        #[cfg_attr(feature = "i686-guest", allow(unused_mut))]
        let mut layout = crate::mem::layout::SandboxMemoryLayout::new(
            cfg,
            exe_info.loaded_size(),
            guest_blob_size,
            guest_blob_mem_flags,
        )?;

        let load_addr = layout.get_guest_code_address() as u64;
        let base_va = exe_info.base_va();
        let entrypoint_va: u64 = exe_info.entrypoint().into();

        let mut memory = vec![0; layout.get_memory_size()?];

        // Load the binary's image at the code offset within the buffer.
        let load_info = exe_info.load(
            load_addr.try_into()?,
            &mut memory[layout.get_guest_code_offset()..],
        )?;

        layout.write_peb(&mut memory)?;

        // Write the init-data blob, if one was supplied.
        blob.map(|x| layout.write_init_data(&mut memory, x.data))
            .transpose()?;

        let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);

        // Identity-map every layout region; writable regions become
        // copy-on-write mappings so the snapshot image itself stays intact.
        for rgn in layout.get_memory_regions_::<GuestMemoryRegion>(())?.iter() {
            let readable = rgn.flags.contains(MemoryRegionFlags::READ);
            let executable = rgn.flags.contains(MemoryRegionFlags::EXECUTE);
            let writable = rgn.flags.contains(MemoryRegionFlags::WRITE);
            let kind = if writable {
                MappingKind::Cow(CowMapping {
                    readable,
                    executable,
                })
            } else {
                MappingKind::Basic(BasicMapping {
                    readable,
                    writable: false,
                    executable,
                })
            };
            let mapping = Mapping {
                phys_base: rgn.guest_region.start as u64,
                virt_base: rgn.guest_region.start as u64,
                len: rgn.guest_region.len() as u64,
                kind,
                user_accessible: false,
            };
            unsafe { vmem::map(&pt_buf, mapping) };
        }

        map_specials(&pt_buf, layout.get_scratch_size());

        // Page tables are appended after the regular memory image.
        let pt_bytes = pt_buf.into_bytes();
        layout.set_pt_size(pt_bytes.len())?;
        memory.extend(&pt_bytes);

        let exn_stack_top_gva = hyperlight_common::layout::MAX_GVA as u64
            - hyperlight_common::layout::SCRATCH_TOP_EXN_STACK_OFFSET
            + 1;

        // A freshly built snapshot has no extra host-mapped regions.
        let extra_regions = Vec::new();
        let hash = hash(&memory, &extra_regions)?;

        Ok(Self {
            sandbox_id: SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
            memory: ReadonlySharedMemory::from_bytes(&memory)?,
            layout,
            regions: extra_regions,
            load_info,
            hash,
            stack_top_gva: exn_stack_top_gva,
            sregs: None,
            // No VM has run yet: resuming must run guest initialisation at
            // the binary's relocated entrypoint.
            entrypoint: NextAction::Initialise(load_addr + entrypoint_va - base_va),
            snapshot_generation: 0,
        })
    }

    /// Builds a compacted snapshot from a live sandbox: walks the existing
    /// page tables across all VA spaces, copies each referenced guest page
    /// once (deduplicated by physical address via `phys_seen`) into a fresh
    /// contiguous buffer, and rebuilds page tables pointing at the compacted
    /// pages. Writable mappings are downgraded to copy-on-write so the
    /// snapshot stays immutable; scratch/snapshot-PT ranges are skipped.
    #[allow(clippy::too_many_arguments)]
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn new<S: SharedMemory>(
        shared_mem: &mut SnapshotSharedMemory<S>,
        scratch_mem: &mut S,
        sandbox_id: u64,
        mut layout: SandboxMemoryLayout,
        load_info: LoadInfo,
        regions: Vec<MemoryRegion>,
        root_pt_gpas: &[u64],
        stack_top_gva: u64,
        sregs: CommonSpecialRegisters,
        entrypoint: NextAction,
        snapshot_generation: u64,
    ) -> Result<Self> {
        // phys GPA -> offset of its (single) copy in the compacted buffer.
        let mut phys_seen = HashMap::<u64, usize>::new();
        let scratch_gva = scratch_base_gva(layout.get_scratch_size());
        let memory = shared_mem.with_contents(|snap_c| {
            scratch_mem.with_contents(|scratch_c| {
                let op = SharedMemoryPageTableBuffer::new(
                    snap_c,
                    scratch_c,
                    layout,
                    root_pt_gpas.first().copied().unwrap_or(0),
                );
                let walk = unsafe {
                    vmem::walk_va_spaces(
                        &op,
                        root_pt_gpas,
                        0,
                        hyperlight_common::layout::MAX_GVA as u64,
                    )
                };

                let mut snapshot_memory: Vec<u8> = Vec::new();
                let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);
                // One root table per input VA space; the first is the
                // buffer's initial root.
                let mut root_addrs: Vec<u64> = Vec::with_capacity(root_pt_gpas.len());
                root_addrs.push(pt_buf.initial_root());
                for _ in 1..root_pt_gpas.len() {
                    root_addrs.push(unsafe { pt_buf.alloc_table() });
                }

                let mut built_roots: BTreeMap<SpaceId, u64> = BTreeMap::new();
                for (root_idx, (space_id, mappings)) in walk.into_iter().enumerate() {
                    pt_buf.set_root(root_addrs[root_idx]);
                    built_roots.insert(space_id, root_addrs[root_idx]);

                    for sam in mappings {
                        match sam {
                            SpaceAwareMapping::ThisSpace(mapping) => {
                                if skip_virt(mapping.virt_base, scratch_gva) {
                                    continue;
                                }
                                // Unresolvable/short pages are simply left
                                // out of the compacted snapshot.
                                let Some(contents) = (unsafe {
                                    guest_page(
                                        snap_c,
                                        scratch_c,
                                        &regions,
                                        layout,
                                        mapping.phys_base,
                                    )
                                }) else {
                                    continue;
                                };

                                // Downgrade writable mappings to CoW so the
                                // compacted pages are never written through.
                                let kind = match mapping.kind {
                                    MappingKind::Cow(cm) => MappingKind::Cow(cm),
                                    MappingKind::Basic(bm) if bm.writable => {
                                        MappingKind::Cow(CowMapping {
                                            readable: bm.readable,
                                            executable: bm.executable,
                                        })
                                    }
                                    MappingKind::Basic(bm) => MappingKind::Basic(BasicMapping {
                                        readable: bm.readable,
                                        writable: false,
                                        executable: bm.executable,
                                    }),
                                    MappingKind::Unmapped => continue,
                                };
                                // Copy the page only the first time this
                                // physical address is seen.
                                let new_gpa =
                                    phys_seen.entry(mapping.phys_base).or_insert_with(|| {
                                        let new_offset = snapshot_memory.len();
                                        snapshot_memory.extend(contents);
                                        new_offset + SandboxMemoryLayout::BASE_ADDRESS
                                    });

                                let compacted = Mapping {
                                    phys_base: *new_gpa as u64,
                                    virt_base: mapping.virt_base,
                                    len: PAGE_SIZE as u64,
                                    kind,
                                    user_accessible: mapping.user_accessible,
                                };
                                unsafe { vmem::map(&pt_buf, compacted) };
                            }
                            SpaceAwareMapping::AnotherSpace(ref_map) => {
                                // Cross-space references are re-linked to the
                                // roots built above.
                                unsafe {
                                    vmem::space_aware_map(&pt_buf, ref_map, &built_roots);
                                }
                            }
                        }
                    }
                }

                // Every root also gets the scratch-window mapping.
                for &root_addr in &root_addrs {
                    pt_buf.set_root(root_addr);
                    map_specials(&pt_buf, layout.get_scratch_size());
                }
                pt_buf.set_root(pt_buf.initial_root());

                let pt_data = pt_buf.into_bytes();
                layout.set_pt_size(pt_data.len())?;
                snapshot_memory.extend(&pt_data);
                Ok::<_, crate::HyperlightError>(snapshot_memory)
            })
        })???;
        let guest_visible_size = memory.len() - layout.get_pt_size();
        debug_assert!(guest_visible_size.is_multiple_of(PAGE_SIZE));
        layout.set_snapshot_size(guest_visible_size);

        // NOTE(review): the incoming `regions` vec is only used for the page
        // walk above; the stored snapshot deliberately has no extra regions.
        let regions: Vec<MemoryRegion> = Vec::new();

        let hash = hash(&memory, &regions)?;
        Ok(Self {
            sandbox_id,
            layout,
            memory: ReadonlySharedMemory::from_bytes_with_mapped_size(&memory, guest_visible_size)?,
            regions,
            load_info,
            hash,
            stack_top_gva,
            sregs: Some(sregs),
            entrypoint,
            snapshot_generation,
        })
    }

    /// Generation counter distinguishing successive snapshots of a sandbox.
    pub(crate) fn snapshot_generation(&self) -> u64 {
        self.snapshot_generation
    }

    /// Id of the sandbox this snapshot belongs to.
    pub(crate) fn sandbox_id(&self) -> u64 {
        self.sandbox_id
    }

    /// Extra host-mapped regions associated with this snapshot.
    pub(crate) fn regions(&self) -> &[MemoryRegion] {
        &self.regions
    }

    /// The snapshot's read-only memory image.
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn memory(&self) -> &ReadonlySharedMemory {
        &self.memory
    }

    /// Loader metadata recorded when the guest binary was loaded.
    pub(crate) fn load_info(&self) -> LoadInfo {
        self.load_info.clone()
    }

    /// The memory layout this snapshot was built with.
    pub(crate) fn layout(&self) -> &crate::mem::layout::SandboxMemoryLayout {
        &self.layout
    }

    /// GPA of the snapshot's root page table (from the layout).
    pub(crate) fn root_pt_gpa(&self) -> u64 {
        self.layout.get_pt_base_gpa()
    }

    /// Guest virtual address of the top of the stack.
    pub(crate) fn stack_top_gva(&self) -> u64 {
        self.stack_top_gva
    }

    /// Special registers captured at snapshot time, if any (`None` for
    /// snapshots built directly from a binary).
    pub(crate) fn sregs(&self) -> Option<&CommonSpecialRegisters> {
        self.sregs.as_ref()
    }

    /// What a VM resumed from this snapshot should do next.
    pub(crate) fn entrypoint(&self) -> NextAction {
        self.entrypoint
    }
}
667
668impl PartialEq for Snapshot {
669 fn eq(&self, other: &Snapshot) -> bool {
670 self.hash == other.hash
671 }
672}
673
#[cfg(test)]
#[cfg(not(feature = "i686-guest"))]
mod tests {
    use hyperlight_common::vmem::{self, BasicMapping, Mapping, MappingKind, PAGE_SIZE};

    use crate::hypervisor::regs::CommonSpecialRegisters;
    use crate::mem::exe::LoadInfo;
    use crate::mem::layout::SandboxMemoryLayout;
    use crate::mem::mgr::{GuestPageTableBuffer, SandboxMemoryManager, SnapshotSharedMemory};
    use crate::mem::shared_mem::{
        ExclusiveSharedMemory, HostSharedMemory, ReadonlySharedMemory, SharedMemory,
    };

    fn default_sregs() -> CommonSpecialRegisters {
        CommonSpecialRegisters::default()
    }

    // Page tables live in the page immediately after the single data page.
    const SIMPLE_PT_BASE: usize = PAGE_SIZE + SandboxMemoryLayout::BASE_ADDRESS;

    /// Builds a snapshot memory with one data page holding `contents`,
    /// identity-mapped r/w/x, followed by the page tables (incl. the scratch
    /// window via `map_specials`).
    fn make_simple_pt_mem(contents: &[u8]) -> SnapshotSharedMemory<ExclusiveSharedMemory> {
        let pt_buf = GuestPageTableBuffer::new(SIMPLE_PT_BASE);
        let mapping = Mapping {
            phys_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
            virt_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: true,
            }),
            user_accessible: false,
        };
        unsafe { vmem::map(&pt_buf, mapping) };
        super::map_specials(&pt_buf, PAGE_SIZE);
        let pt_bytes = pt_buf.into_bytes();

        // Layout: [data page | page tables].
        let mut snapshot_mem = vec![0u8; PAGE_SIZE + pt_bytes.len()];
        snapshot_mem[0..PAGE_SIZE].copy_from_slice(contents);
        snapshot_mem[PAGE_SIZE..].copy_from_slice(&pt_bytes);
        ReadonlySharedMemory::from_bytes(&snapshot_mem)
            .unwrap()
            .to_mgr_snapshot_mem()
            .unwrap()
    }

    /// Builds a manager backed by a zeroed simple-PT memory, returning it
    /// together with the page-table base GPA.
    fn make_simple_pt_mgr() -> (SandboxMemoryManager<HostSharedMemory>, u64) {
        let cfg = crate::sandbox::SandboxConfiguration::default();
        let scratch_mem = ExclusiveSharedMemory::new(cfg.get_scratch_size()).unwrap();
        let mgr = SandboxMemoryManager::new(
            SandboxMemoryLayout::new(cfg, 4096, 0x3000, None).unwrap(),
            make_simple_pt_mem(&[0u8; PAGE_SIZE]),
            scratch_mem,
            super::NextAction::None,
        );
        let (mgr, _) = mgr.build().unwrap();
        (mgr, SIMPLE_PT_BASE as u64)
    }

    // Two snapshots taken from different contents must restore independently:
    // restoring A then B yields each pattern in turn.
    #[test]
    fn multiple_snapshots_independent() {
        let (mut mgr, pt_base) = make_simple_pt_mgr();

        let pattern_a = vec![0xAA; PAGE_SIZE];
        let snapshot_a = super::Snapshot::new(
            &mut make_simple_pt_mem(&pattern_a).build().0,
            &mut mgr.scratch_mem,
            1,
            mgr.layout,
            LoadInfo::dummy(),
            Vec::new(),
            &[pt_base],
            0,
            default_sregs(),
            super::NextAction::None,
            1,
        )
        .unwrap();

        let pattern_b = vec![0xBB; PAGE_SIZE];
        let snapshot_b = super::Snapshot::new(
            &mut make_simple_pt_mem(&pattern_b).build().0,
            &mut mgr.scratch_mem,
            2,
            mgr.layout,
            LoadInfo::dummy(),
            Vec::new(),
            &[pt_base],
            0,
            default_sregs(),
            super::NextAction::None,
            2,
        )
        .unwrap();

        // Restoring A must bring back A's pattern...
        mgr.restore_snapshot(&snapshot_a).unwrap();
        mgr.shared_mem
            .with_contents(|contents| assert_eq!(&contents[0..pattern_a.len()], &pattern_a[..]))
            .unwrap();

        // ...and restoring B afterwards must bring back B's, proving the two
        // snapshots do not share mutable state.
        mgr.restore_snapshot(&snapshot_b).unwrap();
        mgr.shared_mem
            .with_contents(|contents| assert_eq!(&contents[0..pattern_b.len()], &pattern_b[..]))
            .unwrap();
    }
}
783
#[cfg(test)]
#[cfg(feature = "i686-guest")]
mod i686_tests {
    use hyperlight_common::vmem::{
        self, BasicMapping, CowMapping, Mapping, MappingKind, PAGE_SIZE,
    };

    use crate::mem::mgr::GuestPageTableBuffer;

    // Arbitrary page-table base used by all tests in this module.
    const PT_BASE: usize = 0x10_0000;

    // A single basic mapping must round-trip through virt_to_phys with its
    // physical base, virtual base and writability intact.
    #[test]
    fn map_single_page() {
        let pt = GuestPageTableBuffer::new(PT_BASE);
        let mapping = Mapping {
            phys_base: 0x2000,
            virt_base: 0x1000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: true,
                executable: true,
            }),
            user_accessible: false,
        };
        unsafe { vmem::map(&pt, mapping) };

        let results: Vec<_> =
            unsafe { vmem::virt_to_phys(&pt, 0x1000, PAGE_SIZE as u64) }.collect();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].phys_base, 0x2000);
        assert_eq!(results[0].virt_base, 0x1000);
        assert!(matches!(
            results[0].kind,
            MappingKind::Basic(BasicMapping { writable: true, .. })
        ));
    }

    // A copy-on-write mapping must come back as MappingKind::Cow.
    #[test]
    fn map_cow_page() {
        let pt = GuestPageTableBuffer::new(PT_BASE);
        let mapping = Mapping {
            phys_base: 0x3000,
            virt_base: 0x2000,
            len: PAGE_SIZE as u64,
            kind: MappingKind::Cow(CowMapping {
                readable: true,
                executable: true,
            }),
            user_accessible: false,
        };
        unsafe { vmem::map(&pt, mapping) };

        let results: Vec<_> =
            unsafe { vmem::virt_to_phys(&pt, 0x2000, PAGE_SIZE as u64) }.collect();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].phys_base, 0x3000);
        assert!(matches!(results[0].kind, MappingKind::Cow(_)));
    }

    // A 2-page mapping that straddles a page-directory boundary must still
    // translate contiguously page by page.
    #[test]
    fn map_multiple_pages_across_pd_boundary() {
        let pt = GuestPageTableBuffer::new(PT_BASE);
        // Last page of one PD entry's range, so the second page falls into
        // the next one. NOTE(review): boundary value assumes 4 MiB PD spans.
        let va_start = 0x003F_F000u64;
        let pa_start = 0x5000u64;
        let mapping = Mapping {
            phys_base: pa_start,
            virt_base: va_start,
            len: 2 * PAGE_SIZE as u64,
            kind: MappingKind::Basic(BasicMapping {
                readable: true,
                writable: false,
                executable: true,
            }),
            user_accessible: false,
        };
        unsafe { vmem::map(&pt, mapping) };

        let results: Vec<_> =
            unsafe { vmem::virt_to_phys(&pt, va_start, 2 * PAGE_SIZE as u64) }.collect();
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].phys_base, pa_start);
        assert_eq!(results[0].virt_base, va_start);
        assert_eq!(results[1].phys_base, pa_start + PAGE_SIZE as u64);
        assert_eq!(results[1].virt_base, va_start + PAGE_SIZE as u64);
    }

    // Translating an address that was never mapped must yield no results.
    #[test]
    fn virt_to_phys_unmapped_returns_empty() {
        let pt = GuestPageTableBuffer::new(PT_BASE);
        let results: Vec<_> =
            unsafe { vmem::virt_to_phys(&pt, 0x1000, PAGE_SIZE as u64) }.collect();
        assert!(results.is_empty());
    }

    // Two mappings in the same PT range must share one page table: the
    // buffer ends up holding exactly the directory plus one table.
    #[test]
    fn map_reuses_existing_page_table() {
        let pt = GuestPageTableBuffer::new(PT_BASE);
        unsafe {
            vmem::map(
                &pt,
                Mapping {
                    phys_base: 0x1000,
                    virt_base: 0x1000,
                    len: PAGE_SIZE as u64,
                    kind: MappingKind::Basic(BasicMapping {
                        readable: true,
                        writable: true,
                        executable: true,
                    }),
                    user_accessible: false,
                },
            );
            vmem::map(
                &pt,
                Mapping {
                    phys_base: 0x5000,
                    virt_base: 0x5000,
                    len: PAGE_SIZE as u64,
                    kind: MappingKind::Basic(BasicMapping {
                        readable: true,
                        writable: true,
                        executable: true,
                    }),
                    user_accessible: false,
                },
            );
        }
        let r1: Vec<_> = unsafe { vmem::virt_to_phys(&pt, 0x1000, PAGE_SIZE as u64) }.collect();
        let r2: Vec<_> = unsafe { vmem::virt_to_phys(&pt, 0x5000, PAGE_SIZE as u64) }.collect();
        assert_eq!(r1.len(), 1);
        assert_eq!(r2.len(), 1);
        assert_eq!(r1[0].phys_base, 0x1000);
        assert_eq!(r2[0].phys_base, 0x5000);
        // Exactly two pages of tables: no duplicate table was allocated.
        assert_eq!(pt.size(), 2 * PAGE_SIZE);
    }
}
924}