use std::sync::atomic::{AtomicU64, Ordering};
use hyperlight_common::layout::{scratch_base_gpa, scratch_base_gva};
use hyperlight_common::vmem::{self, BasicMapping, CowMapping, Mapping, MappingKind, PAGE_SIZE};
use tracing::{Span, instrument};
use crate::HyperlightError::MemoryRegionSizeMismatch;
use crate::Result;
use crate::hypervisor::regs::CommonSpecialRegisters;
use crate::mem::exe::LoadInfo;
use crate::mem::layout::SandboxMemoryLayout;
use crate::mem::memory_region::MemoryRegion;
use crate::mem::mgr::{GuestPageTableBuffer, SnapshotSharedMemory};
use crate::mem::shared_mem::{ReadonlySharedMemory, SharedMemory};
use crate::sandbox::SandboxConfiguration;
use crate::sandbox::uninitialized::{GuestBinary, GuestEnvironment};
// Global monotonic counter used by `Snapshot::from_env` to hand out unique
// sandbox ids. Relaxed ordering is sufficient: only uniqueness matters, not
// ordering relative to other memory operations.
pub(super) static SANDBOX_CONFIGURATION_COUNTER: AtomicU64 = AtomicU64::new(0);
/// What the vCPU should do next when execution resumes from a snapshot.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum NextAction {
/// Run guest initialisation starting at the given guest address
/// (set by `Snapshot::from_env` to load address + entrypoint offset).
Initialise(u64),
/// Resume at a call entrypoint at the given guest address.
Call(u64),
/// Test-only placeholder: no next action recorded.
#[cfg(test)]
None,
}
/// An immutable, content-hashed image of a sandbox's memory plus the
/// metadata needed to restore and resume it.
pub struct Snapshot {
/// Id of the sandbox this snapshot belongs to.
sandbox_id: u64,
/// Memory layout describing where regions live inside `memory`.
layout: crate::mem::layout::SandboxMemoryLayout,
/// The read-only snapshot memory image (pages followed by page tables).
memory: ReadonlySharedMemory,
/// Extra host-mapped memory regions recorded with the snapshot
/// (empty for snapshots produced by `from_env` and `new`).
regions: Vec<MemoryRegion>,
/// Information produced when the guest executable was loaded.
load_info: LoadInfo,
/// BLAKE3 digest of `memory` + region metadata; used for `PartialEq`.
hash: [u8; 32],
/// Guest-virtual address of the top of the (exception) stack.
stack_top_gva: u64,
/// Saved special registers; `None` for a fresh `from_env` snapshot.
sregs: Option<CommonSpecialRegisters>,
/// What to run when this snapshot is restored.
entrypoint: NextAction,
}
// Identity AsRef so APIs taking `impl AsRef<Snapshot>` accept `&Snapshot`.
impl core::convert::AsRef<Snapshot> for Snapshot {
fn as_ref(&self) -> &Self {
self
}
}
// Lets the vmem table-walking code read page tables stored inside the
// snapshot's own memory image; table addresses are byte offsets into it.
impl hyperlight_common::vmem::TableReadOps for Snapshot {
    type TableAddr = u64;

    fn entry_addr(addr: u64, offset: u64) -> u64 {
        addr + offset
    }

    /// Reads the 8-byte page-table entry at `addr` from the snapshot memory.
    ///
    /// Addresses outside the snapshot buffer read as 0 (a non-present entry).
    /// That includes addresses that do not fit in `usize` (previously `addr
    /// as usize` silently truncated on 32-bit hosts and could read the wrong
    /// entry) and addresses whose 8-byte span would overflow (previously
    /// `addr + 8` could panic in debug builds).
    unsafe fn read_entry(&self, addr: u64) -> u64 {
        let Ok(start) = usize::try_from(addr) else {
            return 0;
        };
        let Some(end) = start.checked_add(8) else {
            return 0;
        };
        let Some(pte_bytes) = self.memory.as_slice().get(start..end) else {
            return 0;
        };
        // The slice is exactly 8 bytes by construction of the range above.
        #[allow(clippy::unwrap_used)]
        let n: [u8; 8] = pte_bytes.try_into().unwrap();
        u64::from_ne_bytes(n)
    }

    // Snapshot table addresses are already physical: no translation needed.
    fn to_phys(addr: u64) -> u64 {
        addr
    }

    fn from_phys(addr: u64) -> u64 {
        addr
    }

    /// The page-table root lives at the layout's PT base GPA.
    fn root_table(&self) -> u64 {
        self.root_pt_gpa()
    }
}
/// Computes a BLAKE3 digest over the snapshot memory followed by the metadata
/// of each extra region: guest start, host start, length, then flags, all in
/// little-endian byte order.
///
/// # Errors
/// Returns `MemoryRegionSizeMismatch` when a region's guest and host spans
/// differ in length.
fn hash(memory: &[u8], regions: &[MemoryRegion]) -> Result<[u8; 32]> {
    let mut digest = blake3::Hasher::new();
    digest.update(memory);
    for region in regions {
        digest.update(&region.guest_region.start.to_le_bytes());
        let guest_len = region.guest_region.end - region.guest_region.start;
        #[allow(clippy::useless_conversion)]
        let host_start: usize = region.host_region.start.into();
        #[allow(clippy::useless_conversion)]
        let host_end: usize = region.host_region.end.into();
        digest.update(&host_start.to_le_bytes());
        let host_len = host_end - host_start;
        // Guest and host spans must describe the same number of bytes.
        if guest_len != host_len {
            return Err(MemoryRegionSizeMismatch(
                host_len,
                guest_len,
                format!("{:?}", region),
            ));
        }
        digest.update(&guest_len.to_le_bytes());
        digest.update(&region.flags.bits().to_le_bytes());
    }
    Ok(digest.finalize().into())
}
/// Resolves guest-physical address `gpa` to a backing byte slice plus an
/// offset within it, selecting between the snapshot image (`snap`) and the
/// scratch memory (`scratch`) according to `layout`.
///
/// Returns `None` when the layout cannot resolve the GPA.
pub(crate) fn access_gpa<'a>(
    snap: &'a [u8],
    scratch: &'a [u8],
    layout: SandboxMemoryLayout,
    gpa: u64,
) -> Option<(&'a [u8], usize)> {
    let located = layout.resolve_gpa(gpa, &[])?;
    let backed = located.with_memories(snap, scratch);
    Some((backed.base.as_ref(), backed.offset))
}
/// A read-only view over a sandbox's snapshot + scratch memory through which
/// its guest page tables (rooted at `root`) can be walked.
pub(crate) struct SharedMemoryPageTableBuffer<'a> {
/// Bytes of the snapshot memory image.
snap: &'a [u8],
/// Bytes of the scratch memory.
scratch: &'a [u8],
/// Layout used to resolve GPAs into `snap`/`scratch`.
layout: SandboxMemoryLayout,
/// GPA of the page-table root.
root: u64,
}
impl<'a> SharedMemoryPageTableBuffer<'a> {
pub(crate) fn new(
snap: &'a [u8],
scratch: &'a [u8],
layout: SandboxMemoryLayout,
root: u64,
) -> Self {
Self {
snap,
scratch,
layout,
root,
}
}
}
// Walks guest page tables through the split snapshot/scratch address space,
// resolving each GPA via the sandbox layout before reading.
impl<'a> hyperlight_common::vmem::TableReadOps for SharedMemoryPageTableBuffer<'a> {
    type TableAddr = u64;

    fn entry_addr(addr: u64, offset: u64) -> u64 {
        addr + offset
    }

    /// Reads the 8-byte page-table entry at GPA `addr`.
    ///
    /// Unresolvable GPAs, reads past the end of the backing memory, and
    /// offsets whose 8-byte span would overflow all read as 0 (a non-present
    /// entry). Previously `off + 8` could overflow `usize` and panic in
    /// debug builds instead of falling back to 0 like the other guards.
    unsafe fn read_entry(&self, addr: u64) -> u64 {
        let resolved = access_gpa(self.snap, self.scratch, self.layout, addr);
        let Some(pte_bytes) = resolved.and_then(|(mem, off)| {
            let end = off.checked_add(8)?;
            mem.get(off..end)
        }) else {
            return 0;
        };
        // The slice is exactly 8 bytes by construction of the range above.
        #[allow(clippy::unwrap_used)]
        let n: [u8; 8] = pte_bytes.try_into().unwrap();
        u64::from_ne_bytes(n)
    }

    // Table addresses are already physical: no translation needed.
    fn to_phys(addr: u64) -> u64 {
        addr
    }

    fn from_phys(addr: u64) -> u64 {
        addr
    }

    fn root_table(&self) -> u64 {
        self.root
    }
}
// Identity AsRef so APIs taking `impl AsRef<SharedMemoryPageTableBuffer>`
// accept a plain reference.
impl<'a> core::convert::AsRef<SharedMemoryPageTableBuffer<'a>> for SharedMemoryPageTableBuffer<'a> {
fn as_ref(&self) -> &Self {
self
}
}
// Collects every live (mapping, page contents) pair reachable from the page
// tables rooted at `root_pt`, excluding the scratch window and (on non-nanvix
// builds) the snapshot page-table GVA window itself.
fn filtered_mappings<'a>(
snap: &'a [u8],
scratch: &'a [u8],
regions: &[MemoryRegion],
layout: SandboxMemoryLayout,
root_pt: u64,
) -> Vec<(Mapping, &'a [u8])> {
let op = SharedMemoryPageTableBuffer::new(snap, scratch, layout, root_pt);
// SAFETY: NOTE(review) relies on `root_pt` addressing valid page tables in
// the resolvable GPA space; unresolvable reads yield non-present entries.
unsafe {
hyperlight_common::vmem::virt_to_phys(&op, 0, hyperlight_common::layout::MAX_GVA as u64)
}
.filter_map(move |mapping| {
// Scratch is re-mapped separately by `map_specials`; anything at or
// above its GVA base is not part of the snapshot.
if mapping.virt_base >= scratch_base_gva(layout.get_scratch_size()) {
return None;
}
// Skip the snapshot page-table GVA window so the tables themselves are
// not captured as ordinary data pages.
#[cfg(not(feature = "nanvix-unstable"))]
if mapping.virt_base >= hyperlight_common::layout::SNAPSHOT_PT_GVA_MIN as u64
&& mapping.virt_base <= hyperlight_common::layout::SNAPSHOT_PT_GVA_MAX as u64
{
return None;
}
// Drop mappings whose physical page has no resolvable backing memory.
let contents = unsafe { guest_page(snap, scratch, regions, layout, mapping.phys_base) }?;
Some((mapping, contents))
})
.collect()
}
/// Fetches exactly one page (`PAGE_SIZE` bytes) of guest memory at `gpa`, or
/// `None` when the address does not resolve or fewer than `PAGE_SIZE` bytes
/// back it.
///
/// # Safety
/// NOTE(review): inherits the contract of `resolve_gpa`/`with_memories`;
/// callers pass physical addresses obtained from walked page tables.
unsafe fn guest_page<'a>(
    snap: &'a [u8],
    scratch: &'a [u8],
    regions: &[MemoryRegion],
    layout: SandboxMemoryLayout,
    gpa: u64,
) -> Option<&'a [u8]> {
    let backing = layout
        .resolve_gpa(gpa, regions)?
        .with_memories(snap, scratch);
    // `get(..PAGE_SIZE)` is `None` exactly when the slice is too short,
    // matching the explicit length check it replaces.
    backing.as_ref().get(..PAGE_SIZE)
}
/// Maps the scratch region — read/write, non-executable — into the page
/// tables being built in `pt_buf`, at the scratch GPA/GVA for `scratch_size`.
fn map_specials(pt_buf: &GuestPageTableBuffer, scratch_size: usize) {
    let permissions = BasicMapping {
        readable: true,
        writable: true,
        executable: false,
    };
    let scratch_mapping = Mapping {
        phys_base: scratch_base_gpa(scratch_size),
        virt_base: scratch_base_gva(scratch_size),
        len: scratch_size as u64,
        kind: MappingKind::Basic(permissions),
    };
    // SAFETY: NOTE(review) writes only into the page-table buffer under
    // construction, per `vmem::map`'s contract — confirm against its docs.
    unsafe { vmem::map(pt_buf, scratch_mapping) };
}
impl Snapshot {
/// Builds the initial snapshot for a sandbox that has not yet run: parses
/// and loads the guest binary into fresh memory, writes the PEB and the
/// optional init-data blob, and (on non-nanvix builds) constructs
/// identity-mapped guest page tables appended after the memory image.
pub(crate) fn from_env<'a, 'b>(
env: impl Into<GuestEnvironment<'a, 'b>>,
cfg: SandboxConfiguration,
) -> Result<Self> {
let env = env.into();
let mut bin = env.guest_binary;
// Normalise the binary reference before parsing it.
bin.canonicalize()?;
let blob = env.init_data;
use crate::mem::exe::ExeInfo;
let exe_info = match bin {
GuestBinary::FilePath(bin_path_str) => ExeInfo::from_file(&bin_path_str)?,
GuestBinary::Buffer(buffer) => ExeInfo::from_buf(buffer)?,
};
// Reject a guest built against a different hyperlight version early.
let host_version = env!("CARGO_PKG_VERSION");
if let Some(v) = exe_info.guest_bin_version()
&& v != host_version
{
return Err(crate::HyperlightError::GuestBinVersionMismatch {
guest_bin_version: v.to_string(),
host_version: host_version.to_string(),
});
}
let guest_blob_size = blob.as_ref().map(|b| b.data.len()).unwrap_or(0);
let guest_blob_mem_flags = blob.as_ref().map(|b| b.permissions);
#[cfg_attr(feature = "nanvix-unstable", allow(unused_mut))]
let mut layout = crate::mem::layout::SandboxMemoryLayout::new(
cfg,
exe_info.loaded_size(),
guest_blob_size,
guest_blob_mem_flags,
)?;
let load_addr = layout.get_guest_code_address() as u64;
let entrypoint_offset: u64 = exe_info.entrypoint().into();
// Allocate the zeroed memory image and load the executable at its offset.
let mut memory = vec![0; layout.get_memory_size()?];
let load_info = exe_info.load(
load_addr.try_into()?,
&mut memory[layout.get_guest_code_offset()..],
)?;
layout.write_peb(&mut memory)?;
// Copy the optional init-data blob into place, propagating any error.
blob.map(|x| layout.write_init_data(&mut memory, x.data))
.transpose()?;
#[cfg(not(feature = "nanvix-unstable"))]
{
// Build guest page tables: each layout region is identity-mapped;
// writable regions become copy-on-write mappings.
let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);
use crate::mem::memory_region::{GuestMemoryRegion, MemoryRegionFlags};
for rgn in layout.get_memory_regions_::<GuestMemoryRegion>(())?.iter() {
let readable = rgn.flags.contains(MemoryRegionFlags::READ);
let executable = rgn.flags.contains(MemoryRegionFlags::EXECUTE);
let writable = rgn.flags.contains(MemoryRegionFlags::WRITE);
let kind = if writable {
MappingKind::Cow(CowMapping {
readable,
executable,
})
} else {
MappingKind::Basic(BasicMapping {
readable,
writable: false,
executable,
})
};
let mapping = Mapping {
phys_base: rgn.guest_region.start as u64,
virt_base: rgn.guest_region.start as u64,
len: rgn.guest_region.len() as u64,
kind,
};
unsafe { vmem::map(&pt_buf, mapping) };
}
map_specials(&pt_buf, layout.get_scratch_size());
// Append the finished page tables after the regular memory image.
let pt_bytes = pt_buf.into_bytes();
layout.set_pt_size(pt_bytes.len())?;
memory.extend(&pt_bytes);
};
let exn_stack_top_gva = hyperlight_common::layout::MAX_GVA as u64
- hyperlight_common::layout::SCRATCH_TOP_EXN_STACK_OFFSET
+ 1;
// A fresh snapshot carries no extra host-mapped regions.
let extra_regions = Vec::new();
let hash = hash(&memory, &extra_regions)?;
Ok(Self {
sandbox_id: SANDBOX_CONFIGURATION_COUNTER.fetch_add(1, Ordering::Relaxed),
memory: ReadonlySharedMemory::from_bytes(&memory)?,
layout,
regions: extra_regions,
load_info,
hash,
stack_top_gva: exn_stack_top_gva,
sregs: None,
// On restore, guest initialisation runs first.
entrypoint: NextAction::Initialise(load_addr + entrypoint_offset),
})
}
/// Captures a snapshot of a running sandbox: walks the live page tables,
/// compacts every reachable page into a fresh buffer (storing each physical
/// page only once, however many virtual addresses map it), downgrades
/// writable mappings to copy-on-write, rebuilds page tables over the
/// compacted memory, and hashes the result.
#[allow(clippy::too_many_arguments)]
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn new<S: SharedMemory>(
shared_mem: &mut SnapshotSharedMemory<S>,
scratch_mem: &mut S,
sandbox_id: u64,
mut layout: SandboxMemoryLayout,
load_info: LoadInfo,
regions: Vec<MemoryRegion>,
root_pt_gpa: u64,
stack_top_gva: u64,
sregs: CommonSpecialRegisters,
entrypoint: NextAction,
) -> Result<Self> {
use std::collections::HashMap;
// Maps an old physical page base to its new GPA in the compacted
// snapshot, deduplicating pages shared across virtual addresses.
let mut phys_seen = HashMap::<u64, usize>::new();
let memory = shared_mem.with_contents(|snap_c| {
scratch_mem.with_contents(|scratch_c| {
let live_pages =
filtered_mappings(snap_c, scratch_c, &regions, layout, root_pt_gpa);
let pt_buf = GuestPageTableBuffer::new(layout.get_pt_base_gpa() as usize);
let mut snapshot_memory: Vec<u8> = Vec::new();
for (mapping, contents) in live_pages {
// Writable mappings become copy-on-write in the snapshot;
// read-only ones keep their permissions.
let kind = match mapping.kind {
MappingKind::Cow(cm) => MappingKind::Cow(cm),
MappingKind::Basic(bm) if bm.writable => MappingKind::Cow(CowMapping {
readable: bm.readable,
executable: bm.executable,
}),
MappingKind::Basic(bm) => MappingKind::Basic(BasicMapping {
readable: bm.readable,
writable: false,
executable: bm.executable,
}),
MappingKind::Unmapped => continue,
};
// First sighting of a physical page appends its contents and
// records its new GPA; later sightings reuse that GPA.
let new_gpa = phys_seen.entry(mapping.phys_base).or_insert_with(|| {
let new_offset = snapshot_memory.len();
snapshot_memory.extend(contents);
new_offset + SandboxMemoryLayout::BASE_ADDRESS
});
let mapping = Mapping {
phys_base: *new_gpa as u64,
virt_base: mapping.virt_base,
len: PAGE_SIZE as u64,
kind,
};
unsafe { vmem::map(&pt_buf, mapping) };
}
map_specials(&pt_buf, layout.get_scratch_size());
// Append the rebuilt page tables after the compacted pages.
let pt_bytes = pt_buf.into_bytes();
layout.set_pt_size(pt_bytes.len())?;
snapshot_memory.extend(&pt_bytes);
Ok::<Vec<u8>, crate::HyperlightError>(snapshot_memory)
})
})???;
layout.set_snapshot_size(memory.len());
// The `regions` parameter was only needed to resolve pages during the
// walk; the compacted snapshot itself records no extra regions.
let regions = Vec::new();
let hash = hash(&memory, &regions)?;
Ok(Self {
sandbox_id,
layout,
memory: ReadonlySharedMemory::from_bytes(&memory)?,
regions,
load_info,
hash,
stack_top_gva,
sregs: Some(sregs),
entrypoint,
})
}
/// Id of the sandbox this snapshot belongs to.
pub(crate) fn sandbox_id(&self) -> u64 {
self.sandbox_id
}
/// Extra host-mapped regions recorded with the snapshot.
pub(crate) fn regions(&self) -> &[MemoryRegion] {
&self.regions
}
/// The read-only snapshot memory image.
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn memory(&self) -> &ReadonlySharedMemory {
&self.memory
}
/// Clone of the executable load information.
pub(crate) fn load_info(&self) -> LoadInfo {
self.load_info.clone()
}
/// The memory layout describing this snapshot's contents.
pub(crate) fn layout(&self) -> &crate::mem::layout::SandboxMemoryLayout {
&self.layout
}
/// GPA of the snapshot's page-table root (from the layout).
pub(crate) fn root_pt_gpa(&self) -> u64 {
self.layout.get_pt_base_gpa()
}
/// Guest-virtual address of the top of the stack.
pub(crate) fn stack_top_gva(&self) -> u64 {
self.stack_top_gva
}
/// Saved special registers, if this snapshot was taken from a running
/// sandbox (`None` for `from_env` snapshots).
pub(crate) fn sregs(&self) -> Option<&CommonSpecialRegisters> {
self.sregs.as_ref()
}
/// What to run when this snapshot is restored.
pub(crate) fn entrypoint(&self) -> NextAction {
self.entrypoint
}
}
// Snapshots compare equal iff their content hashes match (see `hash`);
// metadata such as sandbox_id does not participate in equality.
impl PartialEq for Snapshot {
fn eq(&self, other: &Snapshot) -> bool {
self.hash == other.hash
}
}
#[cfg(test)]
mod tests {
use hyperlight_common::vmem::{self, BasicMapping, Mapping, MappingKind, PAGE_SIZE};
use crate::hypervisor::regs::CommonSpecialRegisters;
use crate::mem::exe::LoadInfo;
use crate::mem::layout::SandboxMemoryLayout;
use crate::mem::mgr::{GuestPageTableBuffer, SandboxMemoryManager, SnapshotSharedMemory};
use crate::mem::shared_mem::{
ExclusiveSharedMemory, HostSharedMemory, ReadonlySharedMemory, SharedMemory,
};
// Default special registers for snapshots taken in tests.
fn default_sregs() -> CommonSpecialRegisters {
CommonSpecialRegisters::default()
}
// In the single-page test layout the page tables sit directly after the
// one mapped data page.
const SIMPLE_PT_BASE: usize = PAGE_SIZE + SandboxMemoryLayout::BASE_ADDRESS;
// Builds snapshot memory holding one identity-mapped RWX page with the given
// `contents`, followed by page tables rooted at SIMPLE_PT_BASE, plus a
// scratch mapping of one page.
fn make_simple_pt_mem(contents: &[u8]) -> SnapshotSharedMemory<ExclusiveSharedMemory> {
let pt_buf = GuestPageTableBuffer::new(SIMPLE_PT_BASE);
let mapping = Mapping {
phys_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
virt_base: SandboxMemoryLayout::BASE_ADDRESS as u64,
len: PAGE_SIZE as u64,
kind: MappingKind::Basic(BasicMapping {
readable: true,
writable: true,
executable: true,
}),
};
unsafe { vmem::map(&pt_buf, mapping) };
super::map_specials(&pt_buf, PAGE_SIZE);
let pt_bytes = pt_buf.into_bytes();
// Layout: [one data page | page-table bytes].
let mut snapshot_mem = vec![0u8; PAGE_SIZE + pt_bytes.len()];
snapshot_mem[0..PAGE_SIZE].copy_from_slice(contents);
snapshot_mem[PAGE_SIZE..].copy_from_slice(&pt_bytes);
ReadonlySharedMemory::from_bytes(&snapshot_mem)
.unwrap()
.to_mgr_snapshot_mem()
.unwrap()
}
// Builds a memory manager over a zeroed single-page snapshot and returns it
// together with the page-table root GPA.
fn make_simple_pt_mgr() -> (SandboxMemoryManager<HostSharedMemory>, u64) {
let cfg = crate::sandbox::SandboxConfiguration::default();
let scratch_mem = ExclusiveSharedMemory::new(cfg.get_scratch_size()).unwrap();
let mgr = SandboxMemoryManager::new(
SandboxMemoryLayout::new(cfg, 4096, 0x3000, None).unwrap(),
make_simple_pt_mem(&[0u8; PAGE_SIZE]),
scratch_mem,
super::NextAction::None,
);
let (mgr, _) = mgr.build().unwrap();
(mgr, SIMPLE_PT_BASE as u64)
}
// Restoring either of two snapshots taken with different page contents must
// reproduce that snapshot's bytes — snapshots must not alias each other.
#[test]
fn multiple_snapshots_independent() {
let (mut mgr, pt_base) = make_simple_pt_mgr();
let pattern_a = vec![0xAA; PAGE_SIZE];
let snapshot_a = super::Snapshot::new(
&mut make_simple_pt_mem(&pattern_a).build().0,
&mut mgr.scratch_mem,
1,
mgr.layout,
LoadInfo::dummy(),
Vec::new(),
pt_base,
0,
default_sregs(),
super::NextAction::None,
)
.unwrap();
let pattern_b = vec![0xBB; PAGE_SIZE];
let snapshot_b = super::Snapshot::new(
&mut make_simple_pt_mem(&pattern_b).build().0,
&mut mgr.scratch_mem,
2,
mgr.layout,
LoadInfo::dummy(),
Vec::new(),
pt_base,
0,
default_sregs(),
super::NextAction::None,
)
.unwrap();
mgr.restore_snapshot(&snapshot_a).unwrap();
mgr.shared_mem
.with_contents(|contents| assert_eq!(&contents[0..pattern_a.len()], &pattern_a[..]))
.unwrap();
mgr.restore_snapshot(&snapshot_b).unwrap();
mgr.shared_mem
.with_contents(|contents| assert_eq!(&contents[0..pattern_b.len()], &pattern_b[..]))
.unwrap();
}
}