use crate::abi::GuestMemoryConfig;
use crate::utils::align_to_next_page_usize;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU32, AtomicU64};
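// Generates three layout-compatible representations of the same table: one holding
// the actual function pointers (`AddressTableRaw`), a `#[repr(packed)]` form
// (`AddressTablePacked`), and a plain `u64` form (`AddressTable`), together with
// conversions between them.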
macro_rules! define_address_table {
    ($($name:ident: $type:ty,)+) => {
        #[repr(C)]
        pub struct AddressTableRaw {
            $(pub $name: $type),+
        }

        #[derive(Copy, Clone)]
        #[repr(packed)]
        pub struct AddressTablePacked {
            $(pub $name: u64),+
        }

        #[derive(Copy, Clone)]
        pub struct AddressTable {
            $(pub $name: u64),+
        }

        impl AddressTable {
            #[inline]
            pub fn from_raw(table: AddressTableRaw) -> Self {
                Self {
                    $(
                        $name: table.$name as u64
                    ),+
                }
            }

            pub const fn from_packed(table: &AddressTablePacked) -> Self {
                Self {
                    $(
                        $name: table.$name
                    ),+
                }
            }
        }

        static_assert!(core::mem::size_of::<AddressTableRaw>() == core::mem::size_of::<AddressTablePacked>());
        static_assert!(core::mem::size_of::<AddressTableRaw>() == core::mem::size_of::<AddressTable>());
    }
}

define_address_table! {
    syscall_hostcall: unsafe extern "C" fn(u32),
    syscall_trap: unsafe extern "C" fn() -> !,
    syscall_return: unsafe extern "C" fn() -> !,
    syscall_trace: unsafe extern "C" fn(u32, u64),
}
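// Illustrative sketch only: the raw table is meant to be filled with the sandbox's
// actual syscall entry points and then flattened into plain `u64` addresses. The
// handler names below are hypothetical placeholders.
//
//     let table = AddressTable::from_raw(AddressTableRaw {
//         syscall_hostcall: syscall_hostcall_handler,
//         syscall_trap: syscall_trap_handler,
//         syscall_return: syscall_return_handler,
//         syscall_trace: syscall_trace_handler,
//     });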
pub const VM_ADDR_NATIVE_CODE: u64 = 0x100000000;
pub const VM_ADDR_JUMP_TABLE: u64 = 0x800000000;
pub const VM_ADDR_JUMP_TABLE_RETURN_TO_HOST: u64 = VM_ADDR_JUMP_TABLE + ((crate::abi::VM_ADDR_RETURN_TO_HOST as u64) << 3);
pub const HOSTCALL_ABORT_EXECUTION: u32 = !0;
pub const SANDBOX_EMPTY_NTH_INSTRUCTION: u32 = !0;
pub const SANDBOX_EMPTY_NATIVE_PROGRAM_COUNTER: u64 = 0;
pub const VM_ADDR_VMCTX: u64 = 0x400000000;
pub const VM_ADDR_SIGSTACK: u64 = 0x500000000;
pub const VM_ADDR_NATIVE_STACK_LOW: u64 = 0x600000000;
pub const VM_ADDR_NATIVE_STACK_SIZE: u64 = 0x4000;
pub const VM_ADDR_NATIVE_STACK_HIGH: u64 = VM_ADDR_NATIVE_STACK_LOW + VM_ADDR_NATIVE_STACK_SIZE;
pub const VM_COMPILER_MAXIMUM_INSTRUCTION_LENGTH: u32 = 53;
pub const VM_COMPILER_MAXIMUM_EPILOGUE_LENGTH: u32 = 1024 * 1024;
const VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE: u64 = (crate::abi::VM_MAXIMUM_INSTRUCTION_COUNT as u64 + 1)
    * core::mem::size_of::<u64>() as u64
    * crate::abi::VM_CODE_ADDRESS_ALIGNMENT as u64;
pub const VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE: u64 = 0x100000000 * core::mem::size_of::<u64>() as u64;
pub const VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE: u32 = 512 * 1024 * 1024 - 1;
#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(C)]
pub struct SandboxMemoryConfig {
    guest_config: GuestMemoryConfig,
    code_size: u32,
    jump_table_size: u32,
}

impl core::ops::Deref for SandboxMemoryConfig {
    type Target = GuestMemoryConfig;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.guest_config
    }
}

impl core::ops::DerefMut for SandboxMemoryConfig {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.guest_config
    }
}

impl SandboxMemoryConfig {
    #[inline]
    pub const fn empty() -> Self {
        Self {
            guest_config: GuestMemoryConfig::empty(),
            code_size: 0,
            jump_table_size: 0,
        }
    }

    #[inline]
    pub fn set_guest_config(&mut self, guest_config: GuestMemoryConfig) {
        self.guest_config = guest_config;
    }

    #[inline]
    pub const fn code_size(&self) -> usize {
        self.code_size as usize
    }

    #[inline]
    pub fn clear_code_size(&mut self) {
        self.code_size = 0;
    }

    pub fn set_code_size(&mut self, native_page_size: usize, code_size: usize) -> Result<(), &'static str> {
        if code_size > VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE as usize {
            return Err("size of the native code exceeded the maximum code size");
        }

        let code_size = match align_to_next_page_usize(native_page_size, code_size) {
            Some(value) => value,
            None => unreachable!(),
        };

        self.code_size = code_size as u32;
        Ok(())
    }

    #[inline]
    pub const fn jump_table_size(&self) -> usize {
        self.jump_table_size as usize
    }

    #[inline]
    pub fn clear_jump_table_size(&mut self) {
        self.jump_table_size = 0;
    }
    pub fn set_jump_table_size(&mut self, native_page_size: usize, jump_table_size: usize) -> Result<(), &'static str> {
        if jump_table_size > VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE as usize {
            return Err("size of the jump table exceeded the maximum size");
        }

        let jump_table_size = match align_to_next_page_usize(native_page_size, jump_table_size) {
            Some(value) => value,
            None => unreachable!(),
        };

        self.jump_table_size = jump_table_size as u32;
        Ok(())
    }
}
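// Illustrative usage only (a sketch; `native_page_size`, `guest_config`, `code` and
// `jump_table` below are hypothetical values supplied by the caller):
//
//     let mut config = SandboxMemoryConfig::empty();
//     config.set_guest_config(guest_config);
//     config.set_code_size(native_page_size, code.len())?;
//     config.set_jump_table_size(native_page_size, jump_table.len())?;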
pub const VM_RPC_FLAG_RECONFIGURE: u32 = 1 << 0;
pub const VM_RPC_FLAG_RESET_MEMORY_AFTER_EXECUTION: u32 = 1 << 1;
pub const VM_RPC_FLAG_CLEAR_PROGRAM_AFTER_EXECUTION: u32 = 1 << 2;
#[repr(C)]
pub struct VmInit {
    pub stack_address: AtomicU64,
    pub stack_length: AtomicU64,
    pub vdso_address: AtomicU64,
    pub vdso_length: AtomicU64,
    pub vvar_address: AtomicU64,
    pub vvar_length: AtomicU64,
}
const MESSAGE_BUFFER_SIZE: usize = 512;
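/// Wraps a value and aligns it to 64 bytes (a typical cache line size).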
#[repr(align(64))]
pub struct CacheAligned<T>(pub T);
impl<T> core::ops::Deref for CacheAligned<T> {
    type Target = T;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> core::ops::DerefMut for CacheAligned<T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
const REG_COUNT: usize = crate::program::Reg::ALL.len();
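/// The state touched on the hostcall path: remaining gas, the requested hostcall
/// number, the guest registers, and the current instruction index and native RIP.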
#[repr(C)]
pub struct VmCtxSyscall {
    pub gas: UnsafeCell<i64>,
    pub hostcall: UnsafeCell<u32>,
    pub regs: UnsafeCell<[u32; REG_COUNT]>,
    pub nth_instruction: UnsafeCell<u32>,
    pub rip: UnsafeCell<u64>,
}

#[repr(C)]
pub struct VmCtxCounters {
    pub syscall_wait_loop_start: UnsafeCell<u64>,
    pub syscall_futex_wait: UnsafeCell<u64>,
}
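/// The VM context through which the host and the sandbox communicate: the futex
/// state, the RPC parameters, the memory configuration and the message buffer all
/// live here.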
#[repr(C)]
pub struct VmCtx {
    syscall_ffi: CacheAligned<VmCtxSyscall>,
    pub futex: CacheAligned<AtomicU32>,
    pub rpc_address: UnsafeCell<u64>,
    pub rpc_flags: UnsafeCell<u32>,
    pub memory_config: UnsafeCell<SandboxMemoryConfig>,
    pub new_memory_config: UnsafeCell<SandboxMemoryConfig>,
    pub new_sysreturn_address: UnsafeCell<u64>,
    pub counters: CacheAligned<VmCtxCounters>,
    pub init: VmInit,
    pub message_length: UnsafeCell<u32>,
    pub message_buffer: UnsafeCell<[u8; MESSAGE_BUFFER_SIZE]>,
}
static_assert!(core::mem::size_of::<VmCtx>() <= 4096);
pub const VMCTX_FUTEX_BUSY: u32 = 0;
pub const VMCTX_FUTEX_INIT: u32 = 1;
pub const VMCTX_FUTEX_IDLE: u32 = 2;
pub const VMCTX_FUTEX_HOSTCALL: u32 = 3;
pub const VMCTX_FUTEX_TRAP: u32 = 4;
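// The constants above enumerate the states of `VmCtx::futex`. A minimal sketch of
// how the host side might poll for a state change (the real protocol also involves
// futex wait/wake system calls and careful memory ordering; `vmctx` is a
// hypothetical reference to a shared `VmCtx`):
//
//     use core::sync::atomic::Ordering;
//     loop {
//         match vmctx.futex.load(Ordering::Acquire) {
//             VMCTX_FUTEX_BUSY => core::hint::spin_loop(),
//             VMCTX_FUTEX_HOSTCALL => break, // service the hostcall
//             VMCTX_FUTEX_TRAP => break,     // the guest trapped
//             _ => break,
//         }
//     }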
impl VmCtx {
    pub const fn zeroed() -> Self {
        VmCtx {
            futex: CacheAligned(AtomicU32::new(VMCTX_FUTEX_BUSY)),
            rpc_address: UnsafeCell::new(0),
            rpc_flags: UnsafeCell::new(0),
            memory_config: UnsafeCell::new(SandboxMemoryConfig::empty()),
            new_memory_config: UnsafeCell::new(SandboxMemoryConfig::empty()),
            new_sysreturn_address: UnsafeCell::new(0),
            syscall_ffi: CacheAligned(VmCtxSyscall {
                gas: UnsafeCell::new(0),
                hostcall: UnsafeCell::new(0),
                regs: UnsafeCell::new([0; REG_COUNT]),
                rip: UnsafeCell::new(0),
                nth_instruction: UnsafeCell::new(0),
            }),
            counters: CacheAligned(VmCtxCounters {
                syscall_wait_loop_start: UnsafeCell::new(0),
                syscall_futex_wait: UnsafeCell::new(0),
            }),
            init: VmInit {
                stack_address: AtomicU64::new(0),
                stack_length: AtomicU64::new(0),
                vdso_address: AtomicU64::new(0),
                vdso_length: AtomicU64::new(0),
                vvar_address: AtomicU64::new(0),
                vvar_length: AtomicU64::new(0),
            },
            message_length: UnsafeCell::new(0),
            message_buffer: UnsafeCell::new([0; MESSAGE_BUFFER_SIZE]),
        }
    }

    pub const fn new() -> Self {
        let mut vmctx = Self::zeroed();
        vmctx.syscall_ffi.0.nth_instruction = UnsafeCell::new(SANDBOX_EMPTY_NTH_INSTRUCTION);
        vmctx
    }

    #[inline(always)]
    pub const fn gas(&self) -> &UnsafeCell<i64> {
        &self.syscall_ffi.0.gas
    }

    #[inline(always)]
    pub const fn hostcall(&self) -> &UnsafeCell<u32> {
        &self.syscall_ffi.0.hostcall
    }

    #[inline(always)]
    pub const fn regs(&self) -> &UnsafeCell<[u32; REG_COUNT]> {
        &self.syscall_ffi.0.regs
    }

    #[inline(always)]
    pub const fn rip(&self) -> &UnsafeCell<u64> {
        &self.syscall_ffi.0.rip
    }

    #[inline(always)]
    pub const fn nth_instruction(&self) -> &UnsafeCell<u32> {
        &self.syscall_ffi.0.nth_instruction
    }
}
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST > VM_ADDR_JUMP_TABLE);
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST % 0x4000 == 0);
static_assert!(VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE <= VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE);
static_assert!(VM_ADDR_JUMP_TABLE + VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE < VM_ADDR_JUMP_TABLE_RETURN_TO_HOST);
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST < VM_ADDR_JUMP_TABLE + VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE);
static_assert!(VM_ADDR_JUMP_TABLE.count_ones() == 1);
static_assert!((1 << VM_ADDR_JUMP_TABLE.trailing_zeros()) == VM_ADDR_JUMP_TABLE);
static_assert!(
    VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE
        >= crate::abi::VM_MAXIMUM_INSTRUCTION_COUNT * VM_COMPILER_MAXIMUM_INSTRUCTION_LENGTH + VM_COMPILER_MAXIMUM_EPILOGUE_LENGTH
);
static_assert!(VM_ADDR_NATIVE_CODE > 0xffffffff);
static_assert!(VM_ADDR_VMCTX > 0xffffffff);
static_assert!(VM_ADDR_NATIVE_STACK_LOW > 0xffffffff);