use crate::runtime::vm::vmcontext::VMArrayCallNative;
use crate::runtime::vm::{
StoreBox, TrapRegisters, TrapTest, VMContext, VMOpaqueContext, f32x4, f64x2, i8x16, tls,
};
use crate::{Engine, ValRaw};
use core::marker;
use core::ptr::NonNull;
use pulley_interpreter::interp::{DoneReason, RegType, TrapKind, Val, Vm, XRegVal};
use pulley_interpreter::{FReg, Reg, XReg};
use wasmtime_environ::{BuiltinFunctionIndex, HostCall, Trap};
use wasmtime_unwinder::Unwind;
/// Wrapper around a heap-allocated pulley interpreter [`Vm`].
///
/// `repr(transparent)` so this type has the same layout as the `StoreBox<Vm>`
/// it wraps (i.e. a single pointer to the boxed interpreter state).
#[repr(transparent)]
pub struct Interpreter {
    // Boxed interpreter state; `StoreBox` lets us hand out raw `NonNull<Vm>`
    // pointers to it (see `as_interpreter_ref` below).
    pulley: StoreBox<Vm>,
}
impl Interpreter {
    /// Creates a new interpreter whose stack size is taken from the engine's
    /// configured `max_wasm_stack`, and registers it with the engine's
    /// profiler before returning.
    pub fn new(engine: &Engine) -> Interpreter {
        let interp = Interpreter {
            pulley: StoreBox::new(Vm::with_stack(engine.config().max_wasm_stack)),
        };
        engine.profiler().register_interpreter(&interp);
        interp
    }

    /// Borrows this interpreter mutably as a temporary [`InterpreterRef`].
    pub fn as_interpreter_ref(&mut self) -> InterpreterRef<'_> {
        InterpreterRef {
            vm: self.pulley.get(),
            _phantom: marker::PhantomData,
        }
    }

    /// Shared view of the underlying pulley [`Vm`].
    pub fn pulley(&self) -> &Vm {
        // SAFETY: `&self` keeps the `StoreBox` allocation alive for the
        // duration of the returned borrow.
        unsafe { self.pulley.get().as_ref() }
    }

    /// Returns the unwinder used to walk frames created by this interpreter.
    pub fn unwinder(&self) -> &'static dyn Unwind {
        &UnwindPulley
    }
}
/// A temporary, mutable borrow of an [`Interpreter`].
///
/// `repr(transparent)` wrapper around a raw pointer to the pulley `Vm`; the
/// `PhantomData` ties the borrow to the lifetime `'a` of the originating
/// `&mut Interpreter` so exclusive access is enforced by the type system.
#[repr(transparent)]
pub struct InterpreterRef<'a> {
    vm: NonNull<Vm>,
    _phantom: marker::PhantomData<&'a mut Vm>,
}
/// Frame-walking implementation of [`Unwind`] for pulley interpreter frames.
///
/// Pulley frames store a pointer-sized frame-pointer/return-address pair, so
/// all offsets below are expressed in multiples of the target word size.
pub struct UnwindPulley;

unsafe impl Unwind for UnwindPulley {
    fn next_older_fp_from_fp_offset(&self) -> usize {
        // The previous frame pointer is stored directly at `fp` (offset 0).
        0
    }

    fn next_older_sp_from_fp_offset(&self) -> usize {
        // The caller's stack pointer lies two words above `fp` (past the
        // saved fp/lr pair): 2 * 4 bytes on 32-bit, 2 * 8 bytes otherwise.
        if cfg!(target_pointer_width = "32") {
            8
        } else {
            16
        }
    }

    unsafe fn get_next_older_pc_from_fp(&self, fp: usize) -> usize {
        // The return address is stored one word above the saved frame
        // pointer. This is a read-only access at a non-negative offset, so
        // use a `*const` pointer and `add` rather than a `*mut` with a
        // signed `offset`.
        *(fp as *const usize).add(1)
    }

    fn assert_fp_is_aligned(&self, fp: usize) {
        // Frames keep the stack aligned to two words (8 bytes on 32-bit,
        // 16 bytes otherwise); a misaligned fp indicates a corrupt frame.
        let expected = if cfg!(target_pointer_width = "32") {
            8
        } else {
            16
        };
        assert_eq!(fp % expected, 0, "stack should always be aligned");
    }
}
/// Snapshot of interpreter register state captured before running bytecode.
///
/// Taken by `setjmp` and restored by `longjmp` when unwinding past
/// interpreted frames after a trap or host-raised error.
#[derive(Clone, Copy)]
struct Setjmp {
    // Callee-saved integer registers (x16..x32 per `callee_save_xregs`).
    xregs: [u64; 16],
    // Callee-saved float registers (f16..f32 per `callee_save_fregs`).
    fregs: [f64; 16],
    // Saved frame pointer.
    fp: *mut u8,
    // Saved link register (return address).
    lr: *mut u8,
}
impl InterpreterRef<'_> {
    /// Shorthand accessor for the underlying `&mut Vm`.
    fn vm(&mut self) -> &mut Vm {
        // SAFETY: the `PhantomData<&'a mut Vm>` in `InterpreterRef` models
        // exclusive access to the interpreter for the borrow's lifetime.
        unsafe { self.vm.as_mut() }
    }

    /// Runs interpreted `bytecode` using the array-call convention: the four
    /// arguments below are placed in the first argument registers and a
    /// single boolean "succeeded" value is read back out on return.
    ///
    /// Returns `true` on success and `false` if a trap or host-raised error
    /// unwound execution (in which case register state is restored via
    /// `longjmp` to the snapshot taken on entry).
    pub unsafe fn call(
        mut self,
        mut bytecode: NonNull<u8>,
        callee: NonNull<VMOpaqueContext>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Argument registers for the array-call signature:
        // (callee vmctx, caller vmctx, args/results pointer, args/results len).
        let args = [
            XRegVal::new_ptr(callee.as_ptr()).into(),
            XRegVal::new_ptr(caller.as_ptr()).into(),
            XRegVal::new_ptr(args_and_results.cast::<u8>().as_ptr()).into(),
            XRegVal::new_u64(args_and_results.len() as u64).into(),
        ];
        let mut vm = self.vm();
        // Snapshot callee-saved state so traps/raises can restore it below.
        let setjmp = setjmp(vm);
        let old_lr = vm.call_start(&args);
        let ret = loop {
            match vm.call_run(bytecode) {
                // Normal completion: read the single boolean return value.
                DoneReason::ReturnToHost(()) => {
                    match vm.call_end(old_lr, [RegType::XReg]).next().unwrap() {
                        #[allow(
                            clippy::cast_possible_truncation,
                            reason = "intentionally reading the lower bits only"
                        )]
                        Val::XReg(xreg) => break (xreg.get_u32() as u8) != 0,
                        _ => unreachable!(),
                    }
                }
                // Bytecode requested a host call, identified by `id`.
                DoneReason::CallIndirectHost { id, resume } => {
                    // The `raise` builtin signals an error already recorded
                    // by the host: restore registers and report failure.
                    if u32::from(id) == HostCall::Builtin(BuiltinFunctionIndex::raise()).index() {
                        longjmp(vm, setjmp);
                        break false;
                    } else {
                        // Dispatch the host call, then resume interpreting
                        // at the address provided by the interpreter.
                        vm = self.call_indirect_host(id);
                        bytecode = resume;
                    }
                }
                // Interpreter-detected trap: record it and unwind.
                DoneReason::Trap { pc, kind } => {
                    trap(vm, pc, kind, setjmp);
                    break false;
                }
            }
        };
        // In debug builds, double-check the callee-saved state was restored
        // to exactly the snapshot taken on entry.
        if cfg!(debug_assertions) {
            for (i, reg) in callee_save_xregs() {
                assert!(vm[reg].get_u64() == setjmp.xregs[i]);
            }
            for (i, reg) in callee_save_fregs() {
                assert!(vm[reg].get_f64().to_bits() == setjmp.fregs[i].to_bits());
            }
            assert!(vm.fp() == setjmp.fp);
            assert!(vm.lr() == setjmp.lr);
        }
        ret
    }

    /// Dispatches a host call identified by `id`: reads the target function
    /// pointer from `x0`, marshals arguments out of successive x-registers,
    /// invokes the host function, and writes any result back into `x0`.
    ///
    /// Returns the `&mut Vm` so the caller can resume interpretation.
    #[allow(
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss,
        unused,
        reason = "macro-generated code"
    )]
    #[cfg_attr(
        not(feature = "component-model"),
        expect(unused_macro_rules, reason = "macro-code")
    )]
    unsafe fn call_indirect_host(&mut self, id: u8) -> &mut Vm {
        let id = u32::from(id);
        // The host function pointer is passed in `x0` by convention;
        // arguments follow in `x1`, `x2`, ...
        let fnptr = self.vm()[XReg::x0].get_ptr();
        let mut arg_reg = 1;
        // Helper macro: given a parameter/result type list, transmutes
        // `fnptr` to the matching `extern "C"` signature, pulls each
        // argument from the next x-register, calls it, and stores the
        // result (if any) back into `x0`.
        macro_rules! call {
            (@builtin($($param:ident),*) $(-> $result:ident)?) => {{
                #[allow(improper_ctypes_definitions)]
                type T = unsafe extern "C" fn($(call!(@ty $param)),*) $(-> call!(@ty $result))?;
                call!(@host T($($param),*) $(-> $result)?);
            }};
            (@host $ty:ident($($param:ident),*) $(-> $result:ident)?) => {{
                let ret = {
                    let mut vm = self.vm();
                    union GetNative {
                        fnptr: *mut u8,
                        host: $ty,
                    }
                    let host = GetNative { fnptr }.host;
                    host($({
                        let reg = XReg::new(arg_reg).unwrap();
                        arg_reg += 1;
                        call!(@get $param vm[reg])
                    }),*)
                };
                let _ = arg_reg;
                let vm = self.vm();
                $(
                    call!(@set $result ret => vm[XReg::x0]);
                )?
                let _ = ret;
                return vm;
            }};
            (@ty bool) => (bool);
            (@ty u8) => (u8);
            (@ty u32) => (u32);
            (@ty i32) => (i32);
            (@ty u64) => (u64);
            (@ty i64) => (i64);
            (@ty f32) => (f32);
            (@ty f64) => (f64);
            (@ty i8x16) => (i8x16);
            (@ty f32x4) => (f32x4);
            (@ty f64x2) => (f64x2);
            (@ty vmctx) => (*mut VMContext);
            (@ty pointer) => (*mut u8);
            (@ty ptr_u8) => (*mut u8);
            (@ty ptr_u16) => (*mut u16);
            (@ty ptr_size) => (*mut usize);
            (@ty size) => (usize);
            (@get u8 $reg:expr) => ($reg.get_i32() as u8);
            (@get u32 $reg:expr) => ($reg.get_u32());
            (@get u64 $reg:expr) => ($reg.get_u64());
            (@get f32 $reg:expr) => (unreachable::<f32, _>($reg));
            (@get f64 $reg:expr) => (unreachable::<f64, _>($reg));
            (@get i8x16 $reg:expr) => (unreachable::<i8x16, _>($reg));
            (@get f32x4 $reg:expr) => (unreachable::<f32x4, _>($reg));
            (@get f64x2 $reg:expr) => (unreachable::<f64x2, _>($reg));
            (@get vmctx $reg:expr) => ($reg.get_ptr());
            (@get pointer $reg:expr) => ($reg.get_ptr());
            (@get ptr $reg:expr) => ($reg.get_ptr());
            (@get nonnull $reg:expr) => (NonNull::new($reg.get_ptr()).unwrap());
            (@get ptr_u8 $reg:expr) => ($reg.get_ptr());
            (@get ptr_u16 $reg:expr) => ($reg.get_ptr());
            (@get ptr_size $reg:expr) => ($reg.get_ptr());
            (@get size $reg:expr) => ($reg.get_ptr::<u8>() as usize);
            (@set bool $src:expr => $dst:expr) => ($dst.set_i32(i32::from($src)));
            (@set u32 $src:expr => $dst:expr) => ($dst.set_u32($src));
            (@set u64 $src:expr => $dst:expr) => ($dst.set_u64($src));
            (@set f32 $src:expr => $dst:expr) => (unreachable::<f32, _>(($dst, $src)));
            (@set f64 $src:expr => $dst:expr) => (unreachable::<f64, _>(($dst, $src)));
            (@set i8x16 $src:expr => $dst:expr) => (unreachable::<i8x16, _>(($dst, $src)));
            (@set f32x4 $src:expr => $dst:expr) => (unreachable::<f32x4, _>(($dst, $src)));
            (@set f64x2 $src:expr => $dst:expr) => (unreachable::<f64x2, _>(($dst, $src)));
            (@set pointer $src:expr => $dst:expr) => ($dst.set_ptr($src));
            (@set size $src:expr => $dst:expr) => ($dst.set_ptr($src as *mut u8));
        }
        // Special case: the array-call trampoline signature.
        if id == const { HostCall::ArrayCall.index() } {
            call!(@host VMArrayCallNative(nonnull, nonnull, nonnull, size) -> bool);
        }
        // Expand one `if id == ...` dispatch arm per core wasm builtin.
        macro_rules! core {
            (
                $(
                    $( #[cfg($attr:meta)] )?
                    $name:ident($($pname:ident: $param:ident ),* ) $(-> $result:ident)?;
                )*
            ) => {
                $(
                    $( #[cfg($attr)] )?
                    if id == const { HostCall::Builtin(BuiltinFunctionIndex::$name()).index() } {
                        call!(@builtin($($param),*) $(-> $result)?);
                    }
                )*
            }
        }
        wasmtime_environ::foreach_builtin_function!(core);
        #[cfg(feature = "component-model")]
        {
            use crate::runtime::vm::component::VMLoweringCallee;
            use wasmtime_environ::component::ComponentBuiltinFunctionIndex;
            // Special case: the component-model lowering trampoline signature.
            if id == const { HostCall::ComponentLowerImport.index() } {
                call!(@host VMLoweringCallee(nonnull, nonnull, u32, u32, nonnull, ptr, ptr, u8, u8, nonnull, size) -> bool);
            }
            // Expand one dispatch arm per component-model builtin.
            macro_rules! component {
                (
                    $(
                        $( #[cfg($attr:meta)] )?
                        $name:ident($($pname:ident: $param:ident ),* ) $(-> $result:ident)?;
                    )*
                ) => {
                    $(
                        $( #[cfg($attr)] )?
                        if id == const { HostCall::ComponentBuiltin(ComponentBuiltinFunctionIndex::$name()).index() } {
                            call!(@builtin($($param),*) $(-> $result)?);
                        }
                    )*
                }
            }
            wasmtime_environ::foreach_builtin_component_function!(component);
        }
        // Every valid `id` returns from within a macro expansion above, so
        // falling through here means an unknown host call.
        return unreachable(());

        // Monomorphizable `unreachable!()` used by the macro arms for
        // argument/result types that can never appear in x-registers.
        fn unreachable<T, U>(_: U) -> T {
            unreachable!()
        }
    }
}
/// Records a trap that occurred at `pc` while interpreting, then `longjmp`s
/// back to the register snapshot taken when interpretation began.
///
/// `kind` is `Some` for traps the interpreter itself classified (e.g.
/// divide-by-zero) and `None` for traps that must be resolved against the
/// store's registered trap state.
fn trap(vm: &mut Vm, pc: NonNull<u8>, kind: Option<TrapKind>, setjmp: Setjmp) {
    let regs = TrapRegisters {
        pc: pc.as_ptr() as usize,
        fp: vm.fp() as usize,
    };
    tls::with(|s| {
        // assumes interpretation only happens with TLS state configured —
        // a `None` here would mean pulley ran outside a wasm call context.
        let s = s.unwrap();
        match kind {
            Some(kind) => {
                // Translate the interpreter's trap classification into the
                // environment-level `Trap` code and record it directly.
                let trap = match kind {
                    TrapKind::IntegerOverflow => Trap::IntegerOverflow,
                    TrapKind::DivideByZero => Trap::IntegerDivisionByZero,
                    TrapKind::BadConversionToInteger => Trap::BadConversionToInteger,
                    TrapKind::MemoryOutOfBounds => Trap::MemoryOutOfBounds,
                    TrapKind::DisabledOpcode => Trap::DisabledOpcode,
                    TrapKind::StackOverflow => Trap::StackOverflow,
                };
                s.set_jit_trap(regs, None, trap);
            }
            None => {
                // Unclassified trap: ask the store whether this pc maps to
                // known wasm code and let it record the trap.
                match s.test_if_trap(regs, None, |_| false) {
                    TrapTest::NotWasm => {
                        panic!("pulley trap at {pc:?} without trap code registered")
                    }
                    // Embedder-handled traps only apply to native signal
                    // handling, not the interpreter.
                    #[cfg(has_host_compiler_backend)]
                    TrapTest::HandledByEmbedder => unreachable!(),
                    TrapTest::Trap { .. } => {}
                }
            }
        }
    });
    // Restore callee-saved registers/fp/lr to the state at entry.
    longjmp(vm, setjmp);
}
/// Captures the interpreter's callee-saved registers plus `fp`/`lr` so that
/// `longjmp` can later restore them when unwinding after a trap.
fn setjmp(vm: &Vm) -> Setjmp {
    let mut saved = Setjmp {
        xregs: [0; 16],
        fregs: [0.0; 16],
        fp: vm.fp(),
        lr: vm.lr(),
    };
    for (slot, reg) in callee_save_xregs() {
        saved.xregs[slot] = vm[reg].get_u64();
    }
    for (slot, reg) in callee_save_fregs() {
        saved.fregs[slot] = vm[reg].get_f64();
    }
    saved
}
/// Restores the register state previously captured by `setjmp`, rewinding
/// the interpreter's callee-saved registers, frame pointer, and link
/// register to their values at the time of the snapshot.
fn longjmp(vm: &mut Vm, setjmp: Setjmp) {
    // SAFETY: the snapshot was taken from this same `Vm`, so the restored
    // fp/lr values describe frames that were live when `setjmp` ran.
    unsafe {
        for (slot, reg) in callee_save_xregs() {
            vm[reg].set_u64(setjmp.xregs[slot]);
        }
        for (slot, reg) in callee_save_fregs() {
            vm[reg].set_f64(setjmp.fregs[slot]);
        }
        vm.set_fp(setjmp.fp);
        vm.set_lr(setjmp.lr);
    }
}
/// Yields `(snapshot_index, register)` pairs for the callee-saved integer
/// registers x16..x32.
fn callee_save_xregs() -> impl Iterator<Item = (usize, XReg)> {
    (16..32).map(|n| (usize::from(n - 16), XReg::new(n).unwrap()))
}
/// Yields `(snapshot_index, register)` pairs for the callee-saved float
/// registers f16..f32.
fn callee_save_fregs() -> impl Iterator<Item = (usize, FReg)> {
    (16..32).map(|n| (usize::from(n - 16), FReg::new(n).unwrap()))
}