mod backtrace;
#[cfg(feature = "coredump")]
#[path = "traphandlers/coredump_enabled.rs"]
mod coredump;
#[cfg(not(feature = "coredump"))]
#[path = "traphandlers/coredump_disabled.rs"]
mod coredump;
use crate::sys::traphandlers;
use crate::{Instance, VMContext, VMRuntimeLimits};
use anyhow::Error;
use std::cell::{Cell, UnsafeCell};
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Once;
pub use self::backtrace::{Backtrace, Frame};
pub use self::coredump::CoreDumpStack;
pub use self::tls::{tls_eager_initialize, AsyncWasmCallState, PreviousAsyncWasmCallState};
pub use traphandlers::SignalHandler;
/// Process-global lookup from a program counter to the wasm trap code at
/// that pc, if any.
///
/// Defaults to "no trap anywhere" and is written exactly once, inside the
/// `Once` in `init_traps` below, before trap handlers can consult it (read
/// later by `CallThreadState::test_if_trap`).
pub(crate) static mut GET_WASM_TRAP: fn(usize) -> Option<wasmtime_environ::Trap> = |_| None;
/// Performs process-wide initialization of trap handling.
///
/// Installs `get_wasm_trap` as the pc-to-trap lookup and runs the
/// platform-specific signal/exception handler setup. The actual work happens
/// only on the first call (guarded by `INIT`); later calls are no-ops apart
/// from the macOS consistency check below.
///
/// # Panics
///
/// On macOS, panics if a call requests a different `macos_use_mach_ports`
/// configuration than the one the process was first initialized with.
pub fn init_traps(
    get_wasm_trap: fn(usize) -> Option<wasmtime_environ::Trap>,
    macos_use_mach_ports: bool,
) {
    static INIT: Once = Once::new();
    INIT.call_once(|| unsafe {
        // `GET_WASM_TRAP` is only ever written here, inside the `Once`, so
        // this is effectively a write-once initialization.
        GET_WASM_TRAP = get_wasm_trap;
        traphandlers::platform_init(macos_use_mach_ports);
    });
    // Even when initialization already happened on an earlier call, verify
    // that this caller agrees with the mach-ports-vs-signals choice made then.
    #[cfg(target_os = "macos")]
    assert_eq!(
        traphandlers::using_mach_ports(),
        macos_use_mach_ports,
        "cannot configure two different methods of signal handling in the same process"
    );
}
/// Per-thread trap-handling initialization, delegated to the platform
/// backend. Invoked lazily from the TLS accessors in the `tls` module the
/// first time a thread touches wasm call state.
fn lazy_per_thread_init() {
    traphandlers::lazy_per_thread_init();
}
/// Raises a trap, unwinding back to the enclosing `catch_traps` via
/// `wasmtime_longjmp`. Never returns.
///
/// # Safety
///
/// Must only be called while a wasm activation is on this thread's stack,
/// i.e. a `CallThreadState` is installed in TLS (the `unwrap` panics
/// otherwise), since only then is there a jump buffer to unwind to.
pub unsafe fn raise_trap(reason: TrapReason) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::Trap(reason)))
}
/// Raises a user-provided error as a trap, unwinding to the enclosing
/// `catch_traps`. `needs_backtrace` controls whether a wasm backtrace is
/// captured during the unwind. Never returns.
///
/// # Safety
///
/// Same contract as [`raise_trap`]: a wasm activation must be active on this
/// thread.
pub unsafe fn raise_user_trap(error: Error, needs_backtrace: bool) -> ! {
    let reason = TrapReason::User {
        error,
        needs_backtrace,
    };
    raise_trap(reason)
}
/// Raises a library-level trap carrying the given trap code, unwinding to
/// the enclosing `catch_traps`. Never returns.
///
/// # Safety
///
/// Same contract as [`raise_trap`]: a wasm activation must be active on this
/// thread.
pub unsafe fn raise_lib_trap(trap: wasmtime_environ::Trap) -> ! {
    let reason = TrapReason::Wasm(trap);
    raise_trap(reason)
}
/// Runs `f`, converting any Rust panic into a longjmp back to the enclosing
/// `catch_traps` so the unwind never crosses wasm (non-Rust) frames. The
/// panic payload is re-raised by `catch_traps` via `resume_unwind`.
///
/// When the crate is compiled with `panic = "abort"` there is nothing to
/// catch, so `f` is simply invoked directly.
///
/// # Safety
///
/// Must be called with an active `CallThreadState` on this thread (the
/// `unwrap` panics otherwise) so there is a jump buffer to unwind to.
pub unsafe fn catch_unwind_and_longjmp<R>(f: impl FnOnce() -> R) -> R {
    #[cfg(panic = "unwind")]
    {
        match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
            Ok(ret) => ret,
            Err(err) => tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(err))),
        }
    }
    #[cfg(not(panic = "unwind"))]
    {
        f()
    }
}
/// A trap that unwound out of wasm, as returned from `catch_traps`.
#[derive(Debug)]
pub struct Trap {
    /// The original reason the trap occurred.
    pub reason: TrapReason,
    /// Wasm backtrace captured at trap time, if enabled for this call.
    pub backtrace: Option<Backtrace>,
    /// Core-dump stack captured at trap time, if enabled for this call.
    pub coredumpstack: Option<CoreDumpStack>,
}
/// Enumeration of the reasons a trap can occur.
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised error (e.g. from `raise_user_trap`).
    User {
        /// The error itself.
        error: Error,
        /// Whether a wasm backtrace still needs to be captured when this
        /// trap unwinds; when `false`, `unwind_with` skips backtrace and
        /// coredump capture entirely.
        needs_backtrace: bool,
    },
    /// A trap that originated in jit-compiled wasm code (detected by the
    /// platform trap handler).
    Jit {
        /// Program counter at which the trap occurred.
        pc: usize,
        /// Faulting address, for memory-related faults, if any.
        faulting_addr: Option<usize>,
        /// The trap code looked up for `pc`.
        trap: wasmtime_environ::Trap,
    },
    /// A trap raised from runtime library code (see `raise_lib_trap`).
    Wasm(wasmtime_environ::Trap),
}
impl TrapReason {
pub fn user_without_backtrace(error: Error) -> Self {
TrapReason::User {
error,
needs_backtrace: true,
}
}
pub fn user_with_backtrace(error: Error) -> Self {
TrapReason::User {
error,
needs_backtrace: false,
}
}
pub fn is_jit(&self) -> bool {
matches!(self, TrapReason::Jit { .. })
}
}
impl From<Error> for TrapReason {
fn from(err: Error) -> Self {
TrapReason::user_without_backtrace(err)
}
}
impl From<wasmtime_environ::Trap> for TrapReason {
    /// Wraps a raw trap code as a `Wasm`-variant trap reason.
    fn from(code: wasmtime_environ::Trap) -> Self {
        Self::Wasm(code)
    }
}
/// Classification of a fault observed by the platform trap handler, as
/// returned from `CallThreadState::test_if_trap`.
pub(crate) enum TrapTest {
    /// The fault is unrelated to wasm; default fault handling should proceed.
    NotWasm,
    /// An embedder-provided signal handler claimed and handled the fault.
    HandledByEmbedder,
    /// A genuine wasm trap: the platform code should longjmp to `jmp_buf`.
    #[cfg_attr(miri, allow(dead_code))]
    Trap {
        /// Jump buffer to longjmp with, taken (cleared) from the
        /// `CallThreadState` so the trap can only be dispatched once.
        jmp_buf: *const u8,
        /// The trap code looked up for the faulting pc.
        trap: wasmtime_environ::Trap,
    },
}
/// Catches any wasm traps that occur while `closure` executes, returning
/// them as an error.
///
/// A fresh `CallThreadState` is pushed onto this thread's TLS activation
/// list for the duration of the call and a setjmp-style jump buffer is
/// established via `wasmtime_setjmp`; traps (or panics crossing wasm frames)
/// longjmp back out and surface here as `Err(Box<Trap>)`.
///
/// # Safety
///
/// `caller` must be a valid `VMContext` pointer whose instance's runtime
/// limits remain valid for the duration of this call.
pub unsafe fn catch_traps<'a, F>(
    signal_handler: Option<*const SignalHandler<'static>>,
    capture_backtrace: bool,
    capture_coredump: bool,
    caller: *mut VMContext,
    mut closure: F,
) -> Result<(), Box<Trap>>
where
    F: FnMut(*mut VMContext),
{
    let limits = Instance::from_vmctx(caller, |i| i.runtime_limits());
    let result = CallThreadState::new(signal_handler, capture_backtrace, capture_coredump, *limits)
        .with(|cx| {
            traphandlers::wasmtime_setjmp(
                cx.jmp_buf.as_ptr(),
                call_closure::<F>,
                &mut closure as *mut F as *mut u8,
                caller,
            )
        });
    return match result {
        Ok(x) => Ok(x),
        Err((UnwindReason::Trap(reason), backtrace, coredumpstack)) => Err(Box::new(Trap {
            reason,
            backtrace,
            coredumpstack,
        })),
        // A Rust panic that was caught before crossing wasm frames:
        // re-raise it now that we're back on the host side of the call.
        #[cfg(panic = "unwind")]
        Err((UnwindReason::Panic(panic), _, _)) => std::panic::resume_unwind(panic),
    };

    // Monomorphized trampoline handed to `wasmtime_setjmp`: recovers the
    // closure from its type-erased `payload` pointer and invokes it.
    extern "C" fn call_closure<F>(payload: *mut u8, caller: *mut VMContext)
    where
        F: FnMut(*mut VMContext),
    {
        unsafe { (*(payload as *mut F))(caller) }
    }
}
/// Module encapsulating `CallThreadState` so that its saved-register fields
/// are private and its `prev` link is only manipulated through the
/// `push`/`pop` pair below, keeping the TLS linked list consistent.
mod call_thread_state {
    use super::*;

    /// Temporary state stored on the stack and registered in the `tls`
    /// module for the duration of a call into wasm.
    pub struct CallThreadState {
        // Storage for the (reason, backtrace, coredump) unwind payload;
        // written by `unwind_with`/`set_jit_trap` immediately before the
        // longjmp and read exactly once afterwards by `read_unwind`.
        pub(super) unwind:
            UnsafeCell<MaybeUninit<(UnwindReason, Option<Backtrace>, Option<CoreDumpStack>)>>,
        // Jump buffer established by `wasmtime_setjmp`; null before setup
        // and after `take_jmp_buf` claims it.
        pub(super) jmp_buf: Cell<*const u8>,
        // Optional embedder-provided signal handler consulted first in
        // `test_if_trap`.
        pub(super) signal_handler: Option<*const SignalHandler<'static>>,
        // Whether to capture a wasm backtrace when a trap unwinds.
        pub(super) capture_backtrace: bool,
        #[cfg(feature = "coredump")]
        pub(super) capture_coredump: bool,
        // Runtime limits of the store this wasm call executes in; home of
        // the `last_wasm_*` register slots saved/restored below.
        pub(crate) limits: *const VMRuntimeLimits,
        // Previous entry in the thread-local activation list; mutated only
        // via `push`/`pop` (and the async save/restore paths in `tls`).
        pub(super) prev: Cell<tls::Ptr>,
        // Snapshots of `VMRuntimeLimits::last_wasm_*` taken at construction,
        // restored on drop so nested activations unwind cleanly.
        old_last_wasm_exit_fp: Cell<usize>,
        old_last_wasm_exit_pc: Cell<usize>,
        old_last_wasm_entry_sp: Cell<usize>,
    }

    impl Drop for CallThreadState {
        fn drop(&mut self) {
            // Restore the saved register values into the store's limits,
            // undoing whatever this activation's wasm execution wrote.
            unsafe {
                *(*self.limits).last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get();
                *(*self.limits).last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get();
                *(*self.limits).last_wasm_entry_sp.get() = self.old_last_wasm_entry_sp.get();
            }
        }
    }

    impl CallThreadState {
        /// Creates a new state, snapshotting the current `last_wasm_*`
        /// values out of `limits` so they can be restored on drop.
        #[inline]
        pub(super) fn new(
            signal_handler: Option<*const SignalHandler<'static>>,
            capture_backtrace: bool,
            capture_coredump: bool,
            limits: *const VMRuntimeLimits,
        ) -> CallThreadState {
            // Silences the unused-variable warning when the `coredump`
            // feature is disabled and the field below is compiled out.
            let _ = capture_coredump;
            CallThreadState {
                unwind: UnsafeCell::new(MaybeUninit::uninit()),
                jmp_buf: Cell::new(ptr::null()),
                signal_handler,
                capture_backtrace,
                #[cfg(feature = "coredump")]
                capture_coredump,
                limits,
                prev: Cell::new(ptr::null()),
                old_last_wasm_exit_fp: Cell::new(unsafe { *(*limits).last_wasm_exit_fp.get() }),
                old_last_wasm_exit_pc: Cell::new(unsafe { *(*limits).last_wasm_exit_pc.get() }),
                old_last_wasm_entry_sp: Cell::new(unsafe { *(*limits).last_wasm_entry_sp.get() }),
            }
        }

        /// Frame pointer of the most recent wasm exit, as of construction.
        pub fn old_last_wasm_exit_fp(&self) -> usize {
            self.old_last_wasm_exit_fp.get()
        }

        /// Program counter of the most recent wasm exit, as of construction.
        pub fn old_last_wasm_exit_pc(&self) -> usize {
            self.old_last_wasm_exit_pc.get()
        }

        /// Stack pointer of the most recent wasm entry, as of construction.
        pub fn old_last_wasm_entry_sp(&self) -> usize {
            self.old_last_wasm_entry_sp.get()
        }

        /// The previous activation in the thread-local list, if any.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        /// Pushes `self` onto this thread's activation list, saving the old
        /// head in `self.prev`. Asserts `self` was not already linked in.
        #[inline]
        pub(crate) unsafe fn push(&self) {
            assert!(self.prev.get().is_null());
            self.prev.set(tls::raw::replace(self));
        }

        /// Pops `self` off this thread's activation list, reinstating the
        /// previous head. Asserts `self` was in fact the current head.
        #[inline]
        pub(crate) unsafe fn pop(&self) {
            let prev = self.prev.replace(ptr::null());
            let head = tls::raw::replace(prev);
            assert!(std::ptr::eq(head, self));
        }
    }
}
pub use call_thread_state::*;
/// Why `unwind_with` is longjmp-ing out of a wasm activation.
enum UnwindReason {
    /// A Rust panic caught before it could unwind through wasm frames; it
    /// is resumed by `catch_traps` after the longjmp.
    #[cfg(panic = "unwind")]
    Panic(Box<dyn std::any::Any + Send>),
    /// A wasm or host trap.
    Trap(TrapReason),
}
impl CallThreadState {
    /// Runs `closure` with `self` installed in TLS, interpreting its
    /// (setjmp-style) integer result: nonzero means normal completion, zero
    /// means a longjmp unwound back here and the stored payload is read out.
    #[inline]
    fn with(
        mut self,
        closure: impl FnOnce(&CallThreadState) -> i32,
    ) -> Result<(), (UnwindReason, Option<Backtrace>, Option<CoreDumpStack>)> {
        let ret = tls::set(&mut self, |me| closure(me));
        if ret != 0 {
            // Nonzero: the closure ran to completion without trapping.
            Ok(())
        } else {
            // Zero: `wasmtime_longjmp` brought control back; `self.unwind`
            // was initialized by `unwind_with`/`set_jit_trap` beforehand.
            Err(unsafe { self.read_unwind() })
        }
    }

    /// Reads the unwind payload out of `self.unwind`.
    ///
    /// Unsafe: must be called at most once, and only after the payload has
    /// actually been written (i.e. on the longjmp return path of `with`).
    #[cold]
    unsafe fn read_unwind(&self) -> (UnwindReason, Option<Backtrace>, Option<CoreDumpStack>) {
        (*self.unwind.get()).as_ptr().read()
    }

    /// Stores `reason` (plus backtrace/coredump, as configured) into
    /// `self.unwind` and longjmps back to the `catch_traps` that established
    /// `jmp_buf`. Never returns.
    fn unwind_with(&self, reason: UnwindReason) -> ! {
        let (backtrace, coredump) = match reason {
            // Panics are resumed on the other side; no wasm backtrace is
            // attached to them.
            #[cfg(panic = "unwind")]
            UnwindReason::Panic(_) => (None, None),
            // `needs_backtrace: false` means the trap's creator opted out
            // of capture here (see `TrapReason::user_with_backtrace`).
            UnwindReason::Trap(TrapReason::User {
                needs_backtrace: false,
                ..
            }) => (None, None),
            UnwindReason::Trap(_) => (
                self.capture_backtrace(self.limits, None),
                self.capture_coredump(self.limits, None),
            ),
        };
        unsafe {
            // Write the payload *before* jumping; `read_unwind` on the other
            // side relies on it being initialized.
            (*self.unwind.get())
                .as_mut_ptr()
                .write((reason, backtrace, coredump));
            traphandlers::wasmtime_longjmp(self.jmp_buf.get());
        }
    }

    /// Trap-handler entry point: classifies a fault at `pc`.
    ///
    /// Checks, in order: no jump buffer means we're not executing wasm at
    /// all; then an embedder signal handler may claim the fault; finally the
    /// global pc-to-trap lookup decides. On a hit the jump buffer is taken
    /// (cleared) and handed back for the platform code to longjmp with.
    #[cfg_attr(miri, allow(dead_code))]
    pub(crate) fn test_if_trap(
        &self,
        pc: *const u8,
        call_handler: impl Fn(&SignalHandler) -> bool,
    ) -> TrapTest {
        // No established jump buffer => this fault can't be a wasm trap of
        // this activation.
        if self.jmp_buf.get().is_null() {
            return TrapTest::NotWasm;
        }
        // Give any custom embedder handler first shot at the fault.
        if let Some(handler) = self.signal_handler {
            if unsafe { call_handler(&*handler) } {
                return TrapTest::HandledByEmbedder;
            }
        }
        // Consult the lookup installed by `init_traps`; a miss means `pc`
        // isn't inside known wasm code.
        let trap = match unsafe { GET_WASM_TRAP(pc as usize) } {
            Some(trap) => trap,
            None => return TrapTest::NotWasm,
        };
        TrapTest::Trap {
            jmp_buf: self.take_jmp_buf(),
            trap,
        }
    }

    /// Takes ownership of the jump buffer, leaving null behind so the trap
    /// can only be dispatched once.
    pub(crate) fn take_jmp_buf(&self) -> *const u8 {
        self.jmp_buf.replace(ptr::null())
    }

    /// Records a jit trap at `pc`/`fp` into `self.unwind`, capturing the
    /// backtrace/coredump now while the faulting frame is still live; the
    /// platform trap handler performs the actual longjmp afterwards.
    #[cfg_attr(miri, allow(dead_code))]
    pub(crate) fn set_jit_trap(
        &self,
        pc: *const u8,
        fp: usize,
        faulting_addr: Option<usize>,
        trap: wasmtime_environ::Trap,
    ) {
        let backtrace = self.capture_backtrace(self.limits, Some((pc as usize, fp)));
        let coredump = self.capture_coredump(self.limits, Some((pc as usize, fp)));
        unsafe {
            (*self.unwind.get()).as_mut_ptr().write((
                UnwindReason::Trap(TrapReason::Jit {
                    pc: pc as usize,
                    faulting_addr,
                    trap,
                }),
                backtrace,
                coredump,
            ));
        }
    }

    /// Captures a wasm backtrace if this activation was configured to;
    /// `trap_pc_and_fp` seeds the stack walk when trapping out of jit code.
    fn capture_backtrace(
        &self,
        limits: *const VMRuntimeLimits,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Option<Backtrace> {
        if !self.capture_backtrace {
            return None;
        }
        Some(unsafe { Backtrace::new_with_trap_state(limits, self, trap_pc_and_fp) })
    }

    /// Iterates over this activation and all previous ones on this thread,
    /// newest first, by following the `prev` links.
    pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &Self> + 'a {
        let mut state = Some(self);
        std::iter::from_fn(move || {
            let this = state?;
            state = unsafe { this.prev().as_ref() };
            Some(this)
        })
    }
}
/// Thread-local storage of the active `CallThreadState` chain, plus support
/// for suspending and resuming that chain across async computations.
pub(crate) mod tls {
    use super::CallThreadState;
    use std::mem;
    use std::ops::Range;
    pub use raw::Ptr;

    /// Low-level accessors for the raw thread-local pointer. Kept in a
    /// separate module so other code goes through the wrappers below.
    pub(super) mod raw {
        use super::CallThreadState;
        use std::cell::Cell;
        use std::ptr;

        pub type Ptr = *const CallThreadState;

        // (head of the activation list, whether this thread has already run
        // `lazy_per_thread_init`)
        thread_local!(static PTR: Cell<(Ptr, bool)> = const { Cell::new((ptr::null(), false)) });

        /// Swaps in `val` as the list head, returning the old head. Runs
        /// one-time per-thread initialization on first use.
        #[cfg_attr(feature = "async", inline(never))]
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn replace(val: Ptr) -> Ptr {
            PTR.with(|p| {
                let (prev, initialized) = p.get();
                if !initialized {
                    super::super::lazy_per_thread_init();
                }
                p.set((val, true));
                prev
            })
        }

        /// Eagerly runs per-thread initialization without touching the list
        /// head; a no-op if this thread was already initialized.
        #[cfg_attr(feature = "async", inline(never))]
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn initialize() {
            PTR.with(|p| {
                let (state, initialized) = p.get();
                if initialized {
                    return;
                }
                super::super::lazy_per_thread_init();
                p.set((state, true));
            })
        }

        /// Returns the current head of this thread's activation list.
        #[cfg_attr(feature = "async", inline(never))]
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn get() -> Ptr {
            PTR.with(|p| p.get().0)
        }
    }

    pub use raw::initialize as tls_eager_initialize;

    /// A saved-but-suspended chain of activations belonging to an async
    /// computation that is not currently running on any thread.
    pub struct AsyncWasmCallState {
        // Head of a linked list (through `CallThreadState::prev`) of saved
        // activations, stored in reverse of the thread-list order (oldest
        // first, as built up by `PreviousAsyncWasmCallState::restore`);
        // null when empty.
        state: raw::Ptr,
    }

    impl AsyncWasmCallState {
        /// Creates an empty saved state.
        pub fn new() -> AsyncWasmCallState {
            AsyncWasmCallState {
                state: std::ptr::null_mut(),
            }
        }

        /// Re-pushes every saved activation onto the current thread's list,
        /// returning a token capturing the thread's prior head so it can be
        /// restored later with `restore`.
        ///
        /// # Safety
        ///
        /// Every pointer in the saved list must still reference a live
        /// `CallThreadState`.
        pub unsafe fn push(self) -> PreviousAsyncWasmCallState {
            // Remember where the thread's list ended before splicing.
            let ret = PreviousAsyncWasmCallState { state: raw::get() };
            // Walk the saved (reversed) list and `push` each entry, which
            // rebuilds the original newest-first order on the thread.
            let mut ptr = self.state;
            while let Some(state) = ptr.as_ref() {
                ptr = state.prev.replace(std::ptr::null_mut());
                state.push();
            }
            ret
        }

        /// Asserts this saved state is empty.
        pub fn assert_null(&self) {
            assert!(self.state.is_null());
        }

        /// Asserts the current thread's head pointer does not lie within
        /// `range`.
        // NOTE(review): presumably used to verify a stack region being torn
        // down isn't still linked into TLS — confirm against callers.
        pub fn assert_current_state_not_in_range(range: Range<usize>) {
            let p = raw::get() as usize;
            assert!(p < range.start || range.end < p);
        }
    }

    /// Token from `AsyncWasmCallState::push` recording the thread's prior
    /// list head; must be consumed with `restore` (dropping it panics).
    pub struct PreviousAsyncWasmCallState {
        // The head that was current before the async chain was spliced on.
        state: raw::Ptr,
    }

    impl PreviousAsyncWasmCallState {
        /// Pops every activation pushed after this token was created, saving
        /// them (in reversed order) into a fresh `AsyncWasmCallState`, and
        /// leaves the thread's list exactly as it was before the `push`.
        ///
        /// # Safety
        ///
        /// Must run on the same thread as the matching `push`, with the
        /// pushed activations still live.
        pub unsafe fn restore(self) -> AsyncWasmCallState {
            let thread_head = self.state;
            // Consumed successfully: defuse the panic-on-drop below.
            mem::forget(self);
            let mut ret = AsyncWasmCallState::new();
            loop {
                // Pop entries until we reach the head that predates the
                // async chain.
                let ptr = raw::get();
                if ptr == thread_head {
                    break ret;
                }
                (*ptr).pop();
                // Prepend the popped (newest-first) entry onto `ret`'s
                // saved list, yielding oldest-first order overall.
                if let Some(state) = ret.state.as_ref() {
                    (*ptr).prev.set(state);
                }
                ret.state = ptr;
            }
        }
    }

    impl Drop for PreviousAsyncWasmCallState {
        fn drop(&mut self) {
            panic!("must be consumed with `restore`");
        }
    }

    /// Installs `state` in TLS for the duration of `closure`, popping it
    /// again even if `closure` panics.
    #[inline]
    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
        // RAII guard guaranteeing the matching `pop` runs on unwind too.
        struct Reset<'a> {
            state: &'a CallThreadState,
        }
        impl Drop for Reset<'_> {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    self.state.pop();
                }
            }
        }
        unsafe {
            state.push();
            let reset = Reset { state };
            closure(reset.state)
        }
    }

    /// Hands the current head of this thread's activation list to `closure`,
    /// or `None` when no wasm call is active on this thread.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
        let p = raw::get();
        unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
    }
}