use crate::prelude::*;
use crate::runtime::vm::table::{Table, TableElementType};
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{Instance, TrapReason, VMGcRef};
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_environ::Unsigned;
use wasmtime_environ::{DataIndex, ElemIndex, FuncIndex, MemoryIndex, TableIndex, Trap};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};
/// Raw `extern "C"` entrypoints for every builtin libcall, generated from
/// `wasmtime_environ::foreach_builtin_function!`. Each generated symbol
/// catches unwinds, recovers the `Instance` from the `vmctx` pointer, and
/// converts the Rust-level result into the raw ABI value.
pub mod raw {
    // The macro expansion below legitimately produces these.
    #![allow(unused_doc_comments, unused_attributes)]
    use crate::runtime::vm::{Instance, TrapReason, VMContext};
    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $( -> $result:ident )?;
            )*
        ) => {
            $(
                // This is the direct entrypoint from the compiled module which
                // still has the raw signature.
                //
                // This will delegate to the outer module to the actual
                // implementation and automatically perform `catch_unwind` along
                // with conversion of the return value in the face of traps.
                #[allow(unused_variables, missing_docs)]
                pub unsafe extern "C" fn $name(
                    vmctx: *mut VMContext,
                    $( $pname : libcall!(@ty $param), )*
                ) $( -> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    {
                        let ret = crate::runtime::vm::traphandlers::catch_unwind_and_longjmp(|| {
                            Instance::from_vmctx(vmctx, |instance| {
                                {
                                    super::$name(instance, $($pname),*)
                                }
                            })
                        });
                        LibcallResult::convert(ret)
                    }
                    // When the libcall is compiled out by its `cfg` the symbol
                    // still exists but must never actually be invoked.
                    $(
                        #[cfg(not($attr))]
                        unreachable!();
                    )?
                }
                // Keep a reference to each generated function alive so the
                // linker does not strip these symbols.
                #[allow(non_upper_case_globals)]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        *mut VMContext,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };
        // Mapping from wasm-level value types to the raw ABI types used at
        // the boundary with compiled code.
        (@ty i32) => (u32);
        (@ty i64) => (u64);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty reference) => (u32);
        (@ty pointer) => (*mut u8);
    }
    wasmtime_environ::foreach_builtin_function!(libcall);
    /// Conversion from a libcall's Rust-level return value to its raw ABI
    /// representation, raising a trap on `Err`.
    trait LibcallResult {
        type Abi;
        unsafe fn convert(self) -> Self::Abi;
    }
    // Infallible libcalls that return nothing.
    impl LibcallResult for () {
        type Abi = ();
        unsafe fn convert(self) {}
    }
    // Fallible libcalls: `Err` longjmps out via `raise_trap` and never
    // returns to the caller.
    impl<T, E> LibcallResult for Result<T, E>
    where
        E: Into<TrapReason>,
    {
        type Abi = T;
        unsafe fn convert(self) -> T {
            match self {
                Ok(t) => t,
                Err(e) => crate::runtime::vm::traphandlers::raise_trap(e.into()),
            }
        }
    }
    // Raw pointer results are passed through unchanged.
    impl LibcallResult for *mut u8 {
        type Abi = *mut u8;
        unsafe fn convert(self) -> *mut u8 {
            self
        }
    }
}
/// Implementation of `memory.grow` for locally-defined memories.
///
/// Returns the previous size of the memory in wasm pages on success, or a
/// pointer-encoded `usize::MAX` when the growth request was refused. Errors
/// reported by the embedder while growing become user traps.
fn memory32_grow(
    instance: &mut Instance,
    delta: u64,
    memory_index: u32,
) -> Result<*mut u8, TrapReason> {
    let memory_index = MemoryIndex::from_u32(memory_index);
    let result =
        match instance
            .memory_grow(memory_index, delta)
            .map_err(|error| TrapReason::User {
                error,
                needs_backtrace: true,
            })? {
            // Translate the returned byte size back into wasm pages.
            Some(size_in_bytes) => size_in_bytes / instance.memory_page_size(memory_index),
            // `None` means growth failed (e.g. a configured limit was hit).
            // Use `usize::MAX` (not the deprecated `usize::max_value()`),
            // consistent with the other libcalls in this file.
            None => usize::MAX,
        };
    Ok(result as *mut _)
}
/// Implementation of `table.grow` for `funcref` tables.
///
/// Returns the table's previous size, or a pointer-encoded `usize::MAX` if
/// the table could not grow.
unsafe fn table_grow_func_ref(
    instance: &mut Instance,
    table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<*mut u8> {
    let table_index = TableIndex::from_u32(table_index);
    // This libcall is only ever invoked for function-reference tables.
    let element = if let TableElementType::Func = instance.table_element_type(table_index) {
        (init_value as *mut VMFuncRef).into()
    } else {
        unreachable!()
    };
    let old_size = instance
        .table_grow(table_index, delta, element)?
        .unwrap_or(usize::MAX);
    Ok(old_size as *mut _)
}
/// Implementation of `table.grow` for GC-reference tables.
///
/// Returns the table's previous size, or a pointer-encoded `usize::MAX` if
/// the table could not grow.
#[cfg(feature = "gc")]
unsafe fn table_grow_gc_ref(
    instance: &mut Instance,
    table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<*mut u8> {
    let table_index = TableIndex::from_u32(table_index);
    let element = match instance.table_element_type(table_index) {
        // Clone the (possibly null) initial reference so the table owns its
        // own copy.
        TableElementType::GcRef => VMGcRef::from_raw_u32(init_value)
            .map(|r| (*instance.store()).unwrap_gc_store_mut().clone_gc_ref(&r))
            .into(),
        // This libcall is never invoked for function-reference tables.
        TableElementType::Func => unreachable!(),
    };
    let old_size = instance
        .table_grow(table_index, delta, element)?
        .unwrap_or(usize::MAX);
    Ok(old_size as *mut _)
}
/// Implementation of `table.fill` for `funcref` tables: store the raw,
/// possibly-null `*mut VMFuncRef` in `val` into `len` slots starting at
/// `dst`, trapping if the range is out of bounds.
unsafe fn table_fill_func_ref(
    instance: &mut Instance,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<(), Trap> {
    let table_index = TableIndex::from_u32(table_index);
    let table = &mut *instance.get_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = val.cast::<VMFuncRef>();
            // NOTE(review): the GC store is passed even for funcref tables;
            // presumably `Table::fill` ignores it for this element type.
            table.fill(
                (*instance.store()).unwrap_gc_store_mut(),
                dst,
                val.into(),
                len,
            )
        }
        // This libcall is never invoked for GC-reference tables.
        TableElementType::GcRef => unreachable!(),
    }
}
/// Implementation of `table.fill` for GC-reference tables: store a clone of
/// the (possibly null) reference encoded in `val` into `len` slots starting
/// at `dst`, trapping if the range is out of bounds.
#[cfg(feature = "gc")]
unsafe fn table_fill_gc_ref(
    instance: &mut Instance,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    let table_index = TableIndex::from_u32(table_index);
    let table = &mut *instance.get_table(table_index);
    match table.element_type() {
        // This libcall is never invoked for function-reference tables.
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_store = (*instance.store()).unwrap_gc_store_mut();
            // `None` here represents a null reference.
            let gc_ref = VMGcRef::from_raw_u32(val);
            // Clone so the table holds its own reference.
            let gc_ref = gc_ref.map(|r| gc_store.clone_gc_ref(&r));
            table.fill(gc_store, dst, gc_ref.into(), len)
        }
    }
}
/// Implementation of the `table.copy` instruction: copy `len` elements from
/// index `src` in `src_table_index` to index `dst` in `dst_table_index`
/// (the two may refer to the same table).
unsafe fn table_copy(
    instance: &mut Instance,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let dst_table = instance.get_table(dst_table_index);
    // Only the range being read needs lazy initialization; saturate at
    // `u64::MAX` if `src + len` overflows.
    let src_range = src..(src.checked_add(len).unwrap_or(u64::MAX));
    let src_table = instance.get_table_with_lazy_init(src_table_index, src_range);
    let gc_store = (*instance.store()).unwrap_gc_store_mut();
    Table::copy(gc_store, dst_table, src_table, dst, src, len)
}
/// Implementation of the `table.init` instruction: copy `len` entries of
/// passive element segment `elem_index`, starting at `src`, into table
/// `table_index` starting at `dst`.
fn table_init(
    instance: &mut Instance,
    table_index: u32,
    elem_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    instance.table_init(
        TableIndex::from_u32(table_index),
        ElemIndex::from_u32(elem_index),
        dst,
        src,
        len,
    )
}
/// Implementation of the `elem.drop` instruction: discard the passive
/// element segment `elem_index`.
fn elem_drop(instance: &mut Instance, elem_index: u32) {
    instance.elem_drop(ElemIndex::from_u32(elem_index))
}
/// Implementation of the `memory.copy` instruction: copy `len` bytes from
/// address `src` of memory `src_index` to address `dst` of memory
/// `dst_index`.
fn memory_copy(
    instance: &mut Instance,
    dst_index: u32,
    dst: u64,
    src_index: u32,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    instance.memory_copy(
        MemoryIndex::from_u32(dst_index),
        dst,
        MemoryIndex::from_u32(src_index),
        src,
        len,
    )
}
/// Implementation of the `memory.fill` instruction: write `len` copies of
/// the low byte of `val` starting at address `dst` of memory `memory_index`.
fn memory_fill(
    instance: &mut Instance,
    memory_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    // Only the low byte of `val` is meaningful for `memory.fill`.
    #[allow(clippy::cast_possible_truncation)]
    let byte = val as u8;
    instance.memory_fill(MemoryIndex::from_u32(memory_index), dst, byte, len)
}
/// Implementation of the `memory.init` instruction: copy `len` bytes of
/// passive data segment `data_index`, starting at `src`, into memory
/// `memory_index` at address `dst`.
fn memory_init(
    instance: &mut Instance,
    memory_index: u32,
    data_index: u32,
    dst: u64,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    instance.memory_init(
        MemoryIndex::from_u32(memory_index),
        DataIndex::from_u32(data_index),
        dst,
        src,
        len,
    )
}
/// Implementation of the `ref.func` instruction: return the raw `VMFuncRef`
/// pointer for the given function index.
fn ref_func(instance: &mut Instance, func_index: u32) -> *mut u8 {
    let index = FuncIndex::from_u32(func_index);
    let func_ref = instance
        .get_func_ref(index)
        .expect("ref_func: funcref should always be available for given func index");
    func_ref.cast()
}
/// Implementation of the `data.drop` instruction: discard the passive data
/// segment `data_index`.
fn data_drop(instance: &mut Instance, data_index: u32) {
    instance.data_drop(DataIndex::from_u32(data_index))
}
/// Get a `funcref` element out of a table, lazily initializing the accessed
/// slot if necessary, and return it as a raw `*mut VMFuncRef`.
///
/// The access is expected to already be bounds-checked by the caller
/// (compiled code); an out-of-bounds index panics here.
unsafe fn table_get_lazy_init_func_ref(
    instance: &mut Instance,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    // Only the single accessed element needs lazy initialization.
    let table = instance.get_table_with_lazy_init(table_index, core::iter::once(index));
    let gc_store = (*instance.store()).unwrap_gc_store_mut();
    let elem = (*table)
        .get(gc_store, index)
        .expect("table access already bounds-checked");
    elem.into_func_ref_asserting_initialized().cast()
}
/// Drop the given GC reference, which must be non-null.
#[cfg(feature = "gc")]
unsafe fn drop_gc_ref(instance: &mut Instance, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    let store = &mut *instance.store();
    store.unwrap_gc_store_mut().drop_gc_ref(gc_ref);
}
/// Trigger a garbage collection from within wasm code.
///
/// `gc_ref` is a raw GC reference to keep rooted across the collection
/// (0 means there is nothing to root). Returns the resulting raw reference,
/// or 0 if the store's `gc` returned `None`.
#[cfg(feature = "gc")]
unsafe fn gc(instance: &mut Instance, gc_ref: u32) -> Result<u32> {
    let gc_ref = VMGcRef::from_raw_u32(gc_ref);
    // Clone so this frame owns a reference across the collection.
    let gc_ref = gc_ref.map(|r| (*instance.store()).unwrap_gc_store_mut().clone_gc_ref(&r));
    if let Some(gc_ref) = &gc_ref {
        // NOTE(review): an extra clone is exposed to wasm before collecting,
        // presumably to keep the raw reference wasm still holds alive —
        // confirm against `expose_gc_ref_to_wasm`'s contract.
        let gc_store = (*instance.store()).unwrap_gc_store_mut();
        let gc_ref = gc_store.clone_gc_ref(gc_ref);
        gc_store.expose_gc_ref_to_wasm(gc_ref);
    }
    match (*instance.store()).gc(gc_ref)? {
        None => Ok(0),
        Some(r) => {
            let raw = r.as_raw_u32();
            // Hand the resulting reference over to wasm before returning
            // its raw encoding.
            (*instance.store())
                .unwrap_gc_store_mut()
                .expose_gc_ref_to_wasm(r);
            Ok(raw)
        }
    }
}
/// Allocate a raw, uninitialized GC object of the given kind, type, size,
/// and alignment, returning its raw GC reference.
///
/// If the first allocation attempt fails, a collection is run and the
/// allocation retried once; a second failure reports heap OOM.
#[cfg(feature = "gc")]
unsafe fn gc_alloc_raw(
    instance: &mut Instance,
    kind: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<u32> {
    use crate::{vm::VMGcHeader, GcHeapOutOfMemory};
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};
    // The object kind is encoded in the high bits of the `u32`.
    let kind = VMGcKind::from_high_bits_of_u32(kind);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})",);
    let module = instance
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");
    // Translate the module-local type index into the engine-wide index
    // stored in the object header.
    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");
    let header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    let layout = Layout::from_size_align(size, align).unwrap();
    let gc_ref = match (*instance.store())
        .unwrap_gc_store_mut()
        .alloc_raw(header, layout)?
    {
        Some(r) => r,
        None => {
            // Heap full: collect garbage and retry once before giving up.
            (*instance.store()).gc(None)?;
            (*instance.store())
                .unwrap_gc_store_mut()
                .alloc_raw(header, layout)?
                .ok_or_else(|| GcHeapOutOfMemory::new(()))
                .err2anyhow()?
        }
    };
    Ok(gc_ref.as_raw_u32())
}
/// Intern a raw, possibly-null `*mut VMFuncRef` into the GC heap's func-ref
/// table and return the resulting table id.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(instance: &mut Instance, func_ref: *mut u8) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;
    // No collection may happen while we hold the raw pointer below.
    let mut store = AutoAssertNoGc::new((*instance.store()).store_opaque_mut());
    // A null pointer interns as `None`.
    let wrapped = NonNull::new(func_ref.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let id = store.unwrap_gc_store_mut().func_ref_table.intern(wrapped);
    Ok(id.into_raw())
}
/// Look up a previously-interned function reference in the GC heap's
/// func-ref table.
///
/// A reserved `module_interned_type_index` requests an untyped lookup;
/// otherwise the entry is fetched with a type check against the
/// corresponding engine type. A `None` entry maps to the null pointer.
#[cfg(feature = "gc")]
unsafe fn get_interned_func_ref(
    instance: &mut Instance,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{packed_option::ReservedValue, ModuleInternedTypeIndex};
    let store = AutoAssertNoGc::new((*instance.store()).store_opaque_mut());
    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);
    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = instance.engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };
    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}
/// Implementation of the `array.new_data` instruction: allocate a new array
/// of `len` elements of type `array_type_index`, initialized from `len`
/// elements' worth of bytes of passive data segment `data_index` starting at
/// byte offset `src`.
///
/// Returns the raw GC reference of the new array, exposed to wasm.
#[cfg(feature = "gc")]
unsafe fn array_new_data(
    instance: &mut Instance,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<u32> {
    use crate::{ArrayType, GcHeapOutOfMemory};
    use wasmtime_environ::ModuleInternedTypeIndex;
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let data_range = instance.wasm_data_range(data_index);
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index((*instance.store()).engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type have a defined byte size");
    // Total byte length of the initializer; overflow traps.
    let byte_len = len
        .checked_mul(one_elem_size)
        .and_then(|x| usize::try_from(x).ok())
        .ok_or_else(|| Trap::MemoryOutOfBounds.into_anyhow())?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds.into_anyhow())?;
    // Bounds-check the source range within the data segment.
    let data = instance
        .wasm_data(data_range)
        .get(src..)
        .and_then(|d| d.get(..byte_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds.into_anyhow())?;
    let gc_layout = (*instance.store())
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = gc_layout.unwrap_array();
    let array_ref = match (*instance.store())
        .unwrap_gc_store_mut()
        .alloc_uninit_array(shared_ty, len, &array_layout)?
    {
        Some(a) => a,
        None => {
            // Heap full: collect garbage and retry once before reporting OOM.
            (*instance.store()).gc(None)?;
            (*instance.store())
                .unwrap_gc_store_mut()
                // Bug fix: the retry must allocate `len` *elements*, matching
                // the first attempt above; it previously passed `byte_len`
                // (the byte count), over-allocating the array.
                .alloc_uninit_array(shared_ty, len, &array_layout)?
                .ok_or_else(|| GcHeapOutOfMemory::new(()).into_anyhow())?
        }
    };
    // Copy the initializer bytes into the array's element storage, which
    // starts `base_size` bytes into the object.
    (*instance.store())
        .unwrap_gc_store_mut()
        .gc_object_data(array_ref.as_gc_ref())
        .copy_from_slice(array_layout.base_size, data);
    let raw = array_ref.as_gc_ref().as_raw_u32();
    // Hand the reference over to wasm before returning its raw encoding.
    (*instance.store())
        .unwrap_gc_store_mut()
        .expose_gc_ref_to_wasm(array_ref.into());
    Ok(raw)
}
/// Implementation of the `array.init_data` instruction: copy `len` elements'
/// worth of bytes from passive data segment `data_index` (starting at byte
/// offset `src`) into the array `array`, starting at element index `dst`.
#[cfg(feature = "gc")]
unsafe fn array_init_data(
    instance: &mut Instance,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );
    // A null array reference traps.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference.into_anyhow())?;
    let array = gc_ref
        .into_arrayref(&*(*instance.store()).unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");
    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds.into_anyhow())?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds.into_anyhow())?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds.into_anyhow())?;
    let array_len = array.len((*instance.store()).store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds.into_anyhow())?;
    // The destination range must lie entirely within the array.
    if dst
        .checked_add(len)
        .ok_or_else(|| Trap::ArrayOutOfBounds.into_anyhow())?
        > array_len
    {
        return Err(Trap::ArrayOutOfBounds.into_anyhow());
    }
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index((*instance.store()).engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type have a defined byte size");
    // Number of bytes to copy; overflow traps as memory-out-of-bounds.
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds.into_anyhow())?;
    let data_range = instance.wasm_data_range(data_index);
    // Bounds-check the source range within the data segment.
    let data = instance
        .wasm_data(data_range)
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds.into_anyhow())?;
    // Byte offset of element `dst` within the array's element storage;
    // NOTE(review): these unwraps assume the in-bounds check above rules
    // out overflow here.
    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();
    let array_layout = (*instance.store())
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();
    // Element storage starts `base_size` bytes into the object.
    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();
    (*instance.store())
        .unwrap_gc_store_mut()
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);
    Ok(())
}
/// Implementation of the `array.new_elem` instruction: allocate a new array
/// of type `array_type_index` initialized from `len` entries of passive
/// element segment `elem_index` starting at entry `src`.
///
/// Returns the raw GC reference of the new array, exposed to wasm.
#[cfg(feature = "gc")]
unsafe fn array_new_elem(
    instance: &mut Instance,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<u32> {
    use crate::{
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
        ArrayRef, ArrayRefPre, ArrayType, Func, GcHeapOutOfMemory, RootedGcRefImpl, Val,
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);
    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?;
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index((*instance.store()).engine(), shared_ty);
    let elem_ty = array_ty.element_type();
    let pre = ArrayRefPre::_new((*instance.store()).store_opaque_mut(), array_ty);
    // Rooted values created below are scoped to this LIFO region.
    instance.with_gc_lifo_scope(|instance| {
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            // Function-index segments: resolve each index to a funcref value.
            TableSegmentElements::Functions(fs) => {
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())?
                        .iter()
                        .map(|f| {
                            let raw_func_ref =
                                instance.get_func_ref(*f).unwrap_or(core::ptr::null_mut());
                            let func = Func::from_vm_func_ref(
                                (*instance.store()).store_opaque_mut(),
                                raw_func_ref,
                            );
                            Val::FuncRef(func)
                        }),
                );
            }
            // Const-expression segments: evaluate each initializer expression.
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())?;
                let mut const_context = ConstEvalContext::new(instance);
                let mut const_evaluator = ConstExprEvaluator::default();
                vals.extend(xs.iter().map(|x| unsafe {
                    let raw = const_evaluator
                        .eval(&mut const_context, x)
                        .expect("const expr should be valid");
                    let mut store =
                        AutoAssertNoGc::new((*const_context.instance.store()).store_opaque_mut());
                    Val::_from_raw(&mut store, raw, elem_ty.unwrap_val_type())
                }));
            }
        }
        // Allocate the array; on heap OOM, collect garbage and retry once.
        let array = match ArrayRef::_new_fixed((*instance.store()).store_opaque_mut(), &pre, &vals)
        {
            Ok(a) => a,
            Err(e) if e.is::<GcHeapOutOfMemory<()>>() => {
                (*instance.store()).gc(None)?;
                ArrayRef::_new_fixed((*instance.store()).store_opaque_mut(), &pre, &vals)?
            }
            Err(e) => return Err(e),
        };
        let mut store = AutoAssertNoGc::new((*instance.store()).store_opaque_mut());
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = gc_ref.as_raw_u32();
        // Hand the reference over to wasm before returning its raw encoding.
        store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })
}
/// Implementation of the `array.init_elem` instruction: copy `len` entries
/// of passive element segment `elem_index` (starting at entry `src`) into
/// the array `array`, starting at element index `dst`.
#[cfg(feature = "gc")]
unsafe fn array_init_elem(
    instance: &mut Instance,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
        ArrayRef, Func, Val,
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};
    // Rooted values created below are scoped to this LIFO region.
    instance.with_gc_lifo_scope(|instance| {
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);
        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );
        // A null array reference traps.
        let array =
            VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference.into_anyhow())?;
        let array = {
            let mut no_gc = AutoAssertNoGc::new((*instance.store()).store_opaque_mut());
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };
        let array_len = array._len((*instance.store()).store_opaque())?;
        log::trace!("array_len = {array_len}");
        // The destination range must lie entirely within the array.
        if dst
            .checked_add(len)
            .ok_or_else(|| Trap::ArrayOutOfBounds.into_anyhow())?
            > array_len
        {
            return Err(Trap::ArrayOutOfBounds.into_anyhow());
        }
        let mut storage = None;
        let elements = instance.passive_element_segment(&mut storage, elem_index);
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?;
        let vals = match elements {
            // Function-index segments: resolve each index to a funcref value.
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.get_func_ref(*f).unwrap_or(core::ptr::null_mut());
                    let func = Func::from_vm_func_ref(
                        (*instance.store()).store_opaque_mut(),
                        raw_func_ref,
                    );
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            // Const-expression segments: evaluate each initializer expression.
            TableSegmentElements::Expressions(xs) => {
                let elem_ty = array
                    ._ty((*instance.store()).store_opaque())?
                    .element_type();
                let elem_ty = elem_ty.unwrap_val_type();
                let mut const_context = ConstEvalContext::new(instance);
                let mut const_evaluator = ConstExprEvaluator::default();
                xs.get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())?
                    .iter()
                    .map(|x| unsafe {
                        let raw = const_evaluator
                            .eval(&mut const_context, x)
                            .expect("const expr should be valid");
                        let mut store = AutoAssertNoGc::new(
                            (*const_context.instance.store()).store_opaque_mut(),
                        );
                        Val::_from_raw(&mut store, raw, elem_ty)
                    })
                    .collect::<Vec<_>>()
            }
        };
        // Write the values into the destination range one at a time.
        let store = (*instance.store()).store_opaque_mut();
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(store, j, val)?;
        }
        Ok(())
    })
}
/// Stub for the `array.copy` instruction.
///
/// Not yet implemented; always returns an error.
#[cfg(feature = "gc")]
unsafe fn array_copy(
    _instance: &mut Instance,
    _dst_array_type_index: u32,
    _dst_array: u32,
    _dst_index: u32,
    _src_array_type_index: u32,
    _src_array: u32,
    _src_index: u32,
    _len: u32,
) -> Result<()> {
    bail!("the `array.copy` instruction is not yet implemented")
}
/// Implementation of `memory.atomic.notify`: wake up to `count` waiters
/// parked on `addr_index` of the given memory, returning how many were
/// woken.
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    instance: &mut Instance,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = instance.get_runtime_memory(MemoryIndex::from_u32(memory_index));
    memory.atomic_notify(addr_index, count)
}
/// Implementation of `memory.atomic.wait32`: block until notified or timed
/// out, provided the 32-bit value at `addr_index` equals `expected`.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    instance: &mut Instance,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    // A timeout that is negative when reinterpreted as `i64` means
    // "wait forever".
    let timeout = if (timeout as i64) < 0 {
        None
    } else {
        Some(Duration::from_nanos(timeout))
    };
    let memory = instance.get_runtime_memory(MemoryIndex::from_u32(memory_index));
    let outcome = memory.atomic_wait32(addr_index, expected, timeout)?;
    Ok(outcome as u32)
}
/// Implementation of `memory.atomic.wait64`: block until notified or timed
/// out, provided the 64-bit value at `addr_index` equals `expected`.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    instance: &mut Instance,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    // A timeout that is negative when reinterpreted as `i64` means
    // "wait forever".
    let timeout = if (timeout as i64) < 0 {
        None
    } else {
        Some(Duration::from_nanos(timeout))
    };
    let memory = instance.get_runtime_memory(MemoryIndex::from_u32(memory_index));
    let outcome = memory.atomic_wait64(addr_index, expected, timeout)?;
    Ok(outcome as u32)
}
/// Invoked by compiled code when fuel is exhausted; delegates to the
/// store's `out_of_gas` handler.
unsafe fn out_of_gas(instance: &mut Instance) -> Result<()> {
    (*instance.store()).out_of_gas()
}
/// Invoked by compiled code when the current epoch deadline is reached;
/// delegates to the store's `new_epoch` handler and returns its `u64`
/// result.
unsafe fn new_epoch(instance: &mut Instance) -> Result<u64> {
    (*instance.store()).new_epoch()
}
/// wmemcheck hook run after the guest's `malloc`: record the allocation at
/// `addr` of `len` bytes and re-enable access checking. Returns `Ok(0)` on
/// success or when checking is disabled.
#[cfg(feature = "wmemcheck")]
unsafe fn check_malloc(instance: &mut Instance, addr: u32, len: u32) -> Result<u32> {
    let state = match &mut instance.wmemcheck_state {
        Some(state) => state,
        None => return Ok(0),
    };
    let result = state.malloc(addr as usize, len as usize);
    // Re-enable checking now that the allocator is done.
    state.memcheck_on();
    match result {
        Ok(()) => Ok(0),
        Err(DoubleMalloc { addr, len }) => {
            bail!("Double malloc at addr {:#x} of size {}", addr, len)
        }
        Err(OutOfBounds { addr, len }) => {
            bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len)
        }
        _ => panic!("unreachable"),
    }
}
/// wmemcheck hook run after the guest's `free`: record the deallocation at
/// `addr` and re-enable access checking. Returns `Ok(0)` on success or when
/// checking is disabled.
#[cfg(feature = "wmemcheck")]
unsafe fn check_free(instance: &mut Instance, addr: u32) -> Result<u32> {
    let state = match &mut instance.wmemcheck_state {
        Some(state) => state,
        None => return Ok(0),
    };
    let result = state.free(addr as usize);
    // Re-enable checking now that the allocator is done.
    state.memcheck_on();
    match result {
        Ok(()) => Ok(0),
        Err(InvalidFree { addr }) => bail!("Invalid free at addr {:#x}", addr),
        _ => panic!("unreachable"),
    }
}
/// wmemcheck hook run before a guest load of `num_bytes` at
/// `addr + offset`. Returns `Ok(0)` when the access is valid or checking is
/// disabled, otherwise an error describing the invalid read.
#[cfg(feature = "wmemcheck")]
fn check_load(instance: &mut Instance, num_bytes: u32, addr: u32, offset: u32) -> Result<u32> {
    let state = match &mut instance.wmemcheck_state {
        Some(state) => state,
        None => return Ok(0),
    };
    match state.read(addr as usize + offset as usize, num_bytes as usize) {
        Ok(()) => Ok(0),
        Err(InvalidRead { addr, len }) => {
            bail!("Invalid load at addr {:#x} of size {}", addr, len)
        }
        Err(OutOfBounds { addr, len }) => {
            bail!("Load out of bounds at addr {:#x} of size {}", addr, len)
        }
        _ => panic!("unreachable"),
    }
}
/// wmemcheck hook run before a guest store of `num_bytes` at
/// `addr + offset`. Returns `Ok(0)` when the access is valid or checking is
/// disabled, otherwise an error describing the invalid write.
#[cfg(feature = "wmemcheck")]
fn check_store(instance: &mut Instance, num_bytes: u32, addr: u32, offset: u32) -> Result<u32> {
    let state = match &mut instance.wmemcheck_state {
        Some(state) => state,
        None => return Ok(0),
    };
    match state.write(addr as usize + offset as usize, num_bytes as usize) {
        Ok(()) => Ok(0),
        Err(InvalidWrite { addr, len }) => {
            bail!("Invalid store at addr {:#x} of size {}", addr, len)
        }
        Err(OutOfBounds { addr, len }) => {
            bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
        }
        _ => panic!("unreachable"),
    }
}
/// wmemcheck hook run when the guest enters `malloc`: temporarily disable
/// access checking while the allocator manipulates its own metadata.
#[cfg(feature = "wmemcheck")]
fn malloc_start(instance: &mut Instance) {
    if let Some(state) = instance.wmemcheck_state.as_mut() {
        state.memcheck_off();
    }
}
/// wmemcheck hook run when the guest enters `free`: temporarily disable
/// access checking while the allocator manipulates its own metadata.
#[cfg(feature = "wmemcheck")]
fn free_start(instance: &mut Instance) {
    if let Some(state) = instance.wmemcheck_state.as_mut() {
        state.memcheck_off();
    }
}
/// wmemcheck hook for guest stack-pointer updates; currently a no-op.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_instance: &mut Instance, _value: u32) {
}
/// wmemcheck hook run after a memory grow: inform the checker of the new
/// memory size, given in wasm pages.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(instance: &mut Instance, num_pages: u32) {
    if let Some(state) = instance.wmemcheck_state.as_mut() {
        // Each wasm page is 64 KiB.
        let bytes_per_page: usize = 64 * 1024;
        state.update_mem_size(num_pages as usize * bytes_per_page);
    }
}
/// Raise a wasm trap whose code was embedded by the compiler as a `u8`.
fn trap(_instance: &mut Instance, code: u8) -> Result<(), TrapReason> {
    let trap = wasmtime_environ::Trap::from_u8(code).unwrap();
    Err(TrapReason::Wasm(trap))
}
/// Trapping conversion of an `f64` to a signed 64-bit integer: traps on NaN
/// and on values whose truncation is out of the `i64` range.
fn f64_to_i64(_instance: &mut Instance, val: f64) -> Result<u64, TrapReason> {
    if val.is_nan() {
        return Err(TrapReason::Wasm(Trap::BadConversionToInteger));
    }
    let truncated = relocs::truncf64(val);
    // Values at or beyond these bounds cannot be represented as `i64`.
    let in_range = truncated > -9223372036854777856.0 && truncated < 9223372036854775808.0;
    if !in_range {
        return Err(TrapReason::Wasm(Trap::IntegerOverflow));
    }
    #[allow(clippy::cast_possible_truncation)]
    Ok((truncated as i64).unsigned())
}
/// Trapping conversion of an `f64` to an unsigned 64-bit integer: traps on
/// NaN and on values whose truncation is out of the `u64` range.
fn f64_to_u64(_instance: &mut Instance, val: f64) -> Result<u64, TrapReason> {
    if val.is_nan() {
        return Err(TrapReason::Wasm(Trap::BadConversionToInteger));
    }
    let truncated = relocs::truncf64(val);
    // Representable values lie strictly between -1 and 2^64.
    let in_range = truncated > -1.0 && truncated < 18446744073709551616.0;
    if !in_range {
        return Err(TrapReason::Wasm(Trap::IntegerOverflow));
    }
    #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
    Ok(truncated as u64)
}
/// Trapping conversion of an `f64` to a signed 32-bit integer: traps on NaN
/// and on values whose truncation is out of the `i32` range.
fn f64_to_i32(_instance: &mut Instance, val: f64) -> Result<u32, TrapReason> {
    if val.is_nan() {
        return Err(TrapReason::Wasm(Trap::BadConversionToInteger));
    }
    let truncated = relocs::truncf64(val);
    // Representable values lie strictly between -2^31 - 1 and 2^31.
    let in_range = truncated > -2147483649.0 && truncated < 2147483648.0;
    if !in_range {
        return Err(TrapReason::Wasm(Trap::IntegerOverflow));
    }
    #[allow(clippy::cast_possible_truncation)]
    Ok((truncated as i32).unsigned())
}
/// Trapping conversion of an `f64` to an unsigned 32-bit integer: traps on
/// NaN and on values whose truncation is out of the `u32` range.
fn f64_to_u32(_instance: &mut Instance, val: f64) -> Result<u32, TrapReason> {
    if val.is_nan() {
        return Err(TrapReason::Wasm(Trap::BadConversionToInteger));
    }
    let truncated = relocs::truncf64(val);
    // Representable values lie strictly between -1 and 2^32.
    let in_range = truncated > -1.0 && truncated < 4294967296.0;
    if !in_range {
        return Err(TrapReason::Wasm(Trap::IntegerOverflow));
    }
    #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
    Ok(truncated as u32)
}
#[allow(missing_docs)]
pub mod relocs {
    //! Functions used as relocation targets by compiled code: software
    //! fallbacks for float operations (and one vector shuffle) when the
    //! target ISA lacks a suitable native instruction.

    /// Picks the `std` float method when the `std` feature is enabled and
    /// the `libm` equivalent otherwise.
    macro_rules! float_function {
        (std: $std:path, core: $core:path,) => {{
            #[cfg(feature = "std")]
            let func = $std;
            #[cfg(not(feature = "std"))]
            let func = $core;
            func
        }};
    }
    /// `f32` round-toward-negative-infinity.
    pub extern "C" fn floorf32(f: f32) -> f32 {
        let func = float_function! {
            std: f32::floor,
            core: libm::floorf,
        };
        func(f)
    }
    /// `f64` round-toward-negative-infinity.
    pub extern "C" fn floorf64(f: f64) -> f64 {
        let func = float_function! {
            std: f64::floor,
            core: libm::floor,
        };
        func(f)
    }
    /// `f32` round-toward-positive-infinity.
    pub extern "C" fn ceilf32(f: f32) -> f32 {
        let func = float_function! {
            std: f32::ceil,
            core: libm::ceilf,
        };
        func(f)
    }
    /// `f64` round-toward-positive-infinity.
    pub extern "C" fn ceilf64(f: f64) -> f64 {
        let func = float_function! {
            std: f64::ceil,
            core: libm::ceil,
        };
        func(f)
    }
    /// `f32` round-toward-zero.
    pub extern "C" fn truncf32(f: f32) -> f32 {
        let func = float_function! {
            std: f32::trunc,
            core: libm::truncf,
        };
        func(f)
    }
    /// `f64` round-toward-zero.
    pub extern "C" fn truncf64(f: f64) -> f64 {
        let func = float_function! {
            std: f64::trunc,
            core: libm::trunc,
        };
        func(f)
    }
    // Adding then subtracting 1/EPSILON forces a value to round to an
    // integer using the FPU's current rounding mode.
    const TOINT_32: f32 = 1.0 / f32::EPSILON;
    const TOINT_64: f64 = 1.0 / f64::EPSILON;
    /// Round an `f32` to the nearest integer.
    pub extern "C" fn nearestf32(x: f32) -> f32 {
        let i = x.to_bits();
        // Biased exponent of `x`.
        let e = i >> 23 & 0xff;
        if e >= 0x7f_u32 + 23 {
            // Magnitude >= 2^23: already integral, infinite, or NaN.
            if e == 0xff {
                if i & 0x7fffff != 0 {
                    // NaN input: return it with the quiet bit (bit 22) set.
                    return f32::from_bits(i | (1 << 22));
                }
            }
            x
        } else {
            let abs = float_function! {
                std: f32::abs,
                core: libm::fabsf,
            };
            let copysign = float_function! {
                std: f32::copysign,
                core: libm::copysignf,
            };
            // Round via the TOINT trick on the absolute value, then restore
            // the original sign (preserving -0.0).
            copysign(abs(x) + TOINT_32 - TOINT_32, x)
        }
    }
    /// Round an `f64` to the nearest integer.
    pub extern "C" fn nearestf64(x: f64) -> f64 {
        let i = x.to_bits();
        // Biased exponent of `x`.
        let e = i >> 52 & 0x7ff;
        if e >= 0x3ff_u64 + 52 {
            // Magnitude >= 2^52: already integral, infinite, or NaN.
            if e == 0x7ff {
                if i & 0xfffffffffffff != 0 {
                    // NaN input: return it with the quiet bit (bit 51) set.
                    return f64::from_bits(i | (1 << 51));
                }
            }
            x
        } else {
            let abs = float_function! {
                std: f64::abs,
                core: libm::fabs,
            };
            let copysign = float_function! {
                std: f64::copysign,
                core: libm::copysign,
            };
            // Round via the TOINT trick on the absolute value, then restore
            // the original sign (preserving -0.0).
            copysign(abs(x) + TOINT_64 - TOINT_64, x)
        }
    }
    /// `f32` fused multiply-add: `a * b + c`.
    pub extern "C" fn fmaf32(a: f32, b: f32, c: f32) -> f32 {
        let func = float_function! {
            std: f32::mul_add,
            core: libm::fmaf,
        };
        func(a, b, c)
    }
    /// `f64` fused multiply-add: `a * b + c`.
    pub extern "C" fn fmaf64(a: f64, b: f64, c: f64) -> f64 {
        let func = float_function! {
            std: f64::mul_add,
            core: libm::fma,
        };
        func(a, b, c)
    }
    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::__m128i;
    /// Software implementation of the x86 `pshufb` semantics: each output
    /// byte selects a byte of `a` by the low nibble of the corresponding
    /// byte of `b`, or is zero when that byte's high bit is set.
    #[cfg(target_arch = "x86_64")]
    #[allow(improper_ctypes_definitions)]
    pub extern "C" fn x86_pshufb(a: __m128i, b: __m128i) -> __m128i {
        // Used to reinterpret the SSE register as raw bytes and back.
        union U {
            reg: __m128i,
            mem: [u8; 16],
        }
        unsafe {
            let a = U { reg: a }.mem;
            let b = U { reg: b }.mem;
            let select = |arr: &[u8; 16], byte: u8| {
                if byte & 0x80 != 0 {
                    0x00
                } else {
                    arr[(byte & 0xf) as usize]
                }
            };
            U {
                mem: [
                    select(&a, b[0]),
                    select(&a, b[1]),
                    select(&a, b[2]),
                    select(&a, b[3]),
                    select(&a, b[4]),
                    select(&a, b[5]),
                    select(&a, b[6]),
                    select(&a, b[7]),
                    select(&a, b[8]),
                    select(&a, b[9]),
                    select(&a, b[10]),
                    select(&a, b[11]),
                    select(&a, b[12]),
                    select(&a, b[13]),
                    select(&a, b[14]),
                    select(&a, b[15]),
                ],
            }
            .reg
        }
    }
}