#![deny(missing_docs)]
#![warn(clippy::cast_sign_loss)]
use crate::prelude::*;
use crate::store::StoreOpaque;
use alloc::sync::Arc;
use core::fmt;
use core::mem;
use core::ops::Deref;
use core::ops::DerefMut;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};
use wasmtime_environ::{
DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleInternedTypeIndex, VMOffsets,
VMSharedTypeIndex,
};
mod arch;
mod async_yield;
#[cfg(feature = "component-model")]
pub mod component;
mod const_expr;
mod export;
mod gc;
mod imports;
mod instance;
mod memory;
mod mmap;
mod mmap_vec;
mod send_sync_ptr;
mod store_box;
mod sys;
mod table;
mod traphandlers;
mod vmcontext;
mod threads;
pub use self::threads::*;
#[cfg(feature = "debug-builtins")]
pub mod debug_builtins;
pub mod libcalls;
pub mod mpk;
#[cfg(feature = "debug-builtins")]
pub use wasmtime_jit_debug::gdb_jit_int::GdbJitImageRegistration;
pub use crate::runtime::vm::arch::get_stack_pointer;
pub use crate::runtime::vm::async_yield::*;
pub use crate::runtime::vm::export::*;
pub use crate::runtime::vm::gc::*;
pub use crate::runtime::vm::imports::Imports;
pub use crate::runtime::vm::instance::{
GcHeapAllocationIndex, Instance, InstanceAllocationRequest, InstanceAllocator,
InstanceAllocatorImpl, InstanceHandle, MemoryAllocationIndex, OnDemandInstanceAllocator,
StorePtr, TableAllocationIndex,
};
#[cfg(feature = "pooling-allocator")]
pub use crate::runtime::vm::instance::{
InstanceLimits, PoolConcurrencyLimitError, PoolingInstanceAllocator,
PoolingInstanceAllocatorConfig,
};
pub use crate::runtime::vm::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};
pub use crate::runtime::vm::mmap::Mmap;
pub use crate::runtime::vm::mmap_vec::MmapVec;
pub use crate::runtime::vm::mpk::MpkEnabled;
pub use crate::runtime::vm::store_box::*;
pub use crate::runtime::vm::sys::unwind::UnwindRegistration;
pub use crate::runtime::vm::table::{Table, TableElement};
pub use crate::runtime::vm::traphandlers::*;
pub use crate::runtime::vm::vmcontext::{
VMArrayCallFunction, VMArrayCallHostFuncContext, VMContext, VMFuncRef, VMFunctionBody,
VMFunctionImport, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport,
VMOpaqueContext, VMRuntimeLimits, VMTableImport, VMWasmCallFunction, ValRaw,
};
pub use send_sync_ptr::SendSyncPtr;
mod module_id;
pub use module_id::CompiledModuleId;
mod cow;
pub use crate::runtime::vm::cow::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
/// A trait implemented by stores, giving the runtime access to the store's
/// [`StoreOpaque`] plus a set of runtime callbacks (growth limiting,
/// fuel/epoch handling, GC, component-model call state).
///
/// # Safety
///
/// NOTE(review): this trait is `unsafe` but the exact contract implementors
/// must uphold is not visible in this file — confirm and document it at the
/// implementation site.
pub unsafe trait VMStore {
    /// Shared access to the store's opaque internals.
    fn store_opaque(&self) -> &StoreOpaque;
    /// Exclusive access to the store's opaque internals.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque;
    /// Callback for a linear memory growing from `current` to `desired`
    /// (with optional `maximum`); presumably returns `Ok(true)` to permit
    /// the growth — confirm against callers.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error>;
    /// Callback invoked when a memory grow operation fails with `error`.
    fn memory_grow_failed(&mut self, error: Error) -> Result<()>;
    /// Table analogue of [`VMStore::memory_growing`]; sizes are presumably
    /// in elements rather than bytes — confirm against callers.
    fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error>;
    /// Callback invoked when a table grow operation fails with `error`.
    fn table_grow_failed(&mut self, error: Error) -> Result<()>;
    /// Callback invoked when the store runs out of gas/fuel.
    fn out_of_gas(&mut self) -> Result<(), Error>;
    /// Callback invoked when an epoch deadline is reached; presumably
    /// returns the new deadline — confirm against callers.
    fn new_epoch(&mut self) -> Result<u64, Error>;
    /// Performs a garbage collection; `root` (if any) is passed through and
    /// an equivalent (possibly updated) reference is returned.
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>>;
    /// Accessor for component-model call-context state.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut component::CallContexts;
}
impl Deref for dyn VMStore {
type Target = StoreOpaque;
fn deref(&self) -> &Self::Target {
self.store_opaque()
}
}
impl DerefMut for dyn VMStore {
fn deref_mut(&mut self) -> &mut Self::Target {
self.store_opaque_mut()
}
}
/// Runtime information about a module: either a fully compiled
/// [`crate::Module`] or a "bare" module carrying only environment-level
/// metadata and no compiled code.
#[derive(Clone)]
pub enum ModuleRuntimeInfo {
    /// A fully compiled module.
    Module(crate::Module),
    /// A synthetic module without compiled code (boxed to keep the enum
    /// small).
    Bare(Box<BareModuleInfo>),
}
/// Payload of [`ModuleRuntimeInfo::Bare`]: environment-level module
/// metadata without any associated compiled code.
#[derive(Clone)]
pub struct BareModuleInfo {
    /// The underlying module description.
    module: Arc<wasmtime_environ::Module>,
    /// Optional single engine-level type index carried by this module.
    one_signature: Option<VMSharedTypeIndex>,
    /// Precomputed `VMContext` field offsets for `module`.
    offsets: VMOffsets<HostPtr>,
}
impl ModuleRuntimeInfo {
pub(crate) fn bare(module: Arc<wasmtime_environ::Module>) -> Self {
ModuleRuntimeInfo::bare_maybe_imported_func(module, None)
}
pub(crate) fn bare_maybe_imported_func(
module: Arc<wasmtime_environ::Module>,
one_signature: Option<VMSharedTypeIndex>,
) -> Self {
ModuleRuntimeInfo::Bare(Box::new(BareModuleInfo {
offsets: VMOffsets::new(HostPtr, &module),
module,
one_signature,
}))
}
pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
match self {
ModuleRuntimeInfo::Module(m) => m.env_module(),
ModuleRuntimeInfo::Bare(b) => &b.module,
}
}
fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
match self {
ModuleRuntimeInfo::Module(m) => m
.code_object()
.signatures()
.shared_type(module_index)
.expect("bad module-level interned type index"),
ModuleRuntimeInfo::Bare(_) => unreachable!(),
}
}
fn function(&self, index: DefinedFuncIndex) -> NonNull<VMWasmCallFunction> {
let module = match self {
ModuleRuntimeInfo::Module(m) => m,
ModuleRuntimeInfo::Bare(_) => unreachable!(),
};
let ptr = module
.compiled_module()
.finished_function(index)
.as_ptr()
.cast::<VMWasmCallFunction>()
.cast_mut();
NonNull::new(ptr).unwrap()
}
fn array_to_wasm_trampoline(&self, index: DefinedFuncIndex) -> Option<VMArrayCallFunction> {
let m = match self {
ModuleRuntimeInfo::Module(m) => m,
ModuleRuntimeInfo::Bare(_) => unreachable!(),
};
let ptr = m
.compiled_module()
.array_to_wasm_trampoline(index)?
.as_ptr();
Some(unsafe { mem::transmute::<*const u8, VMArrayCallFunction>(ptr) })
}
fn memory_image(
&self,
memory: DefinedMemoryIndex,
) -> anyhow::Result<Option<&Arc<MemoryImage>>> {
match self {
ModuleRuntimeInfo::Module(m) => {
let images = m.memory_images()?;
Ok(images.and_then(|images| images.get_memory_image(memory)))
}
ModuleRuntimeInfo::Bare(_) => Ok(None),
}
}
fn unique_id(&self) -> Option<CompiledModuleId> {
match self {
ModuleRuntimeInfo::Module(m) => Some(m.id()),
ModuleRuntimeInfo::Bare(_) => None,
}
}
fn wasm_data(&self) -> &[u8] {
match self {
ModuleRuntimeInfo::Module(m) => m.compiled_module().code_memory().wasm_data(),
ModuleRuntimeInfo::Bare(_) => &[],
}
}
fn type_ids(&self) -> &[VMSharedTypeIndex] {
match self {
ModuleRuntimeInfo::Module(m) => m
.code_object()
.signatures()
.as_module_map()
.values()
.as_slice(),
ModuleRuntimeInfo::Bare(b) => match &b.one_signature {
Some(s) => core::slice::from_ref(s),
None => &[],
},
}
}
pub(crate) fn offsets(&self) -> &VMOffsets<HostPtr> {
match self {
ModuleRuntimeInfo::Module(m) => m.offsets(),
ModuleRuntimeInfo::Bare(b) => &b.offsets,
}
}
}
/// Returns the host operating system's page size, in bytes.
///
/// The value is queried from the system once and cached in a relaxed
/// atomic; the race on first use is benign since every thread stores the
/// same value.
pub fn host_page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
    let cached = PAGE_SIZE.load(Ordering::Relaxed);
    if cached != 0 {
        return cached;
    }
    let size = sys::vm::get_page_size();
    assert!(size != 0);
    PAGE_SIZE.store(size, Ordering::Relaxed);
    size
}
/// Returns whether `bytes` is a multiple of the host page size.
pub fn usize_is_multiple_of_host_page_size(bytes: usize) -> bool {
    let page = host_page_size();
    bytes % page == 0
}
/// Rounds `bytes` up to the nearest multiple of the host page size.
///
/// Returns an error if the rounded value overflows `u64`.
pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> {
    let page_size = u64::try_from(crate::runtime::vm::host_page_size()).err2anyhow()?;
    debug_assert!(page_size.is_power_of_two());
    // Add `page_size - 1` then clear the low bits; valid because the page
    // size is a power of two (asserted above).
    let mask = page_size - 1;
    match bytes.checked_add(mask) {
        Some(sum) => Ok(sum & !mask),
        None => Err(anyhow!(
            "{bytes} is too large to be rounded up to a multiple of the host page size of {page_size}"
        )),
    }
}
/// `usize` counterpart of [`round_u64_up_to_host_pages`].
///
/// Returns an error if the rounded value does not fit in a `usize`.
pub fn round_usize_up_to_host_pages(bytes: usize) -> Result<usize> {
    let wide = u64::try_from(bytes).err2anyhow()?;
    let rounded = round_u64_up_to_host_pages(wide)?;
    Ok(usize::try_from(rounded).err2anyhow()?)
}
/// Outcome of a wait operation, with explicit discriminants 0/1/2.
///
/// NOTE(review): these look like the wasm threads proposal's
/// `memory.atomic.wait32/64` return codes — confirm against the code in
/// the `threads` module that produces this value.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum WaitResult {
    /// The wait completed normally.
    Ok = 0,
    /// The expected value did not match.
    Mismatch = 1,
    /// The wait timed out.
    TimedOut = 2,
}
/// Description of a memory fault that occurred at an address within a wasm
/// linear memory.
#[derive(Debug)]
pub struct WasmFault {
    /// Total size, in bytes, of the linear memory that faulted.
    pub memory_size: usize,
    /// The wasm address that was accessed when the fault occurred.
    pub wasm_address: u64,
}

impl fmt::Display for WasmFault {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let WasmFault {
            memory_size,
            wasm_address,
        } = self;
        write!(
            f,
            "memory fault at wasm address 0x{wasm_address:x} in linear memory of size 0x{memory_size:x}",
        )
    }
}