#[cfg(all(feature = "gc", feature = "debug"))]
use crate::OwnedRooted;
use crate::RootSet;
#[cfg(feature = "gc")]
use crate::ThrownException;
#[cfg(feature = "component-model-async")]
use crate::component::ComponentStoreData;
#[cfg(feature = "component-model")]
use crate::component::concurrent;
use crate::error::OutOfMemory;
#[cfg(feature = "async")]
use crate::fiber;
use crate::module::RegisteredModuleId;
use crate::prelude::*;
#[cfg(feature = "gc")]
use crate::runtime::vm::GcRootsList;
#[cfg(feature = "stack-switching")]
use crate::runtime::vm::VMContRef;
use crate::runtime::vm::mpk::ProtectionKey;
use crate::runtime::vm::{
self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
VMStoreContext,
};
use crate::trampoline::VMHostGlobalContext;
#[cfg(feature = "debug")]
use crate::{BreakpointState, DebugHandler};
use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
#[cfg(feature = "gc")]
use crate::{ExnRef, Rooted};
use crate::{Global, Instance, Table};
use core::convert::Infallible;
use core::fmt;
use core::marker;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::num::NonZeroU64;
use core::ops::{Deref, DerefMut};
use core::pin::Pin;
use core::ptr::NonNull;
use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
mod context;
pub use self::context::*;
mod data;
pub use self::data::*;
mod func_refs;
use func_refs::FuncRefs;
#[cfg(feature = "component-model-async")]
mod token;
#[cfg(feature = "component-model-async")]
pub(crate) use token::StoreToken;
#[cfg(feature = "async")]
mod async_;
#[cfg(all(feature = "async", feature = "call-hook"))]
pub use self::async_::CallHookHandler;
#[cfg(feature = "gc")]
use super::vm::VMExnRef;
#[cfg(feature = "gc")]
mod gc;
/// A WebAssembly store: owns the host data `T` plus every instance and
/// runtime object created within it.
pub struct Store<T: 'static> {
    // `ManuallyDrop` because teardown is managed by hand: `into_data` takes
    // the box out and `mem::forget`s `self` to avoid a double-drop.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
/// Identifies which host/wasm boundary transition a call hook is observing.
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
    CallingWasm,
    ReturningFromWasm,
    CallingHost,
    ReturningFromHost,
}

impl CallHook {
    /// Returns `true` for the two transitions that hand control to host
    /// code: calling into the host, or returning out of wasm.
    pub fn entering_host(&self) -> bool {
        matches!(self, CallHook::CallingHost | CallHook::ReturningFromWasm)
    }

    /// Returns `true` for the two transitions that hand control away from
    /// host code: returning from the host, or calling into wasm.
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
/// The typed portion of a `Store<T>`: the type-erased `StoreOpaque` plus
/// everything that must know the concrete host data type `T`.
pub struct StoreInner<T: 'static> {
    inner: StoreOpaque,
    /// Projection from `T` to its resource limiter, if one was configured.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Hook invoked on host/wasm boundary transitions, if configured.
    call_hook: Option<CallHookInner<T>>,
    /// Callback deciding what to do when the epoch deadline is reached.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // The user's host data. Read through a pointer whose provenance is the
    // one published in `vm_store_context.store_data` (see `StoreInner::data`),
    // hence the "no_provenance" name for direct field access.
    data_no_provenance: ManuallyDrop<T>,
    /// Debug-event handler, if installed via `set_debug_handler`.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}

/// Object-safe adapter over a user-provided `DebugHandler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Dispatches `event`, consuming the boxed handler (implementations
    /// re-install it into the store; see the blanket impl below).
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}

#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the user handler out of the box, then put the box straight
        // back into the store so the handler remains registered while (and
        // after) this event is dispatched.
        let handler: D = (*self).clone();
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}

/// How the store's resource limiter was registered: sync or async.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}

/// A borrowed view of a store's configured resource limiter.
pub enum StoreResourceLimiter<'a> {
    Sync(&'a mut dyn crate::ResourceLimiter),
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether growing a memory from `current` to
    /// `desired` (with optional `maximum`) is allowed. The sync variant
    /// completes without suspending.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth request failed with `error`.
    pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Like `memory_growing`, but for table growth.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth request failed with `error`.
    pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
/// Storage for a configured call hook (see `CallHook`).
enum CallHookInner<T: 'static> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited; exists only so `T` is always used even when the real
    /// variants above are compiled out.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}

/// What an epoch-deadline callback instructs the store to do next.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Interrupt wasm execution.
    Interrupt,
    /// Extend the deadline by the given number of ticks and continue.
    Continue(u64),
    /// Extend the deadline and yield to the async executor first.
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline and await the caller-provided future first.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}

// Deref to the type-erased `StoreOpaque` so most internals need not know `T`.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// The type-erased core of a store, shared by all `Store<T>` instantiations.
pub struct StoreOpaque {
    // This structure's address is published via raw pointers (e.g. the
    // self-pointer in `traitobj`), so it must not move.
    _marker: marker::PhantomPinned,
    engine: Engine,
    /// State shared with the runtime/compiled code via raw pointers.
    vm_store_context: VMStoreContext,
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,
    /// Every instance allocated in this store, real and dummy.
    instances: wasmtime_environ::collections::PrimaryMap<InstanceId, StoreInstance>,
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    /// Globals created by the host (as opposed to instance-defined ones).
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    /// Lazily-allocated GC heap; `None` until first needed
    /// (see `ensure_gc_store`).
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Exception thrown but not yet taken, if any (see `throw_impl`).
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,
    // Resource accounting; limits start at the `DEFAULT_*_LIMIT` constants
    // and are overwritten when a resource limiter is configured.
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,
    /// Fuel held back from the VM; see the module-level `set_fuel`/`refuel`.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    store_data: StoreData,
    /// Self-pointer to this store as `dyn VMStore`; set in `try_new`.
    traitobj: StorePtr,
    /// vmctx of the dummy instance used as the default caller.
    default_caller_vmctx: SendSyncPtr<VMContext>,
    // Scratch buffers reused across host/wasm calls to avoid reallocating.
    hostcall_val_storage: Vec<Val>,
    wasm_val_raw_storage: Vec<ValRaw>,
    /// MPK protection key for this store's memories, if any.
    pkey: Option<ProtectionKey>,
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: Option<concurrent::ConcurrentState>,
    executor: Executor,
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,
}

/// Self-pointer to the store's `dyn VMStore` object; `None` until the store
/// box reaches its final address in `try_new`.
struct StorePtr(Option<NonNull<dyn VMStore>>);
// SAFETY: NOTE(review) — these impls merely forward Send/Sync through a raw
// self-pointer; soundness relies on the store's own cross-thread discipline
// being enforced elsewhere (confirm against `Store`'s Send/Sync bounds).
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}

/// How wasm code in this store is executed.
pub(crate) enum Executor {
    /// The Pulley interpreter.
    Interpreter(Interpreter),
    /// Natively-compiled host code.
    #[cfg(has_host_compiler_backend)]
    Native,
}
impl Executor {
    /// Picks the executor for `engine`: the interpreter when targeting
    /// Pulley, otherwise native execution (when a host backend exists).
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a host compiler backend the `if`/`else` below is this
        // function's tail expression; without one only the second block is
        // compiled, and Pulley must be the target.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}

/// Borrowed view of an `Executor`.
pub(crate) enum ExecutorRef<'a> {
    Interpreter(InterpreterRef<'a>),
    #[cfg(has_host_compiler_backend)]
    Native,
}

/// RAII guard asserting that no GC may happen while it is live, so raw GC
/// pointers held by the caller stay valid.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered (only when a GC heap
    /// exists); tells `Drop` whether there is a scope to exit.
    entered: bool,
}
impl<'a> AutoAssertNoGc<'a> {
    /// Enters a no-GC scope for `store`, returning a guard that exits the
    /// scope on drop. Entering is only meaningful when the `gc` feature is
    /// compiled in and the store has actually allocated its GC heap.
    #[inline]
    pub fn new(store: &'a mut StoreOpaque) -> Self {
        let mut entered = false;
        if cfg!(feature = "gc") {
            if let Some(gc_store) = store.gc_store.as_mut() {
                gc_store.gc_heap.enter_no_gc_scope();
                entered = true;
            }
        }
        AutoAssertNoGc { store, entered }
    }

    /// Creates a guard without enforcing the no-GC assertion in release
    /// builds; debug builds still assert via `new`.
    ///
    /// # Safety
    ///
    /// NOTE(review): callers must uphold whatever invariant makes skipping
    /// the no-GC assertion sound — confirm at call sites.
    #[inline]
    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
        if !cfg!(debug_assertions) {
            return AutoAssertNoGc {
                store,
                entered: false,
            };
        }
        AutoAssertNoGc::new(store)
    }
}
// Let an `AutoAssertNoGc` be used wherever a `StoreOpaque` is needed.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

impl Drop for AutoAssertNoGc<'_> {
    /// Exits the no-GC scope, if one was entered in `new`.
    #[inline]
    fn drop(&mut self) {
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}

/// An instance owned by this store, plus how it came to exist.
struct StoreInstance {
    handle: InstanceHandle,
    kind: StoreInstanceKind,
}

enum StoreInstanceKind {
    /// A regular instantiation of a registered module.
    Real {
        /// The module this is an instance of, kept alive in the store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },
    /// An internal helper instance (e.g. the default caller) with no
    /// registered module backing it.
    Dummy,
}
impl<T> Store<T> {
/// Creates a new store with the given host `data`, panicking on
/// allocation failure; use `try_new` to handle that error instead.
pub fn new(engine: &Engine, data: T) -> Self {
    Self::try_new(engine, data).expect(
        "allocation failure during `Store::new` (use `Store::try_new` to handle such errors)",
    )
}
/// Creates a new store with the given host `data`, returning an error on
/// allocation failure.
///
/// Construction happens in three steps: build the `StoreOpaque` with
/// default/empty state, box it up together with `data`, then fix up the
/// self-referential pointers and allocate the "default caller" dummy
/// instance once the box has its final address.
pub fn try_new(engine: &Engine, data: T) -> Result<Self> {
    let store_data = StoreData::new();
    log::trace!("creating new store {:?}", store_data.id());
    let pkey = engine.allocator().next_available_pkey();
    let inner = StoreOpaque {
        _marker: marker::PhantomPinned,
        engine: engine.clone(),
        vm_store_context: Default::default(),
        #[cfg(feature = "stack-switching")]
        continuations: Vec::new(),
        instances: wasmtime_environ::collections::PrimaryMap::new(),
        #[cfg(feature = "component-model")]
        num_component_instances: 0,
        signal_handler: None,
        gc_store: None,
        gc_roots: RootSet::default(),
        #[cfg(feature = "gc")]
        gc_roots_list: GcRootsList::default(),
        #[cfg(feature = "gc")]
        gc_host_alloc_types: Default::default(),
        #[cfg(feature = "gc")]
        pending_exception: None,
        modules: ModuleRegistry::default(),
        func_refs: FuncRefs::default(),
        host_globals: PrimaryMap::new(),
        instance_count: 0,
        instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
        memory_count: 0,
        memory_limit: crate::DEFAULT_MEMORY_LIMIT,
        table_count: 0,
        table_limit: crate::DEFAULT_TABLE_LIMIT,
        #[cfg(feature = "async")]
        async_state: Default::default(),
        fuel_reserve: 0,
        fuel_yield_interval: None,
        store_data,
        // Placeholder; replaced below once the box's address is known.
        traitobj: StorePtr(None),
        // Placeholder; replaced below with the dummy instance's vmctx.
        default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
        hostcall_val_storage: Vec::new(),
        wasm_val_raw_storage: Vec::new(),
        pkey,
        #[cfg(feature = "component-model")]
        component_host_table: Default::default(),
        #[cfg(feature = "component-model")]
        component_calls: Default::default(),
        #[cfg(feature = "component-model")]
        host_resource_data: Default::default(),
        executor: Executor::new(engine),
        #[cfg(feature = "component-model")]
        concurrent_state: if engine.tunables().concurrency_support {
            // Concurrency support can only be enabled when the
            // `component-model-async` feature is compiled in.
            #[cfg(feature = "component-model-async")]
            {
                Some(Default::default())
            }
            #[cfg(not(feature = "component-model-async"))]
            {
                unreachable!()
            }
        } else {
            None
        },
        #[cfg(feature = "debug")]
        breakpoints: Default::default(),
    };
    let mut inner = try_new::<Box<_>>(StoreInner {
        inner,
        limiter: None,
        call_hook: None,
        #[cfg(target_has_atomic = "64")]
        epoch_deadline_behavior: None,
        data_no_provenance: ManuallyDrop::new(data),
        #[cfg(feature = "debug")]
        debug_handler: None,
    })?;
    // Publish the address (and provenance) of the host data into the
    // `VMStoreContext`; `StoreInner::data` later reconstitutes `&T` from it.
    let store_data =
        <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
    inner.inner.vm_store_context.store_data = store_data.into();
    // Now that the box has its final address, record the self-pointer used
    // to recover a `dyn VMStore` from raw runtime contexts.
    inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));
    // Allocate a dummy instance of the empty module; its vmctx serves as
    // the `default_caller_vmctx` for calls with no wasm caller.
    let allocator = OnDemandInstanceAllocator::default();
    let info = engine.empty_module_runtime_info();
    allocator
        .validate_module(info.env_module(), info.offsets())
        .unwrap();
    unsafe {
        let result = vm::assert_ready(inner.allocate_instance(
            None,
            AllocateInstanceKind::Dummy {
                allocator: &allocator,
            },
            info,
            Default::default(),
        ));
        let id = match result {
            Ok(id) => id,
            Err(e) => {
                // Out-of-memory is reported to the caller; any other
                // failure for the empty module is a bug.
                if e.is::<OutOfMemory>() {
                    return Err(e);
                }
                panic!("instance allocator failed to allocate default callee")
            }
        };
        let default_caller_vmctx = inner.instance(id).vmctx();
        inner.default_caller_vmctx = default_caller_vmctx.into();
    }
    Ok(Self {
        inner: ManuallyDrop::new(inner),
    })
}
/// Returns a shared reference to the host data `T`.
#[inline]
pub fn data(&self) -> &T {
    self.inner.data()
}
/// Returns an exclusive reference to the host data `T`.
#[inline]
pub fn data_mut(&mut self) -> &mut T {
    self.inner.data_mut()
}
/// Teardown that must run before the store's memory is released; shared
/// by the drop path and `into_data`.
fn run_manual_drop_routines(&mut self) {
    #[cfg(feature = "component-model-async")]
    if self.inner.concurrent_state.is_some() {
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);
    }
    self.inner.flush_fiber_stack();
}
/// Consumes the store and returns its host data `T`.
pub fn into_data(mut self) -> T {
    self.run_manual_drop_routines();
    // SAFETY: the inner box and the data are each taken exactly once, and
    // `forget(self)` prevents `Store::drop` from running and double-taking.
    unsafe {
        let mut inner = ManuallyDrop::take(&mut self.inner);
        core::mem::forget(self);
        ManuallyDrop::take(&mut inner.data_no_provenance)
    }
}
/// Configures a synchronous resource limiter, obtained from the host data
/// via `limiter` on each use.
///
/// The instance/table/memory limits are snapshotted from the limiter once
/// here and stored on the `StoreOpaque` for cheap checks later.
pub fn limiter(
    &mut self,
    mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
) {
    let store = &mut self.inner;
    let (instances, tables, memories) = {
        let l = limiter(store.data_mut());
        (l.instances(), l.tables(), l.memories())
    };
    store.inner.instance_limit = instances;
    store.inner.table_limit = tables;
    store.inner.memory_limit = memories;
    store.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
}
/// Configures a synchronous call hook invoked on each host/wasm boundary
/// transition (see `CallHook`).
#[cfg(feature = "call-hook")]
pub fn call_hook(
    &mut self,
    hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
) {
    self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
}
/// Returns the `Engine` this store belongs to.
pub fn engine(&self) -> &Engine {
    self.inner.engine()
}
/// Runs a synchronous garbage collection; `why` optionally carries the
/// out-of-memory condition that prompted it.
#[cfg(feature = "gc")]
pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
    StoreContextMut(&mut self.inner).gc(why)
}
/// Returns the fuel remaining in this store.
pub fn get_fuel(&self) -> Result<u64> {
    self.inner.get_fuel()
}
/// Sets the fuel available to wasm executing in this store.
pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
    self.inner.set_fuel(fuel)
}
/// Configures how much fuel is consumed between async yield points, or
/// disables fuel-based yielding with `None`.
#[cfg(feature = "async")]
pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
    self.inner.fuel_async_yield_interval(interval)
}
/// Sets the epoch deadline to `ticks_beyond_current` ticks from now.
#[cfg(target_has_atomic = "64")]
pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
    self.inner.set_epoch_deadline(ticks_beyond_current);
}
/// Configures the store to trap when the epoch deadline is reached.
#[cfg(target_has_atomic = "64")]
pub fn epoch_deadline_trap(&mut self) {
    self.inner.epoch_deadline_trap();
}
/// Configures `callback` to run when the epoch deadline is reached; its
/// `UpdateDeadline` return value decides how execution proceeds.
#[cfg(target_has_atomic = "64")]
pub fn epoch_deadline_callback(
    &mut self,
    callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
) {
    self.inner.epoch_deadline_callback(Box::new(callback));
}
/// Records `exception` as this store's pending exception and returns the
/// `ThrownException` marker error.
#[cfg(feature = "gc")]
pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
    self.inner.throw_impl(exception);
    Err(ThrownException)
}
/// Takes (and clears) the pending exception, if any.
#[cfg(feature = "gc")]
pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
    self.inner.take_pending_exception_rooted()
}
/// Whether an exception is currently pending on this store.
#[cfg(feature = "gc")]
pub fn has_pending_exception(&self) -> bool {
    self.inner.pending_exception.is_some()
}
/// Returns a cursor over debug frames, if available.
#[cfg(feature = "debug")]
pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
    self.as_context_mut().debug_frames()
}
/// Returns an editor for this store's breakpoints, if available.
#[cfg(feature = "debug")]
pub fn edit_breakpoints(&mut self) -> Option<crate::BreakpointEdit<'_>> {
    self.as_context_mut().edit_breakpoints()
}
/// Iterates over the currently-set breakpoints, if available.
#[cfg(feature = "debug")]
pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
    self.as_context().breakpoints()
}
/// Whether single-step execution is currently enabled.
#[cfg(feature = "debug")]
pub fn is_single_step(&self) -> bool {
    self.as_context().is_single_step()
}
/// Installs `handler` to receive debug events. Marks the store as
/// requiring async execution and panics unless the engine was configured
/// with guest debugging enabled.
#[cfg(feature = "debug")]
pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
where
    T: Send,
{
    self.inner.set_async_required(Asyncness::Yes);
    assert!(
        self.engine().tunables().debug_guest,
        "debug hooks require guest debugging to be enabled"
    );
    self.inner.debug_handler = Some(Box::new(handler));
}
/// Removes any installed debug handler.
#[cfg(feature = "debug")]
pub fn clear_debug_handler(&mut self) {
    self.inner.debug_handler = None;
}
}
impl<'a, T> StoreContext<'a, T> {
    /// Returns the `Engine` of the underlying store.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }
    /// Shared access to the host data, for the full lifetime `'a`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }
    /// Returns the fuel remaining in the store.
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}

impl<'a, T> StoreContextMut<'a, T> {
    /// Shared access to the host data.
    pub fn data(&self) -> &T {
        self.0.data()
    }
    /// Exclusive access to the host data.
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }
    /// Returns the `Engine` of the underlying store.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }
    /// Runs a synchronous garbage collection; errors if the store's
    /// resource limiter was registered as async.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?;
        // A sync GC never suspends, so the future must already be ready.
        vm::assert_ready(store.gc(
            limiter.as_mut(),
            None,
            why.map(|e| e.bytes_needed()),
            Asyncness::No,
        ));
        Ok(())
    }
    /// Returns the fuel remaining in the store.
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
    /// Sets the fuel available to wasm executing in the store.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }
    /// Configures the fuel interval between async yields; see
    /// `Store::fuel_async_yield_interval`.
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }
    /// Sets the epoch deadline to `ticks_beyond_current` ticks from now.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }
    /// Configures the store to trap when the epoch deadline is reached.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
    /// Records `exception` as the pending exception; see `Store::throw`.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }
    /// Takes (and clears) the pending exception, if any.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }
    /// Whether an exception is currently pending on the store.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
impl<T> StoreInner<T> {
    /// Reconstitutes `&T` from the pointer published in
    /// `vm_store_context.store_data` during `Store::try_new`.
    ///
    /// The address comes from the `data_no_provenance` field itself while
    /// the provenance comes from the published pointer. NOTE(review):
    /// presumably this keeps the reference compatible with raw accesses
    /// made through `vm_store_context` — confirm against the runtime's
    /// aliasing model.
    #[inline]
    fn data(&self) -> &T {
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }
    /// Splits this store into disjoint borrows of the host data, the
    /// optional resource limiter, and the type-erased `StoreOpaque`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // Same provenance reconstruction as `data` above, but mutable.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };
        let limiter = self.limiter.as_mut();
        (data, limiter, &mut self.inner)
    }
    /// Exclusive access to the host data `T`.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }
    /// Runs the configured call hook and/or MPK key switch for transition
    /// `s`; the common no-hook, no-pkey case is a fast no-op.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // Restrict memory access to this store's protection key while wasm
        // runs; open all keys back up while the host runs.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }
        // Temporarily take the hook out of `self` so it can be invoked
        // with `&mut self`, then put it back.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }
        Ok(())
    }
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),
            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                // Async hooks need to block on a fiber; fail if we can't.
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }
    /// No-op when the `async` feature is disabled; the async build's
    /// version lives elsewhere.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
    }
    /// Returns the limiter and opaque store, erroring if the limiter was
    /// registered as async (sync entry points must use `*_async` instead).
    pub(crate) fn validate_sync_resource_limiter_and_store_opaque(
        &mut self,
    ) -> Result<(Option<StoreResourceLimiter<'_>>, &mut StoreOpaque)> {
        let (limiter, store) = self.resource_limiter_and_store_opaque();
        if !matches!(limiter, None | Some(StoreResourceLimiter::Sync(_))) {
            bail!(
                "when using an async resource limiter `*_async` functions must \
                 be used instead"
            );
        }
        Ok((limiter, store))
    }
}
/// Computes the total fuel currently available to the store.
///
/// `injected_fuel` is the VM-side counter: `set_fuel` stores a negative
/// value whose magnitude is the injected amount, and consumption moves it
/// toward (and past) zero. The total is therefore the reserve plus whatever
/// injected fuel remains, saturating at zero once overdrawn.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Use `saturating_neg` rather than `-injected_fuel`: negating
    // `i64::MIN` overflows (and panics in debug builds); saturating keeps
    // this total-order computation panic-free for all inputs.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}

/// Moves fuel from the reserve back into the VM counter.
///
/// Returns `true` if any fuel remained to inject, `false` if the store is
/// out of fuel.
fn refuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
) -> bool {
    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
    if fuel > 0 {
        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
        true
    } else {
        false
    }
}

/// Splits `new_fuel_amount` between the VM counter and the reserve.
///
/// At most `yield_interval` fuel (all of it when no interval is set) is
/// injected into the VM as a negative count — additionally capped at
/// `i64::MAX` so the negation below is always representable — and the rest
/// is held in `fuel_reserve` for later `refuel` calls.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    let interval = yield_interval.unwrap_or(NonZeroU64::MAX).get();
    let injected = core::cmp::min(interval, new_fuel_amount);
    let injected = core::cmp::min(injected, i64::MAX as u64);
    *fuel_reserve = new_fuel_amount - injected;
    *injected_fuel = -(injected as i64);
}
#[doc(hidden)]
impl StoreOpaque {
/// Returns this store's unique identifier.
pub fn id(&self) -> StoreId {
    self.store_data.id()
}
/// Charges one instance plus `module`'s defined memories and tables
/// against this store's resource limits, erroring if any limit would be
/// exceeded.
pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
    // Bump `slot` by `amt`, failing if that would exceed `max`.
    fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
        let new = slot.saturating_add(amt);
        if new > max {
            bail!("resource limit exceeded: {desc} count too high at {new}");
        }
        *slot = new;
        Ok(())
    }
    let env = module.env_module();
    let num_memories = env.num_defined_memories();
    let num_tables = env.num_defined_tables();
    bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
    bump(&mut self.memory_count, self.memory_limit, num_memories, "memory")?;
    bump(&mut self.table_count, self.table_limit, num_tables, "table")?;
    Ok(())
}
/// Returns the engine this store was created with.
#[inline]
pub fn engine(&self) -> &Engine {
    &self.engine
}
/// Shared access to this store's `StoreData`.
#[inline]
pub fn store_data(&self) -> &StoreData {
    &self.store_data
}
/// Mutable access to this store's `StoreData`.
#[inline]
pub fn store_data_mut(&mut self) -> &mut StoreData {
    &mut self.store_data
}
/// Splits out the `StoreData` alongside the module registry.
pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
    (&mut self.store_data, &self.modules)
}
/// Splits out the breakpoint state alongside the module registry.
#[cfg(feature = "debug")]
pub(crate) fn breakpoints_and_registry_mut(
    &mut self,
) -> (&mut BreakpointState, &mut ModuleRegistry) {
    (&mut self.breakpoints, &mut self.modules)
}
/// Shared variant of `breakpoints_and_registry_mut`.
#[cfg(feature = "debug")]
pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
    (&self.breakpoints, &self.modules)
}
/// Shared access to the modules registered in this store.
#[inline]
pub(crate) fn modules(&self) -> &ModuleRegistry {
    &self.modules
}
/// Registers `module` with this store, returning its registry id.
pub(crate) fn register_module(&mut self, module: &Module) -> Result<RegisteredModuleId> {
    self.modules.register_module(module, &self.engine)
}
/// Registers `component` with this store.
#[cfg(feature = "component-model")]
pub(crate) fn register_component(
    &mut self,
    component: &crate::component::Component,
) -> Result<()> {
    self.modules.register_component(component, &self.engine)
}
/// Splits out the function-reference table alongside the module registry.
pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
    (&mut self.func_refs, &self.modules)
}
/// Shared access to the host-created globals in this store.
pub(crate) fn host_globals(
    &self,
) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
    &self.host_globals
}
/// Mutable access to the host-created globals in this store.
pub(crate) fn host_globals_mut(
    &mut self,
) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
    &mut self.host_globals
}
/// Returns the module a real instance was created from, or `None` for
/// dummy instances; panics if `instance` belongs to a different store.
pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
    instance.store_id().assert_belongs_to(self.id());
    match self.instances[instance.instance()].kind {
        StoreInstanceKind::Dummy => None,
        StoreInstanceKind::Real { module_id } => {
            let module = self
                .modules()
                .module_by_id(module_id)
                .expect("should always have a registered module for real instances");
            Some(module)
        }
    }
}
/// Shared access to the instance with the given `id`.
#[inline]
pub fn instance(&self, id: InstanceId) -> &vm::Instance {
    self.instances[id].handle.get()
}
/// Pinned, mutable access to the instance with the given `id`.
#[inline]
pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
    self.instances[id].handle.get_mut()
}
/// Splits out one instance borrow alongside the module registry.
#[inline]
pub fn instance_and_module_registry_mut(
    &mut self,
    id: InstanceId,
) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
    (self.instances[id].handle.get_mut(), &self.modules)
}
/// Borrows the optional GC store together with `N` instances at once.
///
/// # Safety
///
/// NOTE(review): the full caller contract is not visible here; at minimum
/// `ids` must be distinct, since duplicates make `get_disjoint_mut` fail
/// and the `unwrap` below panic — confirm obligations at call sites.
#[inline]
pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
    &mut self,
    ids: [InstanceId; N],
) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
    let instances = self
        .instances
        .get_disjoint_mut(ids)
        .unwrap()
        .map(|h| h.handle.get_mut());
    (self.gc_store.as_mut(), instances)
}
/// Borrows the optional GC store together with one instance.
pub fn optional_gc_store_and_instance_mut(
    &mut self,
    id: InstanceId,
) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
    (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
}
/// Borrows the optional GC store, the module registry, and one instance.
pub fn optional_gc_store_and_registry_and_instance_mut(
    &mut self,
    id: InstanceId,
) -> (
    Option<&mut GcStore>,
    &ModuleRegistry,
    Pin<&mut vm::Instance>,
) {
    (
        self.gc_store.as_mut(),
        &self.modules,
        self.instances[id].handle.get_mut(),
    )
}
pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
let instances = self
.instances
.iter()
.filter_map(|(id, inst)| {
if let StoreInstanceKind::Dummy = inst.kind {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
instances
.into_iter()
.map(|i| Instance::from_wasmtime(i, self))
}
/// Iterates over all defined memories across every instance in this store.
pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
    let id = self.id();
    self.instances
        .iter()
        .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
}
/// Invokes `f` for every defined table in every instance of this store.
pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
    for id in self.instances.keys() {
        let instance = StoreInstanceId::new(self.id(), id);
        for table in 0..self.instance(id).env_module().num_defined_tables() {
            let table = DefinedTableIndex::new(table);
            f(self, Table::from_raw(instance, table));
        }
    }
}
/// Invokes `f` for every global in this store: host-created globals
/// first, then each instance's defined globals.
pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
    for global in self.host_globals.keys() {
        let global = Global::new_host(self, global);
        f(self, global);
    }
    for id in self.instances.keys() {
        for index in 0..self.instance(id).env_module().num_defined_globals() {
            let index = DefinedGlobalIndex::new(index);
            let global = Global::new_instance(self, id, index);
            f(self, global);
        }
    }
}
/// Installs (or clears) the host signal handler for this store.
#[cfg(all(feature = "std", any(unix, windows)))]
pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
    self.signal_handler = handler;
}
/// Shared access to the state shared with the runtime/compiled code.
#[inline]
pub fn vm_store_context(&self) -> &VMStoreContext {
    &self.vm_store_context
}
/// Mutable access to the state shared with the runtime/compiled code.
#[inline]
pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
    &mut self.vm_store_context
}
/// Returns this store's GC store, allocating it on first use.
#[inline]
pub(crate) async fn ensure_gc_store(
    &mut self,
    limiter: Option<&mut StoreResourceLimiter<'_>>,
) -> Result<&mut GcStore> {
    if self.gc_store.is_some() {
        return Ok(self.gc_store.as_mut().unwrap());
    }
    self.allocate_gc_store(limiter).await
}
/// Allocates this store's GC heap; kept out-of-line so the common
/// already-allocated path in `ensure_gc_store` stays small.
#[inline(never)]
async fn allocate_gc_store(
    &mut self,
    limiter: Option<&mut StoreResourceLimiter<'_>>,
) -> Result<&mut GcStore> {
    log::trace!("allocating GC heap for store {:?}", self.id());
    assert!(self.gc_store.is_none());
    // The shared view of the heap must still be in its initial empty state.
    assert_eq!(
        self.vm_store_context.gc_heap.base.as_non_null(),
        NonNull::dangling(),
    );
    assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);
    let gc_store = allocate_gc_store(self, limiter).await?;
    // Publish the heap's memory definition for the runtime/compiled code.
    self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
    return Ok(self.gc_store.insert(gc_store));

    // Real implementation when GC support is compiled in.
    #[cfg(feature = "gc")]
    async fn allocate_gc_store(
        store: &mut StoreOpaque,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<GcStore> {
        use wasmtime_environ::packed_option::ReservedValue;
        let engine = store.engine();
        let mem_ty = engine.tunables().gc_heap_memory_type();
        ensure!(
            engine.features().gc_types(),
            "cannot allocate a GC store when GC is disabled at configuration time"
        );
        // First allocate the linear memory backing the heap...
        let mut request = InstanceAllocationRequest {
            id: InstanceId::reserved_value(),
            runtime_info: engine.empty_module_runtime_info(),
            imports: vm::Imports::default(),
            store,
            limiter,
        };
        let (mem_alloc_index, mem) = engine
            .allocator()
            .allocate_memory(&mut request, &mem_ty, None)
            .await?;
        // ...then wrap it in a GC heap from the configured GC runtime.
        let gc_runtime = engine
            .gc_runtime()
            .context("no GC runtime: GC disabled at compile time or configuration time")?;
        let (index, heap) =
            engine
                .allocator()
                .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;
        Ok(GcStore::new(index, heap))
    }

    // Stub when GC support is compiled out.
    #[cfg(not(feature = "gc"))]
    async fn allocate_gc_store(
        _: &mut StoreOpaque,
        _: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<GcStore> {
        bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
    }
}
/// Returns the GC store, erroring if the heap has not been allocated yet.
#[inline]
#[cfg(feature = "gc")]
pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
    match &self.gc_store {
        Some(gc_store) => Ok(gc_store),
        None => bail!("GC heap not initialized yet"),
    }
}
/// Mutable variant of `require_gc_store`.
#[inline]
#[cfg(feature = "gc")]
pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
    match &mut self.gc_store {
        Some(gc_store) => Ok(gc_store),
        None => bail!("GC heap not initialized yet"),
    }
}
/// Returns the GC store if one can exist; when GC is compiled out or
/// disabled for this engine none may have been allocated, so `None`.
#[inline]
pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
    if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
        debug_assert!(self.gc_store.is_none());
        None
    } else {
        self.gc_store.as_mut()
    }
}
/// Returns the GC store, panicking if the heap has not been allocated.
#[inline]
#[track_caller]
pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
    self.gc_store
        .as_ref()
        .expect("attempted to access the store's GC heap before it has been allocated")
}
/// Mutable variant of `unwrap_gc_store`.
#[inline]
#[track_caller]
pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
    self.gc_store
        .as_mut()
        .expect("attempted to access the store's GC heap before it has been allocated")
}
/// Shared access to this store's set of rooted GC references.
#[inline]
pub(crate) fn gc_roots(&self) -> &RootSet {
    &self.gc_roots
}
/// Mutable access to this store's set of rooted GC references.
#[inline]
#[cfg(feature = "gc")]
pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
    &mut self.gc_roots
}
/// Pops LIFO-rooted GC references back down to the `scope` marker.
#[inline]
pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
    self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
}
/// Performs a garbage collection; a no-op if the heap was never allocated.
#[cfg(feature = "gc")]
async fn do_gc(&mut self, asyncness: Asyncness) {
    // Nothing to collect without a heap.
    if self.gc_store.is_none() {
        return;
    }
    log::trace!("============ Begin GC ===========");
    // Take the roots list out of `self` so the list and the store can be
    // borrowed at the same time during tracing.
    let mut roots = core::mem::take(&mut self.gc_roots_list);
    self.trace_roots(&mut roots, asyncness).await;
    self.unwrap_gc_store_mut()
        .gc(asyncness, unsafe { roots.iter() })
        .await;
    // Clear and put the list back so its capacity is reused next GC.
    roots.clear();
    self.gc_roots_list = roots;
    log::trace!("============ End GC ===========");
}
/// Collects every GC root into `gc_roots_list`, yielding to the async
/// executor between phases when running asynchronously.
#[cfg(feature = "gc")]
async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) {
    log::trace!("Begin trace GC roots");
    assert!(gc_roots_list.is_empty());
    self.trace_wasm_stack_roots(gc_roots_list);
    if asyncness != Asyncness::No {
        vm::Yield::new().await;
    }
    #[cfg(feature = "stack-switching")]
    {
        self.trace_wasm_continuation_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }
    }
    self.trace_vmctx_roots(gc_roots_list);
    if asyncness != Asyncness::No {
        vm::Yield::new().await;
    }
    self.trace_user_roots(gc_roots_list);
    self.trace_pending_exception_roots(gc_roots_list);
    log::trace!("End trace GC roots")
}
/// Trace the GC roots of a single Wasm stack frame into `gc_roots_list`.
///
/// The frame's module is located by PC, then the compiler-emitted stack
/// map (and, with the `debug` feature, the module's frame table) is used
/// to find the live GC-reference slots in this frame.
#[cfg(feature = "gc")]
fn trace_wasm_stack_frame(
    &self,
    gc_roots_list: &mut GcRootsList,
    frame: crate::runtime::vm::Frame,
) {
    let pc = frame.pc();
    debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");
    let fp = frame.fp() as *mut usize;
    debug_assert!(
        !fp.is_null(),
        "we should always get a valid frame pointer for Wasm frames"
    );
    let (module_with_code, _offset) = self
        .modules()
        .module_and_code_by_pc(pc)
        .expect("should have module info for Wasm frame");
    // Not every PC has a stack map; frames without one hold no GC refs in
    // stack-map-tracked slots.
    if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
        log::trace!(
            "We have a stack map that maps {} bytes in this Wasm frame",
            stack_map.frame_size()
        );
        // SAFETY: `fp` came from the frame walker for this live frame, so
        // deriving SP from it and enumerating mapped slots is valid here.
        let sp = unsafe { stack_map.sp(fp) };
        for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
            unsafe {
                self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
            }
        }
    }
    // With debugging enabled, additional GC refs may be described by the
    // module's frame table rather than the stack map.
    #[cfg(feature = "debug")]
    if let Some(frame_table) = module_with_code.module().frame_table() {
        let relpc = module_with_code
            .text_offset(pc)
            .expect("PC should be within module");
        for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
            unsafe {
                self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
            }
        }
    }
}
/// Record `stack_slot` as a GC root if it currently holds a non-null GC
/// reference.
///
/// # Safety
///
/// `stack_slot` must point to a valid GC-reference slot on the Wasm stack
/// and must remain valid for as long as `gc_roots_list` is in use.
#[cfg(feature = "gc")]
unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
    use crate::runtime::vm::SendSyncPtr;
    use core::ptr::NonNull;
    // SAFETY: the caller guarantees `stack_slot` is valid to read.
    let raw: u32 = unsafe { core::ptr::read(stack_slot) };
    log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");
    let gc_ref = vm::VMGcRef::from_raw_u32(raw);
    // Null references decode to `None` and are not roots.
    if gc_ref.is_some() {
        unsafe {
            gc_roots_list
                .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
        }
    }
}
/// Walk the currently active Wasm stack and trace each frame's GC roots.
#[cfg(feature = "gc")]
fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
    use crate::runtime::vm::Backtrace;
    log::trace!("Begin trace GC roots :: Wasm stack");
    Backtrace::trace(self, |frame| {
        self.trace_wasm_stack_frame(gc_roots_list, frame);
        // Always continue: every frame must be visited.
        core::ops::ControlFlow::Continue(())
    });
    log::trace!("End trace GC roots :: Wasm stack");
}
/// Trace GC roots held in the frames of this store's continuations.
///
/// Only suspended continuations are walked explicitly here; see the notes
/// on the other states below.
#[cfg(all(feature = "gc", feature = "stack-switching"))]
fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
    use crate::{runtime::vm::Backtrace, vm::VMStackState};
    log::trace!("Begin trace GC roots :: continuations");
    for continuation in &self.continuations {
        let state = continuation.common_stack_information.state;
        match state {
            VMStackState::Suspended => {
                // Suspended stacks are not reachable from the active stack
                // walk, so their frames must be traced directly.
                Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                    self.trace_wasm_stack_frame(gc_roots_list, frame);
                    core::ops::ControlFlow::Continue(())
                });
            }
            VMStackState::Running => {
                // NOTE(review): presumably covered by the regular Wasm
                // stack trace because this stack is currently executing —
                // confirm against `trace_wasm_stack_roots`.
            }
            VMStackState::Parent => {
                // NOTE(review): presumably reachable through the active
                // stack chain and thus already traced — confirm.
            }
            VMStackState::Fresh | VMStackState::Returned => {
                // No frames have executed (or all have completed), so
                // there are no live GC refs on these stacks.
            }
        }
    }
    log::trace!("End trace GC roots :: continuations");
}
/// Trace GC roots reachable from instance vmctxs: globals and tables.
#[cfg(feature = "gc")]
fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
    log::trace!("Begin trace GC roots :: vmctx");
    self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
    self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
    log::trace!("End trace GC roots :: vmctx");
}
/// Trace GC references rooted by the embedder via this store's root set.
#[cfg(feature = "gc")]
fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
    log::trace!("Begin trace GC roots :: user");
    self.gc_roots.trace_roots(gc_roots_list);
    log::trace!("End trace GC roots :: user");
}
/// Root the store's pending exception, if any, so it survives collection.
#[cfg(feature = "gc")]
fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
    log::trace!("Begin trace GC roots :: pending exception");
    if let Some(pending_exception) = self.pending_exception.as_mut() {
        // SAFETY: the GC ref lives in `self.pending_exception`, which
        // outlives the roots list's use during this collection.
        unsafe {
            let root = pending_exception.as_gc_ref_mut();
            gc_roots_list.add_root(root.into(), "Pending exception");
        }
    }
    log::trace!("End trace GC roots :: pending exception");
}
/// Keep `ty` registered for as long as this store lives so GC objects of
/// that type allocated by the host remain valid.
#[cfg(feature = "gc")]
pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
    self.gc_host_alloc_types.insert(ty);
}
/// Initialize `dest` with a copy of `gc_ref`, running the GC store's
/// initialization barrier when one is required.
///
/// References that need no barrier (per `GcStore::needs_init_barrier`)
/// are written directly as `i31` copies.
pub(crate) fn init_gc_ref(
    &mut self,
    dest: &mut MaybeUninit<Option<VMGcRef>>,
    gc_ref: Option<&VMGcRef>,
) {
    if GcStore::needs_init_barrier(gc_ref) {
        self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
    } else {
        dest.write(gc_ref.map(|r| r.copy_i31()));
    }
}
/// Overwrite `dest` with `gc_ref`, delegating barrier handling to the
/// (possibly absent) GC store.
pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
    GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
}
/// Duplicate `gc_ref`, informing the GC store of the new reference when
/// it is heap-allocated; inline `i31` values are copied bitwise.
pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
    // Heap-allocated references must go through the GC store's clone
    // barrier so the collector sees the new reference.
    if !gc_ref.is_i31() {
        return self.unwrap_gc_store_mut().clone_gc_ref(gc_ref);
    }
    // `i31` refs carry their value inline: a plain copy suffices.
    gc_ref.copy_i31()
}
/// Returns the amount of fuel remaining in this store.
///
/// # Errors
///
/// Fails when fuel consumption was not enabled in this store's
/// configuration.
pub fn get_fuel(&self) -> Result<u64> {
    crate::ensure!(
        self.engine().tunables().consume_fuel,
        "fuel is not configured in this store"
    );
    // The `fuel_consumed` cell is shared with executing Wasm; reading it
    // here observes the consumption so far.
    let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
    Ok(get_fuel(injected_fuel, self.fuel_reserve))
}
/// Move fuel from this store's reserve into the active consumption
/// counter; returns `true` if fuel was available to continue execution.
pub(crate) fn refuel(&mut self) -> bool {
    let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
    refuel(
        injected_fuel,
        &mut self.fuel_reserve,
        self.fuel_yield_interval,
    )
}
/// Set the total amount of fuel available to this store to `fuel`.
///
/// The value is split between the active consumption counter and the
/// reserve according to the configured yield interval.
///
/// # Errors
///
/// Fails when fuel consumption was not enabled in this store's
/// configuration.
pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
    crate::ensure!(
        self.engine().tunables().consume_fuel,
        "fuel is not configured in this store"
    );
    let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
    set_fuel(
        injected_fuel,
        &mut self.fuel_reserve,
        self.fuel_yield_interval,
        fuel,
    );
    Ok(())
}
/// Configure how much fuel is consumed between async yields, or disable
/// yield-on-fuel with `None`.
///
/// Marks this store as requiring async usage and re-applies the current
/// fuel amount so the new interval takes effect immediately.
///
/// # Errors
///
/// Fails when fuel consumption is not enabled in this store's
/// configuration, or when `interval` is `Some(0)`.
#[cfg(feature = "async")]
pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
    crate::ensure!(
        self.engine().tunables().consume_fuel,
        "fuel is not configured in this store"
    );
    crate::ensure!(
        interval != Some(0),
        "fuel_async_yield_interval must not be 0"
    );
    self.set_async_required(Asyncness::Yes);
    // `NonZeroU64::new` already has the `u64 -> Option<NonZeroU64>` shape,
    // so pass it directly rather than wrapping it in a redundant closure.
    self.fuel_yield_interval = interval.and_then(NonZeroU64::new);
    // Reset the fuel via the setter so the reserve/active split is
    // recomputed under the new interval.
    self.set_fuel(self.get_fuel()?)
}
/// Returns a raw pointer to this store's custom signal handler, if one
/// was configured.
#[inline]
pub fn signal_handler(&self) -> Option<*const SignalHandler> {
    // Borrow the handler in place and hand back its address; `None`
    // propagates when no handler was installed.
    self.signal_handler
        .as_ref()
        .map(|handler| handler as *const SignalHandler)
}
/// Raw pointer to this store's `VMStoreContext`, shared with compiled code.
#[inline]
pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
    NonNull::from(&self.vm_store_context)
}
/// The default `VMContext` used as the caller for host-initiated calls.
#[inline]
pub fn default_caller(&self) -> NonNull<VMContext> {
    self.default_caller_vmctx.as_non_null()
}
/// This store as a `dyn VMStore` trait-object pointer.
///
/// Panics if the trait-object pointer was never initialized.
#[inline]
pub fn traitobj(&self) -> NonNull<dyn VMStore> {
    self.traitobj.0.unwrap()
}
/// Take the scratch `Vec<Val>` used during host calls, leaving an empty
/// vector in its place so the allocation can be handed back later.
#[inline]
pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
    mem::replace(&mut self.hostcall_val_storage, Vec::new())
}
/// Return a scratch `Vec<Val>` after a host call, keeping it only when
/// its allocation is larger than the one currently cached.
#[inline]
pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
    // A smaller (or equal) buffer is simply dropped.
    if storage.capacity() <= self.hostcall_val_storage.capacity() {
        return;
    }
    self.hostcall_val_storage = storage;
}
/// Take the scratch `Vec<ValRaw>` used for Wasm calls, leaving an empty
/// vector behind so the allocation can be returned afterwards.
#[inline]
pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
    mem::replace(&mut self.wasm_val_raw_storage, Vec::new())
}
/// Return a scratch `Vec<ValRaw>` after a Wasm call, keeping it only when
/// its allocation is larger than the one currently cached.
#[inline]
pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
    // A smaller (or equal) buffer is simply dropped.
    if storage.capacity() <= self.wasm_val_raw_storage.capacity() {
        return;
    }
    self.wasm_val_raw_storage = storage;
}
/// Classify a caught segfault at `pc`/`addr` as a known Wasm-related
/// fault, if possible.
///
/// Returns `Some` when `addr` belongs to a linear memory of an instance
/// in this store, `None` for near-null addresses, and aborts the process
/// for any other address (a potential codegen/security bug).
pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
    // NOTE(review): near-zero addresses are assumed to come from accesses
    // through a null `VMFuncRef` pointer — hence the comparison against
    // its size; the compile-time assert bounds that region. Confirm.
    if addr <= mem::size_of::<VMFuncRef>() {
        const _: () = {
            assert!(mem::size_of::<VMFuncRef>() <= 512);
        };
        return None;
    }
    // Ask every instance whether it owns the faulting address; at most one
    // may claim it.
    let mut fault = None;
    for (_, instance) in self.instances.iter() {
        if let Some(f) = instance.handle.get().wasm_fault(addr) {
            assert!(fault.is_none());
            fault = Some(f);
        }
    }
    if fault.is_some() {
        return fault;
    }
    // The address is not part of any linear memory in this store: this
    // should be impossible for correctly generated code, so terminate the
    // process as loudly as the platform allows.
    cfg_if::cfg_if! {
        if #[cfg(feature = "std")] {
            eprintln!(
                "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.
pc: 0x{pc:x}
address: 0x{addr:x}
This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
            );
            std::process::abort();
        } else if #[cfg(panic = "abort")] {
            // Without `std`, a panic under `panic=abort` kills the process.
            let _ = pc;
            panic!("invalid fault");
        } else {
            let _ = pc;
            // Panicking from a destructor during an unwind forces an abort
            // even when panics would otherwise unwind.
            struct PanicAgainOnDrop;
            impl Drop for PanicAgainOnDrop {
                fn drop(&mut self) {
                    panic!("panicking again to trigger a process abort");
                }
            }
            let _bomb = PanicAgainOnDrop;
            panic!("invalid fault");
        }
    }
}
/// The MPK protection key assigned to this store, if any.
#[inline]
#[cfg(feature = "pooling-allocator")]
pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
    self.pkey
}
/// Borrow all component-model resource state simultaneously: call
/// contexts, the host-owned handle table, and host resource data.
#[inline]
#[cfg(feature = "component-model")]
pub(crate) fn component_resource_state(
    &mut self,
) -> (
    &mut vm::component::CallContexts,
    &mut vm::component::HandleTable,
    &mut crate::component::HostResourceData,
) {
    (
        &mut self.component_calls,
        &mut self.component_host_table,
        &mut self.host_resource_data,
    )
}
/// Record that a component instance now lives in this store.
///
/// Only a count is kept here; the instance itself is stored elsewhere in
/// the store's data, so the argument is intentionally unused.
#[cfg(feature = "component-model")]
pub(crate) fn push_component_instance(&mut self, _instance: crate::component::Instance) {
    self.num_component_instances += 1;
}
/// Like `component_resource_state`, but additionally borrows the given
/// component instance's runtime state from the store's data.
#[inline]
#[cfg(feature = "component-model")]
pub(crate) fn component_resource_state_with_instance(
    &mut self,
    instance: crate::component::Instance,
) -> (
    &mut vm::component::CallContexts,
    &mut vm::component::HandleTable,
    &mut crate::component::HostResourceData,
    Pin<&mut vm::component::ComponentInstance>,
) {
    (
        &mut self.component_calls,
        &mut self.component_host_table,
        &mut self.host_resource_data,
        instance.id().from_data_get_mut(&mut self.store_data),
    )
}
/// Like `component_resource_state_with_instance`, but additionally
/// borrows the store's (optional) concurrent state.
#[cfg(feature = "component-model")]
pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
    &mut self,
    instance: crate::component::Instance,
) -> (
    &mut vm::component::CallContexts,
    &mut vm::component::HandleTable,
    &mut crate::component::HostResourceData,
    Pin<&mut vm::component::ComponentInstance>,
    Option<&mut concurrent::ConcurrentState>,
) {
    (
        &mut self.component_calls,
        &mut self.component_host_table,
        &mut self.host_resource_data,
        instance.id().from_data_get_mut(&mut self.store_data),
        self.concurrent_state.as_mut(),
    )
}
/// Mutable access to this store's async fiber state.
#[cfg(feature = "async")]
pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
    &mut self.async_state
}
/// Mutable access to the component-model-async concurrent state.
///
/// # Panics
///
/// Panics if this store was built without concurrency support.
#[cfg(feature = "component-model-async")]
pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
    debug_assert!(self.concurrency_support());
    self.concurrent_state.as_mut().unwrap()
}
/// Whether this store supports component-model concurrency.
///
/// Presence of the concurrent state is the source of truth; in debug
/// builds it is cross-checked against the engine's tunables.
#[inline]
#[cfg(feature = "component-model")]
pub(crate) fn concurrency_support(&self) -> bool {
    let enabled = self.concurrent_state.is_some();
    debug_assert_eq!(enabled, self.engine().tunables().concurrency_support);
    enabled
}
/// Whether this store was assigned an MPK protection key.
#[cfg(feature = "async")]
pub(crate) fn has_pkey(&self) -> bool {
    self.pkey.is_some()
}
/// Borrow this store's executor — either the in-process interpreter or
/// the native (compiled-code) strategy.
pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
    match &mut self.executor {
        Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
        #[cfg(has_host_compiler_backend)]
        Executor::Native => ExecutorRef::Native,
    }
}
/// Swap this store's executor state with `executor`.
#[cfg(feature = "async")]
pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
    mem::swap(&mut self.executor, executor);
}
/// The stack unwinder matching this store's execution strategy.
pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
    match &self.executor {
        Executor::Interpreter(i) => i.unwinder(),
        #[cfg(has_host_compiler_backend)]
        Executor::Native => &vm::UnwindHost,
    }
}
/// Allocate a fresh `VMContRef` with its own stack, owned by this store.
///
/// The returned raw pointer addresses the boxed continuation stored in
/// `self.continuations`, so it stays valid while the store holds it.
#[cfg(feature = "stack-switching")]
pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
    let mut cont = Box::new(VMContRef::empty());
    // Size the new stack according to the engine's async-stack config.
    let size = self.engine.config().async_stack_size;
    cont.stack = crate::vm::VMContinuationStack::new(size)?;
    // Capture the box's stable heap address before handing ownership to
    // the store.
    let ptr: *mut VMContRef = &mut *cont;
    self.continuations.push(cont);
    Ok(ptr)
}
/// Allocate a new instance within this store, register it in
/// `self.instances`, and return its id.
///
/// Real modules use the engine's configured allocator; dummy instances
/// use the allocator supplied via `kind`.
///
/// # Safety
///
/// NOTE(review): `imports` must be valid, correctly-typed imports for
/// `runtime_info`'s module — confirm the exact contract against
/// `InstanceAllocator::allocate_module`.
pub(crate) async unsafe fn allocate_instance(
    &mut self,
    limiter: Option<&mut StoreResourceLimiter<'_>>,
    kind: AllocateInstanceKind<'_>,
    runtime_info: &ModuleRuntimeInfo,
    imports: Imports<'_>,
) -> Result<InstanceId> {
    // Reserve the id up front; the allocation request carries it so the
    // instance knows its own slot.
    let id = self.instances.next_key();
    let allocator = match kind {
        AllocateInstanceKind::Module(_) => self.engine().allocator(),
        AllocateInstanceKind::Dummy { allocator } => allocator,
    };
    // SAFETY: forwards this function's own safety requirements to the
    // allocator.
    let handle = unsafe {
        allocator
            .allocate_module(InstanceAllocationRequest {
                id,
                runtime_info,
                imports,
                store: self,
                limiter,
            })
            .await?
    };
    // Record the instance under the reserved id, tagging it with how it
    // was created so teardown uses the right allocator.
    let actual = match kind {
        AllocateInstanceKind::Module(module_id) => {
            log::trace!(
                "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                self.id()
            );
            self.instances.push(StoreInstance {
                handle,
                kind: StoreInstanceKind::Real { module_id },
            })?
        }
        AllocateInstanceKind::Dummy { .. } => {
            log::trace!(
                "Adding dummy instance to store: store={:?}, instance={id:?}",
                self.id()
            );
            self.instances.push(StoreInstance {
                handle,
                kind: StoreInstanceKind::Dummy,
            })?
        }
    };
    // The reserved key and the pushed key must agree.
    assert_eq!(id, actual);
    Ok(id)
}
/// Record `exnref` as this store's pending (thrown) exception.
#[cfg(feature = "gc")]
pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
    self.pending_exception = Some(exnref);
}
/// Take (and clear) this store's pending exception, if any.
#[cfg(feature = "gc")]
pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
    self.pending_exception.take()
}
/// Whether an exception is currently pending in this store.
#[cfg(feature = "gc")]
pub fn has_pending_exception(&self) -> bool {
    self.pending_exception.is_some()
}
/// Take the pending exception, if any, and root it for use through the
/// embedder API.
#[cfg(feature = "gc")]
fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
    let vmexnref = self.take_pending_exception()?;
    // No GC may run while converting the raw ref into a rooted handle.
    let mut nogc = AutoAssertNoGc::new(self);
    Some(Rooted::new(&mut nogc, vmexnref.into()))
}
/// Return the pending exception as an `OwnedRooted` handle *without*
/// clearing it: the stored exception is replaced by a fresh clone of the
/// same GC reference.
///
/// # Errors
///
/// Returns `OutOfMemory` when rooting the exception fails.
#[cfg(all(feature = "gc", feature = "debug"))]
pub(crate) fn pending_exception_owned_rooted(
    &mut self,
) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
    let mut nogc = AutoAssertNoGc::new(self);
    nogc.pending_exception
        .take()
        .map(|vmexnref| {
            // Clone the GC ref and reinstall the clone as the pending
            // exception; the original ref moves into the owned root.
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
        .transpose()
}
/// Record `exception` as this store's pending exception.
///
/// Panics if the rooted handle does not resolve to a non-null GC ref.
#[cfg(feature = "gc")]
fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
    let mut nogc = AutoAssertNoGc::new(self);
    // Convert the rooted handle back into a raw, non-null exception ref.
    let exnref = exception._to_raw(&mut nogc).unwrap();
    let exnref = VMGcRef::from_raw_u32(exnref)
        .expect("exception cannot be null")
        .into_exnref_unchecked();
    nogc.set_pending_exception(exnref);
}
/// Set the epoch deadline to `delta` ticks past the engine's current
/// epoch.
#[cfg(target_has_atomic = "64")]
pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
    let current_epoch = self.engine().current_epoch();
    let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
    *epoch_deadline = current_epoch + delta;
}
/// The currently configured epoch deadline.
pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
    *self.vm_store_context.epoch_deadline.get_mut()
}
/// Ensure this store permits synchronous (non-`*_async`) entry points.
///
/// # Errors
///
/// Fails when the store's configuration requires async usage.
#[inline]
pub(crate) fn validate_sync_call(&self) -> Result<()> {
    #[cfg(feature = "async")]
    if self.async_state.async_required {
        bail!("store configuration requires that `*_async` functions are used instead");
    }
    Ok(())
}
/// Whether the current context may block (suspend on a fiber).
///
/// Always `false` when the `async` feature is disabled.
pub(crate) fn can_block(&mut self) -> bool {
    // The `if true` keeps the trailing `false` from being flagged as
    // unreachable when the `async` branch is compiled in.
    #[cfg(feature = "async")]
    if true {
        return self.fiber_async_state_mut().can_block();
    }
    false
}
/// Non-async stub: without the `async` feature only `Asyncness::No`
/// exists, so there is nothing to record. The exhaustive match will fail
/// to compile if a variant is ever reachable here.
#[cfg(not(feature = "async"))]
pub(crate) fn set_async_required(&mut self, asyncness: Asyncness) {
    match asyncness {
        Asyncness::No => {}
    }
}
}
/// How an instance allocation should be performed and recorded.
pub(crate) enum AllocateInstanceKind<'a> {
    /// A real instantiation of the registered module with this id.
    Module(RegisteredModuleId),
    /// A dummy instance allocated with the provided allocator.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
// SAFETY(review): `VMStore` is an unsafe trait; this impl delegates
// directly to the underlying `StoreOpaque` and fields — confirm its
// documented contract at the trait definition.
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }
    // Shared view of the type-erased store internals.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }
    // Mutable view of the type-erased store internals.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }
    // Split this store into its (optional) resource limiter and opaque
    // parts, running the user's limiter-lookup closure over the data.
    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        let (data, limiter, opaque) = self.data_limiter_and_opaque();
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });
        (limiter, opaque)
    }
    // Invoke the configured epoch-deadline callback (defaulting to an
    // interrupt) to decide what happens next.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the callback so it may borrow the store.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };
        self.epoch_deadline_behavior = behavior;
        update
    }
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }
    // Deliver `event` to the registered debug handler, blocking on its
    // async completion; a no-op when no handler is installed.
    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        // NOTE(review): the handler is taken and not reinstalled after the
        // event — confirm this one-shot behavior is intentional.
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
impl<T> StoreInner<T> {
    // Configure epoch-deadline expiry to trap: with no callback installed,
    // `new_epoch_updated_deadline` defaults to `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }
    // Install `callback` to decide what happens when the epoch deadline is
    // reached.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
// A default `Store` pairs default data with a freshly-created default
// engine.
impl<T: Default> Default for Store<T> {
    fn default() -> Store<T> {
        Store::new(&Engine::default(), T::default())
    }
}
// Debug output shows the address of the inner store (not its contents)
// alongside the user's data.
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let inner_ptr: *const StoreInner<T> = &**self.inner;
        f.debug_struct("Store")
            .field("inner", &inner_ptr)
            .field("data", self.inner.data())
            .finish()
    }
}
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run store-specific teardown while everything is still alive.
        self.run_manual_drop_routines();
        // SAFETY: each `ManuallyDrop` is dropped exactly once, here, and
        // in this order: the user data first, then the store internals.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // SAFETY: the GC heap and every instance below were created by the
        // allocators used here, and each is deallocated exactly once.
        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();
            // Return the GC heap and its backing memory first.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                // Dummy instances are freed with an on-demand allocator
                // rather than the engine's configured one.
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }
            // Balance the per-instance count recorded by
            // `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
/// Internal helper trait to view any store-like type as its `StoreOpaque`.
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
// Identity: a `StoreOpaque` is already the opaque store.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
// Trait objects coerce to the opaque store through `VMStore`.
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
// Reach through the typed wrappers to the opaque internals.
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
// `StoreInner` coerces to its opaque store.
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
// Forward through mutable references so `&mut impl AsStoreOpaque` works.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
/// Whether an operation is permitted to behave asynchronously (yield).
///
/// `Debug` is derived in addition to the comparison/copy traits so the
/// enum can appear in logs and assertion messages — a public type should
/// be debuggable.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum Asyncness {
    /// Synchronous-only: no yields may occur.
    No,
    /// Asynchronous execution is enabled and yields may occur.
    #[cfg(feature = "async")]
    Yes,
}
// Combining asyncness: the result is async if either operand is.
impl core::ops::BitOr for Asyncness {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        match (self, rhs) {
            (Asyncness::No, Asyncness::No) => Asyncness::No,
            #[cfg(feature = "async")]
            (Asyncness::Yes, _) | (_, Asyncness::Yes) => Asyncness::Yes,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Minimal stand-in for a store's fuel bookkeeping, exercising the
    // free functions `get_fuel`/`refuel`/`set_fuel` directly.
    struct FuelTank {
        // Fuel consumed so far; negative values mean fuel remains.
        pub consumed_fuel: i64,
        // Fuel held back to be injected at the next yield.
        pub reserve_fuel: u64,
        // How much fuel to hand out per yield interval, if yielding.
        pub yield_interval: Option<NonZeroU64>,
    }
    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }
    // Basic split between active fuel and reserve with/without yielding.
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        // With an interval of 10, only 10 goes active; 15 stays reserved.
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }
    // Round-tripping extreme values through set/get must be lossless.
    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);
        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }
    // Same, but with yield intervals at and beyond the i64 boundary.
    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        // The active portion is clamped to what an i64 can represent.
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }
    // Refueling moves reserve into the active counter, up to the interval,
    // and reports whether any fuel remained.
    #[test]
    fn refueling() {
        // If we have some fuel, make sure `refuel` takes it from the
        // reserve and puts it in the active counter.
        let mut tank = FuelTank::new();
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);
        // A reserve smaller than the interval is handed out entirely.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);
        // Fully out of fuel: `refuel` fails and leaves state untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
    // Writes to the store data through the raw `VMStoreContext` pointer
    // (as compiled Wasm would do) must be visible via `data()`/`data_mut()`.
    #[test]
    fn store_data_provenance() {
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }
        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}