Skip to main content

wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than into the `StoreOpaque` as a whole.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::instance::InstanceData;
80use crate::linker::Definition;
81use crate::module::RegisteredModuleId;
82use crate::prelude::*;
83use crate::runtime::vm::mpk::{self, ProtectionKey, ProtectionMask};
84use crate::runtime::vm::{
85    Backtrace, ExportGlobal, GcHeapAllocationIndex, GcRootsList, GcStore,
86    InstanceAllocationRequest, InstanceAllocator, InstanceHandle, ModuleRuntimeInfo,
87    OnDemandInstanceAllocator, SignalHandler, StoreBox, StorePtr, VMContext, VMFuncRef, VMGcRef,
88    VMRuntimeLimits, WasmFault,
89};
90use crate::trampoline::VMHostGlobalContext;
91use crate::type_registry::RegisteredType;
92use crate::RootSet;
93use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
94use crate::{Global, Instance, Memory, RootScope, Table, Uninhabited};
95use alloc::sync::Arc;
96use core::cell::UnsafeCell;
97use core::fmt;
98use core::future::Future;
99use core::marker;
100use core::mem::{self, ManuallyDrop};
101use core::num::NonZeroU64;
102use core::ops::{Deref, DerefMut};
103use core::pin::Pin;
104use core::ptr;
105use core::sync::atomic::AtomicU64;
106use core::task::{Context, Poll};
107
108mod context;
109pub use self::context::*;
110mod data;
111pub use self::data::*;
112mod func_refs;
113use func_refs::FuncRefs;
114
115/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
116///
117/// All WebAssembly instances and items will be attached to and refer to a
118/// [`Store`]. For example instances, functions, globals, and tables are all
119/// attached to a [`Store`]. Instances are created by instantiating a
120/// [`Module`](crate::Module) within a [`Store`].
121///
122/// A [`Store`] is intended to be a short-lived object in a program. No form
123/// of GC is implemented at this time so once an instance is created within a
124/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
125/// This makes [`Store`] unsuitable for creating an unbounded number of
126/// instances in it because [`Store`] will never release this memory. It's
127/// recommended to have a [`Store`] correspond roughly to the lifetime of a
128/// "main instance" that an embedding is interested in executing.
129///
130/// ## Type parameter `T`
131///
132/// Each [`Store`] has a type parameter `T` associated with it. This `T`
133/// represents state defined by the host. This state will be accessible through
134/// the [`Caller`](crate::Caller) type that host-defined functions get access
135/// to. This `T` is suitable for storing `Store`-specific information which
136/// imported functions may want access to.
137///
138/// The data `T` can be accessed through methods like [`Store::data`] and
139/// [`Store::data_mut`].
140///
141/// ## Stores, contexts, oh my
142///
143/// Most methods in Wasmtime take something of the form
144/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
145/// the first argument. These two traits allow ergonomically passing in the
146/// context you currently have to any method. The primary two sources of
147/// contexts are:
148///
149/// * `Store<T>`
150/// * `Caller<'_, T>`
151///
152/// corresponding to what you create and what you have access to in a host
153/// function. You can also explicitly acquire a [`StoreContext`] or
154/// [`StoreContextMut`] and pass that around as well.
155///
156/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
157/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
158/// form of context you have you can call various methods, create objects, etc.
159///
160/// ## Stores and `Default`
161///
162/// You can create a store with default configuration settings using
163/// `Store::default()`. This will create a brand new [`Engine`] with default
164/// configuration (see [`Config`](crate::Config) for more information).
165///
166/// ## Cross-store usage of items
167///
168/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
169/// [`Store`]. The store they belong to is the one they were created with
170/// (passed in as a parameter) or instantiated with. This store is the only
171/// store that can be used to interact with wasm items after they're created.
172///
173/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
174/// operations is incorrect. In other words it's considered a programmer error
175/// rather than a recoverable error for the wrong [`Store`] to be used when
176/// calling APIs.
pub struct Store<T> {
    /// The heap-allocated contents of this store; `Store<T>` itself is just
    /// this one pointer.
    ///
    /// Wrapped in `ManuallyDrop` so that `Store::into_data` can move the `T`
    /// out without running destructors twice; the full drop scheme is
    /// documented in `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
181
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // Host code is entered either by wasm calling out to the host or by
        // wasm returning back to a host caller.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        // Host code is exited either by the host calling into wasm or by a
        // host function returning back into wasm.
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
212
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Resource-limiting callback installed via `Store::limiter` (or its
    /// async variant); `None` until configured.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on transitions between host and wasm code; see
    /// [`CallHook`] for the four transitions.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; its return value decides how execution resumes (see
    /// [`UpdateDeadline`]).
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    /// The embedder-provided `T`. Wrapped in `ManuallyDrop` so that
    /// `Store::into_data` can move it out; for the full drop scheme see the
    /// comments in `Store::into_data`.
    data: ManuallyDrop<T>,
}
229
/// Storage for the store's resource-limiter accessor, installed through
/// [`Store::limiter`] or its async counterpart.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter: projects the store's `&mut T` to the embedder's
    /// [`ResourceLimiter`](crate::ResourceLimiter).
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter; only usable with async-configured stores.
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
235
/// An object that can take callbacks when the runtime enters or exits hostcalls.
#[cfg(all(feature = "async", feature = "call-hook"))]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// A callback to run when wasmtime is about to enter a host call, or when about to
    /// exit the hostcall.
    ///
    /// The `ch` argument indicates which of the four transitions is occurring;
    /// see [`CallHook`]. NOTE(review): the exact effect of returning an error
    /// (presumably trapping the wasm caller) is implemented outside this
    /// module — confirm there.
    async fn handle_call_event(&self, t: StoreContextMut<'_, T>, ch: CallHook) -> Result<()>;
}
244
/// Storage for the store's configured call hook, if any.
enum CallHookInner<T> {
    /// A synchronous hook invoked on each host/wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// An asynchronous hook; see [`CallHookHandler`].
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant that exists only so the `T` type parameter is
    /// always "used" even when the feature-gated variants above are compiled
    /// out; the `Uninhabited` field guarantees no value of this variant can
    /// ever be constructed.
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
256
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// Values of this type are returned from the store's epoch-deadline callback
/// to choose how execution resumes.
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
}
268
// Forward methods on `StoreOpaque` so they're also available on
// `StoreInner<T>`; most internal code works with the monomorphic
// `StoreOpaque` and this lets it be reached transparently.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
282
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is that `default_caller` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store is associated with, as passed to `Store::new`.
    engine: Engine,
    /// Execution limits and bookkeeping consulted while running wasm; see
    /// `VMRuntimeLimits`.
    runtime_limits: VMRuntimeLimits,
    /// All instances that have been allocated into this store.
    instances: Vec<StoreInstance>,
    /// Count of component instances in this store.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Optional custom signal handler. NOTE(review): installation and
    /// invocation happen outside this chunk; confirm semantics there.
    signal_handler: Option<Box<SignalHandler<'static>>>,
    /// Registry of modules instantiated into this store.
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,

    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator.
    gc_host_alloc_types: hashbrown::HashSet<RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Fiber/poll-context state used for async execution; see `AsyncState`.
    #[cfg(feature = "async")]
    async_state: AsyncState,
    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    ///
    /// Note that this is `ManuallyDrop` because it needs to be dropped before
    /// `rooted_host_funcs` below. This structure contains pointers which are
    /// otherwise kept alive by the `Arc` references in `rooted_host_funcs`.
    store_data: ManuallyDrop<StoreData>,
    /// The "default callee" instance allocated in `Store::new`, used so that
    /// `Func::call` always has a non-null `callee: *mut VMContext`.
    default_caller: InstanceHandle,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// A list of lists of definitions which have been used to instantiate
    /// within this `Store`.
    ///
    /// Note that not all instantiations end up pushing to this list. At the
    /// time of this writing only the `InstancePre<T>` type will push to this
    /// list. Pushes to this list are typically accompanied with
    /// `HostFunc::to_func_store_rooted` to clone an `Arc` here once which
    /// preserves a strong reference to the `Arc` for each `HostFunc` stored
    /// within the list of `Definition`s.
    ///
    /// Note that this is `ManuallyDrop` as it must be dropped after
    /// `store_data` above, where the function pointers are stored.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: crate::runtime::vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: crate::runtime::vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
}
391
/// Per-store state for suspending/resuming async execution on a fiber.
#[cfg(feature = "async")]
struct AsyncState {
    /// Raw pointer to the current fiber's suspend handle; initialized to
    /// null in `Store::new`. NOTE(review): presumably only set for the
    /// duration of an async call — the setters are outside this chunk.
    current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    /// Raw pointer to the `Context` of the future currently polling this
    /// store; likewise null-initialized in `Store::new`.
    current_poll_cx: UnsafeCell<*mut Context<'static>>,
}
397
// SAFETY: lots of pesky unsafe cells and pointers in this structure disable
// the auto traits, so we declare explicitly that we use this in a threadsafe
// fashion.
#[cfg(feature = "async")]
unsafe impl Send for AsyncState {}
#[cfg(feature = "async")]
unsafe impl Sync for AsyncState {}
404
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap is held in a no-GC scope for this value's
    /// lifetime.
    store: &'a mut StoreOpaque,
    /// Whether `new` actually entered a no-GC scope (requires the `gc`
    /// feature and an allocated `GcStore`); when `true`, `Drop` exits the
    /// scope.
    entered: bool,
}
411
412impl<'a> AutoAssertNoGc<'a> {
413    #[inline]
414    pub fn new(store: &'a mut StoreOpaque) -> Self {
415        let entered = if !cfg!(feature = "gc") {
416            false
417        } else if let Some(gc_store) = store.gc_store.as_mut() {
418            gc_store.gc_heap.enter_no_gc_scope();
419            true
420        } else {
421            false
422        };
423
424        AutoAssertNoGc { store, entered }
425    }
426
427    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
428    /// disables checks for no GC happening for the duration of this value.
429    ///
430    /// This is used when it is statically otherwise known that a GC doesn't
431    /// happen for the various types involved.
432    ///
433    /// # Unsafety
434    ///
435    /// This method is `unsafe` as it does not provide the same safety
436    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
437    /// caller that a GC doesn't happen.
438    #[inline]
439    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
440        if cfg!(debug_assertions) {
441            AutoAssertNoGc::new(store)
442        } else {
443            AutoAssertNoGc {
444                store,
445                entered: false,
446            }
447        }
448    }
449}
450
// Transparently expose the wrapped store for read access.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

// Transparently expose the wrapped store for mutation.
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

// Exits the no-GC scope entered in `AutoAssertNoGc::new`, if any.
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // `entered` is only set when a `GcStore` was present in `new`;
        // NOTE(review): assumes the GC store is not removed while this
        // guard is live, otherwise `unwrap_gc_store_mut` would panic.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
475
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
484
/// Discriminates real instances from internal dummy instances; see
/// `StoreInstance`.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    ///
    /// No module id is recorded here since dummy instances don't originate
    /// from a registered module.
    Dummy,
}
501
502impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        // Grab a protection key (if memory protection keys are available)
        // before anything is allocated so it can be stored in the store.
        let pkey = engine.allocator().next_available_pkey();

        let mut inner = Box::new(StoreInner {
            inner: StoreOpaque {
                _marker: marker::PhantomPinned,
                engine: engine.clone(),
                runtime_limits: Default::default(),
                instances: Vec::new(),
                #[cfg(feature = "component-model")]
                num_component_instances: 0,
                signal_handler: None,
                gc_store: None,
                gc_roots: RootSet::default(),
                gc_roots_list: GcRootsList::default(),
                gc_host_alloc_types: hashbrown::HashSet::default(),
                modules: ModuleRegistry::default(),
                func_refs: FuncRefs::default(),
                host_globals: Vec::new(),
                instance_count: 0,
                instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
                memory_count: 0,
                memory_limit: crate::DEFAULT_MEMORY_LIMIT,
                table_count: 0,
                table_limit: crate::DEFAULT_TABLE_LIMIT,
                #[cfg(feature = "async")]
                async_state: AsyncState {
                    current_suspend: UnsafeCell::new(ptr::null_mut()),
                    current_poll_cx: UnsafeCell::new(ptr::null_mut()),
                },
                fuel_reserve: 0,
                fuel_yield_interval: None,
                store_data: ManuallyDrop::new(StoreData::new()),
                default_caller: InstanceHandle::null(),
                hostcall_val_storage: Vec::new(),
                wasm_val_raw_storage: Vec::new(),
                rooted_host_funcs: ManuallyDrop::new(Vec::new()),
                pkey,
                #[cfg(feature = "component-model")]
                component_host_table: Default::default(),
                #[cfg(feature = "component-model")]
                component_calls: Default::default(),
                #[cfg(feature = "component-model")]
                host_resource_data: Default::default(),
            },
            limiter: None,
            call_hook: None,
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        inner.default_caller = {
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = ModuleRuntimeInfo::bare(module);
            let allocator = OnDemandInstanceAllocator::default();
            allocator
                .validate_module(shim.module(), shim.offsets())
                .unwrap();
            // SAFETY: the empty shim module was validated against this
            // allocator just above.
            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                    })
                    .expect("failed to allocate default callee")
            };

            // Note the erasure of the lifetime here into `'static`, so in
            // general usage of this trait object must be strictly bounded to
            // the `Store` itself, and is an invariant that we have to maintain
            // throughout Wasmtime.
            unsafe {
                let traitobj = mem::transmute::<
                    *mut (dyn crate::runtime::vm::Store + '_),
                    *mut (dyn crate::runtime::vm::Store + 'static),
                >(&mut *inner);
                instance.set_store(traitobj);
            }
            instance
        };

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
608
    /// Access the underlying data owned by this `Store`.
    ///
    /// See also [`Store::data_mut`] for mutable access and
    /// [`Store::into_data`] to take ownership of the data.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
614
    /// Access the underlying data owned by this `Store`.
    ///
    /// This is the mutable counterpart of [`Store::data`].
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
620
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    ///
    /// All other resources owned by the store are dropped in the process;
    /// only the `T` provided at construction is returned.
    pub fn into_data(mut self) -> T {
        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        //
        // SAFETY: per the scheme above, each `ManuallyDrop` is taken exactly
        // once and `mem::forget` prevents `Drop for Store<T>` from touching
        // either again.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
653
654    /// Configures the [`ResourceLimiter`] used to limit resource creation
655    /// within this [`Store`].
656    ///
657    /// Whenever resources such as linear memory, tables, or instances are
658    /// allocated the `limiter` specified here is invoked with the store's data
659    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
660    /// being allocated. The returned [`ResourceLimiter`] is intended to live
661    /// within the `T` itself, for example by storing a
662    /// [`StoreLimits`](crate::StoreLimits).
663    ///
664    /// Note that this limiter is only used to limit the creation/growth of
665    /// resources in the future, this does not retroactively attempt to apply
666    /// limits to the [`Store`].
667    ///
668    /// # Examples
669    ///
670    /// ```
671    /// use wasmtime::*;
672    ///
673    /// struct MyApplicationState {
674    ///     my_state: u32,
675    ///     limits: StoreLimits,
676    /// }
677    ///
678    /// let engine = Engine::default();
679    /// let my_state = MyApplicationState {
680    ///     my_state: 42,
681    ///     limits: StoreLimitsBuilder::new()
682    ///         .memory_size(1 << 20 /* 1 MB */)
683    ///         .instances(2)
684    ///         .build(),
685    /// };
686    /// let mut store = Store::new(&engine, my_state);
687    /// store.limiter(|state| &mut state.limits);
688    ///
689    /// // Creation of smaller memories is allowed
690    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
691    ///
692    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
693    /// // configured
694    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
695    ///
696    /// // The number of instances in this store is limited to 2, so the third
697    /// // instance here should fail.
698    /// let module = Module::new(&engine, "(module)").unwrap();
699    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
700    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
701    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
702    /// ```
703    ///
704    /// [`ResourceLimiter`]: crate::ResourceLimiter
705    pub fn limiter(
706        &mut self,
707        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
708    ) {
709        // Apply the limits on instances, tables, and memory given by the limiter:
710        let inner = &mut self.inner;
711        let (instance_limit, table_limit, memory_limit) = {
712            let l = limiter(&mut inner.data);
713            (l.instances(), l.tables(), l.memories())
714        };
715        let innermost = &mut inner.inner;
716        innermost.instance_limit = instance_limit;
717        innermost.table_limit = table_limit;
718        innermost.memory_limit = memory_limit;
719
720        // Save the limiter accessor function:
721        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
722    }
723
724    /// Configures the [`ResourceLimiterAsync`](crate::ResourceLimiterAsync)
725    /// used to limit resource creation within this [`Store`].
726    ///
727    /// This method is an asynchronous variant of the [`Store::limiter`] method
728    /// where the embedder can block the wasm request for more resources with
729    /// host `async` execution of futures.
730    ///
731    /// By using a [`ResourceLimiterAsync`](`crate::ResourceLimiterAsync`)
732    /// with a [`Store`], you can no longer use
733    /// [`Memory::new`](`crate::Memory::new`),
734    /// [`Memory::grow`](`crate::Memory::grow`),
735    /// [`Table::new`](`crate::Table::new`), and
736    /// [`Table::grow`](`crate::Table::grow`). Instead, you must use their
737    /// `async` variants: [`Memory::new_async`](`crate::Memory::new_async`),
738    /// [`Memory::grow_async`](`crate::Memory::grow_async`),
739    /// [`Table::new_async`](`crate::Table::new_async`), and
740    /// [`Table::grow_async`](`crate::Table::grow_async`).
741    ///
742    /// Note that this limiter is only used to limit the creation/growth of
743    /// resources in the future, this does not retroactively attempt to apply
744    /// limits to the [`Store`]. Additionally this must be used with an async
745    /// [`Store`] configured via
746    /// [`Config::async_support`](crate::Config::async_support).
747    #[cfg(feature = "async")]
748    pub fn limiter_async(
749        &mut self,
750        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
751            + Send
752            + Sync
753            + 'static,
754    ) {
755        debug_assert!(self.inner.async_support());
756        // Apply the limits on instances, tables, and memory given by the limiter:
757        let inner = &mut self.inner;
758        let (instance_limit, table_limit, memory_limit) = {
759            let l = limiter(&mut inner.data);
760            (l.instances(), l.tables(), l.memories())
761        };
762        let innermost = &mut inner.inner;
763        innermost.instance_limit = instance_limit;
764        innermost.table_limit = table_limit;
765        innermost.memory_limit = memory_limit;
766
767        // Save the limiter accessor function:
768        inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
769    }
770
771    /// Configures an async function that runs on calls and returns between
772    /// WebAssembly and host code. For the non-async equivalent of this method,
773    /// see [`Store::call_hook`].
774    ///
775    /// The function is passed a [`CallHook`] argument, which indicates which
776    /// state transition the VM is making.
777    ///
778    /// This function's future may return a [`Trap`]. If a trap is returned
779    /// when an import was called, it is immediately raised as-if the host
780    /// import had returned the trap. If a trap is returned after wasm returns
781    /// to the host then the wasm function's result is ignored and this trap is
782    /// returned instead.
783    ///
784    /// After this function returns a trap, it may be called for subsequent
785    /// returns to host or wasm code as the trap propagates to the root call.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        // Boxed and stored; invoked from `StoreInner::invoke_call_hook` on
        // each wasm<->host transition. Replaces any previously-set hook.
        self.inner.call_hook = Some(CallHookInner::Async(Box::new(hook)));
    }
790
791    /// Configure a function that runs on calls and returns between WebAssembly
792    /// and host code.
793    ///
794    /// The function is passed a [`CallHook`] argument, which indicates which
795    /// state transition the VM is making.
796    ///
797    /// This function may return a [`Trap`]. If a trap is returned when an
798    /// import was called, it is immediately raised as-if the host import had
799    /// returned the trap. If a trap is returned after wasm returns to the host
800    /// then the wasm function's result is ignored and this trap is returned
801    /// instead.
802    ///
803    /// After this function returns a trap, it may be called for subsequent returns
804    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Boxed and stored; invoked from `StoreInner::invoke_call_hook` on
        // each wasm<->host transition. Replaces any previously-set hook.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
812
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Delegates to the internal store state.
        self.inner.engine()
    }
817
818    /// Perform garbage collection.
819    ///
820    /// Note that it is not required to actively call this function. GC will
821    /// automatically happen according to various internal heuristics. This is
822    /// provided if fine-grained control over the GC is desired.
823    ///
824    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        // Forces a collection now; GC otherwise runs on internal heuristics.
        self.inner.gc()
    }
829
830    /// Perform garbage collection asynchronously.
831    ///
832    /// Note that it is not required to actively call this function. GC will
833    /// automatically happen according to various internal heuristics. This is
834    /// provided if fine-grained control over the GC is desired.
835    ///
836    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        // Async variant of `Store::gc` for async-configured stores.
        self.inner.gc_async().await;
    }
844
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
847    ///
848    /// # Errors
849    ///
850    /// This function will return an error if fuel consumption is not enabled
851    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        // Errors if fuel consumption isn't enabled in this store's config.
        self.inner.get_fuel()
    }
855
856    /// Set the fuel to this [`Store`] for wasm to consume while executing.
857    ///
858    /// For this method to work fuel consumption must be enabled via
859    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
860    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
861    /// immediately trap). This function must be called for the store to have
862    /// some fuel to allow WebAssembly to execute.
863    ///
864    /// Most WebAssembly instructions consume 1 unit of fuel. Some
865    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
866    /// units, as any execution cost associated with them involves other
867    /// instructions which do consume fuel.
868    ///
869    /// Note that when fuel is entirely consumed it will cause wasm to trap.
870    ///
871    /// # Errors
872    ///
873    /// This function will return an error if fuel consumption is not enabled via
874    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Errors if fuel consumption isn't enabled in this store's config.
        self.inner.set_fuel(fuel)
    }
878
879    /// Configures a [`Store`] to yield execution of async WebAssembly code
880    /// periodically.
881    ///
882    /// When a [`Store`] is configured to consume fuel with
883    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
884    /// configure WebAssembly to be suspended and control will be yielded back to the
885    /// caller every `interval` units of fuel consumed. This is only suitable with use of
886    /// a store associated with an [async config](crate::Config::async_support) because
887    /// only then are futures used and yields are possible.
888    ///
889    /// The purpose of this behavior is to ensure that futures which represent
890    /// execution of WebAssembly do not execute too long inside their
891    /// `Future::poll` method. This allows for some form of cooperative
892    /// multitasking where WebAssembly will voluntarily yield control
893    /// periodically (based on fuel consumption) back to the running thread.
894    ///
895    /// Note that futures returned by this crate will automatically flag
896    /// themselves to get re-polled if a yield happens. This means that
897    /// WebAssembly will continue to execute, just after giving the host an
898    /// opportunity to do something else.
899    ///
900    /// The `interval` parameter indicates how much fuel should be
901    /// consumed between yields of an async future. When fuel runs out wasm will trap.
902    ///
903    /// # Error
904    ///
905    /// This method will error if it is not called on a store associated with an [async
906    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // `None` disables periodic yielding; errors on non-async stores.
        self.inner.fuel_async_yield_interval(interval)
    }
910
911    /// Sets the epoch deadline to a certain number of ticks in the future.
912    ///
913    /// When the Wasm guest code is compiled with epoch-interruption
914    /// instrumentation
915    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
916    /// and when the `Engine`'s epoch is incremented
917    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
918    /// past a deadline, execution can be configured to either trap or
919    /// yield and then continue.
920    ///
921    /// This deadline is always set relative to the current epoch:
922    /// `ticks_beyond_current` ticks in the future. The deadline can
923    /// be set explicitly via this method, or refilled automatically
924    /// on a yield if configured via
925    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
926    /// this method is invoked, the deadline is reached when
927    /// [`Engine::increment_epoch()`] has been invoked at least
928    /// `ticks_beyond_current` times.
929    ///
930    /// By default a store will trap immediately with an epoch deadline of 0
931    /// (which has always "elapsed"). This method is required to be configured
932    /// for stores with epochs enabled to some future epoch deadline.
933    ///
934    /// See documentation on
935    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
936    /// for an introduction to epoch-based interruption.
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // The deadline is always relative to the engine's *current* epoch.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
940
941    /// Configures epoch-deadline expiration to trap.
942    ///
943    /// When epoch-interruption-instrumented code is executed on this
944    /// store and the epoch deadline is reached before completion,
945    /// with the store configured in this way, execution will
946    /// terminate with a trap as soon as an epoch check in the
947    /// instrumented code is reached.
948    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
954    ///
955    /// This setting is intended to allow for coarse-grained
956    /// interruption, but not a deterministic deadline of a fixed,
957    /// finite interval. For deterministic interruption, see the
958    /// "fuel" mechanism instead.
959    ///
960    /// Note that when this is used it's required to call
961    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
962    /// trap.
963    ///
964    /// See documentation on
965    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
966    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_trap(&mut self) {
        // A deadline must still be armed via `Store::set_epoch_deadline`,
        // otherwise wasm traps immediately (see docs above).
        self.inner.epoch_deadline_trap();
    }
970
971    /// Configures epoch-deadline expiration to invoke a custom callback
972    /// function.
973    ///
974    /// When epoch-interruption-instrumented code is executed on this
975    /// store and the epoch deadline is reached before completion, the
976    /// provided callback function is invoked.
977    ///
978    /// This callback should either return an [`UpdateDeadline`], or
979    /// return an error, which will terminate execution with a trap.
980    ///
981    /// The [`UpdateDeadline`] is a positive number of ticks to
982    /// add to the epoch deadline, as well as indicating what
983    /// to do after the callback returns. If the [`Store`] is
984    /// configured with async support, then the callback may return
985    /// [`UpdateDeadline::Yield`] to yield to the async executor before
986    /// updating the epoch deadline. Alternatively, the callback may
987    /// return [`UpdateDeadline::Continue`] to update the epoch deadline
988    /// immediately.
989    ///
990    /// This setting is intended to allow for coarse-grained
991    /// interruption, but not a deterministic deadline of a fixed,
992    /// finite interval. For deterministic interruption, see the
993    /// "fuel" mechanism instead.
994    ///
995    /// See documentation on
996    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
997    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Boxed and forwarded to the internal store state; invoked when the
        // epoch deadline is reached during execution.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1004
    /// Configures epoch-deadline expiration to yield to the async
    /// caller and then update the deadline.
1007    ///
1008    /// When epoch-interruption-instrumented code is executed on this
1009    /// store and the epoch deadline is reached before completion,
1010    /// with the store configured in this way, execution will yield
1011    /// (the future will return `Pending` but re-awake itself for
1012    /// later execution) and, upon resuming, the store will be
1013    /// configured with an epoch deadline equal to the current epoch
1014    /// plus `delta` ticks.
1015    ///
1016    /// This setting is intended to allow for cooperative timeslicing
1017    /// of multiple CPU-bound Wasm guests in different stores, all
1018    /// executing under the control of an async executor. To drive
1019    /// this, stores should be configured to "yield and update"
1020    /// automatically with this function, and some external driver (a
1021    /// thread that wakes up periodically, or a timer
1022    /// signal/interrupt) should call
1023    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch).
1024    ///
1025    /// See documentation on
1026    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1027    /// for an introduction to epoch-based interruption.
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        // Upon resuming from a yield the deadline becomes the current epoch
        // plus `delta` ticks.
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
1032}
1033
impl<'a, T> StoreContext<'a, T> {
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned borrow lives for the full `'a` lifetime of this
    /// context, not just for the duration of this `&self` borrow.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1058
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.0.gc()
    }

    /// Perform garbage collection of `ExternRef`s asynchronously.
    ///
    /// Same as [`Store::gc_async`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.0.gc_async().await;
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Configures epoch-deadline expiration to yield to the async
    /// caller and then update the deadline.
    ///
    /// For more information see
    /// [`Store::epoch_deadline_async_yield_and_update`].
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}
1147
impl<T> StoreInner<T> {
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Invoked on every wasm<->host transition; the inline fast path covers
    /// the common case where neither a protection key nor a call hook is
    /// configured.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // If a protection key is configured, toggle what the allocator allows
        // based on the direction of the transition: entering wasm restricts
        // access to this store's pkey, returning to the host re-allows all.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            // Restore the hook so subsequent transitions see it again.
            self.call_hook = Some(call_hook);
            result
        } else {
            Ok(())
        }
    }

    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => unsafe {
                // NOTE(review): `block_on` presumably requires executing on a
                // fiber stack with an async context available; a missing
                // context is surfaced as an error rather than a panic —
                // confirm against `async_cx`'s documented requirements.
                self.inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(
                        handler
                            .handle_call_event((&mut *self).as_context_mut(), s)
                            .as_mut(),
                    )?
            },

            // This variant only exists to keep the `T` type parameter used
            // when the other variants are compiled out; it is uninhabited so
            // this arm can never execute.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }
}
1214
/// Computes the fuel remaining given the VM's active fuel counter and the
/// store's reserve.
///
/// The VM's counter (`injected_fuel`) is injected as a negative value and
/// counts up towards zero as wasm executes, so the unconsumed portion is its
/// negation; any remaining fuel beyond that lives in `fuel_reserve`.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // `saturating_neg` avoids overflow when negating `i64::MIN` (plain `-`
    // would panic in debug builds), and `saturating_add_signed` clamps at
    // zero once consumption has exceeded what was injected.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1218
1219// Add remaining fuel from the reserve into the active fuel if there is any left.
1220fn refuel(
1221    injected_fuel: &mut i64,
1222    fuel_reserve: &mut u64,
1223    yield_interval: Option<NonZeroU64>,
1224) -> bool {
1225    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1226    if fuel > 0 {
1227        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1228        true
1229    } else {
1230        false
1231    }
1232}
1233
// Splits `new_fuel_amount` between the VM's active fuel counter and the
// store's reserve.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // The VM only receives at most one yield-interval's worth of fuel at a
    // time (everything when no interval is configured), and never more than
    // fits in an `i64` since that's the type of the VM's counter.
    let max_injectable = yield_interval
        .map(|i| i.get())
        .unwrap_or(u64::MAX)
        .min(i64::MAX as u64);
    let injected = new_fuel_amount.min(max_injectable);
    // Whatever doesn't fit into this injection is banked for later refills.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM counts fuel by incrementing towards zero and halts once the
    // counter goes positive, so the injected amount is stored negated.
    *injected_fuel = -(injected as i64);
}
1253
1254#[doc(hidden)]
1255impl StoreOpaque {
    pub fn id(&self) -> StoreId {
        // A store's identity is derived from its `StoreData`.
        self.store_data.id()
    }
1259
1260    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1261        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1262            let new = slot.saturating_add(amt);
1263            if new > max {
1264                bail!(
1265                    "resource limit exceeded: {} count too high at {}",
1266                    desc,
1267                    new
1268                );
1269            }
1270            *slot = new;
1271            Ok(())
1272        }
1273
1274        let module = module.env_module();
1275        let memories = module.memory_plans.len() - module.num_imported_memories;
1276        let tables = module.table_plans.len() - module.num_imported_tables;
1277
1278        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1279        bump(
1280            &mut self.memory_count,
1281            self.memory_limit,
1282            memories,
1283            "memory",
1284        )?;
1285        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1286
1287        Ok(())
1288    }
1289
    #[inline]
    pub fn async_support(&self) -> bool {
        // Compile-time gate first: without the `async` feature this is
        // unconditionally `false` regardless of the engine's configuration.
        cfg!(feature = "async") && self.engine().config().async_support
    }
1294
    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1299
    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1304
    /// Exclusive access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1309
    /// Shared access to this store's `ModuleRegistry`.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1314
    /// Exclusive access to this store's `ModuleRegistry`.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
1319
    /// Exclusive access to this store's `FuncRefs`.
    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }
1323
    /// Fills this store's `FuncRefs` from its module registry.
    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&mut self.modules);
    }
1327
    /// Pushes pre-instantiation `VMFuncRef`s into this store's `FuncRefs`.
    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }
1331
    /// Exclusive access to the host-created globals stored in this store.
    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1335
1336    pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
1337        match self.instances[instance.0].kind {
1338            StoreInstanceKind::Dummy => None,
1339            StoreInstanceKind::Real { module_id } => {
1340                let module = self
1341                    .modules()
1342                    .lookup_module_by_id(module_id)
1343                    .expect("should always have a registered module for real instances");
1344                Some(module)
1345            }
1346        }
1347    }
1348
1349    pub unsafe fn add_instance(
1350        &mut self,
1351        handle: InstanceHandle,
1352        module_id: RegisteredModuleId,
1353    ) -> InstanceId {
1354        self.instances.push(StoreInstance {
1355            handle: handle.clone(),
1356            kind: StoreInstanceKind::Real { module_id },
1357        });
1358        InstanceId(self.instances.len() - 1)
1359    }
1360
1361    /// Add a dummy instance that to the store.
1362    ///
1363    /// These are instances that are just implementation details of something
1364    /// else (e.g. host-created memories that are not actually defined in any
1365    /// Wasm module) and therefore shouldn't show up in things like core dumps.
1366    pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
1367        self.instances.push(StoreInstance {
1368            handle: handle.clone(),
1369            kind: StoreInstanceKind::Dummy,
1370        });
1371        InstanceId(self.instances.len() - 1)
1372    }
1373
    /// Shared access to the instance handle for `id`.
    ///
    /// Panics if `id` is out of bounds for this store.
    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }
1377
    /// Exclusive access to the instance handle for `id`.
    ///
    /// Panics if `id` is out of bounds for this store.
    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }
1381
1382    /// Get all instances (ignoring dummy instances) within this store.
1383    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1384        let instances = self
1385            .instances
1386            .iter()
1387            .enumerate()
1388            .filter_map(|(idx, inst)| {
1389                let id = InstanceId::from_index(idx);
1390                if let StoreInstanceKind::Dummy = inst.kind {
1391                    None
1392                } else {
1393                    Some(InstanceData::from_id(id))
1394                }
1395            })
1396            .collect::<Vec<_>>();
1397        instances
1398            .into_iter()
1399            .map(|i| Instance::from_wasmtime(i, self))
1400    }
1401
1402    /// Get all memories (host- or Wasm-defined) within this store.
1403    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
1404        // NB: Host-created memories have dummy instances. Therefore, we can get
1405        // all memories in the store by iterating over all instances (including
1406        // dummy instances) and getting each of their defined memories.
1407        let mems = self
1408            .instances
1409            .iter_mut()
1410            .flat_map(|instance| instance.handle.defined_memories())
1411            .collect::<Vec<_>>();
1412        mems.into_iter()
1413            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
1414    }
1415
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.

        // Temporarily moves `self.instances` out of the store so `f` can be
        // handed `&mut StoreOpaque` while the instances are iterated; the
        // `Drop` impl guarantees the instances are put back even if `f`
        // panics.
        struct TempTakeInstances<'a> {
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let instances = mem::take(&mut store.instances);
                Self { instances, store }
            }
        }

        impl Drop for TempTakeInstances<'_> {
            fn drop(&mut self) {
                // Nothing should have repopulated the store's instances while
                // they were taken.
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeInstances::new(self);
        for instance in temp.instances.iter_mut() {
            for table in instance.handle.defined_tables() {
                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
                f(temp.store, table);
            }
        }
    }
1449
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // Temporarily moves both the host-created globals and the instances
        // out of the store so `f` can be handed `&mut StoreOpaque` during
        // iteration; the `Drop` impl puts everything back even if `f` panics.
        struct TempTakeHostGlobalsAndInstances<'a> {
            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let host_globals = mem::take(&mut store.host_globals);
                let instances = mem::take(&mut store.instances);
                Self {
                    host_globals,
                    instances,
                    store,
                }
            }
        }

        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
            fn drop(&mut self) {
                // Nothing should have repopulated these fields while they
                // were taken.
                assert!(self.store.host_globals.is_empty());
                self.store.host_globals = mem::take(&mut self.host_globals);
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
        // NOTE(review): the raw-pointer derefs below presumably rely on
        // `StoreBox` contents having a stable address while iterated —
        // confirm against `StoreBox`'s guarantees.
        unsafe {
            // First enumerate all the host-created globals. These have no
            // defining instance, hence the null `vmctx`.
            for global in temp.host_globals.iter() {
                let export = ExportGlobal {
                    definition: &mut (*global.get()).global as *mut _,
                    vmctx: core::ptr::null_mut(),
                    global: (*global.get()).ty.to_wasm_type(),
                };
                let global = Global::from_wasmtime_global(export, temp.store);
                f(temp.store, global);
            }

            // Then enumerate all instances' defined globals.
            for instance in temp.instances.iter_mut() {
                for (_, export) in instance.handle.defined_globals() {
                    let global = Global::from_wasmtime_global(export, temp.store);
                    f(temp.store, global);
                }
            }
        }
    }
1501
    /// Installs (or clears, when `None`) the custom signal handler consulted
    /// by Wasmtime's trap machinery for this store.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<Box<SignalHandler<'static>>>) {
        self.signal_handler = handler;
    }
1506
    /// Returns a shared borrow of this store's `VMRuntimeLimits`.
    #[inline]
    pub fn runtime_limits(&self) -> &VMRuntimeLimits {
        &self.runtime_limits
    }
1511
1512    #[inline(never)]
1513    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
1514        assert!(self.gc_store.is_none());
1515        let gc_store = allocate_gc_store(self.engine())?;
1516        self.gc_store = Some(gc_store);
1517        return Ok(());
1518
1519        #[cfg(feature = "gc")]
1520        fn allocate_gc_store(engine: &Engine) -> Result<GcStore> {
1521            let (index, heap) = if engine
1522                .config()
1523                .features
1524                .contains(wasmparser::WasmFeatures::REFERENCE_TYPES)
1525            {
1526                engine
1527                    .allocator()
1528                    .allocate_gc_heap(&**engine.gc_runtime())?
1529            } else {
1530                (
1531                    GcHeapAllocationIndex::default(),
1532                    crate::runtime::vm::disabled_gc_heap(),
1533                )
1534            };
1535            Ok(GcStore::new(index, heap))
1536        }
1537
1538        #[cfg(not(feature = "gc"))]
1539        fn allocate_gc_store(_engine: &Engine) -> Result<GcStore> {
1540            Ok(GcStore::new(
1541                GcHeapAllocationIndex::default(),
1542                crate::runtime::vm::disabled_gc_heap(),
1543            ))
1544        }
1545    }
1546
    /// Returns a shared borrow of this store's GC heap.
    ///
    /// # Errors
    ///
    /// Fails if the GC heap has not been allocated for this store yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
1555
1556    #[inline]
1557    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
1558        if self.gc_store.is_none() {
1559            self.allocate_gc_heap()?;
1560        }
1561        Ok(self.unwrap_gc_store_mut())
1562    }
1563
    /// Returns a shared borrow of this store's GC heap.
    ///
    /// # Panics
    ///
    /// Panics if the GC heap has not been allocated yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1571
    /// Returns a mutable borrow of this store's GC heap.
    ///
    /// # Panics
    ///
    /// Panics if the GC heap has not been allocated yet.
    #[inline]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1578
    /// Returns a shared borrow of this store's GC root set.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1583
    /// Returns a mutable borrow of this store's GC root set.
    #[inline]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1588
    /// Exits the LIFO rooting scope identified by `scope`.
    ///
    /// The GC store is passed along as an `Option` since the heap may not
    /// have been allocated yet.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1593
    /// Performs a synchronous garbage collection of this store's GC heap.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // `roots.iter()` is `unsafe`; the list was populated just above by
        // `trace_roots` and is only used for this single collection.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;
    }
1612
    /// No-op garbage collection stub used when the `gc` feature is disabled.
    #[inline]
    #[cfg(not(feature = "gc"))]
    pub fn gc(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1622
    /// Populates `gc_roots_list` with every GC root known to this store:
    /// Wasm-stack roots, vmctx (global/table) roots, and user roots.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1636
1637    #[cfg(all(feature = "async", feature = "gc"))]
1638    pub async fn gc_async(&mut self) {
1639        assert!(
1640            self.async_support(),
1641            "cannot use `gc_async` without enabling async support in the config",
1642        );
1643
1644        // If the GC heap hasn't been initialized, there is nothing to collect.
1645        if self.gc_store.is_none() {
1646            return;
1647        }
1648
1649        // Take the GC roots out of `self` so we can borrow it mutably but still
1650        // call mutable methods on `self`.
1651        let mut roots = std::mem::take(&mut self.gc_roots_list);
1652
1653        self.trace_roots_async(&mut roots).await;
1654        self.unwrap_gc_store_mut()
1655            .gc_async(unsafe { roots.iter() })
1656            .await;
1657
1658        // Restore the GC roots for the next GC.
1659        roots.clear();
1660        self.gc_roots_list = roots;
1661    }
1662
    /// No-op async garbage collection stub used when the `gc` feature is
    /// disabled.
    #[inline]
    #[cfg(all(feature = "async", not(feature = "gc")))]
    pub async fn gc_async(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1672
    /// Async counterpart of `trace_roots`: traces the same three root sets,
    /// but yields back to the executor between the phases so long traces stay
    /// cooperative.
    #[cfg(all(feature = "async", feature = "gc"))]
    async fn trace_roots_async(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Yield;

        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        Yield::new().await;
        self.trace_vmctx_roots(gc_roots_list);
        Yield::new().await;
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1690
    /// Walks the live Wasm frames on this store's stack and, using each
    /// frame's stack map, adds any non-null GC references found there to
    /// `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use core::ptr::NonNull;

        use crate::runtime::vm::{ModuleInfoLookup, SendSyncPtr};

        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self.vmruntime_limits().cast_const(), |frame| {
            let pc = frame.pc();
            debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

            let fp = frame.fp();
            debug_assert!(
                fp != 0,
                "we should always get a valid frame pointer for Wasm frames"
            );
            // The PC identifies which module this frame belongs to, and hence
            // where to find its stack maps.
            let module_info = self
                .modules()
                .lookup(pc)
                .expect("should have module info for Wasm frame");

            // A frame with no stack map at this PC holds no live GC refs here.
            let stack_map = match module_info.lookup_stack_map(pc) {
                Some(sm) => sm,
                None => {
                    log::trace!("No stack map for this Wasm frame");
                    return core::ops::ControlFlow::Continue(());
                }
            };
            log::trace!(
                "We have a stack map that maps {} words in this Wasm frame",
                stack_map.mapped_words()
            );

            // Recover the SP for the mapped region: it sits
            // `mapped_words * word_size` bytes below the frame pointer.
            let sp = fp - stack_map.mapped_words() as usize * mem::size_of::<usize>();

            for i in 0..(stack_map.mapped_words() as usize) {
                // Stack maps have one bit per word in the frame, and the
                // zero^th bit is the *lowest* addressed word in the frame,
                // i.e. the closest to the SP. So to get the `i`^th word in
                // this frame, we add `i * sizeof(word)` to the SP.
                let stack_slot = sp + i * mem::size_of::<usize>();
                let stack_slot = stack_slot as *mut u64;

                if !stack_map.get_bit(i) {
                    log::trace!("Stack slot @ {stack_slot:p} does not contain gc_refs");
                    continue;
                }

                // SAFETY: the stack map marked this slot as a GC-ref word
                // within the live frame delimited by `fp`/`sp` above.
                let gc_ref = unsafe { core::ptr::read(stack_slot) };
                log::trace!("Stack slot @ {stack_slot:p} = {gc_ref:#x}");

                let gc_ref = VMGcRef::from_r64(gc_ref)
                    .expect("we should never use the high 32 bits of an r64");

                // Null references are not roots; only record non-null ones.
                if gc_ref.is_some() {
                    unsafe {
                        gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
                            NonNull::new(stack_slot).unwrap(),
                        ));
                    }
                }
            }

            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1760
    /// Traces GC roots reachable from vmctxs: every global and every table
    /// in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
1768
    /// Traces GC roots created by the embedder via this store's root set.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1775
    /// Insert a type into this store. This makes it suitable for the embedder
    /// to allocate instances of this type in this store, and we don't have to
    /// worry about the type being reclaimed (since it is possible that none of
    /// the Wasm modules in this store are holding it alive).
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: RegisteredType) {
        // Holding the `RegisteredType` in this set keeps its registration
        // alive for as long as the store lives.
        self.gc_host_alloc_types.insert(ty);
    }
1783
    /// Yields the async context, assuming that we are executing on a fiber and
    /// that fiber is not in the process of dying. This function will return
    /// None in the latter case (the fiber is dying), and panic if
    /// `async_support()` is false.
    #[cfg(feature = "async")]
    #[inline]
    pub fn async_cx(&self) -> Option<AsyncCx> {
        assert!(self.async_support());

        // A null outer pointer means no poll context storage is installed at
        // all, so there is no context to hand out.
        let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
        if poll_cx_box_ptr.is_null() {
            return None;
        }

        // SAFETY: the outer pointer was just checked for null; it comes from
        // `self.async_state.current_poll_cx` owned by this store. A null
        // inner pointer means no future is currently polling this fiber.
        let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
        if poll_cx_inner_ptr.is_null() {
            return None;
        }

        Some(AsyncCx {
            current_suspend: self.async_state.current_suspend.get(),
            current_poll_cx: poll_cx_box_ptr,
            // Only track pkey context switches when this store actually has a
            // protection key.
            track_pkey_context_switch: self.pkey.is_some(),
        })
    }
1809
    /// Returns the amount of fuel remaining in this store.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption is not enabled in this store's config.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): raw read of the `fuel_consumed` cell shared with
        // executing Wasm; assumed not to race with running Wasm — confirm.
        let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
1818
    /// Refills the injected fuel counter from this store's fuel reserve via
    /// the free-standing `refuel` helper, returning its result (presumably
    /// whether more fuel was available — confirm against that helper).
    fn refuel(&mut self) -> bool {
        // NOTE(review): raw mutable access to the `fuel_consumed` cell shared
        // with executing Wasm; assumed not to race — confirm.
        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
1827
    /// Sets the remaining fuel in this store to `fuel`.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption is not enabled in this store's config.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): raw mutable access to the `fuel_consumed` cell shared
        // with executing Wasm; assumed not to race — confirm.
        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
1842
1843    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1844        anyhow::ensure!(
1845            self.engine().tunables().consume_fuel,
1846            "fuel is not configured in this store"
1847        );
1848        anyhow::ensure!(
1849            self.engine().config().async_support,
1850            "async support is not configured in this store"
1851        );
1852        anyhow::ensure!(
1853            interval != Some(0),
1854            "fuel_async_yield_interval must not be 0"
1855        );
1856        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1857        // Reset the fuel active + reserve states by resetting the amount.
1858        self.set_fuel(self.get_fuel()?)
1859    }
1860
    /// Yields execution to the caller on out-of-gas or epoch interruption.
    ///
    /// This only works on async futures and stores, and assumes that we're
    /// executing on a fiber. This will yield execution back to the caller once.
    #[cfg(feature = "async")]
    fn async_yield_impl(&mut self) -> Result<()> {
        use crate::runtime::vm::Yield;

        let mut future = Yield::new();

        // When control returns, we have a `Result<()>` passed
        // in from the host fiber. If this finished successfully then
        // we were resumed normally via a `poll`, so keep going.  If
        // the future was dropped while we were yielded, then we need
        // to clean up this fiber. Do so by raising a trap which will
        // abort all wasm and get caught on the other side to clean
        // things up.
        unsafe {
            // SAFETY: `future` is a local that is never moved again after
            // being pinned here, so `Pin::new_unchecked` is sound.
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }
1884
1885    #[inline]
1886    pub fn signal_handler(&self) -> Option<*const SignalHandler<'static>> {
1887        let handler = self.signal_handler.as_ref()?;
1888        Some(&**handler as *const _)
1889    }
1890
1891    #[inline]
1892    pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
1893        &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits
1894    }
1895
    /// Returns the raw `VMContext` of this store's default-caller instance.
    #[inline]
    pub fn default_caller(&self) -> *mut VMContext {
        self.default_caller.vmctx()
    }
1900
    /// Returns the raw `dyn Store` trait-object pointer recorded in the
    /// default-caller instance.
    pub fn traitobj(&self) -> *mut dyn crate::runtime::vm::Store {
        self.default_caller.store()
    }
1904
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// Leaves an empty vector in its place; pair with
    /// `save_hostcall_val_storage` to return the buffer afterwards.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
1911
1912    /// Restores the vector previously taken by `take_hostcall_val_storage`
1913    /// above back into the store, allowing it to be used in the future for the
1914    /// next wasm->host call.
1915    #[inline]
1916    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1917        if storage.capacity() > self.hostcall_val_storage.capacity() {
1918            self.hostcall_val_storage = storage;
1919        }
1920    }
1921
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Leaves an empty vector in its place; pair with
    /// `save_wasm_val_raw_storage` to return the buffer afterwards.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
1928
1929    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1930    /// calling wasm.
1931    #[inline]
1932    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1933        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1934            self.wasm_val_raw_storage = storage;
1935        }
1936    }
1937
    /// Appends `funcs` to this store's list of rooted host functions.
    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
        self.rooted_host_funcs.push(funcs);
    }
1941
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for instance in self.instances.iter() {
            if let Some(f) = instance.handle.wasm_fault(addr) {
                // At most one instance should claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory in this store claims `addr`: a wasm-attributed
        // fault we cannot explain, so halt the process below.
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "std", unix, windows))] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no way to
                // abort the process portably, so flag a compile time error.
                //
                // NB: if this becomes a problem in the future one option would
                // be to extend the `capi.rs` module for no_std platforms, but
                // it remains yet to be seen at this time if this is hit much.
                compile_error!("either `std` or `panic=abort` must be enabled");
                None
            }
        }
    }
2041
    /// Retrieve the store's protection key, if one is configured.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2047
    /// Returns mutable borrows of this store's component-model call contexts,
    /// host resource table, and host resource data — split into a tuple so all
    /// three can be borrowed simultaneously.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut crate::runtime::vm::component::CallContexts,
        &mut crate::runtime::vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
2063
    /// Records that a component instance was created within this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2073}
2074
2075impl<T> StoreContextMut<'_, T> {
2076    /// Executes a synchronous computation `func` asynchronously on a new fiber.
2077    ///
2078    /// This function will convert the synchronous `func` into an asynchronous
2079    /// future. This is done by running `func` in a fiber on a separate native
2080    /// stack which can be suspended and resumed from.
2081    ///
2082    /// Most of the nitty-gritty here is how we juggle the various contexts
2083    /// necessary to suspend the fiber later on and poll sub-futures. It's hoped
2084    /// that the various comments are illuminating as to what's going on here.
2085    #[cfg(feature = "async")]
2086    pub(crate) async fn on_fiber<R>(
2087        &mut self,
2088        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
2089    ) -> Result<R>
2090    where
2091        T: Send,
2092    {
2093        let config = self.engine().config();
2094        debug_assert!(self.0.async_support());
2095        debug_assert!(config.async_stack_size > 0);
2096
2097        let mut slot = None;
2098        let future = {
2099            let current_poll_cx = self.0.async_state.current_poll_cx.get();
2100            let current_suspend = self.0.async_state.current_suspend.get();
2101            let stack = self.engine().allocator().allocate_fiber_stack()?;
2102
2103            let engine = self.engine().clone();
2104            let slot = &mut slot;
2105            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
2106                // First check and see if we were interrupted/dropped, and only
2107                // continue if we haven't been.
2108                keep_going?;
2109
2110                // Configure our store's suspension context for the rest of the
2111                // execution of this fiber. Note that a raw pointer is stored here
2112                // which is only valid for the duration of this closure.
2113                // Consequently we at least replace it with the previous value when
2114                // we're done. This reset is also required for correctness because
2115                // otherwise our value will overwrite another active fiber's value.
2116                // There should be a test that segfaults in `async_functions.rs` if
2117                // this `Replace` is removed.
2118                unsafe {
2119                    let _reset = Reset(current_suspend, *current_suspend);
2120                    *current_suspend = suspend;
2121
2122                    *slot = Some(func(self));
2123                    Ok(())
2124                }
2125            })?;
2126
2127            // Once we have the fiber representing our synchronous computation, we
2128            // wrap that in a custom future implementation which does the
2129            // translation from the future protocol to our fiber API.
2130            FiberFuture {
2131                fiber: Some(fiber),
2132                current_poll_cx,
2133                engine,
2134                state: Some(crate::runtime::vm::AsyncWasmCallState::new()),
2135            }
2136        };
2137        future.await?;
2138
2139        return Ok(slot.unwrap());
2140
2141        struct FiberFuture<'a> {
2142            fiber: Option<wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>>,
2143            current_poll_cx: *mut *mut Context<'static>,
2144            engine: Engine,
2145            // See comments in `FiberFuture::resume` for this
2146            state: Option<crate::runtime::vm::AsyncWasmCallState>,
2147        }
2148
2149        // This is surely the most dangerous `unsafe impl Send` in the entire
2150        // crate. There are two members in `FiberFuture` which cause it to not
2151        // be `Send`. One is `current_poll_cx` and is entirely uninteresting.
2152        // This is just used to manage `Context` pointers across `await` points
2153        // in the future, and requires raw pointers to get it to happen easily.
2154        // Nothing too weird about the `Send`-ness, values aren't actually
2155        // crossing threads.
2156        //
2157        // The really interesting piece is `fiber`. Now the "fiber" here is
2158        // actual honest-to-god Rust code which we're moving around. What we're
2159        // doing is the equivalent of moving our thread's stack to another OS
2160        // thread. Turns out we, in general, have no idea what's on the stack
2161        // and would generally have no way to verify that this is actually safe
2162        // to do!
2163        //
2164        // Thankfully, though, Wasmtime has the power. Without being glib it's
2165        // actually worth examining what's on the stack. It's unfortunately not
2166        // super-local to this function itself. Our closure to `Fiber::new` runs
2167        // `func`, which is given to us from the outside. Thankfully, though, we
2168        // have tight control over this. Usage of `on_fiber` is typically done
2169        // *just* before entering WebAssembly itself, so we'll have a few stack
2170        // frames of Rust code (all in Wasmtime itself) before we enter wasm.
2171        //
2172        // Once we've entered wasm, well then we have a whole bunch of wasm
2173        // frames on the stack. We've got this nifty thing called Cranelift,
2174        // though, which allows us to also have complete control over everything
2175        // on the stack!
2176        //
2177        // Finally, when wasm switches back to the fiber's starting pointer
2178        // (this future we're returning) then it means wasm has reentered Rust.
2179        // Suspension can only happen via the `block_on` function of an
2180        // `AsyncCx`. This, conveniently, also happens entirely in Wasmtime
2181        // controlled code!
2182        //
2183        // There's an extremely important point that should be called out here.
2184        // User-provided futures **are not on the stack** during suspension
2185        // points. This is extremely crucial because we in general cannot reason
2186        // about Send/Sync for stack-local variables since rustc doesn't analyze
2187        // them at all. With our construction, though, we are guaranteed that
2188        // Wasmtime owns all stack frames between the stack of a fiber and when
2189        // the fiber suspends (and it could move across threads). At this time
2190        // the only user-provided piece of data on the stack is the future
2191        // itself given to us. Lo-and-behold as you might notice the future is
2192        // required to be `Send`!
2193        //
2194        // What this all boils down to is that we, as the authors of Wasmtime,
2195        // need to be extremely careful that on the async fiber stack we only
2196        // store Send things. For example we can't start using `Rc` willy nilly
2197        // by accident and leave a copy in TLS somewhere. (similarly we have to
2198        // be ready for TLS to change while we're executing wasm code between
2199        // suspension points).
2200        //
2201        // While somewhat onerous it shouldn't be too too hard (the TLS bit is
2202        // the hardest bit so far). This does mean, though, that no user should
2203        // ever have to worry about the `Send`-ness of Wasmtime. If rustc says
2204        // it's ok, then it's ok.
2205        //
2206        // With all that in mind we unsafely assert here that wasmtime is
2207        // correct. We declare the fiber as only containing Send data on its
2208        // stack, despite not knowing for sure at compile time that this is
2209        // correct. That's what `unsafe` in Rust is all about, though, right?
        // SAFETY: justified at length in the comment above — Wasmtime keeps
        // only `Send` data on the fiber's stack across suspension points, so
        // the suspended fiber (and this future owning it) may move threads.
        unsafe impl Send for FiberFuture<'_> {}
2211
        impl FiberFuture<'_> {
            /// Returns the underlying fiber, which is always present between
            /// construction and `drop` (where it's `take`n for deallocation).
            fn fiber(&self) -> &wasmtime_fiber::Fiber<'_, Result<()>, (), Result<()>> {
                self.fiber.as_ref().unwrap()
            }

            /// This is a helper function to call `resume` on the underlying
            /// fiber while correctly managing Wasmtime's thread-local data.
            ///
            /// Wasmtime's implementation of traps leverages thread-local data
            /// to get access to metadata during a signal. This thread-local
            /// data is a linked list of "activations" where the nodes of the
            /// linked list are stored on the stack. It would be invalid as a
            /// result to suspend a computation with the head of the linked list
            /// on this stack then move the stack to another thread and resume
            /// it. That means that a different thread would point to our stack
            /// and our thread doesn't point to our stack at all!
            ///
            /// Basically management of TLS is required here one way or another.
            /// The strategy currently settled on is to manage the list of
            /// activations created by this fiber as a unit. When a fiber
            /// resumes the linked list is prepended to the current thread's
            /// list. When the fiber is suspended then the fiber's list of
            /// activations are all removed en-masse and saved within the fiber.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    // Splice this fiber's saved activations onto the current
                    // thread's TLS list, remembering the previous state so it
                    // can be put back when the fiber suspends or finishes.
                    let prev = self.state.take().unwrap().push();
                    let restore = Restore {
                        fiber: self,
                        state: Some(prev),
                    };
                    return restore.fiber.fiber().resume(val);
                }

                // RAII guard: however `resume` exits, unsplice the fiber's
                // activations from the thread's TLS list and stash them back
                // into `fiber.state`.
                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    state: Option<crate::runtime::vm::PreviousAsyncWasmCallState>,
                }

                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.state = Some(self.state.take().unwrap().restore());
                        }
                    }
                }
            }
        }
2259
        impl Future for FiberFuture<'_> {
            type Output = Result<()>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                // We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
                // this must be done unsafely, however, since `cx` is only alive
                // for this one singular function call. Here we do a `transmute`
                // to extend the lifetime of `Context` so it can be stored in
                // our `Store`, and then we replace the current polling context
                // with this one.
                //
                // Note that the replace is done for weird situations where
                // futures might be switching contexts and there's multiple
                // wasmtime futures in a chain of futures.
                //
                // On exit from this function, though, we reset the polling
                // context back to what it was to signify that `Store` no longer
                // has access to this pointer.
                unsafe {
                    // NB: the `Reset` guard restores the previous pointer on
                    // every exit path of this function, including unwinding.
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx =
                        core::mem::transmute::<&mut Context<'_>, *mut Context<'static>>(cx);

                    // After that's set up we resume execution of the fiber, which
                    // may also start the fiber for the first time. This either
                    // returns `Ok` saying the fiber finished (yay!) or it
                    // returns `Err` with the payload passed to `suspend`, which
                    // in our case is `()`.
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),

                        // If `Err` is returned that means the fiber polled a
                        // future but it said "Pending", so we propagate that
                        // here.
                        //
                        // An additional safety check is performed when leaving
                        // this function to help bolster the guarantees of
                        // `unsafe impl Send` above. Notably this future may get
                        // re-polled on a different thread. Wasmtime's
                        // thread-local state points to the stack, however,
                        // meaning that it would be incorrect to leave a pointer
                        // in TLS when this function returns. This function
                        // performs a runtime assert to verify that this is the
                        // case, notably that the one TLS pointer Wasmtime uses
                        // is not pointing anywhere within the stack. If it is
                        // then that's a bug indicating that TLS management in
                        // Wasmtime is incorrect.
                        Err(()) => {
                            if let Some(range) = self.fiber().stack().range() {
                                crate::runtime::vm::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }
2318
        // Dropping futures is pretty special in that it means the future has
        // been requested to be cancelled. Here we run the risk of dropping an
        // in-progress fiber, and if we were to do nothing then the fiber would
        // leak all its owned stack resources.
        //
        // To handle this we implement `Drop` here and, if the fiber isn't done,
        // resume execution of the fiber saying "hey please stop you're
        // interrupted". Our `Trap` created here (which has the stack trace
        // of whomever dropped us) will then get propagated in whatever called
        // `block_on`, and the idea is that the trap propagates all the way back
        // up to the original fiber start, finishing execution.
        //
        // We don't actually care about the fiber's return value here (no one's
        // around to look at it), we just assert the fiber finished to
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if !self.fiber().done() {
                    // The `Err` payload here is what `block_on`'s `suspend`
                    // call returns inside the fiber, unwinding it as a trap.
                    let result = self.resume(Err(anyhow!("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to catch
                    // the trap and re-resume, we'd ideally like to signal to
                    // callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());
                }

                // All of this fiber's TLS activations should have been
                // restored by now; `assert_null` double-checks that nothing
                // was left behind.
                self.state.take().unwrap().assert_null();

                // SAFETY: the fiber is complete (ensured above), so its stack
                // is no longer in use and can be returned to the engine's
                // allocator it was obtained from.
                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.take().unwrap().into_stack());
                }
            }
        }
2354    }
2355}
2356
/// Handle to a store's async state, used by code running on a fiber to
/// suspend back to the fiber's caller via [`AsyncCx::block_on`].
#[cfg(feature = "async")]
pub struct AsyncCx {
    // Location in the store holding the current fiber's `Suspend` handle.
    // `block_on` nulls this slot for its duration to catch accidental bugs.
    current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    // Location in the store holding the `Context` of the `poll` currently in
    // progress; likewise nulled while a sub-future is being polled.
    current_poll_cx: *mut *mut Context<'static>,
    // Whether the MPK protection mask must be saved/restored around fiber
    // suspension points (no-op when MPK is unsupported).
    track_pkey_context_switch: bool,
}
2363
#[cfg(feature = "async")]
impl AsyncCx {
    /// Blocks on the asynchronous computation represented by `future` and
    /// produces the result here, in-line.
    ///
    /// This function is designed to only work when it's currently executing on
    /// a native fiber. This fiber provides the ability for us to handle the
    /// future's `Pending` state as "jump back to whomever called the fiber in
    /// an asynchronous fashion and propagate `Pending`". This tight coupling
    /// with `on_fiber` below is what powers the asynchronicity of calling wasm.
    /// Note that the asynchronous part only applies to host functions, wasm
    /// itself never really does anything asynchronous at this time.
    ///
    /// This function takes a `future` and will (appear to) synchronously wait
    /// on the result. While this function is executing it will fiber switch
    /// to-and-from the original frame calling `on_fiber` which should be a
    /// guarantee due to how async stores are configured.
    ///
    /// The return value here is either the output of the future `T`, or a trap
    /// which represents that the asynchronous computation was cancelled. It is
    /// not recommended to catch the trap and try to keep executing wasm, so
    /// we've tried to liberally document this.
    ///
    /// # Safety
    ///
    /// Must only be called while executing on a Wasmtime-managed fiber, with
    /// `current_suspend` and `current_poll_cx` pointing at valid slots set up
    /// by that fiber's entry; both are dereferenced below and their contents
    /// asserted non-null at runtime.
    pub unsafe fn block_on<U>(
        &self,
        mut future: Pin<&mut (dyn Future<Output = U> + Send)>,
    ) -> Result<U> {
        // Take our current `Suspend` context which was configured as soon as
        // our fiber started. Note that we must load it at the front here and
        // save it on our stack frame. While we're polling the future other
        // fibers may be started for recursive computations, and the current
        // suspend context is only preserved at the edges of the fiber, not
        // during the fiber itself.
        //
        // For a little bit of extra safety we also replace the current value
        // with null to try to catch any accidental bugs on our part early.
        // This is all pretty unsafe so we're trying to be careful...
        //
        // Note that there should be a segfaulting test in `async_functions.rs`
        // if this `Reset` is removed.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null_mut();
        assert!(!suspend.is_null());

        loop {
            let future_result = {
                // Same null-out-and-restore dance as for the suspend pointer
                // above, scoped to just this one `poll` call.
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };

            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }

            // In order to prevent this fiber's MPK state from being munged by
            // other fibers while it is suspended, we save and restore it once
            // execution resumes. Note that when MPK is not supported, these
            // are noops.
            let previous_mask = if self.track_pkey_context_switch {
                let previous_mask = mpk::current_mask();
                mpk::allow(ProtectionMask::all());
                previous_mask
            } else {
                ProtectionMask::all()
            };
            // Switch back to whoever resumed this fiber; execution continues
            // here on the next `poll`. An `Err` means the fiber is being
            // cancelled (see `Drop for FiberFuture`) and is propagated by `?`.
            (*suspend).suspend(())?;
            if self.track_pkey_context_switch {
                mpk::allow(previous_mask);
            }
        }
    }
}
2440
// Implementation of the runtime-internal `Store` trait: these are the hooks
// through which VM/runtime code calls back into the store for limits,
// resource limiters, fuel, epochs, GC, and component-model state.
unsafe impl<T> crate::runtime::vm::Store for StoreInner<T> {
    // Raw pointer to this store's `VMRuntimeLimits`, shared with running wasm.
    fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        <StoreOpaque>::vmruntime_limits(self)
    }

    // Pointer to the engine-wide epoch counter.
    fn epoch_ptr(&self) -> *const AtomicU64 {
        self.engine.epoch_counter() as *const _
    }

    // This store's GC heap, if one has been allocated yet.
    fn maybe_gc_store(&mut self) -> Option<&mut GcStore> {
        self.gc_store.as_mut()
    }

    // Asks the configured resource limiter (if any) whether a linear memory
    // may grow from `current` to `desired` (optional cap `maximum`). An async
    // limiter is driven to completion on the current fiber via `block_on`;
    // with no limiter configured growth is always approved.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                self.inner
                    .async_cx()
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .memory_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            None => Ok(true),
        }
    }

    // Notifies the limiter that a memory growth request failed. Without a
    // limiter the error is logged and deliberately swallowed.
    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }

    // Table analog of `memory_growing` above.
    fn table_growing(
        &mut self,
        current: u32,
        desired: u32,
        maximum: Option<u32>,
    ) -> Result<bool, anyhow::Error> {
        // Need to borrow async_cx before the mut borrow of the limiter.
        // self.async_cx() panics when used with a non-async store, so
        // wrap this in an option.
        #[cfg(feature = "async")]
        let async_cx = if self.async_support()
            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
        {
            Some(self.async_cx().unwrap())
        } else {
            None
        };

        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                async_cx
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .table_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            None => Ok(true),
        }
    }

    // Table analog of `memory_grow_failed` above.
    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            None => {
                log::debug!("ignoring table growth failure: {error:?}");
                Ok(())
            }
        }
    }

    // Called by wasm when fuel is exhausted: refuel from the reserve when
    // possible (trapping with `OutOfFuel` otherwise), and yield to the async
    // executor when a fuel yield interval is configured.
    fn out_of_gas(&mut self) -> Result<()> {
        if !self.refuel() {
            return Err(Trap::OutOfFuel).err2anyhow();
        }
        #[cfg(feature = "async")]
        if self.fuel_yield_interval.is_some() {
            self.async_yield_impl()?;
        }
        Ok(())
    }

    // Called when wasm observes that the epoch deadline has passed: either
    // traps (no behavior configured) or runs the configured callback to pick
    // a new deadline, optionally yielding to the executor first.
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt).err2anyhow(),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,

                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Do the async yield. May return a trap if future was
                        // canceled while we're yielded.
                        self.async_yield_impl()?;
                        delta
                    }
                };

                // Set a new deadline and return the new epoch deadline so
                // the Wasm code doesn't have to reload it.
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        delta_result
    }

    // Performs a garbage collection, keeping `root` (if provided) rooted
    // across the collection and returning its (possibly updated) reference.
    #[cfg(feature = "gc")]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        let mut scope = RootScope::new(self);
        let store = scope.as_context_mut().0;
        let store_id = store.id();
        let root = root.map(|r| store.gc_roots_mut().push_lifo_root(store_id, r));

        // Async stores run the collection as a future on the current fiber;
        // sync stores collect in-line.
        if store.async_support() {
            #[cfg(feature = "async")]
            unsafe {
                let async_cx = store.async_cx();
                let mut future = store.gc_async();
                async_cx
                    .expect("attempted to pull async context during shutdown")
                    .block_on(Pin::new_unchecked(&mut future))?;
            }
        } else {
            (**store).gc();
        }

        // Re-read the root after collection: the GC may have updated it.
        let root = match root {
            None => None,
            Some(r) => {
                let r = r
                    .get_gc_ref(store)
                    .expect("still in scope")
                    .unchecked_copy();
                Some(store.gc_store_mut()?.clone_gc_ref(&r))
            }
        };

        Ok(root)
    }

    // Without the `gc` feature there's nothing to collect; `root` is passed
    // back unchanged.
    #[cfg(not(feature = "gc"))]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        Ok(root)
    }

    // Component-model call-context state owned by this store.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut crate::runtime::vm::component::CallContexts {
        &mut self.component_calls
    }
}
2637
impl<T> StoreInner<T> {
    /// Sets the epoch deadline to `delta` ticks past the engine's current
    /// epoch.
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Safety: this is safe because the epoch deadline in the
        // `VMRuntimeLimits` is accessed only here and by Wasm guest code
        // running in this store, and we have a `&mut self` here.
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline = self.engine().current_epoch() + delta;
    }

    /// Configures epoch expiry to trap: with no behavior installed,
    /// `new_epoch` returns `Err(Trap::Interrupt)`.
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs a callback that `new_epoch` invokes when the deadline is
    /// reached to decide how (or whether) execution continues.
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Configures epoch expiry to yield to the async executor and then extend
    /// the deadline by `delta`. Panics unless the store is async-configured.
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        #[cfg(feature = "async")]
        {
            self.epoch_deadline_behavior =
                Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
        }
        let _ = delta; // suppress warning in non-async build
    }

    /// Reads the raw epoch deadline currently stored in `VMRuntimeLimits`.
    fn get_epoch_deadline(&self) -> u64 {
        // Safety: this is safe because, as above, it is only invoked
        // from within `new_epoch` which is called from guest Wasm
        // code, which will have an exclusive borrow on the Store.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline
    }
}
2685
2686impl<T: Default> Default for Store<T> {
2687    fn default() -> Store<T> {
2688        Store::new(&Engine::default(), T::default())
2689    }
2690}
2691
2692impl<T: fmt::Debug> fmt::Debug for Store<T> {
2693    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2694        let inner = &**self.inner as *const StoreInner<T>;
2695        f.debug_struct("Store")
2696            .field("inner", &inner)
2697            .field("data", &self.inner.data)
2698            .finish()
2699    }
2700}
2701
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // for documentation on this `unsafe`, see `into_data`.
        unsafe {
            // The user's `T` is dropped first, then the remainder of the
            // store's internals (`StoreInner`); both fields are `ManuallyDrop`
            // so this is the only place they're destroyed.
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2711
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            for instance in self.instances.iter_mut() {
                // Dummy instances are deallocated with a throwaway on-demand
                // allocator rather than the engine's configured one; all other
                // instances go back to the engine's allocator.
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }
            // The default caller instance is likewise handled on-demand.
            ondemand.deallocate_module(&mut self.default_caller);

            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                allocator.deallocate_gc_heap(gc_store.allocation_index, gc_store.gc_heap);
            }

            #[cfg(feature = "component-model")]
            {
                // Undo the per-component-instance count bumps made while this
                // store was alive.
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }

            // See documentation for these fields on `StoreOpaque` for why they
            // must be dropped in this order.
            ManuallyDrop::drop(&mut self.store_data);
            ManuallyDrop::drop(&mut self.rooted_host_funcs);
        }
    }
}
2748
2749impl crate::runtime::vm::ModuleInfoLookup for ModuleRegistry {
2750    fn lookup(&self, pc: usize) -> Option<&dyn crate::runtime::vm::ModuleInfo> {
2751        self.lookup_module_info(pc)
2752    }
2753}
2754
/// Scope guard which, when dropped, writes a saved value back through a raw
/// pointer — used above to restore the store's suspend/poll pointers on every
/// exit path.
struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        // SAFETY: whoever constructed this guard guarantees the pointer stays
        // valid for writes for the guard's entire lifetime. `T: Copy` means
        // there's no old value to drop, so a raw `write` is equivalent to an
        // assignment through the pointer.
        unsafe {
            self.0.write(self.1);
        }
    }
}
2764
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Miniature stand-in for the store's fuel accounting state, wired
    /// directly to the free functions under test.
    struct FuelTank {
        // Units consumed so far; `set_fuel` seeds this negative so there is
        // fuel available to burn (see `smoke` below).
        pub consumed_fuel: i64,
        // Fuel held back, doled out one interval at a time by `refuel`.
        pub reserve_fuel: u64,
        // When `Some`, fuel is split between the active counter and the
        // reserve in chunks of this size.
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    /// Without a yield interval all fuel is placed in the active counter;
    /// with one, only an interval's worth is active and the rest is reserved.
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    /// Values at and around `i64::MAX`/`u64::MAX` must round-trip through
    /// `set_fuel`/`get_fuel` exactly.
    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    /// Same precision guarantee when a yield interval splits fuel between
    /// the active counter and the reserve, including intervals that exceed
    /// what fits in the `i64` active counter.
    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit, as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than there
        // is.
        let mut tank = FuelTank::new();

        // Over-consumption (consumed > 0) is charged against the reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Total fuel is preserved across a refuel, even when the reserve has
        // less than one full interval left.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // When the over-consumption exceeds the reserve, refueling fails and
        // the tank is left untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}
2885}