// dusk_wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
62//! is split out, although sometimes it's useful to have separate borrows into
63//! these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::instance::InstanceData;
80use crate::linker::Definition;
81use crate::module::{BareModuleInfo, RegisteredModuleId};
82use crate::trampoline::VMHostGlobalContext;
83use crate::RootSet;
84use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
85use crate::{Global, Instance, Memory, RootScope, Table};
86use anyhow::{anyhow, bail, Result};
87use once_cell::sync::OnceCell;
88use std::cell::UnsafeCell;
89use std::fmt;
90use std::future::Future;
91use std::marker;
92use std::mem::{self, ManuallyDrop};
93use std::num::NonZeroU64;
94use std::ops::{Deref, DerefMut};
95use std::pin::Pin;
96use std::ptr;
97use std::sync::atomic::AtomicU64;
98use std::sync::Arc;
99use std::task::{Context, Poll};
100use wasmtime_runtime::mpk::{self, ProtectionKey, ProtectionMask};
101use wasmtime_runtime::{
102 Backtrace, ExportGlobal, GcHeapAllocationIndex, GcRootsList, GcStore,
103 InstanceAllocationRequest, InstanceAllocator, InstanceHandle, OnDemandInstanceAllocator,
104 SignalHandler, StoreBox, StorePtr, VMContext, VMFuncRef, VMGcRef, VMRuntimeLimits, WasmFault,
105};
106
107mod context;
108pub use self::context::*;
109mod data;
110pub use self::data::*;
111mod func_refs;
112use func_refs::FuncRefs;
113
114/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
115///
116/// All WebAssembly instances and items will be attached to and refer to a
117/// [`Store`]. For example instances, functions, globals, and tables are all
118/// attached to a [`Store`]. Instances are created by instantiating a
119/// [`Module`](crate::Module) within a [`Store`].
120///
121/// A [`Store`] is intended to be a short-lived object in a program. No form
122/// of GC is implemented at this time so once an instance is created within a
123/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
124/// This makes [`Store`] unsuitable for creating an unbounded number of
125/// instances in it because [`Store`] will never release this memory. It's
126/// recommended to have a [`Store`] correspond roughly to the lifetime of a
127/// "main instance" that an embedding is interested in executing.
128///
129/// ## Type parameter `T`
130///
131/// Each [`Store`] has a type parameter `T` associated with it. This `T`
132/// represents state defined by the host. This state will be accessible through
133/// the [`Caller`](crate::Caller) type that host-defined functions get access
134/// to. This `T` is suitable for storing `Store`-specific information which
135/// imported functions may want access to.
136///
137/// The data `T` can be accessed through methods like [`Store::data`] and
138/// [`Store::data_mut`].
139///
140/// ## Stores, contexts, oh my
141///
142/// Most methods in Wasmtime take something of the form
143/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
144/// the first argument. These two traits allow ergonomically passing in the
145/// context you currently have to any method. The primary two sources of
146/// contexts are:
147///
148/// * `Store<T>`
149/// * `Caller<'_, T>`
150///
151/// corresponding to what you create and what you have access to in a host
152/// function. You can also explicitly acquire a [`StoreContext`] or
153/// [`StoreContextMut`] and pass that around as well.
154///
155/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
156/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
157/// form of context you have you can call various methods, create objects, etc.
158///
159/// ## Stores and `Default`
160///
161/// You can create a store with default configuration settings using
162/// `Store::default()`. This will create a brand new [`Engine`] with default
163/// configuration (see [`Config`](crate::Config) for more information).
164///
165/// ## Cross-store usage of items
166///
167/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
168/// [`Store`]. The store they belong to is the one they were created with
169/// (passed in as a parameter) or instantiated with. This store is the only
170/// store that can be used to interact with wasm items after they're created.
171///
172/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
173/// operations is incorrect. In other words it's considered a programmer error
174/// rather than a recoverable error for the wrong [`Store`] to be used when
175/// calling APIs.
pub struct Store<T> {
    // The sole field: keeping everything behind one heap allocation is what
    // makes `Store<T>` itself "just a pointer large" (see the module docs).
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
180
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] (and
/// [`Store::call_hook_async`]) to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
194
195impl CallHook {
196 /// Indicates the VM is entering host code (exiting WebAssembly code)
197 pub fn entering_host(&self) -> bool {
198 match self {
199 CallHook::ReturningFromWasm | CallHook::CallingHost => true,
200 _ => false,
201 }
202 }
203 /// Indicates the VM is exiting host code (entering WebAssembly code)
204 pub fn exiting_host(&self) -> bool {
205 match self {
206 CallHook::ReturningFromHost | CallHook::CallingWasm => true,
207 _ => false,
208 }
209 }
210}
211
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Closure projecting `&mut T` to the embedder's resource limiter, if one
    /// was configured via `Store::limiter` / `Store::limiter_async`.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Hook run on host<->wasm call transitions, if configured via
    /// `Store::call_hook` / `Store::call_hook_async`.
    call_hook: Option<CallHookInner<T>>,
    /// Callback deciding how to proceed when the engine's epoch reaches this
    /// store's deadline; on success returns an `UpdateDeadline`.
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}
228
/// Storage for the embedder-provided resource limiter: a closure that
/// projects the store's `&mut T` to the limiter living inside `T`.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter, configured via [`Store::limiter`].
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter, configured via [`Store::limiter_async`].
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
234
/// An object that can take callbacks when the runtime enters or exits hostcalls.
#[cfg(feature = "async")]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// A callback to run when wasmtime is about to enter a host call, or when about to
    /// exit the hostcall.
    ///
    /// The `ch` argument describes which of the four host<->wasm transitions
    /// is occurring; an `Err` is surfaced as a trap (see
    /// [`Store::call_hook_async`]).
    async fn handle_call_event(&self, t: &mut T, ch: CallHook) -> Result<()>;
}
243
/// Storage for a configured call hook, either synchronous or asynchronous.
enum CallHookInner<T> {
    /// Synchronous hook, configured via [`Store::call_hook`].
    Sync(Box<dyn FnMut(&mut T, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook, configured via [`Store::call_hook_async`].
    #[cfg(feature = "async")]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
}
249
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// Returned by the epoch-deadline callback stored in
/// `StoreInner::epoch_deadline_behavior`.
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
}
261
// Forward methods on `StoreOpaque` to also be available on `StoreInner<T>`
// via auto-deref, so internal code can call `StoreOpaque` methods directly on
// a `StoreInner<T>` without naming the `inner` field.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
275
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store belongs to (a clone of the `Engine` handle that
    /// was passed to `Store::new`).
    engine: Engine,
    runtime_limits: VMRuntimeLimits,
    /// All instances created within this store; indices into this vector are
    /// how instances are referred to elsewhere.
    instances: Vec<StoreInstance>,
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<Box<SignalHandler<'static>>>,
    /// Registry of all modules instantiated into this store.
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,

    // GC-related fields. `gc_store` is lazily initialized (`OnceCell`) since
    // not every store ever allocates a GC heap.
    gc_store: OnceCell<GcStore>,
    gc_roots: RootSet,
    gc_roots_list: GcRootsList,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,
    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    ///
    /// Note that this is `ManuallyDrop` because it needs to be dropped before
    /// `rooted_host_funcs` below. This structure contains pointers which are
    /// otherwise kept alive by the `Arc` references in `rooted_host_funcs`.
    store_data: ManuallyDrop<StoreData>,
    /// Dummy instance used as the "callee" when none exists; allocated in
    /// `Store::new`.
    default_caller: InstanceHandle,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// A list of lists of definitions which have been used to instantiate
    /// within this `Store`.
    ///
    /// Note that not all instantiations end up pushing to this list. At the
    /// time of this writing only the `InstancePre<T>` type will push to this
    /// list. Pushes to this list are typically accompanied with
    /// `HostFunc::to_func_store_rooted` to clone an `Arc` here once which
    /// preserves a strong reference to the `Arc` for each `HostFunc` stored
    /// within the list of `Definition`s.
    ///
    /// Note that this is `ManuallyDrop` as it must be dropped after
    /// `store_data` above, where the function pointers are stored.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: wasmtime_runtime::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: wasmtime_runtime::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
}
382
/// Per-store state used by the async implementation.
///
/// NOTE(review): how these raw pointers are set/cleared is managed by the
/// async fiber machinery elsewhere in this file/crate — not visible in this
/// chunk; treat the descriptions below as assumptions to confirm.
#[cfg(feature = "async")]
struct AsyncState {
    /// Presumably the suspension handle of the fiber currently running in
    /// this store (null when no fiber is active) — TODO confirm at use sites.
    current_suspend: UnsafeCell<*const wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    /// Presumably the `Context` of the future currently polling this store —
    /// TODO confirm at use sites.
    current_poll_cx: UnsafeCell<*mut Context<'static>>,
}

// Lots of pesky unsafe cells and pointers in this structure. This means we need
// to declare explicitly that we use this in a threadsafe fashion.
#[cfg(feature = "async")]
unsafe impl Send for AsyncState {}
#[cfg(feature = "async")]
unsafe impl Sync for AsyncState {}
395
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope for this
    /// value's lifetime.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered in `new` (only when the GC
    /// heap was initialized); consulted by `Drop` to know whether to exit it.
    entered: bool,
}
402
403impl<'a> AutoAssertNoGc<'a> {
404 #[inline]
405 pub fn new(store: &'a mut StoreOpaque) -> Self {
406 let entered = if let Some(gc_store) = store.gc_store.get_mut() {
407 gc_store.gc_heap.enter_no_gc_scope();
408 true
409 } else {
410 false
411 };
412
413 AutoAssertNoGc { store, entered }
414 }
415}
416
// Transparent access to the underlying `StoreOpaque`, so an
// `AutoAssertNoGc` can be used wherever a store reference is expected.
impl std::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Reborrow the inner `&mut` as shared.
        &*self.store
    }
}

impl std::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
432
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Exit the no-GC scope entered in `AutoAssertNoGc::new`, but only if
        // one was actually entered (i.e. the GC heap existed at that point).
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
441
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The underlying runtime handle for this instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
450
/// Discriminates the two flavors of instance tracked in
/// `StoreOpaque::instances`.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
467
468impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        // Grab a protection key for this store, if the engine's allocator has
        // one available (used to protect guest memory pages; see `pkey` field).
        let pkey = engine.allocator().next_available_pkey();

        let mut inner = Box::new(StoreInner {
            inner: StoreOpaque {
                _marker: marker::PhantomPinned,
                engine: engine.clone(),
                runtime_limits: Default::default(),
                instances: Vec::new(),
                #[cfg(feature = "component-model")]
                num_component_instances: 0,
                signal_handler: None,
                gc_store: OnceCell::new(),
                gc_roots: RootSet::default(),
                gc_roots_list: GcRootsList::default(),
                modules: ModuleRegistry::default(),
                func_refs: FuncRefs::default(),
                host_globals: Vec::new(),
                instance_count: 0,
                instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
                memory_count: 0,
                memory_limit: crate::DEFAULT_MEMORY_LIMIT,
                table_count: 0,
                table_limit: crate::DEFAULT_TABLE_LIMIT,
                #[cfg(feature = "async")]
                async_state: AsyncState {
                    current_suspend: UnsafeCell::new(ptr::null()),
                    current_poll_cx: UnsafeCell::new(ptr::null_mut()),
                },
                fuel_reserve: 0,
                fuel_yield_interval: None,
                store_data: ManuallyDrop::new(StoreData::new()),
                default_caller: InstanceHandle::null(),
                hostcall_val_storage: Vec::new(),
                wasm_val_raw_storage: Vec::new(),
                rooted_host_funcs: ManuallyDrop::new(Vec::new()),
                pkey,
                #[cfg(feature = "component-model")]
                component_host_table: Default::default(),
                #[cfg(feature = "component-model")]
                component_calls: Default::default(),
                #[cfg(feature = "component-model")]
                host_resource_data: Default::default(),
            },
            limiter: None,
            call_hook: None,
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        inner.default_caller = {
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = BareModuleInfo::empty(module).into_traitobj();
            let allocator = OnDemandInstanceAllocator::default();
            allocator
                .validate_module(shim.module(), shim.offsets())
                .unwrap();
            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                    })
                    .expect("failed to allocate default callee")
            };

            // Note the erasure of the lifetime here into `'static`, so in
            // general usage of this trait object must be strictly bounded to
            // the `Store` itself, and is an invariant that we have to maintain
            // throughout Wasmtime.
            unsafe {
                let traitobj = std::mem::transmute::<
                    *mut (dyn wasmtime_runtime::Store + '_),
                    *mut (dyn wasmtime_runtime::Store + 'static),
                >(&mut *inner);
                instance.set_store(traitobj);
            }
            instance
        };

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
573
    /// Access the underlying data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`], except that mutable access to the `T` is
    /// returned.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
585
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        //
        // SAFETY: per the protocol above — `self` is forgotten immediately
        // after `inner` is taken, so neither `inner` nor `data` can be
        // double-dropped.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            std::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
618
619 /// Configures the [`ResourceLimiter`] used to limit resource creation
620 /// within this [`Store`].
621 ///
622 /// Whenever resources such as linear memory, tables, or instances are
623 /// allocated the `limiter` specified here is invoked with the store's data
624 /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
625 /// being allocated. The returned [`ResourceLimiter`] is intended to live
626 /// within the `T` itself, for example by storing a
627 /// [`StoreLimits`](crate::StoreLimits).
628 ///
629 /// Note that this limiter is only used to limit the creation/growth of
630 /// resources in the future, this does not retroactively attempt to apply
631 /// limits to the [`Store`].
632 ///
633 /// # Examples
634 ///
635 /// ```
636 /// use wasmtime::*;
637 ///
638 /// struct MyApplicationState {
639 /// my_state: u32,
640 /// limits: StoreLimits,
641 /// }
642 ///
643 /// let engine = Engine::default();
644 /// let my_state = MyApplicationState {
645 /// my_state: 42,
646 /// limits: StoreLimitsBuilder::new()
647 /// .memory_size(1 << 20 /* 1 MB */)
648 /// .instances(2)
649 /// .build(),
650 /// };
651 /// let mut store = Store::new(&engine, my_state);
652 /// store.limiter(|state| &mut state.limits);
653 ///
654 /// // Creation of smaller memories is allowed
655 /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
656 ///
657 /// // Creation of a larger memory, however, will exceed the 1MB limit we've
658 /// // configured
659 /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
660 ///
661 /// // The number of instances in this store is limited to 2, so the third
662 /// // instance here should fail.
663 /// let module = Module::new(&engine, "(module)").unwrap();
664 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
665 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
666 /// assert!(Instance::new(&mut store, &module, &[]).is_err());
667 /// ```
668 ///
669 /// [`ResourceLimiter`]: crate::ResourceLimiter
670 pub fn limiter(
671 &mut self,
672 mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
673 ) {
674 // Apply the limits on instances, tables, and memory given by the limiter:
675 let inner = &mut self.inner;
676 let (instance_limit, table_limit, memory_limit) = {
677 let l = limiter(&mut inner.data);
678 (l.instances(), l.tables(), l.memories())
679 };
680 let innermost = &mut inner.inner;
681 innermost.instance_limit = instance_limit;
682 innermost.table_limit = table_limit;
683 innermost.memory_limit = memory_limit;
684
685 // Save the limiter accessor function:
686 inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
687 }
688
689 /// Configures the [`ResourceLimiterAsync`](crate::ResourceLimiterAsync)
690 /// used to limit resource creation within this [`Store`].
691 ///
692 /// This method is an asynchronous variant of the [`Store::limiter`] method
693 /// where the embedder can block the wasm request for more resources with
694 /// host `async` execution of futures.
695 ///
696 /// By using a [`ResourceLimiterAsync`](`crate::ResourceLimiterAsync`)
697 /// with a [`Store`], you can no longer use
698 /// [`Memory::new`](`crate::Memory::new`),
699 /// [`Memory::grow`](`crate::Memory::grow`),
700 /// [`Table::new`](`crate::Table::new`), and
701 /// [`Table::grow`](`crate::Table::grow`). Instead, you must use their
702 /// `async` variants: [`Memory::new_async`](`crate::Memory::new_async`),
703 /// [`Memory::grow_async`](`crate::Memory::grow_async`),
704 /// [`Table::new_async`](`crate::Table::new_async`), and
705 /// [`Table::grow_async`](`crate::Table::grow_async`).
706 ///
707 /// Note that this limiter is only used to limit the creation/growth of
708 /// resources in the future, this does not retroactively attempt to apply
709 /// limits to the [`Store`]. Additionally this must be used with an async
710 /// [`Store`] configured via
711 /// [`Config::async_support`](crate::Config::async_support).
712 #[cfg(feature = "async")]
713 #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
714 pub fn limiter_async(
715 &mut self,
716 mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
717 + Send
718 + Sync
719 + 'static,
720 ) {
721 debug_assert!(self.inner.async_support());
722 // Apply the limits on instances, tables, and memory given by the limiter:
723 let inner = &mut self.inner;
724 let (instance_limit, table_limit, memory_limit) = {
725 let l = limiter(&mut inner.data);
726 (l.instances(), l.tables(), l.memories())
727 };
728 let innermost = &mut inner.inner;
729 innermost.instance_limit = instance_limit;
730 innermost.table_limit = table_limit;
731 innermost.memory_limit = memory_limit;
732
733 // Save the limiter accessor function:
734 inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
735 }
736
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    /// Configures an async function that runs on calls and returns between
    /// WebAssembly and host code. For the non-async equivalent of this method,
    /// see [`Store::call_hook`].
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function's future may return a [`Trap`]. If a trap is returned
    /// when an import was called, it is immediately raised as-if the host
    /// import had returned the trap. If a trap is returned after wasm returns
    /// to the host then the wasm function's result is ignored and this trap is
    /// returned instead.
    ///
    /// After this function returns a trap, it may be called for subsequent
    /// returns to host or wasm code as the trap propagates to the root call.
    ///
    /// Note that installing a hook replaces any previously-configured hook
    /// (sync or async).
    #[cfg(feature = "async")]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        self.inner.call_hook = Some(CallHookInner::Async(Box::new(hook)));
    }
757
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// Note that installing a hook replaces any previously-configured hook
    /// (sync or async).
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(&mut T, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
778
    /// Returns the [`Engine`] that this store is associated with.
    ///
    /// This simply delegates to the internal store's engine handle.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }
783
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// See also [`Store::gc_async`] for the asynchronous equivalent.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.inner.gc()
    }
795
    /// Perform garbage collection asynchronously.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    ///
    /// # Panics
    ///
    /// Panics if this store is not associated with an
    /// [async config](crate::Config::async_support).
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.inner.gc_async().await;
    }
810
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
821
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// See also [`Store::get_fuel`] to read the remaining amount back.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
844
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    /// Passing `None` disables periodic yielding.
    ///
    /// # Errors
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }
876
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
906
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap (the default deadline of 0 has always elapsed).
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
936
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] to yield to the async executor before
    /// updating the epoch deadline. Alternatively, the callback may
    /// return [`UpdateDeadline::Continue`] to update the epoch deadline
    /// immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See also [`Store::set_epoch_deadline`] for setting the deadline the
    /// callback fires against.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
970
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    /// Configures epoch-deadline expiration to yield to the async
    /// caller and then update the deadline.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will yield
    /// (the future will return `Pending` but re-awake itself for
    /// later execution) and, upon resuming, the store will be
    /// configured with an epoch deadline equal to the current epoch
    /// plus `delta` ticks.
    ///
    /// This setting is intended to allow for cooperative timeslicing
    /// of multiple CPU-bound Wasm guests in different stores, all
    /// executing under the control of an async executor. To drive
    /// this, stores should be configured to "yield and update"
    /// automatically with this function, and some external driver (a
    /// thread that wakes up periodically, or a timer
    /// signal/interrupt) should call
    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch).
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
999}
1000
impl<'a, T> StoreContext<'a, T> {
    /// Whether the underlying store was configured with async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned reference carries the full `'a` lifetime of the
    /// underlying store borrow, so it may outlive this `StoreContext` value
    /// itself.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1025
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.0.gc()
    }

    /// Perform garbage collection of `ExternRef`s asynchronously.
    ///
    /// Same as [`Store::gc_async`].
    ///
    /// This method is only available when both the `async` and `gc` Cargo
    /// features are enabled.
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.0.gc_async().await;
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    /// Configures epoch-deadline expiration to yield to the async
    /// caller and then update the deadline.
    ///
    /// For more information see
    /// [`Store::epoch_deadline_async_yield_and_update`].
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}
1115
impl<T> StoreInner<T> {
    /// Shared access to the user-provided `T` held by this store.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Mutable access to the user-provided `T` held by this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Runs the configured call hook (if any) for the state transition `s`
    /// and performs any pkey-based protection switching.
    ///
    /// The common case — no pkey and no hook — is checked inline so call
    /// sites pay almost nothing when these features are unused; everything
    /// else is delegated to the out-of-line slow path.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Slow path for [`StoreInner::call_hook`]: switches memory-protection
    /// keys according to the direction of the transition, then dispatches to
    /// the user's sync or async hook.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                // Transitioning into wasm: restrict accessible memory to
                // this store's pkey.
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                // Transitioning back to the host: lift the restriction.
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        match &mut self.call_hook {
            Some(CallHookInner::Sync(hook)) => hook(&mut self.data, s),

            // NOTE(review): `block_on` presumably requires that we are
            // currently executing on a fiber; `async_cx()` returning `None`
            // (fiber dying) is converted into an error here rather than a
            // panic — confirm against `async_cx`'s documented contract.
            #[cfg(feature = "async")]
            Some(CallHookInner::Async(handler)) => unsafe {
                Ok(self
                    .inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(handler.handle_call_event(&mut self.data, s).as_mut())??)
            },

            None => Ok(()),
        }
    }
}
1163
/// Computes the total fuel remaining given the VM's active fuel counter
/// (`injected_fuel`, a negative value that the VM increments towards zero)
/// and the fuel held back in the reserve.
///
/// The remaining "active" fuel is the negation of `injected_fuel`; the total
/// is that plus `fuel_reserve`, saturating at the `u64` bounds.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Use `saturating_neg` instead of `-injected_fuel` so that an
    // (unexpected) `i64::MIN` value saturates to `i64::MAX` rather than
    // overflowing on negation, which would panic in debug builds. For every
    // value `set_fuel` actually writes this is identical to plain negation.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1167
1168// Add remaining fuel from the reserve into the active fuel if there is any left.
1169fn refuel(
1170 injected_fuel: &mut i64,
1171 fuel_reserve: &mut u64,
1172 yield_interval: Option<NonZeroU64>,
1173) -> bool {
1174 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1175 if fuel > 0 {
1176 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1177 true
1178 } else {
1179 false
1180 }
1181}
1182
// Splits `new_fuel_amount` between the VM's active fuel counter and the
// reserve held back for later injection.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // The amount injected into the VM at once is capped twice: by the yield
    // interval (when periodically yielding, only the "active" slice is handed
    // to the VM) and by `i64::MAX` (the VM stores fuel as an `i64`).
    let per_injection_cap = yield_interval
        .map_or(u64::MAX, NonZeroU64::get)
        .min(i64::MAX as u64);
    let injected = new_fuel_amount.min(per_injection_cap);
    // Whatever is left over after injection goes to the reserve for later.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM counts fuel by incrementing, so inject a negative amount; it
    // halts when this counter becomes positive.
    *injected_fuel = -(injected as i64);
}
1202
1203#[doc(hidden)]
1204impl StoreOpaque {
    /// Returns this store's unique id (delegates to the underlying
    /// `StoreData`).
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1208
    /// Accounts the resources of `module` against this store's configured
    /// limits.
    ///
    /// Increments the instance count by one and the memory/table counts by
    /// the number of memories/tables *defined* by `module` (imported ones
    /// are excluded).
    ///
    /// # Errors
    ///
    /// Returns an error if any incremented count would exceed its limit
    /// (`instance_limit`, `memory_limit`, `table_limit`). Note that counts
    /// bumped before the failing one remain bumped in that case.
    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
        // Saturating add so a pathological `amt` can't overflow `usize`; a
        // saturated value will exceed `max` and error out below.
        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
            let new = slot.saturating_add(amt);
            if new > max {
                bail!(
                    "resource limit exceeded: {} count too high at {}",
                    desc,
                    new
                );
            }
            *slot = new;
            Ok(())
        }

        let module = module.env_module();
        // Only locally-defined memories/tables count against this store.
        let memories = module.memory_plans.len() - module.num_imported_memories;
        let tables = module.table_plans.len() - module.num_imported_tables;

        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
        bump(
            &mut self.memory_count,
            self.memory_limit,
            memories,
            "memory",
        )?;
        bump(&mut self.table_count, self.table_limit, tables, "table")?;

        Ok(())
    }
1238
    /// Whether this store can be used with async APIs: requires both the
    /// `async` Cargo feature at compile time and
    /// `Config::async_support` at runtime.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }
1243
    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1248
    /// Shared access to this store's item storage (`StoreData`).
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1253
    /// Mutable access to this store's item storage (`StoreData`).
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1258
    /// Shared access to the registry of modules instantiated in this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1263
    /// Mutable access to the registry of modules instantiated in this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
1268
    /// Mutable access to this store's `FuncRefs` storage.
    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }
1272
    /// Fills this store's pending func refs using its module registry
    /// (delegates to `FuncRefs::fill`).
    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&mut self.modules);
    }
1276
    /// Retains the given shared func-ref array in this store's `FuncRefs`
    /// storage (delegation; see `FuncRefs::push_instance_pre_func_refs`).
    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }
1280
    /// Mutable access to the list of host-created globals owned by this
    /// store.
    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1284
1285 pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
1286 match self.instances[instance.0].kind {
1287 StoreInstanceKind::Dummy => None,
1288 StoreInstanceKind::Real { module_id } => {
1289 let module = self
1290 .modules()
1291 .lookup_module_by_id(module_id)
1292 .expect("should always have a registered module for real instances");
1293 Some(module)
1294 }
1295 }
1296 }
1297
1298 pub unsafe fn add_instance(
1299 &mut self,
1300 handle: InstanceHandle,
1301 module_id: RegisteredModuleId,
1302 ) -> InstanceId {
1303 self.instances.push(StoreInstance {
1304 handle: handle.clone(),
1305 kind: StoreInstanceKind::Real { module_id },
1306 });
1307 InstanceId(self.instances.len() - 1)
1308 }
1309
1310 /// Add a dummy instance that to the store.
1311 ///
1312 /// These are instances that are just implementation details of something
1313 /// else (e.g. host-created memories that are not actually defined in any
1314 /// Wasm module) and therefore shouldn't show up in things like core dumps.
1315 pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
1316 self.instances.push(StoreInstance {
1317 handle: handle.clone(),
1318 kind: StoreInstanceKind::Dummy,
1319 });
1320 InstanceId(self.instances.len() - 1)
1321 }
1322
    /// Shared access to the instance handle with the given id.
    ///
    /// Panics if `id` is out of bounds for this store.
    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }
1326
    /// Mutable access to the instance handle with the given id.
    ///
    /// Panics if `id` is out of bounds for this store.
    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }
1330
    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        // Collect eagerly first so that the shared borrow of `self.instances`
        // ends before `self` is mutably borrowed by `Instance::from_wasmtime`
        // in the returned iterator below.
        let instances = self
            .instances
            .iter()
            .enumerate()
            .filter_map(|(idx, inst)| {
                let id = InstanceId::from_index(idx);
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(InstanceData::from_id(id))
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }
1350
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // Collect eagerly so the mutable borrow of `self.instances` ends
        // before `self` is borrowed again in the `map` below.
        let mems = self
            .instances
            .iter_mut()
            .flat_map(|instance| instance.handle.defined_memories())
            .collect::<Vec<_>>();
        mems.into_iter()
            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
    }
1364
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.

        // Temporarily moves `self.instances` out of the store so we can
        // iterate the instances while also handing `&mut StoreOpaque` to the
        // callback; the `Drop` impl moves them back even if `f` panics.
        struct TempTakeInstances<'a> {
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let instances = mem::take(&mut store.instances);
                Self { instances, store }
            }
        }

        impl Drop for TempTakeInstances<'_> {
            fn drop(&mut self) {
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeInstances::new(self);
        for instance in temp.instances.iter_mut() {
            for table in instance.handle.defined_tables() {
                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
                f(temp.store, table);
            }
        }
    }
1398
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // Temporarily moves `self.host_globals` and `self.instances` out of
        // the store so we can iterate them while also handing `&mut
        // StoreOpaque` to the callback; the `Drop` impl moves both back even
        // if `f` panics.
        struct TempTakeHostGlobalsAndInstances<'a> {
            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let host_globals = mem::take(&mut store.host_globals);
                let instances = mem::take(&mut store.instances);
                Self {
                    host_globals,
                    instances,
                    store,
                }
            }
        }

        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
            fn drop(&mut self) {
                assert!(self.store.host_globals.is_empty());
                self.store.host_globals = mem::take(&mut self.host_globals);
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
        // SAFETY(review): the raw pointers derived from `host_globals` and
        // the instances' exports presumably stay valid for the loop because
        // `temp` keeps both collections alive — confirm that `f` cannot
        // invalidate them through the store.
        unsafe {
            // First enumerate all the host-created globals.
            for global in temp.host_globals.iter() {
                let export = ExportGlobal {
                    definition: &mut (*global.get()).global as *mut _,
                    vmctx: std::ptr::null_mut(),
                    global: (*global.get()).ty.to_wasm_type(),
                };
                let global = Global::from_wasmtime_global(export, temp.store);
                f(temp.store, global);
            }

            // Then enumerate all instances' defined globals.
            for instance in temp.instances.iter_mut() {
                for (_, export) in instance.handle.defined_globals() {
                    let global = Global::from_wasmtime_global(export, temp.store);
                    f(temp.store, global);
                }
            }
        }
    }
1450
    /// Installs (or clears, when `None`) this store's custom signal handler.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<Box<SignalHandler<'static>>>) {
        self.signal_handler = handler;
    }
1455
    /// Shared access to this store's `VMRuntimeLimits`.
    #[inline]
    pub fn runtime_limits(&self) -> &VMRuntimeLimits {
        &self.runtime_limits
    }
1460
    /// Allocates and installs this store's GC heap.
    ///
    /// Must be called at most once, before any GC store has been installed
    /// (asserted below); normally invoked lazily through `gc_store_mut`.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        assert!(self.gc_store.get_mut().is_none());
        let gc_store = allocate_gc_store(self.engine())?;
        // `set` cannot fail because of the assert above, so the `Result` is
        // deliberately discarded.
        let _ = self.gc_store.set(gc_store);
        return Ok(());

        // The helpers below are items, so they're allowed to follow the
        // `return` above; having both `cfg` variants here keeps the caller
        // free of feature checks.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(engine: &Engine) -> Result<GcStore> {
            // Only allocate a real GC heap when reference types are enabled;
            // otherwise install the disabled placeholder heap.
            let (index, heap) = if engine.config().features.reference_types {
                engine
                    .allocator()
                    .allocate_gc_heap(&**engine.gc_runtime())?
            } else {
                (
                    GcHeapAllocationIndex::default(),
                    wasmtime_runtime::disabled_gc_heap(),
                )
            };
            Ok(GcStore::new(index, heap))
        }

        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(_engine: &Engine) -> Result<GcStore> {
            Ok(GcStore::new(
                GcHeapAllocationIndex::default(),
                wasmtime_runtime::disabled_gc_heap(),
            ))
        }
    }
1491
    /// Shared access to this store's GC heap.
    ///
    /// Errors (rather than allocating) if the heap has not been initialized
    /// yet; see `gc_store_mut` for the lazily-allocating variant.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match self.gc_store.get() {
            Some(gc_store) => Ok(gc_store),
            None => Err(anyhow!("GC heap not initialized yet")),
        }
    }
1500
    /// Mutable access to this store's GC heap, lazily allocating it on first
    /// use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.get_mut().is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }
1508
    /// Like `gc_store`, but panics if the GC heap has not been allocated.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .get()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1516
    /// Like `gc_store_mut`, but panics rather than lazily allocating if the
    /// GC heap has not been allocated.
    #[inline]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .get_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1523
    /// Shared access to this store's set of user GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1528
    /// Mutable access to this store's set of user GC roots.
    #[inline]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1533
    /// Exits a LIFO rooting scope (delegates to `RootSet::exit_lifo_scope`).
    ///
    /// A no-op when no GC heap has been allocated yet.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        if let Some(gc_store) = self.gc_store.get_mut() {
            self.gc_roots.exit_lifo_scope(gc_store, scope);
        }
    }
1540
    /// Synchronous GC entry point: traces all roots, then collects.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.get_mut().is_none() {
            return;
        }

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = std::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // NOTE(review): `roots.iter()` is `unsafe` — presumably the iterator
        // must not outlive the traced roots; confirm against `GcRootsList`.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;
    }
1559
    /// No-op GC stub used when the `gc` feature is disabled.
    #[inline]
    #[cfg(not(feature = "gc"))]
    pub fn gc(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1569
    /// Populates `gc_roots_list` with every live GC root: Wasm stack slots,
    /// vmctx-owned globals/tables, and user-held roots.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1583
    /// Asynchronous GC entry point: like `gc`, but yields to the async
    /// executor at intermediate points while tracing roots.
    ///
    /// Panics if async support is not enabled in this store's config
    /// (asserted below).
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self) {
        assert!(
            self.async_support(),
            "cannot use `gc_async` without enabling async support in the config",
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.get_mut().is_none() {
            return;
        }

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = std::mem::take(&mut self.gc_roots_list);

        self.trace_roots_async(&mut roots).await;
        // NOTE(review): `roots.iter()` is `unsafe` — presumably the iterator
        // must not outlive the traced roots; confirm against `GcRootsList`.
        self.unwrap_gc_store_mut()
            .gc_async(unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;
    }
1609
    /// No-op async GC stub used when the `gc` feature is disabled.
    #[inline]
    #[cfg(all(feature = "async", not(feature = "gc")))]
    pub async fn gc_async(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1619
    /// Async variant of `trace_roots`: traces the same three root sources but
    /// yields to the executor between each phase.
    #[cfg(all(feature = "async", feature = "gc"))]
    async fn trace_roots_async(&mut self, gc_roots_list: &mut GcRootsList) {
        use wasmtime_runtime::Yield;

        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        Yield::new().await;
        self.trace_vmctx_roots(gc_roots_list);
        Yield::new().await;
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1637
    /// Traces GC roots held in live Wasm stack frames.
    ///
    /// Walks the Wasm portion of the current backtrace; for every frame that
    /// has a stack map, each mapped stack slot holding a non-null GC ref is
    /// recorded into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use std::ptr::NonNull;

        use wasmtime_runtime::{ModuleInfoLookup, SendSyncPtr};

        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self.vmruntime_limits().cast_const(), |frame| {
            let pc = frame.pc();
            debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

            let fp = frame.fp();
            debug_assert!(
                fp != 0,
                "we should always get a valid frame pointer for Wasm frames"
            );
            let module_info = self
                .modules()
                .lookup(pc)
                .expect("should have module info for Wasm frame");

            // Frames without a stack map hold no GC refs: skip them.
            let stack_map = match module_info.lookup_stack_map(pc) {
                Some(sm) => sm,
                None => {
                    log::trace!("No stack map for this Wasm frame");
                    return std::ops::ControlFlow::Continue(());
                }
            };
            log::trace!(
                "We have a stack map that maps {} words in this Wasm frame",
                stack_map.mapped_words()
            );

            // The mapped region sits immediately below the frame pointer.
            let sp = fp - stack_map.mapped_words() as usize * mem::size_of::<usize>();

            for i in 0..(stack_map.mapped_words() as usize) {
                // Stack maps have one bit per word in the frame, and the
                // zero^th bit is the *lowest* addressed word in the frame,
                // i.e. the closest to the SP. So to get the `i`^th word in
                // this frame, we add `i * sizeof(word)` to the SP.
                let stack_slot = sp + i * mem::size_of::<usize>();
                let stack_slot = stack_slot as *mut u64;

                if !stack_map.get_bit(i) {
                    log::trace!("Stack slot @ {stack_slot:p} does not contain gc_refs");
                    continue;
                }

                // SAFETY(review): presumably sound because the slot is inside
                // a live frame described by the stack map — relies on the
                // stack map being accurate for this pc; confirm.
                let gc_ref = unsafe { std::ptr::read(stack_slot) };
                log::trace!("Stack slot @ {stack_slot:p} = {gc_ref:#x}");

                let gc_ref = VMGcRef::from_r64(gc_ref)
                    .expect("we should never use the high 32 bits of an r64");

                // Only non-null refs are actual roots.
                if gc_ref.is_some() {
                    unsafe {
                        gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
                            NonNull::new(stack_slot).unwrap(),
                        ));
                    }
                }
            }

            std::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1707
1708 #[cfg(feature = "gc")]
1709 fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1710 log::trace!("Begin trace GC roots :: vmctx");
1711 self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
1712 self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
1713 log::trace!("End trace GC roots :: vmctx");
1714 }
1715
    /// Trace GC roots registered by the user, stored in this store's
    /// `gc_roots` set.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1722
    /// Yields the async context, assuming that we are executing on a fiber and
    /// that fiber is not in the process of dying. This function will return
    /// None in the latter case (the fiber is dying), and panic if
    /// `async_support()` is false.
    #[cfg(feature = "async")]
    #[inline]
    pub fn async_cx(&self) -> Option<AsyncCx> {
        assert!(self.async_support());

        // Outer pointer is null when no fiber has configured a polling
        // context for this store at all.
        let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
        if poll_cx_box_ptr.is_null() {
            return None;
        }

        // Inner pointer is null when there's no active `Context` to poll
        // with, i.e. the fiber is in the process of shutting down.
        let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
        if poll_cx_inner_ptr.is_null() {
            return None;
        }

        Some(AsyncCx {
            current_suspend: self.async_state.current_suspend.get(),
            current_poll_cx: poll_cx_box_ptr,
            track_pkey_context_switch: self.pkey.is_some(),
        })
    }
1748
1749 pub fn get_fuel(&self) -> Result<u64> {
1750 anyhow::ensure!(
1751 self.engine().tunables().consume_fuel,
1752 "fuel is not configured in this store"
1753 );
1754 let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() };
1755 Ok(get_fuel(injected_fuel, self.fuel_reserve))
1756 }
1757
1758 fn refuel(&mut self) -> bool {
1759 let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
1760 refuel(
1761 injected_fuel,
1762 &mut self.fuel_reserve,
1763 self.fuel_yield_interval,
1764 )
1765 }
1766
1767 pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1768 anyhow::ensure!(
1769 self.engine().tunables().consume_fuel,
1770 "fuel is not configured in this store"
1771 );
1772 let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
1773 set_fuel(
1774 injected_fuel,
1775 &mut self.fuel_reserve,
1776 self.fuel_yield_interval,
1777 fuel,
1778 );
1779 Ok(())
1780 }
1781
1782 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1783 anyhow::ensure!(
1784 self.engine().tunables().consume_fuel,
1785 "fuel is not configured in this store"
1786 );
1787 anyhow::ensure!(
1788 self.engine().config().async_support,
1789 "async support is not configured in this store"
1790 );
1791 anyhow::ensure!(
1792 interval != Some(0),
1793 "fuel_async_yield_interval must not be 0"
1794 );
1795 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1796 // Reset the fuel active + reserve states by resetting the amount.
1797 self.set_fuel(self.get_fuel()?)
1798 }
1799
    /// Yields execution to the caller on out-of-gas or epoch interruption.
    ///
    /// This only works on async futures and stores, and assumes that we're
    /// executing on a fiber. This will yield execution back to the caller once.
    #[cfg(feature = "async")]
    fn async_yield_impl(&mut self) -> Result<()> {
        use wasmtime_runtime::Yield;

        let mut future = Yield::new();

        // When control returns, we have a `Result<()>` passed
        // in from the host fiber. If this finished successfully then
        // we were resumed normally via a `poll`, so keep going. If
        // the future was dropped while we were yielded, then we need
        // to clean up this fiber. Do so by raising a trap which will
        // abort all wasm and get caught on the other side to clean
        // things up.
        //
        // SAFETY: `Pin::new_unchecked` is ok because `future` lives in this
        // stack frame and is never moved for the duration of `block_on`.
        unsafe {
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }
1823
1824 #[inline]
1825 pub fn signal_handler(&self) -> Option<*const SignalHandler<'static>> {
1826 let handler = self.signal_handler.as_ref()?;
1827 Some(&**handler as *const _)
1828 }
1829
    /// Returns a raw pointer to this store's `VMRuntimeLimits`, which is
    /// shared with executing wasm code.
    ///
    /// Note the `*const` -> `*mut` cast from a `&self` borrow: fields of the
    /// limits are only mutated through interior-mutability cells (e.g.
    /// `fuel_consumed`, `epoch_deadline`) or by wasm itself.
    #[inline]
    pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits
    }
1834
    /// Returns the `VMContext` pointer of this store's default caller
    /// instance.
    #[inline]
    pub fn default_caller(&self) -> *mut VMContext {
        self.default_caller.vmctx()
    }
1839
    /// Returns the `dyn wasmtime_runtime::Store` trait-object pointer stored
    /// within the default caller instance.
    pub fn traitobj(&self) -> *mut dyn wasmtime_runtime::Store {
        self.default_caller.store()
    }
1843
1844 /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1845 /// used as part of calling the host in a `Func::new` method invocation.
1846 #[inline]
1847 pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1848 mem::take(&mut self.hostcall_val_storage)
1849 }
1850
1851 /// Restores the vector previously taken by `take_hostcall_val_storage`
1852 /// above back into the store, allowing it to be used in the future for the
1853 /// next wasm->host call.
1854 #[inline]
1855 pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1856 if storage.capacity() > self.hostcall_val_storage.capacity() {
1857 self.hostcall_val_storage = storage;
1858 }
1859 }
1860
1861 /// Same as `take_hostcall_val_storage`, but for the direction of the host
1862 /// calling wasm.
1863 #[inline]
1864 pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1865 mem::take(&mut self.wasm_val_raw_storage)
1866 }
1867
1868 /// Same as `save_hostcall_val_storage`, but for the direction of the host
1869 /// calling wasm.
1870 #[inline]
1871 pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1872 if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1873 self.wasm_val_raw_storage = storage;
1874 }
1875 }
1876
    /// Roots the given host function definitions within this store, keeping
    /// them alive for the store's lifetime.
    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
        self.rooted_host_funcs.push(funcs);
    }
1880
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for instance in self.instances.iter() {
            if let Some(f) = instance.handle.wasm_fault(addr) {
                // The full scan (rather than an early return) double-checks
                // that at most one instance claims this faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory in this store contains `addr`: print a loud
        // explanation and abort rather than propagating a bogus trap.
        eprintln!(
            "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
        );
        std::process::abort();
    }
1958
    /// Retrieve the store's protection key, if one was configured.
    ///
    /// Cloned so the caller gets an owned copy independent of the `&self`
    /// borrow.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey.clone()
    }
1964
    /// Returns simultaneous mutable access to the component-model state of
    /// this store: the active call contexts, the host resource table, and the
    /// host resource data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut wasmtime_runtime::component::CallContexts,
        &mut wasmtime_runtime::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
1980
    /// Records that a component instance was created within this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
1990}
1991
impl<T> StoreContextMut<'_, T> {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    ///
    /// This function will convert the synchronous `func` into an asynchronous
    /// future. This is done by running `func` in a fiber on a separate native
    /// stack which can be suspended and resumed from.
    ///
    /// Most of the nitty-gritty here is how we juggle the various contexts
    /// necessary to suspend the fiber later on and poll sub-futures. It's hoped
    /// that the various comments are illuminating as to what's going on here.
    #[cfg(feature = "async")]
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
    ) -> Result<R>
    where
        T: Send,
    {
        let config = self.engine().config();
        debug_assert!(self.0.async_support());
        debug_assert!(config.async_stack_size > 0);

        // `slot` receives `func`'s result from inside the fiber so it can be
        // returned once the fiber future completes.
        let mut slot = None;
        let future = {
            let current_poll_cx = self.0.async_state.current_poll_cx.get();
            let current_suspend = self.0.async_state.current_suspend.get();
            let stack = self.engine().allocator().allocate_fiber_stack()?;

            let engine = self.engine().clone();
            let slot = &mut slot;
            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
                // First check and see if we were interrupted/dropped, and only
                // continue if we haven't been.
                keep_going?;

                // Configure our store's suspension context for the rest of the
                // execution of this fiber. Note that a raw pointer is stored here
                // which is only valid for the duration of this closure.
                // Consequently we at least replace it with the previous value when
                // we're done. This reset is also required for correctness because
                // otherwise our value will overwrite another active fiber's value.
                // There should be a test that segfaults in `async_functions.rs` if
                // this `Replace` is removed.
                unsafe {
                    let _reset = Reset(current_suspend, *current_suspend);
                    *current_suspend = suspend;

                    *slot = Some(func(self));
                    Ok(())
                }
            })?;

            // Once we have the fiber representing our synchronous computation, we
            // wrap that in a custom future implementation which does the
            // translation from the future protocol to our fiber API.
            FiberFuture {
                fiber,
                current_poll_cx,
                engine,
                state: Some(wasmtime_runtime::AsyncWasmCallState::new()),
            }
        };
        future.await?;

        return Ok(slot.unwrap());

        struct FiberFuture<'a> {
            // The underlying fiber driving the synchronous computation.
            fiber: wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>,
            // Pointer to the store's slot holding the current polling context,
            // refreshed on every `poll`.
            current_poll_cx: *mut *mut Context<'static>,
            // Engine handle used to deallocate the fiber stack on drop.
            engine: Engine,
            // See comments in `FiberFuture::resume` for this
            state: Option<wasmtime_runtime::AsyncWasmCallState>,
        }

        // This is surely the most dangerous `unsafe impl Send` in the entire
        // crate. There are two members in `FiberFuture` which cause it to not
        // be `Send`. One is `current_poll_cx` and is entirely uninteresting.
        // This is just used to manage `Context` pointers across `await` points
        // in the future, and requires raw pointers to get it to happen easily.
        // Nothing too weird about the `Send`-ness, values aren't actually
        // crossing threads.
        //
        // The really interesting piece is `fiber`. Now the "fiber" here is
        // actual honest-to-god Rust code which we're moving around. What we're
        // doing is the equivalent of moving our thread's stack to another OS
        // thread. Turns out we, in general, have no idea what's on the stack
        // and would generally have no way to verify that this is actually safe
        // to do!
        //
        // Thankfully, though, Wasmtime has the power. Without being glib it's
        // actually worth examining what's on the stack. It's unfortunately not
        // super-local to this function itself. Our closure to `Fiber::new` runs
        // `func`, which is given to us from the outside. Thankfully, though, we
        // have tight control over this. Usage of `on_fiber` is typically done
        // *just* before entering WebAssembly itself, so we'll have a few stack
        // frames of Rust code (all in Wasmtime itself) before we enter wasm.
        //
        // Once we've entered wasm, well then we have a whole bunch of wasm
        // frames on the stack. We've got this nifty thing called Cranelift,
        // though, which allows us to also have complete control over everything
        // on the stack!
        //
        // Finally, when wasm switches back to the fiber's starting pointer
        // (this future we're returning) then it means wasm has reentered Rust.
        // Suspension can only happen via the `block_on` function of an
        // `AsyncCx`. This, conveniently, also happens entirely in Wasmtime
        // controlled code!
        //
        // There's an extremely important point that should be called out here.
        // User-provided futures **are not on the stack** during suspension
        // points. This is extremely crucial because we in general cannot reason
        // about Send/Sync for stack-local variables since rustc doesn't analyze
        // them at all. With our construction, though, we are guaranteed that
        // Wasmtime owns all stack frames between the stack of a fiber and when
        // the fiber suspends (and it could move across threads). At this time
        // the only user-provided piece of data on the stack is the future
        // itself given to us. Lo-and-behold as you might notice the future is
        // required to be `Send`!
        //
        // What this all boils down to is that we, as the authors of Wasmtime,
        // need to be extremely careful that on the async fiber stack we only
        // store Send things. For example we can't start using `Rc` willy nilly
        // by accident and leave a copy in TLS somewhere. (similarly we have to
        // be ready for TLS to change while we're executing wasm code between
        // suspension points).
        //
        // While somewhat onerous it shouldn't be too too hard (the TLS bit is
        // the hardest bit so far). This does mean, though, that no user should
        // ever have to worry about the `Send`-ness of Wasmtime. If rustc says
        // it's ok, then it's ok.
        //
        // With all that in mind we unsafely assert here that wasmtime is
        // correct. We declare the fiber as only containing Send data on its
        // stack, despite not knowing for sure at compile time that this is
        // correct. That's what `unsafe` in Rust is all about, though, right?
        unsafe impl Send for FiberFuture<'_> {}

        impl FiberFuture<'_> {
            /// This is a helper function to call `resume` on the underlying
            /// fiber while correctly managing Wasmtime's thread-local data.
            ///
            /// Wasmtime's implementation of traps leverages thread-local data
            /// to get access to metadata during a signal. This thread-local
            /// data is a linked list of "activations" where the nodes of the
            /// linked list are stored on the stack. It would be invalid as a
            /// result to suspend a computation with the head of the linked list
            /// on this stack then move the stack to another thread and resume
            /// it. That means that a different thread would point to our stack
            /// and our thread doesn't point to our stack at all!
            ///
            /// Basically management of TLS is required here one way or another.
            /// The strategy currently settled on is to manage the list of
            /// activations created by this fiber as a unit. When a fiber
            /// resumes the linked list is prepended to the current thread's
            /// list. When the fiber is suspended then the fiber's list of
            /// activations are all removed en-masse and saved within the fiber.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    let prev = self.state.take().unwrap().push();
                    // `Restore` puts the fiber's activations back into `state`
                    // on drop, i.e. whenever `resume` returns (even by
                    // unwind), keeping TLS management exception-safe.
                    let restore = Restore {
                        fiber: self,
                        state: Some(prev),
                    };
                    return restore.fiber.fiber.resume(val);
                }

                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    state: Option<wasmtime_runtime::PreviousAsyncWasmCallState>,
                }

                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.state = Some(self.state.take().unwrap().restore());
                        }
                    }
                }
            }
        }

        impl Future for FiberFuture<'_> {
            type Output = Result<()>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                // We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
                // this must be done unsafely, however, since `cx` is only alive
                // for this one singular function call. Here we do a `transmute`
                // to extend the lifetime of `Context` so it can be stored in
                // our `Store`, and then we replace the current polling context
                // with this one.
                //
                // Note that the replace is done for weird situations where
                // futures might be switching contexts and there's multiple
                // wasmtime futures in a chain of futures.
                //
                // On exit from this function, though, we reset the polling
                // context back to what it was to signify that `Store` no longer
                // has access to this pointer.
                unsafe {
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx =
                        std::mem::transmute::<&mut Context<'_>, *mut Context<'static>>(cx);

                    // After that's set up we resume execution of the fiber, which
                    // may also start the fiber for the first time. This either
                    // returns `Ok` saying the fiber finished (yay!) or it
                    // returns `Err` with the payload passed to `suspend`, which
                    // in our case is `()`.
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),

                        // If `Err` is returned that means the fiber polled a
                        // future but it said "Pending", so we propagate that
                        // here.
                        //
                        // An additional safety check is performed when leaving
                        // this function to help bolster the guarantees of
                        // `unsafe impl Send` above. Notably this future may get
                        // re-polled on a different thread. Wasmtime's
                        // thread-local state points to the stack, however,
                        // meaning that it would be incorrect to leave a pointer
                        // in TLS when this function returns. This function
                        // performs a runtime assert to verify that this is the
                        // case, notably that the one TLS pointer Wasmtime uses
                        // is not pointing anywhere within the stack. If it is
                        // then that's a bug indicating that TLS management in
                        // Wasmtime is incorrect.
                        Err(()) => {
                            if let Some(range) = self.fiber.stack().range() {
                                wasmtime_runtime::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }

        // Dropping futures is pretty special in that it means the future has
        // been requested to be cancelled. Here we run the risk of dropping an
        // in-progress fiber, and if we were to do nothing then the fiber would
        // leak all its owned stack resources.
        //
        // To handle this we implement `Drop` here and, if the fiber isn't done,
        // resume execution of the fiber saying "hey please stop you're
        // interrupted". Our `Trap` created here (which has the stack trace
        // of whomever dropped us) will then get propagated in whatever called
        // `block_on`, and the idea is that the trap propagates all the way back
        // up to the original fiber start, finishing execution.
        //
        // We don't actually care about the fiber's return value here (no one's
        // around to look at it), we just assert the fiber finished to
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if !self.fiber.done() {
                    let result = self.resume(Err(anyhow!("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to catch
                    // the trap and re-resume, we'd ideally like to signal that to
                    // callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());
                }

                self.state.take().unwrap().assert_null();

                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.stack());
                }
            }
        }
    }
}
2269
/// A handle to a store's async execution context, used to synchronously block
/// on futures from within a fiber via [`AsyncCx::block_on`].
#[cfg(feature = "async")]
pub struct AsyncCx {
    // Pointer to the store's slot holding the current fiber suspension
    // context, configured for the duration of a fiber's execution.
    current_suspend: *mut *const wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    // Pointer to the store's slot holding the `Context` of the future
    // currently polling us, used to poll sub-futures in `block_on`.
    current_poll_cx: *mut *mut Context<'static>,
    // Whether MPK protection-key state must be saved/restored across
    // suspension points (set when the store has a pkey configured).
    track_pkey_context_switch: bool,
}
2276
#[cfg(feature = "async")]
impl AsyncCx {
    /// Blocks on the asynchronous computation represented by `future` and
    /// produces the result here, in-line.
    ///
    /// This function is designed to only work when it's currently executing on
    /// a native fiber. This fiber provides the ability for us to handle the
    /// future's `Pending` state as "jump back to whomever called the fiber in
    /// an asynchronous fashion and propagate `Pending`". This tight coupling
    /// with `on_fiber` below is what powers the asynchronicity of calling wasm.
    /// Note that the asynchronous part only applies to host functions, wasm
    /// itself never really does anything asynchronous at this time.
    ///
    /// This function takes a `future` and will (appear to) synchronously wait
    /// on the result. While this function is executing it will fiber switch
    /// to-and-from the original frame calling `on_fiber` which should be a
    /// guarantee due to how async stores are configured.
    ///
    /// The return value here is either the output of the future `T`, or a trap
    /// which represents that the asynchronous computation was cancelled. It is
    /// not recommended to catch the trap and try to keep executing wasm, so
    /// we've tried to liberally document this.
    ///
    /// # Safety
    ///
    /// Must be called on a fiber whose suspension and polling contexts have
    /// been configured (i.e. via `on_fiber`), since the raw context pointers
    /// stored in this `AsyncCx` are dereferenced here.
    pub unsafe fn block_on<U>(
        &self,
        mut future: Pin<&mut (dyn Future<Output = U> + Send)>,
    ) -> Result<U> {
        // Take our current `Suspend` context which was configured as soon as
        // our fiber started. Note that we must load it at the front here and
        // save it on our stack frame. While we're polling the future other
        // fibers may be started for recursive computations, and the current
        // suspend context is only preserved at the edges of the fiber, not
        // during the fiber itself.
        //
        // For a little bit of extra safety we also replace the current value
        // with null to try to catch any accidental bugs on our part early.
        // This is all pretty unsafe so we're trying to be careful...
        //
        // Note that there should be a segfaulting test in `async_functions.rs`
        // if this `Reset` is removed.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null();
        assert!(!suspend.is_null());

        loop {
            // Poll the future once with the caller's `Context`, nulling out
            // the store's polling slot while we do so (same defensive pattern
            // as for the suspend context above).
            let future_result = {
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };

            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }

            // In order to prevent this fiber's MPK state from being munged by
            // other fibers while it is suspended, we save and restore it once
            // execution resumes. Note that when MPK is not supported,
            // these are noops.
            let previous_mask = if self.track_pkey_context_switch {
                let previous_mask = mpk::current_mask();
                mpk::allow(ProtectionMask::all());
                previous_mask
            } else {
                ProtectionMask::all()
            };
            // Suspend back to the caller; `?` propagates cancellation if the
            // outer future was dropped while we were suspended.
            (*suspend).suspend(())?;
            if self.track_pkey_context_switch {
                mpk::allow(previous_mask);
            }
        }
    }
}
2353
2354unsafe impl<T> wasmtime_runtime::Store for StoreInner<T> {
2355 fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
2356 <StoreOpaque>::vmruntime_limits(self)
2357 }
2358
2359 fn epoch_ptr(&self) -> *const AtomicU64 {
2360 self.engine.epoch_counter() as *const _
2361 }
2362
2363 fn maybe_gc_store(&mut self) -> Option<&mut GcStore> {
2364 self.gc_store.get_mut()
2365 }
2366
2367 fn memory_growing(
2368 &mut self,
2369 current: usize,
2370 desired: usize,
2371 maximum: Option<usize>,
2372 ) -> Result<bool, anyhow::Error> {
2373 match self.limiter {
2374 Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2375 limiter(&mut self.data).memory_growing(current, desired, maximum)
2376 }
2377 #[cfg(feature = "async")]
2378 Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
2379 self.inner
2380 .async_cx()
2381 .expect("ResourceLimiterAsync requires async Store")
2382 .block_on(
2383 limiter(&mut self.data)
2384 .memory_growing(current, desired, maximum)
2385 .as_mut(),
2386 )?
2387 },
2388 None => Ok(true),
2389 }
2390 }
2391
2392 fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
2393 match self.limiter {
2394 Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2395 limiter(&mut self.data).memory_grow_failed(error)
2396 }
2397 #[cfg(feature = "async")]
2398 Some(ResourceLimiterInner::Async(ref mut limiter)) => {
2399 limiter(&mut self.data).memory_grow_failed(error)
2400 }
2401 None => {
2402 log::debug!("ignoring memory growth failure error: {error:?}");
2403 Ok(())
2404 }
2405 }
2406 }
2407
2408 fn table_growing(
2409 &mut self,
2410 current: u32,
2411 desired: u32,
2412 maximum: Option<u32>,
2413 ) -> Result<bool, anyhow::Error> {
2414 // Need to borrow async_cx before the mut borrow of the limiter.
2415 // self.async_cx() panicks when used with a non-async store, so
2416 // wrap this in an option.
2417 #[cfg(feature = "async")]
2418 let async_cx = if self.async_support() {
2419 Some(self.async_cx().unwrap())
2420 } else {
2421 None
2422 };
2423
2424 match self.limiter {
2425 Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2426 limiter(&mut self.data).table_growing(current, desired, maximum)
2427 }
2428 #[cfg(feature = "async")]
2429 Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
2430 async_cx
2431 .expect("ResourceLimiterAsync requires async Store")
2432 .block_on(
2433 limiter(&mut self.data)
2434 .table_growing(current, desired, maximum)
2435 .as_mut(),
2436 )?
2437 },
2438 None => Ok(true),
2439 }
2440 }
2441
2442 fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
2443 match self.limiter {
2444 Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2445 limiter(&mut self.data).table_grow_failed(error)
2446 }
2447 #[cfg(feature = "async")]
2448 Some(ResourceLimiterInner::Async(ref mut limiter)) => {
2449 limiter(&mut self.data).table_grow_failed(error)
2450 }
2451 None => {
2452 log::debug!("ignoring table growth failure: {error:?}");
2453 Ok(())
2454 }
2455 }
2456 }
2457
2458 fn out_of_gas(&mut self) -> Result<()> {
2459 if !self.refuel() {
2460 return Err(Trap::OutOfFuel.into());
2461 }
2462 #[cfg(feature = "async")]
2463 if self.fuel_yield_interval.is_some() {
2464 self.async_yield_impl()?;
2465 }
2466 Ok(())
2467 }
2468
2469 fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
2470 // Temporarily take the configured behavior to avoid mutably borrowing
2471 // multiple times.
2472 let mut behavior = self.epoch_deadline_behavior.take();
2473 let delta_result = match &mut behavior {
2474 None => Err(Trap::Interrupt.into()),
2475 Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
2476 let delta = match update {
2477 UpdateDeadline::Continue(delta) => delta,
2478
2479 #[cfg(feature = "async")]
2480 UpdateDeadline::Yield(delta) => {
2481 assert!(
2482 self.async_support(),
2483 "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
2484 );
2485 // Do the async yield. May return a trap if future was
2486 // canceled while we're yielded.
2487 self.async_yield_impl()?;
2488 delta
2489 }
2490 };
2491
2492 // Set a new deadline and return the new epoch deadline so
2493 // the Wasm code doesn't have to reload it.
2494 self.set_epoch_deadline(delta);
2495 Ok(self.get_epoch_deadline())
2496 })
2497 };
2498
2499 // Put back the original behavior which was replaced by `take`.
2500 self.epoch_deadline_behavior = behavior;
2501 delta_result
2502 }
2503
    #[cfg(feature = "gc")]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        // Performs a garbage collection, keeping `root` (if provided) alive
        // across the collection and returning a fresh reference to it.
        //
        // Open a rooting scope and push `root` onto its LIFO root list so the
        // collector treats it as live for the duration of this call.
        let mut scope = RootScope::new(self);
        let store = scope.as_context_mut().0;
        let store_id = store.id();
        let root = root.map(|r| store.gc_roots_mut().push_lifo_root(store_id, r));

        if store.async_support() {
            #[cfg(feature = "async")]
            unsafe {
                // Async stores run the collection as a future (`gc_async`)
                // and block on it through the store's async context. The
                // `expect` documents the invariant that the context is only
                // absent during shutdown.
                let async_cx = store.async_cx();
                let mut future = store.gc_async();
                async_cx
                    .expect("attempted to pull async context during shutdown")
                    .block_on(Pin::new_unchecked(&mut future))?;
            }
        } else {
            (**store).gc();
        }

        // Read the rooted value's (possibly updated — a collection may alter
        // the underlying ref; confirm against `GcStore`) reference back out
        // and clone it so it remains valid after the root scope is dropped.
        let root = match root {
            None => None,
            Some(r) => {
                let r = r
                    .unchecked_get_gc_ref(store)
                    .expect("still in scope")
                    .unchecked_copy();
                Some(store.gc_store_mut()?.clone_gc_ref(&r))
            }
        };

        Ok(root)
    }
2537
    #[cfg(not(feature = "gc"))]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        // GC support is compiled out, so there is no collection to perform;
        // any provided root is handed straight back unchanged.
        Ok(root)
    }
2542
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut wasmtime_runtime::component::CallContexts {
        // Mutable access to the component-model call-context state stored in
        // this store (only present when the component-model feature is on).
        &mut self.component_calls
    }
2547}
2548
impl<T> StoreInner<T> {
    /// Sets the store's epoch deadline to `delta` ticks past the engine's
    /// current epoch.
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Safety: this is safe because the epoch deadline in the
        // `VMRuntimeLimits` is accessed only here and by Wasm guest code
        // running in this store, and we have a `&mut self` here.
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline = self.engine().current_epoch() + delta;
    }

    /// Configures epoch expiration to trap: with no behavior installed,
    /// `new_epoch` reports `Trap::Interrupt` when the deadline is reached.
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs an embedder callback that decides, each time the epoch
    /// deadline is reached, whether to continue, yield, or trap.
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Configures each epoch deadline to yield to the async executor and then
    /// extend the deadline by `delta`. Panics unless async support is enabled.
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        #[cfg(feature = "async")]
        {
            // The installed behavior always requests a yield with the same
            // fixed delta; `delta` is `Copy` so the closure's capture doesn't
            // conflict with the statement below.
            self.epoch_deadline_behavior =
                Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
        }
        let _ = delta; // suppress warning in non-async build
    }

    /// Reads the current absolute epoch deadline out of the runtime limits.
    fn get_epoch_deadline(&self) -> u64 {
        // Safety: this is safe because, as above, it is only invoked
        // from within `new_epoch` which is called from guest Wasm
        // code, which will have an exclusive borrow on the Store.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline
    }
}
2596
2597impl<T: Default> Default for Store<T> {
2598 fn default() -> Store<T> {
2599 Store::new(&Engine::default(), T::default())
2600 }
2601}
2602
2603impl<T: fmt::Debug> fmt::Debug for Store<T> {
2604 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2605 let inner = &**self.inner as *const StoreInner<T>;
2606 f.debug_struct("Store")
2607 .field("inner", &inner)
2608 .field("data", &self.inner.data)
2609 .finish()
2610 }
2611}
2612
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // for documentation on this `unsafe`, see `into_data`.
        unsafe {
            // Drop the embedder's `T` first, then the store internals. The
            // `Drop for StoreOpaque` below deliberately never touches
            // `self.data`, relying on it having been dropped here.
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2622
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            // Return every instance to the allocator that created it: dummy
            // instances go back through a default on-demand allocator, all
            // others through the engine's configured allocator.
            for instance in self.instances.iter_mut() {
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }
            // The store's internal "default caller" instance is likewise
            // deallocated on-demand-style.
            ondemand.deallocate_module(&mut self.default_caller);

            // Hand this store's GC heap, if one was ever created, back to the
            // engine's allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                allocator.deallocate_gc_heap(gc_store.allocation_index, gc_store.gc_heap);
            }

            // Release the component-instance count this store accumulated
            // against the allocator, one decrement per instance.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }

            // See documentation for these fields on `StoreOpaque` for why they
            // must be dropped in this order.
            ManuallyDrop::drop(&mut self.store_data);
            ManuallyDrop::drop(&mut self.rooted_host_funcs);
        }
    }
}
2659
impl wasmtime_runtime::ModuleInfoLookup for ModuleRegistry {
    // Delegates straight to the registry's own lookup; `pc` is presumably a
    // program counter within compiled code — confirm against
    // `ModuleRegistry::lookup_module_info`'s definition.
    fn lookup(&self, pc: usize) -> Option<&dyn wasmtime_runtime::ModuleInfo> {
        self.lookup_module_info(pc)
    }
}
2665
/// RAII guard that writes a saved value back through a raw pointer on drop,
/// restoring `*ptr` to its prior value even on early return or unwind.
struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        // SAFETY: soundness relies on whoever constructs a `Reset` ensuring
        // the pointer stays valid for writes for the guard's entire lifetime;
        // the construction sites are not visible in this chunk — confirm there.
        unsafe {
            *self.0 = self.1;
        }
    }
}
2675
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Minimal stand-in for a store's fuel bookkeeping so the free functions
    /// `get_fuel`/`refuel`/`set_fuel` can be exercised in isolation.
    #[derive(Default)]
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            // Empty tank: nothing consumed, nothing reserved, no interval.
            Self::default()
        }

        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }

        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }

        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut t = FuelTank::new();

        // No yield interval: all fuel becomes active (stored negated in the
        // consumed counter) with nothing held in reserve.
        t.set_fuel(10);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, 0);

        // With an interval of 10, only one interval's worth is made active;
        // the remaining 15 units are banked in the reserve.
        t.yield_interval = NonZeroU64::new(10);
        t.set_fuel(25);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Round-tripping through set/get must be exact at the extremes of
        // both the u64 range and the i64 boundary.
        let mut t = FuelTank::new();
        for amount in [u64::MAX, i64::MAX as u64, i64::MAX as u64 + 1] {
            t.set_fuel(amount);
            assert_eq!(t.get_fuel(), amount);
        }
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut t = FuelTank::new();

        // Small interval: one interval active, everything else in reserve,
        // with the total still reported exactly.
        t.yield_interval = NonZeroU64::new(10);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, u64::MAX - 10);

        // An interval larger than the signed counter can hold is limited to
        // `i64::MAX` units of active fuel.
        t.yield_interval = NonZeroU64::new(u64::MAX);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -i64::MAX);
        assert_eq!(t.reserve_fuel, u64::MAX - (i64::MAX as u64));

        // Same limiting behavior just past the i64 boundary.
        t.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -i64::MAX);
        assert_eq!(t.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // Some instructions burn several units of fuel at once, so the
        // consumed counter can overshoot past zero. Refueling must be strict
        // in its accounting and never add more fuel than actually remains.
        let mut t = FuelTank::new();

        // Ample reserve: the 4-unit overshoot is paid off and a fresh
        // 10-unit interval becomes active (42 - 4 - 10 = 28 left in reserve).
        t.yield_interval = NonZeroU64::new(10);
        t.reserve_fuel = 42;
        t.consumed_fuel = 4;
        assert!(t.refuel());
        assert_eq!(t.reserve_fuel, 28);
        assert_eq!(t.consumed_fuel, -10);

        // Refueling must keep the externally-visible total unchanged.
        t.yield_interval = NonZeroU64::new(1);
        t.reserve_fuel = 8;
        t.consumed_fuel = 4;
        assert_eq!(t.get_fuel(), 4);
        assert!(t.refuel());
        assert_eq!(t.reserve_fuel, 3);
        assert_eq!(t.consumed_fuel, -1);
        assert_eq!(t.get_fuel(), 4);

        // Reserve too small to cover the overshoot: refueling reports
        // failure and leaves all counters untouched.
        t.yield_interval = NonZeroU64::new(10);
        t.reserve_fuel = 3;
        t.consumed_fuel = 4;
        assert_eq!(t.get_fuel(), 0);
        assert!(!t.refuel());
        assert_eq!(t.reserve_fuel, 3);
        assert_eq!(t.consumed_fuel, 4);
        assert_eq!(t.get_fuel(), 0);
    }
}
2796}