dusk_wasmtime/
config.rs

1use anyhow::{bail, ensure, Result};
2use serde_derive::{Deserialize, Serialize};
3use std::collections::{HashMap, HashSet};
4use std::fmt;
5#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
6use std::path::Path;
7use std::str::FromStr;
8use std::sync::Arc;
9use target_lexicon::Architecture;
10use wasmparser::WasmFeatures;
11#[cfg(feature = "cache")]
12use wasmtime_cache::CacheConfig;
13use wasmtime_environ::Tunables;
14
15#[cfg(feature = "runtime")]
16use crate::memory::MemoryCreator;
17#[cfg(feature = "runtime")]
18use crate::profiling_agent::{self, ProfilingAgent};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21#[cfg(feature = "runtime")]
22use wasmtime_runtime::{
23    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
24};
25
26#[cfg(feature = "async")]
27use crate::stack::{StackCreator, StackCreatorProxy};
28#[cfg(feature = "async")]
29use wasmtime_fiber::RuntimeFiberStackCreator;
30
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33#[cfg(feature = "pooling-allocator")]
34use wasmtime_runtime::mpk;
35#[cfg(feature = "pooling-allocator")]
36pub use wasmtime_runtime::MpkEnabled;
37
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` crate feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
56
57impl InstanceAllocationStrategy {
58    /// The default pooling instance allocation strategy.
59    #[cfg(feature = "pooling-allocator")]
60    pub fn pooling() -> Self {
61        Self::Pooling(Default::default())
62    }
63}
64
65impl Default for InstanceAllocationStrategy {
66    fn default() -> Self {
67        Self::OnDemand
68    }
69}
70
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
#[derive(Clone)]
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
81
82impl Default for ModuleVersionStrategy {
83    fn default() -> Self {
84        ModuleVersionStrategy::WasmtimeVersion
85    }
86}
87
88impl std::hash::Hash for ModuleVersionStrategy {
89    fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
90        match self {
91            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
92            Self::Custom(s) => s.hash(hasher),
93            Self::None => {}
94        };
95    }
96}
97
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
#[derive(Clone)]
pub struct Config {
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: CompilerConfig,
    profiling_strategy: ProfilingStrategy,
    // User-specified overrides of `wasmtime_environ::Tunables` knobs;
    // `None` fields were never explicitly configured.
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache_config: CacheConfig,
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    pub(crate) max_wasm_stack: usize,
    // Wasm proposals/features enabled for this engine (see the `wasm_*`
    // builder methods).
    pub(crate) features: WasmFeatures,
    pub(crate) wasm_backtrace: bool,
    // Set by `wasm_backtrace_details` when the `WASMTIME_BACKTRACE_DETAILS`
    // environment variable was consulted.
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(feature = "async")]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_init_cow: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
}
137
/// Option-per-knob overrides mirroring fields of
/// [`Tunables`](wasmtime_environ::Tunables).
///
/// A `None` field means the corresponding `Config` builder method was never
/// called; presumably defaults are filled in when the engine is built — the
/// resolution itself is not in this file's view (TODO confirm).
#[derive(Default, Clone)]
struct ConfigTunables {
    static_memory_bound: Option<u64>,
    static_memory_offset_guard_size: Option<u64>,
    dynamic_memory_offset_guard_size: Option<u64>,
    dynamic_memory_growth_reserve: Option<u64>,
    generate_native_debuginfo: Option<bool>,
    parse_wasm_debuginfo: Option<bool>,
    consume_fuel: Option<bool>,
    epoch_interruption: Option<bool>,
    static_memory_bound_is_maximum: Option<bool>,
    guard_before_linear_memory: Option<bool>,
    generate_address_map: Option<bool>,
    debug_adapter_modules: Option<bool>,
    relaxed_simd_deterministic: Option<bool>,
    tail_callable: Option<bool>,
}
155
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Which compiler backend to use (see `Strategy`).
    strategy: Strategy,
    /// Target triple override; `None` means the host's triple is used.
    target: Option<target_lexicon::Triple>,
    /// Named compiler settings with explicit values (see
    /// `ensure_setting_unset_or_given`).
    settings: HashMap<String, String>,
    /// Compiler flags that are simply enabled by name.
    flags: HashSet<String>,
    /// Storage backend for Cranelift's incremental compilation cache.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // NOTE(review): looks like a directory for emitting CLIF output —
    // confirm against where the compiler consumes this.
    clif_dir: Option<std::path::PathBuf>,
    // Mirrors `Config::wmemcheck` — TODO confirm how the backend uses it.
    wmemcheck: bool,
}
169
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration using the given strategy, with all
    /// other options left empty or unset.
    fn new(strategy: Strategy) -> Self {
        Self {
            strategy,
            target: None,
            settings: HashMap::default(),
            flags: HashSet::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Records `v` as the value for setting `k` unless the setting already
    /// has an explicitly-configured value.
    ///
    /// # Returns
    ///
    /// `true` when the setting now holds `v` (it was just inserted, or was
    /// already set to that value); `false` when a conflicting value was
    /// configured previously. An existing value is never overwritten.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            Some(existing) => existing == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
204
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Defaults to automatic compiler-strategy selection.
    fn default() -> Self {
        CompilerConfig::new(Strategy::Auto)
    }
}
211
212impl Config {
213    /// Creates a new configuration object with the default configuration
214    /// specified.
215    pub fn new() -> Self {
216        let mut ret = Self {
217            tunables: ConfigTunables::default(),
218            #[cfg(any(feature = "cranelift", feature = "winch"))]
219            compiler_config: CompilerConfig::default(),
220            #[cfg(feature = "cache")]
221            cache_config: CacheConfig::new_cache_disabled(),
222            profiling_strategy: ProfilingStrategy::None,
223            #[cfg(feature = "runtime")]
224            mem_creator: None,
225            allocation_strategy: InstanceAllocationStrategy::OnDemand,
226            // 512k of stack -- note that this is chosen currently to not be too
227            // big, not be too small, and be a good default for most platforms.
228            // One platform of particular note is Windows where the stack size
229            // of the main thread seems to, by default, be smaller than that of
230            // Linux and macOS. This 512k value at least lets our current test
231            // suite pass on the main thread of Windows (using `--test-threads
232            // 1` forces this), or at least it passed when this change was
233            // committed.
234            max_wasm_stack: 512 * 1024,
235            wasm_backtrace: true,
236            wasm_backtrace_details_env_used: false,
237            native_unwind_info: None,
238            features: WasmFeatures::default(),
239            #[cfg(feature = "async")]
240            async_stack_size: 2 << 20,
241            #[cfg(feature = "async")]
242            stack_creator: None,
243            async_support: false,
244            module_version: ModuleVersionStrategy::default(),
245            parallel_compilation: !cfg!(miri),
246            memory_init_cow: true,
247            memory_guaranteed_dense_image_size: 16 << 20,
248            force_memory_init_memfd: false,
249            wmemcheck: false,
250            coredump_on_trap: false,
251            macos_use_mach_ports: !cfg!(miri),
252        };
253        #[cfg(any(feature = "cranelift", feature = "winch"))]
254        {
255            ret.cranelift_debug_verifier(false);
256            ret.cranelift_opt_level(OptLevel::Speed);
257        }
258
259        // Conditionally enabled features depending on compile-time crate
260        // features. Note that if these features are disabled then `Config` has
261        // no way of re-enabling them.
262        ret.features.reference_types = cfg!(feature = "gc");
263        ret.features.threads = cfg!(feature = "threads");
264        ret.features.component_model = cfg!(feature = "component-model");
265
266        // If GC is disabled at compile time also disable it in features
267        // forcibly irrespective of `wasmparser` defaults. Note that these also
268        // aren't yet fully implemented in Wasmtime.
269        if !cfg!(feature = "gc") {
270            ret.features.function_references = false;
271            ret.features.gc = false;
272        }
273
274        ret.wasm_multi_value(true);
275        ret.wasm_bulk_memory(true);
276        ret.wasm_simd(true);
277        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
278
279        // This is on-by-default in `wasmparser` since it's a stage 4+ proposal
280        // but it's not implemented in Wasmtime yet so disable it.
281        ret.features.tail_call = false;
282
283        ret
284    }
285
286    /// Sets the target triple for the [`Config`].
287    ///
288    /// By default, the host target triple is used for the [`Config`].
289    ///
290    /// This method can be used to change the target triple.
291    ///
292    /// Cranelift flags will not be inferred for the given target and any
293    /// existing target-specific Cranelift flags will be cleared.
294    ///
295    /// # Errors
296    ///
297    /// This method will error if the given target triple is not supported.
298    #[cfg(any(feature = "cranelift", feature = "winch"))]
299    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
300    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
301        self.compiler_config.target =
302            Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
303
304        Ok(self)
305    }
306
307    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
308    /// backend for storage.
309    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
310    pub fn enable_incremental_compilation(
311        &mut self,
312        cache_store: Arc<dyn CacheStore>,
313    ) -> Result<&mut Self> {
314        self.compiler_config.cache_store = Some(cache_store);
315        Ok(self)
316    }
317
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this technique, whenever an `async` host function returns a
    /// future that resolves to `Pending`, we switch away from the temporary
    /// stack back to the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this
    ///   will configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
415
416    /// Configures whether DWARF debug information will be emitted during
417    /// compilation.
418    ///
419    /// Note that the `debug-builtins` compile-time Cargo feature must also be
420    /// enabled for native debuggers such as GDB or LLDB to be able to debug
421    /// guest WebAssembly programs.
422    ///
423    /// By default this option is `false`.
424    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
425        self.tunables.generate_native_debuginfo = Some(enable);
426        self
427    }
428
429    /// Configures whether [`WasmBacktrace`] will be present in the context of
430    /// errors returned from Wasmtime.
431    ///
432    /// A backtrace may be collected whenever an error is returned from a host
433    /// function call through to WebAssembly or when WebAssembly itself hits a
434    /// trap condition, such as an out-of-bounds memory access. This flag
435    /// indicates, in these conditions, whether the backtrace is collected or
436    /// not.
437    ///
438    /// Currently wasm backtraces are implemented through frame pointer walking.
439    /// This means that collecting a backtrace is expected to be a fast and
440    /// relatively cheap operation. Additionally backtrace collection is
441    /// suitable in concurrent environments since one thread capturing a
442    /// backtrace won't block other threads.
443    ///
444    /// Collected backtraces are attached via [`anyhow::Error::context`] to
445    /// errors returned from host functions. The [`WasmBacktrace`] type can be
446    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
447    /// When this option is disabled then this context is never applied to
448    /// errors coming out of wasm.
449    ///
450    /// This option is `true` by default.
451    ///
452    /// [`WasmBacktrace`]: crate::WasmBacktrace
453    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
454        self.wasm_backtrace = enable;
455        self
456    }
457
458    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
459    /// have filename/line number information.
460    ///
461    /// When enabled this will causes modules to retain debugging information
462    /// found in wasm binaries. This debug information will be used when a trap
463    /// happens to symbolicate each stack frame and attempt to print a
464    /// filename/line number for each wasm frame in the stack trace.
465    ///
466    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
467    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether details
468    /// should be parsed.
469    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
470        self.wasm_backtrace_details_env_used = false;
471        self.tunables.parse_wasm_debuginfo = match enable {
472            WasmBacktraceDetails::Enable => Some(true),
473            WasmBacktraceDetails::Disable => Some(false),
474            WasmBacktraceDetails::Environment => {
475                self.wasm_backtrace_details_env_used = true;
476                std::env::var("WASMTIME_BACKTRACE_DETAILS")
477                    .map(|s| Some(s == "1"))
478                    .unwrap_or(Some(false))
479            }
480        };
481        self
482    }
483
484    /// Configures whether to generate native unwind information
485    /// (e.g. `.eh_frame` on Linux).
486    ///
487    /// This configuration option only exists to help third-party stack
488    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
489    /// crate, determine how to unwind through Wasm frames. It does not affect
490    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
491    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
492    /// option.
493    ///
494    /// Native unwind information is included:
495    /// - When targeting Windows, since the Windows ABI requires it.
496    /// - By default.
497    ///
498    /// [`WasmBacktrace`]: crate::WasmBacktrace
499    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
500        self.native_unwind_info = Some(enable);
501        self
502    }
503
504    /// Configures whether execution of WebAssembly will "consume fuel" to
505    /// either halt or yield execution as desired.
506    ///
507    /// This can be used to deterministically prevent infinitely-executing
508    /// WebAssembly code by instrumenting generated code to consume fuel as it
509    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
510    /// configured to yield execution periodically via
511    /// [`crate::Store::fuel_async_yield_interval`].
512    ///
513    /// Note that a [`Store`] starts with no fuel, so if you enable this option
514    /// you'll have to be sure to pour some fuel into [`Store`] before
515    /// executing some code.
516    ///
517    /// By default this option is `false`.
518    ///
519    /// [`Store`]: crate::Store
520    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
521        self.tunables.consume_fuel = Some(enable);
522        self
523    }
524
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
618
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if a host function uses more than this limit and then calls
    /// back into wasm, then the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        self.max_wasm_stack = size;
        self
    }
669
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(feature = "async")]
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Validated against `max_wasm_stack` when the engine is built.
        self.async_stack_size = size;
        self
    }
692
693    /// Configures whether the WebAssembly tail calls proposal will be enabled
694    /// for compilation or not.
695    ///
696    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
697    /// `return_call_indirect` instructions. These instructions allow for Wasm
698    /// programs to implement some recursive algorithms with *O(1)* stack space
699    /// usage.
700    ///
701    /// This feature is disabled by default.
702    ///
703    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
704    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
705        self.features.tail_call = enable;
706        self.tunables.tail_callable = Some(enable);
707        self
708    }
709
    /// Configures whether the WebAssembly [threads] proposal will be enabled
    /// for compilation.
    ///
    /// This feature gates items such as shared memories and atomic
    /// instructions. Note that the threads feature depends on the bulk memory
    /// feature, which is enabled by default. Additionally note that while the
    /// wasm feature is called "threads" it does not actually include the
    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
    /// proposal which is a separately gated feature in Wasmtime.
    ///
    /// Embeddings of Wasmtime are able to build their own custom threading
    /// scheme on top of the core wasm threads proposal, however.
    ///
    /// This is `true` by default.
    ///
    /// [threads]: https://github.com/webassembly/threads
    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
    #[cfg(feature = "threads")]
    #[cfg_attr(docsrs, doc(cfg(feature = "threads")))]
    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
        // Threads validation depends on bulk memory staying enabled (see the
        // note above).
        self.features.threads = enable;
        self
    }
733
    /// Configures whether the [WebAssembly reference types proposal][proposal]
    /// will be enabled for compilation.
    ///
    /// This feature gates items such as the `externref` and `funcref` types as
    /// well as allowing a module to define multiple tables.
    ///
    /// Note that the reference types proposal depends on the bulk memory proposal.
    ///
    /// This feature is `true` by default.
    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is
    /// disabled.
    ///
    /// [proposal]: https://github.com/webassembly/reference-types
    #[cfg(feature = "gc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "gc")))]
    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
        self.features.reference_types = enable;
        self
    }
756
    /// Configures whether the [WebAssembly function references
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates non-nullable reference types, function reference
    /// types, `call_ref`, `ref.func`, and non-nullable reference related
    /// instructions.
    ///
    /// Note that the function references proposal depends on the reference
    /// types proposal.
    ///
    /// This feature is `false` by default.
    ///
    /// [proposal]: https://github.com/WebAssembly/function-references
    #[cfg(feature = "gc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "gc")))]
    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
        // Requires the reference types proposal to also be enabled (see the
        // note above).
        self.features.function_references = enable;
        self
    }
776
    /// Configures whether the [WebAssembly Garbage Collection
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates `struct` and `array` type definitions and references,
    /// the `i31ref` type, and all related instructions.
    ///
    /// Note that the GC proposal depends on the typed function references
    /// proposal.
    ///
    /// This feature is `false` by default.
    ///
    /// **Warning: Wasmtime's implementation of the GC proposal is still in
    /// progress and generally not ready for primetime.**
    ///
    /// [proposal]: https://github.com/WebAssembly/gc
    #[cfg(feature = "gc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "gc")))]
    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
        self.features.gc = enable;
        self
    }
798
    /// Configures whether the WebAssembly SIMD proposal will be
    /// enabled for compilation.
    ///
    /// This feature, from the [WebAssembly SIMD proposal][proposal], gates
    /// items such as the `v128` type and all of its operators being in a
    /// module. Note that this does not enable the [relaxed simd proposal].
    ///
    /// On x86_64 platforms note that enabling this feature requires SSE 4.2 and
    /// below to be available on the target platform. Compilation will fail if
    /// the compile target does not include SSE 4.2.
    ///
    /// This is `true` by default.
    ///
    /// [proposal]: https://github.com/webassembly/simd
    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
        self.features.simd = enable;
        self
    }
818
    /// Configures whether the WebAssembly Relaxed SIMD [proposal] will be
    /// enabled for compilation.
    ///
    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
    /// for some specific inputs, are allowed to produce different results on
    /// different hosts. More-or-less this proposal enables exposing
    /// platform-specific semantics of SIMD instructions in a controlled
    /// fashion to a WebAssembly program. From an embedder's perspective this
    /// means that WebAssembly programs may execute differently depending on
    /// whether the host is x86_64 or AArch64, for example.
    ///
    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
    /// lowering for the platform it's running on. This means that, by default,
    /// some relaxed SIMD instructions may have different results for the same
    /// inputs across x86_64 and AArch64. This behavior can be disabled through
    /// the [`Config::relaxed_simd_deterministic`] option which will force
    /// deterministic behavior across all platforms, as classified by the
    /// specification, at the cost of performance.
    ///
    /// This is `true` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
        self.features.relaxed_simd = enable;
        self
    }
845
    /// This option can be used to control the behavior of the [relaxed SIMD
    /// proposal's][proposal] instructions.
    ///
    /// The relaxed SIMD proposal introduces instructions that are allowed to
    /// have different behavior on different architectures, primarily to afford
    /// an efficient implementation on all architectures. This means, however,
    /// that the same module may execute differently on one host than another,
    /// which typically is not otherwise the case. This option is provided to
    /// force Wasmtime to generate deterministic code for all relaxed simd
    /// instructions, at the cost of performance, for all architectures. When
    /// this option is enabled then the deterministic behavior of all
    /// instructions in the relaxed SIMD proposal is selected.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // Only affects lowering of relaxed SIMD instructions; see
        // [`Config::wasm_relaxed_simd`] for enabling the proposal itself.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
866
    /// Configures whether the [WebAssembly bulk memory operations
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates items such as the `memory.copy` instruction, passive
    /// data/table segments, etc, being in a module.
    ///
    /// This is `true` by default.
    ///
    /// Feature `reference_types`, which is also `true` by default, requires
    /// this feature to be enabled. Thus disabling this feature must also disable
    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
    ///
    /// # Errors
    ///
    /// Disabling this feature without disabling `reference_types` will cause
    /// `Engine::new` to fail.
    ///
    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
        // Disabling this while `reference_types` remains enabled makes
        // `Engine::new` fail (see `# Errors` above).
        self.features.bulk_memory = enable;
        self
    }
889
    /// Configures whether the WebAssembly multi-value [proposal] will
    /// be enabled for compilation.
    ///
    /// This feature gates functions and blocks returning multiple values in a
    /// module, for example.
    ///
    /// This is `true` by default.
    ///
    /// [proposal]: https://github.com/webassembly/multi-value
    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
        // Gates multi-value returns from functions and blocks.
        self.features.multi_value = enable;
        self
    }
903
    /// Configures whether the WebAssembly multi-memory [proposal] will
    /// be enabled for compilation.
    ///
    /// This feature gates modules having more than one linear memory
    /// declaration or import.
    ///
    /// This is `true` by default.
    ///
    /// [proposal]: https://github.com/webassembly/multi-memory
    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
        // Allows more than one linear memory per module.
        self.features.multi_memory = enable;
        self
    }
917
    /// Configures whether the WebAssembly memory64 [proposal] will
    /// be enabled for compilation.
    ///
    /// Note that the upstream specification is not yet finalized and Wasmtime
    /// may also have bugs for this feature since it hasn't been exercised
    /// much.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/memory64
    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
        self.features.memory64 = enable;
        self
    }
932
933    /// Configures whether the WebAssembly component-model [proposal] will
934    /// be enabled for compilation.
935    ///
936    /// Note that this feature is a work-in-progress and is incomplete.
937    ///
938    /// This is `false` by default.
939    ///
940    /// [proposal]: https://github.com/webassembly/component-model
941    #[cfg(feature = "component-model")]
942    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
943        self.features.component_model = enable;
944        self
945    }
946
    /// Configures which compilation strategy will be used for wasm modules.
    ///
    /// This method can be used to configure which compiler is used for wasm
    /// modules, and for more documentation consult the [`Strategy`] enumeration
    /// and its documentation.
    ///
    /// The default value for this is `Strategy::Auto`.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // Stored in the compiler configuration; see [`Strategy`] for options.
        self.compiler_config.strategy = strategy;
        self
    }
960
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] values require specific platforms or particular
    /// features to be enabled; for example `ProfilingStrategy::JitDump` requires
    /// the `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        self.profiling_strategy = profile;
        self
    }
979
980    /// Configures whether the debug verifier of Cranelift is enabled or not.
981    ///
982    /// When Cranelift is used as a code generation backend this will configure
983    /// it to have the `enable_verifier` flag which will enable a number of debug
984    /// checks inside of Cranelift. This is largely only useful for the
985    /// developers of wasmtime itself.
986    ///
987    /// The default value for this is `false`
988    #[cfg(any(feature = "cranelift", feature = "winch"))]
989    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
990    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
991        let val = if enable { "true" } else { "false" };
992        self.compiler_config
993            .settings
994            .insert("enable_verifier".to_string(), val.to_string());
995        self
996    }
997
998    /// Configures the Cranelift code generator optimization level.
999    ///
1000    /// When the Cranelift code generator is used you can configure the
1001    /// optimization level used for generated code in a few various ways. For
1002    /// more information see the documentation of [`OptLevel`].
1003    ///
1004    /// The default value for this is `OptLevel::None`.
1005    #[cfg(any(feature = "cranelift", feature = "winch"))]
1006    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
1007    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1008        let val = match level {
1009            OptLevel::None => "none",
1010            OptLevel::Speed => "speed",
1011            OptLevel::SpeedAndSize => "speed_and_size",
1012        };
1013        self.compiler_config
1014            .settings
1015            .insert("opt_level".to_string(), val.to_string());
1016        self
1017    }
1018
1019    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1020    ///
1021    /// When Cranelift is used as a code generation backend this will configure
1022    /// it to replace NaNs with a single canonical value. This is useful for
1023    /// users requiring entirely deterministic WebAssembly computation.  This is
1024    /// not required by the WebAssembly spec, so it is not enabled by default.
1025    ///
1026    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1027    /// types but additionally the `v128` type. This option will cause
1028    /// operations using any of these types to have extra checks placed after
1029    /// them to normalize NaN values as needed.
1030    ///
1031    /// The default value for this is `false`
1032    #[cfg(any(feature = "cranelift", feature = "winch"))]
1033    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
1034    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1035        let val = if enable { "true" } else { "false" };
1036        self.compiler_config
1037            .settings
1038            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1039        self
1040    }
1041
1042    /// Controls whether proof-carrying code (PCC) is used to validate
1043    /// lowering of Wasm sandbox checks.
1044    ///
1045    /// Proof-carrying code carries "facts" about program values from
1046    /// the IR all the way to machine code, and checks those facts
1047    /// against known machine-instruction semantics. This guards
1048    /// against bugs in instruction lowering that might create holes
1049    /// in the Wasm sandbox.
1050    ///
1051    /// PCC is designed to be fast: it does not require complex
1052    /// solvers or logic engines to verify, but only a linear pass
1053    /// over a trail of "breadcrumbs" or facts at each intermediate
1054    /// value. Thus, it is appropriate to enable in production.
1055    #[cfg(any(feature = "cranelift", feature = "winch"))]
1056    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
1057    pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1058        let val = if enable { "true" } else { "false" };
1059        self.compiler_config
1060            .settings
1061            .insert("enable_pcc".to_string(), val.to_string());
1062        self
1063    }
1064
    /// Allows setting a Cranelift boolean flag or preset. This allows
    /// fine-tuning of Cranelift settings.
    ///
    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
    /// either; other `Config` functions should be preferred for stability.
    ///
    /// # Safety
    ///
    /// This is marked as unsafe, because setting the wrong flag might break invariants,
    /// resulting in execution hazards.
    ///
    /// # Errors
    ///
    /// The validation of the flags are deferred until the engine is being built, and thus may
    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
    /// for the flag type.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
        // Stored by name only; validated when the engine is built (see
        // `# Errors` above).
        self.compiler_config.flags.insert(flag.to_string());
        self
    }
1087
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
    /// fine-tuning of Cranelift settings.
    ///
    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
    /// either; other `Config` functions should be preferred for stability.
    ///
    /// # Safety
    ///
    /// This is marked as unsafe, because setting the wrong flag might break invariants,
    /// resulting in execution hazards.
    ///
    /// # Errors
    ///
    /// The validation of the flags are deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or the value is
    /// incompatible with other settings.
    ///
    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
    /// manually set to false then it will fail.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    #[cfg_attr(docsrs, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
        self.compiler_config
            .settings
            .insert(name.to_string(), value.to_string());
        self
    }
1115
    /// Loads cache configuration specified at `path`.
    ///
    /// This method will read the file specified by `path` on the filesystem and
    /// attempt to load cache configuration from it. This method can also fail
    /// due to I/O errors, misconfiguration, syntax errors, etc. For expected
    /// syntax in the configuration file see the [documentation online][docs].
    ///
    /// By default cache configuration is not enabled or loaded.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// # Errors
    ///
    /// This method can fail due to any error that happens when loading the file
    /// pointed to by `path` and attempting to load the cache configuration.
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    #[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
    pub fn cache_config_load(&mut self, path: impl AsRef<Path>) -> Result<&mut Self> {
        // Errors from reading/parsing the file propagate to the caller.
        self.cache_config = CacheConfig::from_file(Some(path.as_ref()))?;
        Ok(self)
    }
1140
    /// Disable caching.
    ///
    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will
    /// recompile `my_wasm`, even when it is unchanged.
    ///
    /// By default, new configs do not have caching enabled. This method is only
    /// useful for disabling a previous cache configuration.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    #[cfg(feature = "cache")]
    #[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
    pub fn disable_cache(&mut self) -> &mut Self {
        // Replaces any previously-loaded configuration with a disabled one.
        self.cache_config = CacheConfig::new_cache_disabled();
        self
    }
1157
    /// Loads cache configuration from the system default path.
    ///
    /// This method is the same as [`Config::cache_config_load`] except that it
    /// does not take a path argument and instead loads the default
    /// configuration present on the system. This is located, for example, on
    /// Unix at `$HOME/.config/wasmtime/config.toml` and is typically created
    /// with the `wasmtime config new` command.
    ///
    /// By default cache configuration is not enabled or loaded.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// # Errors
    ///
    /// This method can fail due to any error that happens when loading the
    /// default system configuration. Note that it is not an error if the
    /// default config file does not exist, in which case the default settings
    /// for an enabled cache are applied.
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    #[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
    pub fn cache_config_load_default(&mut self) -> Result<&mut Self> {
        // `None` selects the system default configuration location.
        self.cache_config = CacheConfig::from_file(None)?;
        Ok(self)
    }
1185
    /// Sets a custom memory creator.
    ///
    /// Custom memory creators are used when creating host `Memory` objects or when
    /// creating instance linear memories for the on-demand instance allocation strategy.
    #[cfg(feature = "runtime")]
    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
        // Wrapped in `MemoryCreatorProxy` (from `crate::trampoline`) before
        // being stored.
        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
        self
    }
1195
    /// Sets a custom stack creator.
    ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
    #[cfg(feature = "async")]
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
        // Wrapped in `StackCreatorProxy` (from `crate::stack`) before being
        // stored.
        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
        self
    }
1206
    /// Sets the instance allocation strategy to use.
    ///
    /// When using the pooling instance allocation strategy, all linear memories
    /// will be created as "static" and the
    /// [`Config::static_memory_maximum_size`] and
    /// [`Config::static_memory_guard_size`] options will be used to configure
    /// the virtual memory allocations of linear memories.
    pub fn allocation_strategy(&mut self, strategy: InstanceAllocationStrategy) -> &mut Self {
        // See [`InstanceAllocationStrategy`] for the available strategies.
        self.allocation_strategy = strategy;
        self
    }
1218
1219    /// Configures the maximum size, in bytes, where a linear memory is
1220    /// considered static, above which it'll be considered dynamic.
1221    ///
1222    /// > Note: this value has important performance ramifications, be sure to
1223    /// > understand what this value does before tweaking it and benchmarking.
1224    ///
1225    /// This function configures the threshold for wasm memories whether they're
1226    /// implemented as a dynamically relocatable chunk of memory or a statically
1227    /// located chunk of memory. The `max_size` parameter here is the size, in
1228    /// bytes, where if the maximum size of a linear memory is below `max_size`
1229    /// then it will be statically allocated with enough space to never have to
1230    /// move. If the maximum size of a linear memory is larger than `max_size`
1231    /// then wasm memory will be dynamically located and may move in memory
1232    /// through growth operations.
1233    ///
1234    /// Specifying a `max_size` of 0 means that all memories will be dynamic and
1235    /// may be relocated through `memory.grow`. Also note that if any wasm
1236    /// memory's maximum size is below `max_size` then it will still reserve
1237    /// `max_size` bytes in the virtual memory space.
1238    ///
1239    /// ## Static vs Dynamic Memory
1240    ///
1241    /// Linear memories represent contiguous arrays of bytes, but they can also
1242    /// be grown through the API and wasm instructions. When memory is grown if
1243    /// space hasn't been preallocated then growth may involve relocating the
1244    /// base pointer in memory. Memories in Wasmtime are classified in two
1245    /// different ways:
1246    ///
1247    /// * **static** - these memories preallocate all space necessary they'll
1248    ///   ever need, meaning that the base pointer of these memories is never
1249    ///   moved. Static memories may take more virtual memory space because of
1250    ///   pre-reserving space for memories.
1251    ///
1252    /// * **dynamic** - these memories are not preallocated and may move during
1253    ///   growth operations. Dynamic memories consume less virtual memory space
1254    ///   because they don't need to preallocate space for future growth.
1255    ///
1256    /// Static memories can be optimized better in JIT code because once the
1257    /// base address is loaded in a function it's known that we never need to
1258    /// reload it because it never changes, `memory.grow` is generally a pretty
1259    /// fast operation because the wasm memory is never relocated, and under
1260    /// some conditions bounds checks can be elided on memory accesses.
1261    ///
1262    /// Dynamic memories can't be quite as heavily optimized because the base
1263    /// address may need to be reloaded more often, they may require relocating
1264    /// lots of data on `memory.grow`, and dynamic memories require
1265    /// unconditional bounds checks on all memory accesses.
1266    ///
1267    /// ## Should you use static or dynamic memory?
1268    ///
1269    /// In general you probably don't need to change the value of this property.
1270    /// The defaults here are optimized for each target platform to consume a
1271    /// reasonable amount of physical memory while also generating speedy
1272    /// machine code.
1273    ///
1274    /// One of the main reasons you may want to configure this today is if your
1275    /// environment can't reserve virtual memory space for each wasm linear
1276    /// memory. On 64-bit platforms wasm memories require a 6GB reservation by
1277    /// default, and system limits may prevent this in some scenarios. In this
1278    /// case you may wish to force memories to be allocated dynamically meaning
1279    /// that the virtual memory footprint of creating a wasm memory should be
1280    /// exactly what's used by the wasm itself.
1281    ///
1282    /// For 32-bit memories a static memory must contain at least 4GB of
1283    /// reserved address space plus a guard page to elide any bounds checks at
1284    /// all. Smaller static memories will use similar bounds checks as dynamic
1285    /// memories.
1286    ///
1287    /// ## Default
1288    ///
1289    /// The default value for this property depends on the host platform. For
1290    /// 64-bit platforms there's lots of address space available, so the default
1291    /// configured here is 4GB. WebAssembly linear memories currently max out at
1292    /// 4GB which means that on 64-bit platforms Wasmtime by default always uses
1293    /// a static memory. This, coupled with a sufficiently sized guard region,
1294    /// should produce the fastest JIT code on 64-bit platforms, but does
1295    /// require a large address space reservation for each wasm memory.
1296    ///
1297    /// For 32-bit platforms this value defaults to 1GB. This means that wasm
1298    /// memories whose maximum size is less than 1GB will be allocated
1299    /// statically, otherwise they'll be considered dynamic.
1300    ///
1301    /// ## Static Memory and Pooled Instance Allocation
1302    ///
1303    /// When using the pooling instance allocator memories are considered to
1304    /// always be static memories, they are never dynamic. This setting
1305    /// configures the size of linear memory to reserve for each memory in the
1306    /// pooling allocator.
1307    ///
1308    /// Note that the pooling allocator can reduce the amount of memory needed
1309    /// for pooling allocation by using memory protection; see
1310    /// `PoolingAllocatorConfig::memory_protection_keys` for details.
    pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self {
        // Integer division rounds down: any remainder of `max_size` below a
        // whole wasm page is discarded.
        let max_pages = max_size / u64::from(wasmtime_environ::WASM_PAGE_SIZE);
        self.tunables.static_memory_bound = Some(max_pages);
        self
    }
1316
    /// Indicates that the "static" style of memory should always be used.
    ///
    /// This configuration option enables selecting the "static" option for all
    /// linear memories created within this `Config`. This means that all
    /// memories will be allocated up-front and will never move. Additionally
    /// this means that all memories are synthetically limited by the
    /// [`Config::static_memory_maximum_size`] option, regardless of what the
    /// actual maximum size is on the memory's original type.
    ///
    /// For the difference between static and dynamic memories, see the
    /// [`Config::static_memory_maximum_size`].
    pub fn static_memory_forced(&mut self, force: bool) -> &mut Self {
        // Recorded in the tunables and applied when the engine is built.
        self.tunables.static_memory_bound_is_maximum = Some(force);
        self
    }
1332
1333    /// Configures the size, in bytes, of the guard region used at the end of a
1334    /// static memory's address space reservation.
1335    ///
1336    /// > Note: this value has important performance ramifications, be sure to
1337    /// > understand what this value does before tweaking it and benchmarking.
1338    ///
1339    /// All WebAssembly loads/stores are bounds-checked and generate a trap if
1340    /// they're out-of-bounds. Loads and stores are often very performance
1341    /// critical, so we want the bounds check to be as fast as possible!
1342    /// Accelerating these memory accesses is the motivation for a guard after a
1343    /// memory allocation.
1344    ///
1345    /// Memories (both static and dynamic) can be configured with a guard at the
1346    /// end of them which consists of unmapped virtual memory. This unmapped
1347    /// memory will trigger a memory access violation (e.g. segfault) if
1348    /// accessed. This allows JIT code to elide bounds checks if it can prove
1349    /// that an access, if out of bounds, would hit the guard region. This means
1350    /// that having such a guard of unmapped memory can remove the need for
1351    /// bounds checks in JIT code.
1352    ///
1353    /// For the difference between static and dynamic memories, see the
1354    /// [`Config::static_memory_maximum_size`].
1355    ///
1356    /// ## How big should the guard be?
1357    ///
1358    /// In general, like with configuring `static_memory_maximum_size`, you
1359    /// probably don't want to change this value from the defaults. Otherwise,
1360    /// though, the size of the guard region affects the number of bounds checks
1361    /// needed for generated wasm code. More specifically, loads/stores with
1362    /// immediate offsets will generate bounds checks based on how big the guard
1363    /// page is.
1364    ///
1365    /// For 32-bit wasm memories a 4GB static memory is required to even start
1366    /// removing bounds checks. A 4GB guard size will guarantee that the module
1367    /// has zero bounds checks for memory accesses. A 2GB guard size will
1368    /// eliminate all bounds checks with an immediate offset less than 2GB. A
1369    /// guard size of zero means that all memory accesses will still have bounds
1370    /// checks.
1371    ///
1372    /// ## Default
1373    ///
1374    /// The default value for this property is 2GB on 64-bit platforms. This
1375    /// allows eliminating almost all bounds checks on loads/stores with an
1376    /// immediate offset of less than 2GB. On 32-bit platforms this defaults to
1377    /// 64KB.
1378    ///
1379    /// ## Errors
1380    ///
1381    /// The `Engine::new` method will return an error if this option is smaller
1382    /// than the value configured for [`Config::dynamic_memory_guard_size`].
1383    pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1384        let guard_size = round_up_to_pages(guard_size);
1385        self.tunables.static_memory_offset_guard_size = Some(guard_size);
1386        self
1387    }
1388
1389    /// Configures the size, in bytes, of the guard region used at the end of a
1390    /// dynamic memory's address space reservation.
1391    ///
1392    /// For the difference between static and dynamic memories, see the
1393    /// [`Config::static_memory_maximum_size`]
1394    ///
1395    /// For more information about what a guard is, see the documentation on
1396    /// [`Config::static_memory_guard_size`].
1397    ///
1398    /// Note that the size of the guard region for dynamic memories is not super
1399    /// critical for performance. Making it reasonably-sized can improve
1400    /// generated code slightly, but for maximum performance you'll want to lean
1401    /// towards static memories rather than dynamic anyway.
1402    ///
1403    /// Also note that the dynamic memory guard size must be smaller than the
1404    /// static memory guard size, so if a large dynamic memory guard is
1405    /// specified then the static memory guard size will also be automatically
1406    /// increased.
1407    ///
1408    /// ## Default
1409    ///
1410    /// This value defaults to 64KB.
1411    ///
1412    /// ## Errors
1413    ///
1414    /// The `Engine::new` method will return an error if this option is larger
1415    /// than the value configured for [`Config::static_memory_guard_size`].
1416    pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1417        let guard_size = round_up_to_pages(guard_size);
1418        self.tunables.dynamic_memory_offset_guard_size = Some(guard_size);
1419        self
1420    }
1421
1422    /// Configures the size, in bytes, of the extra virtual memory space
1423    /// reserved after a "dynamic" memory for growing into.
1424    ///
1425    /// For the difference between static and dynamic memories, see the
1426    /// [`Config::static_memory_maximum_size`]
1427    ///
1428    /// Dynamic memories can be relocated in the process's virtual address space
1429    /// on growth and do not always reserve their entire space up-front. This
1430    /// means that a growth of the memory may require movement in the address
1431    /// space, which in the worst case can copy a large number of bytes from one
1432    /// region to another.
1433    ///
1434    /// This setting configures how many bytes are reserved after the initial
1435    /// reservation for a dynamic memory for growing into. A value of 0 here
1436    /// means that no extra bytes are reserved and all calls to `memory.grow`
1437    /// will need to relocate the wasm linear memory (copying all the bytes). A
1438    /// value of 1 megabyte, however, means that `memory.grow` can allocate up
1439    /// to a megabyte of extra memory before the memory needs to be moved in
1440    /// linear memory.
1441    ///
1442    /// Note that this is a currently simple heuristic for optimizing the growth
1443    /// of dynamic memories, primarily implemented for the memory64 proposal
1444    /// where all memories are currently "dynamic". This is unlikely to be a
1445    /// one-size-fits-all style approach and if you're an embedder running into
1446    /// issues with dynamic memories and growth and are interested in having
1447    /// other growth strategies available here please feel free to [open an
1448    /// issue on the Wasmtime repository][issue]!
1449    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1451    ///
1452    /// ## Default
1453    ///
1454    /// For 64-bit platforms this defaults to 2GB, and for 32-bit platforms this
1455    /// defaults to 1MB.
1456    pub fn dynamic_memory_reserved_for_growth(&mut self, reserved: u64) -> &mut Self {
1457        self.tunables.dynamic_memory_growth_reserve = Some(round_up_to_pages(reserved));
1458        self
1459    }
1460
1461    /// Indicates whether a guard region is present before allocations of
1462    /// linear memory.
1463    ///
1464    /// Guard regions before linear memories are never used during normal
1465    /// operation of WebAssembly modules, even if they have out-of-bounds
1466    /// loads. The only purpose for a preceding guard region in linear memory
1467    /// is extra protection against possible bugs in code generators like
1468    /// Cranelift. This setting does not affect performance in any way, but will
1469    /// result in larger virtual memory reservations for linear memories (it
1470    /// won't actually ever use more memory, just use more of the address
1471    /// space).
1472    ///
1473    /// The size of the guard region before linear memory is the same as the
1474    /// guard size that comes after linear memory, which is configured by
1475    /// [`Config::static_memory_guard_size`] and
1476    /// [`Config::dynamic_memory_guard_size`].
1477    ///
1478    /// ## Default
1479    ///
1480    /// This value defaults to `true`.
1481    pub fn guard_before_linear_memory(&mut self, guard: bool) -> &mut Self {
1482        self.tunables.guard_before_linear_memory = Some(guard);
1483        self
1484    }
1485
    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
    /// This affects the behavior of [`crate::Module::serialize()`], as well as
1488    /// [`crate::Module::deserialize()`] and related functions.
1489    ///
1490    /// The default strategy is to use the wasmtime crate's Cargo package version.
1491    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1492        match strategy {
1493            // This case requires special precondition for assertion in SerializedModule::to_bytes
1494            ModuleVersionStrategy::Custom(ref v) => {
1495                if v.as_bytes().len() > 255 {
1496                    bail!("custom module version cannot be more than 255 bytes: {}", v);
1497                }
1498            }
1499            _ => {}
1500        }
1501        self.module_version = strategy;
1502        Ok(self)
1503    }
1504
    /// Configure whether wasmtime should compile a module using multiple
1506    /// threads.
1507    ///
1508    /// Disabling this will result in a single thread being used to compile
1509    /// the wasm bytecode.
1510    ///
1511    /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    #[cfg_attr(docsrs, doc(cfg(feature = "parallel-compilation")))]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        // Plain flag store; consulted later when a module is compiled to
        // decide whether to fan compilation out across threads.
        self.parallel_compilation = parallel;
        self
    }
1518
1519    /// Configures whether compiled artifacts will contain information to map
1520    /// native program addresses back to the original wasm module.
1521    ///
1522    /// This configuration option is `true` by default and, if enabled,
1523    /// generates the appropriate tables in compiled modules to map from native
1524    /// address back to wasm source addresses. This is used for displaying wasm
1525    /// program counters in backtraces as well as generating filenames/line
1526    /// numbers if so configured as well (and the original wasm module has DWARF
1527    /// debugging information present).
1528    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
1529        self.tunables.generate_address_map = Some(generate);
1530        self
1531    }
1532
1533    /// Configures whether copy-on-write memory-mapped data is used to
1534    /// initialize a linear memory.
1535    ///
1536    /// Initializing linear memory via a copy-on-write mapping can drastically
1537    /// improve instantiation costs of a WebAssembly module because copying
1538    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
1540    /// data will be reused between all instantiations of a module meaning that
1541    /// if a module is instantiated many times this can lower the overall memory
    /// required to run that module.
1543    ///
1544    /// The main disadvantage of copy-on-write initialization, however, is that
1545    /// it may be possible for highly-parallel scenarios to be less scalable. If
1546    /// a page is read initially by a WebAssembly module then that page will be
1547    /// mapped to a read-only copy shared between all WebAssembly instances. If
1548    /// the same page is then written, however, then a private copy is created
1549    /// and swapped out from the read-only version. This also requires an [IPI],
1550    /// however, which can be a significant bottleneck in high-parallelism
1551    /// situations.
1552    ///
1553    /// This feature is only applicable when a WebAssembly module meets specific
1554    /// criteria to be initialized in this fashion, such as:
1555    ///
1556    /// * Only memories defined in the module can be initialized this way.
1557    /// * Data segments for memory must use statically known offsets.
1558    /// * Data segments for memory must all be in-bounds.
1559    ///
1560    /// Modules which do not meet these criteria will fall back to
1561    /// initialization of linear memory based on copying memory.
1562    ///
1563    /// This feature of Wasmtime is also platform-specific:
1564    ///
1565    /// * Linux - this feature is supported for all instances of [`Module`].
1566    ///   Modules backed by an existing mmap (such as those created by
1567    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
1569    ///   syscall to create an initialization image to `mmap`.
1570    /// * Unix (not Linux) - this feature is only supported when loading modules
1571    ///   from a precompiled file via [`Module::deserialize_file`] where there
1572    ///   is a file descriptor to use to map data into the process. Note that
1573    ///   the module must have been compiled with this setting enabled as well.
1574    /// * Windows - there is no support for this feature at this time. Memory
1575    ///   initialization will always copy bytes.
1576    ///
1577    /// By default this option is enabled.
1578    ///
1579    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
1580    /// [`Module`]: crate::Module
1581    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        // Plain boolean toggle; the copy-on-write machinery itself is engaged
        // later, when module memory images are built and instantiated.
        self.memory_init_cow = enable;
        self
    }
1586
1587    /// A configuration option to force the usage of `memfd_create` on Linux to
1588    /// be used as the backing source for a module's initial memory image.
1589    ///
1590    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
1591    /// default, module memory initialization images are taken from a module's
1592    /// original mmap if possible. If a precompiled module was loaded from disk
1593    /// this means that the disk's file is used as an mmap source for the
1594    /// initial linear memory contents. This option can be used to force, on
1595    /// Linux, that instead of using the original file on disk a new in-memory
1596    /// file is created with `memfd_create` to hold the contents of the initial
1597    /// image.
1598    ///
1599    /// This option can be used to avoid possibly loading the contents of memory
1600    /// from disk through a page fault. Instead with `memfd_create` the contents
1601    /// of memory are always in RAM, meaning that even page faults which
1602    /// initially populate a wasm linear memory will only work with RAM instead
1603    /// of ever hitting the disk that the original precompiled module is stored
1604    /// on.
1605    ///
1606    /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        // Stored unconditionally on all platforms; per the docs above this
        // only changes behavior on Linux where `memfd_create` is available.
        self.force_memory_init_memfd = enable;
        self
    }
1611
1612    /// Configures whether or not a coredump should be generated and attached to
1613    /// the anyhow::Error when a trap is raised.
1614    ///
1615    /// This option is disabled by default.
    #[cfg(feature = "coredump")]
    #[cfg_attr(docsrs, doc(cfg(feature = "coredump")))]
    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
        // Only records the toggle; coredump capture itself happens when a
        // trap is actually raised.
        self.coredump_on_trap = enable;
        self
    }
1622
1623    /// Enables memory error checking for wasm programs.
1624    ///
1625    /// This option is disabled by default.
1626    #[cfg(any(feature = "cranelift", feature = "winch"))]
1627    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
1628        self.wmemcheck = enable;
1629        self.compiler_config.wmemcheck = enable;
1630        self
1631    }
1632
1633    /// Configures the "guaranteed dense image size" for copy-on-write
1634    /// initialized memories.
1635    ///
1636    /// When using the [`Config::memory_init_cow`] feature to initialize memory
1637    /// efficiently (which is enabled by default), compiled modules contain an
1638    /// image of the module's initial heap. If the module has a fairly sparse
1639    /// initial heap, with just a few data segments at very different offsets,
1640    /// this could result in a large region of zero bytes in the image. In
1641    /// other words, it's not very memory-efficient.
1642    ///
1643    /// We normally use a heuristic to avoid this: if less than half
1644    /// of the initialized range (first non-zero to last non-zero
1645    /// byte) of any memory in the module has pages with nonzero
1646    /// bytes, then we avoid creating a memory image for the entire module.
1647    ///
1648    /// However, if the embedder always needs the instantiation-time efficiency
1649    /// of copy-on-write initialization, and is otherwise carefully controlling
1650    /// parameters of the modules (for example, by limiting the maximum heap
1651    /// size of the modules), then it may be desirable to ensure a memory image
1652    /// is created even if this could go against the heuristic above. Thus, we
1653    /// add another condition: there is a size of initialized data region up to
1654    /// which we *always* allow a memory image. The embedder can set this to a
1655    /// known maximum heap size if they desire to always get the benefits of
1656    /// copy-on-write images.
1657    ///
1658    /// In the future we may implement a "best of both worlds"
1659    /// solution where we have a dense image up to some limit, and
1660    /// then support a sparse list of initializers beyond that; this
1661    /// would get most of the benefit of copy-on-write and pay the incremental
1662    /// cost of eager initialization only for those bits of memory
1663    /// that are out-of-bounds. However, for now, an embedder desiring
1664    /// fast instantiation should ensure that this setting is as large
1665    /// as the maximum module initial memory content size.
1666    ///
1667    /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        // Threshold (in bytes) below which a copy-on-write memory image is
        // always built, bypassing the sparseness heuristic.
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
1672
    /// Cross-checks the configured wasm features and settings and computes
    /// the final `Tunables` to compile with.
    ///
    /// Returns an error when mutually-dependent wasm proposals are enabled
    /// inconsistently, when stack-size settings are nonsensical, or when the
    /// resulting guard sizes conflict.
    pub(crate) fn validate(&self) -> Result<Tunables> {
        // Enforce the dependency chain between wasm proposals:
        // gc -> function_references -> reference_types -> bulk_memory,
        // plus threads -> bulk_memory.
        if self.features.reference_types && !self.features.bulk_memory {
            bail!("feature 'reference_types' requires 'bulk_memory' to be enabled");
        }
        if self.features.threads && !self.features.bulk_memory {
            bail!("feature 'threads' requires 'bulk_memory' to be enabled");
        }
        if self.features.function_references && !self.features.reference_types {
            bail!("feature 'function_references' requires 'reference_types' to be enabled");
        }
        if self.features.gc && !self.features.function_references {
            bail!("feature 'gc' requires 'function_references' to be enabled");
        }
        // The wasm stack must fit inside the fiber stack used for async
        // execution.
        #[cfg(feature = "async")]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        // `wmemcheck` can be requested via the API even when the supporting
        // cargo feature was compiled out; surface that as an error.
        #[cfg(not(feature = "wmemcheck"))]
        if self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        // Start from target-appropriate defaults when a compilation target
        // was configured, otherwise from the host's defaults.
        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
        let mut tunables = Tunables::default_host();
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        let mut tunables = match &self.compiler_config.target.as_ref() {
            Some(target) => Tunables::default_for_target(target)?,
            None => Tunables::default_host(),
        };

        // Overlay every explicitly-configured (`Some`) field of
        // `ConfigTunables` onto the defaults. The exhaustive destructuring
        // means adding a field to `ConfigTunables` without listing it in
        // `set_fields!` below is a compile error.
        macro_rules! set_fields {
            ($($field:ident)*) => (
                let ConfigTunables {
                    $($field,)*
                } = &self.tunables;

                $(
                    if let Some(e) = $field {
                        tunables.$field = *e;
                    }
                )*
            )
        }

        set_fields! {
            static_memory_bound
            static_memory_offset_guard_size
            dynamic_memory_offset_guard_size
            dynamic_memory_growth_reserve
            generate_native_debuginfo
            parse_wasm_debuginfo
            consume_fuel
            epoch_interruption
            static_memory_bound_is_maximum
            guard_before_linear_memory
            generate_address_map
            debug_adapter_modules
            relaxed_simd_deterministic
            tail_callable
        }

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = match self.compiler_config.strategy {
                // `Auto` resolves to winch only when cranelift is absent.
                Strategy::Auto => !cfg!(feature = "cranelift") && cfg!(feature = "winch"),
                Strategy::Cranelift => false,
                Strategy::Winch => true,
            };
        }

        if tunables.static_memory_offset_guard_size < tunables.dynamic_memory_offset_guard_size {
            bail!("static memory guard size cannot be smaller than dynamic memory guard size");
        }
        Ok(tunables)
    }
1752
    /// Constructs the instance allocator selected by
    /// `self.allocation_strategy`.
    ///
    /// On-demand allocation builds an `OnDemandInstanceAllocator` with any
    /// custom memory/stack creators installed; pooling allocation sizes the
    /// pool from `tunables`.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Fiber stacks only exist with async support; without it the size is
        // never consulted so zero suffices.
        #[cfg(feature = "async")]
        let stack_size = self.async_stack_size;

        #[cfg(not(feature = "async"))]
        let stack_size = 0;

        // `tunables` is only used by the pooling arm below; silence the
        // unused-variable warning when that feature is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                #[allow(unused_mut)]
                let mut allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                ));
                // Install a custom fiber-stack creator if one was configured.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Copy the user's pooling config and stamp in the stack size
                // computed above.
                let mut config = config.config;
                config.stack_size = stack_size;
                Ok(Box::new(wasmtime_runtime::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
1789
1790    #[cfg(feature = "runtime")]
1791    pub(crate) fn build_gc_runtime(&self) -> Result<Arc<dyn GcRuntime>> {
1792        Ok(Arc::new(wasmtime_runtime::default_gc_runtime()) as Arc<dyn GcRuntime>)
1793    }
1794
1795    #[cfg(feature = "runtime")]
1796    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
1797        Ok(match self.profiling_strategy {
1798            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
1799            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
1800            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
1801            ProfilingStrategy::None => profiling_agent::new_null(),
1802        })
1803    }
1804
    /// Consumes the configuration and constructs the Cranelift or Winch
    /// compiler it describes, returning the (possibly updated) `Config` back
    /// alongside the built compiler.
    ///
    /// Fails if the selected strategy's backend was compiled out, if compiler
    /// settings conflict with Wasmtime-level options, or if the
    /// target/feature combination is unsupported.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn build_compiler(
        mut self,
        tunables: &Tunables,
    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
        let target = self.compiler_config.target.clone();

        // Select the backend builder; `Auto` prefers Cranelift when both
        // backends are compiled in.
        let mut compiler = match self.compiler_config.strategy {
            #[cfg(feature = "cranelift")]
            Strategy::Auto => wasmtime_cranelift::builder(target)?,
            #[cfg(all(feature = "winch", not(feature = "cranelift")))]
            Strategy::Auto => wasmtime_winch::builder(target)?,
            #[cfg(feature = "cranelift")]
            Strategy::Cranelift => wasmtime_cranelift::builder(target)?,
            #[cfg(not(feature = "cranelift"))]
            Strategy::Cranelift => bail!("cranelift support not compiled in"),
            #[cfg(feature = "winch")]
            Strategy::Winch => wasmtime_winch::builder(target)?,
            #[cfg(not(feature = "winch"))]
            Strategy::Winch => bail!("winch support not compiled in"),
        };

        // Forward the clif-output directory, if requested via `emit_clif`.
        if let Some(path) = &self.compiler_config.clif_dir {
            compiler.clif_dir(path)?;
        }

        // If probestack is enabled for a target, Wasmtime will always use the
        // inline strategy which doesn't require us to define a `__probestack`
        // function or similar.
        self.compiler_config
            .settings
            .insert("probestack_strategy".into(), "inline".into());

        // Fall back to the host triple when no explicit target was set.
        let host = target_lexicon::Triple::host();
        let target = self
            .compiler_config
            .target
            .as_ref()
            .unwrap_or(&host)
            .clone();

        // On supported targets, we enable stack probing by default.
        // This is required on Windows because of the way Windows
        // commits its stacks, but it's also a good idea on other
        // platforms to ensure guard pages are hit for large frame
        // sizes.
        if probestack_supported(target.architecture) {
            self.compiler_config
                .flags
                .insert("enable_probestack".into());
        }

        if self.features.tail_call {
            ensure!(
                target.architecture != Architecture::S390x,
                "Tail calls are not supported on s390x yet: \
                 https://github.com/bytecodealliance/wasmtime/issues/6530"
            );
        }

        // An explicit `native_unwind_info` request must agree with any
        // `unwind_info` setting the user applied directly to the compiler.
        if let Some(unwind_requested) = self.native_unwind_info {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
            {
                bail!("incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings");
            }
        }

        // Unwind info is mandatory on Windows targets.
        if target.operating_system == target_lexicon::OperatingSystem::Windows {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", "true")
            {
                bail!("`native_unwind_info` cannot be disabled on Windows");
            }
        }

        // We require frame pointers for correct stack walking, which is safety
        // critical in the presence of reference types, and otherwise it is just
        // really bad developer experience to get wrong.
        self.compiler_config
            .settings
            .insert("preserve_frame_pointers".into(), "true".into());

        // check for incompatible compiler options and set required values
        if self.features.reference_types {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("enable_safepoints", "true")
            {
                bail!("compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled");
            }
        }

        if self.features.relaxed_simd && !self.features.simd {
            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
        }

        // Apply compiler settings and flags
        for (k, v) in self.compiler_config.settings.iter() {
            compiler.set(k, v)?;
        }
        for flag in self.compiler_config.flags.iter() {
            compiler.enable(flag)?;
        }

        // Hook up the incremental-compilation cache, when configured.
        #[cfg(feature = "incremental-cache")]
        if let Some(cache_store) = &self.compiler_config.cache_store {
            compiler.enable_incremental_compilation(cache_store.clone())?;
        }

        compiler.set_tunables(tunables.clone())?;
        compiler.wmemcheck(self.compiler_config.wmemcheck);

        Ok((self, compiler.build()?))
    }
1922
1923    /// Internal setting for whether adapter modules for components will have
1924    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
1926    #[cfg(feature = "component-model")]
1927    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
1928        self.tunables.debug_adapter_modules = Some(debug);
1929        self
1930    }
1931
1932    /// Enables clif output when compiling a WebAssembly module.
1933    #[cfg(any(feature = "cranelift", feature = "winch"))]
1934    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
1935        self.compiler_config.clif_dir = Some(path.to_path_buf());
1936        self
1937    }
1938
1939    /// Configures whether, when on macOS, Mach ports are used for exception
1940    /// handling instead of traditional Unix-based signal handling.
1941    ///
1942    /// WebAssembly traps in Wasmtime are implemented with native faults, for
1943    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
1944    /// out-of-bounds memory. Handling this can be configured to either use Unix
1945    /// signals or Mach ports on macOS. By default Mach ports are used.
1946    ///
1947    /// Mach ports enable Wasmtime to work by default with foreign
1948    /// error-handling systems such as breakpad which also use Mach ports to
1949    /// handle signals. In this situation Wasmtime will continue to handle guest
1950    /// faults gracefully while any non-guest faults will get forwarded to
1951    /// process-level handlers such as breakpad. Some more background on this
1952    /// can be found in #2456.
1953    ///
1954    /// A downside of using mach ports, however, is that they don't interact
1955    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
1956    /// child process that cannot successfully run WebAssembly. In this
1957    /// situation traditional Unix signal handling should be used as that's
1958    /// inherited and works across forks.
1959    ///
1960    /// If your embedding wants to use a custom error handler which leverages
1961    /// Mach ports and you additionally wish to `fork()` the process and use
1962    /// Wasmtime in the child process that's not currently possible. Please
1963    /// reach out to us if you're in this bucket!
1964    ///
1965    /// This option defaults to `true`, using Mach ports by default.
    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
        // Stored unconditionally on all platforms; per the docs above this
        // only changes trap-handling behavior on macOS.
        self.macos_use_mach_ports = mach_ports;
        self
    }
1970}
1971
1972/// If building without the runtime feature we can't determine the page size of
1973/// the platform where the execution will happen so just keep the original
1974/// values.
#[cfg(not(feature = "runtime"))]
fn round_up_to_pages(val: u64) -> u64 {
    // Identity: without the runtime feature the host page size of the
    // eventual execution environment is unknown, so the value is kept as-is.
    val
}
1979
/// Rounds `val` up to the next multiple of the host page size.
#[cfg(feature = "runtime")]
fn round_up_to_pages(val: u64) -> u64 {
    let page_size = wasmtime_runtime::page_size() as u64;
    // The mask arithmetic below relies on the page size being a power of two.
    debug_assert!(page_size.is_power_of_two());
    let mask = page_size - 1;
    match val.checked_add(mask) {
        // Clear the low bits to land on a page boundary.
        Some(padded) => padded & !mask,
        // `val` was within one page of `u64::MAX`; saturate to this fallback
        // value (NOTE(review): presumably an intentional sentinel — confirm).
        None => u64::MAX / page_size + 1,
    }
}
1988
1989impl Default for Config {
1990    fn default() -> Config {
1991        Config::new()
1992    }
1993}
1994
impl fmt::Debug for Config {
    // Hand-written `Debug`: prints a fixed set of wasm feature flags plus any
    // tunables that were explicitly configured.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Shadow `f` with the `DebugStruct` builder used for all fields below.
        let mut f = f.debug_struct("Config");
        f.field("debug_info", &self.tunables.generate_native_debuginfo)
            .field("wasm_threads", &self.features.threads)
            .field("wasm_reference_types", &self.features.reference_types)
            .field(
                "wasm_function_references",
                &self.features.function_references,
            )
            .field("wasm_gc", &self.features.gc)
            .field("wasm_bulk_memory", &self.features.bulk_memory)
            .field("wasm_simd", &self.features.simd)
            .field("wasm_relaxed_simd", &self.features.relaxed_simd)
            .field("wasm_multi_value", &self.features.multi_value)
            .field("parallel_compilation", &self.parallel_compilation);
        // The compiler configuration only exists when a compiler is built in.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            f.field("compiler_config", &self.compiler_config);
        }

        // These tunables are `Option`s; only the ones that were explicitly
        // set are included in the output.
        if let Some(enable) = self.tunables.parse_wasm_debuginfo {
            f.field("parse_wasm_debuginfo", &enable);
        }
        if let Some(size) = self.tunables.static_memory_bound {
            // `static_memory_bound` is multiplied by the wasm page size, so
            // the reported "static_memory_maximum_size" is in bytes.
            f.field(
                "static_memory_maximum_size",
                &(u64::from(size) * u64::from(wasmtime_environ::WASM_PAGE_SIZE)),
            );
        }
        if let Some(size) = self.tunables.static_memory_offset_guard_size {
            f.field("static_memory_guard_size", &size);
        }
        if let Some(size) = self.tunables.dynamic_memory_offset_guard_size {
            f.field("dynamic_memory_guard_size", &size);
        }
        if let Some(enable) = self.tunables.guard_before_linear_memory {
            f.field("guard_before_linear_memory", &enable);
        }
        f.finish()
    }
}
2037
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Strategy {
    /// Automatically select the compilation strategy.
    ///
    /// This is generally what you want for most projects: the `wasmtime`
    /// crate itself decides which code generator is best for a wasm module.
    ///
    /// Currently this always resolves to Cranelift, although the default may
    /// change over time.
    Auto,

    /// Use the Cranelift code generator, currently the default backend.
    ///
    /// Cranelift aims to compile reasonably quickly while still producing
    /// high quality machine code.
    Cranelift,

    /// Use Winch, a baseline compiler for WebAssembly.
    ///
    /// Winch is under active development and not ready for production
    /// applications.
    Winch,
}
2063
2064/// Possible optimization levels for the Cranelift codegen backend.
2065#[non_exhaustive]
2066#[derive(Copy, Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
2067pub enum OptLevel {
2068    /// No optimizations performed, minimizes compilation time by disabling most
2069    /// optimizations.
2070    None,
2071    /// Generates the fastest possible code, but may take longer.
2072    Speed,
2073    /// Similar to `speed`, but also performs transformations aimed at reducing
2074    /// code size.
2075    SpeedAndSize,
2076}
2077
/// Select which profiling technique to support.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Emit function name information in the "perf map" file format, consumed
    /// by `perf` on Linux.
    PerfMap,

    /// Emit profiling information in the "jitdump" file format, consumed by
    /// `perf` on Linux.
    JitDump,

    /// Emit profiling information via the "ittapi", consumed by `VTune` on
    /// Linux.
    VTune,
}
2094
/// Select how wasm backtrace detailed information is handled.
#[derive(Clone, Copy, Debug)]
pub enum WasmBacktraceDetails {
    /// Backtrace details are unconditionally enabled: wasmtime will parse and
    /// read debug information.
    Enable,

    /// Backtrace details are disabled: wasmtime will not parse debug
    /// information for backtrace details.
    Disable,

    /// Whether backtrace details are enabled is deferred to the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
2110
2111/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
2112/// change the behavior of the pooling instance allocator.
2113///
2114/// This structure has a builder-style API in the same manner as [`Config`] and
2115/// is configured with [`Config::allocation_strategy`].
2116///
2117/// Note that usage of the pooling allocator does not affect compiled
2118/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
2119/// with and without the pooling allocator.
2120///
2121/// ## Advantages of Pooled Allocation
2122///
2123/// The main benefit of the pooling allocator is to make WebAssembly
2124/// instantiation both faster and more scalable in terms of parallelism.
2125/// Allocation is faster because virtual memory is already configured and ready
2126/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
2127/// new region and configure it with guard pages. By avoiding [`mmap`] this
2128/// avoids whole-process virtual memory locks which can improve scalability and
2129/// performance through avoiding this.
2130///
2131/// Additionally with pooled allocation it's possible to create "affine slots"
2132/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means
/// that the slot has been pre-configured and can retain virtual memory
/// mappings for a copy-on-write image, for example (see
/// [`Config::memory_init_cow`] for more information).
2137/// This means that in a steady state instance deallocation is a single
2138/// [`madvise`] to reset linear memory to its original contents followed by a
2139/// single (optional) [`mprotect`] during the next instantiation to shrink
2140/// memory back to its original size. Compared to non-pooled allocation this
2141/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
2142/// [`mprotect`] regions too.
2143///
2144/// Another benefit of pooled allocation is that it's possible to configure
2145/// things such that no virtual memory management is required at all in a steady
2146/// state. For example a pooling allocator can be configured with
/// [`Config::memory_init_cow`] disabled, dynamic bounds checks enabled
2148/// through
2149/// [`Config::static_memory_maximum_size(0)`](Config::static_memory_maximum_size),
2150/// and sufficient space through
2151/// [`PoolingAllocationConfig::table_keep_resident`] /
2152/// [`PoolingAllocationConfig::linear_memory_keep_resident`]. With all these
2153/// options in place no virtual memory tricks are used at all and everything is
2154/// manually managed by Wasmtime (for example resetting memory is a
2155/// `memset(0)`). This is not as fast in a single-threaded scenario but can
2156/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
2158///
2159/// ## Disadvantages of Pooled Allocation
2160///
2161/// Despite the above advantages to instantiation performance the pooling
2162/// allocator is not enabled by default in Wasmtime. One reason is that the
2163/// performance advantages are not necessarily portable, for example while the
2164/// pooling allocator works on Windows it has not been tuned for performance on
2165/// Windows in the same way it has on Linux.
2166///
2167/// Additionally the main cost of the pooling allocator is that it requires a
2168/// very large reservation of virtual memory (on the order of most of the
2169/// addressable virtual address space). WebAssembly 32-bit linear memories in
2170/// Wasmtime are, by default 4G address space reservations with a 2G guard
2171/// region both before and after the linear memory. Memories in the pooling
2172/// allocator are contiguous which means that we only need a guard after linear
2173/// memory because the previous linear memory's slot post-guard is our own
2174/// pre-guard. This means that, by default, the pooling allocator uses 6G of
2175/// virtual memory per WebAssembly linear memory slot. 6G of virtual memory is
2176/// 32.5 bits of a 64-bit address. Many 64-bit systems can only actually use
2177/// 48-bit addresses by default (although this can be extended on architectures
2178/// nowadays too), and of those 48 bits one of them is reserved to indicate
2179/// kernel-vs-userspace. This leaves 47-32.5=14.5 bits left, meaning you can
2180/// only have at most 64k slots of linear memories on many systems by default.
2181/// This is a relatively small number and shows how the pooling allocator can
2182/// quickly exhaust all of virtual memory.
2183///
2184/// Another disadvantage of the pooling allocator is that it may keep memory
2185/// alive when nothing is using it. A previously used slot for an instance might
2186/// have paged-in memory that will not get paged out until the
2187/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
2188/// suitable for some applications this behavior may not be suitable for all
2189/// applications.
2190///
2191/// Finally the last disadvantage of the pooling allocator is that the
2192/// configuration values for the maximum number of instances, memories, tables,
2193/// etc, must all be fixed up-front. There's not always a clear answer as to
2194/// what these values should be so not all applications may be able to work
2195/// with this constraint.
2196///
2197/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
2198/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
2199/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
2200/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Thin wrapper over the runtime-level configuration; the builder methods
    // on this type write their settings directly into this value.
    config: wasmtime_runtime::PoolingInstanceAllocatorConfig,
}
2206
#[cfg(feature = "pooling-allocator")]
impl PoolingAllocationConfig {
    // All methods below are builder-style setters: each stores its value into
    // the wrapped runtime-level config (or its `limits`) and returns
    // `&mut Self` for chaining.

    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }

    /// Configures whether or not stacks used for async futures are reset to
    /// zero after usage.
    ///
    /// When the [`async_support`](Config::async_support) method is enabled for
    /// Wasmtime and the [`call_async`] variant
    /// of calling WebAssembly is used then Wasmtime will create a separate
    /// runtime execution stack for each future produced by [`call_async`].
    /// During the deallocation process Wasmtime won't by default reset the
    /// contents of the stack back to zero.
    ///
    /// When this option is enabled it can be seen as a defense-in-depth
    /// mechanism to reset a stack back to zero. This is not required for
    /// correctness and can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        self.config.async_stack_zeroing = enable;
        self
    }

    /// How much memory, in bytes, to keep resident for async stacks allocated
    /// with the pooling allocator.
    ///
    /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
    /// Wasmtime will reset the contents of async stacks back to zero upon
    /// deallocation. This option can be used to perform the zeroing operation
    /// with `memset` up to a certain threshold of bytes instead of using system
    /// calls to reset the stack to zero.
    ///
    /// Note that when using this option the memory with async stacks will
    /// never be decommitted.
    #[cfg(feature = "async")]
    #[cfg_attr(docsrs, doc(cfg(feature = "async")))]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        // Round the requested byte count up to a whole number of host pages.
        let size = round_up_to_pages(size as u64) as usize;
        self.config.async_stack_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each linear memory
    /// after deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// By default Wasmtime will use `madvise` to reset the entire contents of
    /// linear memory back to zero when a linear memory is deallocated. This
    /// option can be used to use `memset` instead to set memory back to zero
    /// which can, in some configurations, reduce the number of page faults
    /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        // Round the requested byte count up to a whole number of host pages.
        let size = round_up_to_pages(size as u64) as usize;
        self.config.linear_memory_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each table after
    /// deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// This option is the same as
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
    /// is applicable to tables instead.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        // Round the requested byte count up to a whole number of host pages.
        let size = round_up_to_pages(size as u64) as usize;
        self.config.table_keep_resident = size;
        self
    }

    /// The maximum number of concurrent component instances supported (default
    /// is `1000`).
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_component_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions,
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }

    /// The maximum number of core instances a single component may contain
    /// (default is `20`).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component will instantiate more core instances than `count`, then
    /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_core_instances_per_component = count;
        self
    }

    /// The maximum number of Wasm linear memories that a single component may
    /// transitively contain (default is `20`).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more linear memories than `count`,
    /// then the component will fail to instantiate.
    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_memories_per_component = count;
        self
    }

    /// The maximum number of tables that a single component may transitively
    /// contain (default is `20`).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }

    /// The maximum number of concurrent Wasm linear memories supported (default
    /// is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the pooling
    /// instance allocator.
    ///
    /// The pooling instance allocator allocates a memory pool, where each entry
    /// in the pool contains the reserved address space for each linear memory
    /// supported by an instance.
    ///
    /// The memory pool will reserve a large quantity of host process address
    /// space to elide the bounds checks required for correct WebAssembly memory
    /// semantics. Even with 64-bit address spaces, the address space is limited
    /// when dealing with a large number of linear memories.
    ///
    /// For example, on Linux x86_64, the userland address space limit is 128
    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
    /// GiB of space by default.
    pub fn total_memories(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_memories = count;
        self
    }

    /// The maximum number of concurrent tables supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a table pool, where each entry
    /// in the pool contains the space needed for each WebAssembly table
    /// supported by an instance (see `table_elements` to control the size of
    /// each table).
    pub fn total_tables(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_tables = count;
        self
    }

    /// The maximum number of execution stacks allowed for asynchronous
    /// execution, when enabled (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    #[cfg(feature = "async")]
    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_stacks = count;
        self
    }

    /// The maximum number of concurrent core instances supported (default is
    /// `1000`).
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_core_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
    /// metadata.
    ///
    /// The [`Instance`][crate::Instance] type has a static size but its
    /// `VMContext` metadata is dynamically sized depending on the module being
    /// instantiated. This size limit loosely correlates to the size of the Wasm
    /// module, taking into account factors such as:
    ///
    /// * number of functions
    /// * number of globals
    /// * number of memories
    /// * number of tables
    /// * number of function types
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.core_instance_size = size;
        self
    }

    /// The maximum number of defined tables for a core module (default is `1`).
    ///
    /// This value controls the capacity of the `VMTableDefinition` table in
    /// each instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `tables *
    /// sizeof(VMTableDefinition)` for each instance regardless of how many
    /// tables are defined by an instance's module.
    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
        self.config.limits.max_tables_per_module = tables;
        self
    }

    /// The maximum table elements for any table defined in a module (default is
    /// `10000`).
    ///
    /// If a table's minimum element limit is greater than this value, the
    /// module will fail to instantiate.
    ///
    /// If a table's maximum element limit is unbounded or greater than this
    /// value, the maximum will be `table_elements` for the purpose of any
    /// `table.grow` instruction.
    ///
    /// This value is used to reserve the maximum space for each supported
    /// table; table elements are pointer-sized in the Wasmtime runtime.
    /// Therefore, the space reserved for each instance is `tables *
    /// table_elements * sizeof::<*const ()>`.
    pub fn table_elements(&mut self, elements: u32) -> &mut Self {
        self.config.limits.table_elements = elements;
        self
    }

    /// The maximum number of defined linear memories for a module (default is
    /// `1`).
    ///
    /// This value controls the capacity of the `VMMemoryDefinition` table in
    /// each core instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `memories *
    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
    /// many memories are defined by the core instance's module.
    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
        self.config.limits.max_memories_per_module = memories;
        self
    }

    /// The maximum number of Wasm pages for any linear memory defined in a
    /// module (default is `160`).
    ///
    /// The default of `160` means at most 10 MiB of host memory may be
    /// committed for each instance.
    ///
    /// If a memory's minimum page limit is greater than this value, the module
    /// will fail to instantiate.
    ///
    /// If a memory's maximum page limit is unbounded or greater than this
    /// value, the maximum will be `memory_pages` for the purpose of any
    /// `memory.grow` instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance.
    ///
    /// The reservation size of each linear memory is controlled by the
    /// `static_memory_maximum_size` setting and this value cannot exceed the
    /// configured static memory maximum size.
    pub fn memory_pages(&mut self, pages: u64) -> &mut Self {
        self.config.limits.memory_pages = pages;
        self
    }

    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time executions switches from host to guest
    /// (or vice versa).
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `enable`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `disable`: never use MPK
    ///
    /// By default this value is `disabled`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    pub fn memory_protection_keys(&mut self, enable: MpkEnabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }

    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
    /// will use.
    ///
    /// This setting is only applicable when
    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
    /// has no effect.
    ///
    /// If multiple Wasmtime engines are used in the same process, note that all
    /// engines will share the same set of allocated keys; this setting will
    /// limit how many keys are allocated initially and thus available to all
    /// other engines.
    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
        self.config.max_memory_protection_keys = max;
        self
    }

    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`MpkEnabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    pub fn are_memory_protection_keys_available() -> bool {
        mpk::is_supported()
    }

    /// The maximum number of concurrent GC heaps supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a GC heap pool, where each
    /// entry in the pool contains the space needed for each GC heap used by a
    /// store.
    #[cfg(feature = "gc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "gc")))]
    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_gc_heaps = count;
        self
    }
}
2689
2690pub(crate) fn probestack_supported(arch: Architecture) -> bool {
2691    matches!(
2692        arch,
2693        Architecture::X86_64 | Architecture::Aarch64(_) | Architecture::Riscv64(_)
2694    )
2695}