// substrate_wasmtime/runtime.rs

1use crate::externals::MemoryCreator;
2use crate::trampoline::{MemoryCreatorProxy, StoreInstanceHandle};
3use crate::Module;
4use anyhow::{bail, Result};
5use std::cell::RefCell;
6use std::cmp;
7use std::convert::TryFrom;
8use std::fmt;
9use std::hash::{Hash, Hasher};
10use std::path::Path;
11use std::rc::{Rc, Weak};
12use std::sync::Arc;
13use wasmparser::Validator;
14use wasmtime_environ::settings::{self, Configurable, SetError};
15use wasmtime_environ::{ir, isa, isa::TargetIsa, wasm, CacheConfig, Tunables};
16use wasmtime_jit::{native, CompilationStrategy, Compiler};
17use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
18use wasmtime_runtime::{
19    debug_builtins, InstanceHandle, RuntimeMemoryCreator, SignalHandler, SignatureRegistry,
20    StackMapRegistry, VMExternRef, VMExternRefActivationsTable, VMInterrupts,
21    VMSharedSignatureIndex,
22};
23
24// Runtime Environment
25
26// Configuration
27
/// Global configuration options used to create an [`Engine`] and customize its
/// behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`]
#[derive(Clone)]
pub struct Config {
    // Shared Cranelift codegen settings (opt level, verifier, probestack, …).
    pub(crate) flags: settings::Builder,
    // Target-ISA-specific codegen settings for the host architecture.
    pub(crate) isa_flags: isa::Builder,
    // Runtime tunables: memory layout, guard sizes, debug info, interrupts.
    pub(crate) tunables: Tunables,
    // Which compiler backend to use (see [`Config::strategy`]).
    pub(crate) strategy: CompilationStrategy,
    // Compiled-module cache settings; disabled unless loaded from a file.
    pub(crate) cache_config: CacheConfig,
    // Profiling agent hook; `NullProfilerAgent` when profiling is off.
    pub(crate) profiler: Arc<dyn ProfilingAgent>,
    // Optional user-supplied allocator for wasm linear memories.
    pub(crate) memory_creator: Option<MemoryCreatorProxy>,
    // Native stack budget for wasm execution, in bytes (default 1 MB).
    pub(crate) max_wasm_stack: usize,
    // Wasm proposal toggles; mirrored into the validator and codegen flags.
    wasm_threads: bool,
    wasm_reference_types: bool,
    pub(crate) wasm_bulk_memory: bool,
    wasm_simd: bool,
    wasm_multi_value: bool,
}
49
50impl Config {
51    /// Creates a new configuration object with the default configuration
52    /// specified.
53    pub fn new() -> Config {
54        let mut tunables = Tunables::default();
55        if cfg!(windows) {
56            // For now, use a smaller footprint on Windows so that we don't
57            // don't outstrip the paging file.
58            tunables.static_memory_bound = cmp::min(tunables.static_memory_bound, 0x100);
59            tunables.static_memory_offset_guard_size =
60                cmp::min(tunables.static_memory_offset_guard_size, 0x10000);
61        }
62
63        let mut flags = settings::builder();
64
65        // There are two possible traps for division, and this way
66        // we get the proper one if code traps.
67        flags
68            .enable("avoid_div_traps")
69            .expect("should be valid flag");
70
71        // Invert cranelift's default-on verification to instead default off.
72        flags
73            .set("enable_verifier", "false")
74            .expect("should be valid flag");
75
76        // Turn on cranelift speed optimizations by default
77        flags
78            .set("opt_level", "speed")
79            .expect("should be valid flag");
80
81        // We don't use probestack as a stack limit mechanism
82        flags
83            .set("enable_probestack", "false")
84            .expect("should be valid flag");
85
86        Config {
87            tunables,
88            flags,
89            isa_flags: native::builder(),
90            strategy: CompilationStrategy::Auto,
91            cache_config: CacheConfig::new_cache_disabled(),
92            profiler: Arc::new(NullProfilerAgent),
93            memory_creator: None,
94            max_wasm_stack: 1 << 20,
95            wasm_threads: false,
96            wasm_reference_types: false,
97            wasm_bulk_memory: false,
98            wasm_simd: false,
99            wasm_multi_value: true,
100        }
101    }
102
103    /// Configures whether DWARF debug information will be emitted during
104    /// compilation.
105    ///
106    /// By default this option is `false`.
107    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
108        self.tunables.debug_info = enable;
109        self
110    }
111
112    /// Configures whether functions and loops will be interruptable via the
113    /// [`Store::interrupt_handle`] method.
114    ///
115    /// For more information see the documentation on
116    /// [`Store::interrupt_handle`].
117    ///
118    /// By default this option is `false`.
119    pub fn interruptable(&mut self, enable: bool) -> &mut Self {
120        self.tunables.interruptable = enable;
121        self
122    }
123
124    /// Configures the maximum amount of native stack space available to
125    /// executing WebAssembly code.
126    ///
127    /// WebAssembly code currently executes on the native call stack for its own
128    /// call frames. WebAssembly, however, also has well-defined semantics on
129    /// stack overflow. This is intended to be a knob which can help configure
130    /// how much native stack space a wasm module is allowed to consume. Note
131    /// that the number here is not super-precise, but rather wasm will take at
132    /// most "pretty close to this much" stack space.
133    ///
134    /// If a wasm call (or series of nested wasm calls) take more stack space
135    /// than the `size` specified then a stack overflow trap will be raised.
136    ///
137    /// By default this option is 1 MB.
138    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
139        self.max_wasm_stack = size;
140        self
141    }
142
143    /// Configures whether the WebAssembly threads proposal will be enabled for
144    /// compilation.
145    ///
146    /// The [WebAssembly threads proposal][threads] is not currently fully
147    /// standardized and is undergoing development. Additionally the support in
148    /// wasmtime itself is still being worked on. Support for this feature can
149    /// be enabled through this method for appropriate wasm modules.
150    ///
151    /// This feature gates items such as shared memories and atomic
152    /// instructions. Note that enabling the threads feature will
153    /// also enable the bulk memory feature.
154    ///
155    /// This is `false` by default.
156    ///
157    /// > **Note**: Wasmtime does not implement everything for the wasm threads
158    /// > spec at this time, so bugs, panics, and possibly segfaults should be
159    /// > expected. This should not be enabled in a production setting right
160    /// > now.
161    ///
162    /// [threads]: https://github.com/webassembly/threads
163    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
164        self.wasm_threads = enable;
165        // The threads proposal depends on the bulk memory proposal
166        if enable {
167            self.wasm_bulk_memory(true);
168        }
169        self
170    }
171
172    /// Configures whether the WebAssembly reference types proposal will be
173    /// enabled for compilation.
174    ///
175    /// The [WebAssembly reference types proposal][proposal] is not currently
176    /// fully standardized and is undergoing development. Additionally the
177    /// support in wasmtime itself is still being worked on. Support for this
178    /// feature can be enabled through this method for appropriate wasm
179    /// modules.
180    ///
181    /// This feature gates items such as the `externref` type and multiple tables
182    /// being in a module. Note that enabling the reference types feature will
183    /// also enable the bulk memory feature.
184    ///
185    /// This is `false` by default.
186    ///
187    /// > **Note**: Wasmtime does not implement everything for the reference
188    /// > types proposal spec at this time, so bugs, panics, and possibly
189    /// > segfaults should be expected. This should not be enabled in a
190    /// > production setting right now.
191    ///
192    /// [proposal]: https://github.com/webassembly/reference-types
193    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
194        self.wasm_reference_types = enable;
195
196        self.flags
197            .set("enable_safepoints", if enable { "true" } else { "false" })
198            .unwrap();
199
200        // The reference types proposal depends on the bulk memory proposal.
201        if enable {
202            self.wasm_bulk_memory(true);
203        }
204
205        self
206    }
207
208    /// Configures whether the WebAssembly SIMD proposal will be
209    /// enabled for compilation.
210    ///
211    /// The [WebAssembly SIMD proposal][proposal] is not currently
212    /// fully standardized and is undergoing development. Additionally the
213    /// support in wasmtime itself is still being worked on. Support for this
214    /// feature can be enabled through this method for appropriate wasm
215    /// modules.
216    ///
217    /// This feature gates items such as the `v128` type and all of its
218    /// operators being in a module.
219    ///
220    /// This is `false` by default.
221    ///
222    /// > **Note**: Wasmtime does not implement everything for the wasm simd
223    /// > spec at this time, so bugs, panics, and possibly segfaults should be
224    /// > expected. This should not be enabled in a production setting right
225    /// > now.
226    ///
227    /// [proposal]: https://github.com/webassembly/simd
228    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
229        self.wasm_simd = enable;
230        let val = if enable { "true" } else { "false" };
231        self.flags
232            .set("enable_simd", val)
233            .expect("should be valid flag");
234        self
235    }
236
237    /// Configures whether the WebAssembly bulk memory operations proposal will
238    /// be enabled for compilation.
239    ///
240    /// The [WebAssembly bulk memory operations proposal][proposal] is not
241    /// currently fully standardized and is undergoing development.
242    /// Additionally the support in wasmtime itself is still being worked on.
243    /// Support for this feature can be enabled through this method for
244    /// appropriate wasm modules.
245    ///
246    /// This feature gates items such as the `memory.copy` instruction, passive
247    /// data/table segments, etc, being in a module.
248    ///
249    /// This is `false` by default.
250    ///
251    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
252    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
253        self.wasm_bulk_memory = enable;
254        self
255    }
256
257    /// Configures whether the WebAssembly multi-value proposal will
258    /// be enabled for compilation.
259    ///
260    /// This feature gates functions and blocks returning multiple values in a
261    /// module, for example.
262    ///
263    /// This is `true` by default.
264    ///
265    /// [proposal]: https://github.com/webassembly/multi-value
266    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
267        self.wasm_multi_value = enable;
268        self
269    }
270
271    /// Configures which compilation strategy will be used for wasm modules.
272    ///
273    /// This method can be used to configure which compiler is used for wasm
274    /// modules, and for more documentation consult the [`Strategy`] enumeration
275    /// and its documentation.
276    ///
277    /// The default value for this is `Strategy::Auto`.
278    ///
279    /// # Errors
280    ///
281    /// Some compilation strategies require compile-time options of `wasmtime`
282    /// itself to be set, but if they're not set and the strategy is specified
283    /// here then an error will be returned.
284    pub fn strategy(&mut self, strategy: Strategy) -> Result<&mut Self> {
285        self.strategy = match strategy {
286            Strategy::Auto => CompilationStrategy::Auto,
287            Strategy::Cranelift => CompilationStrategy::Cranelift,
288            #[cfg(feature = "lightbeam")]
289            Strategy::Lightbeam => CompilationStrategy::Lightbeam,
290            #[cfg(not(feature = "lightbeam"))]
291            Strategy::Lightbeam => {
292                anyhow::bail!("lightbeam compilation strategy wasn't enabled at compile time");
293            }
294        };
295        Ok(self)
296    }
297
298    /// Creates a default profiler based on the profiling strategy choosen
299    ///
300    /// Profiler creation calls the type's default initializer where the purpose is
301    /// really just to put in place the type used for profiling.
302    pub fn profiler(&mut self, profile: ProfilingStrategy) -> Result<&mut Self> {
303        self.profiler = match profile {
304            ProfilingStrategy::JitDump => Arc::new(JitDumpAgent::new()?) as Arc<dyn ProfilingAgent>,
305            ProfilingStrategy::VTune => Arc::new(VTuneAgent::new()?) as Arc<dyn ProfilingAgent>,
306            ProfilingStrategy::None => Arc::new(NullProfilerAgent),
307        };
308        Ok(self)
309    }
310
311    /// Configures whether the debug verifier of Cranelift is enabled or not.
312    ///
313    /// When Cranelift is used as a code generation backend this will configure
314    /// it to have the `enable_verifier` flag which will enable a number of debug
315    /// checks inside of Cranelift. This is largely only useful for the
316    /// developers of wasmtime itself.
317    ///
318    /// The default value for this is `false`
319    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
320        let val = if enable { "true" } else { "false" };
321        self.flags
322            .set("enable_verifier", val)
323            .expect("should be valid flag");
324        self
325    }
326
327    /// Configures the Cranelift code generator optimization level.
328    ///
329    /// When the Cranelift code generator is used you can configure the
330    /// optimization level used for generated code in a few various ways. For
331    /// more information see the documentation of [`OptLevel`].
332    ///
333    /// The default value for this is `OptLevel::None`.
334    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
335        let val = match level {
336            OptLevel::None => "none",
337            OptLevel::Speed => "speed",
338            OptLevel::SpeedAndSize => "speed_and_size",
339        };
340        self.flags
341            .set("opt_level", val)
342            .expect("should be valid flag");
343        self
344    }
345
346    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
347    ///
348    /// When Cranelift is used as a code generation backend this will configure
349    /// it to replace NaNs with a single canonical value. This is useful for users
350    /// requiring entirely deterministic WebAssembly computation.
351    /// This is not required by the WebAssembly spec, so it is not enabled by default.
352    ///
353    /// The default value for this is `false`
354    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
355        let val = if enable { "true" } else { "false" };
356        self.flags
357            .set("enable_nan_canonicalization", val)
358            .expect("should be valid flag");
359        self
360    }
361
362    /// Allows settings another Cranelift flag defined by a flag name and value. This allows
363    /// fine-tuning of Cranelift settings.
364    ///
365    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
366    /// either; other `Config` functions should be preferred for stability.
367    ///
368    /// Note that this is marked as unsafe, because setting the wrong flag might break invariants,
369    /// resulting in execution hazards.
370    ///
371    /// # Errors
372    ///
373    /// This method can fail if the flag's name does not exist, or the value is not appropriate for
374    /// the flag type.
375    pub unsafe fn cranelift_other_flag(&mut self, name: &str, value: &str) -> Result<&mut Self> {
376        if let Err(err) = self.flags.set(name, value) {
377            match err {
378                SetError::BadName(_) => {
379                    // Try the target-specific flags.
380                    self.isa_flags.set(name, value)?;
381                }
382                _ => bail!(err),
383            }
384        }
385        Ok(self)
386    }
387
388    /// Loads cache configuration specified at `path`.
389    ///
390    /// This method will read the file specified by `path` on the filesystem and
391    /// attempt to load cache configuration from it. This method can also fail
392    /// due to I/O errors, misconfiguration, syntax errors, etc. For expected
393    /// syntax in the configuration file see the [documentation online][docs].
394    ///
395    /// By default cache configuration is not enabled or loaded.
396    ///
397    /// # Errors
398    ///
399    /// This method can fail due to any error that happens when loading the file
400    /// pointed to by `path` and attempting to load the cache configuration.
401    ///
402    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
403    pub fn cache_config_load(&mut self, path: impl AsRef<Path>) -> Result<&mut Self> {
404        self.cache_config = wasmtime_environ::CacheConfig::from_file(Some(path.as_ref()))?;
405        Ok(self)
406    }
407
408    /// Loads cache configuration from the system default path.
409    ///
410    /// This commit is the same as [`Config::cache_config_load`] except that it
411    /// does not take a path argument and instead loads the default
412    /// configuration present on the system. This is located, for example, on
413    /// Unix at `$HOME/.config/wasmtime/config.toml` and is typically created
414    /// with the `wasmtime config new` command.
415    ///
416    /// By default cache configuration is not enabled or loaded.
417    ///
418    /// # Errors
419    ///
420    /// This method can fail due to any error that happens when loading the
421    /// default system configuration. Note that it is not an error if the
422    /// default config file does not exist, in which case the default settings
423    /// for an enabled cache are applied.
424    ///
425    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
426    pub fn cache_config_load_default(&mut self) -> Result<&mut Self> {
427        self.cache_config = wasmtime_environ::CacheConfig::from_file(None)?;
428        Ok(self)
429    }
430
431    /// Sets a custom memory creator
432    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
433        self.memory_creator = Some(MemoryCreatorProxy { mem_creator });
434        self
435    }
436
437    /// Configures the maximum size, in bytes, where a linear memory is
438    /// considered static, above which it'll be considered dynamic.
439    ///
440    /// This function configures the threshold for wasm memories whether they're
441    /// implemented as a dynamically relocatable chunk of memory or a statically
442    /// located chunk of memory. The `max_size` parameter here is the size, in
443    /// bytes, where if the maximum size of a linear memory is below `max_size`
444    /// then it will be statically allocated with enough space to never have to
445    /// move. If the maximum size of a linear memory is larger than `max_size`
446    /// then wasm memory will be dynamically located and may move in memory
447    /// through growth operations.
448    ///
449    /// Specifying a `max_size` of 0 means that all memories will be dynamic and
450    /// may be relocated through `memory.grow`. Also note that if any wasm
451    /// memory's maximum size is below `max_size` then it will still reserve
452    /// `max_size` bytes in the virtual memory space.
453    ///
454    /// ## Static vs Dynamic Memory
455    ///
456    /// Linear memories represent contiguous arrays of bytes, but they can also
457    /// be grown through the API and wasm instructions. When memory is grown if
458    /// space hasn't been preallocated then growth may involve relocating the
459    /// base pointer in memory. Memories in Wasmtime are classified in two
460    /// different ways:
461    ///
462    /// * **static** - these memories preallocate all space necessary they'll
463    ///   ever need, meaning that the base pointer of these memories is never
464    ///   moved. Static memories may take more virtual memory space because of
465    ///   pre-reserving space for memories.
466    ///
467    /// * **dynamic** - these memories are not preallocated and may move during
468    ///   growth operations. Dynamic memories consume less virtual memory space
469    ///   because they don't need to preallocate space for future growth.
470    ///
471    /// Static memories can be optimized better in JIT code because once the
472    /// base address is loaded in a function it's known that we never need to
473    /// reload it because it never changes, `memory.grow` is generally a pretty
474    /// fast operation because the wasm memory is never relocated, and under
475    /// some conditions bounds checks can be elided on memory accesses.
476    ///
477    /// Dynamic memories can't be quite as heavily optimized because the base
478    /// address may need to be reloaded more often, they may require relocating
479    /// lots of data on `memory.grow`, and dynamic memories require
480    /// unconditional bounds checks on all memory accesses.
481    ///
482    /// ## Should you use static or dynamic memory?
483    ///
484    /// In general you probably don't need to change the value of this property.
485    /// The defaults here are optimized for each target platform to consume a
486    /// reasonable amount of physical memory while also generating speedy
487    /// machine code.
488    ///
489    /// One of the main reasons you may want to configure this today is if your
490    /// environment can't reserve virtual memory space for each wasm linear
491    /// memory. On 64-bit platforms wasm memories require a 6GB reservation by
492    /// default, and system limits may prevent this in some scenarios. In this
493    /// case you may wish to force memories to be allocated dynamically meaning
494    /// that the virtual memory footprint of creating a wasm memory should be
495    /// exactly what's used by the wasm itself.
496    ///
497    /// For 32-bit memories a static memory must contain at least 4GB of
498    /// reserved address space plus a guard page to elide any bounds checks at
499    /// all. Smaller static memories will use similar bounds checks as dynamic
500    /// memories.
501    ///
502    /// ## Default
503    ///
504    /// The default value for this property depends on the host platform. For
505    /// 64-bit platforms there's lots of address space available, so the default
506    /// configured here is 4GB. WebAssembly linear memories currently max out at
507    /// 4GB which means that on 64-bit platforms Wasmtime by default always uses
508    /// a static memory. This, coupled with a sufficiently sized guard region,
509    /// should produce the fastest JIT code on 64-bit platforms, but does
510    /// require a large address space reservation for each wasm memory.
511    ///
512    /// For 32-bit platforms this value defaults to 1GB. This means that wasm
513    /// memories whose maximum size is less than 1GB will be allocated
514    /// statically, otherwise they'll be considered dynamic.
515    pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self {
516        let max_pages = max_size / u64::from(wasmtime_environ::WASM_PAGE_SIZE);
517        self.tunables.static_memory_bound = u32::try_from(max_pages).unwrap_or(u32::max_value());
518        self
519    }
520
521    /// Configures the size, in bytes, of the guard region used at the end of a
522    /// static memory's address space reservation.
523    ///
524    /// All WebAssembly loads/stores are bounds-checked and generate a trap if
525    /// they're out-of-bounds. Loads and stores are often very performance
526    /// critical, so we want the bounds check to be as fast as possible!
527    /// Accelerating these memory accesses is the motivation for a guard after a
528    /// memory allocation.
529    ///
530    /// Memories (both static and dynamic) can be configured with a guard at the
531    /// end of them which consists of unmapped virtual memory. This unmapped
532    /// memory will trigger a memory access violation (e.g. segfault) if
533    /// accessed. This allows JIT code to elide bounds checks if it can prove
534    /// that an access, if out of bounds, would hit the guard region. This means
535    /// that having such a guard of unmapped memory can remove the need for
536    /// bounds checks in JIT code.
537    ///
538    /// For the difference between static and dynamic memories, see the
539    /// [`Config::static_memory_maximum_size`].
540    ///
541    /// ## How big should the guard be?
542    ///
543    /// In general, like with configuring `static_memory_maximum_size`, you
544    /// probably don't want to change this value from the defaults. Otherwise,
545    /// though, the size of the guard region affects the number of bounds checks
546    /// needed for generated wasm code. More specifically, loads/stores with
547    /// immediate offsets will generate bounds checks based on how big the guard
548    /// page is.
549    ///
550    /// For 32-bit memories a 4GB static memory is required to even start
551    /// removing bounds checks. A 4GB guard size will guarantee that the module
552    /// has zero bounds checks for memory accesses. A 2GB guard size will
553    /// eliminate all bounds checks with an immediate offset less than 2GB. A
554    /// guard size of zero means that all memory accesses will still have bounds
555    /// checks.
556    ///
557    /// ## Default
558    ///
559    /// The default value for this property is 2GB on 64-bit platforms. This
560    /// allows eliminating almost all bounds checks on loads/stores with an
561    /// immediate offset of less than 2GB. On 32-bit platforms this defaults to
562    /// 64KB.
563    ///
564    /// ## Static vs Dynamic Guard Size
565    ///
566    /// Note that for now the static memory guard size must be at least as large
567    /// as the dynamic memory guard size, so configuring this property to be
568    /// smaller than the dynamic memory guard size will have no effect.
569    pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
570        let guard_size = round_up_to_pages(guard_size);
571        let guard_size = cmp::max(guard_size, self.tunables.dynamic_memory_offset_guard_size);
572        self.tunables.static_memory_offset_guard_size = guard_size;
573        self
574    }
575
576    /// Configures the size, in bytes, of the guard region used at the end of a
577    /// dynamic memory's address space reservation.
578    ///
579    /// For the difference between static and dynamic memories, see the
580    /// [`Config::static_memory_maximum_size`]
581    ///
582    /// For more information about what a guard is, see the documentation on
583    /// [`Config::static_memory_guard_size`].
584    ///
585    /// Note that the size of the guard region for dynamic memories is not super
586    /// critical for performance. Making it reasonably-sized can improve
587    /// generated code slightly, but for maximum performance you'll want to lean
588    /// towards static memories rather than dynamic anyway.
589    ///
590    /// Also note that the dynamic memory guard size must be smaller than the
591    /// static memory guard size, so if a large dynamic memory guard is
592    /// specified then the static memory guard size will also be automatically
593    /// increased.
594    ///
595    /// ## Default
596    ///
597    /// This value defaults to 64KB.
598    pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
599        let guard_size = round_up_to_pages(guard_size);
600        self.tunables.dynamic_memory_offset_guard_size = guard_size;
601        self.tunables.static_memory_offset_guard_size =
602            cmp::max(guard_size, self.tunables.static_memory_offset_guard_size);
603        self
604    }
605
606    pub(crate) fn target_isa(&self) -> Box<dyn TargetIsa> {
607        self.isa_flags
608            .clone()
609            .finish(settings::Flags::new(self.flags.clone()))
610    }
611
612    pub(crate) fn validator(&self) -> Validator {
613        let mut ret = Validator::new();
614        ret.wasm_threads(self.wasm_threads)
615            .wasm_bulk_memory(self.wasm_bulk_memory)
616            .wasm_multi_value(self.wasm_multi_value)
617            .wasm_reference_types(self.wasm_reference_types)
618            .wasm_simd(self.wasm_simd);
619        return ret;
620    }
621
622    fn build_compiler(&self) -> Compiler {
623        let isa = self.target_isa();
624        Compiler::new(
625            isa,
626            self.strategy,
627            self.cache_config.clone(),
628            self.tunables.clone(),
629        )
630    }
631}
632
633fn round_up_to_pages(val: u64) -> u64 {
634    let page_size = region::page::size() as u64;
635    debug_assert!(page_size.is_power_of_two());
636    val.checked_add(page_size - 1)
637        .map(|val| val & !(page_size - 1))
638        .unwrap_or(u64::max_value() / page_size + 1)
639}
640
641impl Default for Config {
642    fn default() -> Config {
643        Config::new()
644    }
645}
646
647impl fmt::Debug for Config {
648    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
649        f.debug_struct("Config")
650            .field("debug_info", &self.tunables.debug_info)
651            .field("strategy", &self.strategy)
652            .field("wasm_threads", &self.wasm_threads)
653            .field("wasm_reference_types", &self.wasm_reference_types)
654            .field("wasm_bulk_memory", &self.wasm_bulk_memory)
655            .field("wasm_simd", &self.wasm_simd)
656            .field("wasm_multi_value", &self.wasm_multi_value)
657            .field(
658                "flags",
659                &settings::Flags::new(self.flags.clone()).to_string(),
660            )
661            .finish()
662    }
663}
664
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
// `non_exhaustive` so new strategies can be added without a breaking change.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value will
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A single-pass code generator that is faster than Cranelift but doesn't
    /// produce as high-quality code.
    ///
    /// To successfully pass this argument to [`Config::strategy`] the
    /// `lightbeam` feature of this crate must be enabled.
    Lightbeam,
}
693
/// Possible optimization levels for the Cranelift codegen backend.
///
/// Passed to [`Config::cranelift_opt_level`], which maps each variant onto
/// Cranelift's `opt_level` setting.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
707
/// Select which profiling technique to support.
///
/// Passed to [`Config::profiler`] to choose which profiling agent is
/// instantiated for the engine.
#[derive(Debug, Clone, Copy)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi", used with `VTune` on Linux.
    VTune,
}
721
722// Engine
723
/// An `Engine` which is a global context for compilation and management of wasm
/// modules.
///
/// An engine can be safely shared across threads and is a cheap cloneable
/// handle to the actual engine. The engine itself will be deallocated once all
/// references to it have gone away.
///
/// Engines store global configuration preferences such as compilation settings,
/// enabled features, etc. You'll likely only need at most one of these for a
/// program.
///
/// ## Engines and `Clone`
///
/// Using `clone` on an `Engine` is a cheap operation. It will not create an
/// entirely new engine, but rather just a new reference to the existing engine.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Engines and `Default`
///
/// You can create an engine with default configuration settings using
/// `Engine::default()`. Be sure to consult the documentation of [`Config`] for
/// default settings.
#[derive(Clone)]
pub struct Engine {
    // Atomically reference-counted shared state; `clone` only bumps this `Arc`.
    inner: Arc<EngineInner>,
}
750
/// State shared by every clone of an [`Engine`].
struct EngineInner {
    /// The configuration this engine was created with (cloned in `Engine::new`).
    config: Config,
    /// The JIT compiler built from `config` via `Config::build_compiler`.
    compiler: Compiler,
}
755
756impl Engine {
757    /// Creates a new [`Engine`] with the specified compilation and
758    /// configuration settings.
759    pub fn new(config: &Config) -> Engine {
760        debug_builtins::ensure_exported();
761        Engine {
762            inner: Arc::new(EngineInner {
763                config: config.clone(),
764                compiler: config.build_compiler(),
765            }),
766        }
767    }
768
769    /// Returns the configuration settings that this engine is using.
770    pub fn config(&self) -> &Config {
771        &self.inner.config
772    }
773
774    pub(crate) fn compiler(&self) -> &Compiler {
775        &self.inner.compiler
776    }
777
778    /// Returns whether the engine `a` and `b` refer to the same configuration.
779    pub fn same(a: &Engine, b: &Engine) -> bool {
780        Arc::ptr_eq(&a.inner, &b.inner)
781    }
782}
783
784impl Default for Engine {
785    fn default() -> Engine {
786        Engine::new(&Config::default())
787    }
788}
789
790// Store
791
/// A `Store` is a collection of WebAssembly instances and host-defined items.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// `Store`. For example instances, functions, globals, and tables are all
/// attached to a `Store`. Instances are created by instantiating a [`Module`]
/// within a `Store`.
///
/// `Store` is not thread-safe and cannot be sent to other threads. All items
/// which refer to a `Store` additionally are not threadsafe and can only be
/// used on the original thread that they were created on.
///
/// A `Store` is not intended to be a long-lived object in a program. No form of
/// GC is implemented at this time so once an instance is created within a
/// `Store` it will not be deallocated until all references to the `Store` have
/// gone away (this includes all references to items in the store). This makes
/// `Store` unsuitable for creating an unbounded number of instances in it
/// because `Store` will never release this memory. It's instead recommended to
/// have a long-lived [`Engine`] and instead create a `Store` for a more scoped
/// portion of your application.
///
/// # Stores and `Clone`
///
/// Using `clone` on a `Store` is a cheap operation. It will not create an
/// entirely new store, but rather just a new reference to the existing object.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`] for more information).
#[derive(Clone)]
pub struct Store {
    // Reference-counted (non-atomic) shared state; `clone` only bumps this `Rc`.
    inner: Rc<StoreInner>,
}
827
/// Shared state behind every clone of a [`Store`] handle.
pub(crate) struct StoreInner {
    /// The engine (and thus compiler/configuration) this store was created from.
    engine: Engine,
    /// Interrupt state shared with any [`InterruptHandle`]s handed out by
    /// `Store::interrupt_handle`.
    interrupts: Arc<VMInterrupts>,
    /// Registry mapping shared signature indices to wasm/native signatures.
    signatures: RefCell<SignatureRegistry>,
    /// Every instance registered with this store; deallocated in `Drop`.
    instances: RefCell<Vec<InstanceHandle>>,
    /// Optional custom signal handler for this store.
    signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
    /// `(start, end)` address pairs of JIT code registered with this store
    /// (see `Store::register_jit_code` / `Store::is_in_jit_code`).
    jit_code_ranges: RefCell<Vec<(usize, usize)>>,
    /// Table of live `VMExternRef` activations, consumed by `Store::gc`.
    externref_activations_table: VMExternRefActivationsTable,
    /// Stack maps for this store's JIT code, consumed by `Store::gc`.
    stack_map_registry: StackMapRegistry,
}
838
/// Newtype over `VMExternRef` used as a map key, forwarding equality and
/// hashing to `VMExternRef`'s own `eq`/`hash` (whose semantics are defined by
/// `wasmtime_runtime`).
struct HostInfoKey(VMExternRef);

impl PartialEq for HostInfoKey {
    fn eq(&self, rhs: &Self) -> bool {
        // Delegate to VMExternRef's explicit comparison rather than a derived
        // field-wise comparison.
        VMExternRef::eq(&self.0, &rhs.0)
    }
}

// `eq` above is assumed reflexive/transitive per VMExternRef's contract —
// NOTE(review): confirm against `wasmtime_runtime`'s documentation.
impl Eq for HostInfoKey {}

impl Hash for HostInfoKey {
    fn hash<H>(&self, hasher: &mut H)
    where
        H: Hasher,
    {
        // Must stay consistent with `PartialEq::eq` above.
        VMExternRef::hash(&self.0, hasher);
    }
}
857
impl Store {
    /// Creates a new store to be associated with the given [`Engine`].
    pub fn new(engine: &Engine) -> Store {
        // Ensure that wasmtime_runtime's signal handlers are configured. Note
        // that at the `Store` level it means we should perform this
        // once-per-thread. Platforms like Unix, however, only require this
        // once-per-program. In any case this is safe to call many times and
        // each one that's not relevant just won't do anything.
        wasmtime_runtime::init_traps();

        Store {
            inner: Rc::new(StoreInner {
                engine: engine.clone(),
                interrupts: Arc::new(Default::default()),
                signatures: RefCell::new(Default::default()),
                instances: RefCell::new(Vec::new()),
                signal_handler: RefCell::new(None),
                jit_code_ranges: RefCell::new(Vec::new()),
                externref_activations_table: VMExternRefActivationsTable::new(),
                stack_map_registry: StackMapRegistry::default(),
            }),
        }
    }

    /// Reconstructs a `Store` handle from its shared inner state (used e.g.
    /// after upgrading a `Weak<StoreInner>`).
    pub(crate) fn from_inner(inner: Rc<StoreInner>) -> Store {
        Store { inner }
    }

    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        &self.inner.engine
    }

    /// Returns the engine's configured custom memory creator, if any, as a
    /// [`RuntimeMemoryCreator`] trait object.
    pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
        self.engine()
            .config()
            .memory_creator
            .as_ref()
            .map(|x| x as _)
    }

    /// Looks up the wasm function type registered under `sig_index`.
    ///
    /// # Panics
    ///
    /// Panics if `sig_index` was never registered with this store.
    pub(crate) fn lookup_signature(&self, sig_index: VMSharedSignatureIndex) -> wasm::WasmFuncType {
        self.inner
            .signatures
            .borrow()
            .lookup_wasm(sig_index)
            .expect("failed to lookup signature")
    }

    /// Looks up both the wasm-level and native (`ir`) signatures registered
    /// under `sig_index`.
    ///
    /// # Panics
    ///
    /// Panics if `sig_index` was never registered with this store.
    pub(crate) fn lookup_wasm_and_native_signatures(
        &self,
        sig_index: VMSharedSignatureIndex,
    ) -> (wasm::WasmFuncType, ir::Signature) {
        self.inner
            .signatures
            .borrow()
            .lookup_wasm_and_native_signatures(sig_index)
            .expect("failed to lookup signature")
    }

    /// Registers a (wasm, native) signature pair with this store's signature
    /// registry, returning its shared index.
    pub(crate) fn register_signature(
        &self,
        wasm_sig: wasm::WasmFuncType,
        native: ir::Signature,
    ) -> VMSharedSignatureIndex {
        self.inner
            .signatures
            .borrow_mut()
            .register(wasm_sig, native)
    }

    /// Mutably borrows this store's signature registry. The returned guard
    /// keeps the underlying `RefCell` exclusively borrowed until dropped.
    pub(crate) fn signatures_mut(&self) -> std::cell::RefMut<'_, SignatureRegistry> {
        self.inner.signatures.borrow_mut()
    }

    /// Returns whether or not the given address falls within the JIT code
    /// managed by the compiler
    pub(crate) fn is_in_jit_code(&self, addr: usize) -> bool {
        self.inner
            .jit_code_ranges
            .borrow()
            .iter()
            // Ranges are half-open: `start` inclusive, `end` exclusive.
            .any(|(start, end)| *start <= addr && addr < *end)
    }

    /// Records the given `(start, end)` JIT code ranges with this store
    /// (see [`Store::is_in_jit_code`]).
    pub(crate) fn register_jit_code(&self, mut ranges: impl Iterator<Item = (usize, usize)>) {
        // Check if we already registered these JIT code ranges by probing for
        // the first range's start address. If it's already known, the whole
        // batch is assumed to have been registered previously and is skipped.
        match ranges.next() {
            None => (),
            Some(first) => {
                if !self.is_in_jit_code(first.0) {
                    // The range is not registered -- add all ranges (including
                    // first one) to the jit_code_ranges.
                    let mut jit_code_ranges = self.inner.jit_code_ranges.borrow_mut();
                    jit_code_ranges.push(first);
                    jit_code_ranges.extend(ranges);
                }
            }
        }
    }

    /// Registers the stack maps of every finished function in `module` with
    /// this store's [`StackMapRegistry`], keyed by each function's code range.
    pub(crate) fn register_stack_maps(&self, module: &Module) {
        let module = &module.compiled_module();
        self.stack_map_registry().register_stack_maps(
            module
                .finished_functions()
                .values()
                .zip(module.stack_maps().values())
                // SAFETY(review): assumes each `func` points at valid finished
                // JIT code for this module; only its address and length are
                // read here to compute the code range.
                .map(|(func, stack_maps)| unsafe {
                    let ptr = (**func).as_ptr();
                    let len = (**func).len();
                    let start = ptr as usize;
                    let end = ptr as usize + len;
                    let range = start..end;
                    (range, &stack_maps[..])
                }),
        );
    }

    /// Records `handle` with this store so it is deallocated when the store is
    /// dropped (see `Drop for StoreInner`), and pairs it with a clone of the
    /// store.
    ///
    /// # Safety
    ///
    /// The caller must guarantee `handle` is a valid instance handle that this
    /// store may safely deallocate on drop.
    pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
        self.inner.instances.borrow_mut().push(handle.clone());
        StoreInstanceHandle {
            store: self.clone(),
            handle,
        }
    }

    /// Pairs an instance handle that is already registered with this store
    /// (debug-asserted via `vmctx` pointer comparison) with a clone of the
    /// store, without registering it a second time.
    pub(crate) fn existing_instance_handle(&self, handle: InstanceHandle) -> StoreInstanceHandle {
        debug_assert!(self
            .inner
            .instances
            .borrow()
            .iter()
            .any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
        StoreInstanceHandle {
            store: self.clone(),
            handle,
        }
    }

    /// Returns a non-owning reference to this store's shared state.
    pub(crate) fn weak(&self) -> Weak<StoreInner> {
        Rc::downgrade(&self.inner)
    }

    /// Attempts to recover a `Store` from a weak reference, returning `None`
    /// if all strong handles have already been dropped.
    pub(crate) fn upgrade(weak: &Weak<StoreInner>) -> Option<Self> {
        let inner = weak.upgrade()?;
        Some(Self { inner })
    }

    /// Immutably borrows this store's custom signal handler slot.
    pub(crate) fn signal_handler(&self) -> std::cell::Ref<'_, Option<Box<SignalHandler<'static>>>> {
        self.inner.signal_handler.borrow()
    }

    /// Mutably borrows this store's custom signal handler slot.
    pub(crate) fn signal_handler_mut(
        &self,
    ) -> std::cell::RefMut<'_, Option<Box<SignalHandler<'static>>>> {
        self.inner.signal_handler.borrow_mut()
    }

    /// Returns the interrupt state shared between this store and any
    /// [`InterruptHandle`]s it hands out.
    pub(crate) fn interrupts(&self) -> &Arc<VMInterrupts> {
        &self.inner.interrupts
    }

    /// Returns whether the stores `a` and `b` refer to the same underlying
    /// `Store`.
    ///
    /// Because the `Store` type is reference counted multiple clones may point
    /// to the same underlying storage, and this method can be used to determine
    /// whether two stores are indeed the same.
    pub fn same(a: &Store, b: &Store) -> bool {
        Rc::ptr_eq(&a.inner, &b.inner)
    }

    /// Creates an [`InterruptHandle`] which can be used to interrupt the
    /// execution of instances within this `Store`.
    ///
    /// An [`InterruptHandle`] handle is a mechanism of ensuring that guest code
    /// doesn't execute for too long. For example it's used to prevent wasm
    /// programs for executing infinitely in infinite loops or recursive call
    /// chains.
    ///
    /// The [`InterruptHandle`] type is sendable to other threads so you can
    /// interact with it even while the thread with this `Store` is executing
    /// wasm code.
    ///
    /// There's one method on an interrupt handle:
    /// [`InterruptHandle::interrupt`]. This method is used to generate an
    /// interrupt and cause wasm code to exit "soon".
    ///
    /// ## When are interrupts delivered?
    ///
    /// The term "interrupt" here refers to one of two different behaviors that
    /// are interrupted in wasm:
    ///
    /// * The head of every loop in wasm has a check to see if it's interrupted.
    /// * The prologue of every function has a check to see if it's interrupted.
    ///
    /// This interrupt mechanism makes no attempt to signal interrupts to
    /// native code. For example if a host function is blocked, then sending
    /// an interrupt will not interrupt that operation.
    ///
    /// Interrupts are consumed as soon as possible when wasm itself starts
    /// executing. This means that if you interrupt wasm code then it basically
    /// guarantees that the next time wasm is executing on the target thread it
    /// will return quickly (either normally if it were already in the process
    /// of returning or with a trap from the interrupt). Once an interrupt
    /// trap is generated then an interrupt is consumed, and further execution
    /// will not be interrupted (unless another interrupt is set).
    ///
    /// When implementing interrupts you'll want to ensure that the delivery of
    /// interrupts into wasm code is also handled in your host imports and
    /// functionality. Host functions need to either execute for bounded amounts
    /// of time or you'll need to arrange for them to be interrupted as well.
    ///
    /// ## Return Value
    ///
    /// This function returns a `Result` since interrupts are not always
    /// enabled. Interrupts are enabled via the [`Config::interruptable`]
    /// method, and if this store's [`Config`] hasn't been configured to enable
    /// interrupts then an error is returned.
    ///
    /// ## Examples
    ///
    /// ```
    /// # use anyhow::Result;
    /// # use wasmtime::*;
    /// # fn main() -> Result<()> {
    /// // Enable interruptable code via `Config` and then create an interrupt
    /// // handle which we'll use later to interrupt running code.
    /// let engine = Engine::new(Config::new().interruptable(true));
    /// let store = Store::new(&engine);
    /// let interrupt_handle = store.interrupt_handle()?;
    ///
    /// // Compile and instantiate a small example with an infinite loop.
    /// let module = Module::new(&engine, r#"
    ///     (func (export "run") (loop br 0))
    /// "#)?;
    /// let instance = Instance::new(&store, &module, &[])?;
    /// let run = instance
    ///     .get_func("run")
    ///     .ok_or(anyhow::format_err!("failed to find `run` function export"))?
    ///     .get0::<()>()?;
    ///
    /// // Spin up a thread to send us an interrupt in a second
    /// std::thread::spawn(move || {
    ///     std::thread::sleep(std::time::Duration::from_secs(1));
    ///     interrupt_handle.interrupt();
    /// });
    ///
    /// let trap = run().unwrap_err();
    /// assert!(trap.to_string().contains("wasm trap: interrupt"));
    /// # Ok(())
    /// # }
    /// ```
    pub fn interrupt_handle(&self) -> Result<InterruptHandle> {
        if self.engine().config().tunables.interruptable {
            Ok(InterruptHandle {
                interrupts: self.interrupts().clone(),
            })
        } else {
            bail!("interrupts aren't enabled for this `Store`")
        }
    }

    /// Returns the table of live `VMExternRef` activations, consumed by
    /// [`Store::gc`].
    pub(crate) fn externref_activations_table(&self) -> &VMExternRefActivationsTable {
        &self.inner.externref_activations_table
    }

    /// Returns the registry of stack maps for this store's JIT code, consumed
    /// by [`Store::gc`].
    pub(crate) fn stack_map_registry(&self) -> &StackMapRegistry {
        &self.inner.stack_map_registry
    }

    /// Perform garbage collection of `ExternRef`s.
    pub fn gc(&self) {
        // For this crate's API, we ensure that `set_stack_canary` invariants
        // are upheld for all host-->Wasm calls, and we register every module
        // used with this store in `self.inner.stack_map_registry`.
        unsafe {
            wasmtime_runtime::gc(
                &self.inner.stack_map_registry,
                &self.inner.externref_activations_table,
            );
        }
    }
}
1145
1146impl Default for Store {
1147    fn default() -> Store {
1148        Store::new(&Engine::default())
1149    }
1150}
1151
1152impl fmt::Debug for Store {
1153    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1154        let inner = &*self.inner as *const StoreInner;
1155        f.debug_struct("Store").field("inner", &inner).finish()
1156    }
1157}
1158
impl Drop for StoreInner {
    fn drop(&mut self) {
        // Deallocate every instance registered with this store.
        // SAFETY(review): these handles were pushed in `Store::add_instance`,
        // whose contract requires that the store may deallocate them on drop;
        // by this point the last `Store` clone is gone, so nothing else should
        // reference them.
        for instance in self.instances.get_mut().iter() {
            unsafe {
                instance.dealloc();
            }
        }
    }
}
1168
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
/// This structure is created by the [`Store::interrupt_handle`] method.
pub struct InterruptHandle {
    // Interrupt state shared with the originating store; setting the flag
    // here is observed by executing wasm (see `Store::interrupt_handle`).
    interrupts: Arc<VMInterrupts>,
}
1176
1177impl InterruptHandle {
1178    /// Flags that execution within this handle's original [`Store`] should be
1179    /// interrupted.
1180    ///
1181    /// This will not immediately interrupt execution of wasm modules, but
1182    /// rather it will interrupt wasm execution of loop headers and wasm
1183    /// execution of function entries. For more information see
1184    /// [`Store::interrupt_handle`].
1185    pub fn interrupt(&self) {
1186        self.interrupts.interrupt()
1187    }
1188}
1189
1190fn _assert_send_sync() {
1191    fn _assert<T: Send + Sync>() {}
1192    _assert::<Engine>();
1193    _assert::<Config>();
1194    _assert::<InterruptHandle>();
1195}
1196
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Module;
    use tempfile::TempDir;

    #[test]
    fn cache_accounts_for_opt_level() -> Result<()> {
        // Write a cache configuration pointing at a temp directory so each
        // test run starts with an empty cache.
        let td = TempDir::new()?;
        let config_path = td.path().join("config.toml");
        std::fs::write(
            &config_path,
            &format!(
                "
                    [cache]
                    enabled = true
                    directory = '{}'
                ",
                td.path().join("cache").display()
            ),
        )?;

        // Compiles the same trivial module twice with `cfg`, asserting a cache
        // miss on the first compile and a hit on the second. Since all
        // configurations below share one cache directory, a fresh miss also
        // proves the config change produced a distinct cache key.
        fn assert_miss_then_hit(cfg: &Config) -> Result<()> {
            let engine = Engine::new(cfg);
            Module::new(&engine, "(module (func))")?;
            assert_eq!(engine.config().cache_config.cache_hits(), 0);
            assert_eq!(engine.config().cache_config.cache_misses(), 1);
            Module::new(&engine, "(module (func))")?;
            assert_eq!(engine.config().cache_config.cache_hits(), 1);
            assert_eq!(engine.config().cache_config.cache_misses(), 1);
            Ok(())
        }

        let mut cfg = Config::new();
        cfg.cranelift_opt_level(OptLevel::None)
            .cache_config_load(&config_path)?;
        assert_miss_then_hit(&cfg)?;

        let mut cfg = Config::new();
        cfg.cranelift_opt_level(OptLevel::Speed)
            .cache_config_load(&config_path)?;
        assert_miss_then_hit(&cfg)?;

        let mut cfg = Config::new();
        cfg.cranelift_opt_level(OptLevel::SpeedAndSize)
            .cache_config_load(&config_path)?;
        assert_miss_then_hit(&cfg)?;

        // FIXME(#1523) need debuginfo on aarch64 before we run this test there
        if !cfg!(target_arch = "aarch64") {
            let mut cfg = Config::new();
            cfg.debug_info(true).cache_config_load(&config_path)?;
            assert_miss_then_hit(&cfg)?;
        }

        Ok(())
    }
}