wasmtime_runtime/instance/allocator.rs
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle};
use crate::memory::Memory;
use crate::mpk::ProtectionKey;
use crate::table::{Table, TableElementType};
use crate::{CompiledModuleId, ModuleRuntimeInfo, Store, VMGcRef, I31};
use anyhow::{anyhow, bail, Result};
use std::{alloc, any::Any, mem, ptr, sync::Arc};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
    MemoryInitializer, MemoryPlan, Module, PrimaryMap, TableInitialValue, TablePlan, TableSegment,
    Trap, VMOffsets, WasmValType, WASM_PAGE_SIZE,
};

#[cfg(feature = "gc")]
use crate::{GcHeap, GcRuntime};

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    component::{Component, VMComponentOffsets},
    StaticModuleIndex,
};

mod on_demand;
pub use self::on_demand::OnDemandInstanceAllocator;

#[cfg(feature = "pooling-allocator")]
mod pooling;
#[cfg(feature = "pooling-allocator")]
pub use self::pooling::{InstanceLimits, PoolingInstanceAllocator, PoolingInstanceAllocatorConfig};

/// Represents a request for a new runtime instance.
pub struct InstanceAllocationRequest<'a> {
    /// The info related to the compiled version of this module,
    /// needed for instantiation: function metadata, JIT code
    /// addresses, precomputed images for lazy memory and table
    /// initialization, and the like. This Arc is cloned and held for
    /// the lifetime of the instance.
    pub runtime_info: &'a Arc<dyn ModuleRuntimeInfo>,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// The host state to associate with the instance.
    pub host_state: Box<dyn Any + Send + Sync>,

    /// A pointer to the "store" for this instance to be allocated. The store
    /// correlates with the `Store` in wasmtime itself, and lots of contextual
    /// information about the execution of wasm can be learned through the
    /// store.
    ///
    /// Note that this is a raw pointer and has a static lifetime, both of which
    /// are a bit of a lie. This is done purely so a store can learn about
    /// itself when it gets called as a host function, and additionally so this
    /// runtime can access internals as necessary (such as the
    /// VMExternRefActivationsTable or the resource limiter methods).
    ///
    /// Note that this ends up being a self-pointer to the instance when stored.
    /// The reason is that the instance itself is then stored within the store.
    /// We use a number of `PhantomPinned` declarations to indicate this to the
    /// compiler. More info on this in `wasmtime/src/store.rs`.
    pub store: StorePtr,

    /// Indicates the `--wmemcheck` flag.
    pub wmemcheck: bool,

    /// Request that the instance's memories be protected by a specific
    /// protection key.
    pub pkey: Option<ProtectionKey>,
}

/// A pointer to a Store. This `Option<*mut dyn Store>` is wrapped in a struct
/// so that the function to create a `&mut dyn Store` is a method on a member of
/// `InstanceAllocationRequest`, rather than on a `&mut InstanceAllocationRequest`
/// itself, because several use-sites require a split mut borrow on the
/// `InstanceAllocationRequest`.
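///
/// # Example
///
/// A minimal sketch of the split borrow this wrapper enables (the
/// `use_request` function is hypothetical, purely for illustration):
///
/// ```ignore
/// fn use_request(request: &mut InstanceAllocationRequest<'_>) {
///     // Borrowing `request.store` mutably borrows only that one field...
///     let _store = unsafe { request.store.get() };
///     // ...so the other fields of `request` remain independently usable.
///     let _module = request.runtime_info.module();
/// }
/// ```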
pub struct StorePtr(Option<*mut dyn Store>);

impl StorePtr {
    /// A pointer to no Store.
    pub fn empty() -> Self {
        Self(None)
    }

    /// A pointer to a Store.
    pub fn new(ptr: *mut dyn Store) -> Self {
        Self(Some(ptr))
    }

    /// The raw contents of this struct.
    pub fn as_raw(&self) -> Option<*mut dyn Store> {
        self.0
    }

    /// Use the `StorePtr` as a mut ref to the Store.
    ///
    /// Safety: must not be used outside the original lifetime of the borrow.
    pub(crate) unsafe fn get(&mut self) -> Option<&mut dyn Store> {
        match self.0 {
            Some(ptr) => Some(&mut *ptr),
            None => None,
        }
    }
}

/// The index of a memory allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct MemoryAllocationIndex(u32);

impl Default for MemoryAllocationIndex {
    fn default() -> Self {
        // A default `MemoryAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        MemoryAllocationIndex(u32::MAX)
    }
}

impl MemoryAllocationIndex {
    /// Get the underlying index of this `MemoryAllocationIndex`.
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a table allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct TableAllocationIndex(u32);

impl Default for TableAllocationIndex {
    fn default() -> Self {
        // A default `TableAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        TableAllocationIndex(u32::MAX)
    }
}

impl TableAllocationIndex {
    /// Get the underlying index of this `TableAllocationIndex`.
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a GC heap allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct GcHeapAllocationIndex(u32);

impl Default for GcHeapAllocationIndex {
    fn default() -> Self {
        // A default `GcHeapAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        GcHeapAllocationIndex(u32::MAX)
    }
}

impl GcHeapAllocationIndex {
    /// Get the underlying index of this `GcHeapAllocationIndex`.
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// Trait that represents the hooks needed to implement an instance allocator.
///
/// Implement this trait when implementing new instance allocators, but don't
/// use this trait when you need an instance allocator. Instead use the
/// `InstanceAllocator` trait for that, which has additional helper methods and
/// a blanket implementation for all types that implement this trait.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime
/// internals to implement correctly.
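///
/// # Example
///
/// A structural sketch of how the two traits fit together, with a
/// hypothetical `MyAllocator` type and all method bodies elided:
///
/// ```ignore
/// struct MyAllocator { /* ... */ }
///
/// unsafe impl InstanceAllocatorImpl for MyAllocator {
///     // ... implement every required hook ...
/// }
///
/// // The blanket impl in this module now applies, so `MyAllocator` can be
/// // passed wherever an allocator is expected:
/// fn instantiate(allocator: &dyn InstanceAllocator) { /* ... */ }
/// ```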
pub unsafe trait InstanceAllocatorImpl {
    /// Validate whether a component (including all of its contained core
    /// modules) is allocatable by this instance allocator.
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()>;

    /// Validate whether a module is allocatable by this instance allocator.
    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()>;

    /// Increment the count of concurrent component instances that are currently
    /// allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent component instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    //
    // Note: It would be nice to have an associated type that on construction
    // does the increment and on drop does the decrement, but there are two
    // problems with this:
    //
    // 1. This trait's implementations are always used as trait objects, and
    //    associated types are not object safe.
    //
    // 2. We would want a parameterized `Drop` implementation so that we could
    //    pass in the `InstanceAllocatorImpl` on drop, but this doesn't exist in
    //    Rust. Therefore, we would be forced to add reference counting and
    //    stuff like that to keep a handle on the instance allocator from this
    //    theoretical type. That's a bummer.
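    //
    // For illustration, the rejected guard would look roughly like this
    // (hypothetical code, not part of this crate): to decrement on drop it
    // must hold a handle back to the allocator, which is exactly the
    // reference counting lamented above.
    //
    //     struct InstanceCountGuard {
    //         allocator: Arc<dyn InstanceAllocatorImpl>,
    //     }
    //
    //     impl Drop for InstanceCountGuard {
    //         fn drop(&mut self) {
    //             self.allocator.decrement_component_instance_count();
    //         }
    //     }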
    fn increment_component_instance_count(&self) -> Result<()>;

    /// The dual of `increment_component_instance_count`.
    fn decrement_component_instance_count(&self);

    /// Increment the count of concurrent core module instances that are
    /// currently allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent core module instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    fn increment_core_instance_count(&self) -> Result<()>;

    /// The dual of `increment_core_instance_count`.
    fn decrement_core_instance_count(&self);

    /// Allocate a memory for an instance.
    ///
    /// # Unsafety
    ///
    /// The memory and its associated module must have already been validated by
    /// `Self::validate_module` and passed that validation.
    unsafe fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        memory_plan: &MemoryPlan,
        memory_index: DefinedMemoryIndex,
    ) -> Result<(MemoryAllocationIndex, Memory)>;

    /// Deallocate an instance's previously allocated memory.
    ///
    /// # Unsafety
    ///
    /// The memory must have previously been allocated by
    /// `Self::allocate_memory`, be at the given index, and must currently be
    /// allocated. It must never be used again.
    unsafe fn deallocate_memory(
        &self,
        memory_index: DefinedMemoryIndex,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    );

    /// Allocate a table for an instance.
    ///
    /// # Unsafety
    ///
    /// The table and its associated module must have already been validated by
    /// `Self::validate_module` and passed that validation.
    unsafe fn allocate_table(
        &self,
        req: &mut InstanceAllocationRequest,
        table_plan: &TablePlan,
        table_index: DefinedTableIndex,
    ) -> Result<(TableAllocationIndex, Table)>;

    /// Deallocate an instance's previously allocated table.
    ///
    /// # Unsafety
    ///
    /// The table must have previously been allocated by `Self::allocate_table`,
    /// be at the given index, and must currently be allocated. It must never be
    /// used again.
    unsafe fn deallocate_table(
        &self,
        table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        table: Table,
    );

    /// Allocates a fiber stack for calling async functions on.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;

    /// Deallocates a fiber stack that was previously allocated with
    /// `allocate_fiber_stack`.
    ///
    /// # Safety
    ///
    /// The provided stack is required to have been allocated with
    /// `allocate_fiber_stack`.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);

    /// Allocate a GC heap for allocating Wasm GC objects within.
    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)>;

    /// Deallocate a GC heap that was previously allocated with
    /// `allocate_gc_heap`.
    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(&self, allocation_index: GcHeapAllocationIndex, gc_heap: Box<dyn GcHeap>);

    /// Purges all lingering resources related to `module` from within this
    /// allocator.
    ///
    /// Primarily present for the pooling allocator to remove mappings of
    /// this module from slots in linear memory.
    fn purge_module(&self, module: CompiledModuleId);

    /// Use the next available protection key.
    ///
    /// The pooling allocator can use memory protection keys (MPK) for
    /// compressing the guard regions protecting against OOB. Each
    /// pool-allocated store needs its own key.
    fn next_available_pkey(&self) -> Option<ProtectionKey>;

    /// Restrict access to memory regions protected by `pkey`.
    ///
    /// This is useful for the pooling allocator, which can use memory
    /// protection keys (MPK). Note: this may still allow access to other
    /// protection keys, such as the default kernel key; see implementations of
    /// this.
    fn restrict_to_pkey(&self, pkey: ProtectionKey);

    /// Allow access to memory regions protected by any protection key.
    fn allow_all_pkeys(&self);
}

/// A thing that can allocate instances.
///
/// Don't implement this trait directly; instead, implement
/// `InstanceAllocatorImpl` and you'll get this trait for free via a blanket
/// impl.
pub trait InstanceAllocator: InstanceAllocatorImpl {
    /// Validate whether a component (including all of its contained core
    /// modules) is allocatable with this instance allocator.
    #[cfg(feature = "component-model")]
    fn validate_component<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        InstanceAllocatorImpl::validate_component_impl(self, component, offsets, get_module)
    }

    /// Validate whether a core module is allocatable with this instance
    /// allocator.
    fn validate_module(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        InstanceAllocatorImpl::validate_module_impl(self, module, offsets)
    }

    /// Allocates a fresh `InstanceHandle` for the given `request`.
    ///
    /// This will allocate memories and tables internally from this allocator
    /// and weave them together into a final and complete `InstanceHandle`
    /// ready to be registered with a store.
    ///
    /// Note that the returned instance must still have `.initialize(..)` called
    /// on it to complete the instantiation process.
    ///
    /// # Unsafety
    ///
    /// The request's associated module, memories, tables, and vmctx must
    /// already have been validated by `Self::validate_module`.
    unsafe fn allocate_module(
        &self,
        mut request: InstanceAllocationRequest,
    ) -> Result<InstanceHandle> {
        let module = request.runtime_info.module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        self.increment_core_instance_count()?;

        let num_defined_memories = module.memory_plans.len() - module.num_imported_memories;
        let mut memories = PrimaryMap::with_capacity(num_defined_memories);

        let num_defined_tables = module.table_plans.len() - module.num_imported_tables;
        let mut tables = PrimaryMap::with_capacity(num_defined_tables);

        // An immediately-invoked closure serves as a poor-man's `try` block so
        // that a failure in either allocation step falls through to the
        // cleanup arm below.
        match (|| {
            self.allocate_memories(&mut request, &mut memories)?;
            self.allocate_tables(&mut request, &mut tables)?;
            Ok(())
        })() {
            Ok(_) => Ok(Instance::new(
                request,
                memories,
                tables,
                &module.memory_plans,
            )),
            Err(e) => {
                self.deallocate_memories(&mut memories);
                self.deallocate_tables(&mut tables);
                self.decrement_core_instance_count();
                Err(e)
            }
        }
    }

    /// Deallocates the provided instance.
    ///
    /// This will null-out the pointer within `handle` and otherwise reclaim
    /// resources such as tables, memories, and the instance memory itself.
    ///
    /// # Unsafety
    ///
    /// The instance must have previously been allocated by `Self::allocate_module`.
    unsafe fn deallocate_module(&self, handle: &mut InstanceHandle) {
        self.deallocate_memories(&mut handle.instance_mut().memories);
        self.deallocate_tables(&mut handle.instance_mut().tables);

        let layout = Instance::alloc_layout(handle.instance().offsets());
        let ptr = handle.instance.take().unwrap();
        ptr::drop_in_place(ptr.as_ptr());
        alloc::dealloc(ptr.as_ptr().cast(), layout);

        self.decrement_core_instance_count();
    }

    /// Allocate the memories for the given instance allocation request, pushing
    /// them into `memories`.
    ///
    /// # Unsafety
    ///
    /// The request's associated module and memories must have previously been
    /// validated by `Self::validate_module`.
    unsafe fn allocate_memories(
        &self,
        request: &mut InstanceAllocationRequest,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) -> Result<()> {
        let module = request.runtime_info.module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        for (memory_index, memory_plan) in module
            .memory_plans
            .iter()
            .skip(module.num_imported_memories)
        {
            let memory_index = module
                .defined_memory_index(memory_index)
                .expect("should be a defined memory since we skipped imported ones");

            memories.push(self.allocate_memory(request, memory_plan, memory_index)?);
        }

        Ok(())
    }

    /// Deallocate all the memories in the given primary map.
    ///
    /// # Unsafety
    ///
    /// The memories must have previously been allocated by
    /// `Self::allocate_memories`.
    unsafe fn deallocate_memories(
        &self,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) {
        for (memory_index, (allocation_index, memory)) in mem::take(memories) {
            // Because deallocating memory is infallible, we don't need to worry
            // about leaking subsequent memories if the first memory failed to
            // deallocate. If deallocating memory ever becomes fallible, we will
            // need to be careful here!
            self.deallocate_memory(memory_index, allocation_index, memory);
        }
    }

    /// Allocate tables for the given instance allocation request, pushing them
    /// into `tables`.
    ///
    /// # Unsafety
    ///
    /// The request's associated module and tables must have previously been
    /// validated by `Self::validate_module`.
    unsafe fn allocate_tables(
        &self,
        request: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) -> Result<()> {
        let module = request.runtime_info.module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        for (index, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
            let def_index = module
                .defined_table_index(index)
                .expect("should be a defined table since we skipped imported ones");

            tables.push(self.allocate_table(request, plan, def_index)?);
        }

        Ok(())
    }

    /// Deallocate all the tables in the given primary map.
    ///
    /// # Unsafety
    ///
    /// The tables must have previously been allocated by
    /// `Self::allocate_tables`.
    unsafe fn deallocate_tables(
        &self,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) {
        for (table_index, (allocation_index, table)) in mem::take(tables) {
            self.deallocate_table(table_index, allocation_index, table);
        }
    }
}

// Every `InstanceAllocatorImpl` is an `InstanceAllocator` when used
// correctly. Also, no one is allowed to override this trait's methods; they
// must use the defaults. This blanket impl provides both of those things.
impl<T: InstanceAllocatorImpl> InstanceAllocator for T {}

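// For example (values are illustrative): a segment with `offset = 16` whose
// `base` global currently holds 100 starts at element index 116, while a sum
// that overflows `u32` fails instantiation with the error below instead of
// wrapping around.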
fn get_table_init_start(init: &TableSegment, instance: &mut Instance) -> Result<u32> {
    match init.base {
        Some(base) => {
            let val = unsafe { *(*instance.defined_or_imported_global_ptr(base)).as_u32() };

            init.offset
                .checked_add(val)
                .ok_or_else(|| anyhow!("element segment global base overflows"))
        }
        None => Ok(init.offset),
    }
}

fn check_table_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
    for segment in module.table_initialization.segments.iter() {
        let table = unsafe { &*instance.get_table(segment.table_index) };
        let start = get_table_init_start(segment, instance)?;
        let start = usize::try_from(start).unwrap();
        let end = start.checked_add(usize::try_from(segment.elements.len()).unwrap());

        match end {
            Some(end) if end <= table.size() as usize => {
                // Initializer is in bounds
            }
            _ => {
                bail!("table out of bounds: elements segment does not fit")
            }
        }
    }

    Ok(())
}

fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> {
    for (table, init) in module.table_initialization.initial_values.iter() {
        match init {
            // Tables are always initially null-initialized at this time
            TableInitialValue::Null { precomputed: _ } => {}

            TableInitialValue::FuncRef(idx) => {
                let funcref = instance.get_func_ref(*idx).unwrap();
                let table = unsafe { &mut *instance.get_defined_table(table) };
                let init = (0..table.size()).map(|_| funcref);
                table.init_func(0, init)?;
            }

            TableInitialValue::GlobalGet(idx) => unsafe {
                let global = instance.defined_or_imported_global_ptr(*idx);
                let table = &mut *instance.get_defined_table(table);
                match table.element_type() {
                    TableElementType::Func => {
                        let funcref = (*global).as_func_ref();
                        let init = (0..table.size()).map(|_| funcref);
                        table.init_func(0, init)?;
                    }
                    TableElementType::GcRef => {
                        let gc_ref = (*global).as_gc_ref();
                        let gc_ref = gc_ref.map(|r| r.unchecked_copy());
                        let init = (0..table.size()).map(|_| {
                            gc_ref
                                .as_ref()
                                .map(|r| (*instance.store()).gc_store().clone_gc_ref(r))
                        });
                        table.init_gc_refs(0, init)?;
                    }
                }
            },

            TableInitialValue::I31Ref(value) => {
                let value = VMGcRef::from_i31(I31::wrapping_i32(*value));
                let table = unsafe { &mut *instance.get_defined_table(table) };
                let init = (0..table.size()).map(|_| {
                    // NB: Okay to use `unchecked_copy` because `i31` doesn't
                    // need GC barriers.
                    Some(value.unchecked_copy())
                });
                table.init_gc_refs(0, init)?;
            }
        }
    }

    // Note: if the module's table initializer state is in FuncTable mode, we
    // will lazily initialize tables based on any statically-precomputed image
    // of FuncIndexes, but there may still be "leftover segments" that could
    // not be incorporated. So we have a unified handler here that iterates
    // over all segments (Segments mode) or leftover segments (FuncTable mode)
    // to initialize.
    for segment in module.table_initialization.segments.iter() {
        let start = get_table_init_start(segment, instance)?;
        instance.table_init_segment(
            segment.table_index,
            &segment.elements,
            start,
            0,
            segment.elements.len(),
        )?;
    }

    Ok(())
}

fn get_memory_init_start(init: &MemoryInitializer, instance: &mut Instance) -> Result<u64> {
    match init.base {
        Some(base) => {
            let mem64 = instance.module().memory_plans[init.memory_index]
                .memory
                .memory64;
            let val = unsafe {
                let global = instance.defined_or_imported_global_ptr(base);
                if mem64 {
                    *(*global).as_u64()
                } else {
                    u64::from(*(*global).as_u32())
                }
            };

            init.offset
                .checked_add(val)
                .ok_or_else(|| anyhow!("data segment global base overflows"))
        }
        None => Ok(init.offset),
    }
}

fn check_memory_init_bounds(
    instance: &mut Instance,
    initializers: &[MemoryInitializer],
) -> Result<()> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);
        let start = get_memory_init_start(init, instance)?;
        let end = usize::try_from(start)
            .ok()
            .and_then(|start| start.checked_add(init.data.len()));

        match end {
            Some(end) if end <= memory.current_length() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("memory out of bounds: data segment does not fit")
            }
        }
    }

    Ok(())
}

fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> {
    let memory_size_in_pages = &|instance: &mut Instance, memory| {
        (instance.get_memory(memory).current_length() as u64) / u64::from(WASM_PAGE_SIZE)
    };
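    // (A Wasm page is 64 KiB, i.e. `WASM_PAGE_SIZE` is 65536, so for example
    // a 1 MiB memory reports 16 pages here.)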

    // Loads the `global` value and returns it as a `u64`, zero-extending
    // 32-bit globals, which can be used as the base for 32-bit memories.
    let get_global_as_u64 = &mut |instance: &mut Instance, global| unsafe {
        let def = instance.defined_or_imported_global_ptr(global);
        if module.globals[global].wasm_ty == WasmValType::I64 {
            *(*def).as_u64()
        } else {
            u64::from(*(*def).as_u32())
        }
    };

    // Delegates to the `init_memory` method, which is sort of a duplicate of
    // `instance.memory_init_segment` but is used at compile time in other
    // contexts, so it is shared here to have only one method of memory
    // initialization.
    //
    // This call to `init_memory` notably implements all the bells and whistles,
    // so errors only happen if an out-of-bounds segment is found, in which case
    // a trap is returned.
    let ok = module.memory_initialization.init_memory(
        instance,
        InitMemory::Runtime {
            memory_size_in_pages,
            get_global_as_u64,
        },
        |instance, memory_index, init| {
            // If this initializer applies to a defined memory but that memory
            // doesn't need initialization, due to something like copy-on-write
            // pre-initializing it via mmap magic, then this initializer can be
            // skipped entirely.
            if let Some(memory_index) = module.defined_memory_index(memory_index) {
                if !instance.memories[memory_index].1.needs_init() {
                    return true;
                }
            }
            let memory = instance.get_memory(memory_index);

            unsafe {
                let src = instance.wasm_data(init.data.clone());
                let dst = memory.base.add(usize::try_from(init.offset).unwrap());
                // FIXME audit whether this is safe in the presence of shared
                // memory
                // (https://github.com/bytecodealliance/wasmtime/issues/4203).
                ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
            }
            true
        },
    );
    if !ok {
        return Err(Trap::MemoryOutOfBounds.into());
    }

    Ok(())
}

fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
    check_table_init_bounds(instance, module)?;

    match &module.memory_initialization {
        MemoryInitialization::Segmented(initializers) => {
            check_memory_init_bounds(instance, initializers)?;
        }
        // Statically validated already to have everything in-bounds.
        MemoryInitialization::Static { .. } => {}
    }

    Ok(())
}

pub(super) fn initialize_instance(
    instance: &mut Instance,
    module: &Module,
    is_bulk_memory: bool,
) -> Result<()> {
    // If bulk memory is not enabled, bounds-check the data and element segments
    // before making any changes. With bulk memory enabled, initializers are
    // processed in order and side effects are observed up to the point of an
    // out-of-bounds initializer, so the early checking is not desired.
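    //
    // For example, given two data segments where only the second is
    // out-of-bounds: without bulk memory the up-front check rejects the
    // module before either segment is written, whereas with bulk memory the
    // first segment is written and the trap is reported at the second.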
    if !is_bulk_memory {
        check_init_bounds(instance, module)?;
    }

    // Initialize the tables
    initialize_tables(instance, module)?;

    // Initialize the memories
    initialize_memories(instance, module)?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocator_traits_are_object_safe() {
        fn _instance_allocator(_: &dyn InstanceAllocatorImpl) {}
        fn _instance_allocator_ext(_: &dyn InstanceAllocator) {}
    }
}
780}