use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle};
use crate::memory::Memory;
use crate::table::Table;
use crate::{CompiledModuleId, ModuleRuntimeInfo, Store};
use anyhow::{anyhow, bail, Result};
use std::alloc;
use std::any::Any;
use std::convert::TryFrom;
use std::ptr;
use std::sync::Arc;
use wasmtime_environ::{
DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
MemoryInitializer, Module, PrimaryMap, TableInitialValue, TableSegment, Trap, VMOffsets,
WasmType, WASM_PAGE_SIZE,
};
mod on_demand;
pub use self::on_demand::OnDemandInstanceAllocator;
#[cfg(feature = "pooling-allocator")]
mod pooling;
#[cfg(feature = "pooling-allocator")]
pub use self::pooling::{InstanceLimits, PoolingInstanceAllocator, PoolingInstanceAllocatorConfig};
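/// A request to allocate a new wasm instance.
///
/// This bundles together everything an [`InstanceAllocator`] needs to create
/// an `Instance`: the runtime info for the compiled module, the resolved
/// imports, arbitrary host state to attach to the instance, and a pointer
/// back to the store that will own it.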
pub struct InstanceAllocationRequest<'a> {
pub runtime_info: &'a Arc<dyn ModuleRuntimeInfo>,
pub imports: Imports<'a>,
pub host_state: Box<dyn Any + Send + Sync>,
pub store: StorePtr,
}
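/// A raw pointer to the `Store` that will own the instance being allocated,
/// or `None` if no store is available yet.
///
/// The pointer is kept raw because the allocator may need to reach back into
/// the store while the instance is being created; callers must ensure the
/// pointer remains valid for the duration of allocation.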
pub struct StorePtr(Option<*mut dyn Store>);
impl StorePtr {
pub fn empty() -> Self {
Self(None)
}
pub fn new(ptr: *mut dyn Store) -> Self {
Self(Some(ptr))
}
pub fn as_raw(&self) -> Option<*mut dyn Store> {
self.0
}
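/// Returns a mutable reference to the store, if one was provided.
///
/// # Safety
///
/// The caller must guarantee that the pointer handed to [`StorePtr::new`]
/// is still valid and not aliased when this is called.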
pub(crate) unsafe fn get(&mut self) -> Option<&mut dyn Store> {
match self.0 {
Some(ptr) => Some(&mut *ptr),
None => None,
}
}
}
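/// The trait implemented by all instance-allocation strategies.
///
/// An allocator hands out the resources that back an instance (the instance
/// slot itself, its defined memories and tables, and, with the `async`
/// feature, fiber stacks) and reclaims those resources on deallocation. The
/// provided `allocate` and `deallocate` methods drive the per-resource hooks
/// declared below.
///
/// # Safety
///
/// This trait is `unsafe` to implement because the runtime relies on
/// implementations upholding invariants such as returning indices from
/// `allocate_index` that refer to live, non-aliased allocations until
/// `deallocate_index` is called for them.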
pub unsafe trait InstanceAllocator {
fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
let _ = (module, offsets);
Ok(())
}
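/// Allocates a fresh instance for `req`.
///
/// Memories and tables are allocated first; if either step fails, any
/// partially allocated resources and the reserved index are released before
/// the error is returned.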
fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let index = self.allocate_index(&req)?;
let module = req.runtime_info.module();
let mut memories =
PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
let mut tables =
PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);
let result = self
.allocate_memories(index, &mut req, &mut memories)
.and_then(|()| self.allocate_tables(index, &mut req, &mut tables));
if let Err(e) = result {
self.deallocate_memories(index, &mut memories);
self.deallocate_tables(index, &mut tables);
self.deallocate_index(index);
return Err(e);
}
unsafe { Ok(Instance::new(req, index, memories, tables)) }
}
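/// Deallocates the instance referenced by `handle`, returning its memories,
/// tables, and index to the allocator and freeing the `Instance` allocation
/// itself.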
fn deallocate(&self, handle: &mut InstanceHandle) {
let index = handle.instance().index;
self.deallocate_memories(index, &mut handle.instance_mut().memories);
self.deallocate_tables(index, &mut handle.instance_mut().tables);
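// Drop the `Instance` in place, then free its backing allocation.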
unsafe {
let layout = Instance::alloc_layout(handle.instance().offsets());
let ptr = handle.instance.take().unwrap();
ptr::drop_in_place(ptr.as_ptr());
alloc::dealloc(ptr.as_ptr().cast(), layout);
}
self.deallocate_index(index);
}
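/// Reserves an allocator-specific index for the instance described by
/// `req`; the paired `deallocate_index` releases it.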
fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize>;
fn deallocate_index(&self, index: usize);
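/// Allocates the defined memories for the instance at `index`, pushing them
/// into `mems`; `deallocate_memories` reclaims them.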
fn allocate_memories(
&self,
index: usize,
req: &mut InstanceAllocationRequest,
mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) -> Result<()>;
fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>);
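/// Allocates the defined tables for the instance at `index`, pushing them
/// into `tables`; `deallocate_tables` reclaims them.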
fn allocate_tables(
&self,
index: usize,
req: &mut InstanceAllocationRequest,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()>;
fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>);
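/// Allocates a stack for executing async wasm on a fiber; only available
/// with the `async` feature, and released via `deallocate_fiber_stack`.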
#[cfg(feature = "async")]
fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;
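/// Releases a fiber stack previously returned by `allocate_fiber_stack`.
///
/// # Safety
///
/// The stack must no longer be in use by any fiber when this is called.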
#[cfg(feature = "async")]
unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);
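/// Notifies the allocator that the given module is no longer in use, so any
/// cached state associated with it may be dropped.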
fn purge_module(&self, module: CompiledModuleId);
}
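/// Computes the starting offset of a table element segment, adding the value
/// of its global base (if any) to its static offset and failing on overflow.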
fn get_table_init_start(init: &TableSegment, instance: &mut Instance) -> Result<u32> {
match init.base {
Some(base) => {
let val = unsafe { *(*instance.defined_or_imported_global_ptr(base)).as_u32() };
init.offset
.checked_add(val)
.ok_or_else(|| anyhow!("element segment global base overflows"))
}
None => Ok(init.offset),
}
}
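/// Verifies that every table element segment of `module` fits within the
/// current size of its target table.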
fn check_table_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
for segment in module.table_initialization.segments.iter() {
let table = unsafe { &*instance.get_table(segment.table_index) };
let start = get_table_init_start(segment, instance)?;
let start = usize::try_from(start).unwrap();
let end = start.checked_add(segment.elements.len());
match end {
// The segment lies entirely within the table's current bounds.
Some(end) if end <= table.size() as usize => {}
_ => {
bail!("table out of bounds: elements segment does not fit")
}
}
}
Ok(())
}
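/// Initializes the instance's tables: first fills tables that have a
/// whole-table initial value, then applies each active element segment.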
fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> {
for (table, init) in module.table_initialization.initial_values.iter() {
match init {
TableInitialValue::Null { precomputed: _ } => {}
TableInitialValue::FuncRef(idx) => {
let funcref = instance.get_func_ref(*idx).unwrap();
let table = unsafe { &mut *instance.get_defined_table(table) };
table.init_func(funcref)?;
}
}
}
for segment in module.table_initialization.segments.iter() {
let start = get_table_init_start(segment, instance)?;
instance.table_init_segment(
segment.table_index,
&segment.elements,
start,
0,
segment.elements.len() as u32,
)?;
}
Ok(())
}
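/// Computes the starting offset of a data segment, adding the value of its
/// global base (if any, read as 32- or 64-bit depending on the memory) to
/// its static offset and failing on overflow.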
fn get_memory_init_start(init: &MemoryInitializer, instance: &mut Instance) -> Result<u64> {
match init.base {
Some(base) => {
let mem64 = instance.module().memory_plans[init.memory_index]
.memory
.memory64;
let val = unsafe {
let global = instance.defined_or_imported_global_ptr(base);
if mem64 {
*(*global).as_u64()
} else {
u64::from(*(*global).as_u32())
}
};
init.offset
.checked_add(val)
.ok_or_else(|| anyhow!("data segment global base overflows"))
}
None => Ok(init.offset),
}
}
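/// Verifies that every data segment in `initializers` fits within the
/// current length of its target memory.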
fn check_memory_init_bounds(
instance: &mut Instance,
initializers: &[MemoryInitializer],
) -> Result<()> {
for init in initializers {
let memory = instance.get_memory(init.memory_index);
let start = get_memory_init_start(init, instance)?;
let end = usize::try_from(start)
.ok()
.and_then(|start| start.checked_add(init.data.len()));
match end {
// The segment lies entirely within the memory's current length.
Some(end) if end <= memory.current_length() => {}
_ => {
bail!("memory out of bounds: data segment does not fit")
}
}
}
Ok(())
}
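/// Initializes the instance's memories by copying each data segment into
/// place, skipping defined memories that do not require initialization
/// (e.g. those backed by a copy-on-write image).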
fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> {
let memory_size_in_pages = &|instance: &mut Instance, memory| {
(instance.get_memory(memory).current_length() as u64) / u64::from(WASM_PAGE_SIZE)
};
let get_global_as_u64 = &mut |instance: &mut Instance, global| unsafe {
let def = instance.defined_or_imported_global_ptr(global);
if module.globals[global].wasm_ty == WasmType::I64 {
*(*def).as_u64()
} else {
u64::from(*(*def).as_u32())
}
};
let ok = module.memory_initialization.init_memory(
instance,
InitMemory::Runtime {
memory_size_in_pages,
get_global_as_u64,
},
|instance, memory_index, init| {
if let Some(memory_index) = module.defined_memory_index(memory_index) {
if !instance.memories[memory_index].needs_init() {
return true;
}
}
let memory = instance.get_memory(memory_index);
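// Copy the data segment's bytes into the destination memory; out-of-bounds
// segments are reported by `init_memory` returning `false` below.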
unsafe {
let src = instance.wasm_data(init.data.clone());
let dst = memory.base.add(usize::try_from(init.offset).unwrap());
ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
}
true
},
);
if !ok {
return Err(Trap::MemoryOutOfBounds.into());
}
Ok(())
}
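/// Performs up-front bounds checks for table segments and, when memory
/// initialization is segmented, for data segments as well.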
fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
check_table_init_bounds(instance, module)?;
match &module.memory_initialization {
MemoryInitialization::Segmented(initializers) => {
check_memory_init_bounds(instance, initializers)?;
}
MemoryInitialization::Static { .. } => {}
}
Ok(())
}
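/// Initializes a freshly allocated instance's tables and memories.
///
/// When `is_bulk_memory` is false (the bulk-memory proposal is disabled),
/// all segment bounds are checked before any initialization is applied, so
/// that a trap leaves the instance untouched; with bulk-memory semantics,
/// initialization proceeds segment-by-segment and may partially complete.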
pub(super) fn initialize_instance(
instance: &mut Instance,
module: &Module,
is_bulk_memory: bool,
) -> Result<()> {
if !is_bulk_memory {
check_init_bounds(instance, module)?;
}
initialize_tables(instance, module)?;
initialize_memories(instance, module)?;
Ok(())
}