use crate::Trap;
use crate::prelude::*;
use crate::runtime::vm::VMMemoryImport;
use crate::store::{StoreInstanceId, StoreOpaque};
use crate::trampoline::generate_memory_export;
use crate::{AsContext, AsContextMut, Engine, MemoryType, StoreContext, StoreContextMut};
use core::cell::UnsafeCell;
use core::fmt;
use core::slice;
use core::time::Duration;
use wasmtime_environ::DefinedMemoryIndex;
pub use crate::runtime::vm::WaitResult;
/// Error returned by [`Memory::read`] and [`Memory::write`] when the
/// requested range is out of bounds of the linear memory.
///
/// Carries no data; the private field only prevents construction outside
/// this module, and `#[non_exhaustive]` reserves room to extend it later.
#[derive(Debug)]
#[non_exhaustive]
pub struct MemoryAccessError {
// Zero-sized private field: makes external construction impossible.
_private: (),
}
impl fmt::Display for MemoryAccessError {
    /// Writes the single fixed diagnostic message for this error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("out of bounds memory access")
    }
}
impl core::error::Error for MemoryAccessError {}
/// A handle to a WebAssembly linear memory living inside a store.
///
/// This is a cheap `Copy` index pair — the instance it belongs to plus the
/// defined-memory index within that instance — so all actual memory state is
/// owned by the store and every method takes a store argument.
///
/// `#[repr(C)]` is required by the layout assertions in the `const _` block
/// below, which pin this struct's size/alignment/field offsets.
#[derive(Copy, Clone, Debug)]
#[repr(C)] pub struct Memory {
// Which store + instance owns the memory.
instance: StoreInstanceId,
// Index of the memory among the instance's *defined* (non-imported) memories.
index: DefinedMemoryIndex,
}
// Compile-time layout checks: `Memory` must match the mirror C layout built
// here (a (u64, u32) pair followed by a u32), and `instance` must sit at
// offset 0. Presumably some FFI or raw-pointer consumer relies on this exact
// layout — if `Memory`'s fields change, these asserts fail the build rather
// than silently corrupting that consumer.
const _: () = {
#[repr(C)]
struct Tmp(u64, u32);
#[repr(C)]
struct C(Tmp, u32);
assert!(core::mem::size_of::<C>() == core::mem::size_of::<Memory>());
assert!(core::mem::align_of::<C>() == core::mem::align_of::<Memory>());
assert!(core::mem::offset_of!(Memory, instance) == 0);
};
impl Memory {
/// Creates a new linear memory described by `ty` inside `store` and
/// returns a handle to it.
pub fn new(mut store: impl AsContextMut, ty: MemoryType) -> Result<Memory> {
Self::_new(store.as_context_mut().0, ty)
}
/// Async variant of [`Memory::new`]: performs the allocation on a fiber
/// so a possibly-blocking allocation does not stall the async executor.
///
/// # Panics
///
/// Panics if `store` was not configured with async support.
#[cfg(feature = "async")]
pub async fn new_async(
mut store: impl AsContextMut<Data: Send>,
ty: MemoryType,
) -> Result<Memory> {
let mut store = store.as_context_mut();
assert!(
store.0.async_support(),
"cannot use `new_async` without enabling async support on the config"
);
store.on_fiber(|store| Self::_new(store.0, ty)).await?
}
/// Shared constructor: allocates the runtime memory in `store` and wraps
/// the resulting export in a `Memory` handle.
fn _new(store: &mut StoreOpaque, ty: MemoryType) -> Result<Memory> {
unsafe {
// SAFETY: the export comes from `store` itself, which is the same
// store passed to `from_wasmtime_memory`, so the vmctx it carries
// belongs to this store.
let export = generate_memory_export(store, &ty, None)?;
Ok(Memory::from_wasmtime_memory(export, store))
}
}
/// Returns the type (limits, page size, sharedness) of this memory.
pub fn ty(&self, store: impl AsContext) -> MemoryType {
let store = store.as_context();
MemoryType::from_wasmtime_memory(self.wasmtime_ty(store.0))
}
/// Copies `buffer.len()` bytes starting at `offset` out of this memory
/// into `buffer`.
///
/// # Errors
///
/// Returns [`MemoryAccessError`] when `offset..offset + buffer.len()`
/// does not lie within the memory.
pub fn read(
&self,
store: impl AsContext,
offset: usize,
buffer: &mut [u8],
) -> Result<(), MemoryAccessError> {
let store = store.as_context();
// Two chained `get`s avoid computing `offset + len` up front, so an
// overflowing range yields `None` instead of panicking.
let slice = self
.data(&store)
.get(offset..)
.and_then(|s| s.get(..buffer.len()))
.ok_or(MemoryAccessError { _private: () })?;
buffer.copy_from_slice(slice);
Ok(())
}
/// Copies all of `buffer` into this memory starting at `offset`.
///
/// # Errors
///
/// Returns [`MemoryAccessError`] when the destination range is out of
/// bounds (including arithmetic overflow of the range).
pub fn write(
&self,
mut store: impl AsContextMut,
offset: usize,
buffer: &[u8],
) -> Result<(), MemoryAccessError> {
let mut context = store.as_context_mut();
self.data_mut(&mut context)
.get_mut(offset..)
.and_then(|s| s.get_mut(..buffer.len()))
.ok_or(MemoryAccessError { _private: () })?
.copy_from_slice(buffer);
Ok(())
}
/// Returns the entire contents of this memory as a byte slice whose
/// lifetime is tied to the store borrow.
pub fn data<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [u8] {
unsafe {
let store = store.into();
let definition = store[self.instance].memory(self.index);
// A plain `&[u8]` view is only sound for non-shared memories;
// shared memories can be mutated concurrently by other threads.
debug_assert!(!self.ty(store).is_shared());
// SAFETY: `base`/`current_length` describe this memory's live
// allocation, and holding the store borrow for `'a` keeps the
// memory from being grown or dropped while the slice is alive.
slice::from_raw_parts(definition.base.as_ptr(), definition.current_length())
}
}
/// Returns the entire contents of this memory as a mutable byte slice.
pub fn data_mut<'a, T: 'static>(
&self,
store: impl Into<StoreContextMut<'a, T>>,
) -> &'a mut [u8] {
unsafe {
let store = store.into();
let definition = store[self.instance].memory(self.index);
debug_assert!(!self.ty(store).is_shared());
// SAFETY: exclusive access to the store for `'a` guarantees no
// other view of this memory coexists with the returned slice.
slice::from_raw_parts_mut(definition.base.as_ptr(), definition.current_length())
}
}
/// Returns this memory's bytes *and* the store's user data `T`, both
/// mutably at the same time.
pub fn data_and_store_mut<'a, T: 'static>(
&self,
store: impl Into<StoreContextMut<'a, T>>,
) -> (&'a mut [u8], &'a mut T) {
unsafe {
let mut store = store.into();
// SAFETY: the raw-pointer round trip detaches the `T` borrow from
// `store` so `data_mut` can also borrow it. The linear memory and
// the store's user data are distinct allocations, so the two
// `&mut`s never alias.
let data = &mut *(store.data_mut() as *mut T);
(self.data_mut(store), data)
}
}
/// Returns the base pointer of this memory's current allocation.
///
/// Note that growth may move the allocation, so the pointer is only
/// valid until the memory is next grown.
pub fn data_ptr(&self, store: impl AsContext) -> *mut u8 {
store.as_context()[self.instance]
.memory(self.index)
.base
.as_ptr()
}
/// Returns the current size of this memory in bytes.
pub fn data_size(&self, store: impl AsContext) -> usize {
self.internal_data_size(store.as_context().0)
}
/// Byte-size accessor usable from crate internals that only have a
/// `StoreOpaque`.
pub(crate) fn internal_data_size(&self, store: &StoreOpaque) -> usize {
store[self.instance].memory(self.index).current_length()
}
/// Returns the current size of this memory in units of its page size.
pub fn size(&self, store: impl AsContext) -> u64 {
self.internal_size(store.as_context().0)
}
/// Page-count accessor for crate internals: byte size divided by the
/// memory's page size (which is a type-level property, see `_page_size`).
pub(crate) fn internal_size(&self, store: &StoreOpaque) -> u64 {
let byte_size = self.internal_data_size(store);
let page_size = usize::try_from(self._page_size(store)).unwrap();
u64::try_from(byte_size / page_size).unwrap()
}
/// Returns the page size, in bytes, of this memory.
pub fn page_size(&self, store: impl AsContext) -> u64 {
self._page_size(store.as_context().0)
}
pub(crate) fn _page_size(&self, store: &StoreOpaque) -> u64 {
self.wasmtime_ty(store).page_size()
}
/// Returns `log2` of this memory's page size.
pub fn page_size_log2(&self, store: impl AsContext) -> u8 {
self._page_size_log2(store.as_context().0)
}
pub(crate) fn _page_size_log2(&self, store: &StoreOpaque) -> u8 {
self.wasmtime_ty(store).page_size_log2
}
/// Grows this memory by `delta` units and returns the byte size reported
/// by the underlying runtime converted to pages (presumably the size
/// *before* the grow, per the usual `memory.grow` contract — confirm
/// against the runtime `Memory::grow` documentation).
///
/// # Errors
///
/// Returns an error if the runtime refuses or fails the growth.
pub fn grow(&self, mut store: impl AsContextMut, delta: u64) -> Result<u64> {
let store = store.as_context_mut().0;
let mem = self.wasmtime_memory(store);
unsafe {
match (*mem).grow(delta, Some(store))? {
Some(size) => {
// Growing may reallocate, so refresh the cached
// `VMMemoryDefinition` stored in the instance's vmctx.
let vm = (*mem).vmmemory();
store[self.instance].memory_ptr(self.index).write(vm);
let page_size = (*mem).page_size();
Ok(u64::try_from(size).unwrap() / page_size)
}
None => bail!("failed to grow memory by `{}`", delta),
}
}
}
/// Async variant of [`Memory::grow`], run on a fiber.
///
/// # Panics
///
/// Panics if `store` was not configured with async support.
#[cfg(feature = "async")]
pub async fn grow_async(
&self,
mut store: impl AsContextMut<Data: Send>,
delta: u64,
) -> Result<u64> {
let mut store = store.as_context_mut();
assert!(
store.0.async_support(),
"cannot use `grow_async` without enabling async support on the config"
);
store.on_fiber(|store| self.grow(store, delta)).await?
}
/// Returns a raw pointer to the runtime-internal memory object backing
/// this handle.
fn wasmtime_memory(&self, store: &mut StoreOpaque) -> *mut crate::runtime::vm::Memory {
self.instance.get_mut(store).get_defined_memory(self.index)
}
/// Converts a raw runtime memory export into a public `Memory` handle.
///
/// # Safety
///
/// `wasmtime_export` must describe a memory that lives inside `store`
/// (its `vmctx` is resolved through `store.vmctx_id`).
pub(crate) unsafe fn from_wasmtime_memory(
wasmtime_export: crate::runtime::vm::ExportMemory,
store: &StoreOpaque,
) -> Memory {
Memory {
instance: store.vmctx_id(wasmtime_export.vmctx),
index: wasmtime_export.index,
}
}
/// Looks up this memory's compile-time type information in the owning
/// module's environment data.
pub(crate) fn wasmtime_ty<'a>(&self, store: &'a StoreOpaque) -> &'a wasmtime_environ::Memory {
let module = store[self.instance].env_module();
// Translate the defined-memory index into the module-wide index space.
let index = module.memory_index(self.index);
&module.memories[index]
}
/// Builds the `VMMemoryImport` used when this memory is imported into
/// another instance.
pub(crate) fn vmimport(&self, store: &StoreOpaque) -> crate::runtime::vm::VMMemoryImport {
let instance = &store[self.instance];
crate::runtime::vm::VMMemoryImport {
from: instance.memory_ptr(self.index).into(),
vmctx: instance.vmctx().into(),
index: self.index,
}
}
/// Whether this handle belongs to `store` (store-ID comparison only).
pub(crate) fn comes_from_same_store(&self, store: &StoreOpaque) -> bool {
store.id() == self.instance.store_id()
}
/// A hash key that identifies the underlying memory rather than this
/// particular handle: the address of the memory's definition pointer,
/// so duplicate handles to one memory hash equally.
#[cfg(feature = "coredump")]
pub(crate) fn hash_key(&self, store: &StoreOpaque) -> impl core::hash::Hash + Eq + use<> {
store[self.instance].memory_ptr(self.index).as_ptr().addr()
}
}
/// A user-provided backing implementation for a linear memory.
///
/// # Safety
///
/// Implementations are trusted by the runtime: `as_ptr` must point to at
/// least `byte_capacity()` contiguous accessible bytes, `byte_size()` must
/// never exceed `byte_capacity()`, and a successful `grow_to` must leave at
/// least `new_size` bytes usable — the runtime dereferences these pointers
/// without further checks.
pub unsafe trait LinearMemory: Send + Sync + 'static {
/// Current accessible size of the memory, in bytes.
fn byte_size(&self) -> usize;
/// Total reserved capacity, in bytes (growth beyond this must fail).
fn byte_capacity(&self) -> usize;
/// Grows the accessible region to `new_size` bytes.
fn grow_to(&mut self, new_size: usize) -> Result<()>;
/// Base pointer of the memory's allocation.
fn as_ptr(&self) -> *mut u8;
}
/// A factory for custom [`LinearMemory`] implementations.
///
/// # Safety
///
/// The returned memory must honor the requested `minimum`/`maximum` sizes
/// and, when `reserved_size_in_bytes` is given, reserve that much address
/// space plus `guard_size_in_bytes` of trailing guard region — the runtime's
/// generated code relies on these bounds for memory safety.
pub unsafe trait MemoryCreator: Send + Sync {
/// Creates a new memory for `ty` with the given size/guard parameters;
/// a `String` error message is reported back on failure.
fn new_memory(
&self,
ty: MemoryType,
minimum: usize,
maximum: Option<usize>,
reserved_size_in_bytes: Option<usize>,
guard_size_in_bytes: usize,
) -> Result<Box<dyn LinearMemory>, String>;
}
/// A WebAssembly shared linear memory (the `threads` proposal).
///
/// Unlike [`Memory`], a shared memory is owned by an [`Engine`] rather than
/// a store, and cloning the handle shares the same underlying memory.
#[derive(Clone)]
pub struct SharedMemory {
// Reference-counted runtime memory shared across handles/threads.
vm: crate::runtime::vm::SharedMemory,
// Keeps the owning engine alive as long as any handle exists.
engine: Engine,
// Cached log2 of the page size so size conversions need no store access.
page_size_log2: u8,
}
impl SharedMemory {
/// Creates a new shared memory described by `ty`, owned by `engine`.
///
/// # Errors
///
/// Fails if `ty` lacks the `shared` flag or if allocation fails.
#[cfg(feature = "threads")]
pub fn new(engine: &Engine, ty: MemoryType) -> Result<Self> {
if !ty.is_shared() {
bail!("shared memory must have the `shared` flag enabled on its memory type")
}
// Shared memories are required (by wasm validation upstream of here,
// presumably) to declare a maximum.
debug_assert!(ty.maximum().is_some());
let tunables = engine.tunables();
let ty = ty.wasmtime_memory();
let page_size_log2 = ty.page_size_log2;
let memory = crate::runtime::vm::SharedMemory::new(ty, tunables)?;
Ok(Self {
vm: memory,
engine: engine.clone(),
page_size_log2,
})
}
/// Returns the type of this shared memory.
pub fn ty(&self) -> MemoryType {
MemoryType::from_wasmtime_memory(&self.vm.ty())
}
/// Returns the current size of this memory in units of its page size.
pub fn size(&self) -> u64 {
let byte_size = u64::try_from(self.data_size()).unwrap();
let page_size = u64::from(self.page_size());
byte_size / page_size
}
/// Returns the page size, in bytes, of this memory.
pub fn page_size(&self) -> u32 {
// Only 1-byte and 64 KiB pages are expected (custom-page-sizes
// proposal vs. the classic wasm page size).
debug_assert!(self.page_size_log2 == 0 || self.page_size_log2 == 16);
1 << self.page_size_log2
}
/// Returns the current size of this memory in bytes.
pub fn data_size(&self) -> usize {
self.vm.byte_size()
}
/// Returns the memory's contents as `UnsafeCell` bytes: other threads may
/// mutate them concurrently, so a plain `&[u8]` would be unsound.
pub fn data(&self) -> &[UnsafeCell<u8>] {
unsafe {
// SAFETY: the definition pointer is valid for the life of `self.vm`,
// and `UnsafeCell<u8>` makes concurrent mutation through other
// handles permissible behind a shared reference.
let definition = self.vm.vmmemory_ptr().as_ref();
slice::from_raw_parts(definition.base.as_ptr().cast(), definition.current_length())
}
}
/// Grows this memory by `delta` units and returns the previous size in
/// pages.
///
/// # Errors
///
/// Returns an error if the runtime refuses or fails the growth.
pub fn grow(&self, delta: u64) -> Result<u64> {
match self.vm.grow(delta, None)? {
Some((old_size, _new_size)) => {
// `old_size` is in bytes; report pages to the caller.
Ok(u64::try_from(old_size).unwrap() / u64::from(self.page_size()))
}
None => bail!("failed to grow memory by `{}`", delta),
}
}
/// Notifies up to `count` waiters parked on address `addr`
/// (wasm `memory.atomic.notify`); returns how many were woken.
pub fn atomic_notify(&self, addr: u64, count: u32) -> Result<u32, Trap> {
self.vm.atomic_notify(addr, count)
}
/// Blocks until notified or timed out if the 32-bit value at `addr`
/// equals `expected` (wasm `memory.atomic.wait32`).
pub fn atomic_wait32(
&self,
addr: u64,
expected: u32,
timeout: Option<Duration>,
) -> Result<WaitResult, Trap> {
self.vm.atomic_wait32(addr, expected, timeout)
}
/// 64-bit variant of [`SharedMemory::atomic_wait32`].
pub fn atomic_wait64(
&self,
addr: u64,
expected: u64,
timeout: Option<Duration>,
) -> Result<WaitResult, Trap> {
self.vm.atomic_wait64(addr, expected, timeout)
}
/// The engine this shared memory was created with.
pub(crate) fn engine(&self) -> &Engine {
&self.engine
}
/// Builds the `VMMemoryImport` used to import this shared memory into an
/// instance in `store`, registering the memory with the store on the way.
pub(crate) fn vmimport(&self, store: &mut StoreOpaque) -> crate::runtime::vm::VMMemoryImport {
// NOTE(review): the `unwrap` presumes exporting an already-created
// shared memory cannot fail — confirm against `generate_memory_export`.
let export_memory = generate_memory_export(store, &self.ty(), Some(&self.vm)).unwrap();
VMMemoryImport {
from: export_memory.definition.into(),
vmctx: export_memory.vmctx.into(),
index: export_memory.index,
}
}
/// Recovers a `SharedMemory` handle from a raw runtime export.
///
/// # Safety
///
/// `wasmtime_export` must describe a live memory whose `vmctx` belongs to
/// `store`'s engine.
///
/// # Panics
///
/// Panics if the export refers to a non-shared memory.
pub(crate) unsafe fn from_wasmtime_memory(
wasmtime_export: crate::runtime::vm::ExportMemory,
store: &StoreOpaque,
) -> Self {
#[cfg_attr(not(feature = "threads"), allow(unused_variables, unreachable_code))]
crate::runtime::vm::Instance::from_vmctx(wasmtime_export.vmctx, |handle| {
let memory_index = handle.env_module().memory_index(wasmtime_export.index);
let page_size = handle.memory_page_size(memory_index);
debug_assert!(page_size.is_power_of_two());
let page_size_log2 = u8::try_from(page_size.ilog2()).unwrap();
let memory = handle
.get_defined_memory(wasmtime_export.index)
.as_mut()
.unwrap();
// Cloning the shared-memory wrapper only bumps a refcount; the
// underlying allocation stays shared.
match memory.as_shared_memory() {
Some(mem) => Self {
vm: mem.clone(),
engine: store.engine().clone(),
page_size_log2,
},
None => panic!("unable to convert from a shared memory"),
}
})
}
}
impl fmt::Debug for SharedMemory {
    /// Renders an intentionally opaque `SharedMemory { .. }` — the internals
    /// (raw runtime state) are not useful to print.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("SharedMemory");
        builder.finish_non_exhaustive()
    }
}
#[cfg(test)]
mod tests {
use crate::*;
// With memory reservation and guard size forced to 0, generated code must
// not be able to elide bounds checks for this memory.
#[test]
fn respect_tunables() {
let mut cfg = Config::new();
cfg.memory_reservation(0).memory_guard_size(0);
let mut store = Store::new(&Engine::new(&cfg).unwrap(), ());
let ty = MemoryType::new(1, None);
let mem = Memory::new(&mut store, ty).unwrap();
let store = store.as_context();
let tunables = store.engine().tunables();
assert_eq!(tunables.memory_guard_size, 0);
assert!(
!mem.wasmtime_ty(store.0)
.can_elide_bounds_check(tunables, 12)
);
}
// Two `Memory` handles obtained separately for the same export must alias
// the same underlying memory and hash to the same key, while a memory from
// a second instance must hash differently.
#[test]
fn hash_key_is_stable_across_duplicate_store_data_entries() -> Result<()> {
let mut store = Store::<()>::default();
let module = Module::new(
store.engine(),
r#"
(module
(memory (export "m") 1 1)
)
"#,
)?;
let instance = Instance::new(&mut store, &module, &[])?;
// Fetch the same export twice to get two distinct handles.
let m1 = instance.get_memory(&mut store, "m").unwrap();
let m2 = instance.get_memory(&mut store, "m").unwrap();
assert_eq!(m1.data(&store)[0], 0);
assert_eq!(m2.data(&store)[0], 0);
// A write through one handle is visible through the other.
m1.data_mut(&mut store)[0] = 42;
assert_eq!(m1.data(&mut store)[0], 42);
assert_eq!(m2.data(&mut store)[0], 42);
assert!(m1.hash_key(&store.as_context().0) == m2.hash_key(&store.as_context().0));
let instance2 = Instance::new(&mut store, &module, &[])?;
let m3 = instance2.get_memory(&mut store, "m").unwrap();
assert!(m1.hash_key(&store.as_context().0) != m3.hash_key(&store.as_context().0));
Ok(())
}
}