use crate::Engine;
use crate::prelude::*;
use crate::runtime::store::StoreResourceLimiter;
use crate::runtime::vm::vmcontext::VMMemoryDefinition;
#[cfg(has_virtual_memory)]
use crate::runtime::vm::{HostAlignedByteCount, MmapOffset};
use crate::runtime::vm::{MemoryImage, MemoryImageSlot, SendSyncPtr};
use alloc::sync::Arc;
use core::{ops::Range, ptr::NonNull};
use wasmtime_environ::Tunables;
#[cfg(feature = "threads")]
use wasmtime_environ::Trap;
#[cfg(has_virtual_memory)]
mod mmap;
#[cfg(has_virtual_memory)]
pub use self::mmap::MmapMemory;
mod malloc;
pub use self::malloc::MallocMemory;
#[cfg(feature = "pooling-allocator")]
mod static_;
#[cfg(feature = "pooling-allocator")]
use self::static_::StaticMemory;
#[cfg(feature = "threads")]
mod shared_memory;
#[cfg(feature = "threads")]
pub use shared_memory::SharedMemory;
#[cfg(not(feature = "threads"))]
mod shared_memory_disabled;
#[cfg(not(feature = "threads"))]
pub use shared_memory_disabled::SharedMemory;
/// A memory allocator: creates the [`RuntimeLinearMemory`] backing storage
/// for wasm linear memories.
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Creates a new backing allocation for a linear memory.
    ///
    /// * `ty` - the wasm-level type of the memory being created
    /// * `tunables` - engine configuration affecting memory layout
    /// * `minimum` - initial accessible size, in bytes
    /// * `maximum` - optional upper bound on the memory's size, in bytes
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>>;
}
/// The default [`RuntimeMemoryCreator`]: picks an mmap-backed memory when
/// virtual-memory features are in use, otherwise a malloc-backed memory.
pub struct DefaultMemoryCreator;
impl RuntimeMemoryCreator for DefaultMemoryCreator {
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>> {
        // Use an mmap-backed memory whenever any configured feature relies on
        // virtual memory: signals-based traps, guard pages, an address-space
        // reservation, or copy-on-write initialization.
        #[cfg(has_virtual_memory)]
        if tunables.signals_based_traps
            || tunables.memory_guard_size > 0
            || tunables.memory_reservation > 0
            || tunables.memory_init_cow
        {
            // `try_new` reports allocation failure as an error instead of
            // aborting the process.
            return Ok(
                try_new::<Box<_>>(MmapMemory::new(ty, tunables, minimum, maximum)?)?
                    as Box<dyn RuntimeLinearMemory>,
            );
        }
        // `maximum` is only consumed by the mmap path above; silence the
        // unused-variable warning when that path is compiled out or skipped.
        let _ = maximum;
        // Fallback: plain heap-allocated memory (no virtual-memory features).
        Ok(
            try_new::<Box<_>>(MallocMemory::new(ty, tunables, minimum)?)?
                as Box<dyn RuntimeLinearMemory>,
        )
    }
}
/// Abstraction over a linear memory's backing storage (mmap-based,
/// malloc-based, pooled, ...).
pub trait RuntimeLinearMemory: Send + Sync {
    /// Returns the number of bytes currently accessible in this memory.
    fn byte_size(&self) -> usize;

    /// Returns the byte capacity of this allocation; growth up to this size
    /// must not move the base pointer (see the assertion in
    /// `LocalMemory::grow`).
    fn byte_capacity(&self) -> usize;

    /// Grows this memory so that at least `size` bytes are accessible.
    fn grow_to(&mut self, size: usize) -> Result<()>;

    /// Returns the base of this memory's allocation.
    fn base(&self) -> MemoryBase;

    /// Returns a `VMMemoryDefinition` describing this memory.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Updates the recorded accessible byte length without reallocating.
    ///
    /// Only invoked on the CoW-image fast path in `LocalMemory::grow`; the
    /// default implementation panics, so implementations usable with CoW
    /// images must override it.
    #[doc(hidden)]
    fn set_byte_size(&mut self, len: usize) {
        let _ = len;
        panic!("CoW images used with this memory and it doesn't support it");
    }
}
/// The base location of a linear memory's allocation.
#[derive(Clone, Debug)]
pub enum MemoryBase {
    /// A raw, non-null pointer to the start of memory.
    Raw(SendSyncPtr<u8>),

    /// A location inside an mmap, expressed as an offset so the address can
    /// be resolved lazily.
    #[cfg(has_virtual_memory)]
    Mmap(MmapOffset),
}
impl MemoryBase {
    /// Wraps a raw pointer as a `MemoryBase`.
    ///
    /// # Panics
    ///
    /// Panics if `ptr` is null.
    pub fn new_raw(ptr: *mut u8) -> Self {
        let non_null = NonNull::new(ptr).expect("pointer is non-null");
        Self::Raw(non_null.into())
    }

    /// Resolves this base to the concrete, non-null address it refers to.
    pub fn as_non_null(&self) -> NonNull<u8> {
        match self {
            Self::Raw(ptr) => ptr.as_non_null(),
            #[cfg(has_virtual_memory)]
            Self::Mmap(mmap_offset) => mmap_offset.as_non_null(),
        }
    }

    /// Convenience wrapper returning the resolved address as a raw pointer.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        NonNull::as_ptr(self.as_non_null())
    }
}
/// Runtime representation of a wasm linear memory.
pub enum Memory {
    /// A memory local to a single store (created when `ty.shared` is false).
    Local(LocalMemory),
    /// A wasm `shared` memory, usable across threads.
    Shared(SharedMemory),
}
impl Memory {
    /// Creates a new linear memory whose backing storage is allocated by
    /// `creator`.
    ///
    /// The store's resource `limiter`, if any, is consulted before the
    /// allocation. When `ty.shared` is set, the result is wrapped in a
    /// `SharedMemory`.
    pub async fn new_dynamic(
        ty: &wasmtime_environ::Memory,
        engine: &Engine,
        creator: &dyn RuntimeMemoryCreator,
        memory_image: Option<&Arc<MemoryImage>>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, limiter).await?;
        let tunables = engine.tunables();
        let allocation = creator.new_memory(ty, tunables, minimum, maximum)?;
        let memory = LocalMemory::new(ty, tunables, allocation, memory_image)?;
        Ok(if ty.shared {
            Memory::Shared(SharedMemory::wrap(engine, ty, memory)?)
        } else {
            Memory::Local(memory)
        })
    }

    /// Creates a new linear memory over a preallocated region (used by the
    /// pooling allocator).
    ///
    /// `base` and `base_capacity` describe the preallocated region, and
    /// `memory_image` is the pooling allocator's preconfigured CoW image
    /// slot for that region.
    #[cfg(feature = "pooling-allocator")]
    pub async fn new_static(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        base: MemoryBase,
        base_capacity: usize,
        memory_image: MemoryImageSlot,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, limiter).await?;
        let pooled_memory = StaticMemory::new(base, base_capacity, minimum, maximum)?;
        let allocation = Box::new(pooled_memory);
        // No image is passed to `LocalMemory::new` here; the pooling
        // allocator's own slot is installed just below instead.
        let mut memory = LocalMemory::new(ty, tunables, allocation, None)?;
        assert!(memory.memory_image.is_none());
        memory.memory_image = Some(memory_image);
        // Growth must never relocate a pooled memory out of its fixed slot.
        memory.memory_may_move = false;
        Ok(if ty.shared {
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            Memory::Local(memory)
        })
    }

    /// Converts `ty`'s minimum/maximum sizes into byte sizes, validating
    /// them against the optional store resource `limiter`.
    ///
    /// Returns `(minimum, maximum)` in bytes on success. The limiter is
    /// always notified of the allocation attempt, even when the requested
    /// minimum doesn't fit in `usize` and is doomed to fail afterwards.
    pub(crate) async fn limit_new(
        ty: &wasmtime_environ::Memory,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<(usize, Option<usize>)> {
        let page_size = usize::try_from(ty.page_size()).unwrap();
        // Stand-in size reported to the limiter when the true minimum
        // overflows `usize` (this equals `usize::MAX + 1 - page_size`).
        let absolute_max = 0usize.wrapping_sub(page_size);
        // `None` means the size can't be represented in `usize`; the error is
        // deferred until after the limiter has been consulted.
        let minimum = ty
            .minimum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());
        let maximum = ty
            .maximum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());
        if let Some(limiter) = limiter {
            if !limiter
                .memory_growing(0, minimum.unwrap_or(absolute_max), maximum)
                .await?
            {
                bail!(
                    "memory minimum size of {} pages exceeds memory limits",
                    ty.limits.min
                );
            }
        }
        // An unrepresentable minimum is unconditionally an error.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                ty.limits.min
            )
        })?;
        // Additionally verify the minimum against the type's growth limits.
        if !ty.allow_growth_to(minimum) {
            bail!(
                "memory minimum size of {} pages exceeds memory limits",
                ty.limits.min
            );
        }
        Ok((minimum, maximum))
    }

    /// Returns the size, in bytes, of each page of this memory.
    pub fn page_size(&self) -> u64 {
        self.ty().page_size()
    }

    /// Returns the number of bytes currently accessible in this memory.
    pub fn byte_size(&self) -> usize {
        match self {
            Memory::Local(mem) => mem.byte_size(),
            Memory::Shared(mem) => mem.byte_size(),
        }
    }

    /// Returns whether this memory still needs its contents initialized
    /// (i.e. it wasn't already populated from a CoW image).
    pub(crate) fn needs_init(&self) -> bool {
        match self {
            Memory::Local(mem) => mem.needs_init(),
            Memory::Shared(mem) => mem.needs_init(),
        }
    }

    /// Grows this memory by `delta_pages` pages.
    ///
    /// On success returns `Some(old_byte_size)`. Returns `Ok(None)` when the
    /// growth was denied softly (size overflow or limiter veto), and `Err`
    /// when the type-level limits reject the size or the limiter escalates a
    /// failure into an error.
    ///
    /// # Safety
    ///
    /// NOTE(review): the exact contract isn't visible from this file;
    /// presumably callers must ensure no outstanding pointers into this
    /// memory are kept live across a grow that may relocate the base —
    /// confirm against call sites.
    pub async unsafe fn grow(
        &mut self,
        delta_pages: u64,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Option<usize>, Error> {
        // Requested new size in bytes; `None` if the multiplication, the
        // `usize` conversion, or the addition overflows.
        let new_size = delta_pages
            .checked_mul(self.page_size())
            .and_then(|new_bytes| {
                let new_bytes = usize::try_from(new_bytes).ok()?;
                self.byte_size().checked_add(new_bytes)
            });
        match new_size {
            Some(new_size) => {
                // Even a representable size may violate the type's limits.
                if !self.ty().allow_growth_to(new_size) {
                    bail!(
                        "disallowing growth to {new_size:#x} bytes based on \
page size"
                    )
                }
            }
            None => {
                // Overflow: notify the limiter (which may escalate to a hard
                // error) and otherwise signal soft failure with `None`.
                if let Some(limiter) = limiter {
                    let err = crate::format_err!("memory growth exceeds address space");
                    limiter.memory_grow_failed(err)?;
                }
                return Ok(None);
            }
        }
        let result = match self {
            Memory::Local(mem) => mem.grow(delta_pages, limiter).await?,
            Memory::Shared(mem) => mem.grow(delta_pages)?,
        };
        // Callers only need the pre-grow size; the new size is discarded.
        match result {
            Some((old, _new)) => Ok(Some(old)),
            None => Ok(None),
        }
    }

    /// Returns the `VMMemoryDefinition` for this memory.
    ///
    /// NOTE(review): shared memories appear to expose their definition via
    /// `SharedMemory` itself, so this arm is expected to be unreachable —
    /// confirm at call sites.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        match self {
            Memory::Local(mem) => mem.vmmemory(),
            Memory::Shared(_) => unreachable!(),
        }
    }

    /// Consumes this memory, returning the pooling allocator's image slot.
    ///
    /// # Panics
    ///
    /// Panics if this is a shared memory or if no image slot is present.
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        match self {
            Memory::Local(mem) => mem.unwrap_static_image(),
            Memory::Shared(_) => panic!("expected a local memory"),
        }
    }

    /// Returns whether this is a shared memory.
    pub fn is_shared_memory(&self) -> bool {
        matches!(self, Memory::Shared(_))
    }

    /// Returns the underlying `SharedMemory`, if this memory is shared.
    pub fn as_shared_memory(&self) -> Option<&SharedMemory> {
        match self {
            Memory::Local(_) => None,
            Memory::Shared(mem) => Some(mem),
        }
    }

    /// Implementation of `memory.atomic.notify` for this linear memory.
    ///
    /// Non-shared memories can have no waiters, so after validating the
    /// address this reports zero threads notified.
    #[cfg(feature = "threads")]
    pub fn atomic_notify(&mut self, addr: u64, count: u32) -> Result<u32, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_notify(addr, count),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Ok(0)
            }
        }
    }

    /// Implementation of `memory.atomic.wait32` for this linear memory.
    ///
    /// Waiting on a non-shared memory traps (after the address itself has
    /// been validated).
    #[cfg(feature = "threads")]
    pub fn atomic_wait32(
        &mut self,
        addr: u64,
        expected: u32,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait32(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Implementation of `memory.atomic.wait64`; see `atomic_wait32`.
    #[cfg(feature = "threads")]
    pub fn atomic_wait64(
        &mut self,
        addr: u64,
        expected: u64,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait64(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Returns the full address range wasm may access through this memory.
    pub fn wasm_accessible(&self) -> Range<usize> {
        match self {
            Memory::Local(mem) => mem.wasm_accessible(),
            Memory::Shared(mem) => mem.wasm_accessible(),
        }
    }

    /// Returns the wasm-level type of this memory.
    fn ty(&self) -> &wasmtime_environ::Memory {
        match self {
            Memory::Local(mem) => mem.ty(),
            Memory::Shared(mem) => mem.ty(),
        }
    }
}
/// A linear memory owned by a single store (the non-`shared` case).
pub struct LocalMemory {
    /// The backing storage for this memory.
    alloc: Box<dyn RuntimeLinearMemory>,
    /// The wasm-level type of this memory.
    ty: wasmtime_environ::Memory,
    /// Whether growth is permitted to relocate the base pointer.
    memory_may_move: bool,
    /// Size, in bytes, of the guard region (from `Tunables`).
    memory_guard_size: usize,
    /// Size, in bytes, of the address-space reservation (from `Tunables`).
    memory_reservation: usize,
    /// State of the copy-on-write image mapped into this memory, if any.
    memory_image: Option<MemoryImageSlot>,
}
impl LocalMemory {
    /// Creates a new `LocalMemory` wrapping `alloc`, optionally initializing
    /// its contents from the CoW `memory_image`.
    pub fn new(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        alloc: Box<dyn RuntimeLinearMemory>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<LocalMemory> {
        let memory_image = match memory_image {
            #[cfg(has_virtual_memory)]
            Some(image) => {
                // An image slot is only created when the current byte size is
                // representable as a `HostAlignedByteCount` (presumably
                // host-page-aligned); otherwise the image is skipped and
                // ordinary initialization happens instead.
                if let Ok(byte_size) = HostAlignedByteCount::new(alloc.byte_size()) {
                    let mmap_base = match alloc.base() {
                        MemoryBase::Mmap(offset) => offset,
                        MemoryBase::Raw { .. } => {
                            unreachable!("memory_image is Some only for mmap-based memories")
                        }
                    };
                    let mut slot =
                        MemoryImageSlot::create(mmap_base, byte_size, alloc.byte_capacity());
                    slot.instantiate(alloc.byte_size(), Some(image), ty, tunables)?;
                    Some(slot)
                } else {
                    None
                }
            }
            // Without virtual memory an image can never be supplied.
            #[cfg(not(has_virtual_memory))]
            Some(_) => unreachable!(),
            None => None,
        };
        Ok(LocalMemory {
            ty: *ty,
            alloc,
            memory_may_move: ty.memory_may_move(tunables),
            memory_image,
            memory_guard_size: tunables.memory_guard_size.try_into().unwrap(),
            memory_reservation: tunables.memory_reservation.try_into().unwrap(),
        })
    }

    /// Returns the wasm-level type of this memory.
    pub fn ty(&self) -> &wasmtime_environ::Memory {
        &self.ty
    }

    /// Grows this memory by `delta_pages` pages, consulting `limiter` first
    /// if one is provided.
    ///
    /// Returns `Ok(Some((old_byte_size, new_byte_size)))` on success,
    /// `Ok(None)` on a soft failure (limiter veto, size limits, or an
    /// allocation error the limiter didn't escalate), and `Err` when the
    /// limiter escalates a failure.
    pub async fn grow(
        &mut self,
        delta_pages: u64,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let old_byte_size = self.alloc.byte_size();
        // Growing by zero pages always succeeds and changes nothing.
        if delta_pages == 0 {
            return Ok(Some((old_byte_size, old_byte_size)));
        }
        let page_size = usize::try_from(self.ty().page_size()).unwrap();
        // Largest byte size this computation is allowed to produce
        // (`usize::MAX + 1 - page_size`).
        let absolute_max = 0usize.wrapping_sub(page_size);
        // Saturating arithmetic turns an overflowing request into an
        // `absolute_max`-sized one, which then fails the limit checks below
        // instead of wrapping around.
        let new_byte_size = usize::try_from(delta_pages)
            .unwrap_or(usize::MAX)
            .saturating_mul(page_size)
            .saturating_add(old_byte_size)
            .min(absolute_max);
        let maximum = self
            .ty
            .maximum_byte_size()
            .ok()
            .and_then(|n| usize::try_from(n).ok());
        // The limiter is consulted before the limit checks so it observes
        // every growth attempt; a veto is a soft failure (`None`).
        if let Some(limiter) = &mut limiter {
            if !limiter
                .memory_growing(old_byte_size, new_byte_size, maximum)
                .await?
            {
                return Ok(None);
            }
        }
        // Record the base pointer so the in-place invariant can be asserted
        // after a successful grow.
        let base_ptr_before = self.alloc.base().as_mut_ptr();
        let required_to_not_move_memory = new_byte_size <= self.alloc.byte_capacity();
        let result = (|| -> Result<()> {
            // Enforce the wasm-level maximum, if any.
            if let Some(max) = maximum {
                if new_byte_size > max {
                    bail!("Memory maximum size exceeded");
                }
            }
            // A non-movable memory cannot grow beyond its capacity.
            if !self.memory_may_move && new_byte_size > self.alloc.byte_capacity() {
                bail!("Memory maximum size exceeded");
            }
            // CoW fast path: growth within capacity updates the image's heap
            // limit and the recorded byte size without reallocating.
            if let Some(image) = &mut self.memory_image {
                if new_byte_size <= self.alloc.byte_capacity() {
                    image.set_heap_limit(new_byte_size)?;
                    self.alloc.set_byte_size(new_byte_size);
                    return Ok(());
                }
                // Growing past capacity abandons the image; per these
                // asserts this only happens for movable memories on unix.
                assert!(cfg!(unix));
                assert!(self.memory_may_move);
                self.memory_image = None;
            }
            self.alloc.grow_to(new_byte_size)
        })();
        match result {
            Ok(()) => {
                // Growth within capacity must not have moved the base.
                if required_to_not_move_memory {
                    assert_eq!(base_ptr_before, self.alloc.base().as_mut_ptr());
                }
                Ok(Some((old_byte_size, new_byte_size)))
            }
            Err(e) => {
                // Soft failure: tell the limiter (which may escalate into a
                // hard error) and otherwise report `None`.
                if let Some(limiter) = limiter {
                    limiter.memory_grow_failed(e)?;
                }
                Ok(None)
            }
        }
    }

    /// Returns the `VMMemoryDefinition` for this memory.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        self.alloc.vmmemory()
    }

    /// Returns the number of bytes currently accessible.
    pub fn byte_size(&self) -> usize {
        self.alloc.byte_size()
    }

    /// Returns whether initializers still need to run: true unless a CoW
    /// image with actual contents is already mapped.
    pub fn needs_init(&self) -> bool {
        match &self.memory_image {
            Some(image) => !image.has_image(),
            None => true,
        }
    }

    /// Returns the full range of addresses wasm may access, covering the
    /// larger of the allocation's capacity and the configured reservation,
    /// plus the trailing guard region.
    pub fn wasm_accessible(&self) -> Range<usize> {
        let base = self.alloc.base().as_mut_ptr() as usize;
        let end =
            base + self.alloc.byte_capacity().max(self.memory_reservation) + self.memory_guard_size;
        base..end
    }

    /// Consumes this memory, returning its CoW image slot.
    ///
    /// # Panics
    ///
    /// Panics if no image slot is present.
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        self.memory_image.unwrap()
    }
}
/// Validates that the `access_size`-byte access at `addr` is naturally
/// aligned and in bounds of `def`'s current length.
///
/// Returns a pointer to the accessed location on success.
///
/// # Errors
///
/// * `Trap::HeapMisaligned` if `addr` is not a multiple of
///   `access_alignment`.
/// * `Trap::MemoryOutOfBounds` if the access extends past the memory's
///   current byte length.
#[cfg(feature = "threads")]
pub fn validate_atomic_addr(
    def: &VMMemoryDefinition,
    addr: u64,
    access_size: u64,
    access_alignment: u64,
) -> Result<*mut u8, Trap> {
    debug_assert!(access_alignment.is_power_of_two());
    // Idiom fix: direct `!=` / `>` comparisons instead of the negated
    // `!(a == b)` / `!(a <= b)` forms (clippy `nonminimal_bool`).
    if addr % access_alignment != 0 {
        return Err(Trap::HeapMisaligned);
    }
    let length = u64::try_from(def.current_length()).unwrap();
    // Saturating add prevents `addr + access_size` from wrapping around and
    // falsely passing the bounds check.
    if addr.saturating_add(access_size) > length {
        return Err(Trap::MemoryOutOfBounds);
    }
    let addr = usize::try_from(addr).unwrap();
    Ok(def.base.as_ptr().wrapping_add(addr))
}