use crate::Engine;
use crate::prelude::*;
use crate::runtime::vm::memory::{LocalMemory, MmapMemory, validate_atomic_addr};
use crate::runtime::vm::parking_spot::{ParkingSpot, Waiter};
use crate::runtime::vm::{self, Memory, VMMemoryDefinition, WaitResult};
use std::cell::RefCell;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use wasmtime_environ::Trap;
/// A wasm shared linear memory, cloneable and shareable across threads.
///
/// Cloning is cheap: every clone holds the same `Arc<SharedMemoryInner>`,
/// so all clones observe the same underlying memory, parking spot, and
/// published `VMMemoryDefinition`.
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);
/// State shared by all clones of a [`SharedMemory`].
struct SharedMemoryInner {
    // The underlying linear memory. The `RwLock` serializes structural
    // mutation (`grow`) against the read-only accessors (`byte_size`,
    // `needs_init`, `wasm_accessible`).
    memory: RwLock<LocalMemory>,
    // Park/wake state backing `memory.atomic.wait32/64` and
    // `memory.atomic.notify`.
    spot: ParkingSpot,
    // The wasm-level type of this memory (includes the `shared` flag
    // checked in `wrap`).
    ty: wasmtime_environ::Memory,
    // A `VMMemoryDefinition` snapshot whose address is stable for the
    // lifetime of the `Arc`; `vmmemory_ptr` hands out pointers into it and
    // `grow` updates its `current_length` atomically.
    def: LongTermVMMemoryDefinition,
}
impl SharedMemory {
    /// Creates a new shared memory described by `ty`, backed by a fresh
    /// mmap-based allocation sized within the engine's tunables.
    ///
    /// # Errors
    ///
    /// Returns an error if limits cannot be computed, if allocation fails,
    /// or if `wrap` rejects the configuration (see below).
    pub fn new(engine: &Engine, ty: &wasmtime_environ::Memory) -> Result<Self> {
        let tunables = engine.tunables();
        // `None` here means no store-level limiter is consulted.
        // NOTE(review): `vm::assert_ready` presumably unwraps a future that
        // cannot yield for shared memories — confirm against `vm`.
        let (minimum_bytes, maximum_bytes) = vm::assert_ready(Memory::limit_new(ty, None))?;
        let mmap_memory = MmapMemory::new(ty, tunables, minimum_bytes, maximum_bytes)?;
        // NOTE(review): `try_new` looks like a fallible-allocation helper so
        // OOM surfaces as `Err` instead of aborting — confirm its contract.
        let boxed: Box<dyn crate::runtime::vm::RuntimeLinearMemory> =
            try_new::<Box<_>>(mmap_memory)?;
        Self::wrap(engine, ty, LocalMemory::new(ty, tunables, boxed, None)?)
    }

    /// Wraps an existing `LocalMemory` as a shared memory.
    ///
    /// # Errors
    ///
    /// Fails if shared memory support is disabled in the engine's config,
    /// or if `ty` is not declared `shared`.
    pub fn wrap(
        engine: &Engine,
        ty: &wasmtime_environ::Memory,
        memory: LocalMemory,
    ) -> Result<Self> {
        if !engine.config().shared_memory {
            bail!(
                "shared memory support is disabled for this engine -- see `Config::shared_memory`"
            );
        }
        if !ty.shared {
            bail!("shared memory must have a `shared` memory type");
        }
        Ok(Self(try_new::<Arc<_>>(SharedMemoryInner {
            ty: *ty,
            spot: ParkingSpot::default(),
            // Snapshot the `VMMemoryDefinition` *before* `memory` moves
            // into the lock on the next line; the snapshot then lives as
            // long as the `Arc` itself.
            def: LongTermVMMemoryDefinition(memory.vmmemory()),
            memory: RwLock::new(memory),
        })?))
    }

    /// Returns the wasm type of this memory.
    pub fn ty(&self) -> &wasmtime_environ::Memory {
        &self.0.ty
    }

    /// Converts this shared memory into the general-purpose `Memory` enum.
    pub fn as_memory(self) -> Memory {
        Memory::Shared(self)
    }

    /// Returns a pointer to the long-lived `VMMemoryDefinition`, suitable
    /// for embedding in compiled-code data structures. The address is
    /// stable for the lifetime of the inner `Arc`.
    pub fn vmmemory_ptr(&self) -> NonNull<VMMemoryDefinition> {
        NonNull::from(&self.0.def.0)
    }

    /// Grows this memory by `delta_pages` wasm pages.
    ///
    /// On success returns `Some((old_size, new_size))` in bytes; `None`
    /// presumably indicates the grow was refused (e.g. maximum reached) —
    /// confirm against `LocalMemory::grow`.
    pub fn grow(&self, delta_pages: u64) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        let result = vm::assert_ready(memory.grow(delta_pages, None))?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Publish the new length to the shared definition so other
            // threads observe the grown size. This store happens while the
            // write lock is still held, keeping the published length
            // consistent with the lock-protected memory state.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
        }
        Ok(result)
    }

    /// Implementation of `memory.atomic.notify`: wakes up to `count`
    /// waiters parked on the 4-byte cell at `addr_index`, returning how
    /// many were woken.
    ///
    /// # Errors
    ///
    /// Traps if `addr_index` is out of bounds or not 4-byte aligned.
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        // Validate a 4-byte access with 4-byte alignment before any deref.
        let ptr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!("memory.atomic.notify(addr={addr_index:#x}, count={count})");
        // SAFETY: `validate_atomic_addr` just vetted `ptr` as in-bounds and
        // aligned. NOTE(review): soundness also assumes the backing mapping
        // stays valid while `self` is alive — confirm shared memories are
        // never unmapped early.
        let ptr = unsafe { &*ptr };
        Ok(self.0.spot.notify(ptr, count))
    }

    /// Implementation of `memory.atomic.wait32`: parks the current thread
    /// until notified, the timeout elapses, or the cell's value differs
    /// from `expected` (exact result encoded in `WaitResult`).
    ///
    /// # Errors
    ///
    /// Traps if `addr_index` is out of bounds or not 4-byte aligned.
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!(
            "memory.atomic.wait32(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );
        // These asserts guard the pointer cast below: `AtomicU32` must fit
        // exactly within the 4-byte/4-aligned region validated above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        // SAFETY: `addr` was validated as in-bounds and 4-byte aligned, and
        // the asserts above confirm `AtomicU32`'s layout requirements.
        let atomic = unsafe { AtomicU32::from_ptr(addr.cast()) };
        // Convert the relative timeout into an absolute deadline up front so
        // time spent parking doesn't extend the wait.
        let deadline = timeout.map(|d| Instant::now() + d);
        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait32(atomic, expected, deadline, &mut waiter))
        })
    }

    /// Implementation of `memory.atomic.wait64`; 8-byte analogue of
    /// [`Self::atomic_wait32`].
    ///
    /// # Errors
    ///
    /// Traps if `addr_index` is out of bounds or not 8-byte aligned.
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        log::trace!(
            "memory.atomic.wait64(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );
        // Layout guards for the `AtomicU64` cast, mirroring `atomic_wait32`.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        // SAFETY: `addr` was validated as in-bounds and 8-byte aligned, and
        // the asserts above confirm `AtomicU64`'s layout requirements.
        let atomic = unsafe { AtomicU64::from_ptr(addr.cast()) };
        let deadline = timeout.map(|d| Instant::now() + d);
        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait64(atomic, expected, deadline, &mut waiter))
        })
    }

    /// Current size of this memory in bytes (takes the read lock).
    pub(crate) fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    /// Whether this memory still requires data-segment initialization
    /// (delegates to `LocalMemory`; takes the read lock).
    pub(crate) fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    /// The wasm-addressable byte range of this memory (takes the read lock).
    pub(crate) fn wasm_accessible(&self) -> Range<usize> {
        self.0.memory.read().unwrap().wasm_accessible()
    }
}
thread_local! {
    /// Per-thread waiter handed to `ParkingSpot::wait32/64`, reused across
    /// all atomic waits performed by this thread (see `atomic_wait32/64`).
    /// `const`-initialized so first access needs no lazy-init check.
    static WAITER: RefCell<Waiter> = const { RefCell::new(Waiter::new()) };
}
/// Newtype whose sole purpose is to mark a long-lived `VMMemoryDefinition`
/// as shareable across threads (`VMMemoryDefinition` itself is not
/// `Send`/`Sync`, presumably because it contains raw pointers).
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
// SAFETY: within this module the definition is written once in `wrap` and
// afterwards only its `current_length` is touched, via atomic operations
// (see `grow`). NOTE(review): soundness also relies on the base pointer
// remaining valid for the lifetime of the owning `Arc` — confirm against
// `VMMemoryDefinition`'s invariants.
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}