Struct wasmtime_runtime::SharedMemory
pub struct SharedMemory(_);
For shared memory (and only for shared memory), this lock-version restricts access when growing the memory or checking its size. This is to conform with the thread proposal: “When IsSharedArrayBuffer(...) is true, the return value should be the result of an atomic read-modify-write of the new size to the internal length slot.”
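The following is a minimal, self-contained sketch of that pattern using hypothetical names (SharedLength is not a wasmtime type): growth is serialized behind a write lock while the committed size is published through a SeqCst atomic that other threads can read without taking the lock.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;

// Hypothetical stand-in for the real implementation: the base pointer is
// fixed for the memory's lifetime, growth is serialized by the lock, and
// the length is published atomically so a size check on another thread is
// an atomic read of the latest committed size.
struct SharedLength {
    grow_lock: RwLock<usize>,    // guards growth (stands in for the wrapped memory)
    current_length: AtomicUsize, // read by other threads without the lock
}

impl SharedLength {
    fn grow(&self, delta_bytes: usize) -> usize {
        let mut len = self.grow_lock.write().unwrap();
        *len += delta_bytes;
        // Publish the new size; concurrent readers see either the old or
        // the new length, never a torn value.
        self.current_length.store(*len, Ordering::SeqCst);
        *len
    }

    fn size(&self) -> usize {
        // The atomic read required by the thread proposal's wording.
        self.current_length.load(Ordering::SeqCst)
    }
}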
Implementations§
pub fn new(plan: MemoryPlan) -> Result<Self>

Construct a new SharedMemory.
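A hedged usage sketch follows, assuming wasmtime_environ's MemoryPlan::for_memory helper and default Tunables (which select a static-style plan); field names follow wasmtime_environ::Memory and may differ across versions.

use wasmtime_environ::{Memory, MemoryPlan, Tunables};
use wasmtime_runtime::SharedMemory;

fn build_shared_memory() -> anyhow::Result<SharedMemory> {
    // Hedged sketch: shared memories must be declared `shared` and carry a
    // maximum so the allocation can be static and never moved.
    let ty = Memory {
        minimum: 1,        // initial size, in wasm pages
        maximum: Some(16), // required for shared memories
        shared: true,
        memory64: false,
    };
    let plan = MemoryPlan::for_memory(ty, &Tunables::default());
    SharedMemory::new(plan)
}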
pub fn wrap(
    plan: &MemoryPlan,
    memory: Box<dyn RuntimeLinearMemory>,
    ty: Memory
) -> Result<Self>
Wrap an existing Memory with the locking provided by a SharedMemory.
Examples found in repository:
    pub fn new(plan: MemoryPlan) -> Result<Self> {
        let (minimum_bytes, maximum_bytes) = Memory::limit_new(&plan, None)?;
        let mmap_memory = MmapMemory::new(&plan, minimum_bytes, maximum_bytes, None)?;
        Self::wrap(&plan, Box::new(mmap_memory), plan.memory)
    }

    /// Wrap an existing [Memory] with the locking provided by a [SharedMemory].
    pub fn wrap(
        plan: &MemoryPlan,
        mut memory: Box<dyn RuntimeLinearMemory>,
        ty: wasmtime_environ::Memory,
    ) -> Result<Self> {
        if !ty.shared {
            bail!("shared memory must have a `shared` memory type");
        }
        if !matches!(plan.style, MemoryStyle::Static { .. }) {
            bail!("shared memory can only be built from a static memory allocation")
        }
        assert!(
            memory.as_any_mut().type_id() != std::any::TypeId::of::<SharedMemory>(),
            "cannot re-wrap a shared memory"
        );
        Ok(Self(Arc::new(SharedMemoryInner {
            ty,
            spot: ParkingSpot::default(),
            def: LongTermVMMemoryDefinition(memory.vmmemory()),
            memory: RwLock::new(memory),
        })))
    }
    /// Return the memory type for this [`SharedMemory`].
    pub fn ty(&self) -> wasmtime_environ::Memory {
        self.0.ty
    }

    /// Convert this shared memory into a [`Memory`].
    pub fn as_memory(self) -> Memory {
        Memory(Box::new(self))
    }

    /// Return a pointer to the shared memory's [VMMemoryDefinition].
    pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition {
        &self.0.def.0
    }

    /// Same as `RuntimeLinearMemory::grow`, except with `&self`.
    pub fn grow(
        &self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        let result = memory.grow(delta_pages, store)?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Store the new size to the `VMMemoryDefinition` for JIT-generated
            // code (and runtime functions) to access. No other code can be
            // growing this memory due to the write lock, but code in other
            // threads could have access to this shared memory and we want them
            // to see the most consistent version of the `current_length`; a
            // weaker consistency is possible if we accept them seeing an older,
            // smaller memory size (assumption: memory only grows) but presently
            // we are aiming for accuracy.
            //
            // Note that it could be possible to access a memory address that is
            // now-valid due to changes to the page flags in `grow` above but
            // beyond the `memory.size` that we are about to assign to. In these
            // and similar cases, discussion in the thread proposal concluded
            // that: "multiple accesses in one thread racing with another
            // thread's `memory.grow` that are in-bounds only after the grow
            // commits may independently succeed or trap" (see
            // https://github.com/WebAssembly/threads/issues/26#issuecomment-433930711).
            // In other words, some non-determinism is acceptable when using
            // `memory.size` on work being done by `memory.grow`.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
        }
        Ok(result)
    }

    /// Implementation of `memory.atomic.notify` for this shared memory.
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        Ok(self.0.spot.unpark(addr_index, count))
    }

    /// Implementation of `memory.atomic.wait32` for this shared memory.
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        let atomic = unsafe { &*(addr as *const AtomicU32) };
        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;
        Ok(self.0.spot.park(addr_index, validate, timeout))
    }

    /// Implementation of `memory.atomic.wait64` for this shared memory.
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        let atomic = unsafe { &*(addr as *const AtomicU64) };
        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;
        Ok(self.0.spot.park(addr_index, validate, timeout))
    }
}
/// Shared memory needs some representation of a `VMMemoryDefinition` for
/// JIT-generated code to access. This structure owns the base pointer and
/// length to the actual memory and we share this definition across threads
/// by:
/// - never changing the base pointer; according to the specification, shared
///   memory must be created with a known maximum size so it can be allocated
///   once and never moved
/// - carefully changing the length, using atomic accesses in both the
///   runtime and JIT-generated code.
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}
/// Proxy all calls through the [`RwLock`].
impl RuntimeLinearMemory for SharedMemory {
    fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        self.0.memory.read().unwrap().maximum_byte_size()
    }

    fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        SharedMemory::grow(self, delta_pages, store)
    }

    fn grow_to(&mut self, size: usize) -> Result<()> {
        self.0.memory.write().unwrap().grow_to(size)
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        // `vmmemory()` is used for writing the `VMMemoryDefinition` of a
        // memory into its `VMContext`; this should never be possible for a
        // shared memory because the only `VMMemoryDefinition` for it should
        // be stored in its own `def` field.
        unreachable!()
    }

    fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}
/// Representation of a runtime wasm linear memory.
pub struct Memory(Box<dyn RuntimeLinearMemory>);

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(
        plan: &MemoryPlan,
        creator: &dyn RuntimeMemoryCreator,
        store: &mut dyn Store,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?;
        let allocation = if plan.memory.shared {
            Box::new(SharedMemory::wrap(plan, allocation, plan.memory)?)
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }
pub fn ty(&self) -> Memory

Return the memory type for this SharedMemory.
pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition

Return a pointer to the shared memory’s VMMemoryDefinition.
Examples found in repository:
unsafe fn initialize_vmctx(
    &mut self,
    module: &Module,
    offsets: &VMOffsets<HostPtr>,
    store: StorePtr,
    imports: Imports,
) {
    assert!(std::ptr::eq(module, self.module().as_ref()));

    *self.vmctx_plus_offset(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
    self.set_callee(None);
    self.set_store(store.as_raw());

    // Initialize shared signatures
    let signatures = self.runtime_info.signature_ids();
    *self.vmctx_plus_offset(offsets.vmctx_signature_ids_array()) = signatures.as_ptr();

    // Initialize the built-in functions
    *self.vmctx_plus_offset(offsets.vmctx_builtin_functions()) = &VMBuiltinFunctionsArray::INIT;

    // Initialize the imports
    debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
    ptr::copy_nonoverlapping(
        imports.functions.as_ptr(),
        self.vmctx_plus_offset(offsets.vmctx_imported_functions_begin()),
        imports.functions.len(),
    );
    debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
    ptr::copy_nonoverlapping(
        imports.tables.as_ptr(),
        self.vmctx_plus_offset(offsets.vmctx_imported_tables_begin()),
        imports.tables.len(),
    );
    debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
    ptr::copy_nonoverlapping(
        imports.memories.as_ptr(),
        self.vmctx_plus_offset(offsets.vmctx_imported_memories_begin()),
        imports.memories.len(),
    );
    debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
    ptr::copy_nonoverlapping(
        imports.globals.as_ptr(),
        self.vmctx_plus_offset(offsets.vmctx_imported_globals_begin()),
        imports.globals.len(),
    );

    // N.B.: there is no need to initialize the anyfuncs array because
    // we eagerly construct each element in it whenever asked for a
    // reference to that element. In other words, there is no state
    // needed to track the lazy-init, so we don't need to initialize
    // any state now.

    // Initialize the defined tables
    let mut ptr = self.vmctx_plus_offset(offsets.vmctx_tables_begin());
    for i in 0..module.table_plans.len() - module.num_imported_tables {
        ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
        ptr = ptr.add(1);
    }

    // Initialize the defined memories. This fills in both the
    // `defined_memories` table and the `owned_memories` table at the same
    // time. Entries in `defined_memories` hold a pointer to a definition
    // (all memories) whereas the `owned_memories` hold the actual
    // definitions of memories owned (not shared) in the module.
    let mut ptr = self.vmctx_plus_offset(offsets.vmctx_memories_begin());
    let mut owned_ptr = self.vmctx_plus_offset(offsets.vmctx_owned_memories_begin());
    for i in 0..module.memory_plans.len() - module.num_imported_memories {
        let defined_memory_index = DefinedMemoryIndex::new(i);
        let memory_index = module.memory_index(defined_memory_index);
        if module.memory_plans[memory_index].memory.shared {
            let def_ptr = self.memories[defined_memory_index]
                .as_shared_memory()
                .unwrap()
                .vmmemory_ptr();
            ptr::write(ptr, def_ptr.cast_mut());
        } else {
            ptr::write(owned_ptr, self.memories[defined_memory_index].vmmemory());
            ptr::write(ptr, owned_ptr);
            owned_ptr = owned_ptr.add(1);
        }
        ptr = ptr.add(1);
    }

    // Initialize the defined globals
    self.initialize_vmctx_globals(module);
}
pub fn grow(
    &self,
    delta_pages: u64,
    store: Option<&mut dyn Store>
) -> Result<Option<(usize, usize)>, Error>

Same as RuntimeLinearMemory::grow, except with &self.
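Since the lock lives inside the handle, a clone of the SharedMemory on another thread can grow the memory through &self. A hedged sketch (grow_from_another_thread is illustrative, not part of the API; it assumes the handle may cross threads, which is the type's purpose):

use wasmtime_runtime::SharedMemory;

fn grow_from_another_thread(memory: &SharedMemory) -> anyhow::Result<()> {
    // Hedged sketch: clone the handle and grow by one wasm page with no
    // store limiter (`None`). The internal write lock serializes growers;
    // other threads observe the new `current_length` atomically once the
    // grow commits.
    let handle = memory.clone();
    let grower = std::thread::spawn(move || handle.grow(1, None));
    // On success, `grow` returns the old and new sizes in bytes.
    let _old_and_new = grower.join().unwrap()?;
    Ok(())
}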
pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap>

Implementation of memory.atomic.notify for this shared memory.
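A hedged sketch pairing atomic_notify with atomic_wait32 (documented below); it assumes the 4-byte value at address 0 is currently 0 and that cloned handles may cross threads:

use std::time::{Duration, Instant};
use wasmtime_runtime::SharedMemory;

fn wait_then_notify(memory: &SharedMemory) {
    // Hedged sketch: the waiter parks only while the value at address 0
    // still equals `expected` (0); the timeout guards against the notify
    // racing ahead of the park.
    let waiter = memory.clone();
    let t = std::thread::spawn(move || {
        waiter.atomic_wait32(0, 0, Some(Instant::now() + Duration::from_secs(1)))
    });
    // Wake at most one waiter parked on address 0; returns how many woke.
    let _woken = memory.atomic_notify(0, 1).expect("address 0 is in bounds and aligned");
    // The waiter returns a WaitResult: woken, a value mismatch, or a timeout.
    let _result = t.join().unwrap().expect("address 0 is in bounds and aligned");
}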
pub fn atomic_wait32(
    &self,
    addr_index: u64,
    expected: u32,
    timeout: Option<Instant>
) -> Result<WaitResult, Trap>

Implementation of memory.atomic.wait32 for this shared memory.
Examples found in repository:
pub fn atomic_wait32(
    &mut self,
    addr: u64,
    expected: u32,
    deadline: Option<Instant>,
) -> Result<WaitResult, Trap> {
    match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
        Some(m) => m.atomic_wait32(addr, expected, deadline),
        None => {
            validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
            Err(Trap::AtomicWaitNonSharedMemory)
        }
    }
}
pub fn atomic_wait64(
    &self,
    addr_index: u64,
    expected: u64,
    timeout: Option<Instant>
) -> Result<WaitResult, Trap>

Implementation of memory.atomic.wait64 for this shared memory.
Examples found in repository:
pub fn atomic_wait64(
    &mut self,
    addr: u64,
    expected: u64,
    deadline: Option<Instant>,
) -> Result<WaitResult, Trap> {
    match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
        Some(m) => m.atomic_wait64(addr, expected, deadline),
        None => {
            validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
            Err(Trap::AtomicWaitNonSharedMemory)
        }
    }
}
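In both waiters, the validate closure is what makes check-then-park race-free. A conceptual stand-in for the internal ParkingSpot, built on std primitives (hypothetical, not wasmtime's implementation):

use std::sync::{Condvar, Mutex};

// Conceptual stand-in: `validate` is re-checked while holding the spot's
// mutex, so a store plus notify on another thread cannot slip between the
// check and the sleep; the waiter either returns immediately (a mismatch)
// or is reliably woken.
struct Spot {
    generation: Mutex<u64>, // bumped on every notify
    cvar: Condvar,
}

impl Spot {
    fn park(&self, validate: impl Fn() -> bool) {
        let mut gen = self.generation.lock().unwrap();
        if !validate() {
            return; // value no longer equals `expected`: a mismatch result
        }
        let seen = *gen;
        while *gen == seen {
            gen = self.cvar.wait(gen).unwrap(); // releases the lock while asleep
        }
    }

    fn unpark_all(&self) {
        *self.generation.lock().unwrap() += 1;
        self.cvar.notify_all();
    }
}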
Trait Implementations§

impl Clone for SharedMemory

fn clone(&self) -> SharedMemory
Returns a copy of the value. Read more

1.0.0 · fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more

impl RuntimeLinearMemory for SharedMemory
Proxy all calls through the RwLock.

fn maximum_byte_size(&self) -> Option<usize>
Returns the maximum number of bytes the memory can grow to; None if the memory is unbounded.

fn grow(
    &mut self,
    delta_pages: u64,
    store: Option<&mut dyn Store>
) -> Result<Option<(usize, usize)>, Error>
Grows the memory by delta_pages. Read more

fn grow_to(&mut self, size: usize) -> Result<()>

fn vmmemory(&mut self) -> VMMemoryDefinition
Return a VMMemoryDefinition for exposing the memory to compiled wasm code.

fn needs_init(&self) -> bool
Returns whether this memory still needs initialization; it may not if it already has initial contents thanks to a MemoryImage passed to RuntimeMemoryCreator::new_memory().