pub trait RuntimeLinearMemory: Send + Sync {
    fn byte_size(&self) -> usize;
    fn maximum_byte_size(&self) -> Option<usize>;
    fn grow_to(&mut self, size: usize) -> Result<()>;
    fn vmmemory(&mut self) -> VMMemoryDefinition;
    fn needs_init(&self) -> bool;
    fn as_any_mut(&mut self) -> &mut dyn Any;

    fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> { ... }
}
A linear memory.
Required Methods

fn byte_size(&self) -> usize

Returns the number of allocated bytes.

fn maximum_byte_size(&self) -> Option<usize>

Returns the maximum number of bytes the memory can grow to. Returns None if
the memory is unbounded.
fn grow_to(&mut self, size: usize) -> Result<()>

Grow memory to the specified number of bytes. Returns an error if the memory
can't be grown by the specified number of bytes.
fn vmmemory(&mut self) -> VMMemoryDefinition

Return a VMMemoryDefinition for exposing the memory to compiled wasm code.
fn needs_init(&self) -> bool

Does this memory need initialization? It may not if it already has initial
contents courtesy of the MemoryImage passed to
RuntimeMemoryCreator::new_memory().
fn as_any_mut(&mut self) -> &mut dyn Any

Used for optional dynamic downcasting.
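To make the contract concrete, below is a minimal sketch of an implementation.
The trait, Result alias, and VMMemoryDefinition here are simplified local
stand-ins so the sketch compiles on its own; the real wasmtime_runtime
definitions differ (in particular, current_length is updated atomically for
shared memories), and wasmtime's own implementations are mmap-backed rather
than Vec-backed.

use std::any::Any;

// Local stand-ins so this sketch is self-contained; the real definitions
// live in `wasmtime_runtime` and are more involved.
struct VMMemoryDefinition {
    base: *mut u8,
    current_length: usize,
}
type Result<T> = std::result::Result<T, String>;

trait RuntimeLinearMemory: Send + Sync {
    fn byte_size(&self) -> usize;
    fn maximum_byte_size(&self) -> Option<usize>;
    fn grow_to(&mut self, size: usize) -> Result<()>;
    fn vmmemory(&mut self) -> VMMemoryDefinition;
    fn needs_init(&self) -> bool;
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

/// A host-owned, heap-backed linear memory.
struct VecMemory {
    bytes: Vec<u8>,
    maximum: Option<usize>,
}

impl RuntimeLinearMemory for VecMemory {
    fn byte_size(&self) -> usize {
        self.bytes.len()
    }
    fn maximum_byte_size(&self) -> Option<usize> {
        self.maximum
    }
    fn grow_to(&mut self, size: usize) -> Result<()> {
        // The provided `grow` method has already range-checked `size`;
        // reallocate and zero-extend, since wasm expects new pages zeroed.
        if self.maximum.map_or(false, |max| size > max) {
            return Err(format!("cannot grow to {size} bytes"));
        }
        self.bytes.resize(size, 0);
        Ok(())
    }
    fn vmmemory(&mut self) -> VMMemoryDefinition {
        VMMemoryDefinition {
            base: self.bytes.as_mut_ptr(),
            current_length: self.bytes.len(),
        }
    }
    fn needs_init(&self) -> bool {
        true // no copy-on-write image backs this memory
    }
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}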
Provided Methods

fn grow(
    &mut self,
    delta_pages: u64,
    store: Option<&mut dyn Store>,
) -> Result<Option<(usize, usize)>, Error>
Grows a memory by delta_pages.

This performs the necessary checks on the growth before delegating to the
underlying grow_to implementation. A default implementation of this method is
provided here since the logic is assumed to be the same for most kinds of
memory; one exception is shared memory, which must perform all the steps of
the default implementation plus the required locking. The store is used only
for error reporting.
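The shape of those checks can be sketched as follows. This illustrates the
bounds logic only and is not the actual default implementation: WASM_PAGE_SIZE
and grow_checked are local names, errors are simplified to strings, and the
store error-reporting path as well as the final grow_to call are elided into
comments.

/// Illustrative sketch of the checks `grow` makes before delegating to
/// `grow_to`.
const WASM_PAGE_SIZE: usize = 64 * 1024; // 64 KiB, per the core wasm spec

fn grow_checked(
    current_byte_size: usize,
    maximum_byte_size: Option<usize>,
    delta_pages: u64,
) -> Result<Option<(usize, usize)>, String> {
    // Growing by zero pages is a no-op that always succeeds.
    if delta_pages == 0 {
        return Ok(Some((current_byte_size, current_byte_size)));
    }
    // Compute the requested size in bytes; arithmetic overflow is a
    // wasm-visible failure (`memory.grow` returns -1), reported as `None`.
    let new_byte_size = match delta_pages
        .checked_mul(WASM_PAGE_SIZE as u64)
        .and_then(|delta| u64::try_from(current_byte_size).ok()?.checked_add(delta))
        .and_then(|total| usize::try_from(total).ok())
    {
        Some(size) => size,
        None => return Ok(None),
    };
    // Exceeding the declared maximum is likewise a wasm-visible failure,
    // not a trap or host error.
    if maximum_byte_size.map_or(false, |max| new_byte_size > max) {
        return Ok(None);
    }
    // At this point the real trait method calls `self.grow_to(new_byte_size)`
    // (which may return `Err` for genuine host failures) and only then
    // reports `Ok(Some((old, new)))`.
    Ok(Some((current_byte_size, new_byte_size)))
}

A short usage of the sketch shows the three-way outcome the return type
encodes:

fn main() {
    const PAGE: usize = WASM_PAGE_SIZE;
    // Growing a 1-page memory by 1 page within a 2-page maximum succeeds and
    // reports the (old, new) sizes in bytes.
    assert_eq!(grow_checked(PAGE, Some(2 * PAGE), 1), Ok(Some((PAGE, 2 * PAGE))));
    // Exceeding the maximum yields `Ok(None)`, i.e. -1 to wasm.
    assert_eq!(grow_checked(PAGE, Some(2 * PAGE), 2), Ok(None));
}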
Examples found in repository
    pub fn grow(
        &self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        let result = memory.grow(delta_pages, store)?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Store the new size to the `VMMemoryDefinition` for JIT-generated
            // code (and runtime functions) to access. No other code can be
            // growing this memory due to the write lock, but code in other
            // threads could have access to this shared memory and we want them
            // to see the most consistent version of the `current_length`; a
            // weaker consistency is possible if we accept them seeing an older,
            // smaller memory size (assumption: memory only grows) but presently
            // we are aiming for accuracy.
            //
            // Note that it could be possible to access a memory address that is
            // now-valid due to changes to the page flags in `grow` above but
            // beyond the `memory.size` that we are about to assign to. In these
            // and similar cases, discussion in the thread proposal concluded
            // that: "multiple accesses in one thread racing with another
            // thread's `memory.grow` that are in-bounds only after the grow
            // commits may independently succeed or trap" (see
            // https://github.com/WebAssembly/threads/issues/26#issuecomment-433930711).
            // In other words, some non-determinism is acceptable when using
            // `memory.size` on work being done by `memory.grow`.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
        }
        Ok(result)
    }
    /// Implementation of `memory.atomic.notify` for this shared memory.
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        Ok(self.0.spot.unpark(addr_index, count))
    }

    /// Implementation of `memory.atomic.wait32` for this shared memory.
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        let atomic = unsafe { &*(addr as *const AtomicU32) };

        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;
        Ok(self.0.spot.park(addr_index, validate, timeout))
    }

    /// Implementation of `memory.atomic.wait64` for this shared memory.
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        let atomic = unsafe { &*(addr as *const AtomicU64) };

        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;
        Ok(self.0.spot.park(addr_index, validate, timeout))
    }
}
/// Shared memory needs some representation of a `VMMemoryDefinition` for
/// JIT-generated code to access. This structure owns the base pointer and
/// length to the actual memory and we share this definition across threads by:
/// - never changing the base pointer; according to the specification, shared
///   memory must be created with a known maximum size so it can be allocated
///   once and never moved
/// - carefully changing the length, using atomic accesses in both the runtime
///   and JIT-generated code.
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}
/// Proxy all calls through the [`RwLock`].
impl RuntimeLinearMemory for SharedMemory {
    fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        self.0.memory.read().unwrap().maximum_byte_size()
    }

    fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        SharedMemory::grow(self, delta_pages, store)
    }

    fn grow_to(&mut self, size: usize) -> Result<()> {
        self.0.memory.write().unwrap().grow_to(size)
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        // `vmmemory()` is used for writing the `VMMemoryDefinition` of a memory
        // into its `VMContext`; this should never be possible for a shared
        // memory because the only `VMMemoryDefinition` for it should be stored
        // in its own `def` field.
        unreachable!()
    }

    fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}
/// Representation of a runtime wasm linear memory.
pub struct Memory(Box<dyn RuntimeLinearMemory>);

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(
        plan: &MemoryPlan,
        creator: &dyn RuntimeMemoryCreator,
        store: &mut dyn Store,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?;
        let allocation = if plan.memory.shared {
            Box::new(SharedMemory::wrap(plan, allocation, plan.memory)?)
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }

    /// Create a new static (immovable) memory instance for the specified plan.
    pub fn new_static(
        plan: &MemoryPlan,
        base: &'static mut [u8],
        memory_image: MemoryImageSlot,
        store: &mut dyn Store,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let pooled_memory = StaticMemory::new(base, minimum, maximum, memory_image)?;
        let allocation = Box::new(pooled_memory);
        let allocation: Box<dyn RuntimeLinearMemory> = if plan.memory.shared {
            // FIXME: since the pooling allocator owns the memory allocation
            // (which is torn down with the instance), the current shared memory
            // implementation will cause problems; see
            // https://github.com/bytecodealliance/wasmtime/issues/4244.
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }
    /// Calls the `store`'s limiter to optionally prevent a memory from being allocated.
    ///
    /// Returns the minimum size and optional maximum size of the memory, in
    /// bytes.
    fn limit_new(
        plan: &MemoryPlan,
        store: Option<&mut dyn Store>,
    ) -> Result<(usize, Option<usize>)> {
        // Sanity-check what should already be true from wasm module validation.
        let absolute_max = if plan.memory.memory64 {
            WASM64_MAX_PAGES
        } else {
            WASM32_MAX_PAGES
        };
        assert!(plan.memory.minimum <= absolute_max);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= absolute_max);

        // This is the absolute possible maximum that the module can try to
        // allocate, which is our entire address space minus a wasm page. That
        // shouldn't ever actually work in terms of an allocation because
        // presumably the kernel wants *something* for itself, but this is used
        // to pass to the `store`'s limiter for a requested size to approximate
        // the scale of the request that the wasm module is making. This is
        // necessary because the limiter works on `usize` bytes whereas we're
        // working with possibly-overflowing `u64` calculations here. To
        // actually faithfully represent the byte requests of modules we'd have
        // to represent things as `u128`, but that's kinda overkill for this
        // purpose.
        let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE);

        // If the minimum memory size overflows the size of our own address
        // space, then we can't satisfy this request, but defer the error to
        // later so the `store` can be informed that an effective oom is
        // happening.
        let minimum = plan
            .memory
            .minimum
            .checked_mul(WASM_PAGE_SIZE_U64)
            .and_then(|m| usize::try_from(m).ok());

        // The plan stores the maximum size in units of wasm pages, but we
        // use units of bytes. Unlike for the `minimum` size we silently clamp
        // the effective maximum size to `absolute_max` above if the maximum is
        // too large. This should be ok since as a wasm runtime we get to
        // arbitrarily decide the actual maximum size of memory, regardless of
        // what's actually listed on the memory itself.
        let mut maximum = plan.memory.maximum.map(|max| {
            usize::try_from(max)
                .ok()
                .and_then(|m| m.checked_mul(WASM_PAGE_SIZE))
                .unwrap_or(absolute_max)
        });

        // If this is a 32-bit memory and no maximum is otherwise listed then we
        // need to still specify a maximum size of 4GB. If the host platform is
        // 32-bit then there's no need to limit the maximum this way since no
        // allocation of 4GB can succeed, but for 64-bit platforms this is
        // required to limit memories to 4GB.
        if !plan.memory.memory64 && maximum.is_none() {
            maximum = usize::try_from(1u64 << 32).ok();
        }

        // Inform the store's limiter what's about to happen. This will let the
        // limiter reject anything if necessary, and this also guarantees that
        // we should call the limiter for all requested memories, even if our
        // `minimum` calculation overflowed. This means that the `minimum` we're
        // informing the limiter of is lossy and may not be 100% accurate, but
        // for now the expected uses of the limiter mean that's ok.
        if let Some(store) = store {
            // We ignore the store limits for shared memories since they are
            // technically not created within a store (though, trickily, they
            // may be associated with one in order to get a `vmctx`).
            if !plan.memory.shared {
                if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
                    bail!(
                        "memory minimum size of {} pages exceeds memory limits",
                        plan.memory.minimum
                    );
                }
            }
        }

        // At this point we need to actually handle overflows, so bail out with
        // an error if we made it this far.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                plan.memory.minimum
            )
        })?;

        Ok((minimum, maximum))
    }
    /// Returns the number of allocated bytes.
    pub fn byte_size(&self) -> usize {
        self.0.byte_size()
    }

    /// Returns the maximum number of bytes the memory can grow to at runtime.
    ///
    /// Returns `None` if the memory is unbounded.
    ///
    /// The runtime maximum may not be equal to the maximum from the linear
    /// memory's Wasm type when it is being constrained by an instance
    /// allocator.
    pub fn maximum_byte_size(&self) -> Option<usize> {
        self.0.maximum_byte_size()
    }
    /// Returns whether or not this memory needs initialization. It
    /// may not if it already has initial content thanks to a CoW
    /// mechanism.
    pub(crate) fn needs_init(&self) -> bool {
        self.0.needs_init()
    }
    /// Grow memory by the specified number of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number
    /// of wasm pages. Returns `Some` with the old size of memory, in bytes, on
    /// successful growth.
    ///
    /// # Safety
    ///
    /// Resizing the memory can reallocate the memory buffer for dynamic memories.
    /// An instance's `VMContext` may have pointers to the memory's base and will
    /// need to be fixed up after growing the memory.
    ///
    /// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
    /// this unsafety.
    ///
    /// Ensure that the provided `Store` is not used to access any `Memory`
    /// that lives inside it.
    pub unsafe fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<usize>, Error> {
        self.0
            .grow(delta_pages, store)
            .map(|opt| opt.map(|(old, _new)| old))
    }
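The validate_atomic_addr helper invoked by the atomic methods above is not
shown in this excerpt. Conceptually it must reject unaligned or out-of-bounds
accesses before the raw pointer is formed, since the `memory.atomic.*`
instructions trap in both cases. A simplified stand-in of that check, with a
local VMMemoryDefinition stand-in and string errors instead of wasmtime's Trap
type, might look like:

use std::sync::atomic::{AtomicUsize, Ordering};

// Simplified stand-in for `VMMemoryDefinition`; the real struct lives in
// `wasmtime_runtime` and is shared with JIT-generated code.
struct VMMemoryDefinition {
    base: *mut u8,
    current_length: AtomicUsize,
}

/// Sketch of the check `validate_atomic_addr` must perform: an access of
/// `access_size` bytes at `addr_index` has to be naturally aligned and in
/// bounds of the memory's current length.
fn validate_atomic_addr(
    def: &VMMemoryDefinition,
    addr_index: u64,
    access_size: u64,
    access_alignment: u64,
) -> Result<*mut u8, String> {
    debug_assert!(access_alignment.is_power_of_two());
    if addr_index % access_alignment != 0 {
        return Err(String::from("unaligned atomic operation"));
    }
    // Checked add so `addr_index + access_size` cannot wrap around.
    let length = def.current_length.load(Ordering::SeqCst) as u64;
    match addr_index.checked_add(access_size) {
        Some(end) if end <= length => Ok(def.base.wrapping_add(addr_index as usize)),
        _ => Err(String::from("out of bounds memory access")),
    }
}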
Implementors

impl RuntimeLinearMemory for SharedMemory

Proxy all calls through the RwLock.