use crate::sys::vm::{self, MemoryImageSource};
use crate::{MmapVec, SendSyncPtr};
use anyhow::Result;
use std::ffi::c_void;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::{
DefinedMemoryIndex, MemoryInitialization, MemoryPlan, MemoryStyle, Module, PrimaryMap,
};
/// A set of backing images for the defined memories of a module, used to
/// initialize linear memories via mapping rather than eager copying.
///
/// Built by [`ModuleMemoryImages::new`] and queried per-memory via
/// [`ModuleMemoryImages::get_memory_image`].
pub struct ModuleMemoryImages {
    // One entry per defined memory, in index order. `None` means that this
    // particular memory has no initializer and therefore no image.
    memories: PrimaryMap<DefinedMemoryIndex, Option<Arc<MemoryImage>>>,
}
impl ModuleMemoryImages {
    /// Returns the backing image for the defined memory `defined_index`, or
    /// `None` if that memory has no image.
    ///
    /// Panics if `defined_index` is out of range for this module's memories.
    pub fn get_memory_image(&self, defined_index: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
        let entry = &self.memories[defined_index];
        entry.as_ref()
    }
}
/// One backing image for one linear memory.
///
/// Describes a page-aligned region of `source` that gets mapped at a
/// page-aligned offset within a linear memory (see `map_at`).
#[derive(Debug, PartialEq)]
pub struct MemoryImage {
    /// The platform-specific source of this image's bytes (file-backed or
    /// created from raw data — see `MemoryImage::new`).
    source: MemoryImageSource,
    /// Length, in bytes, of the mapped region. Always a multiple of the host
    /// page size (asserted in `MemoryImage::new`).
    len: usize,
    /// Byte offset within `source` at which the image data begins; zero for
    /// sources built from raw data.
    source_offset: u64,
    /// Offset from the base of linear memory at which this image is mapped.
    /// Always page-aligned (asserted in `MemoryImage::new`).
    linear_memory_offset: usize,
}
impl MemoryImage {
    /// Attempts to build an image for `data`, which is destined for offset
    /// `offset` within some linear memory.
    ///
    /// Returns `Ok(None)` when no image can be built: `offset` doesn't fit
    /// in the host's `usize`, or the platform provides no suitable
    /// `MemoryImageSource`. When `mmap` is provided and `data` is a
    /// page-aligned sub-slice of a file-backed portion of it, the image maps
    /// the file directly (no copy); otherwise an image is created from the
    /// raw bytes of `data`.
    ///
    /// # Panics
    /// Panics if `offset`/`data` violate the page-alignment preconditions
    /// asserted below.
    fn new(
        page_size: u32,
        offset: u64,
        data: &[u8],
        mmap: Option<&MmapVec>,
    ) -> Result<Option<MemoryImage>> {
        let len = data.len();
        // Precondition: the image must start and end on page boundaries so it
        // can be mapped and remapped independently of surrounding memory.
        assert_eq!(offset % u64::from(page_size), 0);
        assert_eq!((len as u32) % page_size, 0);
        // A 64-bit offset that doesn't fit in `usize` (e.g. on 32-bit hosts)
        // simply means "no image"; it's not an error.
        let linear_memory_offset = match usize::try_from(offset) {
            Ok(offset) => offset,
            Err(_) => return Ok(None),
        };
        // Preferred path: if `data` lives inside a file-backed mmap, map the
        // original file directly rather than copying the bytes.
        if let Some(mmap) = mmap {
            let start = mmap.as_ptr() as usize;
            let end = start + mmap.len();
            let data_start = data.as_ptr() as usize;
            let data_end = data_start + data.len();
            // `data` must lie entirely within the mmap for the pointer
            // arithmetic below to be meaningful.
            assert!(start <= data_start && data_end <= end);
            // Every boundary involved must be page-aligned for a direct file
            // mapping. NOTE(review): the `as u32` truncation only checks the
            // low 32 bits — sound iff `page_size` is a power of two dividing
            // 2^32, which host page sizes are presumed to be; confirm.
            assert_eq!((start as u32) % page_size, 0);
            assert_eq!((data_start as u32) % page_size, 0);
            assert_eq!((data_end as u32) % page_size, 0);
            assert_eq!((mmap.original_offset() as u32) % page_size, 0);
            if let Some(file) = mmap.original_file() {
                if let Some(source) = MemoryImageSource::from_file(file) {
                    return Ok(Some(MemoryImage {
                        source,
                        // Offset into the original file = the mmap's own
                        // offset within the file plus `data`'s offset within
                        // the mmap.
                        source_offset: u64::try_from(mmap.original_offset() + (data_start - start))
                            .unwrap(),
                        linear_memory_offset,
                        len,
                    }));
                }
            }
        }
        // Fallback: build an image source from the raw bytes, if the platform
        // supports that; `source_offset` is 0 since the source contains
        // exactly the image data.
        if let Some(source) = MemoryImageSource::from_data(data)? {
            return Ok(Some(MemoryImage {
                source,
                source_offset: 0,
                linear_memory_offset,
                len,
            }));
        }
        Ok(None)
    }

    /// Maps this image into the linear memory whose base address is `base`.
    ///
    /// # Safety
    /// `base` must point to a linear-memory mapping with at least
    /// `self.linear_memory_offset + self.len` addressable bytes, suitable
    /// for the underlying `MemoryImageSource::map_at` operation.
    unsafe fn map_at(&self, base: *mut u8) -> Result<()> {
        self.source.map_at(
            base.add(self.linear_memory_offset),
            self.len,
            self.source_offset,
        )?;
        Ok(())
    }

    /// Replaces this image's range in the linear memory at `base` with
    /// zeroed memory.
    ///
    /// # Safety
    /// Same requirements as [`MemoryImage::map_at`].
    unsafe fn remap_as_zeros_at(&self, base: *mut u8) -> Result<()> {
        self.source
            .remap_as_zeros_at(base.add(self.linear_memory_offset), self.len)?;
        Ok(())
    }
}
impl ModuleMemoryImages {
    /// Builds memory images for all defined memories in `module`, whose
    /// initialization bytes live in `wasm_data` (optionally backed by the
    /// original `mmap` of the module's bytes).
    ///
    /// Returns `Ok(None)` whenever images cannot be used for this module:
    /// its memory initialization isn't static, it references an imported
    /// memory, or one of the initializers cannot become an image.
    pub fn new(
        module: &Module,
        wasm_data: &[u8],
        mmap: Option<&MmapVec>,
    ) -> Result<Option<ModuleMemoryImages>> {
        // Images only work with statically-known initialization contents.
        let MemoryInitialization::Static { map } = &module.memory_initialization else {
            return Ok(None);
        };
        let mut memories = PrimaryMap::with_capacity(map.len());
        let page_size = crate::page_size() as u32;
        for (memory_index, init) in map {
            // Imported memories can't be backed by an image.
            let Some(defined_memory) = module.defined_memory_index(memory_index) else {
                return Ok(None);
            };
            // No initializer for this memory means no image for this slot.
            let Some(init) = init else {
                memories.push(None);
                continue;
            };
            let range = init.data.start as usize..init.data.end as usize;
            let Some(image) = MemoryImage::new(page_size, init.offset, &wasm_data[range], mmap)?
            else {
                return Ok(None);
            };
            let idx = memories.push(Some(Arc::new(image)));
            // Entries must be pushed in defined-memory index order.
            assert_eq!(idx, defined_memory);
        }
        Ok(Some(ModuleMemoryImages { memories }))
    }
}
/// A slot of reserved linear memory that a `MemoryImage` can be mapped into
/// and later reset for reuse.
#[derive(Debug)]
pub struct MemoryImageSlot {
    /// Base address of this slot's memory reservation.
    base: SendSyncPtr<u8>,
    /// Total size, in bytes, of the reservation; `accessible` never exceeds
    /// this (asserted in `set_heap_limit`/`instantiate`).
    static_size: usize,
    /// The image currently mapped into this slot, if any.
    image: Option<Arc<MemoryImage>>,
    /// Number of bytes from `base` that are currently readable/writable;
    /// the rest of the reservation is protected.
    accessible: usize,
    /// True between `instantiate` and `clear_and_remain_ready`: the slot
    /// holds instance state and must be reset before reuse.
    dirty: bool,
    /// Whether `Drop` should erase the mapping back to anonymous memory.
    clear_on_drop: bool,
}
impl MemoryImageSlot {
    /// Creates a slot managing the `static_size`-byte reservation at
    /// `base_addr`, of which the first `accessible` bytes are already
    /// readable/writable.
    ///
    /// # Panics
    /// Panics if `base_addr` is null.
    pub(crate) fn create(base_addr: *mut c_void, accessible: usize, static_size: usize) -> Self {
        MemoryImageSlot {
            base: NonNull::new(base_addr.cast()).unwrap().into(),
            static_size,
            accessible,
            image: None,
            dirty: false,
            clear_on_drop: true,
        }
    }

    /// Placeholder slot with a dangling base and zero sizes; must never be
    /// used for real memory operations and must not clear anything on drop.
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn dummy() -> MemoryImageSlot {
        MemoryImageSlot {
            base: NonNull::dangling().into(),
            static_size: 0,
            image: None,
            accessible: 0,
            dirty: false,
            clear_on_drop: false,
        }
    }

    /// Disables the drop-time reset of this slot's mapping (for callers that
    /// manage the underlying memory themselves).
    pub(crate) fn no_clear_on_drop(&mut self) {
        self.clear_on_drop = false;
    }

    /// Grows the accessible portion of the slot to at least `size_bytes`.
    ///
    /// No-op when already accessible; never shrinks.
    ///
    /// # Panics
    /// Panics if `size_bytes` exceeds the slot's total reservation.
    pub(crate) fn set_heap_limit(&mut self, size_bytes: usize) -> Result<()> {
        assert!(size_bytes <= self.static_size);
        if size_bytes <= self.accessible {
            return Ok(());
        }
        // Expose only the newly-requested tail as read/write.
        self.set_protection(self.accessible..size_bytes, true)?;
        self.accessible = size_bytes;
        Ok(())
    }

    /// Prepares this slot for a fresh instantiation with the given initial
    /// heap size and optional memory image.
    ///
    /// # Panics
    /// Panics if the slot is dirty (not reset since the last instantiation),
    /// if `initial_size_bytes` exceeds the reservation, or if the image does
    /// not fit within the initial heap size.
    pub(crate) fn instantiate(
        &mut self,
        initial_size_bytes: usize,
        maybe_image: Option<&Arc<MemoryImage>>,
        plan: &MemoryPlan,
    ) -> Result<()> {
        assert!(!self.dirty);
        assert!(initial_size_bytes <= self.static_size);
        // If a different image is currently mapped, unmap it first so the
        // slot's contents are plain zeroes before any new image goes in.
        if self.image.as_ref() != maybe_image {
            self.remove_image()?;
        }
        // Grow accessibility up to the requested initial size if needed.
        if self.accessible < initial_size_bytes {
            self.set_protection(self.accessible..initial_size_bytes, true)?;
            self.accessible = initial_size_bytes;
        }
        // Shrink accessibility back down when the memory plan relies on
        // faulting accesses (guard pages, or static-style memories);
        // otherwise extra accessible memory is left as-is.
        if initial_size_bytes < self.accessible
            && (plan.offset_guard_size > 0 || matches!(plan.style, MemoryStyle::Static { .. }))
        {
            self.set_protection(initial_size_bytes..self.accessible, false)?;
            self.accessible = initial_size_bytes;
        }
        assert!(initial_size_bytes <= self.accessible);
        // Map the new image if it differs from what's already in place (a
        // matching image from a prior instantiation is reused as-is).
        if self.image.as_ref() != maybe_image {
            if let Some(image) = maybe_image.as_ref() {
                // The image must lie entirely within the initial heap.
                assert!(
                    image.linear_memory_offset.checked_add(image.len).unwrap()
                        <= initial_size_bytes
                );
                if image.len > 0 {
                    unsafe {
                        image.map_at(self.base.as_ptr())?;
                    }
                }
            }
            self.image = maybe_image.cloned();
        }
        // The slot now carries instance state until the next reset.
        self.dirty = true;
        Ok(())
    }

    /// Unmaps the current image, if any, replacing its range with zeroes.
    pub(crate) fn remove_image(&mut self) -> Result<()> {
        if let Some(image) = &self.image {
            unsafe {
                image.remap_as_zeros_at(self.base.as_ptr())?;
            }
            self.image = None;
        }
        Ok(())
    }

    /// Resets this slot's memory to its post-instantiation state (zeroes
    /// plus image) so it can be reused, then clears the dirty flag.
    ///
    /// Up to `keep_resident` bytes are reset eagerly via memset while the
    /// remainder is handed back to the kernel (see
    /// `reset_all_memory_contents`).
    // `allow(dead_code)`: not referenced in every platform/feature config.
    #[allow(dead_code)]
    pub(crate) fn clear_and_remain_ready(&mut self, keep_resident: usize) -> Result<()> {
        assert!(self.dirty);
        unsafe {
            self.reset_all_memory_contents(keep_resident)?;
        }
        self.dirty = false;
        Ok(())
    }

    /// Restores the accessible region to zeroes-plus-image, memsetting up to
    /// `keep_resident` bytes (keeping those pages resident) and using
    /// `madvise`-style resets for the rest.
    ///
    /// The image's own range is always reset via madvise — never memset —
    /// since a memset would destroy the image contents rather than restore
    /// them.
    ///
    /// # Safety
    /// `self.base..self.base + self.accessible` must be a valid writable
    /// mapping.
    // `allow(dead_code)`: not referenced in every platform/feature config.
    #[allow(dead_code)]
    unsafe fn reset_all_memory_contents(&mut self, keep_resident: usize) -> Result<()> {
        if !vm::supports_madvise_dontneed() {
            // Without madvise support, rebuild the slot from scratch as an
            // anonymous mapping instead.
            return self.reset_with_anon_memory();
        }
        match &self.image {
            Some(image) => {
                // Invariant established by `instantiate`: the image fits
                // inside the accessible region.
                assert!(self.accessible >= image.linear_memory_offset + image.len);
                if image.linear_memory_offset < keep_resident {
                    // Layout handled here:
                    //   [memset][image: madvise][memset][madvise]
                    // Zero the bytes before the image, madvise the image
                    // back to its source contents, then spend the rest of
                    // the `keep_resident` budget zeroing after the image
                    // and madvise whatever remains.
                    let image_end = image.linear_memory_offset + image.len;
                    let mem_after_image = self.accessible - image_end;
                    let remaining_memset =
                        (keep_resident - image.linear_memory_offset).min(mem_after_image);
                    std::ptr::write_bytes(self.base.as_ptr(), 0u8, image.linear_memory_offset);
                    self.madvise_reset(image.linear_memory_offset, image.len)?;
                    std::ptr::write_bytes(self.base.as_ptr().add(image_end), 0u8, remaining_memset);
                    self.madvise_reset(
                        image_end + remaining_memset,
                        mem_after_image - remaining_memset,
                    )?;
                } else {
                    // The image starts beyond the memset budget: zero the
                    // first `keep_resident` bytes, madvise the rest (which
                    // also restores the image range's contents).
                    std::ptr::write_bytes(self.base.as_ptr(), 0u8, keep_resident);
                    self.madvise_reset(keep_resident, self.accessible - keep_resident)?;
                }
            }
            None => {
                // No image: zero the resident budget, madvise the remainder.
                let size_to_memset = keep_resident.min(self.accessible);
                std::ptr::write_bytes(self.base.as_ptr(), 0u8, size_to_memset);
                self.madvise_reset(size_to_memset, self.accessible - size_to_memset)?;
            }
        }
        Ok(())
    }

    /// Hands `len` bytes at slot-relative offset `base` back to the kernel,
    /// restoring the backing mapping's original contents on next access.
    ///
    /// # Safety
    /// The range must lie within this slot's valid mapping (also asserted
    /// against `self.accessible`).
    // `allow(dead_code)`: not referenced in every platform/feature config.
    #[allow(dead_code)]
    unsafe fn madvise_reset(&self, base: usize, len: usize) -> Result<()> {
        assert!(base + len <= self.accessible);
        if len == 0 {
            return Ok(());
        }
        vm::madvise_dontneed(self.base.as_ptr().add(base), len)?;
        Ok(())
    }

    /// Changes the protection of `range` (slot-relative byte offsets) to
    /// read/write (`readwrite == true`) or inaccessible (`false`).
    ///
    /// # Panics
    /// Panics if the range is inverted or extends past the reservation.
    fn set_protection(&self, range: Range<usize>, readwrite: bool) -> Result<()> {
        assert!(range.start <= range.end);
        assert!(range.end <= self.static_size);
        if range.len() == 0 {
            return Ok(());
        }
        unsafe {
            let start = self.base.as_ptr().add(range.start);
            if readwrite {
                vm::expose_existing_mapping(start, range.len())?;
            } else {
                vm::hide_existing_mapping(start, range.len())?;
            }
        }
        Ok(())
    }

    /// Whether an image is currently mapped into this slot.
    pub(crate) fn has_image(&self) -> bool {
        self.image.is_some()
    }

    /// Whether the slot holds instance state and needs a reset before reuse.
    // `allow(dead_code)`: not referenced in every platform/feature config.
    #[allow(dead_code)]
    pub(crate) fn is_dirty(&self) -> bool {
        self.dirty
    }

    /// Erases the whole reservation back to a fresh anonymous mapping,
    /// dropping any image and making the slot fully inaccessible.
    fn reset_with_anon_memory(&mut self) -> Result<()> {
        if self.static_size == 0 {
            // Dummy slots (see `dummy`) have nothing to erase.
            assert!(self.image.is_none());
            assert_eq!(self.accessible, 0);
            return Ok(());
        }
        unsafe {
            vm::erase_existing_mapping(self.base.as_ptr(), self.static_size)?;
        }
        self.image = None;
        self.accessible = 0;
        Ok(())
    }
}
impl Drop for MemoryImageSlot {
    /// Scrubs the slot back to a clean anonymous mapping on drop, unless
    /// the caller opted out via `no_clear_on_drop`.
    fn drop(&mut self) {
        if !self.clear_on_drop {
            return;
        }
        self.reset_with_anon_memory().unwrap();
    }
}
// Tests for memory-image slots. Linux-only (they rely on the madvise-based
// reset path) and excluded under miri since they exercise real mmaps.
#[cfg(all(test, target_os = "linux", not(miri)))]
mod test {
    use std::sync::Arc;

    use super::{MemoryImage, MemoryImageSlot, MemoryImageSource, MemoryPlan, MemoryStyle};
    use crate::mmap::Mmap;
    use anyhow::Result;
    use wasmtime_environ::Memory;

    // Builds a data-backed image containing `data` at page-aligned `offset`,
    // rounding the image length up to a whole number of pages.
    fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result<MemoryImage> {
        let page_size = crate::page_size();
        assert_eq!(offset & (page_size - 1), 0);
        let image_len = (data.len() + page_size - 1) & !(page_size - 1);
        Ok(MemoryImage {
            source: MemoryImageSource::from_data(data)?.unwrap(),
            len: image_len,
            source_offset: 0,
            linear_memory_offset: offset,
        })
    }

    // Minimal memory plan with the given style and no guard pages.
    fn dummy_memory_plan(style: MemoryStyle) -> MemoryPlan {
        MemoryPlan {
            style,
            memory: Memory {
                minimum: 0,
                maximum: None,
                shared: false,
                memory64: false,
            },
            pre_guard_size: 0,
            offset_guard_size: 0,
        }
    }

    // Instantiation without an image: memory starts zeroed, writes stick,
    // growth exposes zeroed pages, and a reset re-zeroes everything.
    #[test]
    fn instantiate_no_image() {
        let plan = dummy_memory_plan(MemoryStyle::Static { bound: 4 << 30 });
        let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
        let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
        memfd.no_clear_on_drop();
        assert!(!memfd.is_dirty());
        memfd.instantiate(64 << 10, None, &plan).unwrap();
        assert!(memfd.is_dirty());
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(0, slice[0]);
        assert_eq!(0, slice[65535]);
        slice[1024] = 42;
        assert_eq!(42, slice[1024]);
        // Growing the heap must preserve earlier writes and zero new pages.
        memfd.set_heap_limit(128 << 10).unwrap();
        let slice = unsafe { mmap.slice(0..1 << 20) };
        assert_eq!(42, slice[1024]);
        assert_eq!(0, slice[131071]);
        // Reset and re-instantiate: the earlier write must be gone.
        memfd.clear_and_remain_ready(0).unwrap();
        assert!(!memfd.is_dirty());
        memfd.instantiate(64 << 10, None, &plan).unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(0, slice[1024]);
    }

    // Instantiation with images: image bytes appear, writes are undone by
    // reset, and switching between images (or to none) takes effect.
    #[test]
    fn instantiate_image() {
        let plan = dummy_memory_plan(MemoryStyle::Static { bound: 4 << 30 });
        let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
        let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
        memfd.no_clear_on_drop();
        let image = Arc::new(create_memfd_with_data(4096, &[1, 2, 3, 4]).unwrap());
        memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
        assert!(memfd.has_image());
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        slice[4096] = 5;
        // Reset + same image: original image contents restored.
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        // Reset + no image: image range reads as zeroes.
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(64 << 10, None, &plan).unwrap();
        assert!(!memfd.has_image());
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[0, 0, 0, 0], &slice[4096..4100]);
        // Back to the first image.
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        // Swap to a different image at the same offset.
        let image2 = Arc::new(create_memfd_with_data(4096, &[10, 11, 12, 13]).unwrap());
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(128 << 10, Some(&image2), &plan).unwrap();
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[10, 11, 12, 13], &slice[4096..4100]);
        // And back again to the first image.
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
        let slice = unsafe { mmap.slice_mut(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
    }

    // Exercises the memset-vs-madvise split in `clear_and_remain_ready` for
    // various image offsets and `keep_resident` budgets, including budgets
    // larger than the accessible region.
    #[test]
    #[cfg(target_os = "linux")]
    fn memset_instead_of_madvise() {
        let plan = dummy_memory_plan(MemoryStyle::Static { bound: 100 });
        let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
        let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
        memfd.no_clear_on_drop();
        // Image before, at, and after typical memset budgets.
        for image_off in [0, 4096, 8 << 10] {
            let image = Arc::new(create_memfd_with_data(image_off, &[1, 2, 3, 4]).unwrap());
            for amt_to_memset in [0, 4096, 10 << 12, 1 << 20, 10 << 20] {
                memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
                assert!(memfd.has_image());
                let slice = unsafe { mmap.slice_mut(0..64 << 10) };
                if image_off > 0 {
                    assert_eq!(slice[image_off - 1], 0);
                }
                assert_eq!(slice[image_off + 5], 0);
                assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
                slice[image_off] = 5;
                assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);
                memfd.clear_and_remain_ready(amt_to_memset).unwrap();
            }
        }
        // Same budgets with no image: every chunk must come back zeroed
        // after each reset even though each iteration dirties it.
        for amt_to_memset in [0, 4096, 10 << 12, 1 << 20, 10 << 20] {
            memfd.instantiate(64 << 10, None, &plan).unwrap();
            let mem = unsafe { mmap.slice_mut(0..64 << 10) };
            for chunk in mem.chunks_mut(1024) {
                assert_eq!(chunk[0], 0);
                chunk[0] = 5;
            }
            memfd.clear_and_remain_ready(amt_to_memset).unwrap();
        }
    }

    // Dynamic-style memories (no guard pages): memory grown past the initial
    // size stays accessible across resets but is re-zeroed.
    #[test]
    #[cfg(target_os = "linux")]
    fn dynamic() {
        let plan = dummy_memory_plan(MemoryStyle::Dynamic { reserve: 200 });
        let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
        let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
        memfd.no_clear_on_drop();
        let image = Arc::new(create_memfd_with_data(4096, &[1, 2, 3, 4]).unwrap());
        let initial = 64 << 10;
        memfd.instantiate(initial, Some(&image), &plan).unwrap();
        assert!(memfd.has_image());
        let slice = unsafe { mmap.slice_mut(0..(64 << 10) + 4096) };
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        slice[4096] = 5;
        assert_eq!(&[5, 2, 3, 4], &slice[4096..4100]);
        // Reset restores image bytes in place.
        memfd.clear_and_remain_ready(0).unwrap();
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        memfd.instantiate(initial, Some(&image), &plan).unwrap();
        assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
        // Grow past `initial`; the new region is zeroed and writable.
        memfd.set_heap_limit(initial * 2).unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        slice[initial] = 100;
        assert_eq!(&[100, 0], &slice[initial..initial + 2]);
        // Reset zeroes the grown region too.
        memfd.clear_and_remain_ready(0).unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        memfd.instantiate(initial, Some(&image), &plan).unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        memfd.set_heap_limit(initial * 2).unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        slice[initial] = 100;
        assert_eq!(&[100, 0], &slice[initial..initial + 2]);
        // Final reset + no-image instantiation: everything zeroed.
        memfd.clear_and_remain_ready(0).unwrap();
        memfd.instantiate(64 << 10, None, &plan).unwrap();
        assert!(!memfd.has_image());
        assert_eq!(&[0, 0, 0, 0], &slice[4096..4100]);
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
    }
}