use crate::alloc::{host_page_size, instance_heap_offset, Alloc, Limits, Slot};
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::{new_instance_handle, Instance, InstanceHandle};
use crate::module::Module;
use crate::region::{Region, RegionCreate, RegionInternal};
use libc::c_void;
#[cfg(not(target_os = "linux"))]
use libc::memset;
use nix::sys::mman::{madvise, mmap, munmap, MapFlags, MmapAdvise, ProtFlags};
use std::ptr;
use std::sync::{Arc, RwLock, Weak};
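/// A `Region` backed by `mmap`.
///
/// Each slot is a single anonymous mapping laid out as the `Instance`
/// structure, the heap, a guard page, the stack, the globals, another guard
/// page, and the signal stack (see `create_slot` below). Only the `Instance`
/// area is made read/write up front; every other segment starts `PROT_NONE`
/// and is made accessible as instances are created and their heaps grow.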
pub struct MmapRegion {
capacity: usize,
freelist: RwLock<Vec<Slot>>,
limits: Limits,
min_heap_alignment: usize,
}
impl Region for MmapRegion {
fn free_slots(&self) -> usize {
self.freelist.read().unwrap().len()
}
fn used_slots(&self) -> usize {
self.capacity() - self.free_slots()
}
fn capacity(&self) -> usize {
self.capacity
}
}
impl RegionInternal for MmapRegion {
fn new_instance_with(
&self,
module: Arc<dyn Module>,
embed_ctx: CtxMap,
) -> Result<InstanceHandle, Error> {
let slot = self
.freelist
.write()
.unwrap()
.pop()
.ok_or(Error::RegionFull(self.capacity))?;
if slot.heap as usize % host_page_size() != 0 {
lucet_bail!("heap is not page-aligned; this is a bug");
}
let limits = &slot.limits;
module.validate_runtime_spec(limits)?;
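// Make the stack, globals, and signal stack read/write. The heap is left
// `PROT_NONE` here; its initial pages become accessible when the heap is
// initialized, and more as it grows (see `expand_heap`).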
for (ptr, len) in [
(slot.stack, limits.stack_size),
(slot.globals, limits.globals_size),
(slot.sigstack, limits.signal_stack_size),
]
.iter()
{
unsafe { mprotect(*ptr, *len, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE)? };
}
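// The `Instance` structure lives at the very start of the slot's mapping.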
let inst_ptr = slot.start as *mut Instance;
let region = slot
.region
.upgrade()
.expect("backing region of slot (`self`) exists");
let alloc = Alloc {
heap_accessible_size: 0,
heap_inaccessible_size: slot.limits.heap_address_space_size,
slot: Some(slot),
region,
};
let inst = new_instance_handle(inst_ptr, module, alloc, embed_ctx)?;
Ok(inst)
}
fn drop_alloc(&self, alloc: &mut Alloc) {
let slot = alloc
.slot
.take()
.expect("alloc didn't have a slot during drop; dropped twice?");
if slot.heap as usize % host_page_size() != 0 {
panic!("heap is not page-aligned");
}
for (ptr, len) in [
(slot.heap, alloc.heap_accessible_size),
(slot.stack, slot.limits.stack_size),
(slot.globals, slot.limits.globals_size),
(slot.sigstack, slot.limits.signal_stack_size),
]
.iter()
{
unsafe {
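// Hosts other than Linux do not guarantee that MADV_DONTNEED leaves
// anonymous pages zeroed, so zero them by hand while they are writable.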
#[cfg(not(target_os = "linux"))]
{
mprotect(*ptr, *len, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE)
.expect("mprotect succeeds during drop");
memset(*ptr, 0, *len);
}
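// Seal the segment back to PROT_NONE and let the OS reclaim the backing
// pages; on Linux, MADV_DONTNEED also makes anonymous private pages read
// back as zero the next time they are touched.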
mprotect(*ptr, *len, ProtFlags::PROT_NONE).expect("mprotect succeeds during drop");
madvise(*ptr, *len, MmapAdvise::MADV_DONTNEED)
.expect("madvise succeeds during drop");
}
}
self.freelist.write().unwrap().push(slot);
}
fn expand_heap(&self, slot: &Slot, start: u32, len: u32) -> Result<(), Error> {
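// Make the newly requested stretch of heap pages read/write.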
unsafe {
mprotect(
(slot.heap as usize + start as usize) as *mut c_void,
len as usize,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)?;
}
Ok(())
}
fn reset_heap(&self, alloc: &mut Alloc, module: &dyn Module) -> Result<(), Error> {
let heap = alloc.slot().heap;
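// Drop any existing heap contents: seal the entire heap address space back
// to PROT_NONE and tell the OS the backing pages can be reclaimed.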
if alloc.heap_accessible_size > 0 {
let heap_size = alloc.slot().limits.heap_address_space_size;
unsafe {
#[cfg(not(target_os = "linux"))]
{
mprotect(
heap,
alloc.heap_accessible_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)?;
memset(heap, 0, alloc.heap_accessible_size);
}
mprotect(heap, heap_size, ProtFlags::PROT_NONE)?;
madvise(heap, heap_size, MmapAdvise::MADV_DONTNEED)?;
}
}
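// Make the heap's initial pages, as specified by the module, accessible
// again.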
let initial_size = module
.heap_spec()
.map(|h| h.initial_size as usize)
.unwrap_or(0);
if initial_size > 0 {
unsafe {
mprotect(
heap,
initial_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)?
};
}
alloc.heap_accessible_size = initial_size;
alloc.heap_inaccessible_size = alloc.slot().limits.heap_address_space_size - initial_size;
let heap = unsafe { alloc.heap_mut() };
// `checked_div` would only catch a zero page size, so check divisibility
// explicitly before computing the page count.
if initial_size % host_page_size() != 0 {
return Err(lucet_incorrect_module!(
"initial heap size {} is not divisible by host page size ({})",
initial_size,
host_page_size()
));
}
let initial_pages = initial_size / host_page_size();
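// Copy the module's sparse page data into the fresh heap; pages without an
// entry are left as zeroes.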
for page_num in 0..initial_pages {
let page_base = page_num * host_page_size();
if heap.len() < page_base + host_page_size() {
return Err(lucet_incorrect_module!(
"sparse page data length exceeded initial heap size"
));
}
if let Some(contents) = module.get_sparse_page_data(page_num) {
heap[page_base..page_base + host_page_size()].copy_from_slice(contents);
}
}
Ok(())
}
fn as_dyn_internal(&self) -> &dyn RegionInternal {
self
}
}
impl Drop for MmapRegion {
fn drop(&mut self) {
for slot in self.freelist.get_mut().unwrap().drain(0..) {
Self::free_slot(slot);
}
}
}
impl RegionCreate for MmapRegion {
const TYPE_NAME: &'static str = "MmapRegion";
fn create(instance_capacity: usize, limits: &Limits) -> Result<Arc<Self>, Error> {
MmapRegion::create(instance_capacity, limits)
}
}
impl MmapRegion {
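/// Create a new `MmapRegion` with `instance_capacity` slots, each laid out
/// according to `limits`.
///
/// All slots are mapped up front with `PROT_NONE`, so the region reserves
/// address space but commits little physical memory until instances run.
///
/// A minimal usage sketch (assuming the `lucet_runtime_internals` crate
/// layout and the `Default` impl on `Limits`):
///
/// ```no_run
/// use lucet_runtime_internals::alloc::Limits;
/// use lucet_runtime_internals::region::mmap::MmapRegion;
/// use lucet_runtime_internals::region::Region;
///
/// let region = MmapRegion::create(4, &Limits::default()).unwrap();
/// assert_eq!(region.free_slots(), 4);
/// ```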
pub fn create(instance_capacity: usize, limits: &Limits) -> Result<Arc<Self>, Error> {
limits.validate()?;
let region = Arc::new(MmapRegion {
capacity: instance_capacity,
freelist: RwLock::new(Vec::with_capacity(instance_capacity)),
limits: limits.clone(),
min_heap_alignment: 0,
});
{
let mut freelist = region.freelist.write().unwrap();
for _ in 0..instance_capacity {
freelist.push(MmapRegion::create_slot(®ion)?);
}
}
Ok(region)
}
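/// Create a new `MmapRegion`, as in `create`, but with each slot's heap
/// aligned to `heap_alignment` bytes, which must be a nonzero power of two.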
pub fn create_aligned(
instance_capacity: usize,
limits: &Limits,
heap_alignment: usize,
) -> Result<Arc<Self>, Error> {
limits.validate()?;
// `usize::is_power_of_two` also rejects zero, which the manual
// `x & (x - 1)` trick would miss (and underflow on in debug builds).
if !heap_alignment.is_power_of_two() {
return Err(Error::InvalidArgument(
"heap_alignment must be a power of 2",
));
}
let region = Arc::new(MmapRegion {
capacity: instance_capacity,
freelist: RwLock::new(Vec::with_capacity(instance_capacity)),
limits: limits.clone(),
min_heap_alignment: heap_alignment,
});
{
let mut freelist = region.freelist.write().unwrap();
for _ in 0..instance_capacity {
freelist.push(MmapRegion::create_slot(®ion)?);
}
}
Ok(region)
}
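/// Map a single slot's worth of address space and carve it into segments.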
fn create_slot(region: &Arc<MmapRegion>) -> Result<Slot, Error> {
let mem = if region.min_heap_alignment == 0 {
unsafe {
mmap(
ptr::null_mut(),
region.limits.total_memory_size(),
ProtFlags::PROT_NONE,
MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
-1, // fd is ignored for MAP_ANON mappings; -1 is the portable convention
0,  // offset
)?
}
} else {
unsafe {
mmap_aligned(
region.limits.total_memory_size(),
ProtFlags::PROT_NONE,
MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
region.min_heap_alignment,
instance_heap_offset(),
)?
}
};
unsafe {
mprotect(
mem,
instance_heap_offset(),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)?
};
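// Carve the mapping into segments: a guard page separates the heap from
// the stack, and another separates the globals from the signal stack.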
let heap = mem as usize + instance_heap_offset();
let stack = heap + region.limits.heap_address_space_size + host_page_size();
let globals = stack + region.limits.stack_size;
let sigstack = globals + region.limits.globals_size + host_page_size();
Ok(Slot {
start: mem,
heap: heap as *mut c_void,
stack: stack as *mut c_void,
globals: globals as *mut c_void,
sigstack: sigstack as *mut c_void,
limits: region.limits.clone(),
region: Arc::downgrade(region) as Weak<dyn RegionInternal>,
})
}
fn free_slot(slot: Slot) {
let res = unsafe { munmap(slot.start, slot.limits.total_memory_size()) };
res.expect("munmap succeeds");
}
}
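/// `mmap` a `requested_length`-byte anonymous mapping such that
/// `addr + alignment_offset` is a multiple of `alignment`.
///
/// This over-allocates by `alignment + alignment_offset` bytes, picks a
/// suitably aligned sub-range, and then `munmap`s the unused pages at the
/// front and back of the padded mapping.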
unsafe fn mmap_aligned(
requested_length: usize,
prot: ProtFlags,
flags: MapFlags,
alignment: usize,
alignment_offset: usize,
) -> Result<*mut c_void, Error> {
let addr = ptr::null_mut();
// An fd of -1 is the portable convention for anonymous mappings.
let fd = -1;
let offset = 0;
// Over-allocate so that some `alignment`-aligned, `requested_length`-byte
// range (shifted back by `alignment_offset`) must fall inside the mapping.
let padded_length = requested_length + alignment + alignment_offset;
let unaligned = mmap(addr, padded_length, prot, flags, fd, offset)? as usize;
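// Round up to the next multiple of `alignment`, then step back by
// `alignment_offset` (adding one `alignment` if that would land before the
// start of the mapping) so that `aligned + alignment_offset` is aligned.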
let aligned_nonoffset = (unaligned + (alignment - 1)) & !(alignment - 1);
let aligned = if aligned_nonoffset - alignment_offset >= unaligned {
aligned_nonoffset - alignment_offset
} else {
aligned_nonoffset - alignment_offset + alignment
};
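// Sanity-check that the chosen range really is aligned and falls entirely
// within the padded mapping.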
if aligned < unaligned
|| (aligned + (requested_length - 1)) > (unaligned + (padded_length - 1))
|| (aligned + alignment_offset) % alignment != 0
{
let _ = munmap(unaligned as *mut c_void, padded_length);
return Err(Error::Unsupported("Could not align memory".to_string()));
}
{
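// Unmap the unused pages in front of the aligned start.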
let unused_front = aligned - unaligned;
if unused_front != 0 {
if munmap(unaligned as *mut c_void, unused_front).is_err() {
let _ = munmap(unaligned as *mut c_void, padded_length);
return Err(Error::Unsupported("Could not align memory".to_string()));
}
}
}
{
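// Unmap the unused pages past the end of the requested range.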
let unused_back = (unaligned + (padded_length - 1)) - (aligned + (requested_length - 1));
if unused_back != 0 {
if munmap((aligned + requested_length) as *mut c_void, unused_back).is_err() {
let _ = munmap(unaligned as *mut c_void, padded_length);
return Err(Error::Unsupported("Could not align memory".to_string()));
}
}
}
Ok(aligned as *mut c_void)
}
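/// A raw wrapper around `libc::mprotect` that reports failures as a
/// `nix::Result`.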
unsafe fn mprotect(addr: *mut c_void, length: libc::size_t, prot: ProtFlags) -> nix::Result<()> {
nix::errno::Errno::result(libc::mprotect(addr, length, prot.bits())).map(drop)
}
#[cfg(test)]
mod tests {
use super::*;
use nix::sys::mman::{munmap, MapFlags, ProtFlags};
#[test]
fn test_aligned_mem() {
let kb: usize = 1024;
let mb: usize = 1024 * kb;
struct TestProps {
pub mem_size: usize,
pub mem_align: usize,
pub offset: usize,
}
let tests = vec![
TestProps {
mem_size: 1 * mb,
mem_align: 1 * mb,
offset: 0,
},
TestProps {
mem_size: 1 * mb,
mem_align: 2 * mb,
offset: 0,
},
TestProps {
mem_size: 32 * mb,
mem_align: 32 * mb,
offset: 0,
},
TestProps {
mem_size: 32 * mb,
mem_align: 32 * mb,
offset: 4 * kb,
},
];
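// For each case: map, check that `addr + offset` has the requested
// alignment, touch every byte to verify the whole range is mapped, then
// unmap.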
for test in tests {
let mem = unsafe {
mmap_aligned(
test.mem_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
test.mem_align,
test.offset,
)
.unwrap()
};
let actual_align = ((mem as usize) + test.offset) % test.mem_align;
assert_eq!(actual_align, 0);
let mem_slice =
unsafe { std::slice::from_raw_parts_mut(mem as *mut u8, test.mem_size) };
for loc in mem_slice {
*loc = 1;
}
unsafe {
munmap(mem, test.mem_size).unwrap();
}
}
}
}