use crate::error::Error;
use crate::module::Module;
use crate::region::RegionInternal;
use libc::c_void;
use lucet_module::GlobalValue;
use nix::unistd::{sysconf, SysconfVar};
use std::sync::{Arc, Once, Weak};
/// The host page size that all memory-layout arithmetic in this module assumes.
pub const HOST_PAGE_SIZE_EXPECTED: usize = 4096;
static HOST_PAGE_SIZE_INIT: Once = Once::new();

/// Returns the host's page size, verifying (once) that it equals
/// [`HOST_PAGE_SIZE_EXPECTED`].
///
/// Because the function refuses to run at all on any other page size, the
/// verified value is simply the constant; no mutable global cache (and thus no
/// `unsafe`) is required.
///
/// # Panics
///
/// Panics if `sysconf(PAGE_SIZE)` fails, or if it reports a page size other
/// than 4096 — the slot layout in this module is only valid for 4 KiB pages.
pub fn host_page_size() -> usize {
    // One-time check; `Once` guarantees the sysconf call and any panic happen
    // exactly once, and poisons itself so later callers also panic.
    HOST_PAGE_SIZE_INIT.call_once(|| match sysconf(SysconfVar::PAGE_SIZE) {
        Ok(Some(sz)) => {
            if sz as usize != HOST_PAGE_SIZE_EXPECTED {
                panic!(
                    "host page size was {}; expected {}",
                    sz, HOST_PAGE_SIZE_EXPECTED
                );
            }
        }
        _ => panic!("could not get host page size from sysconf"),
    });
    HOST_PAGE_SIZE_EXPECTED
}
/// Byte offset from the start of an instance mapping to the start of its heap.
///
/// Exactly one host page is reserved ahead of the heap.
pub fn instance_heap_offset() -> usize {
    host_page_size()
}
/// The raw base addresses of the memory regions making up one allocation slot,
/// together with the `Limits` it was created under and a weak backreference to
/// the owning region.
#[repr(C)]
pub struct Slot {
    /// Base address of the entire slot mapping.
    pub start: *mut c_void,
    /// Base address of the instance heap.
    pub heap: *mut c_void,
    /// Base address of the stack (its top is `stack + limits.stack_size`).
    pub stack: *mut c_void,
    /// Base address of the globals area.
    pub globals: *mut c_void,
    /// Base address of the signal-handler stack.
    pub sigstack: *mut c_void,
    /// The limits this slot was sized with.
    pub limits: Limits,
    /// Weak handle back to the region that owns this slot.
    pub region: Weak<dyn RegionInternal>,
}

// SAFETY: NOTE(review): `Send`/`Sync` are asserted because the raw pointers
// describe slot memory presumably owned and synchronized by the region, not
// aliased host objects — confirm against the region implementations.
unsafe impl Send for Slot {}
unsafe impl Sync for Slot {}
impl Slot {
    /// Returns the address one byte past the end of the stack mapping,
    /// i.e. `stack + limits.stack_size`.
    pub fn stack_top(&self) -> *mut c_void {
        let base = self.stack as usize;
        let top = base + self.limits.stack_size;
        top as *mut c_void
    }
}
/// The memory allocation backing one instance: a slot checked out of a region,
/// plus bookkeeping for how much of the heap is currently accessible.
pub struct Alloc {
    /// Bytes of heap currently mapped accessible, from the heap base.
    pub heap_accessible_size: usize,
    /// Bytes of reserved-but-inaccessible heap address space remaining.
    pub heap_inaccessible_size: usize,
    /// The backing slot; `None` only transiently (see `Alloc::slot`).
    pub slot: Option<Slot>,
    /// The region this allocation was made from; used to expand/reset/drop it.
    pub region: Arc<dyn RegionInternal>,
}
impl Drop for Alloc {
    fn drop(&mut self) {
        // Return the slot to the owning region. The `Arc` is cloned first so
        // `drop_alloc` can take `&mut self` without simultaneously borrowing
        // `self.region`.
        self.region.clone().drop_alloc(self);
    }
}
/// Classification of an address relative to an instance's memory layout,
/// as computed by `Alloc::addr_location`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AddrLocation {
    /// Within the currently accessible heap.
    Heap,
    /// Within the reserved-but-inaccessible tail of the heap address space.
    InaccessibleHeap,
    /// Within the guard page immediately below the stack.
    StackGuard,
    /// Within the stack.
    Stack,
    /// Within the globals area.
    Globals,
    /// Within the guard page immediately below the signal stack.
    SigStackGuard,
    /// Within the signal-handler stack.
    SigStack,
    /// Not within any region of the instance allocation.
    Unknown,
}
impl AddrLocation {
pub fn is_fault_fatal(self) -> bool {
use AddrLocation::*;
match self {
SigStackGuard | Unknown => true,
_ => false,
}
}
}
impl Alloc {
pub fn addr_location(&self, addr: *const c_void) -> AddrLocation {
let addr = addr as usize;
let heap_start = self.slot().heap as usize;
let heap_inaccessible_start = heap_start + self.heap_accessible_size;
let heap_inaccessible_end = heap_start + self.slot().limits.heap_address_space_size;
if (addr >= heap_start) && (addr < heap_inaccessible_start) {
return AddrLocation::Heap;
}
if (addr >= heap_inaccessible_start) && (addr < heap_inaccessible_end) {
return AddrLocation::InaccessibleHeap;
}
let stack_start = self.slot().stack as usize;
let stack_end = stack_start + self.slot().limits.stack_size;
let stack_guard_start = stack_start - host_page_size();
if (addr >= stack_guard_start) && (addr < stack_start) {
return AddrLocation::StackGuard;
}
if (addr >= stack_start) && (addr < stack_end) {
return AddrLocation::Stack;
}
let globals_start = self.slot().globals as usize;
let globals_end = globals_start + self.slot().limits.globals_size;
if (addr >= globals_start) && (addr < globals_end) {
return AddrLocation::Globals;
}
let sigstack_start = self.slot().sigstack as usize;
let sigstack_end = sigstack_start + self.slot().limits.signal_stack_size;
let sigstack_guard_start = sigstack_start - host_page_size();
if (addr >= sigstack_guard_start) && (addr < sigstack_start) {
return AddrLocation::SigStackGuard;
}
if (addr >= sigstack_start) && (addr < sigstack_end) {
return AddrLocation::SigStack;
}
AddrLocation::Unknown
}
pub fn expand_heap(&mut self, expand_bytes: u32, module: &dyn Module) -> Result<u32, Error> {
let slot = self.slot();
if expand_bytes == 0 {
return Ok(self.heap_accessible_size as u32);
}
let host_page_size = host_page_size() as u32;
if self.heap_accessible_size as u32 % host_page_size != 0 {
lucet_bail!("heap is not page-aligned; this is a bug");
}
if expand_bytes > std::u32::MAX - host_page_size - 1 {
bail_limits_exceeded!("expanded heap would overflow address space");
}
let expand_pagealigned =
((expand_bytes + host_page_size - 1) / host_page_size) * host_page_size;
if expand_pagealigned as usize > self.heap_inaccessible_size {
bail_limits_exceeded!("expanded heap would overflow addressable memory");
}
let guard_remaining = self.heap_inaccessible_size - expand_pagealigned as usize;
if let Some(heap_spec) = module.heap_spec() {
if guard_remaining < heap_spec.guard_size as usize {
bail_limits_exceeded!("expansion would leave guard memory too small");
}
if let Some(max_size) = heap_spec.max_size {
if self.heap_accessible_size + expand_pagealigned as usize > max_size as usize {
bail_limits_exceeded!(
"expansion would exceed module-specified heap limit: {:?}",
max_size
);
}
}
} else {
return Err(Error::NoLinearMemory("cannot expand heap".to_owned()));
}
if self.heap_accessible_size + expand_pagealigned as usize > slot.limits.heap_memory_size {
bail_limits_exceeded!(
"expansion would exceed runtime-specified heap limit: {:?}",
slot.limits
);
}
let newly_accessible = self.heap_accessible_size;
self.region
.clone()
.expand_heap(slot, newly_accessible as u32, expand_pagealigned)?;
self.heap_accessible_size += expand_pagealigned as usize;
self.heap_inaccessible_size -= expand_pagealigned as usize;
Ok(newly_accessible as u32)
}
pub fn reset_heap(&mut self, module: &dyn Module) -> Result<(), Error> {
self.region.clone().reset_heap(self, module)
}
pub fn heap_len(&self) -> usize {
self.heap_accessible_size
}
pub fn slot(&self) -> &Slot {
self.slot
.as_ref()
.expect("alloc missing its slot before drop")
}
pub unsafe fn heap(&self) -> &[u8] {
std::slice::from_raw_parts(self.slot().heap as *mut u8, self.heap_accessible_size)
}
pub unsafe fn heap_mut(&mut self) -> &mut [u8] {
std::slice::from_raw_parts_mut(self.slot().heap as *mut u8, self.heap_accessible_size)
}
pub unsafe fn heap_u32(&self) -> &[u32] {
assert!(self.slot().heap as usize % 4 == 0, "heap is 4-byte aligned");
assert!(
self.heap_accessible_size % 4 == 0,
"heap size is multiple of 4-bytes"
);
std::slice::from_raw_parts(self.slot().heap as *mut u32, self.heap_accessible_size / 4)
}
pub unsafe fn heap_u32_mut(&mut self) -> &mut [u32] {
assert!(self.slot().heap as usize % 4 == 0, "heap is 4-byte aligned");
assert!(
self.heap_accessible_size % 4 == 0,
"heap size is multiple of 4-bytes"
);
std::slice::from_raw_parts_mut(self.slot().heap as *mut u32, self.heap_accessible_size / 4)
}
pub unsafe fn heap_u64(&self) -> &[u64] {
assert!(self.slot().heap as usize % 8 == 0, "heap is 8-byte aligned");
assert!(
self.heap_accessible_size % 8 == 0,
"heap size is multiple of 8-bytes"
);
std::slice::from_raw_parts(self.slot().heap as *mut u64, self.heap_accessible_size / 8)
}
pub unsafe fn heap_u64_mut(&mut self) -> &mut [u64] {
assert!(self.slot().heap as usize % 8 == 0, "heap is 8-byte aligned");
assert!(
self.heap_accessible_size % 8 == 0,
"heap size is multiple of 8-bytes"
);
std::slice::from_raw_parts_mut(self.slot().heap as *mut u64, self.heap_accessible_size / 8)
}
pub unsafe fn stack_mut(&mut self) -> &mut [u8] {
std::slice::from_raw_parts_mut(self.slot().stack as *mut u8, self.slot().limits.stack_size)
}
pub unsafe fn stack_u64_mut(&mut self) -> &mut [u64] {
assert!(
self.slot().stack as usize % 8 == 0,
"stack is 8-byte aligned"
);
assert!(
self.slot().limits.stack_size % 8 == 0,
"stack size is multiple of 8-bytes"
);
std::slice::from_raw_parts_mut(
self.slot().stack as *mut u64,
self.slot().limits.stack_size / 8,
)
}
pub unsafe fn globals(&self) -> &[GlobalValue] {
std::slice::from_raw_parts(
self.slot().globals as *const GlobalValue,
self.slot().limits.globals_size / std::mem::size_of::<GlobalValue>(),
)
}
pub unsafe fn globals_mut(&mut self) -> &mut [GlobalValue] {
std::slice::from_raw_parts_mut(
self.slot().globals as *mut GlobalValue,
self.slot().limits.globals_size / std::mem::size_of::<GlobalValue>(),
)
}
pub unsafe fn sigstack_mut(&mut self) -> &mut [u8] {
std::slice::from_raw_parts_mut(
self.slot().sigstack as *mut u8,
self.slot().limits.signal_stack_size,
)
}
pub fn mem_in_heap<T>(&self, ptr: *const T, len: usize) -> bool {
let start = ptr as usize;
let end = start + len;
let heap_start = self.slot().heap as usize;
let heap_end = heap_start + self.heap_accessible_size;
start <= end
&& start >= heap_start
&& start < heap_end
&& end >= heap_start
&& end <= heap_end
}
}
/// Sizing parameters (in bytes) for the memory regions of an instance
/// allocation. See `Limits::validate` for the alignment and minimum-size
/// requirements each field must satisfy.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Limits {
    /// Maximum accessible heap size.
    pub heap_memory_size: usize,
    /// Total heap address space to reserve (accessible + guard/inaccessible).
    pub heap_address_space_size: usize,
    /// Stack size.
    pub stack_size: usize,
    /// Globals area size.
    pub globals_size: usize,
    /// Signal-handler stack size.
    pub signal_stack_size: usize,
}
/// Minimum accepted signal stack size (enforced by `Limits::validate`).
///
/// NOTE(review): hard-coded to 32 KiB on macOS rather than taken from libc —
/// presumably libc's value is insufficient there; confirm.
#[cfg(target_os = "macos")]
pub const MINSIGSTKSZ: usize = 32 * 1024;
/// Minimum accepted signal stack size, from `<signal.h>` via libc.
#[cfg(not(target_os = "macos"))]
pub const MINSIGSTKSZ: usize = libc::MINSIGSTKSZ;
/// Default signal stack size for debug builds: 12 KiB, the floor that
/// `Limits::validate` enforces under `debug_assertions`.
#[cfg(all(debug_assertions, not(target_os = "macos")))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = 12 * 1024;
/// Default signal stack size for debug builds on macOS — libc's `SIGSTKSZ`
/// (presumably already at least 12 KiB there; confirm).
#[cfg(all(debug_assertions, target_os = "macos"))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = libc::SIGSTKSZ;
/// Default signal stack size for release builds: libc's `SIGSTKSZ`.
#[cfg(not(debug_assertions))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = libc::SIGSTKSZ;
impl Limits {
    /// Returns the default limits: a 1 MiB heap within an 8 GiB address-space
    /// reservation, a 128 KiB stack, one page of globals, and the
    /// platform-appropriate default signal stack.
    pub const fn default() -> Limits {
        const KIB: usize = 1024;
        Limits {
            heap_memory_size: 1024 * KIB,
            heap_address_space_size: 8 * 1024 * 1024 * KIB,
            stack_size: 128 * KIB,
            globals_size: 4 * KIB,
            signal_stack_size: DEFAULT_SIGNAL_STACK_SIZE,
        }
    }
}
impl Limits {
    /// Total bytes of address space required for one slot sized by these
    /// limits: the pre-heap offset, the heap reservation, a guard page plus
    /// the stack, the globals, and a guard page plus the signal stack.
    ///
    /// # Panics
    ///
    /// Panics if the sum overflows `usize`; that indicates nonsensical limits
    /// and is treated as a bug.
    pub fn total_memory_size(&self) -> usize {
        [
            instance_heap_offset(),
            self.heap_address_space_size,
            host_page_size(), // guard page below the stack
            self.stack_size,
            self.globals_size,
            host_page_size(), // guard page below the signal stack
            self.signal_stack_size,
        ]
        .iter()
        .try_fold(0usize, |acc, &x| acc.checked_add(x))
        .expect("total_memory_size doesn't overflow")
    }

    /// Checks that these limits are usable: every region size must be a
    /// multiple of the host page size, the heap reservation must cover the
    /// heap memory size, the stack must be non-empty, and the signal stack
    /// must meet the platform (and debug-build) minimums.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidArgument` describing the first violated
    /// requirement.
    pub fn validate(&self) -> Result<(), Error> {
        if self.heap_memory_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "memory size must be a multiple of host page size",
            ));
        }
        if self.heap_address_space_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "address space size must be a multiple of host page size",
            ));
        }
        if self.heap_memory_size > self.heap_address_space_size {
            return Err(Error::InvalidArgument(
                "address space size must be at least as large as memory size",
            ));
        }
        if self.stack_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "stack size must be a multiple of host page size",
            ));
        }
        if self.globals_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "globals size must be a multiple of host page size",
            ));
        }
        // `stack_size` is unsigned, so `<= 0` would be a misleading way to
        // spell the only representable violation: zero.
        if self.stack_size == 0 {
            return Err(Error::InvalidArgument("stack size must be greater than 0"));
        }
        if self.signal_stack_size < MINSIGSTKSZ {
            return Err(Error::InvalidArgument(
                "signal stack size must be at least MINSIGSTKSZ (defined in <signal.h>)",
            ));
        }
        // Debug builds use more stack in signal handlers, so enforce a higher
        // floor even on platforms whose MINSIGSTKSZ is smaller.
        if cfg!(debug_assertions) && self.signal_stack_size < 12 * 1024 {
            return Err(Error::InvalidArgument(
                "signal stack size must be at least 12KiB for debug builds",
            ));
        }
        if self.signal_stack_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "signal stack size must be a multiple of host page size",
            ));
        }
        Ok(())
    }
}
pub mod tests;