#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use contracts::*;
#[cfg(not(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
)))]
use disabled_contracts::*;
use std::collections::HashMap;
use std::ffi::c_void;
use std::num::NonZeroU32;
use std::num::NonZeroUsize;
use std::ptr::NonNull;
use std::sync::atomic::AtomicUsize;
use std::sync::Mutex;
#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use crate::debug_arange_map;
use crate::mapper::Mapper;
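// A `Mill` hands out spans of data carved from large address-space
// chunks.  Each chunk is a DATA_ALIGNMENT-aligned data region preceded
// by a guard page, a metadata page, and a second guard page, and
// followed by a trailing guard page.
//
// With the production constants below, chunks are 1 GiB, guard and
// metadata pages are 2 MiB, and spans are aligned to 16 KiB.  The
// `test_only_small_constants` feature shrinks everything so tests can
// exercise chunk exhaustion without reserving gigabytes of address
// space.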
#[cfg(not(feature = "test_only_small_constants"))]
const DATA_ALIGNMENT: usize = 1 << 30;
#[cfg(not(feature = "test_only_small_constants"))]
pub const GUARD_PAGE_SIZE: usize = 2 << 20;
#[cfg(not(feature = "test_only_small_constants"))]
const METADATA_PAGE_SIZE: usize = 2 << 20;
#[cfg(not(feature = "test_only_small_constants"))]
pub const SPAN_ALIGNMENT: usize = 16 << 10;
#[cfg(feature = "test_only_small_constants")]
const DATA_ALIGNMENT: usize = 2 << 20;
#[cfg(feature = "test_only_small_constants")]
pub const GUARD_PAGE_SIZE: usize = 16 << 10;
#[cfg(feature = "test_only_small_constants")]
const METADATA_PAGE_SIZE: usize = 16 << 10;
#[cfg(feature = "test_only_small_constants")]
pub const SPAN_ALIGNMENT: usize = 4 << 10;
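// A span never covers more than 1/16th of a chunk's data region.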
pub const MAX_SPAN_SIZE: usize = DATA_ALIGNMENT / 16;
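// Span size used when the caller expresses no preference: one span
// alignment short of 1 MiB (8 KiB with test-only small constants).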
#[cfg(not(feature = "test_only_small_constants"))]
pub const DEFAULT_DESIRED_SPAN_SIZE: usize = (1 << 20) - SPAN_ALIGNMENT;
#[cfg(feature = "test_only_small_constants")]
pub const DEFAULT_DESIRED_SPAN_SIZE: usize = (8 << 10) - SPAN_ALIGNMENT;
static_assertions::const_assert!(DEFAULT_DESIRED_SPAN_SIZE <= MAX_SPAN_SIZE);
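// Reserving twice DATA_ALIGNMENT guarantees the reservation contains a
// fully aligned data region with room for the prefix (guard + metadata
// + guard) before it and the suffix (guard) after it, wherever the
// mapper happens to place the reservation.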
const MAPPED_REGION_SIZE: usize = 2 * DATA_ALIGNMENT + 3 * GUARD_PAGE_SIZE + METADATA_PAGE_SIZE;
const PREFIX_SIZE: usize = GUARD_PAGE_SIZE + METADATA_PAGE_SIZE + GUARD_PAGE_SIZE;
const SUFFIX_SIZE: usize = GUARD_PAGE_SIZE;
static_assertions::const_assert_eq!(
MAPPED_REGION_SIZE,
2 * DATA_ALIGNMENT + PREFIX_SIZE + SUFFIX_SIZE
);
static_assertions::const_assert!(
DATA_ALIGNMENT / SPAN_ALIGNMENT <= METADATA_PAGE_SIZE / std::mem::size_of::<SpanMetadata>()
);
static_assertions::const_assert!(std::mem::size_of::<MetaArray>() <= METADATA_PAGE_SIZE);
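/// Metadata for one span, stored in its chunk's metadata page and
/// shared with the C side of the allocator (hence `repr(C)`).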
#[derive(Debug)]
#[repr(C)]
pub struct SpanMetadata {
    /// The allocation class that owns this span, or `None` when the
    /// span is unassigned.
    pub(crate) class_id: Option<NonZeroU32>,
    /// Bump-allocation limit for the span.
    pub(crate) bump_limit: u32,
    /// Current bump-allocation cursor.
    pub(crate) bump_ptr: AtomicUsize,
    /// Address of the first byte in the span.
    pub(crate) span_begin: usize,
}
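// This function is never defined nor called: declaring it in an
// `extern "C"` block makes the compiler check that `SpanMetadata` is
// FFI-compatible, and the name records that an all-zero `SpanMetadata`
// is a valid value (`class_id` is a niche-optimised
// `Option<NonZeroU32>`, so all-zero bits decode to `None`).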
#[allow(unused)]
extern "C" {
fn unused_span_metadata_is_zero_safe() -> SpanMetadata;
}
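/// A range of spans freshly milled from a chunk: the metadata entry
/// for the first span, the metadata entries for the rest of the range,
/// and the data region itself.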
#[derive(Debug)]
pub struct MilledRange {
pub meta: &'static mut SpanMetadata,
pub trail: &'static mut [SpanMetadata],
pub data: *mut c_void,
pub data_size: usize,
}
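/// The contents of a chunk's metadata page: one `SpanMetadata` entry
/// per span-aligned slot in the data region.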
#[derive(Debug)]
struct MetaArray {
chunk_meta: [SpanMetadata; DATA_ALIGNMENT / SPAN_ALIGNMENT],
}
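/// A chunk in mid-carving: spans are handed out by bumping
/// `next_free_span`.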
#[derive(Debug)]
struct Chunk {
meta: *mut MetaArray,
    /// Address of the chunk's first span.
    spans: usize,
    /// Total number of spans in the chunk.
    span_count: usize,
next_free_span: usize,
}
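// SAFETY: a `Chunk` only holds raw pointers into a region the mill
// exclusively owns, and it is only accessed behind `Mill`'s mutex.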
unsafe impl Send for Chunk {}
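/// A `Mill` carves spans out of chunks obtained from its `Mapper`,
/// keeping at most one partially consumed chunk around at a time.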
#[derive(Debug)]
pub struct Mill {
mapper: &'static dyn Mapper,
current_chunk: Mutex<Option<Chunk>>,
}
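/// Returns the global `Mill` for the mapper registered under
/// `mapper_name` (the default mapper for `None`).  Mills are created
/// lazily, leaked, and memoised by mapper address.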
pub fn get_mill(mapper_name: Option<&str>) -> Result<&'static Mill, &'static str> {
lazy_static::lazy_static! {
static ref MILLS: Mutex<HashMap<usize, &'static Mill>> = Default::default();
}
let mapper: &'static _ = crate::mapper::get_mapper(mapper_name)?;
let address = mapper as *const _ as *const () as usize;
let mut mills = MILLS.lock().unwrap();
Ok(mills
.entry(address)
.or_insert_with(|| Box::leak(Box::new(Mill::new(mapper)))))
}
impl SpanMetadata {
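    /// Maps an address inside a chunk's data region to the
    /// `SpanMetadata` entry for the span that contains it: the
    /// metadata array lives one guard page below the chunk's base.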
pub fn from_allocation_address(address: usize) -> *mut SpanMetadata {
let base = address - (address % DATA_ALIGNMENT);
        let index = (address - base) / SPAN_ALIGNMENT;
        let meta = base - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE;
unsafe { (meta as *mut SpanMetadata).add(index) }
}
}
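/// A freshly reserved region, carved into unused "slop" at both ends,
/// a prefix (guard page, metadata page, guard page), a
/// `DATA_ALIGNMENT`-aligned data region, and a trailing guard page.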
#[derive(Debug)]
struct AllocatedChunk<'a> {
mapper: &'a dyn Mapper,
    // Base address of the reservation.
    base: NonZeroUsize,
    // Exclusive end of the reservation.
    top: NonZeroUsize,
    // End of the unused slop at the bottom of the reservation.
    bottom_slop_end: usize,
    pub meta: *mut MetaArray,
pub data: *mut c_void,
pub data_end: usize,
    // Start of the unused slop at the top of the reservation.
    top_slop_begin: usize,
}
impl<'a> AllocatedChunk<'a> {
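    /// Reserves a fresh region from `mapper`, rounding the request up
    /// to a whole number of pages.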
pub fn new(mapper: &'a dyn Mapper) -> Result<AllocatedChunk<'a>, i32> {
let page_size = mapper.page_size();
let mut size = MAPPED_REGION_SIZE;
if (size % page_size) > 0 {
size = page_size * (1 + (size / page_size));
}
let (region, actual): (NonNull<c_void>, usize) =
mapper.reserve(size, DATA_ALIGNMENT, PREFIX_SIZE, SUFFIX_SIZE)?;
Ok(AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(region.as_ptr() as usize).expect("NonNull should be NonZero"),
actual,
)
.expect("mapper returned a bad region"))
}
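    /// Carves the reservation `[base, base + size)` into slop, prefix,
    /// aligned data, and suffix, or reports why the region cannot be
    /// carved up.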
fn new_from_range(
mapper: &'a dyn Mapper,
base: NonZeroUsize,
size: usize,
) -> Result<AllocatedChunk<'a>, &'static str> {
let page_size = mapper.page_size();
if (base.get() % page_size) != 0 {
return Err("base is incorrectly aligned");
}
if (size % page_size) != 0 {
return Err("size is incorrectly aligned");
}
let top = NonZeroUsize::new(
base.get()
.checked_add(size)
.ok_or("input region wraps around")?,
)
.expect("must be non-zero");
let mut data = DATA_ALIGNMENT
.checked_mul((base.get() / DATA_ALIGNMENT) + 1)
.ok_or("overflow in alignment")?;
assert!(data >= base.get());
if (data - base.get()) < PREFIX_SIZE {
data = data.checked_add(DATA_ALIGNMENT).ok_or("overflow in bump")?;
}
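        // The bottom slop ends one prefix below the data region,
        // rounded down to a page; the metadata page sits one guard
        // page above that.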
let mut bottom_slop_end = data.checked_sub(PREFIX_SIZE).unwrap();
bottom_slop_end -= bottom_slop_end % page_size;
let meta = bottom_slop_end.checked_add(GUARD_PAGE_SIZE).unwrap() as *mut MetaArray;
let data_end = data
.checked_add(DATA_ALIGNMENT)
.ok_or("overflow in data_end")?;
let mut suffix_end = data_end
.checked_add(SUFFIX_SIZE)
.ok_or("overflow in suffix_end")?;
if (suffix_end % page_size) > 0 {
suffix_end = page_size * (1 + (suffix_end / page_size));
}
if suffix_end > top.get() {
return Err("region too small");
}
Ok(AllocatedChunk {
mapper,
base,
top,
bottom_slop_end,
meta,
data: data as *mut c_void,
data_end,
top_slop_begin: suffix_end,
})
}
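    /// Asserts this chunk's representation invariants: page-aligned
    /// boundaries, correctly ordered regions, and metadata that maps
    /// back to the data region.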
pub fn check_rep(&self) {
let page_size = self.mapper.page_size();
assert_eq!(self.base.get() % page_size, 0, "self: {:?}", self);
assert_eq!(self.top.get() % page_size, 0, "self: {:?}", self);
assert!(self.base.get() <= self.top.get(), "self: {:?}", self);
assert_eq!(self.bottom_slop_end % page_size, 0, "self: {:?}", self);
assert_eq!(self.top_slop_begin % page_size, 0, "self: {:?}", self);
assert!(self.bottom_slop_end >= self.base.get(), "self: {:?}", self);
assert!(self.top_slop_begin <= self.top.get(), "self: {:?}", self);
assert!(
self.bottom_slop_end <= self.top_slop_begin,
"self: {:?}",
self
);
assert!(
self.meta as usize >= self.bottom_slop_end + GUARD_PAGE_SIZE,
"self: {:?}",
self
);
assert!(
self.meta as usize + METADATA_PAGE_SIZE <= self.top_slop_begin,
"self: {:?}",
self
);
assert_eq!(
self.meta as usize + METADATA_PAGE_SIZE + GUARD_PAGE_SIZE,
self.data as usize,
"self: {:?}",
self
);
assert_eq!(self.data as usize % DATA_ALIGNMENT, 0, "self: {:?}", self);
assert!(
self.data as usize >= self.bottom_slop_end,
"self: {:?}",
self
);
assert!(
self.data as usize + DATA_ALIGNMENT + GUARD_PAGE_SIZE <= self.top_slop_begin,
"self: {:?}",
self
);
assert_eq!(
SpanMetadata::from_allocation_address(self.data as usize),
self.meta as *mut SpanMetadata
);
assert_eq!(
SpanMetadata::from_allocation_address(self.data as usize + (DATA_ALIGNMENT - 1)),
unsafe { (self.meta as *mut SpanMetadata).add(DATA_ALIGNMENT / SPAN_ALIGNMENT - 1) }
);
}
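    /// Backs the metadata and data regions with memory and invokes `f`
    /// on the chunk; the reservation is fully released if `f` fails,
    /// and the slop at both ends is returned to the mapper if it
    /// succeeds.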
#[ensures(ret.is_ok() == debug_arange_map::is_metadata(old(self.meta as usize), METADATA_PAGE_SIZE).is_ok(),
"The metadata region is marked as such on success.")]
#[ensures(ret.is_ok() == debug_arange_map::is_data(old(self.data as usize), DATA_ALIGNMENT).is_ok(),
"The data region is marked as such on success.")]
pub fn call_with_chunk<T>(self, f: impl FnOnce(&Self) -> Result<T, i32>) -> Result<T, i32> {
self.check_rep();
self.allocate()?;
let ret = f(&self);
if ret.is_err() {
self.release_all()?;
} else {
let _ = self.commit();
}
ret
}
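    /// Releases the entire reservation back to the mapper.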
fn release_all(self) -> Result<(), i32> {
self.mapper.release(
NonNull::new(self.base.get() as *mut c_void).expect("must be valid"),
self.top.get() - self.base.get(),
)
}
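    /// Backs the metadata page and the data region with memory,
    /// rounding each range out to page boundaries.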
#[ensures(debug_arange_map::is_metadata(self.meta as usize, METADATA_PAGE_SIZE).is_ok(),
"The metadata region is marked as such.")]
#[ensures(debug_arange_map::is_data(self.data as usize, DATA_ALIGNMENT).is_ok(),
"The data region is marked as such.")]
fn allocate(&self) -> Result<(), i32> {
fn rounded_allocate(
page_size: usize,
mut begin: usize,
size: usize,
allocator: impl FnOnce(NonNull<c_void>, usize) -> Result<(), i32>,
) -> Result<(), i32> {
let mut top = begin + size;
begin -= begin % page_size;
if (top % page_size) > 0 {
top = page_size * (1 + (top / page_size));
}
allocator(
NonNull::new(begin as *mut c_void).expect("must be valid"),
top - begin,
)
}
let page_size = self.mapper.page_size();
rounded_allocate(
page_size,
self.meta as usize,
METADATA_PAGE_SIZE,
|begin, size| self.mapper.allocate_meta(begin, size),
)?;
rounded_allocate(
page_size,
self.data as usize,
DATA_ALIGNMENT,
|begin, size| self.mapper.allocate_data(begin, size),
)
}
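    /// Releases the unused slop below the prefix and above the suffix,
    /// keeping only the guard pages, metadata, and data.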
#[invariant(debug_arange_map::is_metadata(self.meta as usize, METADATA_PAGE_SIZE).is_ok(),
"The metadata region is marked as such.")]
#[invariant(debug_arange_map::is_data(self.data as usize, DATA_ALIGNMENT).is_ok(),
"The data region is marked as such.")]
fn commit(self) -> Result<(), i32> {
fn release(mapper: &dyn Mapper, begin: usize, end: usize) -> Result<(), i32> {
let page_size = mapper.page_size();
assert!(begin <= end);
assert_eq!(begin % page_size, 0);
assert_eq!(end % page_size, 0);
if begin == end {
return Ok(());
}
mapper.release(
NonNull::new(begin as *mut c_void).expect("must be valid"),
end - begin,
)
}
release(self.mapper, self.base.get(), self.bottom_slop_end)?;
release(self.mapper, self.top_slop_begin, self.top.get())
}
}
impl Mill {
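    /// Creates a `Mill` for `mapper`, after checking that the Rust
    /// constants match the values compiled into the C side.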
pub fn new(mapper: &'static dyn Mapper) -> Self {
extern "C" {
fn slitter__data_alignment() -> usize;
fn slitter__guard_page_size() -> usize;
fn slitter__metadata_page_size() -> usize;
fn slitter__span_alignment() -> usize;
fn slitter__span_metadata_size() -> usize;
}
unsafe {
assert_eq!(DATA_ALIGNMENT, slitter__data_alignment());
assert_eq!(GUARD_PAGE_SIZE, slitter__guard_page_size());
assert_eq!(METADATA_PAGE_SIZE, slitter__metadata_page_size());
assert_eq!(SPAN_ALIGNMENT, slitter__span_alignment());
assert_eq!(
std::mem::size_of::<SpanMetadata>(),
slitter__span_metadata_size()
);
}
Self {
mapper,
current_chunk: Default::default(),
}
}
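    /// Reserves and carves a brand new chunk out of `mapper`.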
#[ensures(ret.is_ok() ->
debug_arange_map::is_metadata(ret.as_ref().unwrap().meta as usize,
METADATA_PAGE_SIZE).is_ok(),
"The metadata region is marked as such.")]
#[ensures(ret.is_ok() ->
debug_arange_map::is_data(ret.as_ref().unwrap().spans,
ret.as_ref().unwrap().span_count * SPAN_ALIGNMENT).is_ok(),
"The data region is marked as such.")]
fn allocate_chunk(mapper: &dyn Mapper) -> Result<Chunk, i32> {
AllocatedChunk::new(mapper)?.call_with_chunk(|chunk| {
let meta = unsafe { chunk.meta.as_mut() }.expect("must be valid");
Ok(Chunk {
meta,
spans: chunk.data as usize,
span_count: DATA_ALIGNMENT / SPAN_ALIGNMENT,
next_free_span: 0,
})
})
}
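    /// Bump-allocates between `min` and `desired` spans from `chunk`,
    /// or returns `None` when fewer than `min` spans remain.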
#[requires(debug_arange_map::is_metadata(chunk.meta as usize,
METADATA_PAGE_SIZE).is_ok(),
"The metadata region must be marked as such")]
#[requires(debug_arange_map::is_data(chunk.spans as usize,
chunk.span_count * SPAN_ALIGNMENT).is_ok(),
"The data region must be marked as such")]
#[requires(min <= DATA_ALIGNMENT / SPAN_ALIGNMENT,
"The request must fit in a data chunk.")]
    #[ensures(ret.is_none() -> chunk.next_free_span == old(chunk.next_free_span),
              "Must leave the chunk untouched on failure.")]
#[ensures(ret.is_some() ->
ret.as_ref().unwrap().data_size >= min * SPAN_ALIGNMENT,
"Must allocate enough for the min size on success.")]
#[ensures(ret.is_some() ->
chunk.next_free_span == old(chunk.next_free_span) + ret.as_ref().unwrap().data_size / SPAN_ALIGNMENT,
"Must consume the bump pointer on success.")]
fn allocate_span(chunk: &mut Chunk, min: usize, desired: usize) -> Option<MilledRange> {
if chunk.next_free_span >= chunk.span_count {
return None;
}
let remaining = chunk.span_count - chunk.next_free_span;
if remaining < min {
return None;
}
let allocated = remaining.min(desired);
let index = chunk.next_free_span;
chunk.next_free_span += allocated;
        // We have exclusive access to the chunk's metadata, so split
        // the range into its head entry and the trailing entries with
        // a single mutable borrow.
        let range: &'static mut [SpanMetadata] =
            unsafe { &mut (*chunk.meta).chunk_meta[index..index + allocated] };
        let (meta, trail) = range
            .split_first_mut()
            .expect("allocated must be at least 1");
        Some(MilledRange {
            meta,
            trail,
            data: (chunk.spans + index * SPAN_ALIGNMENT) as *mut c_void,
            data_size: allocated * SPAN_ALIGNMENT,
        })
}
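    /// Returns a range of spans totalling at least `min_size` bytes,
    /// ideally `desired_size` (`DEFAULT_DESIRED_SPAN_SIZE` if `None`),
    /// milling a fresh chunk when the current one is exhausted.
    ///
    /// A minimal usage sketch (hypothetical caller, not part of this
    /// module):
    ///
    /// ```ignore
    /// let mill = get_mill(None).expect("default mapper exists");
    /// let range = mill.get_span(16 << 10, None).expect("mapping works");
    /// assert!(range.data_size >= 16 << 10);
    /// ```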
#[requires(min_size <= MAX_SPAN_SIZE)]
#[requires(min_size <= desired_size.unwrap_or(min_size))]
pub fn get_span(
&self,
min_size: usize,
desired_size: Option<usize>,
) -> Result<MilledRange, i32> {
assert!(min_size <= MAX_SPAN_SIZE);
let desired = desired_size
.unwrap_or(DEFAULT_DESIRED_SPAN_SIZE)
.clamp(min_size, MAX_SPAN_SIZE);
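        // Round both byte sizes up to whole spans.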
let min_span_count =
(min_size / SPAN_ALIGNMENT) + ((min_size % SPAN_ALIGNMENT) > 0) as usize;
let desired_span_count =
(desired / SPAN_ALIGNMENT) + ((desired % SPAN_ALIGNMENT) > 0) as usize;
let mut chunk_or = self.current_chunk.lock().unwrap();
if chunk_or.is_none() {
*chunk_or = Some(Mill::allocate_chunk(self.mapper)?);
}
if let Some(range) = Mill::allocate_span(
chunk_or.as_mut().unwrap(),
min_span_count,
desired_span_count,
) {
return Ok(range);
}
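        // The current chunk is exhausted; mill a fresh one and retry.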
*chunk_or = Some(Mill::allocate_chunk(self.mapper)?);
Ok(Mill::allocate_span(
chunk_or.as_mut().unwrap(),
min_span_count,
desired_span_count,
)
.expect("New chunk must have a span"))
}
}
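// Exercises `new_from_range` at tricky bases: near the bottom and top
// of the address space, aligned and unaligned starts, and bases that
// leave exactly enough (or one page more than enough) room for the
// prefix.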
#[test]
fn test_allocated_span_valid() {
let mapper = crate::mapper::get_mapper(None).expect("Default mapper exists");
assert_eq!(GUARD_PAGE_SIZE % mapper.page_size(), 0);
assert_eq!(METADATA_PAGE_SIZE % mapper.page_size(), 0);
let at_start = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(mapper.page_size()).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
at_start.check_rep();
let at_end = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(usize::MAX - MAPPED_REGION_SIZE - mapper.page_size() + 1).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
at_end.check_rep();
let aligned = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(DATA_ALIGNMENT).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
aligned.check_rep();
let unaligned = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(DATA_ALIGNMENT + mapper.page_size()).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
unaligned.check_rep();
let offset_guard = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(DATA_ALIGNMENT - GUARD_PAGE_SIZE).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
offset_guard.check_rep();
let offset_meta = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(DATA_ALIGNMENT - GUARD_PAGE_SIZE - METADATA_PAGE_SIZE).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
offset_meta.check_rep();
let off_by_one = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(
DATA_ALIGNMENT - 2 * GUARD_PAGE_SIZE - METADATA_PAGE_SIZE + mapper.page_size(),
)
.unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
off_by_one.check_rep();
let exact_fit = AllocatedChunk::new_from_range(
mapper,
NonZeroUsize::new(DATA_ALIGNMENT - 2 * GUARD_PAGE_SIZE - METADATA_PAGE_SIZE).unwrap(),
MAPPED_REGION_SIZE,
)
.expect("must construct");
exact_fit.check_rep();
}