#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use contracts::*;
#[cfg(not(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
)))]
use disabled_contracts::*;
use std::alloc::Layout;
use std::ffi::c_void;
use std::mem::MaybeUninit;
use std::num::NonZeroUsize;
use std::ptr::NonNull;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use crate::debug_allocation_map;
#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use crate::debug_arange_map;
#[cfg(any(
all(test, feature = "check_contracts_in_tests"),
feature = "check_contracts"
))]
use crate::debug_type_map;
use crate::linear_ref::LinearRef;
use crate::mill;
use crate::mill::Mill;
use crate::mill::SpanMetadata;
use crate::mill::MAX_SPAN_SIZE;
use crate::Class;
/// Upper bound on the number of objects handed out in one bump-allocation
/// batch, regardless of how many the caller asked for (see
/// `try_allocate_from_span`, which clamps to this value).
const MAX_ALLOCATION_BATCH: usize = 100;
// A magazine refill plus the one extra object returned directly must fit in
// a single batch: `allocate_many_objects` requests `dst.len() + 1` objects.
static_assertions::const_assert!(
    (crate::magazine_impl::MAGAZINE_SIZE as usize) < MAX_ALLOCATION_BATCH
);
/// Strongest alignment (4 KB) this allocator promises for any object;
/// `Press::new` rejects layouts with stricter alignment.
pub const MAX_OBJECT_ALIGNMENT: usize = 4096;
// Alignment comes from carving objects out of spans, so a span must be at
// least as large as the strongest alignment we support.
static_assertions::const_assert!(MAX_OBJECT_ALIGNMENT <= mill::MAX_SPAN_SIZE);
/// A `Press` produces new allocations for one size class by bump-allocating
/// out of its current span, replacing the span (via the `Mill`) when it runs
/// out.
#[derive(Debug)]
pub struct Press {
    // Metadata of the span we currently bump-allocate from; null until the
    // first span is installed by `try_replace_span`.
    bump: AtomicPtr<SpanMetadata>,
    // Source of fresh spans; the mutex serialises span replacement.
    mill: Mutex<&'static Mill>,
    // Object layout, padded to its alignment in `new` so that consecutive
    // objects stay aligned.
    layout: Layout,
    // The size class this press allocates for.
    class: Class,
}
/// Confirms that `address` belongs to a span whose metadata is tagged with
/// `class`'s id.
///
/// Returns a static description of the first failed check on error.
#[inline]
pub fn check_allocation(class: Class, address: usize) -> Result<(), &'static str> {
    let metadata = SpanMetadata::from_allocation_address(address);
    // SAFETY: assumes `from_allocation_address` derives a pointer into the
    // metadata region backing `address` — TODO confirm against the mill's
    // mapping invariants.
    match unsafe { metadata.as_mut() } {
        None => Err("Derived a bad metadata address"),
        Some(meta) if meta.class_id == Some(class.id()) => Ok(()),
        Some(_) => Err("Incorrect class id"),
    }
}
impl Press {
pub fn new(
class: Class,
mut layout: Layout,
mapper_name: Option<&str>,
) -> Result<Self, &'static str> {
if layout.align() > MAX_OBJECT_ALIGNMENT {
return Err("slitter only supports alignment up to 4 KB");
}
layout = layout.pad_to_align();
assert_eq!(layout.size() % layout.align(), 0);
if layout.size() > MAX_SPAN_SIZE / 2 {
Err("Class elements too large (after alignment)")
} else {
Ok(Self {
bump: Default::default(),
mill: Mutex::new(mill::get_mill(mapper_name)?),
layout,
class,
})
}
}
/// Debug-build helper: records each of the `count` objects starting at
/// `begin` as belonging to `self.class` in the type map.
#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
fn associate_range(&self, begin: usize, count: usize) -> Result<(), &'static str> {
    (0..count).try_for_each(|slot| {
        debug_type_map::associate_class(self.class, begin + slot * self.layout.size())
    })
}
/// Debug-build helper: checks that each of the `count` objects starting at
/// `begin` is typed as `self.class` and not currently allocated.
#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
fn is_range_associated_and_free(&self, begin: usize, count: usize) -> Result<(), &'static str> {
    let elsize = self.layout.size();
    for slot in 0..count {
        let raw = (begin + slot * elsize) as *mut c_void;
        let address = NonNull::new(raw).ok_or("allocated NULL pointer")?;
        debug_type_map::ptr_is_class(self.class, &address)?;
        debug_allocation_map::can_be_allocated(self.class, &address)?;
    }
    Ok(())
}
/// Debug-build helper: verifies the span metadata backing each of the
/// `count` objects starting at `begin` is tagged with `self.class`.
#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
fn check_allocation_range(&self, begin: usize, count: usize) -> Result<(), &'static str> {
    (0..count)
        .try_for_each(|slot| check_allocation(self.class, begin + slot * self.layout.size()))
}
/// Carves up to `max_count` consecutive objects out of `meta`'s span by
/// advancing its bump pointer.
///
/// Returns the base address of the batch and the number of objects in it,
/// or `None` once the span is exhausted.
#[requires(debug_arange_map::is_metadata(meta as * mut SpanMetadata as usize,
           std::mem::size_of::<SpanMetadata>()).is_ok(),
           "The `meta` reference must come from a metadata range.")]
#[ensures(ret.is_some() -> ret.unwrap().1.get() <= max_count.get(),
          "We never return more than `max_count` allocations.")]
#[ensures(ret.is_some() -> ret.unwrap().0.get() as usize % self.layout.align() == 0,
          "The base address is correctly aligned.")]
#[ensures(ret.is_some() -> self.associate_range(ret.unwrap().0.get(), ret.unwrap().1.get()).is_ok(),
          "On success, it must be possible to associate the returned address with `self.class`.")]
#[ensures(ret.is_some() ->
          debug_arange_map::is_data(ret.unwrap().0.get(), self.layout.size() * ret.unwrap().1.get()).is_ok(),
          "On success, the returned data must come from a data range.")]
#[ensures(ret.is_some() -> self.check_allocation_range(ret.unwrap().0.get(), ret.unwrap().1.get()).is_ok(),
          "On success, the allocations must all have the class metadata set up.")]
fn try_allocate_from_span(
    &self,
    meta: &mut SpanMetadata,
    max_count: NonZeroUsize,
) -> Option<(NonZeroUsize, NonZeroUsize)> {
    // Never reserve more than one batch per bump, however much was asked.
    let desired = max_count.get().clamp(0, MAX_ALLOCATION_BATCH);
    let limit = meta.bump_limit as usize;
    // Optimistically reserve `desired` slots.  The bump pointer may land
    // (or already be) past `limit`; that is detected below rather than
    // prevented, so concurrent over-reservations are harmless.
    let allocated_id = meta.bump_ptr.fetch_add(desired, Ordering::Relaxed);
    if allocated_id >= limit {
        // Span was exhausted before our reservation began.
        return None;
    }
    // Our reservation may straddle the end of the span: trim the batch to
    // the objects that actually exist (at least one, since id < limit).
    let actual = (limit - allocated_id).clamp(0, desired);
    Some((
        NonZeroUsize::new(meta.span_begin + allocated_id * self.layout.size())?,
        NonZeroUsize::new(actual)?,
    ))
}
/// Debug-build check: a span about to become the bump span must have valid
/// metadata, an aligned base, and every potential object correctly mapped
/// and tagged with this press's class.
#[cfg(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
))]
fn assert_new_bump_is_safe(&self, bump: *mut SpanMetadata) {
    // The metadata pointer itself must live in a metadata range.
    assert!(
        debug_arange_map::is_metadata(bump as usize, std::mem::size_of::<SpanMetadata>())
            .is_ok()
    );
    let meta = unsafe { bump.as_mut() }.expect("must be valid");
    // The span base must satisfy the class's (padded) alignment.
    assert_eq!(meta.span_begin % self.layout.align(), 0);
    // Every object the span can ever hand out must lie in a data range and
    // resolve back to this class via its span metadata.
    for i in 0..meta.bump_limit as usize {
        let address = meta.span_begin + i * self.layout.size();
        assert!(debug_arange_map::is_data(address, self.layout.size()).is_ok());
        assert!(check_allocation(self.class, address).is_ok());
    }
}
/// No-op stand-in used when contract checking is disabled: the checked
/// variant of this assertion only exists in contract-checking builds.
#[cfg(not(any(
    all(test, feature = "check_contracts_in_tests"),
    feature = "check_contracts"
)))]
#[inline]
fn assert_new_bump_is_safe(&self, _bump: *mut SpanMetadata) {}
/// Replaces the bump span with a fresh one from the mill, but only if
/// `self.bump` still equals `expected` (double-checked under the mill lock,
/// so concurrent callers don't each burn a span).
///
/// Returns `Err` only when the mill fails to produce a span.
// NOTE(review): the early `Ok(())` returns below leave `self.bump`
// untouched; if `expected` was already stale at entry, the "bump Span has
// been updated" postcondition looks unsatisfiable on that path — confirm
// how this behaves under the contract-checking features.
#[ensures(ret.is_ok() ->
          self.bump.load(Ordering::Relaxed) != old(self.bump.load(Ordering::Relaxed)),
          "On success, the bump Span has been updated.")]
#[ensures(debug_arange_map::is_metadata(self.bump.load(Ordering::Relaxed) as usize,
          std::mem::size_of::<SpanMetadata>()).is_ok(),
          "The bump struct must point to a valid metadata range.")]
fn try_replace_span(&self, expected: *mut SpanMetadata) -> Result<(), i32> {
    // Cheap pre-lock check: someone else already replaced the span.
    if self.bump.load(Ordering::Relaxed) != expected {
        return Ok(());
    }
    let mill = self.mill.lock().unwrap();
    // Re-check under the lock: we may have lost the race while waiting.
    if self.bump.load(Ordering::Relaxed) != expected {
        return Ok(());
    }
    let range = mill.get_span(self.layout.size(), None)?;
    let meta: &mut _ = range.meta;
    // Claim the fresh span for our class and reset its bump state.
    assert_eq!(meta.class_id, None);
    meta.class_id = Some(self.class.id());
    meta.bump_limit = (range.data_size / self.layout.size()) as u32;
    assert!(
        meta.bump_limit > 0,
        "layout.size > MAX_SPAN_SIZE, but we check for that in the constructor."
    );
    meta.bump_ptr = AtomicUsize::new(0);
    meta.span_begin = range.data as usize;
    // Tag any trailing metadata records for this range as well, so
    // metadata derived from any address in the span agrees on the class.
    for trailing_meta in range.trail {
        assert_eq!(trailing_meta.class_id, None);
        trailing_meta.class_id = Some(self.class.id());
    }
    // We hold the mill lock, so nobody else can have published a new span.
    assert_eq!(self.bump.load(Ordering::Relaxed), expected);
    self.assert_new_bump_is_safe(meta);
    // Publish with Release ordering so readers that Acquire-load the bump
    // pointer observe the fully initialised metadata above.
    self.bump.store(meta, Ordering::Release);
    Ok(())
}
/// Makes one attempt to allocate up to `max_count` objects from the current
/// bump span; if the span is missing or exhausted, tries to install a
/// replacement instead.
///
/// `Ok(None)` means no objects were produced this time but a retry may
/// succeed; `Err` means the mill could not supply a new span.
#[ensures(ret.is_ok() && ret.unwrap().is_some() ->
          ret.unwrap().unwrap().1.get() <= max_count.get(),
          "We never overallocate.")]
#[ensures(ret.is_ok() && ret.unwrap().is_some() ->
          self.is_range_associated_and_free(ret.unwrap().unwrap().0.get(), ret.unwrap().unwrap().1.get()).is_ok(),
          "Successful allocations are fresh, or match the class and avoid double-allocation.")]
#[ensures(ret.is_ok() && ret.unwrap().is_some() ->
          self.check_allocation_range(ret.unwrap().unwrap().0.get(), ret.unwrap().unwrap().1.get()).is_ok(),
          "Sucessful allocations must have the allocation metadata set correctly.")]
fn try_allocate_once(
    &self,
    max_count: NonZeroUsize,
) -> Result<Option<(NonZeroUsize, NonZeroUsize)>, i32> {
    // Acquire pairs with the Release store in `try_replace_span`, so a
    // non-null pointer sees fully initialised span metadata.
    let meta_ptr: *mut SpanMetadata = self.bump.load(Ordering::Acquire);
    if let Some(meta) = unsafe { meta_ptr.as_mut() } {
        // Fast path: bump-allocate from the current span.
        if let Some(result) = self.try_allocate_from_span(meta, max_count) {
            return Ok(Some(result));
        }
    }
    // No span, or it is exhausted: swap in a new one (or observe someone
    // else doing so) and report "retry" on success.
    self.try_replace_span(meta_ptr).map(|_| None)
}
/// Allocates up to `max_count` consecutive objects, retrying through span
/// replacement until a batch is carved out or the mill fails.
///
/// Returns the batch's base address and object count, or `None` when no
/// new span could be obtained.
#[ensures(ret.is_some() ->
          ret.unwrap().1.get() <= max_count.get(),
          "We never overallocate.")]
#[ensures(ret.is_some() ->
          self.is_range_associated_and_free(ret.unwrap().0.get(), ret.unwrap().1.get()).is_ok(),
          "Successful allocations are fresh, or match the class and avoid double-allocation.")]
#[ensures(ret.is_some() ->
          self.check_allocation_range(ret.unwrap().0.get(), ret.unwrap().1.get()).is_ok(),
          "Successful allocations must have the allocation metadata set correctly.")]
fn try_allocate(&self, max_count: NonZeroUsize) -> Option<(NonZeroUsize, NonZeroUsize)> {
    loop {
        match self.try_allocate_once(max_count) {
            // The mill could not produce a new span: allocation fails.
            Err(_) => return None,
            Ok(Some(result)) => return Some(result),
            // A replacement span was installed; retry against it.
            Ok(None) => continue,
        }
    }
}
/// Allocates a single object of this press's class.
///
/// Returns `None` when no object could be allocated (e.g. the mill cannot
/// provide a new span).
#[ensures(ret.is_some() ->
          debug_allocation_map::can_be_allocated(self.class, ret.as_ref().unwrap().get()).is_ok(),
          "Successful allocations are fresh, or match the class and avoid double-allocation.")]
#[ensures(ret.is_some() ->
          debug_type_map::is_class(self.class, ret.as_ref().unwrap()).is_ok(),
          "On success, the new allocation has the correct type.")]
#[ensures(ret.is_some() ->
          check_allocation(self.class, ret.as_ref().unwrap().get().as_ptr() as usize).is_ok(),
          "Successful allocations must have the allocation metadata set correctly.")]
pub fn allocate_one_object(&self) -> Option<LinearRef> {
    let (address, _count) = self.try_allocate(NonZeroUsize::new(1).unwrap())?;
    // `try_allocate` never overallocates, and the count is non-zero.
    debug_assert_eq!(_count.get(), 1);
    // SAFETY: `address` is a `NonZeroUsize`, so the pointer is non-null.
    Some(LinearRef::new(unsafe {
        NonNull::new_unchecked(address.get() as *mut c_void)
    }))
}
/// Fills `dst` with freshly allocated objects and additionally returns one
/// extra allocation directly to the caller.
///
/// Returns `(number of `dst` slots initialised, extra object)`.  The extra
/// object is satisfied first: if it is `None`, nothing was allocated and
/// the count is 0.
#[ensures(ret.1.is_some() ->
          debug_allocation_map::can_be_allocated(self.class, ret.1.as_ref().unwrap().get()).is_ok(),
          "Successful allocations are fresh, or match the class and avoid double-allocation.")]
#[ensures(ret.1.is_some() ->
          debug_type_map::is_class(self.class, ret.1.as_ref().unwrap()).is_ok(),
          "On success, the new allocation has the correct type.")]
#[ensures(ret.1.is_some() ->
          check_allocation(self.class, ret.1.as_ref().unwrap().get().as_ptr() as usize).is_ok(),
          "Sucessful allocations must have the allocation metadata set correctly.")]
#[ensures(ret.1.is_none() -> ret.0 == 0,
          "We always try to satisfy the return value first.")]
pub fn allocate_many_objects(
    &self,
    dst: &mut [MaybeUninit<LinearRef>],
) -> (usize, Option<LinearRef>) {
    let elsize = self.layout.size();
    // Ask for one more object than `dst` holds: the extra one is the
    // directly returned value.
    match self.try_allocate(NonZeroUsize::new(dst.len() + 1).expect("Should not overflow")) {
        Some((base, count)) => {
            let mut address = base.get();
            // Hands out consecutive objects from the batch, one per call.
            let mut get_ref = || {
                let ret =
                    LinearRef::new(unsafe { NonNull::new_unchecked(address as *mut c_void) });
                address += elsize;
                ret
            };
            // Satisfy the direct return value first (`count` >= 1).
            let ret = Some(get_ref());
            let mut populated = 0;
            // Write the remaining `count - 1` objects into `dst`; the
            // batch never exceeds `dst.len() + 1`, so this cannot overrun.
            for uninit in dst.iter_mut().take(count.get() - 1) {
                unsafe { uninit.as_mut_ptr().write(get_ref()) };
                populated += 1;
            }
            debug_assert!(populated <= count.get());
            (populated, ret)
        }
        None => (0, None),
    }
}
}