use crate::global_state::GlobalState;
use crate::policy::space::Space;
use crate::util::address::Address;
#[cfg(feature = "analysis")]
use crate::util::analysis::AnalysisManager;
use crate::util::constants::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::opaque_pointer::*;
use crate::util::options::Options;
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection};
use crate::MMTK;
use downcast_rs::Downcast;
use std::cell::RefCell;
use std::sync::atomic::Ordering;
use std::sync::Arc;
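/// The reason an allocation request failed. MMTk passes this to the
/// binding's `Collection::out_of_memory` implementation.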
#[repr(C)]
#[derive(Debug)]
pub enum AllocationError {
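    /// The heap is exhausted: the request cannot be satisfied within the
    /// current heap size, even after garbage collection.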
HeapOutOfMemory,
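    /// The OS failed to map more memory. This is generally unrecoverable.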
MmapOutOfMemory,
}
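/// Per-request options that change how a single allocation behaves.
/// The default options describe an ordinary mutator allocation at a
/// safepoint. As a sketch (using only the fields defined below), a
/// best-effort request that never blocks for a GC and never invokes the
/// OOM handler might look like:
///
/// ```ignore
/// let opts = AllocationOptions {
///     allow_overcommit: false, // still respect the heap limit
///     at_safepoint: false,     // on failure, return a null address
///                              // instead of blocking for a GC
///     allow_oom_call: false,   // never call Collection::out_of_memory
/// };
/// ```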
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocationOptions {
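    /// Allow the allocation to exceed the current heap limit without
    /// first triggering a GC.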
pub allow_overcommit: bool,
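    /// Whether the requesting thread is at a safepoint. If `false`, the
    /// slow path returns a null address on failure rather than blocking
    /// for a GC.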
pub at_safepoint: bool,
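    /// Whether the binding's `Collection::out_of_memory` handler may be
    /// invoked if the allocation ultimately fails.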
pub allow_oom_call: bool,
}
impl Default for AllocationOptions {
fn default() -> Self {
Self {
allow_overcommit: false,
at_safepoint: true,
allow_oom_call: true,
}
}
}
impl AllocationOptions {
pub(crate) fn is_default(&self) -> bool {
*self == AllocationOptions::default()
}
}
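/// Interior-mutable storage for the [`AllocationOptions`] of the current
/// request. Options are installed before an allocation call and reset to
/// the default afterwards.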
struct AllocationOptionsHolder {
alloc_options: RefCell<AllocationOptions>,
}
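// SAFETY: the held options are only accessed by the thread that owns the
// allocator; the holder is never actually shared between threads, even
// though `AllocatorContext` must be `Sync`.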
unsafe impl Sync for AllocationOptionsHolder {}
impl AllocationOptionsHolder {
pub fn new(alloc_options: AllocationOptions) -> Self {
Self {
alloc_options: RefCell::new(alloc_options),
}
}
pub fn set_alloc_options(&self, options: AllocationOptions) {
let mut alloc_options = self.alloc_options.borrow_mut();
*alloc_options = options;
}
pub fn clear_alloc_options(&self) {
let mut alloc_options = self.alloc_options.borrow_mut();
*alloc_options = AllocationOptions::default();
}
pub fn get_alloc_options(&self) -> AllocationOptions {
let alloc_options = self.alloc_options.borrow();
*alloc_options
}
}
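/// Align an allocation region so that the result fulfils the requested
/// alignment and offset, without filling the alignment gap.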
pub fn align_allocation_no_fill<VM: VMBinding>(
region: Address,
alignment: usize,
offset: usize,
) -> Address {
align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
}
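/// Align an allocation region so that the result fulfils the requested
/// alignment and offset, filling the alignment gap with
/// `VM::ALIGNMENT_VALUE` if the VM requests it.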
pub fn align_allocation<VM: VMBinding>(
region: Address,
alignment: usize,
offset: usize,
) -> Address {
align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
}
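/// Align `region` so that `result + offset` is a multiple of `alignment`,
/// where `region` is already known to be aligned to `known_alignment`.
/// Optionally fills the gap created by the adjustment.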
pub fn align_allocation_inner<VM: VMBinding>(
region: Address,
alignment: usize,
offset: usize,
known_alignment: usize,
fillalignmentgap: bool,
) -> Address {
debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
#[allow(clippy::assertions_on_constants)]
{
debug_assert!(VM::MIN_ALIGNMENT >= BYTES_IN_INT);
}
debug_assert!(!(fillalignmentgap && region.is_zero()));
debug_assert!(alignment <= VM::MAX_ALIGNMENT);
debug_assert!(region.is_aligned_to(VM::ALLOC_END_ALIGNMENT));
debug_assert!((alignment & (VM::MIN_ALIGNMENT - 1)) == 0);
debug_assert!((offset & (VM::MIN_ALIGNMENT - 1)) == 0);
if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
return region;
}
    // Round `region` up so that `result + offset` is a multiple of
    // `alignment`: delta = (-offset - region) mod alignment, computed with
    // a bit mask since `alignment` is a power of two.
    let mask = (alignment - 1) as isize;
    let neg_off: isize = -(offset as isize);
    let delta = neg_off.wrapping_sub_unsigned(region.as_usize()) & mask;
if fillalignmentgap && (VM::ALIGNMENT_VALUE != 0) {
fill_alignment_gap::<VM>(region, region + delta);
}
region + delta
}
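/// Fill the memory between `start` and `end` with `VM::ALIGNMENT_VALUE`,
/// so the VM can recognize alignment padding in the heap.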
pub fn fill_alignment_gap<VM: VMBinding>(start: Address, end: Address) {
if VM::ALIGNMENT_VALUE != 0 {
let start_ptr = start.to_mut_ptr::<u8>();
unsafe {
std::ptr::write_bytes(start_ptr, VM::ALIGNMENT_VALUE, end - start);
}
}
}
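/// Return the largest number of bytes the allocator may need in order to
/// satisfy a request of `size` bytes at `alignment`, accounting for
/// worst-case alignment padding.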
pub fn get_maximum_aligned_size<VM: VMBinding>(size: usize, alignment: usize) -> usize {
get_maximum_aligned_size_inner::<VM>(size, alignment, VM::MIN_ALIGNMENT)
}
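/// As [`get_maximum_aligned_size`], but with an explicit known alignment
/// of the allocation region.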
pub fn get_maximum_aligned_size_inner<VM: VMBinding>(
size: usize,
alignment: usize,
known_alignment: usize,
) -> usize {
trace!(
"size={}, alignment={}, known_alignment={}, MIN_ALIGNMENT={}",
size,
alignment,
known_alignment,
VM::MIN_ALIGNMENT
);
debug_assert!(size == size & !(known_alignment - 1));
debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
if VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT || alignment <= known_alignment {
size
} else {
size + alignment - known_alignment
}
}
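/// Sanity-check the arguments of an allocation request. Only compiled in
/// debug builds.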
#[cfg(debug_assertions)]
pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
debug_assert!(size >= MIN_OBJECT_SIZE);
debug_assert!(align >= VM::MIN_ALIGNMENT);
debug_assert!(align <= VM::MAX_ALIGNMENT);
debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0);
}
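/// The common context shared by allocator instances: per-request
/// allocation options, global MMTk state, user options, the GC trigger,
/// and (with the `analysis` feature) the analysis manager.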
pub struct AllocatorContext<VM: VMBinding> {
alloc_options: AllocationOptionsHolder,
pub state: Arc<GlobalState>,
pub options: Arc<Options>,
pub gc_trigger: Arc<GCTrigger<VM>>,
#[cfg(feature = "analysis")]
pub analysis_manager: Arc<AnalysisManager<VM>>,
}
impl<VM: VMBinding> AllocatorContext<VM> {
pub fn new(mmtk: &MMTK<VM>) -> Self {
Self {
alloc_options: AllocationOptionsHolder::new(AllocationOptions::default()),
state: mmtk.state.clone(),
options: mmtk.options.clone(),
gc_trigger: mmtk.gc_trigger.clone(),
#[cfg(feature = "analysis")]
analysis_manager: mmtk.analysis_manager.clone(),
}
}
pub fn set_alloc_options(&self, options: AllocationOptions) {
self.alloc_options.set_alloc_options(options);
}
pub fn clear_alloc_options(&self) {
self.alloc_options.clear_alloc_options();
}
pub fn get_alloc_options(&self) -> AllocationOptions {
self.alloc_options.get_alloc_options()
}
}
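/// A thread-local allocator that allocates into a specific [`Space`].
/// `alloc` is the fast path; when it cannot satisfy a request it falls
/// back to the slow paths, which may trigger garbage collection.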
pub trait Allocator<VM: VMBinding>: Downcast {
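    /// Return the [`VMThread`] this allocator is bound to.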
fn get_tls(&self) -> VMThread;
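    /// Return the [`Space`] this allocator allocates into.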
fn get_space(&self) -> &'static dyn Space<VM>;
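    /// Return the shared [`AllocatorContext`].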
fn get_context(&self) -> &AllocatorContext<VM>;
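    /// Whether this allocator hands out memory from thread-local buffers
    /// (e.g. bump-pointer blocks) instead of synchronizing with the space
    /// on every allocation.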
fn does_thread_local_allocation(&self) -> bool;
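    /// The granularity at which this allocator acquires thread-local
    /// buffers, used to estimate allocated bytes for stress testing.
    /// Only meaningful when `does_thread_local_allocation()` returns true.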
fn get_thread_local_buffer_granularity(&self) -> usize {
        assert!(
            self.does_thread_local_allocation(),
            "An allocator that does not do thread-local allocation does not have a buffer granularity."
        );
unimplemented!()
}
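    /// Allocate `size` bytes with the given `align` and `offset`. This is
    /// the fast path; implementations fall back to the slow path when the
    /// current buffer cannot satisfy the request.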
fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address;
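    /// Like `alloc`, but with non-default [`AllocationOptions`]. The
    /// options are installed in the context for the duration of the call
    /// and reset to the default afterwards.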
fn alloc_with_options(
&mut self,
size: usize,
align: usize,
offset: usize,
alloc_options: AllocationOptions,
) -> Address {
self.get_context().set_alloc_options(alloc_options);
let ret = self.alloc(size, align, offset);
self.get_context().clear_alloc_options();
ret
}
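    /// The entry point of the slow path. Marked `#[inline(never)]` so the
    /// fast path can be inlined without dragging the slow path along.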
#[inline(never)]
fn alloc_slow(&mut self, size: usize, align: usize, offset: usize) -> Address {
self.alloc_slow_inline(size, align, offset)
}
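    /// Like `alloc_slow`, but with non-default [`AllocationOptions`].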
fn alloc_slow_with_options(
&mut self,
size: usize,
align: usize,
offset: usize,
alloc_options: AllocationOptions,
) -> Address {
self.get_context().set_alloc_options(alloc_options);
let ret = self.alloc_slow(size, align, offset);
self.get_context().clear_alloc_options();
ret
}
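    /// The generic slow path. Retries the allocation (possibly triggering
    /// garbage collections) until it succeeds, returns a null address for
    /// a request that is not at a safepoint, or reports out-of-memory to
    /// the binding.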
fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address {
let tls = self.get_tls();
let is_mutator = VM::VMActivePlan::is_mutator(tls);
let stress_test = self.get_context().options.is_stress_test_gc_enabled();
let mut emergency_collection = false;
let mut previous_result_zero = false;
loop {
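            // For mutators under precise stress testing, use the precise
            // stress slow path; otherwise take the normal traced slow path.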
let result = if is_mutator && stress_test && *self.get_context().options.precise_stress
{
let need_poll = is_mutator && self.get_context().gc_trigger.should_do_stress_gc();
self.alloc_slow_once_precise_stress(size, align, offset, need_poll)
} else {
self.alloc_slow_once_traced(size, align, offset)
};
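            // Collector (non-mutator) allocations must always succeed.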
if !is_mutator {
debug_assert!(!result.is_zero());
return result;
}
if !result.is_zero() {
if !self
.get_context()
.state
.allocation_success
.load(Ordering::Relaxed)
{
self.get_context()
.state
.allocation_success
.store(true, Ordering::SeqCst);
}
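                // Update stress-test accounting only if no earlier attempt
                // in this loop has failed.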
if stress_test && self.get_context().state.is_initialized() && !previous_result_zero
{
let allocated_size = if *self.get_context().options.precise_stress
|| !self.does_thread_local_allocation()
{
size
} else {
crate::util::conversions::raw_align_up(
size,
self.get_thread_local_buffer_granularity(),
)
};
let _allocation_bytes = self
.get_context()
.state
.increase_allocation_bytes_by(allocated_size);
#[cfg(feature = "analysis")]
if _allocation_bytes > *self.get_context().options.analysis_factor {
trace!(
"Analysis: allocation_bytes = {} more than analysis_factor = {}",
_allocation_bytes,
*self.get_context().options.analysis_factor
);
self.get_context()
.analysis_manager
.alloc_hook(size, align, offset);
}
}
return result;
}
assert!(result.is_zero());
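            // The attempt failed. A request that is not at a safepoint
            // cannot block for a GC, so return a null address instead.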
if !self.get_context().get_alloc_options().at_safepoint {
return Address::ZERO;
}
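            // If the previous iteration already ran an emergency
            // collection and we still failed, report out-of-memory unless
            // another allocation has succeeded in the meantime (tracked by
            // the `allocation_success` flag).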
if emergency_collection && self.get_context().state.is_emergency_collection() {
trace!("Emergency collection");
let fail_with_oom = !self
.get_context()
.state
.allocation_success
.swap(true, Ordering::SeqCst);
trace!("fail with oom={}", fail_with_oom);
if fail_with_oom {
trace!("Throw HeapOutOfMemory!");
VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory);
self.get_context()
.state
.allocation_success
.store(false, Ordering::SeqCst);
return result;
}
}
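            // Record whether the last collection was an emergency
            // collection; if so, we make one more attempt before
            // signalling OOM above.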
emergency_collection = self.get_context().state.is_emergency_collection();
trace!("Got emergency collection as {}", emergency_collection);
previous_result_zero = true;
}
}
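    /// The actual slow-path allocation of a concrete allocator, called by
    /// the retry loop in `alloc_slow_inline`. This typically acquires new
    /// memory from the space and may trigger a GC.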
fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;
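    /// Wrap `alloc_slow_once` with the `alloc_slow_once_start` and
    /// `alloc_slow_once_end` tracepoints.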
fn alloc_slow_once_traced(&mut self, size: usize, align: usize, offset: usize) -> Address {
probe!(mmtk, alloc_slow_once_start);
#[allow(clippy::let_and_return)]
let ret = self.alloc_slow_once(size, align, offset);
probe!(mmtk, alloc_slow_once_end);
ret
}
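    /// Slow-path allocation under precise stress testing. Allocators that
    /// do thread-local allocation should override this and poll for GC
    /// when `need_poll` is true; this default implementation only logs a
    /// warning and delegates to the normal slow path.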
fn alloc_slow_once_precise_stress(
&mut self,
size: usize,
align: usize,
offset: usize,
need_poll: bool,
) -> Address {
if self.does_thread_local_allocation() && need_poll {
warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement allow_slow_once_stress_test()).", std::any::type_name::<Self>());
}
self.alloc_slow_once_traced(size, align, offset)
}
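    /// Called when the mutator that owns this allocator is destroyed.
    /// Implementations may release or transfer thread-local resources;
    /// the default does nothing.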
    fn on_mutator_destroy(&mut self) {}
}
impl_downcast!(Allocator<VM> where VM: VMBinding);