use super::PlanConstraints;
use crate::global_state::GlobalState;
use crate::mmtk::MMTK;
use crate::plan::gc_work::{ClearCommonPlanUnlogBits, SetCommonPlanUnlogBits};
use crate::plan::tracing::ObjectQueue;
use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::{PlanCreateSpaceArgs, Space};
#[cfg(feature = "vm_space")]
use crate::policy::vmspace::VMSpace;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::{CopyConfig, GCWorkerCopyContext};
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::HeapMeta;
use crate::util::heap::VMRequest;
use crate::util::metadata::log_bit::UnlogBitsOperation;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
use crate::util::options::PlanSelector;
use crate::util::statistics::stats::Stats;
use crate::util::{conversions, ObjectReference};
use crate::util::{VMMutatorThread, VMWorkerThread};
use crate::vm::*;
use downcast_rs::Downcast;
use enum_map::EnumMap;
use mmtk_macros::{HasSpaces, PlanTraceObject};
use std::sync::atomic::Ordering;
use std::sync::Arc;
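/// Create a boxed [`Mutator`] for the plan selected in the MMTk options, bound to
/// the given mutator thread.
///
/// Bindings normally reach this through the public API rather than calling it
/// directly. A minimal sketch, assuming a hypothetical binding that holds a
/// `&'static MMTK<MyVM>` in `mmtk` and an opaque thread handle in `tls`:
///
/// ```ignore
/// use mmtk::memory_manager;
///
/// let mutator = memory_manager::bind_mutator(mmtk, tls);
/// ```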
pub fn create_mutator<VM: VMBinding>(
tls: VMMutatorThread,
mmtk: &'static MMTK<VM>,
) -> Box<Mutator<VM>> {
Box::new(match *mmtk.options.plan {
PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, mmtk),
PlanSelector::SemiSpace => crate::plan::semispace::mutator::create_ss_mutator(tls, mmtk),
PlanSelector::GenCopy => {
crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk)
}
PlanSelector::GenImmix => {
crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk)
}
PlanSelector::MarkSweep => crate::plan::marksweep::mutator::create_ms_mutator(tls, mmtk),
PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, mmtk),
PlanSelector::PageProtect => {
crate::plan::pageprotect::mutator::create_pp_mutator(tls, mmtk)
}
PlanSelector::MarkCompact => {
crate::plan::markcompact::mutator::create_markcompact_mutator(tls, mmtk)
}
PlanSelector::StickyImmix => {
crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk)
}
PlanSelector::ConcurrentImmix => {
crate::plan::concurrent::immix::mutator::create_concurrent_immix_mutator(tls, mmtk)
}
PlanSelector::Compressor => {
crate::plan::compressor::mutator::create_compressor_mutator(tls, mmtk)
}
})
}
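/// Instantiate the plan selected by `plan`, and register every space the new plan
/// owns with the global SFT map so that object-to-space lookups work from the start.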
pub fn create_plan<VM: VMBinding>(
plan: PlanSelector,
args: CreateGeneralPlanArgs<VM>,
) -> Box<dyn Plan<VM = VM>> {
let plan = match plan {
PlanSelector::NoGC => {
Box::new(crate::plan::nogc::NoGC::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::SemiSpace => {
Box::new(crate::plan::semispace::SemiSpace::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::GenCopy => Box::new(crate::plan::generational::copying::GenCopy::new(args))
as Box<dyn Plan<VM = VM>>,
PlanSelector::GenImmix => Box::new(crate::plan::generational::immix::GenImmix::new(args))
as Box<dyn Plan<VM = VM>>,
PlanSelector::MarkSweep => {
Box::new(crate::plan::marksweep::MarkSweep::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::Immix => {
Box::new(crate::plan::immix::Immix::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::PageProtect => {
Box::new(crate::plan::pageprotect::PageProtect::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::MarkCompact => {
Box::new(crate::plan::markcompact::MarkCompact::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::StickyImmix => {
Box::new(crate::plan::sticky::immix::StickyImmix::new(args)) as Box<dyn Plan<VM = VM>>
}
PlanSelector::ConcurrentImmix => {
Box::new(crate::plan::concurrent::immix::ConcurrentImmix::new(args))
as Box<dyn Plan<VM = VM>>
}
PlanSelector::Compressor => {
Box::new(crate::plan::compressor::Compressor::new(args)) as Box<dyn Plan<VM = VM>>
}
};
    // SAFETY: Plans are created during MMTk initialization, before mutators or GC
    // workers can touch the SFT map, so this exclusive access is assumed to be sound.
    let sft_map: &mut dyn crate::policy::sft_map::SFTMap =
        unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut();
plan.for_each_space(&mut |s| {
sft_map.notify_space_creation(s.as_sft());
s.initialize_sft(sft_map);
});
plan
}
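/// Create the per-worker copy context for a GC worker thread, configured from the
/// plan's [`CopyConfig`].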
pub fn create_gc_worker_context<VM: VMBinding>(
tls: VMWorkerThread,
mmtk: &'static MMTK<VM>,
) -> GCWorkerCopyContext<VM> {
GCWorkerCopyContext::<VM>::new(tls, mmtk, mmtk.get_plan().create_copy_config())
}
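/// A plan defines the global heap layout and the collection algorithm: which spaces
/// exist, when and how collections are scheduled, and how heap usage is accounted.
/// Each supported GC algorithm (semispace, Immix, mark-sweep, ...) provides one
/// implementation of this trait.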
pub trait Plan: 'static + HasSpaces + Sync + Downcast {
fn constraints(&self) -> &'static PlanConstraints;
fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
CopyConfig::default()
}
fn base(&self) -> &BasePlan<Self::VM>;
fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;
fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);
fn common(&self) -> &CommonPlan<Self::VM> {
panic!("Common Plan not handled!")
}
fn generational(
&self,
) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
None
}
fn concurrent(
&self,
) -> Option<&dyn crate::plan::concurrent::global::ConcurrentPlan<VM = Self::VM>> {
None
}
fn options(&self) -> &Options {
&self.base().options
}
fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector>;
fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler<Self::VM>) {}
fn prepare(&mut self, tls: VMWorkerThread);
fn prepare_worker(&self, _worker: &mut GCWorker<Self::VM>) {}
fn release(&mut self, tls: VMWorkerThread);
fn end_of_gc(&mut self, _tls: VMWorkerThread);
fn notify_emergency_collection(&self) {
if let Some(gen) = self.generational() {
gen.force_full_heap_collection();
}
}
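    /// Return `true` if a collection should be triggered now. `space_full` indicates
    /// that an individual space is full, and `space` identifies that space.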
fn collection_required(&self, space_full: bool, space: Option<SpaceStats<Self::VM>>) -> bool;
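    /// Return the number of pages the plan currently reserves: the pages already
    /// used, plus the collection reserve needed to guarantee that a GC can complete,
    /// plus pages backing memory the VM itself keeps alive outside MMTk spaces.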
fn get_reserved_pages(&self) -> usize {
let used_pages = self.get_used_pages();
let collection_reserve = self.get_collection_reserved_pages();
let vm_live_bytes = <Self::VM as VMBinding>::VMCollection::vm_live_bytes();
let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes);
let total = used_pages + collection_reserve + vm_live_pages;
trace!(
"Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}",
total,
used_pages,
collection_reserve,
vm_live_pages,
);
total
}
fn get_total_pages(&self) -> usize {
self.base()
.gc_trigger
.policy
.get_current_heap_size_in_pages()
}
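    /// Return the number of pages still available for allocation, i.e.
    /// `total - reserved`, saturating at zero because the reservation can
    /// transiently exceed the heap size around a GC.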
fn get_available_pages(&self) -> usize {
let reserved_pages = self.get_reserved_pages();
let total_pages = self.get_total_pages();
let available_pages = total_pages.saturating_sub(reserved_pages);
trace!(
"Total pages = {}, reserved pages = {}, available pages = {}",
total_pages,
reserved_pages,
available_pages,
);
available_pages
}
fn get_collection_reserved_pages(&self) -> usize {
0
}
fn get_used_pages(&self) -> usize;
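    /// Return the number of unused pages, i.e. `total - used`. Unlike
    /// [`Plan::get_available_pages`], this does not subtract the collection reserve.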
fn get_free_pages(&self) -> usize {
let total_pages = self.get_total_pages();
let used_pages = self.get_used_pages();
total_pages.saturating_sub(used_pages)
}
fn last_collection_was_exhaustive(&self) -> bool {
true
}
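    /// Return `true` if the collection currently in progress may move any object.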
fn current_gc_may_move_object(&self) -> bool;
fn sanity_check_object(&self, _object: ObjectReference) -> bool {
true
}
fn verify_side_metadata_sanity(&self) {
let mut side_metadata_sanity_checker = SideMetadataSanity::new();
self.for_each_space(&mut |space| {
space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker);
})
}
}
impl_downcast!(Plan assoc VM);
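/// State shared by *all* plans: the global state, options, GC trigger and scheduler,
/// plus the optional code, read-only and VM spaces gated behind cargo features.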
#[derive(HasSpaces, PlanTraceObject)]
pub struct BasePlan<VM: VMBinding> {
pub(crate) global_state: Arc<GlobalState>,
pub options: Arc<Options>,
pub gc_trigger: Arc<GCTrigger<VM>>,
pub scheduler: Arc<GCWorkScheduler<VM>>,
#[cfg(feature = "code_space")]
#[space]
pub code_space: ImmortalSpace<VM>,
#[cfg(feature = "code_space")]
#[space]
pub code_lo_space: ImmortalSpace<VM>,
#[cfg(feature = "ro_space")]
#[space]
pub ro_space: ImmortalSpace<VM>,
#[cfg(feature = "vm_space")]
#[space]
pub vm_space: VMSpace<VM>,
}
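/// Arguments shared by the constructors of all plans, independent of the concrete
/// plan being created.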
pub struct CreateGeneralPlanArgs<'a, VM: VMBinding> {
pub vm_map: &'static dyn VMMap,
pub mmapper: &'static dyn Mmapper,
pub options: Arc<Options>,
pub state: Arc<GlobalState>,
pub gc_trigger: Arc<crate::util::heap::gc_trigger::GCTrigger<VM>>,
pub scheduler: Arc<GCWorkScheduler<VM>>,
pub stats: &'a Stats,
pub heap: &'a mut HeapMeta,
}
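/// Plan-specific construction arguments: the general arguments plus the plan's
/// constraints and the global side metadata specs accumulated so far.
///
/// A minimal sketch of how a plan constructor consumes this; the space name
/// `"my_space"` is hypothetical:
///
/// ```ignore
/// let my_space = ImmortalSpace::new(args.get_normal_space_args(
///     "my_space",
///     true,  // zeroed
///     false, // not executable
///     VMRequest::discontiguous(),
/// ));
/// ```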
pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> {
pub global_args: CreateGeneralPlanArgs<'a, VM>,
pub constraints: &'static PlanConstraints,
pub global_side_metadata_specs: Vec<SideMetadataSpec>,
}
impl<VM: VMBinding> CreateSpecificPlanArgs<'_, VM> {
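    /// Build the creation arguments for one space. The two unlog flags control when
    /// the space sets the unlog bits of its objects (used by generational write
    /// barriers): nursery spaces pass `(false, false)`, mature spaces `(true, true)`
    /// (unlogged on allocation and on trace), and mixed-age spaces `(false, true)`
    /// (objects become unlogged only once they survive a trace).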
pub fn _get_space_args(
&mut self,
name: &'static str,
zeroed: bool,
permission_exec: bool,
unlog_allocated_object: bool,
unlog_traced_object: bool,
vmrequest: VMRequest,
) -> PlanCreateSpaceArgs<'_, VM> {
PlanCreateSpaceArgs {
name,
zeroed,
permission_exec,
vmrequest,
unlog_allocated_object,
unlog_traced_object,
global_side_metadata_specs: self.global_side_metadata_specs.clone(),
vm_map: self.global_args.vm_map,
mmapper: self.global_args.mmapper,
heap: self.global_args.heap,
constraints: self.constraints,
gc_trigger: self.global_args.gc_trigger.clone(),
scheduler: self.global_args.scheduler.clone(),
options: self.global_args.options.clone(),
global_state: self.global_args.state.clone(),
}
}
pub fn get_nursery_space_args(
&mut self,
name: &'static str,
zeroed: bool,
permission_exec: bool,
vmrequest: VMRequest,
) -> PlanCreateSpaceArgs<'_, VM> {
self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
}
pub fn get_mature_space_args(
&mut self,
name: &'static str,
zeroed: bool,
permission_exec: bool,
vmrequest: VMRequest,
) -> PlanCreateSpaceArgs<'_, VM> {
self._get_space_args(name, zeroed, permission_exec, true, true, vmrequest)
}
pub fn get_mixed_age_space_args(
&mut self,
name: &'static str,
zeroed: bool,
permission_exec: bool,
vmrequest: VMRequest,
) -> PlanCreateSpaceArgs<'_, VM> {
self._get_space_args(name, zeroed, permission_exec, false, true, vmrequest)
}
pub fn get_normal_space_args(
&mut self,
name: &'static str,
zeroed: bool,
permission_exec: bool,
vmrequest: VMRequest,
) -> PlanCreateSpaceArgs<'_, VM> {
self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
}
pub fn get_common_space_args(
&mut self,
generational: bool,
name: &'static str,
) -> PlanCreateSpaceArgs<'_, VM> {
        // Common spaces are never executable (permission_exec = false).
        self.get_base_space_args(generational, name, false)
}
pub fn get_base_space_args(
&mut self,
generational: bool,
name: &'static str,
permission_exec: bool,
) -> PlanCreateSpaceArgs<'_, VM> {
if generational {
self.get_mature_space_args(name, true, permission_exec, VMRequest::discontiguous())
} else {
self.get_normal_space_args(name, true, permission_exec, VMRequest::discontiguous())
}
}
}
impl<VM: VMBinding> BasePlan<VM> {
    #[allow(unused_mut)]
    pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> BasePlan<VM> {
let _generational = args.constraints.generational;
BasePlan {
#[cfg(feature = "code_space")]
code_space: ImmortalSpace::new(args.get_base_space_args(
_generational,
"code_space",
true,
)),
#[cfg(feature = "code_space")]
code_lo_space: ImmortalSpace::new(args.get_base_space_args(
_generational,
"code_lo_space",
true,
)),
#[cfg(feature = "ro_space")]
ro_space: ImmortalSpace::new(args.get_base_space_args(
_generational,
"ro_space",
false,
)),
#[cfg(feature = "vm_space")]
vm_space: VMSpace::new(args.get_base_space_args(
_generational,
"vm_space",
                false,
            )),
global_state: args.global_args.state.clone(),
gc_trigger: args.global_args.gc_trigger,
options: args.global_args.options,
scheduler: args.global_args.scheduler,
}
}
pub fn get_used_pages(&self) -> usize {
#[allow(unused_mut)]
let mut pages = 0;
#[cfg(feature = "code_space")]
{
pages += self.code_space.reserved_pages();
pages += self.code_lo_space.reserved_pages();
}
#[cfg(feature = "ro_space")]
{
pages += self.ro_space.reserved_pages();
}
#[cfg(feature = "malloc_counted_size")]
{
pages += self.global_state.get_malloc_bytes_in_pages();
}
pages
}
pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
#[cfg(feature = "code_space")]
self.code_space.prepare();
#[cfg(feature = "code_space")]
self.code_lo_space.prepare();
#[cfg(feature = "ro_space")]
self.ro_space.prepare();
#[cfg(feature = "vm_space")]
self.vm_space.prepare();
}
pub fn release(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
#[cfg(feature = "code_space")]
self.code_space.release();
#[cfg(feature = "code_space")]
self.code_lo_space.release();
#[cfg(feature = "ro_space")]
self.ro_space.release();
#[cfg(feature = "vm_space")]
self.vm_space.release();
}
pub fn clear_side_log_bits(&self) {
#[cfg(feature = "code_space")]
self.code_space.clear_side_log_bits();
#[cfg(feature = "code_space")]
self.code_lo_space.clear_side_log_bits();
#[cfg(feature = "ro_space")]
self.ro_space.clear_side_log_bits();
#[cfg(feature = "vm_space")]
self.vm_space.clear_side_log_bits();
}
pub fn set_side_log_bits(&self) {
#[cfg(feature = "code_space")]
self.code_space.set_side_log_bits();
#[cfg(feature = "code_space")]
self.code_lo_space.set_side_log_bits();
#[cfg(feature = "ro_space")]
self.ro_space.set_side_log_bits();
#[cfg(feature = "vm_space")]
self.vm_space.set_side_log_bits();
}
    pub fn end_of_gc(&mut self, _tls: VMWorkerThread) {
        // BasePlan has no end-of-GC work of its own.
    }
pub(crate) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
let stress_force_gc =
crate::util::heap::gc_trigger::GCTrigger::<VM>::should_do_stress_gc_inner(
&self.global_state,
&self.options,
);
if stress_force_gc {
debug!(
"Stress GC: allocation_bytes = {}, stress_factor = {}",
self.global_state.allocation_bytes.load(Ordering::Relaxed),
*self.options.stress_factor
);
debug!("Doing stress GC");
self.global_state
.allocation_bytes
.store(0, Ordering::SeqCst);
}
debug!(
"self.get_reserved_pages()={}, self.get_total_pages()={}",
plan.get_reserved_pages(),
plan.get_total_pages()
);
let heap_full = plan.base().gc_trigger.is_heap_full();
space_full || stress_force_gc || heap_full
}
}
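// Select the policy backing the non-moving space. Immix is the default; the
// `immortal_as_nonmoving` and `marksweep_as_nonmoving` features substitute an
// immortal space or a native mark-sweep space, respectively.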
cfg_if::cfg_if! {
if #[cfg(feature = "immortal_as_nonmoving")] {
pub type NonMovingSpace<VM> = crate::policy::immortalspace::ImmortalSpace<VM>;
} else if #[cfg(feature = "marksweep_as_nonmoving")] {
pub type NonMovingSpace<VM> = crate::policy::marksweepspace::native_ms::MarkSweepSpace<VM>;
} else {
pub type NonMovingSpace<VM> = crate::policy::immix::ImmixSpace<VM>;
}
}
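/// Spaces and state common to most plans beyond [`BasePlan`]: an immortal space, a
/// large object space, and a non-moving space.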
#[derive(HasSpaces, PlanTraceObject)]
pub struct CommonPlan<VM: VMBinding> {
#[space]
pub immortal: ImmortalSpace<VM>,
#[space]
pub los: LargeObjectSpace<VM>,
#[space]
#[cfg_attr(
not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")),
post_scan
    )]
    pub nonmoving: NonMovingSpace<VM>,
#[parent]
pub base: BasePlan<VM>,
}
impl<VM: VMBinding> CommonPlan<VM> {
pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> CommonPlan<VM> {
let needs_log_bit = args.constraints.needs_log_bit;
let generational = args.constraints.generational;
CommonPlan {
immortal: ImmortalSpace::new(args.get_common_space_args(generational, "immortal")),
los: LargeObjectSpace::new(
if generational {
args.get_mixed_age_space_args("los", true, false, VMRequest::discontiguous())
} else {
args.get_normal_space_args("los", true, false, VMRequest::discontiguous())
},
false,
needs_log_bit,
),
nonmoving: Self::new_nonmoving_space(&mut args),
base: BasePlan::new(args),
}
}
pub fn get_used_pages(&self) -> usize {
self.immortal.reserved_pages()
+ self.los.reserved_pages()
+ self.nonmoving.reserved_pages()
+ self.base.get_used_pages()
}
pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) {
self.immortal.prepare();
self.los.prepare(full_heap);
self.prepare_nonmoving_space(full_heap);
self.base.prepare(tls, full_heap)
}
pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) {
self.immortal.release();
self.los.release(full_heap);
self.release_nonmoving_space(full_heap);
self.base.release(tls, full_heap)
}
    /// Schedule a bulk set or clear of the unlog bits over the common spaces, when
    /// the global log bit is kept in side metadata.
    pub(crate) fn schedule_unlog_bits_op(&mut self, unlog_bits_op: UnlogBitsOperation) {
        if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() {
            // SAFETY: The plan lives as long as the MMTk instance, which outlives the
            // work packets scheduled below, so extending this borrow is assumed sound.
            let common_plan = unsafe { &*(self as *const CommonPlan<VM>) };
match unlog_bits_op {
UnlogBitsOperation::NoOp => {}
UnlogBitsOperation::BulkSet => {
self.base.scheduler.work_buckets[WorkBucketStage::Prepare]
.add(SetCommonPlanUnlogBits { common_plan });
}
UnlogBitsOperation::BulkClear => {
self.base.scheduler.work_buckets[WorkBucketStage::Release]
.add(ClearCommonPlanUnlogBits { common_plan });
}
}
}
}
pub fn clear_side_log_bits(&self) {
self.immortal.clear_side_log_bits();
self.los.clear_side_log_bits();
self.base.clear_side_log_bits();
}
pub fn set_side_log_bits(&self) {
self.immortal.set_side_log_bits();
self.los.set_side_log_bits();
self.base.set_side_log_bits();
}
pub fn end_of_gc(&mut self, tls: VMWorkerThread) {
self.end_of_gc_nonmoving_space();
self.base.end_of_gc(tls);
}
pub fn get_immortal(&self) -> &ImmortalSpace<VM> {
&self.immortal
}
pub fn get_los(&self) -> &LargeObjectSpace<VM> {
&self.los
}
pub fn get_nonmoving(&self) -> &NonMovingSpace<VM> {
&self.nonmoving
}
fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs<VM>) -> NonMovingSpace<VM> {
let space_args = args.get_common_space_args(args.constraints.generational, "nonmoving");
cfg_if::cfg_if! {
if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] {
NonMovingSpace::new(space_args)
} else {
NonMovingSpace::new(
space_args,
crate::policy::immix::ImmixSpaceArgs {
mixed_age: false,
never_move_objects: true,
},
)
}
}
}
fn prepare_nonmoving_space(&mut self, _full_heap: bool) {
cfg_if::cfg_if! {
if #[cfg(feature = "immortal_as_nonmoving")] {
self.nonmoving.prepare();
} else if #[cfg(feature = "marksweep_as_nonmoving")] {
self.nonmoving.prepare(_full_heap);
} else {
self.nonmoving.prepare(_full_heap, None, UnlogBitsOperation::NoOp);
}
}
}
fn release_nonmoving_space(&mut self, _full_heap: bool) {
cfg_if::cfg_if! {
if #[cfg(feature = "immortal_as_nonmoving")] {
self.nonmoving.release();
} else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.release(_full_heap);
} else {
self.nonmoving.release(_full_heap, UnlogBitsOperation::NoOp);
}
}
}
fn end_of_gc_nonmoving_space(&mut self) {
cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                // An immortal non-moving space needs no end-of-GC work.
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
self.nonmoving.end_of_gc();
} else {
self.nonmoving.end_of_gc();
}
}
}
}
use crate::policy::gc_work::TraceKind;
use crate::vm::VMBinding;
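/// Access to the spaces of a plan (or of a composite structure such as
/// [`CommonPlan`]). This is normally derived: fields marked `#[space]` are visited
/// directly, and a field marked `#[parent]` is recursed into. A hypothetical plan
/// would derive it like this:
///
/// ```ignore
/// #[derive(HasSpaces)]
/// pub struct MyPlan<VM: VMBinding> {
///     #[space]
///     my_space: ImmortalSpace<VM>,
///     #[parent]
///     common: CommonPlan<VM>,
/// }
/// ```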
pub trait HasSpaces {
type VM: VMBinding;
fn for_each_space(&self, func: &mut dyn FnMut(&dyn Space<Self::VM>));
fn for_each_space_mut(&mut self, func: &mut dyn FnMut(&mut dyn Space<Self::VM>));
}
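/// Object tracing dispatched over the spaces of a plan. This is also normally
/// derived. The `KIND` const parameter identifies the kind of trace, which lets
/// `may_move_objects` be answered statically per trace kind.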
pub trait PlanTraceObject<VM: VMBinding> {
fn trace_object<Q: ObjectQueue, const KIND: TraceKind>(
&self,
queue: &mut Q,
object: ObjectReference,
worker: &mut GCWorker<VM>,
) -> ObjectReference;
fn post_scan_object(&self, object: ObjectReference);
fn may_move_objects<const KIND: TraceKind>() -> bool;
}
use enum_map::Enum;
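/// The semantics of an allocation request, used by bindings to direct an object to
/// an appropriate space. A minimal sketch, assuming a hypothetical binding that
/// already holds a `Mutator` in `mutator`:
///
/// ```ignore
/// use mmtk::{memory_manager, AllocationSemantics};
///
/// // Allocate 32 bytes, 8-byte aligned, that must never be moved by the GC.
/// let addr = memory_manager::alloc(&mut mutator, 32, 8, 0, AllocationSemantics::NonMoving);
/// ```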
#[repr(i32)]
#[derive(Clone, Copy, Debug, Enum, PartialEq, Eq)]
pub enum AllocationSemantics {
    /// The plan's default allocation semantics.
    Default = 0,
    /// Objects that live for the rest of the program's execution.
    Immortal = 1,
    /// Large objects, allocated into the large object space.
    Los = 2,
    /// Executable code objects.
    Code = 3,
    /// Read-only objects.
    ReadOnly = 4,
    /// Large executable code objects.
    LargeCode = 5,
    /// Objects that are never moved by the collector.
    NonMoving = 6,
}