use super::gc_requester::GCRequester;
use super::PlanConstraints;
use crate::mmtk::MMTK;
use crate::plan::generational::global::Gen;
use crate::plan::tracing::ObjectQueue;
use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::Space;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
#[cfg(feature = "analysis")]
use crate::util::analysis::AnalysisManager;
use crate::util::copy::{CopyConfig, GCWorkerCopyContext};
use crate::util::heap::layout::heap_layout::Mmapper;
use crate::util::heap::layout::heap_layout::VMMap;
use crate::util::heap::HeapMeta;
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
use crate::util::options::PlanSelector;
use crate::util::statistics::stats::Stats;
use crate::util::ObjectReference;
use crate::util::{VMMutatorThread, VMWorkerThread};
use crate::vm::*;
use downcast_rs::Downcast;
use enum_map::EnumMap;
use mmtk_macros::PlanTraceObject;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
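
/// Create a mutator for the given mutator thread, dispatching on the plan selected in
/// `mmtk.options.plan`. Bindings normally reach this through `memory_manager::bind_mutator`;
/// a minimal sketch (assuming a binding-side static instance named `MMTK_INSTANCE`):
/// ```ignore
/// let mutator = mmtk::memory_manager::bind_mutator(&MMTK_INSTANCE, tls);
/// ```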
pub fn create_mutator<VM: VMBinding>(
tls: VMMutatorThread,
mmtk: &'static MMTK<VM>,
) -> Box<Mutator<VM>> {
Box::new(match *mmtk.options.plan {
PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, &*mmtk.plan),
PlanSelector::SemiSpace => {
crate::plan::semispace::mutator::create_ss_mutator(tls, &*mmtk.plan)
}
PlanSelector::GenCopy => {
crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk)
}
PlanSelector::GenImmix => {
crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk)
}
PlanSelector::MarkSweep => {
crate::plan::marksweep::mutator::create_ms_mutator(tls, &*mmtk.plan)
}
PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, &*mmtk.plan),
PlanSelector::PageProtect => {
crate::plan::pageprotect::mutator::create_pp_mutator(tls, &*mmtk.plan)
}
PlanSelector::MarkCompact => {
crate::plan::markcompact::mutator::create_markcompact_mutator(tls, &*mmtk.plan)
}
})
}
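
/// Create a plan instance for the given `PlanSelector`. The SFT entries of all the spaces in the
/// newly created plan are initialized before the plan is returned.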
pub fn create_plan<VM: VMBinding>(
plan: PlanSelector,
vm_map: &'static VMMap,
mmapper: &'static Mmapper,
options: Arc<Options>,
scheduler: Arc<GCWorkScheduler<VM>>,
) -> Box<dyn Plan<VM = VM>> {
let plan = match plan {
PlanSelector::NoGC => Box::new(crate::plan::nogc::NoGC::new(vm_map, mmapper, options))
as Box<dyn Plan<VM = VM>>,
PlanSelector::SemiSpace => Box::new(crate::plan::semispace::SemiSpace::new(
vm_map, mmapper, options,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::GenCopy => Box::new(crate::plan::generational::copying::GenCopy::new(
vm_map, mmapper, options,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::GenImmix => Box::new(crate::plan::generational::immix::GenImmix::new(
vm_map, mmapper, options, scheduler,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::MarkSweep => Box::new(crate::plan::marksweep::MarkSweep::new(
vm_map, mmapper, options, scheduler,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::Immix => Box::new(crate::plan::immix::Immix::new(
vm_map, mmapper, options, scheduler,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::PageProtect => Box::new(crate::plan::pageprotect::PageProtect::new(
vm_map, mmapper, options,
)) as Box<dyn Plan<VM = VM>>,
PlanSelector::MarkCompact => Box::new(crate::plan::markcompact::MarkCompact::new(
vm_map, mmapper, options,
)) as Box<dyn Plan<VM = VM>>,
};
plan.get_spaces()
.into_iter()
.for_each(|s| s.initialize_sft());
plan
}
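
/// Create a `GCWorkerCopyContext` for a GC worker thread, using the copy configuration of the
/// current plan.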
pub fn create_gc_worker_context<VM: VMBinding>(
tls: VMWorkerThread,
mmtk: &'static MMTK<VM>,
) -> GCWorkerCopyContext<VM> {
GCWorkerCopyContext::<VM>::new(tls, &*mmtk.plan, mmtk.plan.create_copy_config())
}
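
/// A plan is the implementation of a garbage collection algorithm in MMTk. A plan owns a set of
/// spaces (exposed through `get_spaces`), maps allocation semantics to allocators, decides when a
/// collection is required (`collection_required`), and implements the prepare/release steps of a
/// collection. Every plan contains a `BasePlan`, and most plans also contain a `CommonPlan`.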
pub trait Plan: 'static + Sync + Downcast {
type VM: VMBinding;
fn constraints(&self) -> &'static PlanConstraints;
fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
CopyConfig::default()
}
fn base(&self) -> &BasePlan<Self::VM>;
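    /// Schedule the work packets needed for the upcoming collection on the given scheduler.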
fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);
fn common(&self) -> &CommonPlan<Self::VM> {
panic!("Common Plan not handled!")
}
fn generational(&self) -> &Gen<Self::VM> {
panic!("This is not a generational plan.")
}
fn mmapper(&self) -> &'static Mmapper {
self.base().mmapper
}
fn options(&self) -> &Options {
&self.base().options
}
fn get_spaces(&self) -> Vec<&dyn Space<Self::VM>>;
fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector>;
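    /// Return true if the current GC only collects the nursery. The default is false, which is
    /// correct for non-generational plans.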
fn is_current_gc_nursery(&self) -> bool {
false
}
#[cfg(feature = "sanity")]
fn enter_sanity(&self) {
self.base().inside_sanity.store(true, Ordering::Relaxed)
}
#[cfg(feature = "sanity")]
fn leave_sanity(&self) {
self.base().inside_sanity.store(false, Ordering::Relaxed)
}
#[cfg(feature = "sanity")]
fn is_in_sanity(&self) -> bool {
self.base().inside_sanity.load(Ordering::Relaxed)
}
fn is_initialized(&self) -> bool {
self.base().initialized.load(Ordering::SeqCst)
}
fn should_trigger_gc_when_heap_is_full(&self) -> bool {
self.base()
.trigger_gc_when_heap_is_full
.load(Ordering::SeqCst)
}
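    /// Prepare the plan at the start of a collection (the Prepare phase of a GC).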
fn prepare(&mut self, tls: VMWorkerThread);
fn prepare_worker(&self, _worker: &mut GCWorker<Self::VM>) {}
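    /// Release any resources held for the collection at the end of a GC (the Release phase).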
fn release(&mut self, tls: VMWorkerThread);
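    /// Ask the plan whether a GC should be triggered, e.g. when a space is full. If a collection
    /// is required, this requests a GC and returns true.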
fn poll(&self, space_full: bool, space: Option<&dyn Space<Self::VM>>) -> bool {
if self.collection_required(space_full, space) {
self.log_poll(space, "Triggering collection");
self.base().gc_requester.request();
return true;
}
false
}
fn log_poll(&self, space: Option<&dyn Space<Self::VM>>, message: &'static str) {
if let Some(space) = space {
info!(" [POLL] {}: {}", space.get_name(), message);
} else {
info!(" [POLL] {}", message);
}
}
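    /// Decide whether a collection is required now, given whether a space is full. This is the
    /// plan-specific GC trigger and is called from `poll`.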
fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool;
fn get_reserved_pages(&self) -> usize {
self.get_used_pages() + self.get_collection_reserved_pages()
}
fn get_total_pages(&self) -> usize {
self.base().heap.get_total_pages()
}
fn get_available_pages(&self) -> usize {
self.get_total_pages()
.saturating_sub(self.get_reserved_pages())
}
fn get_mature_physical_pages_available(&self) -> usize {
panic!("This is not a generational plan.")
}
fn get_collection_reserved_pages(&self) -> usize {
0
}
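    /// Get the number of pages currently used by the plan's spaces.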
fn get_used_pages(&self) -> usize;
fn get_free_pages(&self) -> usize {
self.get_total_pages() - self.get_used_pages()
}
fn is_emergency_collection(&self) -> bool {
self.base().emergency_collection.load(Ordering::Relaxed)
}
fn handle_user_collection_request(&self, tls: VMMutatorThread, force: bool) {
self.base().handle_user_collection_request(tls, force)
}
fn last_collection_was_exhaustive(&self) -> bool {
self.last_collection_full_heap()
}
fn last_collection_full_heap(&self) -> bool {
true
}
fn force_full_heap_collection(&self) {}
fn modify_check(&self, object: ObjectReference) {
assert!(
!(self.base().gc_in_progress_proper() && object.is_movable()),
"GC modifying a potentially moving object via Java (i.e. not magic) obj= {}",
object
);
}
}
impl_downcast!(Plan assoc VM);
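
/// The GC status of a plan: not in a GC, preparing for a GC, or performing the GC proper.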
#[derive(PartialEq)]
pub enum GcStatus {
NotInGC,
GcPrepare,
GcProper,
}
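
/// `BasePlan` provides the state and spaces that every plan has: GC trigger and status flags,
/// collection attempt counters, statistics, heap layout handles, and the feature-gated code,
/// read-only and VM spaces.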
#[derive(PlanTraceObject)]
pub struct BasePlan<VM: VMBinding> {
pub initialized: AtomicBool,
pub trigger_gc_when_heap_is_full: AtomicBool,
pub gc_status: Mutex<GcStatus>,
pub last_stress_pages: AtomicUsize,
pub emergency_collection: AtomicBool,
pub user_triggered_collection: AtomicBool,
pub internal_triggered_collection: AtomicBool,
pub last_internal_triggered_collection: AtomicBool,
pub allocation_success: AtomicBool,
pub max_collection_attempts: AtomicUsize,
pub cur_collection_attempts: AtomicUsize,
pub gc_requester: Arc<GCRequester<VM>>,
pub stats: Stats,
mmapper: &'static Mmapper,
pub vm_map: &'static VMMap,
pub options: Arc<Options>,
pub heap: HeapMeta,
#[cfg(feature = "sanity")]
pub inside_sanity: AtomicBool,
scanned_stacks: AtomicUsize,
stacks_prepared: AtomicBool,
pub mutator_iterator_lock: Mutex<()>,
allocation_bytes: AtomicUsize,
#[cfg(feature = "malloc_counted_size")]
malloc_bytes: AtomicUsize,
#[cfg(feature = "analysis")]
pub analysis_manager: AnalysisManager<VM>,
#[cfg(feature = "code_space")]
#[trace]
pub code_space: ImmortalSpace<VM>,
#[cfg(feature = "code_space")]
#[trace]
pub code_lo_space: ImmortalSpace<VM>,
#[cfg(feature = "ro_space")]
#[trace]
pub ro_space: ImmortalSpace<VM>,
#[cfg(feature = "vm_space")]
#[trace]
pub vm_space: ImmortalSpace<VM>,
}
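
/// Create the VM space (named "boot"): an immortal space of `boot_segment_bytes`, aligned up to
/// the chunk size, covering memory that the VM itself manages. The space is mapped before it is
/// returned.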
#[cfg(feature = "vm_space")]
pub fn create_vm_space<VM: VMBinding>(
vm_map: &'static VMMap,
mmapper: &'static Mmapper,
heap: &mut HeapMeta,
boot_segment_bytes: usize,
constraints: &'static PlanConstraints,
global_side_metadata_specs: Vec<SideMetadataSpec>,
) -> ImmortalSpace<VM> {
use crate::util::constants::LOG_BYTES_IN_MBYTE;
debug_assert!(boot_segment_bytes > 0);
use crate::util::conversions::raw_align_up;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
let boot_segment_mb = raw_align_up(boot_segment_bytes, BYTES_IN_CHUNK) >> LOG_BYTES_IN_MBYTE;
let space = ImmortalSpace::new(
"boot",
false,
VMRequest::fixed_size(boot_segment_mb),
global_side_metadata_specs,
vm_map,
mmapper,
heap,
constraints,
);
space.ensure_mapped();
space
}
impl<VM: VMBinding> BasePlan<VM> {
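    /// Create a new `BasePlan`, constructing the feature-gated spaces and initializing the GC
    /// bookkeeping state.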
    #[allow(unused_mut)]
    #[allow(unused_variables)]
    #[allow(clippy::redundant_clone)]
    pub fn new(
vm_map: &'static VMMap,
mmapper: &'static Mmapper,
options: Arc<Options>,
mut heap: HeapMeta,
constraints: &'static PlanConstraints,
global_side_metadata_specs: Vec<SideMetadataSpec>,
) -> BasePlan<VM> {
let stats = Stats::new(&options);
#[cfg(feature = "analysis")]
let analysis_manager = AnalysisManager::new(&stats);
BasePlan {
#[cfg(feature = "code_space")]
code_space: ImmortalSpace::new(
"code_space",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
),
#[cfg(feature = "code_space")]
code_lo_space: ImmortalSpace::new(
"code_lo_space",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
),
#[cfg(feature = "ro_space")]
ro_space: ImmortalSpace::new(
"ro_space",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
),
#[cfg(feature = "vm_space")]
vm_space: create_vm_space(
vm_map,
mmapper,
&mut heap,
*options.vm_space_size,
constraints,
global_side_metadata_specs,
),
initialized: AtomicBool::new(false),
trigger_gc_when_heap_is_full: AtomicBool::new(true),
gc_status: Mutex::new(GcStatus::NotInGC),
last_stress_pages: AtomicUsize::new(0),
stacks_prepared: AtomicBool::new(false),
emergency_collection: AtomicBool::new(false),
user_triggered_collection: AtomicBool::new(false),
internal_triggered_collection: AtomicBool::new(false),
last_internal_triggered_collection: AtomicBool::new(false),
allocation_success: AtomicBool::new(false),
max_collection_attempts: AtomicUsize::new(0),
cur_collection_attempts: AtomicUsize::new(0),
gc_requester: Arc::new(GCRequester::new()),
stats,
mmapper,
heap,
vm_map,
options,
#[cfg(feature = "sanity")]
inside_sanity: AtomicBool::new(false),
scanned_stacks: AtomicUsize::new(0),
mutator_iterator_lock: Mutex::new(()),
allocation_bytes: AtomicUsize::new(0),
#[cfg(feature = "malloc_counted_size")]
malloc_bytes: AtomicUsize::new(0),
#[cfg(feature = "analysis")]
analysis_manager,
}
}
pub fn get_spaces(&self) -> Vec<&dyn Space<VM>> {
vec![
#[cfg(feature = "code_space")]
&self.code_space,
#[cfg(feature = "code_space")]
&self.code_lo_space,
#[cfg(feature = "ro_space")]
&self.ro_space,
#[cfg(feature = "vm_space")]
&self.vm_space,
]
}
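    /// Handle a collection request from the user (e.g. `System.gc()` in Java). If the request is
    /// forced, or the `ignore_system_gc` option is off, this records a user-triggered collection,
    /// requests a GC, and blocks the requesting mutator until the GC completes.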
pub fn handle_user_collection_request(&self, tls: VMMutatorThread, force: bool) {
if force || !*self.options.ignore_system_gc {
info!("User triggering collection");
self.user_triggered_collection
.store(true, Ordering::Relaxed);
self.gc_requester.request();
VM::VMCollection::block_for_gc(tls);
}
}
#[allow(unused)]
pub fn trigger_internal_collection_request(&self) {
self.last_internal_triggered_collection
.store(true, Ordering::Relaxed);
self.internal_triggered_collection
.store(true, Ordering::Relaxed);
self.gc_requester.request();
}
pub fn reset_collection_trigger(&self) {
self.last_internal_triggered_collection.store(
self.internal_triggered_collection.load(Ordering::SeqCst),
Ordering::Relaxed,
);
self.internal_triggered_collection
.store(false, Ordering::SeqCst);
self.user_triggered_collection
.store(false, Ordering::Relaxed);
}
pub fn get_used_pages(&self) -> usize {
#[allow(unused_mut)]
let mut pages = 0;
#[cfg(feature = "code_space")]
{
pages += self.code_space.reserved_pages();
pages += self.code_lo_space.reserved_pages();
}
#[cfg(feature = "ro_space")]
{
pages += self.ro_space.reserved_pages();
}
#[cfg(feature = "malloc_counted_size")]
{
pages += crate::util::conversions::bytes_to_pages_up(
self.malloc_bytes.load(Ordering::SeqCst),
);
}
pages
}
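    /// Trace an object if it lives in one of the base spaces; otherwise defer to the VM binding
    /// via `vm_trace_object`.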
pub fn trace_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
worker: &mut GCWorker<VM>,
) -> ObjectReference {
#[cfg(feature = "code_space")]
if self.code_space.in_space(object) {
trace!("trace_object: object in code space");
return self.code_space.trace_object::<Q>(queue, object);
}
#[cfg(feature = "code_space")]
if self.code_lo_space.in_space(object) {
trace!("trace_object: object in large code space");
return self.code_lo_space.trace_object::<Q>(queue, object);
}
#[cfg(feature = "ro_space")]
if self.ro_space.in_space(object) {
trace!("trace_object: object in ro_space space");
return self.ro_space.trace_object(queue, object);
}
#[cfg(feature = "vm_space")]
if self.vm_space.in_space(object) {
trace!("trace_object: object in boot space");
return self.vm_space.trace_object(queue, object);
}
VM::VMActivePlan::vm_trace_object::<Q>(queue, object, worker)
}
pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
#[cfg(feature = "code_space")]
self.code_space.prepare();
#[cfg(feature = "code_space")]
self.code_lo_space.prepare();
#[cfg(feature = "ro_space")]
self.ro_space.prepare();
#[cfg(feature = "vm_space")]
self.vm_space.prepare();
}
pub fn release(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
#[cfg(feature = "code_space")]
self.code_space.release();
#[cfg(feature = "code_space")]
self.code_lo_space.release();
#[cfg(feature = "ro_space")]
self.ro_space.release();
#[cfg(feature = "vm_space")]
self.vm_space.release();
}
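    /// Decide the kind of the upcoming collection: record the number of collection attempts and
    /// determine whether this is an emergency collection, forcing a full-heap GC if it is.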
pub fn set_collection_kind<P: Plan>(&self, plan: &P) {
self.cur_collection_attempts.store(
if self.is_user_triggered_collection() {
1
} else {
self.determine_collection_attempts()
},
Ordering::Relaxed,
);
let emergency_collection = !self.is_internal_triggered_collection()
&& plan.last_collection_was_exhaustive()
&& self.cur_collection_attempts.load(Ordering::Relaxed) > 1;
self.emergency_collection
.store(emergency_collection, Ordering::Relaxed);
if emergency_collection {
plan.force_full_heap_collection();
}
}
pub fn set_gc_status(&self, s: GcStatus) {
let mut gc_status = self.gc_status.lock().unwrap();
if *gc_status == GcStatus::NotInGC {
self.stacks_prepared.store(false, Ordering::SeqCst);
self.stats.start_gc();
}
*gc_status = s;
        if *gc_status == GcStatus::NotInGC && self.stats.get_gathering_stats() {
            self.stats.end_gc();
        }
}
pub fn stacks_prepared(&self) -> bool {
self.stacks_prepared.load(Ordering::SeqCst)
}
pub fn prepare_for_stack_scanning(&self) {
self.scanned_stacks.store(0, Ordering::SeqCst);
self.stacks_prepared.store(false, Ordering::SeqCst);
}
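    /// Record that one more mutator stack has been scanned, and return true once all `n_mutators`
    /// stacks have been scanned, at which point stack scanning is marked as complete.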
pub fn inform_stack_scanned(&self, n_mutators: usize) -> bool {
let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst);
        debug_assert!(
            old < n_mutators,
            "The number of scanned stacks ({}) is not less than the number of mutators ({})",
            old,
            n_mutators
        );
let scanning_done = old + 1 == n_mutators;
if scanning_done {
self.stacks_prepared.store(true, Ordering::SeqCst);
}
scanning_done
}
pub fn gc_in_progress(&self) -> bool {
*self.gc_status.lock().unwrap() != GcStatus::NotInGC
}
pub fn gc_in_progress_proper(&self) -> bool {
*self.gc_status.lock().unwrap() == GcStatus::GcProper
}
fn determine_collection_attempts(&self) -> usize {
if !self.allocation_success.load(Ordering::Relaxed) {
self.max_collection_attempts.fetch_add(1, Ordering::Relaxed);
} else {
self.allocation_success.store(false, Ordering::Relaxed);
self.max_collection_attempts.store(1, Ordering::Relaxed);
}
self.max_collection_attempts.load(Ordering::Relaxed)
}
pub fn is_user_triggered_collection(&self) -> bool {
self.user_triggered_collection.load(Ordering::Relaxed)
}
pub fn is_internal_triggered_collection(&self) -> bool {
let is_internal_triggered = self
.last_internal_triggered_collection
.load(Ordering::SeqCst);
        assert!(
            !is_internal_triggered,
            "No concurrent GC is implemented, so there should be no internally triggered GC"
        );
is_internal_triggered
}
pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
trace!(
"Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
old_allocation_bytes,
size,
self.allocation_bytes.load(Ordering::Relaxed),
);
old_allocation_bytes + size
}
pub fn is_stress_test_gc_enabled(&self) -> bool {
use crate::util::constants::DEFAULT_STRESS_FACTOR;
*self.options.stress_factor != DEFAULT_STRESS_FACTOR
|| *self.options.analysis_factor != DEFAULT_STRESS_FACTOR
}
pub fn is_precise_stress(&self) -> bool {
*self.options.precise_stress
}
pub fn should_do_stress_gc(&self) -> bool {
self.initialized.load(Ordering::SeqCst)
&& (self.allocation_bytes.load(Ordering::SeqCst) > *self.options.stress_factor)
}
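    /// The plan-neutral part of the GC trigger: a collection is required if a space is full, a
    /// stress GC is due, or the plan's reserved pages exceed the total heap pages.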
pub(super) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
let stress_force_gc = self.should_do_stress_gc();
if stress_force_gc {
debug!(
"Stress GC: allocation_bytes = {}, stress_factor = {}",
self.allocation_bytes.load(Ordering::Relaxed),
*self.options.stress_factor
);
debug!("Doing stress GC");
self.allocation_bytes.store(0, Ordering::SeqCst);
}
debug!(
"self.get_reserved_pages()={}, self.get_total_pages()={}",
plan.get_reserved_pages(),
plan.get_total_pages()
);
let heap_full = plan.get_reserved_pages() > plan.get_total_pages();
space_full || stress_force_gc || heap_full
}
    #[allow(unused_variables)]
    pub(crate) fn verify_side_metadata_sanity(
&self,
side_metadata_sanity_checker: &mut SideMetadataSanity,
) {
#[cfg(feature = "code_space")]
self.code_space
.verify_side_metadata_sanity(side_metadata_sanity_checker);
#[cfg(feature = "ro_space")]
self.ro_space
.verify_side_metadata_sanity(side_metadata_sanity_checker);
#[cfg(feature = "vm_space")]
self.vm_space
.verify_side_metadata_sanity(side_metadata_sanity_checker);
}
#[cfg(feature = "malloc_counted_size")]
pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
}
#[cfg(feature = "malloc_counted_size")]
pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
}
#[cfg(feature = "malloc_counted_size")]
pub fn get_malloc_bytes(&self) -> usize {
self.malloc_bytes.load(Ordering::SeqCst)
}
}
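
/// `CommonPlan` bundles the spaces that most plans use on top of `BasePlan`: an immortal space, a
/// large object space, and a non-moving immortal space.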
#[derive(PlanTraceObject)]
pub struct CommonPlan<VM: VMBinding> {
#[trace]
pub immortal: ImmortalSpace<VM>,
#[trace]
pub los: LargeObjectSpace<VM>,
#[trace]
pub nonmoving: ImmortalSpace<VM>,
#[fallback_trace]
pub base: BasePlan<VM>,
}
impl<VM: VMBinding> CommonPlan<VM> {
pub fn new(
vm_map: &'static VMMap,
mmapper: &'static Mmapper,
options: Arc<Options>,
mut heap: HeapMeta,
constraints: &'static PlanConstraints,
global_side_metadata_specs: Vec<SideMetadataSpec>,
) -> CommonPlan<VM> {
CommonPlan {
immortal: ImmortalSpace::new(
"immortal",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
),
los: LargeObjectSpace::new(
"los",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
false,
),
nonmoving: ImmortalSpace::new(
"nonmoving",
true,
VMRequest::discontiguous(),
global_side_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
constraints,
),
base: BasePlan::new(
vm_map,
mmapper,
options,
heap,
constraints,
global_side_metadata_specs,
),
}
}
pub fn get_spaces(&self) -> Vec<&dyn Space<VM>> {
let mut ret = self.base.get_spaces();
ret.push(&self.immortal);
ret.push(&self.los);
ret.push(&self.nonmoving);
ret
}
pub fn get_used_pages(&self) -> usize {
self.immortal.reserved_pages()
+ self.los.reserved_pages()
+ self.nonmoving.reserved_pages()
+ self.base.get_used_pages()
}
pub fn trace_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
worker: &mut GCWorker<VM>,
) -> ObjectReference {
if self.immortal.in_space(object) {
trace!("trace_object: object in immortal space");
return self.immortal.trace_object(queue, object);
}
if self.los.in_space(object) {
trace!("trace_object: object in los");
return self.los.trace_object(queue, object);
}
if self.nonmoving.in_space(object) {
trace!("trace_object: object in nonmoving space");
return self.nonmoving.trace_object(queue, object);
}
self.base.trace_object::<Q>(queue, object, worker)
}
pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) {
self.immortal.prepare();
self.los.prepare(full_heap);
self.nonmoving.prepare();
self.base.prepare(tls, full_heap)
}
pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) {
self.immortal.release();
self.los.release(full_heap);
self.nonmoving.release();
self.base.release(tls, full_heap)
}
pub fn stacks_prepared(&self) -> bool {
self.base.stacks_prepared()
}
pub fn get_immortal(&self) -> &ImmortalSpace<VM> {
&self.immortal
}
pub fn get_los(&self) -> &LargeObjectSpace<VM> {
&self.los
}
pub fn get_nonmoving(&self) -> &ImmortalSpace<VM> {
&self.nonmoving
}
pub(crate) fn verify_side_metadata_sanity(
&self,
side_metadata_sanity_checker: &mut SideMetadataSanity,
) {
self.base
.verify_side_metadata_sanity(side_metadata_sanity_checker);
self.immortal
.verify_side_metadata_sanity(side_metadata_sanity_checker);
self.los
.verify_side_metadata_sanity(side_metadata_sanity_checker);
self.nonmoving
.verify_side_metadata_sanity(side_metadata_sanity_checker);
}
}
use crate::policy::gc_work::TraceKind;
use crate::vm::VMBinding;
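
/// A plan that can trace objects through its spaces, normally derived with
/// `#[derive(PlanTraceObject)]`: `trace_object` forwards an object to the space that contains it,
/// `post_scan_object` is a hook run after an object is scanned, and `may_move_objects` states
/// whether a trace of the given kind may move objects.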
pub trait PlanTraceObject<VM: VMBinding> {
fn trace_object<Q: ObjectQueue, const KIND: TraceKind>(
&self,
queue: &mut Q,
object: ObjectReference,
worker: &mut GCWorker<VM>,
) -> ObjectReference;
fn post_scan_object(&self, object: ObjectReference);
fn may_move_objects<const KIND: TraceKind>() -> bool;
}
use enum_map::Enum;
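
/// The allocation semantics a binding can request for an allocation. They select the kind of
/// space the object is allocated into: default, immortal, large object, code, read-only, large
/// code, or non-moving.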
#[repr(i32)]
#[derive(Clone, Copy, Debug, Enum, PartialEq, Eq)]
pub enum AllocationSemantics {
Default = 0,
Immortal = 1,
Los = 2,
Code = 3,
ReadOnly = 4,
LargeCode = 5,
NonMoving = 6,
}