use super::work_bucket::WorkBucketStage;
use super::*;
use crate::global_state::GcStatus;
use crate::plan::ObjectsClosure;
use crate::plan::VectorObjectQueue;
use crate::util::*;
use crate::vm::edge_shape::Edge;
use crate::vm::*;
use crate::*;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
/// The first work packet of a collection: decides the collection kind and asks the
/// plan to schedule the rest of the GC's work packets.
pub struct ScheduleCollection;
impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
// Let the GC trigger policy observe the start of the GC.
mmtk.gc_trigger.policy.on_gc_start(mmtk);
// Decide the collection kind from whether the last collection was exhaustive
// and whether the heap is still allowed to grow.
let is_emergency = mmtk.state.set_collection_kind(
mmtk.get_plan().last_collection_was_exhaustive(),
mmtk.gc_trigger.policy.can_heap_size_grow(),
);
if is_emergency {
mmtk.get_plan().notify_emergency_collection();
}
// Transition the global GC status before scheduling the actual work.
mmtk.set_gc_status(GcStatus::GcPrepare);
// The plan decides which work packets make up this collection.
mmtk.get_plan().schedule_collection(worker.scheduler());
}
}
/// The global preparation work packet. Prepares the plan, then fans out one
/// [`PrepareMutator`] packet per mutator and one [`PrepareCollector`] designated
/// work item per worker.
pub struct Prepare<C: GCWorkContext> {
// Raw pointer to the plan; mutated below via an unsafe cast.
pub plan: *const C::PlanType,
}
// SAFETY: NOTE(review): relies on `plan` pointing at the global plan and on this
// packet running while mutators are stopped, so no concurrent plan access -- TODO confirm.
unsafe impl<C: GCWorkContext> Send for Prepare<C> {}
impl<C: GCWorkContext> Prepare<C> {
/// Create a packet that prepares the given plan.
pub fn new(plan: *const C::PlanType) -> Self {
Self { plan }
}
}
impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("Prepare Global");
// Cast away const-ness to call the plan's mutating prepare().
// NOTE(review): sound only if nothing else accesses the plan concurrently.
let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
plan_mut.prepare(worker.tls);
if plan_mut.constraints().needs_prepare_mutator {
// One packet per mutator so mutators can be prepared in parallel.
for mutator in <C::VM as VMBinding>::VMActivePlan::mutators() {
mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
.add(PrepareMutator::<C::VM>::new(mutator));
}
}
// Every worker must run PrepareCollector, so push it as designated work.
for w in &mmtk.scheduler.worker_group.workers_shared {
let result = w.designated_work.push(Box::new(PrepareCollector));
debug_assert!(result.is_ok());
}
}
}
/// Per-mutator preparation work.
///
/// One such packet is created for each mutator by the global `Prepare` packet,
/// so mutators can be prepared in parallel.
pub struct PrepareMutator<VM: VMBinding> {
    // The mutator this packet prepares.
    pub mutator: &'static mut Mutator<VM>,
}
impl<VM: VMBinding> PrepareMutator<VM> {
    /// Create a packet that prepares the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        PrepareMutator { mutator }
    }
}
impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Prepare Mutator");
        let tls = worker.tls;
        self.mutator.prepare(tls);
    }
}
/// Per-worker preparation: prepares the worker's copy context and lets the plan
/// prepare the worker. Pushed to every worker's designated-work queue by `Prepare`.
#[derive(Default)]
pub struct PrepareCollector;
impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
trace!("Prepare Collector");
worker.get_copy_context_mut().prepare();
mmtk.get_plan().prepare_worker(worker);
}
}
/// The global release work packet. Releases the plan, then fans out one
/// [`ReleaseMutator`] packet per mutator and one [`ReleaseCollector`] designated
/// work item per worker.
pub struct Release<C: GCWorkContext> {
// Raw pointer to the plan; mutated below via an unsafe cast.
pub plan: *const C::PlanType,
}
impl<C: GCWorkContext> Release<C> {
/// Create a packet that releases the given plan.
pub fn new(plan: *const C::PlanType) -> Self {
Self { plan }
}
}
// SAFETY: NOTE(review): same reasoning as `Prepare` -- the pointer targets the
// global plan and the packet runs while the world is stopped -- TODO confirm.
unsafe impl<C: GCWorkContext> Send for Release<C> {}
impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("Release Global");
// Let the GC trigger policy observe the release phase.
mmtk.gc_trigger.policy.on_gc_release(mmtk);
// Cast away const-ness to call the plan's mutating release().
let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
plan_mut.release(worker.tls);
// One packet per mutator so mutators can be released in parallel.
for mutator in <C::VM as VMBinding>::VMActivePlan::mutators() {
mmtk.scheduler.work_buckets[WorkBucketStage::Release]
.add(ReleaseMutator::<C::VM>::new(mutator));
}
// Every worker must run ReleaseCollector, so push it as designated work.
for w in &mmtk.scheduler.worker_group.workers_shared {
let result = w.designated_work.push(Box::new(ReleaseCollector));
debug_assert!(result.is_ok());
}
#[cfg(feature = "count_live_bytes_in_gc")]
{
// Aggregate per-worker live-bytes counters into the global statistic,
// clearing them for the next GC.
let live_bytes = mmtk
.scheduler
.worker_group
.get_and_clear_worker_live_bytes();
mmtk.state.set_live_bytes_in_last_gc(live_bytes);
}
}
}
/// Per-mutator release work.
///
/// One such packet is created for each mutator by the global `Release` packet,
/// so mutators can be released in parallel.
pub struct ReleaseMutator<VM: VMBinding> {
    // The mutator this packet releases.
    pub mutator: &'static mut Mutator<VM>,
}
impl<VM: VMBinding> ReleaseMutator<VM> {
    /// Create a packet that releases the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        ReleaseMutator { mutator }
    }
}
impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Mutator");
        let tls = worker.tls;
        self.mutator.release(tls);
    }
}
/// Per-worker release: releases the worker's copy context.
/// Pushed to every worker's designated-work queue by `Release`.
#[derive(Default)]
pub struct ReleaseCollector;
impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
trace!("Release Collector");
worker.get_copy_context_mut().release();
}
}
/// Stop all mutator threads and schedule root scanning.
///
/// For each stopped mutator reported by the VM binding, a [`ScanMutatorRoots`]
/// packet is added to the Prepare bucket; a single [`ScanVMSpecificRoots`] packet
/// is added afterwards.
#[derive(Default)]
pub struct StopMutators<C: GCWorkContext>(PhantomData<C>);
impl<C: GCWorkContext> StopMutators<C> {
pub fn new() -> Self {
Self(PhantomData)
}
}
impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("stop_all_mutators start");
// Reset stack-scanning bookkeeping before any mutator is reported.
mmtk.state.prepare_for_stack_scanning();
// The closure is invoked by the binding once per stopped mutator.
<C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
.add(ScanMutatorRoots::<C>(mutator));
});
trace!("stop_all_mutators end");
mmtk.scheduler.notify_mutators_paused(mmtk);
mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::<C>::new());
}
}
/// The final work packet of a GC: logs statistics, calls `end_of_gc` on the plan,
/// resets GC state, and resumes mutators.
#[derive(Default)]
pub struct EndOfGC {
// Wall-clock duration of this GC, for logging.
pub elapsed: std::time::Duration,
}
impl<VM: VMBinding> GCWork<VM> for EndOfGC {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
info!(
"End of GC ({}/{} pages, took {} ms)",
mmtk.get_plan().get_reserved_pages(),
mmtk.get_plan().get_total_pages(),
self.elapsed.as_millis()
);
#[cfg(feature = "count_live_bytes_in_gc")]
{
// Report the live bytes gathered during tracing against used pages.
let live_bytes = mmtk.state.get_live_bytes_in_last_gc();
let used_bytes =
mmtk.get_plan().get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE;
debug_assert!(
live_bytes <= used_bytes,
"Live bytes of all live objects ({} bytes) is larger than used pages ({} bytes), something is wrong.",
live_bytes, used_bytes
);
info!(
"Live objects = {} bytes ({:04.1}% of {} used pages)",
live_bytes,
live_bytes as f64 * 100.0 / used_bytes as f64,
mmtk.get_plan().get_used_pages()
);
}
// SAFETY: NOTE(review): takes mutable access to the plan at end-of-GC; assumes
// no other packet is touching the plan at this point -- TODO confirm.
let plan_mut: &mut dyn Plan<VM = VM> = unsafe { mmtk.get_plan_mut() };
plan_mut.end_of_gc(worker.tls);
#[cfg(feature = "extreme_assertions")]
if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) {
// Clear the duplicate-edge log so the next GC starts fresh.
mmtk.edge_logger.reset();
}
mmtk.state.reset_collection_trigger();
// The GC is formally over before mutators are allowed to run again.
mmtk.set_gc_status(GcStatus::NotInGC);
<VM as VMBinding>::VMCollection::resume_mutators(worker.tls);
}
}
/// An `ObjectTracer` backed by a `ProcessEdgesWork` instance.
///
/// Traced objects accumulate in the underlying packet's node buffer; the buffer is
/// flushed into new scan packets whenever it fills, bounding packet size.
pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
process_edges_work: E,
// The bucket that newly created scan packets are added to.
stage: WorkBucketStage,
}
impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
/// Trace `object` via the underlying `ProcessEdgesWork`, flushing the node
/// buffer if it became full.
fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
debug_assert!(!object.is_null());
let result = self.process_edges_work.trace_object(object);
self.flush_if_full();
result
}
}
impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
    /// Flush the buffered nodes if the buffer has reached capacity.
    fn flush_if_full(&mut self) {
        if !self.process_edges_work.nodes.is_full() {
            return;
        }
        self.flush();
    }

    /// Flush any buffered nodes; does nothing when the buffer is empty.
    pub fn flush_if_not_empty(&mut self) {
        if self.process_edges_work.nodes.is_empty() {
            return;
        }
        self.flush();
    }

    /// Turn the buffered nodes into a new scan packet and add it to the bucket
    /// this tracer was created for. Must not be called with an empty buffer.
    fn flush(&mut self) {
        let nodes = self.process_edges_work.pop_nodes();
        assert!(!nodes.is_empty());
        let scan_work = self.process_edges_work.create_scan_work(nodes);
        let worker = self.process_edges_work.worker();
        worker.scheduler().work_buckets[self.stage].add(scan_work);
    }
}
/// Factory for `ProcessEdgesWorkTracer` instances (see `ObjectTracerContext`).
/// It only needs to remember which bucket the resulting scan work goes into.
pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
    stage: WorkBucketStage,
    phantom_data: PhantomData<E>,
}
impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
    // Hand-written so that cloning does not require `E: Clone`.
    fn clone(&self) -> Self {
        Self {
            stage: self.stage,
            phantom_data: PhantomData,
        }
    }
}
impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
type TracerType = ProcessEdgesWorkTracer<E>;
/// Build a temporary tracer around a fresh `ProcessEdgesWork`, run `func` with
/// it, then flush any nodes it buffered into scan packets.
fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
where
F: FnOnce(&mut Self::TracerType) -> R,
{
let mmtk = worker.mmtk;
// Fresh, non-root packet with no edges: only its trace_object and node
// buffer are used here.
let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
process_edges_work.set_worker(worker);
let mut tracer = ProcessEdgesWorkTracer {
process_edges_work,
stage: self.stage,
};
let result = func(&mut tracer);
// Any leftover buffered nodes must still become scan work.
tracer.flush_if_not_empty();
result
}
}
/// Ask the VM binding to process weak references.
///
/// If the binding reports that processing needs to repeat, a new instance is
/// installed as the sentinel of the `VMRefClosure` bucket so it runs again after
/// the bucket drains.
pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
phantom_data: PhantomData<E>,
}
impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
pub fn new() -> Self {
Self {
phantom_data: PhantomData,
}
}
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
trace!("VMProcessWeakRefs");
let stage = WorkBucketStage::VMRefClosure;
let need_to_repeat = {
// Tracer factory that feeds any resulting scan work back into this stage.
let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
stage,
phantom_data: PhantomData,
};
<E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
};
if need_to_repeat {
// Re-install Self as the bucket sentinel so it runs once more.
let new_self = Box::new(Self::new());
worker.scheduler().work_buckets[stage].set_sentinel(new_self);
}
}
}
/// Ask the VM binding to forward its weak references, using a tracer that puts any
/// resulting scan work into the `VMRefForwarding` bucket.
pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
phantom_data: PhantomData<E>,
}
impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
pub fn new() -> Self {
Self {
phantom_data: PhantomData,
}
}
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
trace!("VMForwardWeakRefs");
let stage = WorkBucketStage::VMRefForwarding;
// Tracer factory that feeds any resulting scan work into the forwarding stage.
let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
stage,
phantom_data: PhantomData,
};
<E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
}
}
/// Notify the VM binding that forwarding has finished (post-forwarding hook).
#[derive(Default)]
pub struct VMPostForwarding<VM: VMBinding> {
// Only here to carry the VM type parameter.
phantom_data: PhantomData<VM>,
}
impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
trace!("VMPostForwarding start");
<VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
trace!("VMPostForwarding end");
}
}
/// Scan the roots of a single mutator thread via the VM binding.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);
impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
// The factory decides which ProcessEdgesWork type handles each root category.
let factory = ProcessEdgesWorkRootsWorkFactory::<
C::VM,
C::ProcessEdgesWorkType,
C::TPProcessEdges,
>::new(mmtk);
<C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
worker.tls,
// SAFETY: NOTE(review): creates a second &mut to the mutator for the call;
// assumes the binding does not alias it with self.0 -- TODO confirm.
unsafe { &mut *(self.0 as *mut _) },
factory,
);
// Flush the mutator's locally buffered state.
self.0.flush();
// The packet that completes the last stack scan flips the GC into GcProper.
if mmtk.state.inform_stack_scanned(mutators) {
<C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
false, worker.tls,
);
mmtk.set_gc_status(GcStatus::GcProper);
}
}
}
/// Scan VM-specific (non-mutator) roots via the VM binding.
#[derive(Default)]
pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);
impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
pub fn new() -> Self {
Self(PhantomData)
}
}
impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("ScanStaticRoots");
// The factory decides which ProcessEdgesWork type handles each root category.
let factory = ProcessEdgesWorkRootsWorkFactory::<
C::VM,
C::ProcessEdgesWorkType,
C::TPProcessEdges,
>::new(mmtk);
<C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
}
}
/// Common state shared by all `ProcessEdgesWork` implementations.
pub struct ProcessEdgesBase<VM: VMBinding> {
// The edges (slots) this packet will process.
pub edges: Vec<VM::VMEdge>,
// Objects reached while tracing; drained into follow-up scan packets.
pub nodes: VectorObjectQueue,
mmtk: &'static MMTK<VM>,
// Set by set_worker() when the packet starts executing; null until then.
worker: *mut GCWorker<VM>,
// Whether the edges in this packet are root edges.
pub roots: bool,
// The bucket that follow-up scan work should be added to.
pub bucket: WorkBucketStage,
}
// SAFETY: NOTE(review): the raw `worker` pointer is only dereferenced while this
// packet is being executed by that worker (after set_worker) -- TODO confirm.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
impl<VM: VMBinding> ProcessEdgesBase<VM> {
/// Create the base state. `roots` marks the edges as root edges.
pub fn new(
edges: Vec<VM::VMEdge>,
roots: bool,
mmtk: &'static MMTK<VM>,
bucket: WorkBucketStage,
) -> Self {
#[cfg(feature = "extreme_assertions")]
if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) {
// Record every edge so duplicates across packets can be detected.
for edge in &edges {
mmtk.edge_logger.log_edge(*edge);
}
}
Self {
edges,
nodes: VectorObjectQueue::new(),
mmtk,
worker: std::ptr::null_mut(),
roots,
bucket,
}
}
/// Remember the worker that is executing this packet.
pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
self.worker = worker;
}
/// The worker executing this packet. Only valid after set_worker().
pub fn worker(&self) -> &'static mut GCWorker<VM> {
unsafe { &mut *self.worker }
}
/// The MMTk instance this packet belongs to.
pub fn mmtk(&self) -> &'static MMTK<VM> {
self.mmtk
}
/// The current plan.
pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
self.mmtk.get_plan()
}
/// Take all buffered nodes, leaving the buffer empty.
pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
self.nodes.take()
}
/// Whether this packet processes root edges.
pub fn is_roots(&self) -> bool {
self.roots
}
}
/// Shorthand for the edge type of a `ProcessEdgesWork` implementation.
pub type EdgeOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMEdge;
/// The core of the tracing loop: a work packet that processes a batch of edges --
/// loading each edge, tracing the object it refers to, and (possibly) storing a
/// forwarded reference back into the edge.
pub trait ProcessEdgesWork:
Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
{
/// The VM this implementation works with.
type VM: VMBinding;
/// The scan packet type created for the nodes this packet discovers.
type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
/// Buffer capacity constant. NOTE(review): the exact consumer (e.g. the node
/// queue's is_full) is not visible in this file -- confirm before relying on it.
const CAPACITY: usize = 4096;
/// Whether process_edge() writes the traced (possibly moved) reference back.
const OVERWRITE_REFERENCE: bool = true;
/// Whether newly created scan work is executed immediately on this worker
/// rather than being added to a bucket.
const SCAN_OBJECTS_IMMEDIATELY: bool = true;
fn new(
edges: Vec<EdgeOf<Self>>,
roots: bool,
mmtk: &'static MMTK<Self::VM>,
bucket: WorkBucketStage,
) -> Self;
/// Trace a single object, returning its (possibly new) reference.
fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
#[cfg(feature = "sanity")]
/// Remember this packet's root edges so the sanity GC can re-trace from them.
fn cache_roots_for_sanity_gc(&mut self) {
assert!(self.roots);
self.mmtk()
.sanity_checker
.lock()
.unwrap()
.add_root_edges(self.edges.clone());
}
/// Run `work_packet` now or enqueue it, depending on SCAN_OBJECTS_IMMEDIATELY.
fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
if Self::SCAN_OBJECTS_IMMEDIATELY {
// Execute on the spot instead of paying scheduling overhead.
work_packet.do_work(self.worker(), self.mmtk);
} else {
debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
}
}
/// Create the scan packet for a batch of discovered nodes.
fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType;
/// Turn any buffered nodes into scan work. No-op when the buffer is empty.
fn flush(&mut self) {
let nodes = self.pop_nodes();
if !nodes.is_empty() {
self.start_or_dispatch_scan_work(self.create_scan_work(nodes));
}
}
/// Process one edge: skip null references, trace the target, and write the
/// result back if the reference may have changed.
fn process_edge(&mut self, slot: EdgeOf<Self>) {
let object = slot.load();
if object.is_null() {
return;
}
let new_object = self.trace_object(object);
debug_assert!(!new_object.is_null());
if Self::OVERWRITE_REFERENCE && new_object != object {
slot.store(new_object);
}
}
/// Process every edge in this packet.
fn process_edges(&mut self) {
probe!(mmtk, process_edges, self.edges.len(), self.is_roots());
for i in 0..self.edges.len() {
self.process_edge(self.edges[i])
}
}
}
/// Blanket implementation: every `ProcessEdgesWork` is itself a GC work packet.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
self.set_worker(worker);
self.process_edges();
// `flush` re-checks emptiness itself; this guard just skips the call in the
// common empty case (note `flush` is overridable by implementors).
if !self.nodes.is_empty() {
self.flush();
}
#[cfg(feature = "sanity")]
// Cache roots for the sanity GC -- but not while the sanity GC itself runs.
if self.roots && !_mmtk.is_in_sanity() {
self.cache_roots_for_sanity_gc();
}
trace!("ProcessEdgesWork End");
}
}
/// A `ProcessEdgesWork` that dispatches `trace_object` through the space function
/// table (SFT) entry of the space containing the object.
pub struct SFTProcessEdges<VM: VMBinding> {
pub base: ProcessEdgesBase<VM>,
}
impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
type VM = VM;
type ScanObjectsWorkType = ScanObjects<Self>;
fn new(
edges: Vec<EdgeOf<Self>>,
roots: bool,
mmtk: &'static MMTK<VM>,
bucket: WorkBucketStage,
) -> Self {
let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket);
Self { base }
}
fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
use crate::policy::sft::GCWorkerMutRef;
debug_assert!(!object.is_null());
// Wrap the worker reference for the SFT interface.
let worker = GCWorkerMutRef::new(self.worker());
// SAFETY: NOTE(review): get_unchecked skips the SFT map's checked lookup;
// assumes `object` lies in a region registered in SFT_MAP -- TODO confirm.
let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_address::<VM>()) };
sft.sft_trace_object(&mut self.base.nodes, object, worker)
}
fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> ScanObjects<Self> {
ScanObjects::<Self>::new(nodes, false, self.bucket)
}
}
/// Root-scanning work factory backed by two `ProcessEdgesWork` types:
/// `E` handles ordinary root edges, while `I` traces root *nodes* that must not
/// move (pinning roots) -- see the `RootsWorkFactory` impl below.
pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
VM: VMBinding,
E: ProcessEdgesWork<VM = VM>,
I: ProcessEdgesWork<VM = VM>,
> {
mmtk: &'static MMTK<VM>,
phantom: PhantomData<(E, I)>,
}
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>, I: ProcessEdgesWork<VM = VM>> Clone
for ProcessEdgesWorkRootsWorkFactory<VM, E, I>
{
fn clone(&self) -> Self {
Self {
mmtk: self.mmtk,
phantom: PhantomData,
}
}
}
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>, I: ProcessEdgesWork<VM = VM>>
RootsWorkFactory<EdgeOf<E>> for ProcessEdgesWorkRootsWorkFactory<VM, E, I>
{
/// Ordinary root edges: processed by `E` in the Closure stage.
fn create_process_edge_roots_work(&mut self, edges: Vec<EdgeOf<E>>) {
crate::memory_manager::add_work_packet(
self.mmtk,
WorkBucketStage::Closure,
E::new(edges, true, self.mmtk, WorkBucketStage::Closure),
);
}
/// Pinning root nodes: traced (without moving) by `I`; their children are then
/// closed over by `E` in the Closure stage.
fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
crate::memory_manager::add_work_packet(
self.mmtk,
WorkBucketStage::PinningRootsTrace,
ProcessRootNode::<VM, I, E>::new(nodes, WorkBucketStage::Closure),
);
}
/// Transitively-pinning root nodes: both the roots and their transitive closure
/// are handled by `I` in the TPinningClosure stage.
fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
crate::memory_manager::add_work_packet(
self.mmtk,
WorkBucketStage::TPinningClosure,
ProcessRootNode::<VM, I, I>::new(nodes, WorkBucketStage::TPinningClosure),
);
}
}
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>, I: ProcessEdgesWork<VM = VM>>
ProcessEdgesWorkRootsWorkFactory<VM, E, I>
{
fn new(mmtk: &'static MMTK<VM>) -> Self {
Self {
mmtk,
phantom: PhantomData,
}
}
}
// Deref to the base so SFTProcessEdges satisfies the ProcessEdgesWork supertraits.
impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
type Target = ProcessEdgesBase<VM>;
fn deref(&self) -> &Self::Target {
&self.base
}
}
impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.base
}
}
/// Common behaviour of packets that scan a buffer of objects for edges.
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
/// The ProcessEdgesWork type used to trace the edges found while scanning.
type E: ProcessEdgesWork<VM = VM>;
/// Hook invoked after each object has been scanned.
fn post_scan_object(&self, object: ObjectReference);
/// Create another packet of the same kind for a further buffer of objects.
fn make_another(&self, buffer: Vec<ObjectReference>) -> Self;
/// The bucket that follow-up work is added to.
fn get_bucket(&self) -> WorkBucketStage;
/// Scan every object in `buffer`.
///
/// Objects whose edges the binding can enqueue are scanned through an
/// `ObjectsClosure`; the remainder are deferred and scanned via
/// `scan_object_and_trace_edges` with a tracer.
fn do_work_common(
&self,
buffer: &[ObjectReference],
worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
_mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
) {
let tls = worker.tls;
let objects_to_scan = buffer;
// Objects that do not support edge enqueuing; handled in a second pass.
let mut scan_later = vec![];
{
// Scope the closure so its borrow of `worker` ends before the second pass.
let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());
for object in objects_to_scan.iter().copied() {
#[cfg(feature = "count_live_bytes_in_gc")]
closure
.worker
.shared
.increase_live_bytes(VM::VMObjectModel::get_current_size(object));
if <VM as VMBinding>::VMScanning::support_edge_enqueuing(tls, object) {
trace!("Scan object (edge) {}", object);
// The closure collects this object's edges into edge-processing packets.
<VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
self.post_scan_object(object);
} else {
scan_later.push(object);
}
}
}
// Second pass: objects whose outgoing references are traced directly.
if !scan_later.is_empty() {
let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
stage: self.get_bucket(),
phantom_data: PhantomData,
};
object_tracer_context.with_tracer(worker, |object_tracer| {
for object in scan_later.iter().copied() {
trace!("Scan object (node) {}", object);
<VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
tls,
object,
object_tracer,
);
self.post_scan_object(object);
}
});
}
}
}
/// Scan a buffer of objects to enqueue their outgoing edges (the "node" side of
/// the transitive closure).
pub struct ScanObjects<Edges: ProcessEdgesWork> {
// The objects to scan.
buffer: Vec<ObjectReference>,
// Kept for parity with make_another(); not read in this file.
#[allow(unused)]
concurrent: bool,
phantom: PhantomData<Edges>,
// The bucket follow-up work goes into.
bucket: WorkBucketStage,
}
impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
/// Create a packet scanning `buffer`, feeding follow-up work into `bucket`.
pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
Self {
buffer,
concurrent,
phantom: PhantomData,
bucket,
}
}
}
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
type E = E;
fn get_bucket(&self) -> WorkBucketStage {
self.bucket
}
// Plain object scanning needs no per-object post-processing.
fn post_scan_object(&self, _object: ObjectReference) {
}
fn make_another(&self, buffer: Vec<ObjectReference>) -> Self {
Self::new(buffer, self.concurrent, self.bucket)
}
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
trace!("ScanObjects");
self.do_work_common(&self.buffer, worker, mmtk);
trace!("ScanObjects End");
}
}
use crate::mmtk::MMTK;
use crate::plan::Plan;
use crate::plan::PlanTraceObject;
use crate::policy::gc_work::TraceKind;
/// A `ProcessEdgesWork` that traces objects through the plan's own
/// `PlanTraceObject` implementation, specialized by the const `KIND`.
pub struct PlanProcessEdges<
VM: VMBinding,
P: Plan<VM = VM> + PlanTraceObject<VM>,
const KIND: TraceKind,
> {
plan: &'static P,
base: ProcessEdgesBase<VM>,
}
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
for PlanProcessEdges<VM, P, KIND>
{
type VM = VM;
type ScanObjectsWorkType = PlanScanObjects<Self, P>;
fn new(
edges: Vec<EdgeOf<Self>>,
roots: bool,
mmtk: &'static MMTK<VM>,
bucket: WorkBucketStage,
) -> Self {
let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket);
// The concrete plan type is known statically; downcast once at construction.
let plan = base.plan().downcast_ref::<P>().unwrap();
Self { plan, base }
}
fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType {
PlanScanObjects::<Self, P>::new(self.plan, nodes, false, self.bucket)
}
fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
debug_assert!(!object.is_null());
let worker = self.worker();
self.plan
.trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
}
/// Like the trait's default, but only writes the reference back when this
/// plan/trace kind can actually move objects.
fn process_edge(&mut self, slot: EdgeOf<Self>) {
let object = slot.load();
if object.is_null() {
return;
}
let new_object = self.trace_object(object);
debug_assert!(!new_object.is_null());
if P::may_move_objects::<KIND>() && new_object != object {
slot.store(new_object);
}
}
}
// Deref to the base so PlanProcessEdges satisfies the ProcessEdgesWork supertraits.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
for PlanProcessEdges<VM, P, KIND>
{
type Target = ProcessEdgesBase<VM>;
fn deref(&self) -> &Self::Target {
&self.base
}
}
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
for PlanProcessEdges<VM, P, KIND>
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.base
}
}
/// The scan-objects counterpart of `PlanProcessEdges`: like `ScanObjects`, but lets
/// the plan post-process each scanned object.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
plan: &'static P,
// The objects to scan.
buffer: Vec<ObjectReference>,
// Kept for parity with make_another(); not read in this file.
#[allow(dead_code)]
concurrent: bool,
phantom: PhantomData<E>,
// The bucket follow-up work goes into.
bucket: WorkBucketStage,
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
/// Create a packet scanning `buffer` for `plan`, feeding follow-up work into `bucket`.
pub fn new(
plan: &'static P,
buffer: Vec<ObjectReference>,
concurrent: bool,
bucket: WorkBucketStage,
) -> Self {
Self {
plan,
buffer,
concurrent,
phantom: PhantomData,
bucket,
}
}
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
for PlanScanObjects<E, P>
{
type E = E;
fn get_bucket(&self) -> WorkBucketStage {
self.bucket
}
// Unlike plain ScanObjects, the plan gets to post-process each scanned object.
fn post_scan_object(&self, object: ObjectReference) {
self.plan.post_scan_object(object);
}
fn make_another(&self, buffer: Vec<ObjectReference>) -> Self {
Self::new(self.plan, buffer, self.concurrent, self.bucket)
}
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
for PlanScanObjects<E, P>
{
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
trace!("PlanScanObjects");
self.do_work_common(&self.buffer, worker, mmtk);
trace!("PlanScanObjects End");
}
}
/// Trace a batch of root *nodes* (objects referenced by roots), then close over
/// their children.
///
/// `I` traces the root objects themselves and must not move them (asserted in
/// do_work); `E` is the type whose scan packets handle the children.
pub(crate) struct ProcessRootNode<
VM: VMBinding,
I: ProcessEdgesWork<VM = VM>,
E: ProcessEdgesWork<VM = VM>,
> {
phantom: PhantomData<(VM, I, E)>,
// The root objects to trace.
roots: Vec<ObjectReference>,
// The bucket the follow-up scan work is added to.
bucket: WorkBucketStage,
}
impl<VM: VMBinding, I: ProcessEdgesWork<VM = VM>, E: ProcessEdgesWork<VM = VM>>
ProcessRootNode<VM, I, E>
{
/// Create a packet tracing `nodes` and scheduling their child scan into `bucket`.
pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
Self {
phantom: PhantomData,
roots: nodes,
bucket,
}
}
}
impl<VM: VMBinding, I: ProcessEdgesWork<VM = VM>, E: ProcessEdgesWork<VM = VM>> GCWork<VM>
for ProcessRootNode<VM, I, E>
{
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
trace!("ProcessRootNode");
#[cfg(feature = "sanity")]
{
// Remember these root nodes for the sanity GC (unless it is already running).
if !mmtk.is_in_sanity() {
mmtk.sanity_checker
.lock()
.unwrap()
.add_root_nodes(self.roots.clone());
}
}
// Phase 1: trace the root objects with `I`. These are pinning roots, so
// tracing must not move them -- the debug assertion checks that invariant.
let scanned_root_objects = {
// A throwaway `I` with no edges; only trace_object and its node buffer are used.
let mut process_edges_work =
I::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
process_edges_work.set_worker(worker);
for object in self.roots.iter().copied() {
let new_object = process_edges_work.trace_object(object);
debug_assert_eq!(
object, new_object,
"Object moved while tracing root unmovable root object: {} -> {}",
object, new_object
);
}
// The traced roots become the nodes whose children are scanned next.
process_edges_work.nodes.take()
};
// Phase 2: scan the roots' children with `E` in the configured bucket.
let process_edges_work = E::new(vec![], false, mmtk, self.bucket);
let work = process_edges_work.create_scan_work(scanned_root_objects);
crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
trace!("ProcessRootNode End");
}
}
/// A placeholder `ProcessEdgesWork` for contexts that do not support a particular
/// work type. Every method panics; it must never actually be executed.
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
phantom: PhantomData<VM>,
}
impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
type Target = ProcessEdgesBase<VM>;
fn deref(&self) -> &Self::Target {
panic!("unsupported!")
}
}
impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
fn deref_mut(&mut self) -> &mut Self::Target {
panic!("unsupported!")
}
}
impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
type VM = VM;
type ScanObjectsWorkType = ScanObjects<Self>;
fn new(
_edges: Vec<EdgeOf<Self>>,
_roots: bool,
_mmtk: &'static MMTK<Self::VM>,
_bucket: WorkBucketStage,
) -> Self {
panic!("unsupported!")
}
fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
panic!("unsupported!")
}
fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType {
panic!("unsupported!")
}
}