use super::work_bucket::WorkBucketStage;
use super::*;
use crate::global_state::GcStatus;
use crate::plan::ObjectsClosure;
use crate::plan::VectorObjectQueue;
use crate::util::*;
use crate::vm::slot::Slot;
use crate::vm::*;
use crate::*;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
/// The very first work packet of a collection.
///
/// Consults the GC trigger policy, determines the collection kind (including
/// whether this is an emergency collection), moves the global state to
/// `GcPrepare`, and then asks the plan to schedule the rest of the GC work.
pub struct ScheduleCollection;
impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        // Let the trigger policy observe the start of this GC.
        mmtk.gc_trigger.policy.on_gc_start(mmtk);
        // Decide the collection kind based on whether the last collection was
        // exhaustive and whether the heap is still allowed to grow.
        let is_emergency = mmtk.state.set_collection_kind(
            mmtk.get_plan().last_collection_was_exhaustive(),
            mmtk.gc_trigger.policy.can_heap_size_grow(),
        );
        if is_emergency {
            mmtk.get_plan().notify_emergency_collection();
        }
        mmtk.set_gc_status(GcStatus::GcPrepare);
        // The plan enqueues all further work packets for this collection.
        mmtk.get_plan().schedule_collection(worker.scheduler());
    }
}
/// The global GC preparation work packet.
///
/// Holds a raw pointer to the plan so that the packet can later obtain a
/// mutable reference to it (see the `GCWork` impl below).
pub struct Prepare<C: GCWorkContext> {
    pub plan: *const C::PlanType,
}
// SAFETY assumption: the raw plan pointer is only dereferenced while mutators
// are stopped, so sending the packet across worker threads is sound —
// TODO(review): confirm this invariant against the scheduler.
unsafe impl<C: GCWorkContext> Send for Prepare<C> {}
impl<C: GCWorkContext> Prepare<C> {
    /// Create a `Prepare` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}
impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Prepare Global");
        // Cast the shared plan pointer to a mutable reference. Assumed sound
        // because global preparation runs once per GC during the pause —
        // TODO(review): confirm no other packet mutates the plan concurrently.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.prepare(worker.tls);
        if plan_mut.constraints().needs_prepare_mutator {
            // One PrepareMutator packet per mutator thread.
            let prepare_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
                .map(|mutator| Box::new(PrepareMutator::<C::VM>::new(mutator)) as _)
                .collect::<Vec<_>>();
            debug_assert_eq!(
                prepare_mutator_packets.len(),
                <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
            );
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].bulk_add(prepare_mutator_packets);
        }
        // PrepareCollector must run on every worker thread, so it is pushed as
        // designated work rather than into a shared bucket.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(PrepareCollector));
            debug_assert!(result.is_ok());
        }
    }
}
/// Work packet that prepares a single mutator for the collection.
pub struct PrepareMutator<VM: VMBinding> {
    // The mutator to prepare; `'static` from the scheduler's point of view.
    pub mutator: &'static mut Mutator<VM>,
}
impl<VM: VMBinding> PrepareMutator<VM> {
    /// Create a packet that prepares the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}
impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Prepare Mutator");
        self.mutator.prepare(worker.tls);
    }
}
/// Work packet that prepares the executing GC worker itself (its copy context
/// and any plan-specific per-worker state). Scheduled as designated work so it
/// runs exactly once on every worker.
#[derive(Default)]
pub struct PrepareCollector;
impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("Prepare Collector");
        worker.get_copy_context_mut().prepare();
        mmtk.get_plan().prepare_worker(worker);
    }
}
/// The global GC release work packet, mirroring `Prepare`.
pub struct Release<C: GCWorkContext> {
    pub plan: *const C::PlanType,
}
impl<C: GCWorkContext> Release<C> {
    /// Create a `Release` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}
// SAFETY assumption: as with `Prepare`, the raw plan pointer is only used
// during the pause — TODO(review): confirm.
unsafe impl<C: GCWorkContext> Send for Release<C> {}
impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Release Global");
        // Let the trigger policy observe the release phase of this GC.
        mmtk.gc_trigger.policy.on_gc_release(mmtk);
        // See `Prepare::do_work` for the aliasing rationale behind this cast.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.release(worker.tls);
        // One ReleaseMutator packet per mutator thread.
        let release_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
            .map(|mutator| Box::new(ReleaseMutator::<C::VM>::new(mutator)) as _)
            .collect::<Vec<_>>();
        debug_assert_eq!(
            release_mutator_packets.len(),
            <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
        );
        mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(release_mutator_packets);
        // ReleaseCollector must run on every worker, hence designated work.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(ReleaseCollector));
            debug_assert!(result.is_ok());
        }
    }
}
/// Work packet that releases a single mutator at the end of the collection.
pub struct ReleaseMutator<VM: VMBinding> {
    // The mutator to release; `'static` from the scheduler's point of view.
    pub mutator: &'static mut Mutator<VM>,
}
impl<VM: VMBinding> ReleaseMutator<VM> {
    /// Create a packet that releases the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}
impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Mutator");
        self.mutator.release(worker.tls);
    }
}
/// Work packet that releases the executing GC worker's copy context.
/// Scheduled as designated work so it runs exactly once on every worker.
#[derive(Default)]
pub struct ReleaseCollector;
impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Collector");
        worker.get_copy_context_mut().release();
    }
}
/// Work packet that stops all mutator threads and, by default, requests root
/// scanning for each stopped mutator.
#[derive(Default)]
pub struct StopMutators<C: GCWorkContext> {
    // If true, do not create `ScanMutatorRoots` packets for stopped mutators.
    skip_mutator_roots: bool,
    // If true, flush each mutator when it is stopped.
    flush_mutator: bool,
    phantom: PhantomData<C>,
}
impl<C: GCWorkContext> StopMutators<C> {
    /// Standard variant: scan mutator roots, no explicit mutator flush.
    pub fn new() -> Self {
        Self {
            skip_mutator_roots: false,
            flush_mutator: false,
            phantom: PhantomData,
        }
    }
    /// Variant that skips mutator root scanning but flushes each mutator.
    pub fn new_no_scan_roots() -> Self {
        Self {
            skip_mutator_roots: true,
            flush_mutator: true,
            phantom: PhantomData,
        }
    }
}
impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("stop_all_mutators start");
        mmtk.state.prepare_for_stack_scanning();
        // The binding invokes this closure once per mutator as it stops it.
        <C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
            if self.flush_mutator {
                mutator.flush();
            }
            if !self.skip_mutator_roots {
                // Scan this mutator's roots in the Prepare stage.
                mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                    .add(ScanMutatorRoots::<C>(mutator));
            }
        });
        trace!("stop_all_mutators end");
        mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler);
        mmtk.scheduler.notify_mutators_paused(mmtk);
        // VM-global roots are scanned by a separate packet.
        mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::<C>::new());
    }
}
/// An `ObjectTracer` backed by a `ProcessEdgesWork` instance. Traced objects
/// are enqueued into the inner packet's node buffer, which is flushed to the
/// given work-bucket stage as scan work whenever it fills up.
pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
    process_edges_work: E,
    stage: WorkBucketStage,
}
impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let result = self.process_edges_work.trace_object(object);
        // Flush eagerly so the node buffer never grows beyond capacity.
        self.flush_if_full();
        result
    }
}
impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
    fn flush_if_full(&mut self) {
        if self.process_edges_work.nodes.is_full() {
            self.flush();
        }
    }
    /// Flush any buffered nodes. Must be called before the tracer is dropped
    /// so no traced objects are lost.
    pub fn flush_if_not_empty(&mut self) {
        if !self.process_edges_work.nodes.is_empty() {
            self.flush();
        }
    }
    fn flush(&mut self) {
        // Callers guarantee the buffer is non-empty (see the two guards above).
        let next_nodes = self.process_edges_work.pop_nodes();
        assert!(!next_nodes.is_empty());
        let work_packet = self.process_edges_work.create_scan_work(next_nodes);
        let worker = self.process_edges_work.worker();
        worker.scheduler().work_buckets[self.stage].add(work_packet);
    }
}
/// Factory for [`ProcessEdgesWorkTracer`]. It only records the work-bucket
/// stage that generated scan packets should be queued into; the actual
/// `ProcessEdgesWork` instance is created per `with_tracer` call.
pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
    stage: WorkBucketStage,
    phantom_data: PhantomData<E>,
}
// Hand-written `Clone`: a derive would demand `E: Clone`, which is
// unnecessary because `E` only appears inside `PhantomData`.
impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
    fn clone(&self) -> Self {
        Self {
            stage: self.stage,
            phantom_data: PhantomData,
        }
    }
}
impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
    type TracerType = ProcessEdgesWorkTracer<E>;
    fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
    where
        F: FnOnce(&mut Self::TracerType) -> R,
    {
        let mmtk = worker.mmtk;
        // Fresh packet with no slots; `roots` is false because this tracer is
        // used for node enqueuing, not root processing.
        let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
        process_edges_work.set_worker(worker);
        let mut tracer = ProcessEdgesWorkTracer {
            process_edges_work,
            stage: self.stage,
        };
        let result = func(&mut tracer);
        // Flush after `func` returns so no enqueued node is lost.
        tracer.flush_if_not_empty();
        result
    }
}
/// Work packet that lets the VM binding process its weak references via
/// `Scanning::process_weak_refs` (see the `GCWork` impl).
pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}
impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
    /// Create a new `VMProcessWeakRefs` packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}
// Manual impl rather than `#[derive(Default)]`: the derive would add a
// spurious `E: Default` bound (`E` only appears inside `PhantomData`).
// Satisfies clippy's `new_without_default` and matches the file's other
// unit-like packets (`PrepareCollector`, `ScanVMSpecificRoots`, ...).
impl<E: ProcessEdgesWork> Default for VMProcessWeakRefs<E> {
    fn default() -> Self {
        Self::new()
    }
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMProcessWeakRefs");
        let stage = WorkBucketStage::VMRefClosure;
        // The binding returns true if weak-ref processing must run again after
        // the transitive closure it triggered has completed.
        let need_to_repeat = {
            let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
                stage,
                phantom_data: PhantomData,
            };
            <E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
        };
        if need_to_repeat {
            // Re-run this packet when the bucket drains, by installing a new
            // instance as the bucket's sentinel.
            let new_self = Box::new(Self::new());
            worker.scheduler().work_buckets[stage].set_sentinel(new_self);
        }
    }
}
/// Work packet that lets the VM binding forward its weak references via
/// `Scanning::forward_weak_refs` (see the `GCWork` impl).
pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}
impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
    /// Create a new `VMForwardWeakRefs` packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}
// Manual impl rather than `#[derive(Default)]`: the derive would add a
// spurious `E: Default` bound (`E` only appears inside `PhantomData`).
// Satisfies clippy's `new_without_default` and matches the file's other
// unit-like packets (`PrepareCollector`, `ScanVMSpecificRoots`, ...).
impl<E: ProcessEdgesWork> Default for VMForwardWeakRefs<E> {
    fn default() -> Self {
        Self::new()
    }
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMForwardWeakRefs");
        // Unlike weak-ref processing, forwarding runs in the VMRefForwarding
        // stage and is not repeated.
        let stage = WorkBucketStage::VMRefForwarding;
        let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
            stage,
            phantom_data: PhantomData,
        };
        <E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
    }
}
/// Work packet that notifies the VM binding that forwarding has finished, via
/// `Collection::post_forwarding`.
#[derive(Default)]
pub struct VMPostForwarding<VM: VMBinding> {
    phantom_data: PhantomData<VM>,
}
impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("VMPostForwarding start");
        <VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
        trace!("VMPostForwarding end");
    }
}
/// Work packet that scans the roots of one stopped mutator thread.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);
impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
        let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
            worker.tls,
            // Re-borrow the mutator through a raw pointer so it can be handed
            // to the binding while `self` is still borrowed — TODO(review):
            // confirm the binding does not retain the reference.
            unsafe { &mut *(self.0 as *mut _) },
            factory,
        );
        self.0.flush();
        // The last packet to finish stack scanning flips the GC status.
        if mmtk.state.inform_stack_scanned(mutators) {
            <C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
                false, worker.tls,
            );
            mmtk.set_gc_status(GcStatus::GcProper);
        }
    }
}
/// Work packet that scans VM-specific (global) roots.
#[derive(Default)]
pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);
impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
    /// Create a new `ScanVMSpecificRoots` packet.
    pub fn new() -> Self {
        Self(PhantomData)
    }
}
impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanStaticRoots");
        // Same factory setup as ScanMutatorRoots, but the binding scans its
        // VM-global roots instead of a mutator's stack.
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
    }
}
/// Common state shared by all `ProcessEdgesWork` implementations: the slots to
/// process, the queue of newly traced objects, and scheduling context.
pub struct ProcessEdgesBase<VM: VMBinding> {
    // Slots (edges) to be processed by this packet.
    pub slots: Vec<VM::VMSlot>,
    // Objects enqueued by `trace_object`, to be turned into scan work.
    pub nodes: VectorObjectQueue,
    mmtk: &'static MMTK<VM>,
    // Set via `set_worker` before the packet runs; null until then.
    worker: *mut GCWorker<VM>,
    // True if the slots are roots (affects sanity-GC root caching).
    pub roots: bool,
    // The bucket stage that derived scan work should be added to.
    pub bucket: WorkBucketStage,
}
// SAFETY assumption: the raw worker pointer is only dereferenced on the worker
// that the packet runs on — TODO(review): confirm.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
impl<VM: VMBinding> ProcessEdgesBase<VM> {
    /// Create the base state. `roots` marks the slots as root slots.
    pub fn new(
        slots: Vec<VM::VMSlot>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        // With extreme assertions enabled, record every slot so duplicate
        // processing within one GC can be detected.
        #[cfg(feature = "extreme_assertions")]
        if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) {
            for slot in &slots {
                mmtk.slot_logger.log_slot(*slot);
            }
        }
        Self {
            slots,
            nodes: VectorObjectQueue::new(),
            mmtk,
            worker: std::ptr::null_mut(),
            roots,
            bucket,
        }
    }
    /// Record the worker executing this packet. Must be called before
    /// `worker()` is used.
    pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
        self.worker = worker;
    }
    /// The worker executing this packet. Panics/UB if `set_worker` was never
    /// called (the pointer would be null).
    pub fn worker(&self) -> &'static mut GCWorker<VM> {
        unsafe { &mut *self.worker }
    }
    /// The MMTk instance this packet belongs to.
    pub fn mmtk(&self) -> &'static MMTK<VM> {
        self.mmtk
    }
    /// The plan of the MMTk instance.
    pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
        self.mmtk.get_plan()
    }
    /// Take all enqueued nodes, leaving the queue empty.
    pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
        self.nodes.take()
    }
    /// Whether this packet processes root slots.
    pub fn is_roots(&self) -> bool {
        self.roots
    }
}
/// Shorthand for the slot type of a `ProcessEdgesWork` implementation's VM.
pub type SlotOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMSlot;
/// The core trait for slot-processing work packets. Implementations define how
/// an object is traced; this trait provides the surrounding machinery for
/// loading/storing slots and dispatching follow-up scan work.
/// `Deref`s to [`ProcessEdgesBase`] for the shared state.
pub trait ProcessEdgesWork:
    Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
{
    type VM: VMBinding;
    /// The scan-work packet type created for enqueued nodes.
    type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
    /// Maximum number of nodes buffered before a flush.
    const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
    /// If true, `process_slot` writes the (possibly moved) object back.
    const OVERWRITE_REFERENCE: bool = true;
    /// If true, scan work is executed inline instead of being queued.
    const SCAN_OBJECTS_IMMEDIATELY: bool = true;
    /// Create a packet for the given slots. `roots` marks them as root slots;
    /// `bucket` is where derived scan work will be queued.
    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<Self::VM>,
        bucket: WorkBucketStage,
    ) -> Self;
    /// Trace one object, returning its (possibly new) reference.
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
    /// Record root slots so a subsequent sanity GC can re-trace them.
    #[cfg(feature = "sanity")]
    fn cache_roots_for_sanity_gc(&mut self) {
        assert!(self.roots);
        self.mmtk()
            .sanity_checker
            .lock()
            .unwrap()
            .add_root_slots(self.slots.clone());
    }
    /// Run the scan packet inline or queue it, per SCAN_OBJECTS_IMMEDIATELY.
    fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
        if Self::SCAN_OBJECTS_IMMEDIATELY {
            // Processing the nodes now keeps them cache-hot.
            work_packet.do_work(self.worker(), self.mmtk);
        } else {
            debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
            self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
        }
    }
    /// Create the scan-work packet for a batch of enqueued nodes.
    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType;
    /// Drain the node queue into a scan-work packet (no-op when empty).
    fn flush(&mut self) {
        let nodes = self.pop_nodes();
        if !nodes.is_empty() {
            self.start_or_dispatch_scan_work(self.create_scan_work(nodes));
        }
    }
    /// Process one slot: load, trace, and (optionally) store back the moved
    /// reference. Empty slots are skipped.
    fn process_slot(&mut self, slot: SlotOf<Self>) {
        let Some(object) = slot.load() else {
            // Skip slots that are not holding an object reference.
            return;
        };
        let new_object = self.trace_object(object);
        if Self::OVERWRITE_REFERENCE && new_object != object {
            slot.store(new_object);
        }
    }
    /// Process every slot in this packet.
    fn process_slots(&mut self) {
        probe!(mmtk, process_slots, self.slots.len(), self.is_roots());
        for i in 0..self.slots.len() {
            self.process_slot(self.slots[i])
        }
    }
}
/// Blanket impl: every `ProcessEdgesWork` is itself a runnable work packet.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        self.set_worker(worker);
        self.process_slots();
        // Dispatch any nodes enqueued while processing the slots.
        if !self.nodes.is_empty() {
            self.flush();
        }
        // Cache roots for a later sanity GC, but not while already inside one.
        #[cfg(feature = "sanity")]
        if self.roots && !_mmtk.is_in_sanity() {
            self.cache_roots_for_sanity_gc();
        }
        trace!("ProcessEdgesWork End");
    }
}
/// A `ProcessEdgesWork` implementation that dispatches `trace_object` through
/// the space function table (SFT) rather than through a concrete plan type.
#[allow(dead_code)]
pub struct SFTProcessEdges<VM: VMBinding> {
    pub base: ProcessEdgesBase<VM>,
}
impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;
    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        Self { base }
    }
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        use crate::policy::sft::GCWorkerMutRef;
        let worker = GCWorkerMutRef::new(self.worker());
        // SAFETY assumption: `object` points into MMTk-managed memory, so its
        // address maps to a valid SFT entry — TODO(review): confirm why the
        // unchecked lookup is safe here.
        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
        sft.sft_trace_object(&mut self.base.nodes, object, worker)
    }
    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> ScanObjects<Self> {
        ScanObjects::<Self>::new(nodes, false, self.bucket)
    }
}
/// A `RootsWorkFactory` that turns roots reported by the binding into
/// `ProcessEdgesWork` packets. `DPE` handles ordinary (movable) root slots,
/// `PPE` handles pinning roots.
pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
    VM: VMBinding,
    DPE: ProcessEdgesWork<VM = VM>,
    PPE: ProcessEdgesWork<VM = VM>,
> {
    mmtk: &'static MMTK<VM>,
    phantom: PhantomData<(DPE, PPE)>,
}
// Hand-written `Clone`: a derive would demand `DPE: Clone` and `PPE: Clone`,
// which is unnecessary since both only appear inside `PhantomData`. All fields
// are `Copy`, so a functional-update copy of `*self` suffices.
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>> Clone
    for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    fn clone(&self) -> Self {
        Self { ..*self }
    }
}
/// Discriminates the kind of roots reported to the tracing `probe!` calls
/// below. `repr(usize)` so the discriminant can be passed to the probe as-is.
/// NOTE(review): variant names are SCREAMING_CASE (non-idiomatic) but are
/// referenced by the probe sites, so they are kept unchanged here.
#[repr(usize)]
enum RootsKind {
    NORMAL = 0,
    PINNING = 1,
    TPINNING = 2,
}
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    RootsWorkFactory<VM::VMSlot> for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Ordinary root slots: processed (and possibly updated) by `DPE` in the
    /// Closure stage.
    fn create_process_roots_work(&mut self, slots: Vec<VM::VMSlot>) {
        probe!(mmtk, roots, RootsKind::NORMAL, slots.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::Closure,
            DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure),
        );
    }
    /// Pinning roots: the root objects themselves must not move (traced with
    /// `PPE`), but their descendants are traced normally with `DPE`.
    fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::PINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::PinningRootsTrace,
            ProcessRootNodes::<VM, PPE, DPE>::new(nodes, WorkBucketStage::Closure),
        );
    }
    /// Transitively pinning roots: the whole transitive closure is traced with
    /// `PPE` in the TPinningClosure stage.
    fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::TPINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::TPinningClosure,
            ProcessRootNodes::<VM, PPE, PPE>::new(nodes, WorkBucketStage::TPinningClosure),
        );
    }
}
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Create a factory bound to the given MMTk instance.
    fn new(mmtk: &'static MMTK<VM>) -> Self {
        Self {
            mmtk,
            phantom: PhantomData,
        }
    }
}
// Deref to the shared base state, as required by the ProcessEdgesWork trait.
impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
/// Common behavior for packets that scan a batch of objects, enqueuing their
/// outgoing references for further tracing.
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
    type E: ProcessEdgesWork<VM = VM>;
    /// Hook invoked after each object has been scanned.
    fn post_scan_object(&self, object: ObjectReference);
    /// The bucket stage that follow-up work is queued into.
    fn get_bucket(&self) -> WorkBucketStage;
    /// Scan every object in `buffer`. Objects that support slot enqueuing are
    /// scanned via an `ObjectsClosure`; the rest are deferred and scanned with
    /// a node-enqueuing tracer afterwards.
    fn do_work_common(
        &self,
        buffer: &[ObjectReference],
        worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
    ) {
        let tls = worker.tls;
        let objects_to_scan = buffer;
        // Objects that cannot be scanned slot-by-slot; handled in pass two.
        let mut scan_later = vec![];
        {
            let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());
            // Optional live-bytes accounting, guarded by a runtime option.
            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
                for object in objects_to_scan.iter().copied() {
                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
                        &mut live_bytes_stats,
                        object,
                    );
                }
            }
            // Pass one: slot-enqueuing scan where the binding supports it.
            for object in objects_to_scan.iter().copied() {
                if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                    trace!("Scan object (slot) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                    self.post_scan_object(object);
                } else {
                    scan_later.push(object);
                }
            }
        }
        let total_objects = objects_to_scan.len();
        let scan_and_trace = scan_later.len();
        probe!(mmtk, scan_objects, total_objects, scan_and_trace);
        // Pass two: scan-and-trace for objects without slot enqueuing support.
        if !scan_later.is_empty() {
            let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
                stage: self.get_bucket(),
                phantom_data: PhantomData,
            };
            object_tracer_context.with_tracer(worker, |object_tracer| {
                for object in scan_later.iter().copied() {
                    trace!("Scan object (node) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
                        tls,
                        object,
                        object_tracer,
                    );
                    self.post_scan_object(object);
                }
            });
        }
    }
}
/// A plan-agnostic object-scanning packet (used e.g. with `SFTProcessEdges`).
pub struct ScanObjects<Edges: ProcessEdgesWork> {
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(unused)]
    concurrent: bool,
    phantom: PhantomData<Edges>,
    // Bucket stage for follow-up work.
    bucket: WorkBucketStage,
}
impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
    /// Create a packet scanning `buffer`, queueing derived work into `bucket`.
    pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
        Self {
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
    type E = E;
    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }
    // The plan-agnostic packet has no per-object post-scan action.
    fn post_scan_object(&self, _object: ObjectReference) {
    }
}
impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("ScanObjects");
        // All the real work is in the trait's shared implementation.
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("ScanObjects End");
    }
}
use crate::mmtk::MMTK;
use crate::plan::Plan;
use crate::plan::PlanTraceObject;
use crate::policy::gc_work::TraceKind;
/// A `ProcessEdgesWork` implementation that traces objects through a concrete
/// plan type `P` (via `PlanTraceObject`) with a statically known trace `KIND`,
/// enabling monomorphized, plan-specific tracing.
pub struct PlanProcessEdges<
    VM: VMBinding,
    P: Plan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    plan: &'static P,
    base: ProcessEdgesBase<VM>,
}
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
    for PlanProcessEdges<VM, P, KIND>
{
    type VM = VM;
    type ScanObjectsWorkType = PlanScanObjects<Self, P>;
    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        // The instance's plan must be exactly `P`; a mismatch is a bug.
        let plan = base.plan().downcast_ref::<P>().unwrap();
        Self { plan, base }
    }
    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType {
        PlanScanObjects::<Self, P>::new(self.plan, nodes, false, self.bucket)
    }
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let worker = self.worker();
        self.plan
            .trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
    }
    // Overrides the default: only write the slot back if this trace kind can
    // actually move objects (statically known via `may_move_objects`).
    fn process_slot(&mut self, slot: SlotOf<Self>) {
        let Some(object) = slot.load() else {
            return;
        };
        let new_object = self.trace_object(object);
        if P::may_move_objects::<KIND>() && new_object != object {
            slot.store(new_object);
        }
    }
}
// Deref to the shared base state, as required by the ProcessEdgesWork trait.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
    for PlanProcessEdges<VM, P, KIND>
{
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
    for PlanProcessEdges<VM, P, KIND>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
/// A plan-specific object-scanning packet; unlike `ScanObjects` it carries a
/// reference to the plan so `post_scan_object` can be forwarded to it.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
    plan: &'static P,
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(dead_code)]
    concurrent: bool,
    phantom: PhantomData<E>,
    // Bucket stage for follow-up work.
    bucket: WorkBucketStage,
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
    /// Create a packet scanning `buffer` on behalf of `plan`.
    pub fn new(
        plan: &'static P,
        buffer: Vec<ObjectReference>,
        concurrent: bool,
        bucket: WorkBucketStage,
    ) -> Self {
        Self {
            plan,
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
    for PlanScanObjects<E, P>
{
    type E = E;
    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }
    // Forward the post-scan hook to the plan.
    fn post_scan_object(&self, object: ObjectReference) {
        self.plan.post_scan_object(object);
    }
}
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
    for PlanScanObjects<E, P>
{
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("PlanScanObjects");
        // All the real work is in the trait's shared implementation.
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("PlanScanObjects End");
    }
}
/// Work packet for (transitively) pinning root objects: the roots themselves
/// are traced with `R2OPE` (which must not move them), and the objects they
/// reference are then scanned with `O2OPE`.
pub(crate) struct ProcessRootNodes<
    VM: VMBinding,
    R2OPE: ProcessEdgesWork<VM = VM>,
    O2OPE: ProcessEdgesWork<VM = VM>,
> {
    phantom: PhantomData<(VM, R2OPE, O2OPE)>,
    roots: Vec<ObjectReference>,
    // Bucket stage into which the follow-up scan work is queued.
    bucket: WorkBucketStage,
}
impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>>
    ProcessRootNodes<VM, R2OPE, O2OPE>
{
    /// Create a packet for the given root objects.
    pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
        Self {
            phantom: PhantomData,
            roots: nodes,
            bucket,
        }
    }
}
impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>> GCWork<VM>
    for ProcessRootNodes<VM, R2OPE, O2OPE>
{
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("ProcessRootNodes");
        // Record the root nodes for a later sanity GC (unless already in one).
        #[cfg(feature = "sanity")]
        {
            if !mmtk.is_in_sanity() {
                mmtk.sanity_checker
                    .lock()
                    .unwrap()
                    .add_root_nodes(self.roots.clone());
            }
        }
        let num_roots = self.roots.len();
        // Trace each root with R2OPE and collect the nodes it enqueues. The
        // roots are pinned, so tracing must not move them (asserted below).
        let root_objects_to_scan = {
            let mut process_edges_work =
                R2OPE::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
            process_edges_work.set_worker(worker);
            for object in self.roots.iter().copied() {
                let new_object = process_edges_work.trace_object(object);
                debug_assert_eq!(
                    object, new_object,
                    "Object moved while tracing root unmovable root object: {} -> {}",
                    object, new_object
                );
            }
            process_edges_work.nodes.take()
        };
        let num_enqueued_nodes = root_objects_to_scan.len();
        probe!(mmtk, process_root_nodes, num_roots, num_enqueued_nodes);
        // Scan the enqueued nodes with O2OPE in the configured bucket. The
        // O2OPE instance is only used to create the scan packet.
        if !root_objects_to_scan.is_empty() {
            let process_edges_work = O2OPE::new(vec![], false, mmtk, self.bucket);
            let work = process_edges_work.create_scan_work(root_objects_to_scan);
            crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
        }
        trace!("ProcessRootNodes End");
    }
}
/// A placeholder `ProcessEdgesWork` for plans/contexts that do not support a
/// given kind of edge processing; every operation panics if actually invoked.
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
    phantom: PhantomData<VM>,
}
impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        panic!("unsupported!")
    }
}
impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        panic!("unsupported!")
    }
}
// Every method panics: this type exists only to satisfy the associated-type
// requirements of contexts that never use this edge-processing path.
impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;
    fn new(
        _slots: Vec<SlotOf<Self>>,
        _roots: bool,
        _mmtk: &'static MMTK<Self::VM>,
        _bucket: WorkBucketStage,
    ) -> Self {
        panic!("unsupported!")
    }
    fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
        panic!("unsupported!")
    }
    fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Self::ScanObjectsWorkType {
        panic!("unsupported!")
    }
}