use atomic::Ordering;
use crate::plan::ObjectQueue;
use crate::plan::PlanConstraints;
use crate::plan::VectorObjectQueue;
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::SpaceOptions;
use crate::policy::space::{CommonSpace, Space};
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::heap::layout::heap_layout::{Mmapper, VMMap};
use crate::util::heap::HeapMeta;
use crate::util::heap::{FreeListPageResource, PageResource, VMRequest};
use crate::util::metadata;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::opaque_pointer::*;
use crate::util::treadmill::TreadMill;
use crate::util::{Address, ObjectReference};
use crate::vm::ObjectModel;
use crate::vm::VMBinding;
#[allow(unused)]
const PAGE_MASK: usize = !(BYTES_IN_PAGE - 1);

/// The mark bit: flipped between 0 and 1 on each full-heap GC.
const MARK_BIT: u8 = 0b01;
/// The nursery bit: set while an object has not yet survived a GC.
const NURSERY_BIT: u8 = 0b10;
/// Mask covering both bits of the per-object mark/nursery metadata.
const LOS_BIT_MASK: u8 = 0b11;
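
/// A space for large objects. Objects are never moved; instead they are
/// tracked on a treadmill, and liveness is recorded in a per-object
/// mark/nursery metadata byte (`LOCAL_LOS_MARK_NURSERY_SPEC`).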
pub struct LargeObjectSpace<VM: VMBinding> {
    common: CommonSpace<VM>,
    pr: FreeListPageResource<VM>,
    mark_state: u8,
    in_nursery_gc: bool,
    treadmill: TreadMill,
}
impl<VM: VMBinding> SFT for LargeObjectSpace<VM> {
    fn name(&self) -> &str {
        self.get_name()
    }

    fn is_live(&self, object: ObjectReference) -> bool {
        self.test_mark_bit(object, self.mark_state)
    }

    // Large objects are never moved, so they are effectively always pinned:
    // explicit pin/unpin requests are no-ops.
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }

    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }

    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }

    fn is_movable(&self) -> bool {
        false
    }

    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }
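
    /// Initialize the mark/nursery byte for a new object and add the object
    /// to the treadmill.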
    fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool) {
        let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        );
        // Take on the current mark state; flag freshly allocated objects as nursery.
        let mut new_value = (old_value & !LOS_BIT_MASK) | self.mark_state;
        if alloc {
            new_value |= NURSERY_BIT;
        }
        VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::<VM, u8>(
            object,
            new_value,
            None,
            Ordering::SeqCst,
        );

        // If this object is freshly allocated, we do not set it as unlogged.
        if !alloc && self.common.needs_log_bit {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        }

        #[cfg(feature = "global_alloc_bit")]
        crate::util::alloc_bit::set_alloc_bit::<VM>(object);
        self.treadmill.add_to_treadmill(object, alloc);
    }
#[cfg(feature = "is_mmtk_object")]
#[inline(always)]
fn is_mmtk_object(&self, addr: Address) -> bool {
crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
}
#[inline(always)]
fn sft_trace_object(
&self,
queue: &mut VectorObjectQueue,
object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
self.trace_object(queue, object)
}
}
impl<VM: VMBinding> Space<VM> for LargeObjectSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }

    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }

    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }

    fn initialize_sft(&self) {
        self.common().initialize_sft(self.as_sft())
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn release_multiple_pages(&mut self, start: Address) {
        self.pr.release_pages(start);
    }
}
use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;
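
/// Tracing in this space never copies: large objects stay in place and are
/// only marked and enqueued, so the copy semantics argument is ignored.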
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for LargeObjectSpace<VM> {
    #[inline(always)]
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }

    #[inline(always)]
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}
impl<VM: VMBinding> LargeObjectSpace<VM> {
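    /// Create a new LargeObjectSpace.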
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        name: &'static str,
        zeroed: bool,
        vmrequest: VMRequest,
        global_side_metadata_specs: Vec<SideMetadataSpec>,
        vm_map: &'static VMMap,
        mmapper: &'static Mmapper,
        heap: &mut HeapMeta,
        constraints: &'static PlanConstraints,
        protect_memory_on_release: bool,
    ) -> Self {
        let common = CommonSpace::new(
            SpaceOptions {
                name,
                movable: false,
                immortal: false,
                zeroed,
                needs_log_bit: constraints.needs_log_bit,
                vmrequest,
                side_metadata_specs: SideMetadataContext {
                    global: global_side_metadata_specs,
                    local: metadata::extract_side_metadata(&[
                        *VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC,
                    ]),
                },
            },
            vm_map,
            mmapper,
            heap,
        );
        let mut pr = if vmrequest.is_discontiguous() {
            FreeListPageResource::new_discontiguous(vm_map)
        } else {
            FreeListPageResource::new_contiguous(common.start, common.extent, vm_map)
        };
        pr.protect_memory_on_release = protect_memory_on_release;
        LargeObjectSpace {
            pr,
            common,
            mark_state: 0,
            in_nursery_gc: false,
            treadmill: TreadMill::new(),
        }
    }
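
    /// Prepare for a GC. In a full-heap GC, flip the mark state so that all
    /// previously marked objects appear unmarked, and flip the treadmill.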
    pub fn prepare(&mut self, full_heap: bool) {
        if full_heap {
            debug_assert!(self.treadmill.is_from_space_empty());
            self.mark_state = MARK_BIT - self.mark_state;
        }
        self.treadmill.flip(full_heap);
        self.in_nursery_gc = !full_heap;
    }
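
    /// Release dead objects at the end of a GC. The nursery is always swept;
    /// the mature treadmill is swept only in a full-heap GC.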
    pub fn release(&mut self, full_heap: bool) {
        self.sweep_large_pages(true);
        debug_assert!(self.treadmill.is_nursery_empty());
        if full_heap {
            self.sweep_large_pages(false);
        }
    }
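
    /// Trace a reference to an object in this space: on the first visit, mark
    /// the object, move it to to-space on the treadmill, and enqueue it.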
    #[allow(clippy::collapsible_if)]
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "global_alloc_bit")]
        debug_assert!(
            crate::util::alloc_bit::is_alloced::<VM>(object),
            "{:x}: alloc bit not set",
            object
        );
        let nursery_object = self.is_in_nursery(object);
        // In a nursery GC, only nursery objects are traced; mature objects
        // are assumed live. In a full-heap GC, everything is traced.
        if !self.in_nursery_gc || nursery_object {
            if self.test_and_mark(object, self.mark_state) {
                // First visit: move the object to to-space and promote it out
                // of the nursery.
                self.treadmill.copy(object, nursery_object);
                self.clear_nursery(object);
                // The object is being promoted, so mark it as unlogged so that
                // future mutations are caught by the write barrier.
                if nursery_object && self.common.needs_log_bit {
                    VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC
                        .mark_as_unlogged::<VM>(object, Ordering::SeqCst);
                }
                queue.enqueue(object);
            }
        }
        object
    }
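
    /// Sweep one treadmill (the nursery or the mature from-space), releasing
    /// the pages of every object that was not copied to to-space.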
    fn sweep_large_pages(&mut self, sweep_nursery: bool) {
        let sweep = |object: ObjectReference| {
            #[cfg(feature = "global_alloc_bit")]
            crate::util::alloc_bit::unset_alloc_bit::<VM>(object);
            self.pr
                .release_pages(get_super_page(object.to_object_start::<VM>()));
        };
        if sweep_nursery {
            for object in self.treadmill.collect_nursery() {
                sweep(object);
            }
        } else {
            for object in self.treadmill.collect() {
                sweep(object);
            }
        }
    }
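
    /// Allocate `pages` pages for a large object; this may poll for a GC
    /// through `acquire`.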
    pub fn allocate_pages(&self, tls: VMThread, pages: usize) -> Address {
        self.acquire(tls, pages)
    }
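
    /// Atomically mark an object. Returns true if the object was newly marked
    /// by this call, or false if it already carried the given mark value.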
    fn test_and_mark(&self, object: ObjectReference, value: u8) -> bool {
        loop {
            // The mark state is not flipped in a nursery GC, so a fresh
            // nursery object already carries the current mark bit. Comparing
            // the nursery bit as well ensures such objects still get traced.
            let mask = if self.in_nursery_gc {
                LOS_BIT_MASK
            } else {
                MARK_BIT
            };
            let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            let mark_bit = old_value & mask;
            if mark_bit == value {
                return false;
            }
            if VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    (old_value & !LOS_BIT_MASK) | value,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }
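
    /// Check whether the object's mark bit equals the given value.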
    fn test_mark_bit(&self, object: ObjectReference, value: u8) -> bool {
        VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        ) & MARK_BIT
            == value
    }
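
    /// Check whether the object is still in the logical nursery, i.e. has not
    /// yet survived a GC.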
    fn is_in_nursery(&self, object: ObjectReference) -> bool {
        VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::Relaxed,
        ) & NURSERY_BIT
            == NURSERY_BIT
    }
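
    /// Atomically clear the object's nursery bit, promoting it to the mature
    /// generation.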
    fn clear_nursery(&self, object: ObjectReference) {
        loop {
            let old_val = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::Relaxed,
            );
            let new_val = old_val & !NURSERY_BIT;
            if VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_val,
                    new_val,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
    }
}
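
/// Round an object cell address down to the start of its page.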
fn get_super_page(cell: Address) -> Address {
    cell.align_down(BYTES_IN_PAGE)
}