use crate::plan::barriers::Barrier;
use crate::plan::global::Plan;
use crate::plan::AllocationSemantics;
use crate::policy::space::Space;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
use crate::util::alloc::Allocator;
use crate::util::{Address, ObjectReference};
use crate::util::{VMMutatorThread, VMWorkerThread};
use crate::vm::VMBinding;
use crate::MMTK;
use enum_map::EnumMap;
use super::barriers::NoBarrier;
/// Pairing of each allocator selector with the space that allocator allocates into.
pub(crate) type SpaceMapping<VM> = Vec<(AllocatorSelector, &'static dyn Space<VM>)>;
/// A placeholder `MutatorConfig::prepare_func` for plans that never invoke the
/// per-mutator prepare callback. Reaching this function indicates a plan bug.
pub(crate) fn unreachable_prepare_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.")
}
/// The shared `MutatorConfig::prepare_func` for plans that use the common plan.
/// Only the `marksweep_as_nonmoving` configuration needs per-mutator prepare
/// work; in all other configurations this is a no-op (hence `unused_variables`).
#[allow(unused_variables)]
pub(crate) fn common_prepare_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    #[cfg(feature = "marksweep_as_nonmoving")]
    // SAFETY: with `marksweep_as_nonmoving`, `AllocationSemantics::NonMoving`
    // is mapped to a `FreeListAllocator` (see `create_allocator_mapping`), so
    // the typed downcast is valid.
    unsafe {
        mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
            AllocationSemantics::NonMoving,
        )
    }
    .prepare();
}
/// A placeholder `MutatorConfig::release_func` for plans that never invoke the
/// per-mutator release callback. Reaching this function indicates a plan bug.
pub(crate) fn unreachable_release_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::release_func` must not be called for the current plan.")
}
/// The shared `MutatorConfig::release_func` for plans that use the common plan.
/// Releases or resets the allocator backing the non-moving space; which
/// allocator that is depends on the build feature selecting the policy.
#[allow(unused_variables)]
pub(crate) fn common_release_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    cfg_if::cfg_if! {
        if #[cfg(feature = "marksweep_as_nonmoving")] {
            // SAFETY: with this feature, `NonMoving` is mapped to a
            // `FreeListAllocator` (see `create_allocator_mapping`).
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.release();
        } else if #[cfg(feature = "immortal_as_nonmoving")] {
            // The bump-pointer allocator for an immortal non-moving space needs
            // no per-GC release work.
        } else {
            // SAFETY: in the default configuration, `NonMoving` is mapped to an
            // `ImmixAllocator` (see `create_allocator_mapping`).
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::ImmixAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.reset();
        }
    }
}
/// A `MutatorConfig::release_func` that intentionally does nothing, for plans
/// whose mutators need no per-GC release work.
#[allow(dead_code)]
pub(crate) fn no_op_release_func<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
/// Per-plan configuration of a mutator: which allocator serves each allocation
/// semantics, which space each allocator allocates into, and the plan-specific
/// GC prepare/release callbacks. `#[repr(C)]` presumably keeps the layout
/// stable for VM bindings that access it from generated code — confirm.
#[repr(C)]
pub struct MutatorConfig<VM: VMBinding> {
    /// Maps each `AllocationSemantics` to the selector of the allocator serving it.
    pub allocator_mapping: &'static EnumMap<AllocationSemantics, AllocatorSelector>,
    /// Maps each allocator selector to the space it allocates into.
    /// Boxed so the struct itself stays a fixed size (clippy waiver below).
    #[allow(clippy::box_collection)]
    pub space_mapping: Box<SpaceMapping<VM>>,
    /// Called on each mutator when a GC starts (see `Mutator::prepare`).
    pub prepare_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
    /// Called on each mutator when a GC finishes (see `Mutator::release`).
    pub release_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
}
impl<VM: VMBinding> std::fmt::Debug for MutatorConfig<VM> {
    /// Render the semantics→selector mapping and the selector→space mapping,
    /// one entry per line, flagging any selector with no registered space.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "MutatorConfig:")?;
        writeln!(f, "Semantics mapping:")?;
        for (semantic, selector) in self.allocator_mapping.iter() {
            // Look up the space registered for this selector; a miss means the
            // plan's mapping tables are out of sync.
            let space_name: &str = self
                .space_mapping
                .iter()
                .find(|(candidate, _)| candidate == selector)
                .map_or("!!!missing space here!!!", |(_, space)| space.name());
            writeln!(f, "- {:?} = {:?} ({:?})", semantic, selector, space_name)?;
        }
        writeln!(f, "Space mapping:")?;
        for (selector, space) in self.space_mapping.iter() {
            writeln!(f, "- {:?} = {:?}", selector, space.name())?;
        }
        Ok(())
    }
}
/// Builder for `Mutator`: collects the TLS handle, the MMTk instance, the
/// plan-provided config, and an optional write barrier before construction.
pub struct MutatorBuilder<VM: VMBinding> {
    // Write barrier to install; defaults to `NoBarrier` (see `new`).
    barrier: Box<dyn Barrier<VM>>,
    // The mutator thread this mutator belongs to.
    mutator_tls: VMMutatorThread,
    mmtk: &'static MMTK<VM>,
    config: MutatorConfig<VM>,
}
impl<VM: VMBinding> MutatorBuilder<VM> {
    /// Start building a mutator. The write barrier defaults to `NoBarrier`;
    /// override it with [`MutatorBuilder::barrier`] before calling `build`.
    pub fn new(
        mutator_tls: VMMutatorThread,
        mmtk: &'static MMTK<VM>,
        config: MutatorConfig<VM>,
    ) -> Self {
        Self {
            barrier: Box::new(NoBarrier),
            mutator_tls,
            mmtk,
            config,
        }
    }

    /// Replace the write barrier the built mutator will use.
    pub fn barrier(mut self, barrier: Box<dyn Barrier<VM>>) -> Self {
        self.barrier = barrier;
        self
    }

    /// Consume the builder and construct the `Mutator`, instantiating its
    /// allocators from the config's space mapping.
    pub fn build(self) -> Mutator<VM> {
        let MutatorBuilder {
            barrier,
            mutator_tls,
            mmtk,
            config,
        } = self;
        let allocators = Allocators::<VM>::new(mutator_tls, mmtk, &config.space_mapping);
        Mutator {
            allocators,
            barrier,
            mutator_tls,
            plan: mmtk.get_plan(),
            config,
        }
    }
}
/// The per-thread mutator context: the thread's allocators, write barrier,
/// TLS handle, plan reference, and plan-specific configuration. `#[repr(C)]`
/// presumably guarantees a stable layout so bindings can compute field offsets
/// (see `get_allocator_base_offset`) — confirm against binding code.
#[repr(C)]
pub struct Mutator<VM: VMBinding> {
    pub(crate) allocators: Allocators<VM>,
    /// The write barrier for this mutator thread.
    pub barrier: Box<dyn Barrier<VM>>,
    /// The mutator thread this context belongs to.
    pub mutator_tls: VMMutatorThread,
    pub(crate) plan: &'static dyn Plan<VM = VM>,
    pub(crate) config: MutatorConfig<VM>,
}
impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
    /// Run the plan-specific per-mutator prepare callback (start of a GC).
    fn prepare(&mut self, tls: VMWorkerThread) {
        (*self.config.prepare_func)(self, tls)
    }
    /// Run the plan-specific per-mutator release callback (end of a GC).
    fn release(&mut self, tls: VMWorkerThread) {
        (*self.config.release_func)(self, tls)
    }
    /// Allocate `size` bytes (with `align`/`offset` constraints) from the
    /// allocator mapped to the given semantics.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address {
        // SAFETY: the selector comes from this mutator's own allocator
        // mapping, so it identifies a valid allocator in `self.allocators`.
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // No non-default allocation options should linger from a previous
        // `alloc_with_options` call — TODO confirm this invariant's origin.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc(size, align, offset)
    }
    /// Like `alloc`, but with caller-supplied per-call allocation options.
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address {
        // SAFETY: selector originates from this mutator's allocator mapping.
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // Persistent options must still be default on entry; the per-call
        // `options` are passed explicitly below.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_with_options(size, align, offset, options)
    }
    /// Force the slow-path allocation for the given semantics.
    fn alloc_slow(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address {
        // SAFETY: selector originates from this mutator's allocator mapping.
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_slow(size, align, offset)
    }
    /// Force the slow-path allocation with per-call allocation options.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address {
        // SAFETY: selector originates from this mutator's allocator mapping.
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_slow_with_options(size, align, offset, options)
    }
    /// Initialize the metadata of a freshly allocated object in the space
    /// that served the given allocation semantics.
    fn post_alloc(
        &mut self,
        refer: ObjectReference,
        _bytes: usize,
        allocator: AllocationSemantics,
    ) {
        // SAFETY: selector originates from this mutator's allocator mapping.
        unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        }
        .get_space()
        .initialize_object_metadata(refer)
    }
    /// The mutator thread this context belongs to.
    fn get_tls(&self) -> VMMutatorThread {
        self.mutator_tls
    }
    /// Mutable access to this mutator's write barrier.
    fn barrier(&mut self) -> &mut dyn Barrier<VM> {
        &mut *self.barrier
    }
}
impl<VM: VMBinding> Mutator<VM> {
    /// All distinct, non-`None` allocator selectors this mutator actually
    /// uses, derived from the semantics mapping (sorted and deduplicated).
    fn get_all_allocator_selectors(&self) -> Vec<AllocatorSelector> {
        use itertools::Itertools;
        self.config
            .allocator_mapping
            .iter()
            .map(|(_, selector)| *selector)
            .sorted()
            .dedup()
            .filter(|selector| *selector != AllocatorSelector::None)
            .collect()
    }
    /// Notify every in-use allocator that this mutator is being destroyed,
    /// giving each a chance to clean up (e.g. return unused memory).
    pub fn on_destroy(&mut self) {
        for selector in self.get_all_allocator_selectors() {
            // SAFETY: each selector was taken from this mutator's own mapping,
            // so it identifies a valid allocator in `self.allocators`.
            unsafe { self.allocators.get_allocator_mut(selector) }.on_mutator_destroy();
        }
    }
    /// Borrow the allocator identified by `selector`.
    ///
    /// # Safety
    /// The caller must pass a selector that is valid for this mutator
    /// (i.e. one present in its allocator mapping, not `None`).
    pub unsafe fn allocator(&self, selector: AllocatorSelector) -> &dyn Allocator<VM> {
        self.allocators.get_allocator(selector)
    }
    /// Mutably borrow the allocator identified by `selector`.
    ///
    /// # Safety
    /// Same contract as [`Mutator::allocator`].
    pub unsafe fn allocator_mut(&mut self, selector: AllocatorSelector) -> &mut dyn Allocator<VM> {
        self.allocators.get_allocator_mut(selector)
    }
    /// Borrow the allocator for `selector` as its concrete type `T`.
    ///
    /// # Safety
    /// The selector must be valid for this mutator, and `T` must be the
    /// actual concrete allocator type behind that selector.
    pub unsafe fn allocator_impl<T: Allocator<VM>>(&self, selector: AllocatorSelector) -> &T {
        self.allocators.get_typed_allocator(selector)
    }
    /// Mutably borrow the allocator for `selector` as its concrete type `T`.
    ///
    /// # Safety
    /// Same contract as [`Mutator::allocator_impl`].
    pub unsafe fn allocator_impl_mut<T: Allocator<VM>>(
        &mut self,
        selector: AllocatorSelector,
    ) -> &mut T {
        self.allocators.get_typed_allocator_mut(selector)
    }
    /// Borrow, as concrete type `T`, the allocator serving `semantic`.
    ///
    /// # Safety
    /// `T` must be the actual concrete allocator type that this mutator's
    /// mapping assigns to `semantic`.
    pub unsafe fn allocator_impl_for_semantic<T: Allocator<VM>>(
        &self,
        semantic: AllocationSemantics,
    ) -> &T {
        self.allocator_impl::<T>(self.config.allocator_mapping[semantic])
    }
    /// Mutably borrow, as concrete type `T`, the allocator serving `semantic`.
    ///
    /// # Safety
    /// Same contract as [`Mutator::allocator_impl_for_semantic`].
    pub unsafe fn allocator_impl_mut_for_semantic<T: Allocator<VM>>(
        &mut self,
        semantic: AllocationSemantics,
    ) -> &mut T {
        self.allocator_impl_mut::<T>(self.config.allocator_mapping[semantic])
    }
    /// Byte offset of the allocator identified by `selector` from the start of
    /// the `Mutator` struct. Presumably used by VM bindings to generate
    /// allocation fast paths that index into the mutator directly — relies on
    /// the `#[repr(C)]` layout of `Mutator` (and, presumably, of `Allocators`;
    /// verify in `allocators.rs`). Panics on `AllocatorSelector::None`.
    pub fn get_allocator_base_offset(selector: AllocatorSelector) -> usize {
        use crate::util::alloc::*;
        use memoffset::offset_of;
        use std::mem::size_of;
        // Offset of the `allocators` field, plus the offset of the selector's
        // array within `Allocators`, plus `index` strides of the element size.
        offset_of!(Mutator<VM>, allocators)
            + match selector {
                AllocatorSelector::BumpPointer(index) => {
                    offset_of!(Allocators<VM>, bump_pointer)
                        + size_of::<BumpAllocator<VM>>() * index as usize
                }
                AllocatorSelector::FreeList(index) => {
                    offset_of!(Allocators<VM>, free_list)
                        + size_of::<FreeListAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Immix(index) => {
                    offset_of!(Allocators<VM>, immix)
                        + size_of::<ImmixAllocator<VM>>() * index as usize
                }
                AllocatorSelector::LargeObject(index) => {
                    offset_of!(Allocators<VM>, large_object)
                        + size_of::<LargeObjectAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Malloc(index) => {
                    offset_of!(Allocators<VM>, malloc)
                        + size_of::<MallocAllocator<VM>>() * index as usize
                }
                AllocatorSelector::MarkCompact(index) => {
                    offset_of!(Allocators<VM>, markcompact)
                        + size_of::<MarkCompactAllocator<VM>>() * index as usize
                }
                AllocatorSelector::None => panic!("Expect a valid AllocatorSelector, found None"),
            }
    }
}
/// The interface a mutator context exposes to the VM binding and the GC:
/// allocation entry points, GC prepare/release hooks, and barrier access.
pub trait MutatorContext<VM: VMBinding>: Send + 'static {
    /// Called on this mutator when a GC starts.
    fn prepare(&mut self, tls: VMWorkerThread);
    /// Called on this mutator when a GC finishes.
    fn release(&mut self, tls: VMWorkerThread);
    /// Allocate `size` bytes with the given alignment constraints, using the
    /// allocator mapped to `allocator`.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// Like `alloc`, but with per-call allocation options.
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// Allocate via the slow path, bypassing the fast path.
    fn alloc_slow(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// Allocate via the slow path with per-call allocation options.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// Finish an allocation: initialize metadata for the new object `refer`.
    fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics);
    /// Flush any barrier-buffered remembered-set entries.
    fn flush_remembered_sets(&mut self) {
        self.barrier().flush();
    }
    /// Flush all buffered mutator state (currently just remembered sets).
    fn flush(&mut self) {
        self.flush_remembered_sets();
    }
    /// The mutator thread this context belongs to.
    fn get_tls(&self) -> VMMutatorThread;
    /// Mutable access to this mutator's write barrier.
    fn barrier(&mut self) -> &mut dyn Barrier<VM>;
}
/// Counters of how many allocators of each kind a plan has reserved so far.
/// Each counter is the next free index for that allocator kind; `add_*`
/// methods hand out indices in reservation order.
#[allow(dead_code)]
#[derive(Default)]
pub(crate) struct ReservedAllocators {
    pub n_bump_pointer: u8,
    pub n_large_object: u8,
    pub n_malloc: u8,
    pub n_immix: u8,
    pub n_mark_compact: u8,
    pub n_free_list: u8,
}
impl ReservedAllocators {
    /// A reservation set with no allocators reserved.
    pub const DEFAULT: Self = ReservedAllocators {
        n_bump_pointer: 0,
        n_large_object: 0,
        n_malloc: 0,
        n_immix: 0,
        n_mark_compact: 0,
        n_free_list: 0,
    };

    /// Panic if any counter exceeds the capacity `Allocators` provides for
    /// that allocator kind. Checked in the same order the fields are declared.
    fn validate(&self) {
        use crate::util::alloc::allocators::*;
        let checks: [(usize, usize, &str); 6] = [
            (self.n_bump_pointer as usize, MAX_BUMP_ALLOCATORS, "bump pointer"),
            (self.n_large_object as usize, MAX_LARGE_OBJECT_ALLOCATORS, "large object"),
            (self.n_malloc as usize, MAX_MALLOC_ALLOCATORS, "malloc"),
            (self.n_immix as usize, MAX_IMMIX_ALLOCATORS, "immix"),
            (self.n_mark_compact as usize, MAX_MARK_COMPACT_ALLOCATORS, "mark compact"),
            (self.n_free_list as usize, MAX_FREE_LIST_ALLOCATORS, "free list"),
        ];
        for (count, max, kind) in checks {
            assert!(
                count <= max,
                "Allocator mapping declared more {} allocators than the max allowed.",
                kind
            );
        }
    }

    /// Reserve one more bump-pointer allocator and return its selector.
    fn add_bump_pointer_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_bump_pointer;
        self.n_bump_pointer += 1;
        AllocatorSelector::BumpPointer(index)
    }
    /// Reserve one more large-object allocator and return its selector.
    fn add_large_object_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_large_object;
        self.n_large_object += 1;
        AllocatorSelector::LargeObject(index)
    }
    /// Reserve one more malloc allocator and return its selector.
    #[allow(dead_code)]
    fn add_malloc_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_malloc;
        self.n_malloc += 1;
        AllocatorSelector::Malloc(index)
    }
    /// Reserve one more immix allocator and return its selector.
    #[allow(dead_code)]
    fn add_immix_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_immix;
        self.n_immix += 1;
        AllocatorSelector::Immix(index)
    }
    /// Reserve one more mark-compact allocator and return its selector.
    #[allow(dead_code)]
    fn add_mark_compact_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_mark_compact;
        self.n_mark_compact += 1;
        AllocatorSelector::MarkCompact(index)
    }
    /// Reserve one more free-list allocator and return its selector.
    #[allow(dead_code)]
    fn add_free_list_allocator(&mut self) -> AllocatorSelector {
        let index = self.n_free_list;
        self.n_free_list += 1;
        AllocatorSelector::FreeList(index)
    }
}
/// Build the map from `AllocationSemantics` to the selector of the allocator
/// serving it, reserving slots on top of those the plan already reserved.
/// Semantics not assigned here keep the map's default selector.
///
/// NOTE: the reservation order here must mirror `create_space_mapping`, since
/// both derive allocator indices from the same reservation sequence.
pub(crate) fn create_allocator_mapping(
    mut reserved: ReservedAllocators,
    include_common_plan: bool,
) -> EnumMap<AllocationSemantics, AllocatorSelector> {
    let mut map = EnumMap::<AllocationSemantics, AllocatorSelector>::default();
    // Code and large-code semantics exist only with the `code_space` feature.
    #[cfg(feature = "code_space")]
    {
        map[AllocationSemantics::Code] = reserved.add_bump_pointer_allocator();
        map[AllocationSemantics::LargeCode] = reserved.add_bump_pointer_allocator();
    }
    #[cfg(feature = "ro_space")]
    {
        map[AllocationSemantics::ReadOnly] = reserved.add_bump_pointer_allocator();
    }
    // Semantics served by the common plan: immortal, LOS, and non-moving.
    if include_common_plan {
        map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator();
        map[AllocationSemantics::Los] = reserved.add_large_object_allocator();
        // Which allocator serves the non-moving space depends on the build
        // feature selecting the policy for that space.
        map[AllocationSemantics::NonMoving] = if cfg!(feature = "marksweep_as_nonmoving") {
            reserved.add_free_list_allocator()
        } else if cfg!(feature = "immortal_as_nonmoving") {
            reserved.add_bump_pointer_allocator()
        } else {
            reserved.add_immix_allocator()
        };
    }
    // Fail fast if more allocators were reserved than `Allocators` can hold.
    reserved.validate();
    map
}
/// Build the list pairing each reserved allocator selector with the space it
/// allocates into, for the spaces managed by the base/common plan.
///
/// NOTE: the reservation order here must mirror `create_allocator_mapping`, so
/// that a given semantics and its space receive the same allocator index.
pub(crate) fn create_space_mapping<VM: VMBinding>(
    mut reserved: ReservedAllocators,
    include_common_plan: bool,
    plan: &'static dyn Plan<VM = VM>,
) -> Vec<(AllocatorSelector, &'static dyn Space<VM>)> {
    let mut vec: Vec<(AllocatorSelector, &'static dyn Space<VM>)> = vec![];
    // Code spaces exist only with the `code_space` feature.
    #[cfg(feature = "code_space")]
    {
        vec.push((
            reserved.add_bump_pointer_allocator(),
            &plan.base().code_space,
        ));
        vec.push((
            reserved.add_bump_pointer_allocator(),
            &plan.base().code_lo_space,
        ));
    }
    #[cfg(feature = "ro_space")]
    vec.push((reserved.add_bump_pointer_allocator(), &plan.base().ro_space));
    // Spaces managed by the common plan: immortal, LOS, and non-moving.
    if include_common_plan {
        vec.push((
            reserved.add_bump_pointer_allocator(),
            plan.common().get_immortal(),
        ));
        vec.push((
            reserved.add_large_object_allocator(),
            plan.common().get_los(),
        ));
        vec.push((
            // The allocator kind must match the policy chosen for the
            // non-moving space (same feature logic as `create_allocator_mapping`).
            if cfg!(feature = "marksweep_as_nonmoving") {
                reserved.add_free_list_allocator()
            } else if cfg!(feature = "immortal_as_nonmoving") {
                reserved.add_bump_pointer_allocator()
            } else {
                reserved.add_immix_allocator()
            },
            plan.common().get_nonmoving(),
        ));
    }
    // Fail fast if more allocators were reserved than `Allocators` can hold.
    reserved.validate();
    vec
}