use crate::scheduler::GCWork;
use crate::util::linear_scan::Region;
use crate::util::linear_scan::RegionIterator;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::Address;
use crate::vm::VMBinding;
use spin::Mutex;
use std::ops::Range;
/// A chunk-aligned address: a newtype over `Address` identifying one chunk
/// of the heap (size fixed by `Chunk::LOG_BYTES` in the `Region` impl).
///
/// `#[repr(transparent)]` guarantees the same layout as the wrapped `Address`.
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq)]
pub struct Chunk(Address);
impl Region for Chunk {
const LOG_BYTES: usize = crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK;
fn from_aligned_address(address: Address) -> Self {
debug_assert!(address.is_aligned_to(Self::BYTES));
Self(address)
}
fn start(&self) -> Address {
self.0
}
}
impl Chunk {
    /// The chunk at address zero — used as a sentinel for "unset".
    pub const ZERO: Self = Self(Address::ZERO);

    /// Iterate over every `R`-sized region contained in this chunk.
    ///
    /// `R` must be strictly smaller than a chunk and must tile it exactly,
    /// which the debug assertions below verify.
    pub fn iter_region<R: Region>(&self) -> RegionIterator<R> {
        debug_assert!(R::LOG_BYTES < Self::LOG_BYTES);
        debug_assert!(R::is_aligned(self.start()));
        debug_assert!(R::is_aligned(self.end()));
        RegionIterator::<R>::new(
            R::from_aligned_address(self.start()),
            R::from_aligned_address(self.end()),
        )
    }
}
/// Per-chunk state byte: the high bit (`ALLOC_BIT_MASK`) marks the chunk as
/// allocated, and the low bits (`SPACE_INDEX_MASK`) encode the owning space
/// index. The all-zero byte means "free", so zero-initialized side metadata
/// reads as free.
///
/// Equality on the wrapped `u8` is total, so `Eq` is derived alongside
/// `PartialEq` (fixes clippy's `derive_partial_eq_without_eq`); this is
/// backward-compatible and allows use in contexts requiring full equivalence.
#[repr(transparent)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ChunkState(u8);
impl ChunkState {
    /// Bit set in the state byte when the chunk is allocated.
    const ALLOC_BIT_MASK: u8 = 0x80;
    /// Low bits of the state byte that hold the owning space index.
    const SPACE_INDEX_MASK: u8 = 0x0F;

    /// Encode the "allocated to `space_index`" state.
    pub fn allocated(space_index: usize) -> ChunkState {
        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
        ChunkState(Self::ALLOC_BIT_MASK | space_index as u8)
    }

    /// Encode the "free" state (all bits clear).
    pub fn free() -> ChunkState {
        ChunkState(0)
    }

    /// True when no space owns the chunk.
    pub fn is_free(&self) -> bool {
        self.0 == 0
    }

    /// True when some space owns the chunk.
    pub fn is_allocated(&self) -> bool {
        self.0 != 0
    }

    /// Decode the owning space index; only meaningful on allocated states
    /// (asserted in debug builds).
    pub fn get_space_index(&self) -> usize {
        debug_assert!(self.is_allocated());
        let index = usize::from(self.0 & Self::SPACE_INDEX_MASK);
        debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
        index
    }
}
/// Render the state as `Free` or `Allocated(<space index>)`.
impl std::fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.is_free() {
            true => f.write_str("Free"),
            false => write!(f, "Allocated({})", self.get_space_index()),
        }
    }
}
/// Tracks which chunks are allocated to one particular space.
///
/// Each space owns its own `ChunkMap`; the underlying side-metadata table is
/// shared, with the owning space encoded into each chunk's state byte.
pub struct ChunkMap {
// Index of the space this map belongs to; written into each state byte.
space_index: usize,
// A [start, end) chunk range covering every chunk this space has ever
// allocated. Grows in `set_allocated`; never shrinks when chunks are freed
// (freed chunks are filtered out by `get`).
chunk_range: Mutex<Range<Chunk>>,
}
/// Operations on the per-space chunk allocation table.
impl ChunkMap {
/// Side-metadata spec backing the chunk state table: one byte per chunk.
pub const ALLOC_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK;
/// Create an empty chunk map for the space with the given index.
pub fn new(space_index: usize) -> Self {
Self {
space_index,
// ZERO..ZERO is the sentinel for "no chunk recorded yet".
chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO),
}
}
/// Mark `chunk` as allocated to this space (`allocated == true`) or free.
///
/// No-op when the state byte already matches. On allocation, also grows
/// the tracked chunk range to cover `chunk`; freeing never shrinks the
/// range — stale entries are filtered out later by `get`.
pub fn set_allocated(&self, chunk: Chunk, allocated: bool) {
let state = if allocated {
ChunkState::allocated(self.space_index)
} else {
ChunkState::free()
};
// Fast path: the state byte is already what we want.
if self.get_internal(chunk) == state {
return;
}
#[cfg(debug_assertions)]
{
// A chunk may only transition between Free and *this* space's
// Allocated state; it must never currently belong to another space.
let old_state = self.get_internal(chunk);
assert!(
old_state.is_free() || old_state.get_space_index() == self.space_index,
"Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
chunk,
old_state,
state
);
}
// SAFETY: non-atomic store of the chunk's state byte. NOTE(review):
// presumably callers serialize updates to the same chunk (the
// check-then-store above is not atomic) — confirm at call sites.
unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
if allocated {
// Chunk ZERO is the range sentinel, so a real allocation can never
// be at address zero.
debug_assert!(!chunk.start().is_zero());
// Grow the recorded [start, end) range to include the new chunk.
let mut range = self.chunk_range.lock();
if range.start == Chunk::ZERO {
// First chunk ever recorded: range is exactly this one chunk.
range.start = chunk;
range.end = chunk.next();
} else if chunk < range.start {
range.start = chunk;
} else if range.end <= chunk {
range.end = chunk.next();
}
}
}
/// Return the chunk's state if it is allocated to *this* space, else `None`
/// (free, or owned by a different space).
pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
let state = self.get_internal(chunk);
(state.is_allocated() && state.get_space_index() == self.space_index).then_some(state)
}
/// Load the raw state byte for `chunk`, regardless of owning space.
fn get_internal(&self, chunk: Chunk) -> ChunkState {
// SAFETY: non-atomic load of the chunk's state byte. NOTE(review):
// may race with concurrent `set_allocated` stores — confirm callers
// prevent or tolerate torn/stale reads.
let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
ChunkState(byte)
}
/// Iterate all chunks currently allocated to this space.
///
/// The range lock is held only while the bounds are copied into the
/// iterator; per-chunk state is then read lazily via `get` during
/// iteration, filtering out chunks that are free or owned elsewhere.
pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
let chunk_range = self.chunk_range.lock();
RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
.filter(|c| self.get(*c).is_some())
}
/// Build one GC work packet per allocated chunk using `func`.
pub fn generate_tasks<VM: VMBinding>(
&self,
func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
) -> Vec<Box<dyn GCWork<VM>>> {
let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
for chunk in self.all_chunks() {
work_packets.push(func(chunk));
}
work_packets
}
}