use std::ptr::addr_of;
use std::sync::atomic::{AtomicBool, Ordering};
use super::heap_parameters::*;
use crate::util::constants::*;
use crate::util::Address;
use crate::util::conversions::{chunk_align_down, chunk_align_up};
/// log_2 of the coarse-grained "chunk" granularity at which MMTk manages virtual memory.
pub const LOG_BYTES_IN_CHUNK: usize = 22;
/// Number of bytes in a chunk (4 MiB with the log above).
pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;
/// Mask selecting the offset of an address within its chunk.
pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1;
/// Number of pages in a chunk.
pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize);
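// Compile-time sanity sketch (added for illustration, not in the original
// source): a chunk's pages must exactly tile the chunk, and the chunk mask
// must select precisely the within-chunk offset bits.
const _: () = assert!(PAGES_IN_CHUNK * BYTES_IN_PAGE == BYTES_IN_CHUNK);
const _: () = assert!(CHUNK_MASK == BYTES_IN_CHUNK - 1);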
/// Runtime-configurable description of the virtual memory layout used by MMTk.
#[derive(Clone, Debug)]
pub struct VMLayout {
    /// log_2 of the total addressable virtual space.
    pub log_address_space: usize,
    /// Lowest address MMTk may use for its heap.
    pub heap_start: Address,
    /// Highest address MMTk may use for its heap.
    pub heap_end: Address,
    /// log_2 of the maximum extent of a single space.
    pub log_space_extent: usize,
    /// If true, 64-bit spaces are laid out contiguously at fixed offsets
    /// rather than discontiguously within the heap range.
    pub force_use_contiguous_spaces: bool,
}
impl VMLayout {
    /// log_2 of the address bits usable on this architecture (the full 32-bit space).
    #[cfg(target_pointer_width = "32")]
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 32;
    /// log_2 of the address bits usable on this architecture. Typical 64-bit
    /// hardware exposes a 48-bit canonical address space, of which the low
    /// half (47 bits) is used here.
    #[cfg(target_pointer_width = "64")]
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 47;
    /// Maximum extent, in bytes, of a single space.
    pub const fn max_space_extent(&self) -> usize {
        1 << self.log_space_extent
    }
    /// Lowest address available to MMTk.
    pub const fn available_start(&self) -> Address {
        self.heap_start
    }
    /// Highest address available to MMTk.
    pub const fn available_end(&self) -> Address {
        self.heap_end
    }
    /// Size, in bytes, of the address range available to MMTk.
    pub const fn available_bytes(&self) -> usize {
        self.available_end().get_extent(self.available_start())
    }
    /// Maximum number of chunks the architecture's address space can hold.
    pub const fn max_chunks(&self) -> usize {
        1 << self.log_max_chunks()
    }
    /// log_2 of the maximum number of chunks in the address space.
    pub const fn log_max_chunks(&self) -> usize {
        Self::LOG_ARCH_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK
    }
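    // Worked example (follows from the definitions above, not original text):
    // on a 64-bit target, LOG_ARCH_ADDRESS_SPACE = 47 and LOG_BYTES_IN_CHUNK
    // = 22, so log_max_chunks() = 25 and the chunk map can describe up to
    // 2^25 (~33.5 million) chunks of 4 MiB each.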
    /// Shift that positions the space-index bits within an address
    /// (64-bit contiguous layout).
    pub(crate) fn space_shift_64(&self) -> usize {
        self.log_space_extent
    }
    /// Bit mask selecting the space-index bits of an address
    /// (64-bit contiguous layout).
    pub(crate) fn space_mask_64(&self) -> usize {
        ((1 << LOG_MAX_SPACES) - 1) << self.space_shift_64()
    }
    /// Size, in bytes, of each space in the 64-bit contiguous layout.
    pub(crate) fn space_size_64(&self) -> usize {
        self.max_space_extent()
    }
    /// log_2 of the number of pages in a 64-bit space.
    pub(crate) fn log_pages_in_space64(&self) -> usize {
        self.log_space_extent - LOG_BYTES_IN_PAGE as usize
    }
    /// Number of pages in a 64-bit space.
    pub(crate) fn pages_in_space64(&self) -> usize {
        1 << self.log_pages_in_space64()
    }
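    /// Illustrative helper (hypothetical, not part of the original API): shows
    /// how the mask/shift pair above recovers the index of the space that
    /// contains `addr` in a contiguous 64-bit layout.
    #[cfg(all(test, target_pointer_width = "64"))]
    fn example_space_index_64(&self, addr: Address) -> usize {
        (addr.as_usize() & self.space_mask_64()) >> self.space_shift_64()
    }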
    /// Mask that extracts the space-index bits of an address for use as a
    /// space map index. Masking an arbitrary address yields a value in the
    /// range 0..=31, but only values from 1 to `MAX_SPACES` are used as
    /// indices, hence the extra bit beyond `LOG_MAX_SPACES`.
    pub(crate) fn address_mask(&self) -> usize {
        0x1f << self.log_space_extent
    }
    /// Panics if the layout's invariants do not hold: chunk-aligned heap
    /// bounds, a non-empty heap range, and space extents that fit within the
    /// address space (leaving room for `MAX_SPACES` contiguous spaces when
    /// `force_use_contiguous_spaces` is set).
    const fn validate(&self) {
        assert!(self.heap_start.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_end.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_start.as_usize() < self.heap_end.as_usize());
        assert!(self.log_address_space <= Self::LOG_ARCH_ADDRESS_SPACE);
        assert!(self.log_space_extent <= self.log_address_space);
        if self.force_use_contiguous_spaces {
            assert!(self.log_space_extent <= (self.log_address_space - LOG_MAX_SPACES));
            assert!(self.heap_start.is_aligned_to(self.max_space_extent()));
        }
    }
    /// Asserts that the configured address space does not exceed what the
    /// mmapper can handle.
    pub(crate) fn validate_address_space(&self) {
        let log_mappable_bytes = crate::mmtk::MMAPPER.log_mappable_bytes();
        assert!(
            self.log_address_space <= log_mappable_bytes as usize,
            "log_address_space is {log_address_space}, but \
             the MMAPPER can only handle up to {log_mappable_bytes} bits",
            log_address_space = self.log_address_space,
        );
    }
}
impl VMLayout {
    /// Default 32-bit configuration: a discontiguous heap between
    /// 0x8000_0000 and 0xd000_0000.
    pub const fn new_32bit() -> Self {
        let layout32 = Self {
            log_address_space: 32,
            heap_start: chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }),
            heap_end: chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }),
            log_space_extent: 31,
            force_use_contiguous_spaces: false,
        };
        layout32.validate();
        layout32
    }
#[cfg(target_pointer_width = "64")]
pub const fn new_64bit() -> Self {
let layout64 = Self {
log_address_space: 47,
heap_start: chunk_align_down(unsafe {
Address::from_usize(0x0000_0200_0000_0000usize)
}),
heap_end: chunk_align_up(unsafe { Address::from_usize(0x0000_2200_0000_0000usize) }),
log_space_extent: 41,
force_use_contiguous_spaces: true,
};
layout64.validate();
layout64
}
    /// Installs custom layout constants. A VM binding may use this (e.g. for
    /// compressed-pointer heaps), but only before the layout has been read.
    pub(crate) fn set_custom_vm_layout(constants: VMLayout) {
        if cfg!(debug_assertions) {
            assert!(
                !VM_LAYOUT_FETCHED.load(Ordering::SeqCst),
                "vm_layout has already been read before setup"
            );
        }
        constants.validate();
        unsafe {
            VM_LAYOUT = constants;
        }
    }
}
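// Compile-time sanity sketch (added for illustration): the default 64-bit
// layout spans 0x0000_0200_0000_0000..0x0000_2200_0000_0000, i.e. 2^45 bytes
// of heap, with each contiguous space covering 2^41 bytes.
#[cfg(target_pointer_width = "64")]
const _: () = {
    let layout = VMLayout::new_64bit();
    assert!(layout.available_bytes() == 1 << 45);
    assert!(layout.max_space_extent() == 1 << 41);
};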
impl std::default::Default for VMLayout {
    #[cfg(target_pointer_width = "32")]
    fn default() -> Self {
        Self::new_32bit()
    }
    #[cfg(target_pointer_width = "64")]
    fn default() -> Self {
        Self::new_64bit()
    }
}
#[cfg(target_pointer_width = "32")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_32bit();
#[cfg(target_pointer_width = "64")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_64bit();
static VM_LAYOUT_FETCHED: AtomicBool = AtomicBool::new(false);
/// Returns the current virtual memory layout. In debug builds this also
/// records that the layout has been observed, so that a later call to
/// `set_custom_vm_layout` can be flagged as an error.
pub fn vm_layout() -> &'static VMLayout {
    if cfg!(debug_assertions) {
        VM_LAYOUT_FETCHED.store(true, Ordering::SeqCst);
    }
    // Read through `addr_of!` to avoid creating a long-lived reference to the
    // mutable static; callers must not race with `set_custom_vm_layout`.
    unsafe { &*addr_of!(VM_LAYOUT) }
}
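#[cfg(test)]
mod tests {
    use super::*;

    // Minimal sketch of the read-side API: the global layout is fetched with
    // `vm_layout()` and should satisfy the invariants `validate` enforces.
    #[test]
    fn default_layout_invariants() {
        let layout = vm_layout();
        assert!(layout.heap_start.is_aligned_to(BYTES_IN_CHUNK));
        assert!(layout.heap_end.is_aligned_to(BYTES_IN_CHUNK));
        assert!(layout.available_bytes() > 0);
        assert!(layout.log_space_extent <= layout.log_address_space);
    }

    // Sketch of recovering a space index with the 64-bit mask/shift helpers,
    // via the hypothetical `example_space_index_64` above. Assumes
    // LOG_MAX_SPACES >= 2 so that index 2 survives the mask.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn space_index_roundtrip() {
        let layout = VMLayout::new_64bit();
        // An address whose space-index bits encode index 2.
        let addr = unsafe { Address::from_usize(2 << layout.space_shift_64()) };
        assert_eq!(layout.example_space_index_64(addr), 2);
    }

    // Sketch of preparing a custom layout for `set_custom_vm_layout`; the
    // addresses here are illustrative, not defaults from this crate.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn custom_layout_validates() {
        let custom = VMLayout {
            log_address_space: 40,
            heap_start: chunk_align_down(unsafe { Address::from_usize(0x40_0000_0000) }),
            heap_end: chunk_align_up(unsafe { Address::from_usize(0x80_0000_0000) }),
            log_space_extent: 35,
            force_use_contiguous_spaces: false,
        };
        // `validate` panics on an inconsistent layout; returning normally
        // means the custom layout satisfies the same invariants as the defaults.
        custom.validate();
    }
}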