//! Raw memory addresses (`Address`) and object references (`ObjectReference`).
use atomic_traits::Atomic;
use bytemuck::NoUninit;
use std::fmt;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::*;
use std::sync::atomic::Ordering;
use crate::mmtk::{MMAPPER, SFT_MAP};
/// The size of a memory region, in bytes.
pub type ByteSize = usize;
/// A signed offset between two addresses, in bytes.
pub type ByteOffset = isize;
/// `Address` wraps a raw `usize` memory address. Creating an `Address` is always
/// safe, but dereferencing one (e.g. via `load`/`store`) is unsafe: the caller
/// must guarantee the address is valid for the access.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct Address(usize);
// Address arithmetic is split by signedness: `ByteSize` (usize) and
// `ByteOffset` (isize) offsets have separate impls, and `Address - Address`
// yields the unsigned distance between the two addresses.
impl Add<ByteSize> for Address {
type Output = Address;
fn add(self, offset: ByteSize) -> Address {
Address(self.0 + offset)
}
}
impl AddAssign<ByteSize> for Address {
fn add_assign(&mut self, offset: ByteSize) {
self.0 += offset;
}
}
impl Add<ByteOffset> for Address {
type Output = Address;
fn add(self, offset: ByteOffset) -> Address {
Address((self.0 as isize + offset) as usize)
}
}
impl AddAssign<ByteOffset> for Address {
fn add_assign(&mut self, offset: ByteOffset) {
self.0 = (self.0 as isize + offset) as usize
}
}
impl Sub<ByteSize> for Address {
type Output = Address;
fn sub(self, offset: ByteSize) -> Address {
Address(self.0 - offset)
}
}
impl SubAssign<ByteSize> for Address {
fn sub_assign(&mut self, offset: ByteSize) {
self.0 -= offset;
}
}
impl Sub<Address> for Address {
type Output = ByteSize;
fn sub(self, other: Address) -> ByteSize {
debug_assert!(
self.0 >= other.0,
"for (addr_a - addr_b), a({}) needs to be larger than b({})",
self,
other
);
self.0 - other.0
}
}
impl BitAnd<usize> for Address {
type Output = usize;
fn bitand(self, other: usize) -> usize {
self.0 & other
}
}
// Note the output type: masking with a `u8` narrows to the low byte.
impl BitAnd<u8> for Address {
type Output = u8;
fn bitand(self, other: u8) -> u8 {
(self.0 as u8) & other
}
}
impl BitOr<usize> for Address {
type Output = usize;
fn bitor(self, other: usize) -> usize {
self.0 | other
}
}
// Unlike `BitAnd<u8>`, or-ing with a `u8` widens the mask and returns the
// full word as a `usize`.
impl BitOr<u8> for Address {
type Output = usize;
fn bitor(self, other: u8) -> usize {
self.0 | (other as usize)
}
}
impl Shr<usize> for Address {
type Output = usize;
fn shr(self, shift: usize) -> usize {
self.0 >> shift
}
}
impl Shl<usize> for Address {
type Output = usize;
fn shl(self, shift: usize) -> usize {
self.0 << shift
}
}
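// A sketch of how these operators compose; the values below are illustrative
// only:
//
//     let base = unsafe { Address::from_usize(0x1000) };
//     let end = base + 64usize;          // Add<ByteSize>  -> Address
//     assert_eq!(end - base, 64);        // Sub<Address>   -> ByteSize
//     assert_eq!(base & 0xfffusize, 0);  // BitAnd<usize>  -> usize
//     assert_eq!(base >> 12, 1usize);    // Shr<usize>     -> usize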
impl Address {
    /// The zero (null) address.
    pub const ZERO: Self = Address(0);
    /// The largest possible address.
    pub const MAX: Self = Address(usize::MAX);
    /// Creates an `Address` from a raw pointer.
    pub fn from_ptr<T>(ptr: *const T) -> Address {
        Address(ptr as usize)
    }
    /// Creates an `Address` from a reference.
    pub fn from_ref<T>(r: &T) -> Address {
        Address(r as *const T as usize)
    }
    /// Creates an `Address` from a mutable raw pointer.
    pub fn from_mut_ptr<T>(ptr: *mut T) -> Address {
        Address(ptr as usize)
    }
    /// Creates the zero address. Unsafe to discourage ad-hoc use: prefer
    /// [`Address::ZERO`], and never dereference a zero address.
    pub const unsafe fn zero() -> Address {
        Address(0)
    }
    /// Creates the maximum address. The result must not be dereferenced.
    pub const unsafe fn max() -> Address {
        Address(usize::MAX)
    }
    /// Creates an `Address` from an arbitrary raw `usize` without any checks.
    pub const unsafe fn from_usize(raw: usize) -> Address {
        Address(raw)
    }
    /// Offsets the address by `offset` elements of type `T`, like
    /// `pointer::offset`.
    pub fn shift<T>(self, offset: isize) -> Self {
self + mem::size_of::<T>() as isize * offset
}
    /// Returns the distance in bytes from `other` up to `self`; underflows if
    /// `other` is the higher address.
    pub const fn get_extent(self, other: Address) -> ByteSize {
        self.0 - other.0
    }
    /// Returns the signed offset in bytes from `other` to `self`.
    pub const fn get_offset(self, other: Address) -> ByteOffset {
        self.0 as isize - other.0 as isize
    }
    /// `const` equivalent of `self + size`.
    #[allow(clippy::should_implement_trait)]
    pub const fn add(self, size: usize) -> Address {
        Address(self.0 + size)
    }
    /// `const` equivalent of `self - size`.
    #[allow(clippy::should_implement_trait)]
    pub const fn sub(self, size: usize) -> Address {
        Address(self.0 - size)
    }
    /// Offsets the address by a signed number of bytes, wrapping on overflow.
    pub const fn offset(self, offset: isize) -> Address {
        Address(self.0.wrapping_add_signed(offset))
    }
    /// `const` equivalent of `self & mask`.
    pub const fn and(self, mask: usize) -> usize {
        self.0 & mask
    }
    /// Subtracts `size`, clamping at zero instead of underflowing.
    pub const fn saturating_sub(self, size: usize) -> Address {
        Address(self.0.saturating_sub(size))
    }
    /// Loads a `T` from the address. Unsafe: the address must be valid for
    /// reads of `T` and properly aligned.
    pub unsafe fn load<T: Copy>(self) -> T {
        *(self.0 as *mut T)
    }
    /// Stores `value` at the address. Unsafe: the address must be valid for
    /// writes of `T` and properly aligned.
    pub unsafe fn store<T>(self, value: T) {
        (self.0 as *mut T).write(value);
    }
    /// Atomically loads from the address, treating it as an atomic of type
    /// `T` (e.g. `AtomicUsize`). Unsafe: the address must be valid and
    /// aligned for `T`.
    pub unsafe fn atomic_load<T: Atomic>(self, order: Ordering) -> T::Type {
        let loc = &*(self.0 as *const T);
        loc.load(order)
    }
    /// Atomically stores `val` at the address. Unsafe: see [`Address::atomic_load`].
    pub unsafe fn atomic_store<T: Atomic>(self, val: T::Type, order: Ordering) {
        let loc = &*(self.0 as *const T);
        loc.store(val, order)
    }
    /// Atomic compare-and-exchange at the address, returning the previous
    /// value as `Ok` on success and `Err` on failure. Unsafe: see
    /// [`Address::atomic_load`].
    pub unsafe fn compare_exchange<T: Atomic>(
        self,
        old: T::Type,
        new: T::Type,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T::Type, T::Type> {
        let loc = &*(self.0 as *const T);
        loc.compare_exchange(old, new, success, failure)
    }
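    // Sketch of the atomic accessors above (illustrative only; the location
    // must genuinely be valid and aligned for atomic access):
    //
    //     use std::sync::atomic::AtomicUsize;
    //     let mut word: usize = 0;
    //     let addr = Address::from_mut_ptr(&mut word as *mut usize);
    //     unsafe { addr.atomic_store::<AtomicUsize>(1, Ordering::SeqCst) };
    //     assert_eq!(unsafe { addr.atomic_load::<AtomicUsize>(Ordering::SeqCst) }, 1);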
    /// Returns whether this is the zero (null) address.
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }
    /// Rounds the address up to the given power-of-two alignment.
    pub const fn align_up(self, align: ByteSize) -> Address {
        use crate::util::conversions;
        Address(conversions::raw_align_up(self.0, align))
    }
    /// Rounds the address down to the given power-of-two alignment.
    pub const fn align_down(self, align: ByteSize) -> Address {
        use crate::util::conversions;
        Address(conversions::raw_align_down(self.0, align))
    }
    /// Returns whether the address is aligned to `align`.
    pub const fn is_aligned_to(self, align: usize) -> bool {
        use crate::util::conversions;
        conversions::raw_is_aligned(self.0, align)
    }
    /// Converts the address to a const raw pointer.
    pub fn to_ptr<T>(self) -> *const T {
        self.0 as *const T
    }
    /// Converts the address to a mutable raw pointer.
    pub fn to_mut_ptr<T>(self) -> *mut T {
        self.0 as *mut T
    }
    /// Reborrows the address as a `&T`. Unsafe: the address must point to a
    /// valid, aligned `T` that lives at least as long as `'a`.
    pub unsafe fn as_ref<'a, T>(self) -> &'a T {
        &*self.to_mut_ptr()
    }
    /// Reborrows the address as a `&mut T`. Unsafe: in addition to the
    /// requirements of [`Address::as_ref`], no other reference may alias it.
    pub unsafe fn as_mut_ref<'a, T>(self) -> &'a mut T {
        &mut *self.to_mut_ptr()
    }
    /// Returns the underlying `usize` value.
    pub const fn as_usize(self) -> usize {
        self.0
    }
    /// Returns the index of the chunk containing this address.
    pub fn chunk_index(self) -> usize {
        use crate::util::conversions;
        conversions::address_to_chunk_index(self)
    }
    /// Returns whether the address is mapped by MMTk. The zero address is
    /// never considered mapped.
    pub fn is_mapped(self) -> bool {
        !self.is_zero() && MMAPPER.is_mapped_address(self)
    }
    /// Returns the intersection of two address ranges. The result is empty
    /// (`start >= end`) if the ranges do not overlap.
    pub fn range_intersection(r1: &Range<Address>, r2: &Range<Address>) -> Range<Address> {
        r1.start.max(r2.start)..r1.end.min(r2.end)
    }
}
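// Sketch of `load`/`store` through an `Address` (illustrative only): a local
// variable stands in for any mapped, properly aligned memory.
//
//     let mut slot: usize = 0;
//     let addr = Address::from_mut_ptr(&mut slot as *mut usize);
//     unsafe { addr.store(42usize) };
//     assert_eq!(unsafe { addr.load::<usize>() }, 42);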
impl fmt::UpperHex for Address {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:X}", self.0)
}
}
impl fmt::LowerHex for Address {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:x}", self.0)
}
}
impl fmt::Display for Address {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}", self.0)
}
}
impl fmt::Debug for Address {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}", self.0)
}
}
impl std::str::FromStr for Address {
    type Err = std::num::ParseIntError;
    // Note: this parses a *decimal* usize, whereas `Display` prints hex, so
    // an `Address` does not round-trip through its `Display` output.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let raw: usize = s.parse()?;
        Ok(Address(raw))
    }
}
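// Parsing sketch: decimal input works, a `0x`-prefixed string does not.
//
//     use std::str::FromStr;
//     let addr = Address::from_str("4096").unwrap();
//     assert_eq!(addr, unsafe { Address::from_usize(0x1000) });
//     assert!(Address::from_str("0x1000").is_err());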
#[cfg(test)]
mod tests {
use crate::util::Address;
#[test]
fn align_up() {
unsafe {
assert_eq!(
Address::from_usize(0x10).align_up(0x10),
Address::from_usize(0x10)
);
assert_eq!(
Address::from_usize(0x11).align_up(0x10),
Address::from_usize(0x20)
);
assert_eq!(
Address::from_usize(0x20).align_up(0x10),
Address::from_usize(0x20)
);
}
}
#[test]
fn align_down() {
unsafe {
assert_eq!(
Address::from_usize(0x10).align_down(0x10),
Address::from_usize(0x10)
);
assert_eq!(
Address::from_usize(0x11).align_down(0x10),
Address::from_usize(0x10)
);
assert_eq!(
Address::from_usize(0x20).align_down(0x10),
Address::from_usize(0x20)
);
}
}
#[test]
fn is_aligned_to() {
unsafe {
assert!(Address::from_usize(0x10).is_aligned_to(0x10));
assert!(!Address::from_usize(0x11).is_aligned_to(0x10));
assert!(Address::from_usize(0x10).is_aligned_to(0x8));
assert!(!Address::from_usize(0x10).is_aligned_to(0x20));
}
}
#[test]
fn bit_and() {
unsafe {
assert_eq!(
Address::from_usize(0b1111_1111_1100usize) & 0b1010u8,
0b1000u8
);
assert_eq!(
Address::from_usize(0b1111_1111_1100usize) & 0b1000_0000_1010usize,
0b1000_0000_1000usize
);
}
}
#[test]
fn bit_or() {
unsafe {
assert_eq!(
Address::from_usize(0b1111_1111_1100usize) | 0b1010u8,
0b1111_1111_1110usize
);
assert_eq!(
Address::from_usize(0b1111_1111_1100usize) | 0b1000_0000_1010usize,
0b1111_1111_1110usize
);
}
}
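    // Extra coverage, added as a sketch of the element-wise and byte-wise
    // offset helpers defined above.
    #[test]
    fn shift_offset_saturating_sub() {
        unsafe {
            // `shift::<u32>(4)` moves by 4 * size_of::<u32>() = 16 bytes.
            assert_eq!(
                Address::from_usize(0x100).shift::<u32>(4),
                Address::from_usize(0x110)
            );
            // `offset` accepts negative byte offsets.
            assert_eq!(
                Address::from_usize(0x100).offset(-0x10),
                Address::from_usize(0xF0)
            );
            // `saturating_sub` clamps at zero instead of underflowing.
            assert_eq!(
                Address::from_usize(0x10).saturating_sub(0x20),
                Address::ZERO
            );
        }
    }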
}
use crate::vm::VMBinding;
/// `ObjectReference` is a non-null, word-aligned reference to an object. It
/// wraps a `NonZeroUsize`, so `Option<ObjectReference>` is the same size as a
/// word, with `None` encoded as zero.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct ObjectReference(NonZeroUsize);
impl ObjectReference {
    /// The minimum alignment of an `ObjectReference`: one word.
    pub const ALIGNMENT: usize = crate::util::constants::BYTES_IN_ADDRESS;
    /// Converts the reference back to its raw address.
    pub fn to_raw_address(self) -> Address {
        Address(self.0.get())
    }
    /// Converts an address to an `ObjectReference`, returning `None` if the
    /// address is zero. Debug-asserts that the address is word aligned.
    pub fn from_raw_address(addr: Address) -> Option<ObjectReference> {
debug_assert!(
addr.is_aligned_to(Self::ALIGNMENT),
"ObjectReference is required to be word aligned. addr: {addr}"
);
NonZeroUsize::new(addr.0).map(ObjectReference)
}
    /// Like [`ObjectReference::from_raw_address`], without the null check.
    /// Unsafe: `addr` must not be zero.
    pub unsafe fn from_raw_address_unchecked(addr: Address) -> ObjectReference {
debug_assert!(!addr.is_zero());
debug_assert!(
addr.is_aligned_to(Self::ALIGNMENT),
"ObjectReference is required to be word aligned. addr: {addr}"
);
ObjectReference(NonZeroUsize::new_unchecked(addr.0))
}
    /// Returns the header address of the object, as defined by the VM binding.
    pub fn to_header<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        VM::VMObjectModel::ref_to_header(self)
    }
    /// Returns the start address of the object, as defined by the VM binding,
    /// and debug-checks the binding's declared invariants.
    pub fn to_object_start<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        let object_start = VM::VMObjectModel::ref_to_object_start(self);
        debug_assert!(
            !VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS
                || object_start == self.to_raw_address(),
            "The binding claims unified object reference address, but for object reference {}, ref_to_object_start() returns {}",
            self,
            object_start
        );
debug_assert!(
self.to_raw_address()
>= object_start + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
"The invariant `object_ref >= object_start + OBJECT_REF_OFFSET_LOWER_BOUND` is violated. \
object_ref: {}, object_start: {}, OBJECT_REF_OFFSET_LOWER_BOUND: {}",
self.to_raw_address(),
object_start,
VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
);
object_start
}
    /// Returns whether the object is reachable in the current collection.
    pub fn is_reachable(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_reachable(self)
    }
    /// Returns whether the object is live.
    pub fn is_live(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_live(self)
    }
    /// Returns whether the space the object resides in may move objects.
    pub fn is_movable(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_movable()
    }
    /// Returns the new reference if the object has been forwarded.
    pub fn get_forwarded_object(self) -> Option<Self> {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.get_forwarded_object(self)
    }
    /// Returns whether the object is in any MMTk space.
    pub fn is_in_any_space(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_in_space(self)
    }
    /// Sanity-checks the object (only available with the `sanity` feature).
    #[cfg(feature = "sanity")]
    pub fn is_sane(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane()
    }
}
impl fmt::UpperHex for ObjectReference {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:X}", self.0)
}
}
impl fmt::LowerHex for ObjectReference {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:x}", self.0)
}
}
impl fmt::Display for ObjectReference {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}", self.0)
}
}
impl fmt::Debug for ObjectReference {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}", self.0)
}
}
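// A minimal test of the `ObjectReference` conversions above, mirroring the
// style of the `Address` tests. It assumes `ObjectReference` is re-exported
// from `crate::util` alongside `Address`.
#[cfg(test)]
mod object_reference_tests {
    use crate::util::{Address, ObjectReference};
    #[test]
    fn raw_address_round_trip() {
        // Zero is never a valid object reference.
        assert!(ObjectReference::from_raw_address(Address::ZERO).is_none());
        // A word-aligned, non-zero address converts and converts back.
        let addr = unsafe { Address::from_usize(0x1000) };
        let obj = ObjectReference::from_raw_address(addr).unwrap();
        assert_eq!(obj.to_raw_address(), addr);
    }
}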