//! mmtk 0.32.0
//!
//! MMTk is a framework for the design and implementation of high-performance and portable memory managers.
use super::MapState;
use crate::util::heap::layout::mmapper::csm::ChunkRange;
use crate::util::heap::layout::mmapper::csm::MapStateStorage;
use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
use crate::util::Address;

use crate::util::heap::layout::vm_layout::*;
use std::fmt;
use std::sync::atomic::Ordering;
use std::sync::Mutex;

use atomic::Atomic;
use std::io::Result;

/// Logarithm of the address space size that [`ByteMapStateStorage`] is able to handle.
/// This is enough for 32-bit architectures.
/// We may increase it beyond 32 so that it is usable on 64-bit machines in certain VMs with
/// limited address spaces, too.
const LOG_MAPPABLE_BYTES: usize = 32;

/// For now, we only use `ByteMapStateStorage` for 32-bit address range.
const MMAP_NUM_CHUNKS: usize = 1 << (LOG_MAPPABLE_BYTES - LOG_BYTES_IN_CHUNK);

/// A [`MapStateStorage`] implementation based on a simple array.
///
/// Currently it is sized to cover a 32-bit address range.
pub struct ByteMapStateStorage {
    /// Presumably serializes compound read-modify-write operations on `mapped`
    /// (e.g. bulk state transitions) — NOTE(review): confirm against call sites.
    lock: Mutex<()>,
    /// One state slot per chunk, indexed by `address >> LOG_BYTES_IN_CHUNK`.
    /// Slots use relaxed atomics so individual reads/writes need no lock.
    mapped: [Atomic<MapState>; MMAP_NUM_CHUNKS],
}

impl fmt::Debug for ByteMapStateStorage {
    /// Prints only the chunk capacity; dumping every per-chunk state slot
    /// would be unreadably large.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_fmt(format_args!("ByteMapStateStorage({})", MMAP_NUM_CHUNKS))
    }
}

impl MapStateStorage for ByteMapStateStorage {
    fn log_mappable_bytes(&self) -> u8 {
        LOG_MAPPABLE_BYTES as u8
    }

    /// Return the recorded state of the chunk containing `chunk`.
    ///
    /// Addresses beyond the mappable range have no slot and are reported as
    /// [`MapState::Unmapped`] rather than panicking.
    fn get_state(&self, chunk: Address) -> MapState {
        let index = chunk >> LOG_BYTES_IN_CHUNK;
        let Some(slot) = self.mapped.get(index) else {
            return MapState::Unmapped;
        };
        slot.load(Ordering::Relaxed)
    }

    /// Unconditionally set every chunk in `range` to `state`.
    ///
    /// Unlike `get_state`, an out-of-range `range` panics here: silently
    /// dropping a *write* would hide a caller bug.
    fn bulk_set_state(&self, range: ChunkRange, state: MapState) {
        if range.is_empty() {
            return;
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;
        // Slice once so the bounds check happens once for the whole range.
        for slot in &self.mapped[index_start..index_limit] {
            slot.store(state, Ordering::Relaxed);
        }
    }

    /// Transition the states of all chunks in `range`.
    ///
    /// `update_fn` is called once per maximal run of consecutive chunks that
    /// currently share the same state; it returns `Ok(Some(new_state))` to
    /// commit a new state for the run, `Ok(None)` to leave it unchanged, or
    /// `Err` to abort the whole operation.
    ///
    /// Fix over the previous revision: the read→decide→write sequence is now
    /// serialized with `self.lock` (previously declared but never acquired).
    /// Without it, two concurrent callers could both observe the stale state
    /// of a run and both invoke `update_fn` for it — e.g. both attempting the
    /// underlying mapping work — before either stored the new state. Plain
    /// reads via `get_state` remain lock-free, as before.
    fn bulk_transition_state<F>(&self, range: ChunkRange, mut update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>,
    {
        if range.is_empty() {
            return Ok(());
        }

        // Hold the lock for the whole transition so concurrent transitions
        // cannot interleave. Poisoning only happens if a previous transition
        // panicked, in which case the state array may be inconsistent anyway.
        let _guard = self.lock.lock().unwrap();

        // Fast path: a single chunk needs none of the grouping machinery.
        if range.is_single_chunk() {
            let chunk = range.start;
            let index = chunk >> LOG_BYTES_IN_CHUNK;
            let slot: &Atomic<MapState> = &self.mapped[index];
            let state = slot.load(Ordering::Relaxed);
            if let Some(new_state) = update_fn(range, state)? {
                slot.store(new_state, Ordering::Relaxed);
            }
            return Ok(());
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;

        // Walk maximal runs of equal state so `update_fn` is invoked once per
        // run rather than once per chunk.
        let mut group_start = index_start;
        for group in self.mapped.as_slice()[index_start..index_limit]
            .iter()
            .revisitable_group_by(|s| s.load(Ordering::Relaxed))
        {
            let state = group.key;
            let group_end = group_start + group.len;
            // SAFETY: `group_start` indexes a valid slot of `mapped`, so the
            // shifted value is a chunk-aligned address within the mappable
            // range covered by this storage.
            let group_start_addr =
                unsafe { Address::from_usize(group_start << LOG_BYTES_IN_CHUNK) };
            let group_bytes = group.len << LOG_BYTES_IN_CHUNK;
            let group_range = ChunkRange::new_aligned(group_start_addr, group_bytes);
            if let Some(new_state) = update_fn(group_range, state)? {
                // Commit the new state for every chunk in the run.
                for slot in &self.mapped[group_start..group_end] {
                    slot.store(new_state, Ordering::Relaxed);
                }
            }
            group_start = group_end;
        }

        Ok(())
    }
}

impl ByteMapStateStorage {
    /// Creates a storage with every chunk initially in the
    /// [`MapState::Unmapped`] state.
    pub fn new() -> Self {
        let mapped = std::array::from_fn(|_| Atomic::new(MapState::Unmapped));
        Self {
            lock: Mutex::new(()),
            mapped,
        }
    }
}

impl Default for ByteMapStateStorage {
    fn default() -> Self {
        Self::new()
    }
}