//! Two-level radix page map: maps any page address to the metadata of the
//! slab that owns it, or to a sentinel for large (page-granular) allocations.

use crate::platform;
use crate::util::page_size;
use core::ptr;
use core::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering};
/// Sentinel stored in `PageInfo::slab_ptr` for pages that belong to a large
/// allocation rather than a slab.
pub const LARGE_ALLOC_SENTINEL: *mut u8 = usize::MAX as *mut u8;
/// Decoded per-page metadata returned by [`PageMap::lookup`].
#[derive(Clone, Copy)]
#[repr(C)]
pub struct PageInfo {
    /// Owning slab, or `LARGE_ALLOC_SENTINEL` for large allocations.
    pub slab_ptr: *mut u8,
    /// Size-class index of the owning slab.
    pub class_index: u8,
    /// Index of the arena the owning slab belongs to.
    pub arena_index: u8,
}
impl PageInfo {
    #[allow(dead_code)]
    pub const fn empty() -> Self {
        PageInfo {
            slab_ptr: ptr::null_mut(),
            class_index: 0,
            arena_index: 0,
        }
    }

    #[allow(dead_code)]
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.slab_ptr.is_null()
    }

    #[inline]
    pub fn is_large(&self) -> bool {
        self.slab_ptr == LARGE_ALLOC_SENTINEL
    }
}
/// Bit 63 of a packed map entry marks a large allocation.
const LARGE_BIT: u64 = 1 << 63;

/// Packs slab metadata into one 64-bit map entry. Layout: bit 63 is
/// `LARGE_BIT` (always clear here), bits 16.. hold `slab_ptr >> 12`,
/// bits 8..16 the class index, and bits 0..8 the arena index. Dropping the
/// low 12 pointer bits assumes `slab_ptr` is at least 4 KiB aligned; a
/// packed value of 0 is reserved to mean "unmapped page".
#[inline]
fn pack_slab(slab_ptr: *mut u8, class_index: u8, arena_index: u8) -> u64 {
    let ptr_bits = (slab_ptr as u64) >> 12;
    (ptr_bits << 16) | ((class_index as u64) << 8) | (arena_index as u64)
}

#[inline]
fn pack_large() -> u64 {
    LARGE_BIT
}
/// Decodes a packed entry; `None` means the page was never registered.
#[inline(always)]
fn unpack(packed: u64) -> Option<PageInfo> {
    if packed == 0 {
        return None;
    }
    if packed & LARGE_BIT != 0 {
        return Some(PageInfo {
            slab_ptr: LARGE_ALLOC_SENTINEL,
            class_index: 0,
            arena_index: 0,
        });
    }
    let arena_index = (packed & 0xFF) as u8;
    let class_index = ((packed >> 8) & 0xFF) as u8;
    let ptr_bits = packed >> 16;
    let slab_ptr = (ptr_bits << 12) as *mut u8;
    Some(PageInfo {
        slab_ptr,
        class_index,
        arena_index,
    })
}
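// A minimal round-trip check of the packing scheme, as a sketch. It assumes
// a 64-bit target and that the crate's test profile links a test harness
// (`cargo test`); the address below is arbitrary, chosen only to be 4 KiB
// aligned.
#[cfg(test)]
mod pack_tests {
    use super::*;

    #[test]
    fn pack_unpack_round_trip() {
        let slab = 0x7f12_3456_7000usize as *mut u8;
        let info = unpack(pack_slab(slab, 3, 1)).expect("non-zero entry");
        assert_eq!(info.slab_ptr, slab);
        assert_eq!(info.class_index, 3);
        assert_eq!(info.arena_index, 1);
        // Large entries decode to the sentinel; zero decodes to None.
        assert!(unpack(pack_large()).expect("large entry").is_large());
        assert!(unpack(0).is_none());
    }
}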
/// Entries per second-level block, indexed by the low `L2_BITS` of the page
/// number.
const L2_SIZE: usize = 2048;
const L2_BITS: usize = 11;
/// First-level table width, indexed by the next `L1_BITS` of the page number.
const L1_BITS: usize = 18;
const L1_SIZE: usize = 1 << L1_BITS;
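// Sanity sketch: the two radix parameters must stay in sync. With 4 KiB
// pages this scheme covers 12 + 11 + 18 = 41 bits of address space (~2 TiB);
// `indices` masks both levels, so pages beyond that would alias lower slots.
const _: () = assert!(L2_SIZE == 1 << L2_BITS);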
#[repr(C)]
struct L2Block {
    entries: [AtomicU64; L2_SIZE],
}

/// Two-level radix map from page number to packed `PageInfo`. The first
/// level is one contiguous array of pointers to lazily mapped `L2Block`s.
pub struct PageMap {
    l1: *mut AtomicPtr<L2Block>,
    initialized: AtomicBool,
}

unsafe impl Send for PageMap {}
unsafe impl Sync for PageMap {}
impl PageMap {
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        PageMap {
            l1: ptr::null_mut(),
            initialized: AtomicBool::new(false),
        }
    }

    /// Maps the first-level table. Must complete before the map is used
    /// concurrently; returns `false` if the mapping fails.
    pub unsafe fn init(&mut self) -> bool {
        let l1_bytes = L1_SIZE * core::mem::size_of::<AtomicPtr<L2Block>>();
        let l1_bytes_aligned = crate::util::align_up(l1_bytes, page_size());
        let mem = platform::map_anonymous(l1_bytes_aligned);
        if mem.is_null() {
            return false;
        }
        self.l1 = mem as *mut AtomicPtr<L2Block>;
        self.initialized.store(true, Ordering::Release);
        true
    }
    /// Splits a pointer's page number into (first-level, second-level)
    /// indices: the low `L2_BITS` select the entry within an `L2Block`, the
    /// next `L1_BITS` select the block itself.
    #[inline(always)]
    fn indices(ptr: *mut u8) -> (usize, usize) {
        let addr = ptr as usize;
        let page = addr >> crate::util::page_shift();
        let l2_idx = page & (L2_SIZE - 1);
        let l1_idx = (page >> L2_BITS) & (L1_SIZE - 1);
        (l1_idx, l2_idx)
    }
    #[inline]
    unsafe fn get_or_alloc_l2(&self, l1_idx: usize) -> *mut L2Block {
        let slot = &*self.l1.add(l1_idx);
        let l2 = slot.load(Ordering::Acquire);
        if !l2.is_null() {
            return l2;
        }
        self.alloc_l2(l1_idx)
    }

    /// Slow path: maps a fresh, zero-filled `L2Block` (so every entry starts
    /// as 0, i.e. "unmapped") and publishes it with a CAS. If another thread
    /// won the race, the fresh block is unmapped and the winner's block is
    /// returned instead. Returns null if the mapping itself fails.
    #[cold]
    unsafe fn alloc_l2(&self, l1_idx: usize) -> *mut L2Block {
        let l2_bytes = core::mem::size_of::<L2Block>();
        let l2_bytes_aligned = crate::util::align_up(l2_bytes, page_size());
        let mem = platform::map_anonymous(l2_bytes_aligned);
        if mem.is_null() {
            return ptr::null_mut();
        }
        let new_l2 = mem as *mut L2Block;
        let slot = &*self.l1.add(l1_idx);
        match slot.compare_exchange(ptr::null_mut(), new_l2, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => new_l2,
            Err(existing) => {
                platform::unmap(mem, l2_bytes_aligned);
                existing
            }
        }
    }
    /// Points every page in `[data_start, data_start + data_size)` at
    /// `slab_ptr`. An L2 allocation failure is tolerated: the affected pages
    /// simply stay unregistered and `lookup` returns `None` for them.
    pub unsafe fn register_slab(
        &self,
        data_start: *mut u8,
        data_size: usize,
        slab_ptr: *mut u8,
        class_index: usize,
        arena_index: usize,
    ) {
        if !self.initialized.load(Ordering::Relaxed) {
            return;
        }
        let packed = pack_slab(slab_ptr, class_index as u8, arena_index as u8);
        let num_pages = data_size.div_ceil(page_size());
        for i in 0..num_pages {
            let page_addr = data_start.add(i * page_size());
            let (l1_idx, l2_idx) = Self::indices(page_addr);
            let l2 = self.get_or_alloc_l2(l1_idx);
            if !l2.is_null() {
                (*l2).entries[l2_idx].store(packed, Ordering::Release);
            }
        }
    }
    /// Marks every page of a large allocation with the large sentinel.
    pub unsafe fn register_large(&self, user_ptr: *mut u8, data_size: usize) {
        if !self.initialized.load(Ordering::Relaxed) {
            return;
        }
        let packed = pack_large();
        let num_pages = data_size.div_ceil(page_size());
        for i in 0..num_pages {
            let page_addr = user_ptr.add(i * page_size());
            let (l1_idx, l2_idx) = Self::indices(page_addr);
            let l2 = self.get_or_alloc_l2(l1_idx);
            if !l2.is_null() {
                (*l2).entries[l2_idx].store(packed, Ordering::Release);
            }
        }
    }
    /// Clears a large allocation's entries back to 0 ("unmapped"). The L2
    /// blocks themselves are never unmapped once allocated.
    pub unsafe fn unregister_large(&self, user_ptr: *mut u8, data_size: usize) {
        if !self.initialized.load(Ordering::Relaxed) {
            return;
        }
        let num_pages = data_size.div_ceil(page_size());
        for i in 0..num_pages {
            let page_addr = user_ptr.add(i * page_size());
            let (l1_idx, l2_idx) = Self::indices(page_addr);
            let slot = &*self.l1.add(l1_idx);
            let l2 = slot.load(Ordering::Acquire);
            if !l2.is_null() {
                (*l2).entries[l2_idx].store(0, Ordering::Release);
            }
        }
    }
    /// Resolves a pointer to the metadata of the page it lies on. Returns
    /// `None` if the map is uninitialized or the page was never registered.
    #[inline(always)]
    pub unsafe fn lookup(&self, ptr: *mut u8) -> Option<PageInfo> {
        let l1 = self.l1;
        if l1.is_null() {
            return None;
        }
        let (l1_idx, l2_idx) = Self::indices(ptr);
        let l2 = (*l1.add(l1_idx)).load(Ordering::Acquire);
        if l2.is_null() {
            return None;
        }
        let packed = (*l2).entries[l2_idx].load(Ordering::Acquire);
        unpack(packed)
    }
}
/// Wrapper so the global map can live in a `static` despite interior
/// mutation through raw pointers; `PageMap` itself is `Sync`.
struct PageMapHolder(core::cell::UnsafeCell<PageMap>);
unsafe impl Sync for PageMapHolder {}

static PAGE_MAP: PageMapHolder = PageMapHolder(core::cell::UnsafeCell::new(PageMap::new()));

/// Initializes the global page map; must complete before any other function
/// in this module is used concurrently.
pub unsafe fn init() -> bool {
    (*PAGE_MAP.0.get()).init()
}

pub unsafe fn register_slab(
    data_start: *mut u8,
    data_size: usize,
    slab_ptr: *mut u8,
    class_index: usize,
    arena_index: usize,
) {
    (*PAGE_MAP.0.get()).register_slab(data_start, data_size, slab_ptr, class_index, arena_index);
}

pub unsafe fn register_large(user_ptr: *mut u8, data_size: usize) {
    (*PAGE_MAP.0.get()).register_large(user_ptr, data_size);
}

pub unsafe fn unregister_large(user_ptr: *mut u8, data_size: usize) {
    (*PAGE_MAP.0.get()).unregister_large(user_ptr, data_size);
}

#[inline(always)]
pub unsafe fn lookup(ptr: *mut u8) -> Option<PageInfo> {
    (*PAGE_MAP.0.get()).lookup(ptr)
}
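// End-to-end sketch of the register/lookup flow, as a test. Assumptions: a
// 64-bit target, and that `platform::map_anonymous` is usable in the test
// environment. The `data`/`slab` addresses are arbitrary page-aligned values
// used only as map keys, never dereferenced; the L1 table mapped by `init`
// is intentionally leaked for the duration of the test.
#[cfg(test)]
mod page_map_tests {
    use super::*;

    #[test]
    fn register_and_lookup_slab_pages() {
        unsafe {
            let mut map = PageMap::new();
            assert!(map.init());
            let data = 0x6000_0000usize as *mut u8;
            let slab = 0x5fff_f000usize as *mut u8;
            // Register two pages of slab data, then resolve an interior
            // pointer on the second page back to its slab.
            map.register_slab(data, 2 * page_size(), slab, 5, 0);
            let info = map.lookup(data.add(page_size() + 128)).expect("registered page");
            assert_eq!(info.slab_ptr, slab);
            assert_eq!(info.class_index, 5);
            assert_eq!(info.arena_index, 0);
            // Pages that were never registered resolve to None.
            assert!(map.lookup(0x7000_0000usize as *mut u8).is_none());
        }
    }
}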