use core::{
alloc::{GlobalAlloc, Layout},
arch::wasm32,
num::NonZeroUsize as NonZero,
};
/// A minimal bump ("arena") allocator for `wasm32` targets: it only ever
/// advances a pointer and never frees, keeping code size tiny.
pub struct MiniAlloc;
// SAFETY: the allocator mutates the `static mut STATE` below with no
// synchronization, so this `Sync` claim is only sound when the wasm
// instance is single-threaded (no threads proposal).
// NOTE(review): confirm this crate is never built with wasm threads.
unsafe impl Sync for MiniAlloc {}
impl MiniAlloc {
    /// Convenience value for `#[global_allocator]` initialization.
    pub const INIT: Self = MiniAlloc;
    /// The wasm page size: 2^16 bytes (64 KiB).
    pub const PAGE_SIZE: usize = 1 << 16;
}
unsafe impl GlobalAlloc for MiniAlloc {
    /// Bump-allocates `layout`, returning null on failure as the
    /// `GlobalAlloc` contract requires.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        alloc_impl(layout).unwrap_or(core::ptr::null_mut())
    }
    // No explicit zeroing: `dealloc` below is a no-op, so memory is never
    // reused, and every region handed out comes from linear memory that
    // was never written. This relies on wasm linear memory (beyond the
    // data segments, which sit below `__heap_base`) being zero-initialized
    // — an invariant of the wasm spec, not visible in this file.
    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        self.alloc(layout)
    }
    // Deliberate no-op: this allocator never reclaims memory; it is all
    // released only when the wasm instance is torn down.
    #[inline]
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}
extern "C" {
    // Symbol placed by the linker (wasm-ld) just past the static data,
    // i.e. the first address available for heap use. Only its *address*
    // is meaningful; its value is never read.
    static __heap_base: u8;
}
/// Allocator state `(-offset, -bound)`: the wrapping negations of the next
/// free address and of the last usable address. `None` until first use.
/// Working in negated space lets the align-up become a single AND mask
/// (see `make_aligned`). Unsynchronized: single-threaded wasm only.
static mut STATE: Option<(NonZero, usize)> = None;
/// Bumps the heap pointer to satisfy `layout`, growing wasm linear memory
/// as needed. Returns `None` when the request cannot be satisfied (size
/// arithmetic would wrap, or `memory_grow` refuses).
///
/// All bookkeeping uses *negated* addresses: if `offset` is the next free
/// address we store `offset.wrapping_neg()`. Masking the negation down to
/// an alignment boundary (`make_aligned`) equals rounding the real address
/// *up*, so alignment costs one AND instead of add/mask/subtract.
///
/// # Safety
/// Must only be called single-threaded: it mutates `STATE` without any
/// synchronization. `layout` must be valid per the `GlobalAlloc` contract.
unsafe fn alloc_impl(layout: Layout) -> Option<*mut u8> {
    // `&raw mut` avoids forming a reference to the `static mut` anywhere
    // except this single controlled access.
    let state_ref = &mut *&raw mut STATE;
    let (neg_offset, neg_bound) = state_ref.get_or_insert_with(|| {
        // First allocation: the heap begins at the linker-provided
        // `__heap_base`; `bound` is the last currently-addressable byte.
        let heap_base = &__heap_base as *const u8 as usize;
        let bound = MiniAlloc::PAGE_SIZE * wasm32::memory_size(0) - 1;
        (
            // SAFETY: `__heap_base` sits above the data segments and is
            // never at address 0, so its negation is nonzero.
            NonZero::new_unchecked(heap_base.wrapping_neg()),
            bound.wrapping_neg(),
        )
    });
    // Round the negated offset down == round the real address up to align.
    let neg_aligned = make_aligned(neg_offset.get(), layout.align());
    // In negated space the bump *subtracts* the size; `checked_sub` doubles
    // as the "request overflows the address space" check.
    let next_neg_offset = neg_aligned.checked_sub(layout.size())?;
    // (-bound) - (-last_allocated_byte) == bytes past the current bound;
    // saturates to 0 when the allocation already fits.
    let bytes_needed = neg_bound.saturating_sub(next_neg_offset + 1);
    if bytes_needed != 0 {
        // Guard ensures bytes_needed >= 1, so div_ceil yields >= 1 page,
        // identical to the classic `1 + (n - 1) / PAGE_SIZE` form.
        let pages_needed = bytes_needed.div_ceil(MiniAlloc::PAGE_SIZE);
        if wasm32::memory_grow(0, pages_needed) == usize::MAX {
            return None; // growth refused; state left untouched
        }
        *neg_bound -= MiniAlloc::PAGE_SIZE * pages_needed;
    }
    // SAFETY: nonzero so long as the heap never ends exactly at the top of
    // the 4 GiB address space — NOTE(review): confirm this invariant; an
    // allocation ending at 2^32 would make this zero and be UB.
    *neg_offset = NonZero::new_unchecked(next_neg_offset);
    // Un-negate to recover the aligned start address of the allocation.
    Some(neg_aligned.wrapping_neg() as *mut u8)
}
/// Rounds `value` down to the nearest multiple of `align`.
///
/// `align` must be a power of two (guaranteed when it originates from a
/// [`Layout`]); masking with `!(align - 1)` then clears the low bits.
/// The identity `!(a - 1) == a.wrapping_neg()` holds for every `usize`,
/// so this matches the two's-complement-negation formulation exactly.
#[inline(always)]
fn make_aligned(value: usize, align: usize) -> usize {
    value & !align.wrapping_sub(1)
}