use crate::unwrap::UnwrapOptimized;
use core::alloc::{GlobalAlloc, Layout};
// Install the bump-pointer allocator as the Rust global allocator for this
// wasm module.
#[global_allocator]
static GLOBAL: BumpPointer = BumpPointer;
// Zero-sized handle; all allocator state lives in `LOCAL_ALLOCATOR`.
struct BumpPointer;
// Single mutable allocator state. NOTE(review): bare `static mut` access is
// only sound if this code runs single-threaded (typical for wasm32 guest
// environments) — confirm the target never spawns threads.
static mut LOCAL_ALLOCATOR: BumpPointerLocal = BumpPointerLocal::new();
unsafe impl GlobalAlloc for BumpPointer {
    /// Delegate allocation to the bump-pointer state, converting the raw
    /// address it returns into a pointer.
    #[inline(always)]
    // NOTE(review): taking a `&mut` through `static mut` assumes no
    // concurrent access — presumed single-threaded wasm; confirm.
    #[allow(static_mut_refs)]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let addr = LOCAL_ALLOCATOR.alloc(layout.size(), layout.align());
        core::ptr::with_exposed_provenance_mut(addr)
    }

    /// Bump allocators never free; deallocation is a no-op.
    #[inline(always)]
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}
/// Bump-pointer allocator state over wasm linear memory: allocations advance
/// `cursor`; `limit` marks the end of the memory the allocator owns.
struct BumpPointerLocal {
    // Next free address. Starts at 0; set to the end of linear memory on
    // first use (see `maybe_init_inline`).
    cursor: usize,
    // One past the last usable address; 0 doubles as the "uninitialized"
    // sentinel checked by `maybe_init_inline`.
    limit: usize,
}
impl BumpPointerLocal {
    /// log2 of the wasm page size (pages are 64 KiB).
    const LOG_PAGE_SIZE: usize = 16;
    const PAGE_SIZE: usize = 1 << Self::LOG_PAGE_SIZE;
    /// Index of the wasm linear memory to operate on (memory 0).
    const MEM: u32 = 0;

    /// Const constructor; both fields 0 means "not yet initialized".
    pub const fn new() -> Self {
        Self {
            cursor: 0,
            limit: 0,
        }
    }

    /// Lazily place the heap at the current end of linear memory.
    /// `limit == 0` is the uninitialized sentinel set by `new`.
    #[inline(always)]
    fn maybe_init_inline(&mut self) {
        if self.limit == 0 {
            self.cursor = core::arch::wasm32::memory_size(Self::MEM)
                .checked_mul(Self::PAGE_SIZE)
                .unwrap_optimized();
            self.limit = self.cursor;
        }
    }

    // Out-of-line shim so the (rare) init check doesn't bloat every caller.
    #[inline(never)]
    fn maybe_init(&mut self) {
        self.maybe_init_inline()
    }

    /// Allocate `bytes` at `align` alignment, returning the address.
    /// Fast path: align the cursor and bump it; slow path grows memory.
    #[inline(always)]
    fn alloc(&mut self, bytes: usize, align: usize) -> usize {
        self.maybe_init();
        let start = self
            .cursor
            .checked_next_multiple_of(align)
            .unwrap_optimized();
        let new_cursor = start.checked_add(bytes).unwrap_optimized();
        if new_cursor <= self.limit {
            self.cursor = new_cursor;
            start
        } else {
            self.alloc_slow(bytes, align)
        }
    }

    /// Grow linear memory enough to satisfy the request, then retry `alloc`.
    ///
    /// Fix: size the growth from the actual shortfall (aligned start plus
    /// `bytes`, minus the current limit) rather than from `bytes` alone.
    /// Sizing from `bytes` ignored alignment padding past the cursor, so a
    /// padded request could still miss the freshly grown limit, recurse back
    /// into this slow path, and grow a second full round of pages —
    /// over-allocating memory. With the shortfall computation one grow
    /// always suffices.
    #[inline(always)]
    fn alloc_slow_inline(&mut self, bytes: usize, align: usize) -> usize {
        let start = self
            .cursor
            .checked_next_multiple_of(align)
            .unwrap_optimized();
        let end = start.checked_add(bytes).unwrap_optimized();
        // Only reached from `alloc` when `end > self.limit`, so the
        // shortfall is >= 1 and we grow at least one page.
        let shortfall = end.checked_sub(self.limit).unwrap_optimized();
        let pages = shortfall.div_ceil(Self::PAGE_SIZE);
        if core::arch::wasm32::memory_grow(Self::MEM, pages) == usize::MAX {
            // Growth failed: trap. This allocator never returns null.
            core::arch::wasm32::unreachable();
        }
        let bytes_grown = pages.checked_mul(Self::PAGE_SIZE).unwrap_optimized();
        self.limit = self.limit.checked_add(bytes_grown).unwrap_optimized();
        self.alloc(bytes, align)
    }

    // Out-of-line shim keeping the grow path off the hot `alloc` path.
    #[inline(never)]
    fn alloc_slow(&mut self, bytes: usize, align: usize) -> usize {
        self.alloc_slow_inline(bytes, align)
    }
}