#![doc = include_str!("../README_WASM.md")]
use core::ptr::NonNull;
use crate::{base::binning::Binning, cell::TalcSyncCell, ptr_utils, source::Claim};
/// Binning configuration tuned for WebAssembly targets.
pub struct WasmBinning;

impl Binning for WasmBinning {
    // The availability bit-field is wider on wasm64, where the larger
    // address space uses more bins.
    #[cfg(not(target_arch = "wasm64"))]
    type AvailabilityBitField = u64;
    #[cfg(target_arch = "wasm64")]
    type AvailabilityBitField = [usize; 2];

    /// Maps an allocation size to its bin index.
    ///
    /// Both pointer widths use the same binning strategy; only the
    /// linear-division const parameter differs (2 on wasm32, 4 on wasm64).
    fn size_to_bin(size: usize) -> u32 {
        use crate::base::binning::linear_extent_then_linearly_divided_exponential_binning as bin;

        #[cfg(not(target_arch = "wasm64"))]
        return bin::<2, 8>(size);

        #[cfg(target_arch = "wasm64")]
        return bin::<4, 8>(size);
    }
}
/// A wasm-ready allocator that manages a fixed, caller-provided arena.
pub type WasmArenaTalc = TalcSyncCell<Claim, WasmBinning>;

/// Creates an allocator backed by the memory of `arena`.
///
/// # Safety
///
/// NOTE(review): presumably `arena` must point to memory that remains valid
/// and is not accessed through other means for the allocator's entire
/// lifetime — confirm against `Claim::array`'s contract.
pub const unsafe fn new_wasm_arena_allocator<T, const N: usize>(
arena: *mut [T; N],
) -> WasmArenaTalc {
TalcSyncCell::new_wasm(Claim::array(arena))
}
/// A wasm allocator that acquires memory on demand by growing the module's
/// linear memory (see `WasmGrowAndClaim`).
pub type WasmDynamicTalc = TalcSyncCell<WasmGrowAndClaim, WasmBinning>;

/// Creates a dynamically growing wasm allocator with no memory claimed yet.
pub const fn new_wasm_dynamic_allocator() -> WasmDynamicTalc {
TalcSyncCell::new_wasm(WasmGrowAndClaim)
}
/// A memory source that satisfies each acquisition by growing the wasm
/// linear memory and claiming the newly grown pages as an additional heap.
#[derive(Debug)]
pub struct WasmGrowAndClaim;

unsafe impl crate::source::Source for WasmGrowAndClaim {
    fn acquire<B: Binning>(
        talc: &mut crate::base::Talc<Self, B>,
        layout: core::alloc::Layout,
    ) -> Result<(), ()> {
        // Whole pages needed to cover the request, plus `CHUNK_UNIT` slack
        // (presumably allocator metadata overhead), rounded up to a page.
        let required = layout.size() + crate::base::CHUNK_UNIT + (PAGE_SIZE - 1);
        let delta_pages = required / PAGE_SIZE;

        // `memory_grow` returns the previous memory size in pages, or
        // `usize::MAX` when the instance refuses to grow.
        let prev_pages = memory_grow::<0>(delta_pages);
        if prev_pages == usize::MAX {
            return Err(());
        }

        let base = (prev_pages * PAGE_SIZE) as *mut u8;
        let len = delta_pages * PAGE_SIZE;
        // SAFETY: `base..base + len` is the freshly grown, otherwise unused region.
        unsafe { talc.claim(base, len) }.map(|_| ()).ok_or(())
    }
}
/// A memory source that grows the wasm linear memory and, whenever the newly
/// grown pages are contiguous with its previously claimed heap, extends that
/// heap in place instead of claiming a separate one.
#[derive(Debug, Default)]
pub struct WasmGrowAndExtend {
// End of the most recently claimed/extended heap; `None` until the first
// successful claim.
end: Option<NonNull<u8>>,
}

impl WasmGrowAndExtend {
/// Creates a source with no heap claimed yet.
pub const fn new() -> Self {
Self { end: None }
}
}
unsafe impl crate::source::Source for WasmGrowAndExtend {
    /// Grows the linear memory by enough pages to satisfy `layout`, then
    /// either extends the previously claimed heap (when the new pages are
    /// contiguous with it) or claims the new region as a fresh heap.
    fn acquire<B: Binning>(
        talc: &mut crate::base::Talc<Self, B>,
        layout: core::alloc::Layout,
    ) -> Result<(), ()> {
        // Whole pages needed to cover the request, plus `CHUNK_UNIT` slack
        // (presumably allocator metadata overhead), rounded up to a page.
        let delta_pages = (layout.size() + crate::base::CHUNK_UNIT + (PAGE_SIZE - 1)) / PAGE_SIZE;
        // `memory_grow` returns the previous memory size in pages, or
        // `usize::MAX` when the instance refuses to grow.
        let prev_memory_end = match memory_grow::<0>(delta_pages) {
            usize::MAX => return Err(()),
            prev => prev,
        };
        let new_base = (prev_memory_end * PAGE_SIZE) as *mut u8;
        let new_bytes = delta_pages * PAGE_SIZE;
        let new_end = ptr_utils::saturating_ptr_add(new_base, new_bytes);

        // If the grown pages begin exactly where the managed heap ended,
        // extend that heap in place rather than claiming a new one.
        if let Some(old_end) = talc.source.end.take() {
            if old_end.as_ptr() == new_base {
                // SAFETY: `old_end..new_end` is the freshly grown, unused region.
                let extended_end = unsafe { talc.extend(old_end, new_end) };
                talc.source.end = Some(extended_end);
                return Ok(());
            }
        }

        // Not contiguous (or nothing claimed yet): claim the region as a new heap.
        // Fix: previously a failed claim stored `None` and still returned
        // `Ok(())`, falsely reporting success with no memory made available —
        // the allocator would retry, fail, and grow linear memory again
        // without progress. Report the failure instead, matching
        // `WasmGrowAndClaim`'s behavior.
        // SAFETY: `new_base..new_base + new_bytes` is the freshly grown, unused region.
        match unsafe { talc.claim(new_base, new_bytes) } {
            Some(end) => {
                talc.source.end = Some(end);
                Ok(())
            }
            None => Err(()),
        }
    }
}
/// The WebAssembly page size: 64 KiB.
const PAGE_SIZE: usize = 1024 * 64;

#[cfg(target_arch = "wasm32")]
use core::arch::wasm32::memory_grow;
#[cfg(target_arch = "wasm64")]
use core::arch::wasm64::memory_grow;

/// Stub so the module still type-checks on non-wasm targets (e.g. for docs
/// and tooling); the wasm sources above cannot acquire memory off-wasm, so
/// reaching this at runtime is a logic error.
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
fn memory_grow<const M: usize>(_pages: usize) -> usize {
panic!("not running on wasm32 or wasm64")
}