#![no_std]
extern crate alloc;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
ptr::null_mut,
sync::atomic::{AtomicPtr, Ordering},
};
/// We assume the Wasm page size for purposes of initializing the heap
#[cfg(target_family = "wasm")]
const PAGE_SIZE: usize = 2usize.pow(16);
/// We require all allocations to be at least word-aligned, i.e. 32-byte aligned
/// (a Miden word is four field elements of 8 bytes each)
const MIN_ALIGN: usize = 32;
/// The linear memory heap must not spill over into the region reserved for procedure
/// locals, which begins at 2^30 in Miden's address space.
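/// In Rust's byte-addressable view this is 2^30 / 4 = 0x1000_0000, i.e. the heap
/// ends at the 256 MiB boundary.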
const HEAP_END: *mut u8 = (2usize.pow(30) / 4) as *mut u8;
/// A very simple allocator for Miden SDK-based programs.
///
/// This allocator does not free memory; it simply grows the heap until it runs out of
/// available space for further allocations.
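///
/// # Usage
///
/// A minimal sketch of registering this allocator as the Rust global allocator in a
/// `no_std` Miden program (`ignore`d here, since the allocator only initializes
/// itself on the Wasm target):
///
/// ```ignore
/// #[global_allocator]
/// static ALLOC: BumpAlloc = BumpAlloc::new();
/// ```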
pub struct BumpAlloc {
/// The address at which the available heap begins
top: AtomicPtr<u8>,
}
impl Default for BumpAlloc {
fn default() -> Self {
Self::new()
}
}
impl BumpAlloc {
/// Create a new instance of this allocator
///
/// NOTE: Only one instance of this allocator should ever be used at a time, as it is
/// allocating from the global heap, not from memory reserved for itself.
pub const fn new() -> Self {
Self {
top: AtomicPtr::new(null_mut()),
}
}
/// Initialize the allocator, if it has not yet been initialized
#[cfg(target_family = "wasm")]
fn maybe_init(&self) {
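        // NOTE: Miden programs are single-threaded, which is what makes this
        // non-atomic check-then-store (and the Relaxed orderings throughout) safe.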
let top = self.top.load(Ordering::Relaxed);
if top.is_null() {
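            // Place the initial heap top past all Wasm pages allocated at startup,
            // so heap allocations cannot clobber memory already in use.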
let base = unsafe { heap_base() };
let size = core::arch::wasm32::memory_size(0);
self.top.store(unsafe { base.byte_add(size * PAGE_SIZE) }, Ordering::Relaxed);
}
// TODO: Once treeify issue is fixed, switch to this implementation
/*
let _ = self.top.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |top| {
if top.is_null() {
let base = unsafe { heap_base() };
let size = core::arch::wasm32::memory_size(0);
Some(unsafe { base.byte_add(size * PAGE_SIZE) })
} else {
None
}
});
*/
}
#[cfg(not(target_family = "wasm"))]
fn maybe_init(&self) {}
}
unsafe impl GlobalAlloc for BumpAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Force allocations to be at least word-aligned. This is wasteful of memory, but
// we don't need to be particularly conservative with memory anyway, as most, if not all,
// Miden programs will be relatively short-lived. This makes interop at the Rust/Miden
// call boundary less expensive, as we can typically pass pointers directly to Miden,
// whereas without this alignment guarantee, we would have to set up temporary buffers for
// Miden code to write to, and then copy out of that buffer to whatever Rust type, e.g.
// `Vec`, we actually want.
//
// NOTE: This cannot fail, because we're always meeting minimum alignment requirements
let layout = layout
.align_to(core::cmp::max(layout.align(), MIN_ALIGN))
.unwrap()
.pad_to_align();
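        // For example, a request of `Layout { size: 40, align: 8 }` becomes
        // `Layout { size: 64, align: 32 }` after realignment and padding.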
let size = layout.size();
let align = layout.align();
self.maybe_init();
        // Bump the heap top to reserve `size` bytes for this allocation. The current
        // top must first be rounded up to the requested alignment, as `align` may
        // exceed MIN_ALIGN and the returned pointer must honor it.
        let top = self.top.load(Ordering::Relaxed);
        let ptr = unsafe { top.byte_add(top.align_offset(align)) };
        let available = (HEAP_END as usize).saturating_sub(ptr as usize);
        if available >= size {
            self.top.store(unsafe { ptr.byte_add(size) }, Ordering::Relaxed);
            ptr
        } else {
            null_mut()
        }
        // TODO: Once treeify issue is fixed, switch to this implementation
        /*
        let align_up = |p: *mut u8| unsafe { p.byte_add(p.align_offset(align)) };
        match self.top.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |top| {
            let ptr = align_up(top);
            let available = (HEAP_END as usize).saturating_sub(ptr as usize);
            if available < size {
                None
            } else {
                Some(unsafe { ptr.byte_add(size) })
            }
        }) {
            // `fetch_update` returns the previous top on success; aligning it again
            // yields the start of the reserved allocation
            Ok(prev_top) => align_up(prev_top),
            Err(_) => null_mut(),
        }
        */
}
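    // NOTE: Deallocation is intentionally a no-op: a bump allocator never reclaims
    // memory, which is acceptable given that most Miden programs are short-lived.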
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}
#[cfg(target_family = "wasm")]
#[link(wasm_import_module = "intrinsics::mem")]
extern "C" {
fn heap_base() -> *mut u8;
}