#![allow(dead_code)]
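//! Shared-memory arena for SKI-style combinator graph reduction, compiled to
//! `wasm32` with threads/atomics. Host and workers map the same linear-memory
//! region; nodes are fixed-size records addressed by `u32` ids. Non-wasm
//! builds get no-op stubs with the same C ABI so native tooling still links.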
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ArenaKind {
/// Leaf holding an `ArenaSym` combinator or IO primitive. Kind 0 means "unallocated/abandoned".
Terminal = 1,
/// Application node: `left` applied to `right`; deduplicated by `allocCons`.
NonTerm = 2,
/// Evaluator stack frame: `left` = previous frame, `right` = application being rebuilt, `sym` = stage.
Continuation = 3,
/// Paused evaluation: `left` = current node, `right` = stack, `sym` = mode, `hash` = remaining step budget.
Suspension = 4,
}
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ArenaSym {
S = 1,
K = 2,
I = 3,
/// `ReadOne k`: dequeue a byte from stdin and apply `k` to its binary numeral.
ReadOne = 4,
/// `WriteOne n`: decode `n` to a byte, enqueue it on stdout, reduce to `n`.
WriteOne = 5,
// 6 and 7 are currently unassigned.
B = 8,
C = 9,
SPrime = 10,
BPrime = 11,
CPrime = 12,
}
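// Non-wasm builds: C-ABI stubs that satisfy the linker but do nothing.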
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn initArena(_cap: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn connectArena(_p: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn allocTerminal(_s: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn allocCons(_l: u32, _r: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn kindOf(_n: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn symOf(_n: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn leftOf(_n: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn rightOf(_n: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn reset() {}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn arenaKernelStep(x: u32) -> u32 {
x
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn hostSubmit(_id: u32) -> u32 {
1
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn hostPull() -> u32 {
u32::MAX
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn workerLoop() {}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn debugGetArenaBaseAddr() -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn getArenaMode() -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn debugCalculateArenaSize(_c: u32) -> u32 {
0
}
#[cfg(not(target_arch = "wasm32"))]
#[no_mangle]
pub extern "C" fn debugLockState() -> u32 {
0xffff_ffff
}
#[cfg(target_arch = "wasm32")]
mod wasm {
use crate::{ArenaKind, ArenaSym};
use core::arch::wasm32;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU32, AtomicU8, Ordering};
pub const EMPTY: u32 = 0xffff_ffff;
const ARENA_MAGIC: u32 = 0x534B_4941; // ASCII "SKIA"
const INITIAL_CAP: u32 = 1 << 20;
const MAX_CAP: u32 = 1 << 27;
const WASM_PAGE_SIZE: usize = 65536;
const RING_ENTRIES: u32 = 1 << 16; // slots per ring (must be a power of two)
const TERM_CACHE_LEN: usize = 10; // memoized node ids for symbols 0..9
#[inline(always)]
const fn align64(x: u32) -> u32 {
(x + 63) & !63
}
mod sys {
use super::*;
#[inline(always)]
pub fn wait32(ptr: &AtomicU32, expected: u32) {
unsafe {
let _ =
wasm32::memory_atomic_wait32(ptr as *const _ as *mut i32, expected as i32, -1);
}
}
#[inline(always)]
pub fn notify(ptr: &AtomicU32, count: u32) {
unsafe {
let _ = wasm32::memory_atomic_notify(ptr as *const _ as *mut i32, count);
}
}
}
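// io_uring-style queue entries: the host submits `Sqe`s (reduce `node_id`
// for up to `max_steps`) and workers post `Cqe`s tagged with the same
// `req_id`, so completions can be matched out of order.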
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Sqe {
pub node_id: u32,
pub req_id: u32,
pub max_steps: u32,
}
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Cqe {
pub node_id: u32,
pub req_id: u32,
pub _pad: u32,
}
#[repr(C)]
struct Slot<T> {
seq: AtomicU32,
payload: UnsafeCell<T>,
}
// Slots are shared across threads; the seq protocol serializes payload access.
unsafe impl<T: Send> Sync for Slot<T> {}
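// Bounded MPMC ring in the style of Dmitry Vyukov's array queue: each slot
// carries a sequence number encoding whether it is ready for a producer or
// a consumer, so enqueue/dequeue need only one CAS on tail/head. The
// `not_full`/`not_empty` counters double as futex words for the blocking
// variants (`memory_atomic_wait32`/`notify`). Slot storage is laid out
// immediately after this header, which is why `init_at` takes a raw pointer.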
#[repr(C, align(64))]
pub struct Ring<T> {
head: AtomicU32,
not_full: AtomicU32,
_pad1: [u8; 56],
tail: AtomicU32,
not_empty: AtomicU32,
_pad2: [u8; 56],
mask: u32,
entries: u32,
_marker: core::marker::PhantomData<T>,
}
impl<T: Copy> Ring<T> {
#[inline(always)]
fn slots_ptr(&self) -> *const Slot<T> {
unsafe { (self as *const Ring<T>).add(1) as *const Slot<T> }
}
#[inline(always)]
unsafe fn slot_at(&self, i: u32) -> &Slot<T> {
&*self.slots_ptr().add((i & self.mask) as usize)
}
#[inline(always)]
pub unsafe fn init_at(ptr: *mut u8, entries_pow2: u32) -> &'static Self {
let ring = &mut *(ptr as *mut Ring<T>);
ring.head.store(0, Ordering::Relaxed);
ring.tail.store(0, Ordering::Relaxed);
ring.not_empty.store(0, Ordering::Relaxed);
ring.not_full.store(0, Ordering::Relaxed);
ring.entries = entries_pow2;
ring.mask = entries_pow2 - 1;
for i in 0..entries_pow2 {
ring.slot_at(i).seq.store(i, Ordering::Relaxed);
}
ring
}
#[inline(always)]
pub fn try_enqueue(&self, item: T) -> bool {
unsafe {
loop {
let t = self.tail.load(Ordering::Relaxed);
let slot = self.slot_at(t);
let s = slot.seq.load(Ordering::Acquire);
let diff = s.wrapping_sub(t);
if diff == 0 {
if self
.tail
.compare_exchange_weak(
t,
t.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
*slot.payload.get() = item;
slot.seq.store(t.wrapping_add(1), Ordering::Release);
self.not_empty.fetch_add(1, Ordering::Release);
sys::notify(&self.not_empty, 1);
return true;
}
} else if (diff as i32) < 0 {
return false;
}
}
}
}
#[inline(always)]
pub fn try_dequeue(&self) -> Option<T> {
unsafe {
loop {
let h = self.head.load(Ordering::Relaxed);
let slot = self.slot_at(h);
let s = slot.seq.load(Ordering::Acquire);
let diff = s.wrapping_sub(h.wrapping_add(1));
if diff == 0 {
if self
.head
.compare_exchange_weak(
h,
h.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
let item = *slot.payload.get();
slot.seq.store(
h.wrapping_add(self.mask).wrapping_add(1),
Ordering::Release,
);
self.not_full.fetch_add(1, Ordering::Release);
sys::notify(&self.not_full, 1);
return Some(item);
}
} else if (diff as i32) < 0 {
return None;
}
}
}
}
#[inline(always)]
pub fn enqueue_blocking(&self, item: T) {
while !self.try_enqueue(item) {
let v = self.not_full.load(Ordering::Acquire);
if self.try_enqueue(item) {
return;
}
sys::wait32(&self.not_full, v);
}
}
#[inline(always)]
pub fn dequeue_blocking(&self) -> T {
loop {
if let Some(x) = self.try_dequeue() {
return x;
}
let v = self.not_empty.load(Ordering::Acquire);
if let Some(x) = self.try_dequeue() {
return x;
}
sys::wait32(&self.not_empty, v);
}
}
}
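// Footprint of one ring: header plus slot array, rounded up to a cache line.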
#[inline(always)]
const fn ring_bytes<T>(entries: u32) -> u32 {
let header = core::mem::size_of::<Ring<T>>() as u32;
let slot = core::mem::size_of::<Slot<T>>() as u32;
align64(header + entries * slot)
}
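// Byte offsets of every region inside the shared arena, in layout order:
// rings first (their sizes are independent of node capacity), then the
// per-node column arrays (kind/sym are 1 byte per node, the rest 4 bytes),
// then the hash-cons bucket heads and the small terminal cache.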
struct SabLayout {
offset_sq: u32,
offset_cq: u32,
offset_stdin: u32,
offset_stdout: u32,
offset_stdin_wait: u32,
offset_kind: u32,
offset_sym: u32,
offset_left_id: u32,
offset_right_id: u32,
offset_hash32: u32,
offset_next_idx: u32,
offset_buckets: u32,
offset_term_cache: u32,
total_size: u32,
}
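// The same layout, persisted at offset 0 of the arena so foreign workers
// can `connectArena` and locate every region. `resize_seq` is the seqlock
// word; `top` is the bump-allocation cursor.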
#[repr(C, align(64))]
struct SabHeader {
magic: u32,
ring_entries: u32,
ring_mask: u32,
offset_sq: u32,
offset_cq: u32,
offset_stdin: u32,
offset_stdout: u32,
offset_stdin_wait: u32,
offset_kind: u32,
offset_sym: u32,
offset_left_id: u32,
offset_right_id: u32,
offset_hash32: u32,
offset_next_idx: u32,
offset_buckets: u32,
offset_term_cache: u32,
capacity: u32,
bucket_mask: u32,
resize_seq: AtomicU32,
top: AtomicU32,
}
impl SabHeader {
fn layout(capacity: u32) -> SabLayout {
let header_size = core::mem::size_of::<SabHeader>() as u32;
let offset_sq = align64(header_size);
let offset_cq = align64(offset_sq + ring_bytes::<Sqe>(RING_ENTRIES));
let offset_stdin = align64(offset_cq + ring_bytes::<Cqe>(RING_ENTRIES));
let offset_stdout = align64(offset_stdin + ring_bytes::<u8>(RING_ENTRIES));
let offset_stdin_wait = align64(offset_stdout + ring_bytes::<u8>(RING_ENTRIES));
let offset_kind = align64(offset_stdin_wait + ring_bytes::<u32>(RING_ENTRIES));
let offset_sym = offset_kind + capacity;
let offset_left_id = align64(offset_sym + capacity);
let offset_right_id = offset_left_id + 4 * capacity;
let offset_hash32 = offset_right_id + 4 * capacity;
let offset_next_idx = offset_hash32 + 4 * capacity;
let offset_buckets = align64(offset_next_idx + 4 * capacity);
let offset_term_cache = offset_buckets + 4 * capacity;
let total_size = offset_term_cache + (TERM_CACHE_LEN as u32) * 4;
SabLayout {
offset_sq,
offset_cq,
offset_stdin,
offset_stdout,
offset_stdin_wait,
offset_kind,
offset_sym,
offset_left_id,
offset_right_id,
offset_hash32,
offset_next_idx,
offset_buckets,
offset_term_cache,
total_size,
}
}
}
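// Per-wasm-instance globals. ARENA_MODE 0 = lazily self-initialized,
// 1 = host-managed via initArena/connectArena. The BIN_CTOR_* statics
// memoize the node ids of the binary-numeral constructors built below.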
static mut ARENA_BASE_ADDR: u32 = 0;
static mut ARENA_MODE: u32 = 0;
static BIN_CTOR_BZ: AtomicU32 = AtomicU32::new(EMPTY);
static BIN_CTOR_B0: AtomicU32 = AtomicU32::new(EMPTY);
static BIN_CTOR_B1: AtomicU32 = AtomicU32::new(EMPTY);
#[inline(always)]
unsafe fn ensure_arena() {
if ARENA_BASE_ADDR != 0 {
return;
}
// A zero base in host-managed mode means the header was never handed
// over (or was torn down); there is no safe way to continue.
if ARENA_MODE == 1 {
wasm32::unreachable();
}
let ptr = allocate_raw_arena(INITIAL_CAP);
if ptr.is_null() {
wasm32::unreachable();
}
ARENA_MODE = 0;
}
#[inline(always)]
unsafe fn header() -> &'static SabHeader {
&*(ARENA_BASE_ADDR as *const SabHeader)
}
#[inline(always)]
unsafe fn header_mut() -> &'static mut SabHeader {
&mut *(ARENA_BASE_ADDR as *mut SabHeader)
}
#[inline(always)]
unsafe fn sq_ring() -> &'static Ring<Sqe> {
let h = header();
&*((ARENA_BASE_ADDR + h.offset_sq) as *const Ring<Sqe>)
}
#[inline(always)]
unsafe fn cq_ring() -> &'static Ring<Cqe> {
let h = header();
&*((ARENA_BASE_ADDR + h.offset_cq) as *const Ring<Cqe>)
}
#[inline(always)]
unsafe fn stdin_ring() -> &'static Ring<u8> {
let h = header();
&*((ARENA_BASE_ADDR + h.offset_stdin) as *const Ring<u8>)
}
#[inline(always)]
unsafe fn stdout_ring() -> &'static Ring<u8> {
let h = header();
&*((ARENA_BASE_ADDR + h.offset_stdout) as *const Ring<u8>)
}
#[inline(always)]
unsafe fn stdin_wait_ring() -> &'static Ring<u32> {
let h = header();
&*((ARENA_BASE_ADDR + h.offset_stdin_wait) as *const Ring<u32>)
}
#[inline(always)]
unsafe fn kind_ptr() -> *mut AtomicU8 {
(ARENA_BASE_ADDR + header().offset_kind) as *mut AtomicU8
}
#[inline(always)]
unsafe fn sym_ptr() -> *mut AtomicU8 {
(ARENA_BASE_ADDR + header().offset_sym) as *mut AtomicU8
}
#[inline(always)]
unsafe fn left_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_left_id) as *mut AtomicU32
}
#[inline(always)]
unsafe fn right_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_right_id) as *mut AtomicU32
}
#[inline(always)]
unsafe fn hash_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_hash32) as *mut AtomicU32
}
#[inline(always)]
unsafe fn next_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_next_idx) as *mut AtomicU32
}
#[inline(always)]
unsafe fn buckets_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_buckets) as *mut AtomicU32
}
#[inline(always)]
unsafe fn term_cache_ptr() -> *mut AtomicU32 {
(ARENA_BASE_ADDR + header().offset_term_cache) as *mut AtomicU32
}
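// 32-bit avalanche mixer (multiply-xorshift constants in the style of the
// well-known "lowbias32" family) plus a golden-ratio combine for pairs.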
fn avalanche32(mut x: u32) -> u32 {
x ^= x >> 16;
x = x.wrapping_mul(0x7feb_352d);
x ^= x >> 15;
x = x.wrapping_mul(0x846c_a68b);
x ^= x >> 16;
x
}
const GOLD: u32 = 0x9e37_79b9;
fn mix(a: u32, b: u32) -> u32 {
avalanche32(a ^ b.wrapping_mul(GOLD))
}
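// Seqlock protocol around arena resizing: `resize_seq` is even while the
// layout is stable and odd while `grow` is moving regions. Readers snapshot
// the sequence, read optimistically, and retry if it changed. POISON_SEQ
// marks an unrecoverable failure (capacity or memory exhausted) and traps
// every participant.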
const POISON_SEQ: u32 = 0xffff_ffff;
#[inline(always)]
fn enter_stable() -> (u32, &'static SabHeader) {
unsafe {
let h = header();
loop {
let seq = h.resize_seq.load(Ordering::Acquire);
if seq == POISON_SEQ {
core::arch::wasm32::unreachable();
}
if seq & 1 == 1 {
core::hint::spin_loop();
continue;
}
return (seq, h);
}
}
}
#[inline(always)]
fn wait_resize_stable() {
unsafe {
let h = header();
loop {
let seq = h.resize_seq.load(Ordering::Acquire);
if seq == POISON_SEQ {
core::arch::wasm32::unreachable();
}
if (seq & 1) == 0 {
return;
}
core::hint::spin_loop();
}
}
}
#[inline(always)]
fn check_stable(seq: u32) -> bool {
unsafe { header().resize_seq.load(Ordering::Acquire) == seq }
}
unsafe fn zero_region(start: u32, len: u32) {
core::ptr::write_bytes((ARENA_BASE_ADDR + start) as *mut u8, 0, len as usize);
}
unsafe fn init_header(capacity: u32) {
let layout = SabHeader::layout(capacity);
let h = &mut *(ARENA_BASE_ADDR as *mut SabHeader);
h.magic = ARENA_MAGIC;
h.ring_entries = RING_ENTRIES;
h.ring_mask = RING_ENTRIES - 1;
h.offset_sq = layout.offset_sq;
h.offset_cq = layout.offset_cq;
h.offset_stdin = layout.offset_stdin;
h.offset_stdout = layout.offset_stdout;
h.offset_stdin_wait = layout.offset_stdin_wait;
h.offset_kind = layout.offset_kind;
h.offset_sym = layout.offset_sym;
h.offset_left_id = layout.offset_left_id;
h.offset_right_id = layout.offset_right_id;
h.offset_hash32 = layout.offset_hash32;
h.offset_next_idx = layout.offset_next_idx;
h.offset_buckets = layout.offset_buckets;
h.offset_term_cache = layout.offset_term_cache;
h.capacity = capacity;
h.bucket_mask = capacity - 1;
h.resize_seq.store(0, Ordering::Relaxed);
h.top.store(0, Ordering::Relaxed);
zero_region(
(core::mem::size_of::<SabHeader>()) as u32,
layout.total_size - core::mem::size_of::<SabHeader>() as u32,
);
Ring::<Sqe>::init_at((ARENA_BASE_ADDR + h.offset_sq) as *mut u8, RING_ENTRIES);
Ring::<Cqe>::init_at((ARENA_BASE_ADDR + h.offset_cq) as *mut u8, RING_ENTRIES);
Ring::<u8>::init_at((ARENA_BASE_ADDR + h.offset_stdin) as *mut u8, RING_ENTRIES);
Ring::<u8>::init_at((ARENA_BASE_ADDR + h.offset_stdout) as *mut u8, RING_ENTRIES);
Ring::<u32>::init_at(
(ARENA_BASE_ADDR + h.offset_stdin_wait) as *mut u8,
RING_ENTRIES,
);
let buckets = buckets_ptr();
for i in 0..capacity as usize {
buckets.add(i).write(AtomicU32::new(EMPTY));
}
let cache = term_cache_ptr();
for i in 0..TERM_CACHE_LEN {
cache.add(i).write(AtomicU32::new(EMPTY));
}
}
unsafe fn allocate_raw_arena(capacity: u32) -> *mut SabHeader {
let layout = SabHeader::layout(capacity);
let pages_needed = (layout.total_size as usize + WASM_PAGE_SIZE - 1) / WASM_PAGE_SIZE;
let old_pages = wasm32::memory_grow(0, pages_needed);
if old_pages == usize::MAX {
return core::ptr::null_mut();
}
let base_addr = (old_pages * WASM_PAGE_SIZE) as u32;
ARENA_BASE_ADDR = base_addr;
init_header(capacity);
base_addr as *mut SabHeader
}
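// Public C-ABI surface. initArena allocates a fresh arena and returns its
// base address (0 = bad capacity argument, 1 = out of memory; neither is a
// valid base, since the arena is 64-byte aligned). connectArena attaches
// this instance to an arena created elsewhere (1 = success, 0 = misaligned
// pointer, 5 = bad magic).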
#[no_mangle]
pub extern "C" fn initArena(initial_capacity: u32) -> u32 {
if initial_capacity < 1024
|| initial_capacity > MAX_CAP
|| !initial_capacity.is_power_of_two()
{
return 0;
}
unsafe {
if ARENA_BASE_ADDR != 0 {
return ARENA_BASE_ADDR;
}
let ptr = allocate_raw_arena(initial_capacity);
if ptr.is_null() {
return 1;
}
ARENA_MODE = 1;
ARENA_BASE_ADDR
}
}
#[no_mangle]
pub extern "C" fn connectArena(ptr_addr: u32) -> u32 {
if ptr_addr == 0 || ptr_addr % 64 != 0 {
return 0;
}
unsafe {
ARENA_BASE_ADDR = ptr_addr;
ARENA_MODE = 1;
let h = header();
if h.magic != ARENA_MAGIC {
return 5;
}
1
}
}
#[no_mangle]
pub extern "C" fn reset() {
unsafe {
ensure_arena();
let h = header_mut();
h.top.store(0, Ordering::Release);
let buckets = buckets_ptr();
for i in 0..h.capacity as usize {
(*buckets.add(i)).store(EMPTY, Ordering::Release);
}
let cache = term_cache_ptr();
for i in 0..TERM_CACHE_LEN {
(*cache.add(i)).store(EMPTY, Ordering::Release);
}
BIN_CTOR_BZ.store(EMPTY, Ordering::Release);
BIN_CTOR_B0.store(EMPTY, Ordering::Release);
BIN_CTOR_B1.store(EMPTY, Ordering::Release);
// Force the seqlock even (unlocked) in case a resize was abandoned midway.
h.resize_seq
.store(h.resize_seq.load(Ordering::Relaxed) & !1, Ordering::Release);
}
}
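// Field accessors all follow one pattern: enter the seqlock, bounds-check
// the id against the current capacity, read, fence, and retry if a resize
// intervened. Out-of-range ids read as 0.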
#[no_mangle]
pub extern "C" fn kindOf(n: u32) -> u32 {
unsafe {
ensure_arena();
loop {
let (seq, h) = enter_stable();
let cap = h.capacity;
if n >= cap {
return 0;
}
let val = (*kind_ptr().add(n as usize)).load(Ordering::Acquire) as u32;
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
return val;
}
}
}
}
#[no_mangle]
pub extern "C" fn symOf(n: u32) -> u32 {
unsafe {
ensure_arena();
loop {
let (seq, h) = enter_stable();
if n >= h.capacity {
return 0;
}
let val = (*sym_ptr().add(n as usize)).load(Ordering::Acquire) as u32;
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
return val;
}
}
}
}
#[no_mangle]
pub extern "C" fn leftOf(n: u32) -> u32 {
unsafe {
ensure_arena();
loop {
let (seq, h) = enter_stable();
if n >= h.capacity {
return 0;
}
let val = (*left_ptr().add(n as usize)).load(Ordering::Acquire);
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
return val;
}
}
}
}
#[no_mangle]
pub extern "C" fn rightOf(n: u32) -> u32 {
unsafe {
ensure_arena();
loop {
let (seq, h) = enter_stable();
if n >= h.capacity {
return 0;
}
let val = (*right_ptr().add(n as usize)).load(Ordering::Acquire);
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
return val;
}
}
}
}
#[no_mangle]
pub extern "C" fn allocTerminal(sym: u32) -> u32 {
unsafe {
ensure_arena();
let h = header();
if sym < TERM_CACHE_LEN as u32 {
let cached = (*term_cache_ptr().add(sym as usize)).load(Ordering::Acquire);
if cached != EMPTY {
return cached;
}
}
loop {
wait_resize_stable();
let id = h.top.fetch_add(1, Ordering::AcqRel);
if id >= h.capacity {
grow();
continue;
}
(*kind_ptr().add(id as usize)).store(ArenaKind::Terminal as u8, Ordering::Release);
(*sym_ptr().add(id as usize)).store(sym as u8, Ordering::Release);
(*hash_ptr().add(id as usize)).store(sym, Ordering::Release);
if sym < TERM_CACHE_LEN as u32 {
(*term_cache_ptr().add(sym as usize)).store(id, Ordering::Release);
}
return id;
}
}
}
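// Hash-consed application nodes. Phase 1 looks the (l, r) pair up in the
// bucket chains optimistically under the seqlock. Phase 2 bump-allocates a
// node and pushes it onto its bucket with a CAS; if the CAS loses, the
// chain is re-scanned and, when a racing thread already published the same
// pair, the freshly allocated id is abandoned (its kind reset to 0) rather
// than reused.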
#[no_mangle]
pub extern "C" fn allocCons(l: u32, r: u32) -> u32 {
unsafe {
ensure_arena();
let hl = loop {
let (seq, h) = enter_stable();
if l >= h.capacity {
if !check_stable(seq) {
continue;
}
return EMPTY;
}
let val = (*hash_ptr().add(l as usize)).load(Ordering::Acquire);
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
break val;
}
};
let hr = loop {
let (seq, h) = enter_stable();
if r >= h.capacity {
if !check_stable(seq) {
continue;
}
return EMPTY;
}
let val = (*hash_ptr().add(r as usize)).load(Ordering::Acquire);
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
break val;
}
};
let hval = mix(hl, hr);
// Phase 1: optimistic lookup. `bucket_mask` is `capacity - 1`, so the
// bucket index is always in range.
loop {
let (seq, h) = enter_stable();
let bucket_idx = (hval & h.bucket_mask) as usize;
let buckets = buckets_ptr();
let next = next_ptr();
let mut cur = (*buckets.add(bucket_idx)).load(Ordering::Acquire);
let mut found = EMPTY;
while cur != EMPTY {
if cur >= h.capacity {
break;
}
let k = (*kind_ptr().add(cur as usize)).load(Ordering::Acquire);
if k == ArenaKind::NonTerm as u8 {
let ch = (*hash_ptr().add(cur as usize)).load(Ordering::Acquire);
if ch == hval {
let cl = (*left_ptr().add(cur as usize)).load(Ordering::Acquire);
let cr = (*right_ptr().add(cur as usize)).load(Ordering::Acquire);
if cl == l && cr == r {
found = cur;
break;
}
}
}
cur = (*next.add(cur as usize)).load(Ordering::Acquire);
}
if check_stable(seq) {
if found != EMPTY {
return found;
}
break;
}
};
loop {
wait_resize_stable();
let h = header();
let buckets = buckets_ptr();
let current_mask = h.bucket_mask;
let b = (hval & current_mask) as usize;
let id = h.top.fetch_add(1, Ordering::AcqRel);
if id >= h.capacity {
grow();
continue;
}
(*kind_ptr().add(id as usize)).store(ArenaKind::NonTerm as u8, Ordering::Release);
(*left_ptr().add(id as usize)).store(l, Ordering::Release);
(*right_ptr().add(id as usize)).store(r, Ordering::Release);
(*hash_ptr().add(id as usize)).store(hval, Ordering::Release);
let next = next_ptr();
loop {
let head = (*buckets.add(b)).load(Ordering::Acquire);
(*next.add(id as usize)).store(head, Ordering::Relaxed);
if (*buckets.add(b))
.compare_exchange(head, id, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return id;
}
let mut cur2 = (*buckets.add(b)).load(Ordering::Acquire);
while cur2 != EMPTY {
let ck2 = (*kind_ptr().add(cur2 as usize)).load(Ordering::Acquire);
if ck2 != ArenaKind::NonTerm as u8 {
cur2 = (*next.add(cur2 as usize)).load(Ordering::Acquire);
continue;
}
let ch2 = (*hash_ptr().add(cur2 as usize)).load(Ordering::Acquire);
if ch2 == hval {
let cl2 = (*left_ptr().add(cur2 as usize)).load(Ordering::Acquire);
let cr2 = (*right_ptr().add(cur2 as usize)).load(Ordering::Acquire);
if cl2 == l && cr2 == r {
(*kind_ptr().add(id as usize)).store(0, Ordering::Release);
return cur2;
}
}
cur2 = (*next.add(cur2 as usize)).load(Ordering::Acquire);
}
}
}
}
}
#[inline(always)]
unsafe fn alloc_generic(kind: u8, sym: u8, left: u32, right: u32, hash: u32) -> u32 {
ensure_arena();
let h = header();
loop {
wait_resize_stable();
let id = h.top.fetch_add(1, Ordering::AcqRel);
if id >= h.capacity {
grow();
continue;
}
(*sym_ptr().add(id as usize)).store(sym, Ordering::Release);
(*left_ptr().add(id as usize)).store(left, Ordering::Release);
(*right_ptr().add(id as usize)).store(right, Ordering::Release);
(*hash_ptr().add(id as usize)).store(hash, Ordering::Release);
(*kind_ptr().add(id as usize)).store(kind, Ordering::Release);
return id;
}
}
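// Doubles the arena. This is the writer side of the seqlock: CAS
// `resize_seq` from even to odd, grow linear memory if needed (wasm memory
// never moves, so ARENA_BASE_ADDR stays valid), then memmove each column
// array to its new offset, highest region first so no source is clobbered
// before it has been copied. Buckets are rebuilt from scratch, and `top`
// is clamped back to the old capacity so ids that overshot during the race
// are simply retried by their allocators.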
fn grow() {
unsafe {
let h = header_mut();
let mut expected = h.resize_seq.load(Ordering::Acquire);
loop {
if expected & 1 == 1 {
core::hint::spin_loop();
expected = h.resize_seq.load(Ordering::Acquire);
continue;
}
if h.resize_seq
.compare_exchange(expected, expected | 1, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
break;
}
expected = h.resize_seq.load(Ordering::Acquire);
}
let old_cap = h.capacity;
let old_offset_kind = h.offset_kind;
let old_offset_sym = h.offset_sym;
let old_offset_left = h.offset_left_id;
let old_offset_right = h.offset_right_id;
let old_offset_hash = h.offset_hash32;
let old_offset_next = h.offset_next_idx;
let old_offset_term_cache = h.offset_term_cache;
let old_top = h.top.load(Ordering::Acquire);
if old_cap >= MAX_CAP {
h.resize_seq.store(POISON_SEQ, Ordering::Release);
core::arch::wasm32::unreachable();
}
let new_cap = (old_cap * 2).min(MAX_CAP);
let layout = SabHeader::layout(new_cap);
let needed_bytes = ARENA_BASE_ADDR as usize + layout.total_size as usize;
let current_bytes = wasm32::memory_size(0) * WASM_PAGE_SIZE;
if needed_bytes > current_bytes {
let extra = needed_bytes - current_bytes;
let pages = (extra + WASM_PAGE_SIZE - 1) / WASM_PAGE_SIZE;
let res = wasm32::memory_grow(0, pages);
if res == usize::MAX {
h.resize_seq.store(POISON_SEQ, Ordering::Release);
core::arch::wasm32::unreachable();
}
}
h.capacity = new_cap;
h.bucket_mask = new_cap - 1;
h.offset_sq = layout.offset_sq;
h.offset_cq = layout.offset_cq;
h.offset_stdin = layout.offset_stdin;
h.offset_stdout = layout.offset_stdout;
h.offset_stdin_wait = layout.offset_stdin_wait;
h.offset_kind = layout.offset_kind;
h.offset_sym = layout.offset_sym;
h.offset_left_id = layout.offset_left_id;
h.offset_right_id = layout.offset_right_id;
h.offset_hash32 = layout.offset_hash32;
h.offset_next_idx = layout.offset_next_idx;
h.offset_buckets = layout.offset_buckets;
h.offset_term_cache = layout.offset_term_cache;
let count = old_top.min(old_cap);
h.top.store(count, Ordering::Release);
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_term_cache) as *const u8,
(ARENA_BASE_ADDR + h.offset_term_cache) as *mut u8,
(TERM_CACHE_LEN * 4) as usize,
);
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_next) as *const u8,
(ARENA_BASE_ADDR + h.offset_next_idx) as *mut u8,
(count as usize) * 4,
);
if new_cap > old_cap {
zero_region(h.offset_next_idx + old_cap * 4, (new_cap - old_cap) * 4);
}
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_hash) as *const u8,
(ARENA_BASE_ADDR + h.offset_hash32) as *mut u8,
(count as usize) * 4,
);
if new_cap > old_cap {
zero_region(h.offset_hash32 + old_cap * 4, (new_cap - old_cap) * 4);
}
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_right) as *const u8,
(ARENA_BASE_ADDR + h.offset_right_id) as *mut u8,
(count as usize) * 4,
);
if new_cap > old_cap {
zero_region(h.offset_right_id + old_cap * 4, (new_cap - old_cap) * 4);
}
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_left) as *const u8,
(ARENA_BASE_ADDR + h.offset_left_id) as *mut u8,
(count as usize) * 4,
);
if new_cap > old_cap {
zero_region(h.offset_left_id + old_cap * 4, (new_cap - old_cap) * 4);
}
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_sym) as *const u8,
(ARENA_BASE_ADDR + h.offset_sym) as *mut u8,
count as usize,
);
if new_cap > old_cap {
zero_region(h.offset_sym + old_cap, new_cap - old_cap);
}
core::ptr::copy(
(ARENA_BASE_ADDR + old_offset_kind) as *const u8,
(ARENA_BASE_ADDR + h.offset_kind) as *mut u8,
count as usize,
);
if new_cap > old_cap {
zero_region(h.offset_kind + old_cap, new_cap - old_cap);
}
let buckets = buckets_ptr();
let next = next_ptr();
for i in 0..new_cap as usize {
(*buckets.add(i)).store(EMPTY, Ordering::Release);
}
for i in 0..count {
let k = (*kind_ptr().add(i as usize)).load(Ordering::Acquire);
if k != ArenaKind::NonTerm as u8 {
continue;
}
let hv = (*hash_ptr().add(i as usize)).load(Ordering::Acquire);
let b = (hv & h.bucket_mask) as usize;
loop {
let head = (*buckets.add(b)).load(Ordering::Acquire);
(*next.add(i as usize)).store(head, Ordering::Relaxed);
if (*buckets.add(b))
.compare_exchange(head, i, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
break;
}
}
}
h.resize_seq.fetch_add(1, Ordering::Release);
}
}
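// The evaluator is an explicit-stack graph rewriter. A Continuation node
// records which side of an application we descended into (STAGE_LEFT /
// STAGE_RIGHT); `mode` says whether we are walking down (MODE_DESCEND),
// rebuilding on the way up (MODE_RETURN), or parked on empty stdin
// (MODE_IO_WAIT).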
const STAGE_LEFT: u8 = 0;
const STAGE_RIGHT: u8 = 1;
const MODE_DESCEND: u8 = 0;
const MODE_RETURN: u8 = 1;
const MODE_IO_WAIT: u8 = 2;
#[inline(always)]
unsafe fn alloc_continuation(parent: u32, target: u32, stage: u8) -> u32 {
alloc_generic(ArenaKind::Continuation as u8, stage, parent, target, 0)
}
#[inline(always)]
unsafe fn alloc_suspension(curr: u32, stack: u32, mode: u8, remaining_steps: u32) -> u32 {
alloc_generic(
ArenaKind::Suspension as u8,
mode,
curr,
stack,
remaining_steps,
)
}
#[inline(always)]
unsafe fn alloc_app(left: u32, right: u32) -> u32 {
allocCons(left, right)
}
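// Binary numerals are built from three closed combinator terms: a zero
// (`bz`) and two prefix constructors (`b0`, `b1`) that each prepend one
// bit. `alloc_bin_byte` and `decode_bin_u8` below treat the outermost
// application as the least significant bit.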
#[inline(always)]
unsafe fn alloc_bin_ctor_bz() -> u32 {
let b = allocTerminal(ArenaSym::B as u32);
let i = allocTerminal(ArenaSym::I as u32);
let k = allocTerminal(ArenaSym::K as u32);
let bi = alloc_app(b, i);
let bik = alloc_app(bi, k);
let b_bik = alloc_app(b, bik);
alloc_app(b_bik, k)
}
#[inline(always)]
unsafe fn alloc_bin_ctor_b0() -> u32 {
let p = allocTerminal(ArenaSym::SPrime as u32);
let s = allocTerminal(ArenaSym::S as u32);
let k = allocTerminal(ArenaSym::K as u32);
let b = allocTerminal(ArenaSym::B as u32);
let i = allocTerminal(ArenaSym::I as u32);
let ps = alloc_app(p, s);
let pps = alloc_app(p, ps);
let ppps = alloc_app(p, pps);
let bi = alloc_app(b, i);
let bik = alloc_app(bi, k);
let k_bik = alloc_app(k, bik);
let k_k_bik = alloc_app(k, k_bik);
let left = alloc_app(ppps, k_k_bik);
let b_bi_k = alloc_app(b, bik);
let b_bi_k_k = alloc_app(b_bi_k, k);
let b_b_bi_k_k = alloc_app(b, b_bi_k_k);
let right = alloc_app(b_b_bi_k_k, k);
alloc_app(left, right)
}
#[inline(always)]
unsafe fn alloc_bin_ctor_b1() -> u32 {
let p = allocTerminal(ArenaSym::SPrime as u32);
let s = allocTerminal(ArenaSym::S as u32);
let k = allocTerminal(ArenaSym::K as u32);
let b = allocTerminal(ArenaSym::B as u32);
let i = allocTerminal(ArenaSym::I as u32);
let ps = alloc_app(p, s);
let pps = alloc_app(p, ps);
let ppps = alloc_app(p, pps);
let ki = alloc_app(k, i);
let k_ki = alloc_app(k, ki);
let k_k_ki = alloc_app(k, k_ki);
let left = alloc_app(ppps, k_k_ki);
let bi = alloc_app(b, i);
let bik = alloc_app(bi, k);
let b_bi_k = alloc_app(b, bik);
let b_bi_k_k = alloc_app(b_bi_k, k);
let b_b_bi_k_k = alloc_app(b, b_bi_k_k);
let right = alloc_app(b_b_bi_k_k, k);
alloc_app(left, right)
}
#[inline(always)]
unsafe fn ensure_bin_ctors() -> (u32, u32, u32) {
let bz = BIN_CTOR_BZ.load(Ordering::Acquire);
if bz != EMPTY {
return (
bz,
BIN_CTOR_B0.load(Ordering::Acquire),
BIN_CTOR_B1.load(Ordering::Acquire),
);
}
let b0 = alloc_bin_ctor_b0();
let b1 = alloc_bin_ctor_b1();
let bz_new = alloc_bin_ctor_bz();
BIN_CTOR_B0.store(b0, Ordering::Release);
BIN_CTOR_B1.store(b1, Ordering::Release);
BIN_CTOR_BZ.store(bz_new, Ordering::Release);
(bz_new, b0, b1)
}
#[inline(always)]
unsafe fn alloc_bin_byte(value: u8) -> u32 {
let (bz, b0, b1) = ensure_bin_ctors();
if value == 0 {
return bz;
}
let mut cur = bz;
let mut n = value;
let mut bits: [u8; 8] = [0; 8];
let mut len = 0usize;
while n > 0 {
bits[len] = n & 1;
len += 1;
n >>= 1;
}
let mut i = len;
while i > 0 {
i -= 1;
let ctor = if bits[i] == 0 { b0 } else { b1 };
cur = alloc_app(ctor, cur);
}
cur
}
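// Structural equality up to a fixed stack depth. Hash-consing makes equal
// applications pointer-equal in the common case; the traversal handles
// duplicates created by racing allocators. Depth overflow conservatively
// reports "not equal".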
#[inline(always)]
unsafe fn expr_equiv(a: u32, b: u32) -> bool {
if a == b {
return true;
}
const MAX_EQUIV_STACK: usize = 128;
let mut stack_a = [0u32; MAX_EQUIV_STACK];
let mut stack_b = [0u32; MAX_EQUIV_STACK];
let mut sp = 0usize;
stack_a[sp] = a;
stack_b[sp] = b;
sp += 1;
while sp > 0 {
sp -= 1;
let x = stack_a[sp];
let y = stack_b[sp];
if x == y {
continue;
}
let kx = kindOf(x);
let ky = kindOf(y);
if kx != ky {
return false;
}
if kx == ArenaKind::Terminal as u32 {
if symOf(x) != symOf(y) {
return false;
}
continue;
}
if kx == ArenaKind::NonTerm as u32 {
if sp + 2 > MAX_EQUIV_STACK {
return false;
}
stack_a[sp] = leftOf(x);
stack_b[sp] = leftOf(y);
sp += 1;
stack_a[sp] = rightOf(x);
stack_b[sp] = rightOf(y);
sp += 1;
continue;
}
if symOf(x) != symOf(y) || leftOf(x) != leftOf(y) || rightOf(x) != rightOf(y) {
return false;
}
}
true
}
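// Inverse of alloc_bin_byte: peel up to eight b0/b1 constructors from the
// outside in (LSB first), stopping at the zero term or anything malformed.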
#[inline(always)]
unsafe fn decode_bin_u8(expr: u32) -> u8 {
let (bz, b0, b1) = ensure_bin_ctors();
let mut cur = expr;
let mut result: u8 = 0;
let mut bit: u8 = 1;
for _ in 0..8 {
if expr_equiv(cur, bz) {
break;
}
if kindOf(cur) != ArenaKind::NonTerm as u32 {
break;
}
let l = leftOf(cur);
let r = rightOf(cur);
if expr_equiv(l, b0) {
cur = r;
} else if expr_equiv(l, b1) {
result |= bit;
cur = r;
} else {
break;
}
bit = bit.wrapping_shl(1); // the 8th shift would overflow u8 in debug builds
}
result
}
#[inline(always)]
unsafe fn update_continuation(id: u32, parent: u32, target: u32, stage: u8) {
(*left_ptr().add(id as usize)).store(parent, Ordering::Relaxed);
(*right_ptr().add(id as usize)).store(target, Ordering::Relaxed);
(*sym_ptr().add(id as usize)).store(stage, Ordering::Relaxed);
(*kind_ptr().add(id as usize)).store(ArenaKind::Continuation as u8, Ordering::Release);
}
#[inline(always)]
fn hash_of_internal(n: u32) -> u32 {
unsafe {
ensure_arena();
loop {
let (seq, h) = enter_stable();
if n >= h.capacity {
return 0;
}
let val = (*hash_ptr().add(n as usize)).load(Ordering::Acquire);
core::sync::atomic::fence(Ordering::Acquire);
if check_stable(seq) {
return val;
}
}
}
}
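// Rebuilds the full expression from a partially unwound stack, e.g. when a
// step budget expires mid-descent and the caller needs a root node id.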
#[inline(always)]
unsafe fn unwind_to_root(mut curr: u32, mut stack: u32) -> u32 {
while stack != EMPTY {
let recycled = stack;
stack = leftOf(recycled);
let parent_node = rightOf(recycled);
let stage = symOf(recycled) as u8;
if stage == STAGE_LEFT {
let orig_left = leftOf(parent_node);
if curr != orig_left {
curr = allocCons(curr, rightOf(parent_node));
} else {
curr = parent_node;
}
} else {
let orig_right = rightOf(parent_node);
if curr != orig_right {
curr = allocCons(leftOf(parent_node), curr);
} else {
curr = parent_node;
}
}
}
curr
}
enum StepResult {
/// Reduction finished (normal form or irreducible head).
Done(u32),
/// Gas or step budget ran out, or IO blocked; payload is a Suspension id.
Yield(u32),
}
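// One scheduling quantum of reduction. `gas` bounds work per call (the
// worker's batch size); `remaining_steps` is the caller-visible budget,
// spent only on actual rewrites. Rules applied at the head:
//   I x         -> x
//   K x y       -> x
//   S x y z     -> (x z) (y z)
//   B x y z     -> x (y z)
//   C x y z     -> (x z) y
//   S' w x y z  -> (w (x z)) (y z)
//   B' w x y z  -> (w x) (y z)
//   C' w x y z  -> (w (x z)) y
//   ReadOne k   -> k <byte>   (suspends with MODE_IO_WAIT on empty stdin)
//   WriteOne n  -> n, emitting the decoded byte on stdout
// `free_node` recycles a just-popped Continuation so the immediately
// following descent avoids an allocation.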
unsafe fn step_iterative(
mut curr: u32,
mut stack: u32,
mut mode: u8,
gas: &mut u32,
remaining_steps: &mut u32,
mut free_node: u32,
) -> StepResult {
loop {
if *gas == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, *remaining_steps));
}
*gas -= 1;
if mode == MODE_RETURN {
if stack == EMPTY {
return StepResult::Done(curr);
}
let recycled = stack;
stack = leftOf(recycled);
let parent_node = rightOf(recycled);
let stage = symOf(recycled) as u8;
if stage == STAGE_LEFT {
let orig_left = leftOf(parent_node);
if curr != orig_left {
curr = allocCons(curr, rightOf(parent_node));
free_node = recycled;
mode = MODE_RETURN;
continue;
}
update_continuation(recycled, stack, parent_node, STAGE_RIGHT);
stack = recycled;
mode = MODE_DESCEND;
curr = rightOf(parent_node);
continue;
} else {
let orig_right = rightOf(parent_node);
if curr != orig_right {
curr = allocCons(leftOf(parent_node), curr);
free_node = recycled;
mode = MODE_RETURN;
continue;
}
curr = parent_node;
free_node = recycled;
mode = MODE_RETURN;
continue;
}
}
let k = kindOf(curr);
if k != ArenaKind::NonTerm as u32 {
mode = MODE_RETURN;
continue;
}
let left = leftOf(curr);
let right = rightOf(curr);
if kindOf(left) == ArenaKind::Terminal as u32 && symOf(left) == ArenaSym::I as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
curr = right;
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if kindOf(left) == ArenaKind::Terminal as u32 && symOf(left) == ArenaSym::ReadOne as u32
{
if let Some(byte) = stdin_ring().try_dequeue() {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let numeral = alloc_bin_byte(byte);
curr = allocCons(right, numeral);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
let susp_id = alloc_suspension(curr, stack, MODE_IO_WAIT, *remaining_steps);
stdin_wait_ring().enqueue_blocking(susp_id);
return StepResult::Yield(susp_id);
}
if kindOf(left) == ArenaKind::Terminal as u32
&& symOf(left) == ArenaSym::WriteOne as u32
{
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let byte = decode_bin_u8(right);
stdout_ring().enqueue_blocking(byte);
curr = right;
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if kindOf(left) == ArenaKind::NonTerm as u32 {
let ll = leftOf(left);
if kindOf(ll) == ArenaKind::Terminal as u32 && symOf(ll) == ArenaSym::K as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
curr = rightOf(left);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if kindOf(ll) == ArenaKind::NonTerm as u32 {
let lll = leftOf(ll);
if kindOf(lll) == ArenaKind::Terminal as u32 {
let sym = symOf(lll);
if sym == ArenaSym::S as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let xz = allocCons(x, z);
let yz = allocCons(y, z);
curr = allocCons(xz, yz);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if sym == ArenaSym::B as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let yz = allocCons(y, z);
curr = allocCons(x, yz);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if sym == ArenaSym::C as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let xz = allocCons(x, z);
curr = allocCons(xz, y);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
} else if kindOf(lll) == ArenaKind::NonTerm as u32 {
let llll = leftOf(lll);
if kindOf(llll) == ArenaKind::Terminal as u32 {
let sym = symOf(llll);
if sym == ArenaSym::SPrime as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let w = rightOf(lll);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let xz = allocCons(x, z);
let yz = allocCons(y, z);
let w_xz = allocCons(w, xz);
curr = allocCons(w_xz, yz);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if sym == ArenaSym::BPrime as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let w = rightOf(lll);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let yz = allocCons(y, z);
let wx = allocCons(w, x);
curr = allocCons(wx, yz);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
if sym == ArenaSym::CPrime as u32 {
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
*remaining_steps = remaining_steps.saturating_sub(1);
let w = rightOf(lll);
let x = rightOf(ll);
let y = rightOf(left);
let z = right;
let xz = allocCons(x, z);
let wxz = allocCons(w, xz);
curr = allocCons(wxz, y);
mode = MODE_RETURN;
if *remaining_steps == 0 {
return StepResult::Yield(alloc_suspension(curr, stack, mode, 0));
}
continue;
}
}
}
}
}
if free_node != EMPTY {
update_continuation(free_node, stack, curr, STAGE_LEFT);
stack = free_node;
free_node = EMPTY;
} else {
stack = alloc_continuation(stack, curr, STAGE_LEFT);
}
curr = left;
mode = MODE_DESCEND;
}
}
fn step_internal(expr: u32) -> u32 {
unsafe {
let mut gas = u32::MAX;
let mut steps = u32::MAX;
match step_iterative(expr, EMPTY, MODE_DESCEND, &mut gas, &mut steps, EMPTY) {
StepResult::Done(x) => x,
// With unlimited gas a yield can only mean blocked IO; report no progress.
StepResult::Yield(_) => expr,
}
}
}
#[no_mangle]
pub extern "C" fn arenaKernelStep(expr: u32) -> u32 {
unsafe {
ensure_arena();
}
step_internal(expr)
}
#[no_mangle]
pub extern "C" fn reduce(expr: u32, max: u32) -> u32 {
unsafe {
ensure_arena();
}
// 0xffff_ffff is the documented "unlimited" sentinel (numerically it is
// already u32::MAX, so this is a readability alias).
let limit = if max == 0xffff_ffff { u32::MAX } else { max };
let mut cur = expr;
for _ in 0..limit {
let next = step_internal(cur);
if next == cur {
break;
}
cur = next;
}
cur
}
#[no_mangle]
pub extern "C" fn hostPull() -> i64 {
unsafe {
if ARENA_BASE_ADDR == 0 {
return -1;
}
if let Some(cqe) = cq_ring().try_dequeue() {
let packed: u64 = ((cqe.req_id as u64) << 32) | (cqe.node_id as u64);
packed as i64
} else {
-1
}
}
}
#[no_mangle]
pub extern "C" fn debugGetRingEntries() -> u32 {
RING_ENTRIES
}
#[no_mangle]
pub extern "C" fn hostSubmit(node_id: u32, req_id: u32, max_steps: u32) -> u32 {
unsafe {
if ARENA_BASE_ADDR == 0 {
return 2;
}
if sq_ring().try_enqueue(Sqe {
node_id,
req_id,
max_steps,
}) {
0
} else {
1
}
}
}
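// Worker entry point: block on the submission ring, reduce in quanta of
// `batch_gas`, and post a completion when the job finishes, exhausts its
// step budget, or parks on IO. A resubmitted Suspension resumes exactly
// where it yielded (its step budget rides in the hash field).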
#[no_mangle]
pub extern "C" fn workerLoop() {
unsafe {
let sq = sq_ring();
let cq = cq_ring();
let batch_gas: u32 = 20000;
loop {
let job = sq.dequeue_blocking();
let mut curr = job.node_id;
let mut stack = EMPTY;
let mut mode = MODE_DESCEND;
let mut remaining_steps: u32;
let mut free_node = EMPTY;
if kindOf(curr) == ArenaKind::Suspension as u32 {
let susp = curr;
curr = leftOf(susp);
stack = rightOf(susp);
mode = symOf(susp) as u8;
if mode == MODE_IO_WAIT {
mode = MODE_DESCEND;
}
remaining_steps = hash_of_internal(susp);
// Recycle the suspension node itself as the first continuation frame.
free_node = susp;
} else {
let limit = job.max_steps;
remaining_steps = if limit == 0xffff_ffff {
u32::MAX
} else {
limit
};
}
loop {
if remaining_steps == 0 {
if stack != EMPTY {
curr = unwind_to_root(curr, stack);
stack = EMPTY;
}
cq.enqueue_blocking(Cqe {
node_id: curr,
req_id: job.req_id,
_pad: 0,
});
break;
}
let mut gas = batch_gas;
match step_iterative(
curr,
stack,
mode,
&mut gas,
&mut remaining_steps,
free_node,
) {
StepResult::Yield(susp_id) => {
cq.enqueue_blocking(Cqe {
node_id: susp_id,
req_id: job.req_id,
_pad: 0,
});
break;
}
StepResult::Done(next_node) => {
if next_node == curr {
cq.enqueue_blocking(Cqe {
node_id: curr,
req_id: job.req_id,
_pad: 0,
});
break;
}
curr = next_node;
stack = EMPTY;
mode = MODE_DESCEND;
free_node = EMPTY;
}
}
}
}
}
}
#[no_mangle]
pub extern "C" fn debugGetArenaBaseAddr() -> u32 {
unsafe { ARENA_BASE_ADDR }
}
#[no_mangle]
pub extern "C" fn getArenaMode() -> u32 {
unsafe { ARENA_MODE }
}
#[no_mangle]
pub extern "C" fn debugCalculateArenaSize(capacity: u32) -> u32 {
SabHeader::layout(capacity).total_size
}
#[no_mangle]
pub extern "C" fn debugLockState() -> u32 {
unsafe { header().resize_seq.load(Ordering::Relaxed) }
}
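// A minimal sketch of the host-side call sequence, kept single-threaded for
// illustration (a real host runs workerLoop on a dedicated thread and polls
// hostPull from its event loop). Everything referenced here is an export
// defined above; the capacity and step budget are arbitrary example values.
#[allow(dead_code)]
fn _usage_sketch() {
let base = initArena(1 << 12); // 4096-node arena; returns the base address
debug_assert!(base > 1); // 0 and 1 are the error codes
let i = allocTerminal(ArenaSym::I as u32);
let k = allocTerminal(ArenaSym::K as u32);
let expr = allocCons(i, k); // the application (I K)
let reduced = reduce(expr, 16); // (I K) -> K, well within a 16-step budget
debug_assert_eq!(kindOf(reduced), ArenaKind::Terminal as u32);
debug_assert_eq!(symOf(reduced), ArenaSym::K as u32);
}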
}
#[cfg(target_arch = "wasm32")]
pub use wasm::*;