use bytemuck::{Pod, Zeroable};
use core::sync::atomic::{AtomicU32, AtomicU64, Ordering, fence};
use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

/// Addresses of regions allocated by `SharedWorld::new`, consulted by
/// `SharedWorld::is_valid` to vet raw pointers handed back across FFI.
static VALID_POINTERS: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();

fn get_registry() -> &'static Mutex<HashSet<usize>> {
    VALID_POINTERS.get_or_init(|| Mutex::new(HashSet::new()))
}
pub const MAX_ENTITIES: usize = 8192;
/// One entity snapshot in the shared buffer.
///
/// `repr(C)` pins the field layout so the consumer on the other side of the
/// shared memory can read fields at fixed byte offsets, and `Pod`/`Zeroable`
/// let the slot array be viewed as raw bytes.
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
#[repr(C)]
pub struct SabSlot {
    pub network_id: u64,
    pub x: f32,
    pub y: f32,
    pub z: f32,
    pub rotation: f32,
    pub dx: f32,
    pub dy: f32,
    pub dz: f32,
    pub hp: u16,
    pub shield: u16,
    pub entity_type: u16,
    pub flags: u8,
    pub mining_active: u8,
    pub cargo_ore: u16,
    pub mining_target_id: u16,
}
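// Compile-time layout check (an added sanity sketch, not in the original
// source): with the field order above, `repr(C)` yields a 48-byte slot with
// no padding, which is also what the `Pod` derive requires. Any consumer
// that hard-codes a slot stride must agree with this number.
const _: () = assert!(core::mem::size_of::<SabSlot>() == 48);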
/// Control header at the start of the shared region.
///
/// `state` packs the active buffer index (low 32 bits) and the entity count
/// (high 32 bits) so both are published in a single atomic store. The four
/// room-bounds words are guarded by `room_bounds_seq`, a seqlock sequence
/// counter: an odd value means a write is in progress. Floats are stored as
/// raw bits because there are no atomic float types.
#[derive(Debug)]
#[repr(C)]
pub struct SabHeader {
    pub state: AtomicU64,
    pub tick: AtomicU64,
    pub room_min_x: AtomicU32,
    pub room_min_y: AtomicU32,
    pub room_max_x: AtomicU32,
    pub room_max_y: AtomicU32,
    pub room_bounds_seq: AtomicU32,
    pub sub_tick_fraction: AtomicU32,
}
/// Total size of the shared region: one header followed by two entity
/// buffers (double buffering: readers use one while the writer fills the
/// other).
pub const SHARED_MEMORY_SIZE: usize =
    core::mem::size_of::<SabHeader>() + (core::mem::size_of::<SabSlot>() * MAX_ENTITIES * 2);
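// Additional compile-time checks (added as a defensive sketch): the pointer
// arithmetic below assumes the header ends on an offset valid for `SabSlot`,
// and the allocation uses a fixed alignment of 8, which must satisfy both
// types.
const _: () = assert!(core::mem::size_of::<SabHeader>() % core::mem::align_of::<SabSlot>() == 0);
const _: () = assert!(core::mem::align_of::<SabHeader>() <= 8);
const _: () = assert!(core::mem::align_of::<SabSlot>() <= 8);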
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn shared_world_size() -> usize {
    SHARED_MEMORY_SIZE
}
/// Handle to the shared region, either owned (allocated by `new`) or
/// borrowed from an externally provided pointer (`from_ptr`).
pub struct SharedWorld {
    ptr: *mut u8,
    /// True only for regions allocated by `new`; `Drop` frees those.
    owns_memory: bool,
}
impl SharedWorld {
    /// Wraps an externally allocated region without taking ownership.
    ///
    /// # Safety
    ///
    /// `ptr` must point to at least `SHARED_MEMORY_SIZE` bytes, be aligned
    /// to 8 bytes, be zero-initialized (or previously initialized as a
    /// shared world), and remain valid for the lifetime of the returned
    /// value.
    pub unsafe fn from_ptr(ptr: *mut u8) -> Self {
        Self {
            ptr,
            owns_memory: false,
        }
    }
    /// Allocates a zeroed region and registers its address so `is_valid`
    /// can later vouch for pointers that cross an FFI boundary.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn new() -> Self {
        let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
            .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }
        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .insert(ptr as usize);
        Self {
            ptr,
            owns_memory: true,
        }
    }
    /// Returns true if `ptr` was produced by `new` and has not yet been
    /// freed.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn is_valid(ptr: *mut u8) -> bool {
        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .contains(&(ptr as usize))
    }
    #[must_use]
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr
    }
    #[allow(clippy::cast_ptr_alignment)]
    fn header(&self) -> &SabHeader {
        // The header sits at offset 0; the 8-byte allocation alignment
        // satisfies `SabHeader`'s alignment requirement.
        unsafe { &*(self.ptr.cast::<SabHeader>()) }
    }
    /// Index (0 or 1) of the buffer currently published to readers.
    #[must_use]
    pub fn active_index(&self) -> u32 {
        (self.header().state.load(Ordering::Acquire) & 0xFFFF_FFFF) as u32
    }
    /// Number of live entities in the active buffer.
    #[must_use]
    pub fn entity_count(&self) -> u32 {
        (self.header().state.load(Ordering::Acquire) >> 32) as u32
    }
    #[must_use]
    pub fn tick(&self) -> u64 {
        self.header().tick.load(Ordering::Acquire)
    }
    /// Interpolation fraction within the current tick, round-tripped
    /// through raw bits because there is no atomic `f32`.
    #[must_use]
    pub fn sub_tick_fraction(&self) -> f32 {
        f32::from_bits(self.header().sub_tick_fraction.load(Ordering::Acquire))
    }
    pub fn set_sub_tick_fraction(&mut self, fraction: f32) {
        self.header()
            .sub_tick_fraction
            .store(fraction.to_bits(), Ordering::Release);
    }
    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer(&self, idx: usize) -> &[SabSlot] {
        // The two buffers are laid out back to back immediately after the
        // header.
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        unsafe { core::slice::from_raw_parts(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES) }
    }
    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer_mut(&mut self, idx: usize) -> &mut [SabSlot] {
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        unsafe {
            core::slice::from_raw_parts_mut(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES)
        }
    }
    /// Slice of live entities in the buffer currently published to readers.
    #[must_use]
    pub fn get_read_buffer(&self) -> &[SabSlot] {
        // Load `state` once so the index and count come from the same
        // publish; clamp the count defensively before slicing.
        let state = self.header().state.load(Ordering::Acquire);
        let active = (state & 0xFFFF_FFFF) as usize;
        let count = ((state >> 32) as usize).min(MAX_ENTITIES);
        &self.get_buffer(active)[..count]
    }
    /// Mutable slice of the inactive buffer; call `commit_write` to publish
    /// it. The active index is always 0 or 1 by construction, so `1 - active`
    /// selects the other buffer.
    #[must_use]
    pub fn get_write_buffer(&mut self) -> &mut [SabSlot] {
        let active = self.active_index() as usize;
        let inactive = 1 - active;
        self.get_buffer_mut(inactive)
    }
    /// Publishes the buffer just written: the release store of `state`
    /// flips the active index and entity count in one atomic word, making
    /// all prior slot writes visible to readers that acquire `state`.
    pub fn commit_write(&mut self, entity_count: u32, tick: u64) {
        let active = self.active_index();
        let next_active = 1 - active;
        let packed = (u64::from(entity_count) << 32) | u64::from(next_active);
        self.header().tick.store(tick, Ordering::Release);
        self.header().state.store(packed, Ordering::Release);
    }
    /// Writer side of the room-bounds seqlock: bump the sequence to an odd
    /// value, write the data, then bump it back to even.
    pub fn set_room_bounds(&mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) {
        let h = self.header();
        let seq = h.room_bounds_seq.load(Ordering::Relaxed);
        // Odd sequence tells readers a write is in progress.
        h.room_bounds_seq
            .store(seq.wrapping_add(1), Ordering::Relaxed);
        // Keep the odd-sequence store ordered before the data stores.
        fence(Ordering::Release);
        h.room_min_x.store(min_x.to_bits(), Ordering::Relaxed);
        h.room_min_y.store(min_y.to_bits(), Ordering::Relaxed);
        h.room_max_x.store(max_x.to_bits(), Ordering::Relaxed);
        h.room_max_y.store(max_y.to_bits(), Ordering::Relaxed);
        // Back to even; the release store publishes the new bounds.
        h.room_bounds_seq
            .store(seq.wrapping_add(2), Ordering::Release);
    }
    /// Reader side of the seqlock: retry while a write is in progress (odd
    /// sequence) or the sequence changed between the two loads (torn read).
    #[must_use]
    pub fn get_room_bounds(&self) -> (f32, f32, f32, f32) {
        let h = self.header();
        loop {
            let seq1 = h.room_bounds_seq.load(Ordering::Acquire);
            if seq1 & 1 != 0 {
                core::hint::spin_loop();
                continue;
            }
            let min_x = f32::from_bits(h.room_min_x.load(Ordering::Relaxed));
            let min_y = f32::from_bits(h.room_min_y.load(Ordering::Relaxed));
            let max_x = f32::from_bits(h.room_max_x.load(Ordering::Relaxed));
            let max_y = f32::from_bits(h.room_max_y.load(Ordering::Relaxed));
            // Keep the data loads ordered before the sequence re-check.
            fence(Ordering::Acquire);
            let seq2 = h.room_bounds_seq.load(Ordering::Relaxed);
            if seq1 == seq2 {
                return (min_x, min_y, max_x, max_y);
            }
            core::hint::spin_loop();
        }
    }
}
impl Drop for SharedWorld {
    fn drop(&mut self) {
        if self.owns_memory {
            // Unregister first so `is_valid` never vouches for freed memory.
            if let Ok(mut reg) = get_registry().lock() {
                reg.remove(&(self.ptr as usize));
            }
            let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
                .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
            unsafe { std::alloc::dealloc(self.ptr, layout) };
        }
    }
}
impl Default for SharedWorld {
    fn default() -> Self {
        Self::new()
    }
}
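
// A minimal single-threaded sketch of the intended write/commit/read cycle
// and the seqlock round trip. This test module is an illustrative addition,
// not part of the original source; the field values are arbitrary.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn double_buffer_round_trip() {
        let mut world = SharedWorld::new();
        assert!(SharedWorld::is_valid(world.as_ptr()));
        // Fill one slot in the inactive buffer, then publish it.
        let buf = world.get_write_buffer();
        buf[0].network_id = 42;
        buf[0].x = 1.5;
        world.commit_write(1, 7);
        // Readers now see exactly the committed slot.
        assert_eq!(world.tick(), 7);
        assert_eq!(world.entity_count(), 1);
        let read = world.get_read_buffer();
        assert_eq!(read.len(), 1);
        assert_eq!(read[0].network_id, 42);
        // A second commit flips back to the other buffer.
        let first_active = world.active_index();
        world.get_write_buffer()[0].network_id = 43;
        world.commit_write(1, 8);
        assert_ne!(world.active_index(), first_active);
        assert_eq!(world.get_read_buffer()[0].network_id, 43);
    }

    #[test]
    fn room_bounds_seqlock_round_trip() {
        let mut world = SharedWorld::new();
        world.set_room_bounds(-10.0, -20.0, 30.0, 40.0);
        assert_eq!(world.get_room_bounds(), (-10.0, -20.0, 30.0, 40.0));
    }
}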