aetheris_client_wasm/shared_world.rs

//! Double-buffered entity state shared through a raw byte region: a packed
//! header word flips buffers lock-free, and a seqlock publishes room bounds.

use bytemuck::{Pod, Zeroable};
use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

/// Pointers handed out by `SharedWorld::new`, consulted by `is_valid`.
static VALID_POINTERS: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();

fn get_registry() -> &'static Mutex<HashSet<usize>> {
    VALID_POINTERS.get_or_init(|| Mutex::new(HashSet::new()))
}

/// Maximum number of entity slots per buffer.
pub const MAX_ENTITIES: usize = 8192;

/// One entity's snapshot in a buffer. `repr(C)` plus explicit trailing
/// padding give a fixed 56-byte layout, as `Pod`/`Zeroable` views require.
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
#[repr(C)]
pub struct SabSlot {
    pub network_id: u64,
    pub x: f32,
    pub y: f32,
    pub z: f32,
    pub rotation: f32,
    pub dx: f32,
    pub dy: f32,
    pub dz: f32,
    pub hp: u16,
    pub shield: u16,
    pub entity_type: u16,
    pub flags: u8,
    pub mining_active: u8,
    pub cargo_ore: u16,
    pub cargo_capacity: u16,
    pub mining_target_id: u16,
    pub padding: [u8; 6],
}

/// Header at the front of the shared region. `state` packs the active
/// buffer index (low 32 bits) with the entity count (high 32 bits) so both
/// are taken in one atomic load. The `room_*` fields hold f32 bit patterns.
#[derive(Debug)]
#[repr(C)]
pub struct SabHeader {
    pub state: AtomicU64,
    pub tick: AtomicU64,
    pub room_min_x: AtomicU32,
    pub room_min_y: AtomicU32,
    pub room_max_x: AtomicU32,
    pub room_max_y: AtomicU32,
    pub room_bounds_seq: AtomicU32,
    pub sub_tick_fraction: AtomicU32,
}

/// Total size of the shared region: one header plus two entity buffers.
pub const SHARED_MEMORY_SIZE: usize =
    core::mem::size_of::<SabHeader>() + (core::mem::size_of::<SabSlot>() * MAX_ENTITIES * 2);
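
// Not in the original file: hedged compile-time checks of the layout the
// types above assume. With `repr(C)` and the field order shown, `SabSlot`
// should come out at 56 bytes and `SabHeader` at 40, both 8-aligned; these
// asserts fail the build if that assumption ever drifts.
const _: () = assert!(core::mem::size_of::<SabSlot>() == 56);
const _: () = assert!(core::mem::size_of::<SabHeader>() == 40);
const _: () = assert!(core::mem::align_of::<SabHeader>() == 8);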

#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn shared_world_size() -> usize {
    SHARED_MEMORY_SIZE
}
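
// A hedged sketch, not in the original file: how a worker-side export might
// pair `shared_world_size` with `SharedWorld::from_ptr` once JS has
// allocated the region, assuming wasm-bindgen's raw-pointer ABI support.
// The export name `read_tick_at` is hypothetical.
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn read_tick_at(ptr: *mut u8) -> u64 {
    // SAFETY: assumes `ptr` addresses a live, 8-aligned region of at least
    // `shared_world_size()` bytes for the duration of this call.
    let world = unsafe { SharedWorld::from_ptr(ptr) };
    world.tick()
}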

/// View over the shared region. `owns_memory` records whether this instance
/// allocated the region (and must free it on drop) or merely wraps it.
pub struct SharedWorld {
    ptr: *mut u8,
    owns_memory: bool,
}

impl SharedWorld {
    /// Wraps an existing region without taking ownership.
    ///
    /// # Safety
    ///
    /// `ptr` must point to at least `SHARED_MEMORY_SIZE` bytes, be 8-byte
    /// aligned, and outlive the returned `SharedWorld`.
    pub unsafe fn from_ptr(ptr: *mut u8) -> Self {
        Self {
            ptr,
            owns_memory: false,
        }
    }

    /// Allocates a zeroed region and registers its pointer for `is_valid`.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn new() -> Self {
        let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
            .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };

        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }

        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .insert(ptr as usize);

        Self {
            ptr,
            owns_memory: true,
        }
    }

    /// Returns `true` if `ptr` came from `SharedWorld::new` and has not yet
    /// been freed.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn is_valid(ptr: *mut u8) -> bool {
        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .contains(&(ptr as usize))
    }

    #[must_use]
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn header(&self) -> &SabHeader {
        unsafe { &*(self.ptr.cast::<SabHeader>()) }
    }

    /// Index (0 or 1) of the buffer readers should use.
    #[must_use]
    pub fn active_index(&self) -> u32 {
        (self.header().state.load(Ordering::Acquire) & 0xFFFF_FFFF) as u32
    }

    /// Number of live slots in the active buffer.
    #[must_use]
    pub fn entity_count(&self) -> u32 {
        (self.header().state.load(Ordering::Acquire) >> 32) as u32
    }

    #[must_use]
    pub fn tick(&self) -> u64 {
        self.header().tick.load(Ordering::Acquire)
    }

    /// Interpolation fraction within the current tick, stored as f32 bits.
    #[must_use]
    pub fn sub_tick_fraction(&self) -> f32 {
        f32::from_bits(self.header().sub_tick_fraction.load(Ordering::Acquire))
    }

    pub fn set_sub_tick_fraction(&mut self, fraction: f32) {
        self.header()
            .sub_tick_fraction
            .store(fraction.to_bits(), Ordering::Release);
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer(&self, idx: usize) -> &[SabSlot] {
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        unsafe { core::slice::from_raw_parts(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES) }
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer_mut(&mut self, idx: usize) -> &mut [SabSlot] {
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        unsafe {
            core::slice::from_raw_parts_mut(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES)
        }
    }

    /// Active buffer, truncated to the committed entity count. A single
    /// load of `state` yields both index and count, so the pair cannot tear.
    #[must_use]
    pub fn get_read_buffer(&self) -> &[SabSlot] {
        let state = self.header().state.load(Ordering::Acquire);
        let active = (state & 0xFFFF_FFFF) as usize;
        let count = ((state >> 32) as usize).min(MAX_ENTITIES);
        &self.get_buffer(active)[..count]
    }

    /// Inactive buffer for staging the next frame; flip it live with
    /// `commit_write`.
    #[must_use]
    pub fn get_write_buffer(&mut self) -> &mut [SabSlot] {
        let active = self.active_index() as usize;
        let inactive = 1 - active;
        self.get_buffer_mut(inactive)
    }

    /// Publishes the staged buffer: the Release store of `state` makes the
    /// slot writes and the new tick visible to readers that Acquire-load it.
    pub fn commit_write(&mut self, entity_count: u32, tick: u64) {
        let active = self.active_index();
        let next_active = 1 - active;

        let packed = (u64::from(entity_count) << 32) | u64::from(next_active);
        self.header().tick.store(tick, Ordering::Release);
        self.header().state.store(packed, Ordering::Release);
    }

    /// Seqlock writer: bump the sequence to odd, write the bounds, then
    /// bump it back to even with Release so readers can detect torn reads.
    pub fn set_room_bounds(&mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) {
        let h = self.header();
        let seq = h.room_bounds_seq.load(Ordering::Relaxed);
        h.room_bounds_seq
            .store(seq.wrapping_add(1), Ordering::Relaxed);
        core::sync::atomic::fence(Ordering::Release);
        h.room_min_x.store(min_x.to_bits(), Ordering::Relaxed);
        h.room_min_y.store(min_y.to_bits(), Ordering::Relaxed);
        h.room_max_x.store(max_x.to_bits(), Ordering::Relaxed);
        h.room_max_y.store(max_y.to_bits(), Ordering::Relaxed);
        h.room_bounds_seq
            .store(seq.wrapping_add(2), Ordering::Release);
    }

    /// Seqlock reader: retry while the sequence is odd (write in progress)
    /// or changed across the reads (torn snapshot).
    #[must_use]
    pub fn get_room_bounds(&self) -> (f32, f32, f32, f32) {
        let h = self.header();
        loop {
            let seq1 = h.room_bounds_seq.load(Ordering::Acquire);
            if seq1 & 1 != 0 {
                core::hint::spin_loop();
                continue;
            }
            let min_x = f32::from_bits(h.room_min_x.load(Ordering::Relaxed));
            let min_y = f32::from_bits(h.room_min_y.load(Ordering::Relaxed));
            let max_x = f32::from_bits(h.room_max_x.load(Ordering::Relaxed));
            let max_y = f32::from_bits(h.room_max_y.load(Ordering::Relaxed));
            core::sync::atomic::fence(Ordering::Acquire);
            let seq2 = h.room_bounds_seq.load(Ordering::Relaxed);
            if seq1 == seq2 {
                return (min_x, min_y, max_x, max_y);
            }
            core::hint::spin_loop();
        }
    }
}

impl Drop for SharedWorld {
    #[allow(clippy::missing_panics_doc)]
    fn drop(&mut self) {
        if self.owns_memory {
            if let Ok(mut reg) = get_registry().lock() {
                reg.remove(&(self.ptr as usize));
            }

            let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
                .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");

            unsafe { std::alloc::dealloc(self.ptr, layout) };
        }
    }
}

impl Default for SharedWorld {
    fn default() -> Self {
        Self::new()
    }
}
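
// A minimal single-threaded usage sketch, not in the original file: it
// exercises the double-buffer commit protocol and the seqlock'd room bounds
// on one thread, so it checks the packing/unpacking logic rather than
// cross-thread visibility.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn write_commit_read_roundtrip() {
        let mut world = SharedWorld::new();
        assert!(SharedWorld::is_valid(world.as_ptr()));

        // Stage one slot in the inactive buffer, then flip it active.
        {
            let buf = world.get_write_buffer();
            buf[0].network_id = 42;
            buf[0].x = 1.5;
        }
        world.commit_write(1, 7);

        assert_eq!(world.entity_count(), 1);
        assert_eq!(world.tick(), 7);
        let read = world.get_read_buffer();
        assert_eq!(read.len(), 1);
        assert_eq!(read[0].network_id, 42);
        assert_eq!(read[0].x, 1.5);

        // Bounds round-trip exactly, since f32s travel as raw bits.
        world.set_room_bounds(-10.0, -5.0, 10.0, 5.0);
        assert_eq!(world.get_room_bounds(), (-10.0, -5.0, 10.0, 5.0));
    }
}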