// aetheris_client_wasm/shared_world.rs

//! Lock-free double-buffered compact replication layout.
//!
//! This module implements the "Shared World" logic which allows the Game Worker
//! to write authoritative updates while the Render Worker reads a stable snapshot
//! without blocking, satisfying the zero-cost synchronization requirement of M360.

use bytemuck::{Pod, Zeroable}; // Plain-old-data marker derives for `SabSlot` byte casts.
use core::sync::atomic::{AtomicU64, Ordering};
use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

/// Global registry of base pointers currently owned by live `SharedWorld`
/// allocations; consulted when validating raw pointers at the JS boundary.
static VALID_POINTERS: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();

/// Lazily initializes (on first call) and returns the global pointer registry.
fn get_registry() -> &'static Mutex<HashSet<usize>> {
    VALID_POINTERS.get_or_init(Mutex::default)
}

/// Maximum number of entities supported in the compact shared buffer.
/// Each of the two buffers holds this many `SabSlot`s, i.e.
/// total slots = 16,384 (8,192 per buffer).
pub const MAX_ENTITIES: usize = 8192;

/// A single replicated entity state in the compact shared buffer (48 bytes).
/// Optimized for Void Rush (2D gameplay with 3D elevation).
///
/// Field order and `#[repr(C)]` are load-bearing: the layout is exactly 48
/// bytes with no padding (offsets annotated per field), which the `Pod`
/// derive requires and which any JS-side views of the buffer rely on.
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
#[repr(C)]
pub struct SabSlot {
    /// Network-wide unique entity identifier.
    pub network_id: u64, // Offset 0, size 8
    /// World-space position X.
    pub x: f32, // Offset 8, size 4
    /// World-space position Y.
    pub y: f32, // Offset 12, size 4
    /// World-space position Z.
    pub z: f32, // Offset 16, size 4
    /// Orientation yaw (rotation around Z axis).
    pub rotation: f32, // Offset 20, size 4
    /// Velocity vector X.
    pub dx: f32, // Offset 24, size 4
    /// Velocity vector Y.
    pub dy: f32, // Offset 28, size 4
    /// Velocity vector Z.
    pub dz: f32, // Offset 32, size 4
    /// Current health points.
    pub hp: u16, // Offset 36, size 2
    /// Current shield points.
    pub shield: u16, // Offset 38, size 2
    /// Entity type identifier.
    pub entity_type: u16, // Offset 40, size 2
    /// Bitfield flags (Alive: 0, Visible: 1, `LocalPlayer`: 2, Interpolate: 3, ...).
    pub flags: u8, // Offset 42, size 1
    /// Mining state (0: inactive, 1: active).
    pub mining_active: u8, // Offset 43, size 1
    /// Current cargo count.
    pub cargo_ore: u16, // Offset 44, size 2
    /// Network ID of the mining target (truncated to 16-bit for Phase 1).
    pub mining_target_id: u16, // Offset 46, size 2
}

/// The header for the `SharedArrayBuffer`.
///
/// `state` packs `entity_count` (high 32 bits) and `flip_bit` (low 32 bits) into a
/// single `AtomicU64` so that readers always observe a consistent pair with a single
/// acquire load, eliminating the TOCTOU window that existed when they were separate
/// `AtomicU32` fields.
///
/// Total size: 40 bytes (2 × `AtomicU64` + 6 × `AtomicU32`), no padding.
#[derive(Debug)]
#[repr(C)]
pub struct SabHeader {
    /// Packed atomic state: `high 32 bits = entity_count`, `low 32 bits = flip_bit` (0 or 1).
    /// Updated with a single `Release` store in `commit_write`.
    pub state: AtomicU64, // Offset 0
    /// The latest server tick corresponding to the data in the active buffer.
    pub tick: AtomicU64, // Offset 8
    /// Room bounds min X, stored as raw `f32` bits (`f32::to_bits`).
    pub room_min_x: core::sync::atomic::AtomicU32, // Offset 16
    /// Room bounds min Y, stored as raw `f32` bits.
    pub room_min_y: core::sync::atomic::AtomicU32, // Offset 20
    /// Room bounds max X, stored as raw `f32` bits.
    pub room_max_x: core::sync::atomic::AtomicU32, // Offset 24
    /// Room bounds max Y, stored as raw `f32` bits.
    pub room_max_y: core::sync::atomic::AtomicU32, // Offset 28
    /// Seqlock counter for room bounds. Odd = write in progress; even = stable.
    pub room_bounds_seq: core::sync::atomic::AtomicU32, // Offset 32
    /// Sub-tick progress (0.0 to 1.0) for visual interpolation, as raw `f32` bits.
    pub sub_tick_fraction: core::sync::atomic::AtomicU32, // Offset 36
}

/// Total size in bytes required for the compact replication layout.
/// 40 bytes (`SabHeader`) + 384 KiB (Buffer A) + 384 KiB (Buffer B);
/// exact size is 786,472 bytes (~768 KiB).
pub const SHARED_MEMORY_SIZE: usize =
    core::mem::size_of::<SabHeader>() + (core::mem::size_of::<SabSlot>() * MAX_ENTITIES * 2);

/// Returns the size in bytes required for the shared world buffer.
///
/// Exported to JavaScript (wasm32 builds only) so the allocating side — per the
/// `SharedWorld` docs, the Main Thread — can size the `SharedArrayBuffer` to
/// exactly `SHARED_MEMORY_SIZE`.
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn shared_world_size() -> usize {
    SHARED_MEMORY_SIZE
}

/// A lock-free double buffer for compact entity replication.
/// This points into a `SharedArrayBuffer` allocated by the Main Thread.
///
/// Because it holds a raw pointer, this type is automatically `!Send`/`!Sync`;
/// each worker constructs its own view over the shared region (via `from_ptr`).
pub struct SharedWorld {
    /// Base address of the region: `SabHeader` followed by the two slot buffers.
    ptr: *mut u8,
    /// True only when `new()` allocated the region, in which case `Drop` frees it.
    owns_memory: bool,
}

103impl SharedWorld {
104    /// Initializes the `SharedWorld` from a raw memory pointer.
105    ///
106    /// # Safety
107    /// The pointer must remain valid for the lifetime of this object and must
108    /// point to a region of at least `SHARED_MEMORY_SIZE` bytes.
109    pub unsafe fn from_ptr(ptr: *mut u8) -> Self {
110        Self {
111            ptr,
112            owns_memory: false,
113        }
114    }
115
116    /// Creates a new `SharedWorld` by allocating its own memory (fallback/local use).
117    #[allow(clippy::missing_panics_doc)]
118    #[must_use]
119    pub fn new() -> Self {
120        let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
121            .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
122        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
123
124        if ptr.is_null() {
125            std::alloc::handle_alloc_error(layout);
126        }
127
128        // Register the pointer for JS-boundary validation
129        get_registry()
130            .lock()
131            .expect("Registry mutex poisoned")
132            .insert(ptr as usize);
133
134        Self {
135            ptr,
136            owns_memory: true,
137        }
138    }
139
140    /// Validates if a raw pointer was registered by a living `SharedWorld` instance.
141    #[allow(clippy::missing_panics_doc)]
142    #[must_use]
143    pub fn is_valid(ptr: *mut u8) -> bool {
144        get_registry()
145            .lock()
146            .expect("Registry mutex poisoned")
147            .contains(&(ptr as usize))
148    }
149
150    /// Returns the raw pointer to the base of the shared world buffer.
151    #[must_use]
152    pub fn as_ptr(&self) -> *mut u8 {
153        self.ptr
154    }
155
156    #[allow(clippy::cast_ptr_alignment)]
157    fn header(&self) -> &SabHeader {
158        unsafe { &*(self.ptr.cast::<SabHeader>()) }
159    }
160
161    /// Returns the active buffer index (0 or 1).
162    #[must_use]
163    pub fn active_index(&self) -> u32 {
164        (self.header().state.load(Ordering::Acquire) & 0xFFFF_FFFF) as u32
165    }
166
167    /// Returns the entity count for the active buffer.
168    #[must_use]
169    pub fn entity_count(&self) -> u32 {
170        (self.header().state.load(Ordering::Acquire) >> 32) as u32
171    }
172
173    /// Returns the server tick for the active buffer.
174    #[must_use]
175    pub fn tick(&self) -> u64 {
176        self.header().tick.load(Ordering::Acquire)
177    }
178
179    /// Returns the sub-tick progress fraction (0.0 to 1.0).
180    #[must_use]
181    pub fn sub_tick_fraction(&self) -> f32 {
182        f32::from_bits(self.header().sub_tick_fraction.load(Ordering::Acquire))
183    }
184
185    /// Updates the sub-tick progress fraction.
186    pub fn set_sub_tick_fraction(&mut self, fraction: f32) {
187        self.header()
188            .sub_tick_fraction
189            .store(fraction.to_bits(), Ordering::Release);
190    }
191
192    /// Returns a slice of the entities in the buffer index i.
193    #[allow(clippy::cast_ptr_alignment)]
194    fn get_buffer(&self, idx: usize) -> &[SabSlot] {
195        let offset = core::mem::size_of::<SabHeader>()
196            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
197        unsafe { core::slice::from_raw_parts(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES) }
198    }
199
200    /// Returns a mutable slice of the entities in the buffer index i.
201    #[allow(clippy::cast_ptr_alignment)]
202    fn get_buffer_mut(&mut self, idx: usize) -> &mut [SabSlot] {
203        let offset = core::mem::size_of::<SabHeader>()
204            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
205        unsafe {
206            core::slice::from_raw_parts_mut(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES)
207        }
208    }
209
210    /// Returns the entities currently visible to readers.
211    ///
212    /// Both the active buffer index and the entity count are derived from a single
213    /// atomic load, so readers always see a consistent pair.
214    #[must_use]
215    pub fn get_read_buffer(&self) -> &[SabSlot] {
216        let state = self.header().state.load(Ordering::Acquire);
217        let active = (state & 0xFFFF_FFFF) as usize;
218        let count = ((state >> 32) as usize).min(MAX_ENTITIES);
219        &self.get_buffer(active)[..count]
220    }
221
222    /// Returns the buffer currently available for writing (inactive buffer).
223    #[must_use]
224    pub fn get_write_buffer(&mut self) -> &mut [SabSlot] {
225        let active = self.active_index() as usize;
226        let inactive = 1 - active;
227        self.get_buffer_mut(inactive)
228    }
229
230    /// Swaps the active buffer and updates the entity count and tick.
231    pub fn commit_write(&mut self, entity_count: u32, tick: u64) {
232        let active = self.active_index();
233        let next_active = 1 - active;
234
235        let packed = (u64::from(entity_count) << 32) | u64::from(next_active);
236        self.header().tick.store(tick, Ordering::Release);
237        self.header().state.store(packed, Ordering::Release);
238    }
239
240    /// Updates the room bounds using a seqlock so readers always see a consistent
241    /// rectangle. The sequence number is bumped to an odd value before writing and
242    /// back to an even value (with `Release` ordering) after, matching the acquire
243    /// fence in `get_room_bounds`.
244    pub fn set_room_bounds(&mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) {
245        let h = self.header();
246        let seq = h.room_bounds_seq.load(Ordering::Relaxed);
247        // Mark write in progress: odd sequence number.
248        h.room_bounds_seq
249            .store(seq.wrapping_add(1), Ordering::Relaxed);
250        core::sync::atomic::fence(Ordering::Release);
251        h.room_min_x.store(min_x.to_bits(), Ordering::Relaxed);
252        h.room_min_y.store(min_y.to_bits(), Ordering::Relaxed);
253        h.room_max_x.store(max_x.to_bits(), Ordering::Relaxed);
254        h.room_max_y.store(max_y.to_bits(), Ordering::Relaxed);
255        // Mark write complete: even sequence number, visible to readers.
256        h.room_bounds_seq
257            .store(seq.wrapping_add(2), Ordering::Release);
258    }
259
260    /// Reads the room bounds, retrying if a concurrent write is detected via the
261    /// seqlock. Guaranteed to return a consistent (non-torn) rectangle.
262    #[must_use]
263    pub fn get_room_bounds(&self) -> (f32, f32, f32, f32) {
264        let h = self.header();
265        loop {
266            let seq1 = h.room_bounds_seq.load(Ordering::Acquire);
267            if seq1 & 1 != 0 {
268                // Write in progress — spin.
269                core::hint::spin_loop();
270                continue;
271            }
272            let min_x = f32::from_bits(h.room_min_x.load(Ordering::Relaxed));
273            let min_y = f32::from_bits(h.room_min_y.load(Ordering::Relaxed));
274            let max_x = f32::from_bits(h.room_max_x.load(Ordering::Relaxed));
275            let max_y = f32::from_bits(h.room_max_y.load(Ordering::Relaxed));
276            core::sync::atomic::fence(Ordering::Acquire);
277            let seq2 = h.room_bounds_seq.load(Ordering::Relaxed);
278            if seq1 == seq2 {
279                return (min_x, min_y, max_x, max_y);
280            }
281            // Torn read — retry.
282            core::hint::spin_loop();
283        }
284    }
285}
286
287impl Drop for SharedWorld {
288    #[allow(clippy::missing_panics_doc)]
289    fn drop(&mut self) {
290        if self.owns_memory {
291            if let Ok(mut reg) = get_registry().lock() {
292                reg.remove(&(self.ptr as usize));
293            }
294
295            let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
296                .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
297
298            unsafe { std::alloc::dealloc(self.ptr, layout) };
299        }
300    }
301}
302
303impl Default for SharedWorld {
304    fn default() -> Self {
305        Self::new()
306    }
307}