// aetheris_client_wasm/shared_world.rs
1//! Lock-free double-buffered compact replication layout.
2//!
3//! This module implements the "Shared World" logic which allows the Game Worker
4//! to write authoritative updates while the Render Worker reads a stable snapshot
5//! without blocking, satisfying the zero-cost synchronization requirement of M360.
6
7use bytemuck::{Pod, Zeroable};
8use core::sync::atomic::{AtomicU64, Ordering};
9use std::collections::HashSet;
10use std::sync::{Mutex, OnceLock};
11
/// Global registry of base pointers belonging to live, self-allocated
/// `SharedWorld` instances; consulted when validating pointers crossing
/// the JS boundary.
static VALID_POINTERS: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();

/// Lazily initializes the pointer registry on first use and returns it.
fn get_registry() -> &'static Mutex<HashSet<usize>> {
    VALID_POINTERS.get_or_init(Default::default)
}
17
/// Maximum number of entities supported in the compact shared buffer.
/// The layout holds two buffers of this many slots each
/// (total slots = 16,384; 8,192 per buffer).
pub const MAX_ENTITIES: usize = 8192;
21
/// A single replicated entity state in the compact shared buffer (48 bytes).
/// Optimized for Void Rush (2D gameplay with 3D elevation).
///
/// `repr(C)` fixes the field layout so both workers (and any JS-side views)
/// agree on offsets; `Pod`/`Zeroable` permit reinterpreting raw shared bytes
/// as slots. Size is exactly 48 bytes with no implicit padding.
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
#[repr(C)]
pub struct SabSlot {
    /// Network-wide unique entity identifier.
    pub network_id: u64, // Offset 0, size 8
    /// World-space position X.
    pub x: f32, // Offset 8, size 4
    /// World-space position Y.
    pub y: f32, // Offset 12, size 4
    /// World-space position Z.
    pub z: f32, // Offset 16, size 4
    /// Orientation yaw (rotation around Z axis).
    pub rotation: f32, // Offset 20, size 4
    /// Velocity vector X.
    pub dx: f32, // Offset 24, size 4
    /// Velocity vector Y.
    pub dy: f32, // Offset 28, size 4
    /// Velocity vector Z.
    pub dz: f32, // Offset 32, size 4
    /// Current health points.
    pub hp: u16, // Offset 36, size 2
    /// Current shield points.
    pub shield: u16, // Offset 38, size 2
    /// Entity type identifier.
    pub entity_type: u16, // Offset 40, size 2
    /// Bitfield flags (Alive: 0, Visible: 1, `LocalPlayer`: 2, Interpolate: 3, ...).
    pub flags: u8, // Offset 42, size 1
    /// Mining state (0: inactive, 1: active).
    pub mining_active: u8, // Offset 43, size 1
    /// Current cargo count.
    pub cargo_ore: u16, // Offset 44, size 2
    /// Network ID of the mining target (truncated to 16-bit for Phase 1).
    pub mining_target_id: u16, // Offset 46, size 2
}
58
/// The header for the `SharedArrayBuffer`.
///
/// `state` packs `entity_count` (high 32 bits) and `flip_bit` (low 32 bits) into a
/// single `AtomicU64` so that readers always observe a consistent pair with a single
/// acquire load, eliminating the TOCTOU window that existed when they were separate
/// `AtomicU32` fields.
///
/// Total size: 40 bytes, 8-byte aligned (`repr(C)`; offsets noted per field).
#[derive(Debug)]
#[repr(C)]
pub struct SabHeader {
    /// Packed atomic state: `high 32 bits = entity_count`, `low 32 bits = flip_bit` (0 or 1).
    /// Updated with a single `Release` store in `commit_write`.
    pub state: AtomicU64, // Offset 0
    /// The latest server tick corresponding to the data in the active buffer.
    pub tick: AtomicU64, // Offset 8
    /// Room bound (min X), stored as an `f32` bit pattern via `f32::to_bits`
    /// and read back with `f32::from_bits`; guarded by `room_bounds_seq`.
    pub room_min_x: core::sync::atomic::AtomicU32, // Offset 16
    /// Room bound (min Y); same encoding and guard as `room_min_x`.
    pub room_min_y: core::sync::atomic::AtomicU32, // Offset 20
    /// Room bound (max X); same encoding and guard as `room_min_x`.
    pub room_max_x: core::sync::atomic::AtomicU32, // Offset 24
    /// Room bound (max Y); same encoding and guard as `room_min_x`.
    pub room_max_y: core::sync::atomic::AtomicU32, // Offset 28
    /// Seqlock counter for room bounds. Odd = write in progress; even = stable.
    /// Writer bumps to odd before writing the four bounds fields, then to even (Release)
    /// after. Readers spin until they observe two equal even values around their reads.
    pub room_bounds_seq: core::sync::atomic::AtomicU32, // Offset 32
    /// Alignment padding to 40 bytes.
    pub pad: core::sync::atomic::AtomicU32, // Offset 36
}
84
/// Total size in bytes required for the compact replication layout.
/// 40 bytes (`SabHeader`) + 384 KiB (Buffer A) + 384 KiB (Buffer B).
/// Exact size is 786,472 bytes (~768 KiB); each buffer is
/// `48 * 8192 = 393,216` bytes.
pub const SHARED_MEMORY_SIZE: usize =
    core::mem::size_of::<SabHeader>() + (core::mem::size_of::<SabSlot>() * MAX_ENTITIES * 2);
90
/// Returns the size in bytes required for the shared world buffer.
///
/// Exported to JavaScript (wasm32 builds only) so the allocating side can
/// size the `SharedArrayBuffer` to exactly `SHARED_MEMORY_SIZE` bytes.
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn shared_world_size() -> usize {
    SHARED_MEMORY_SIZE
}
97
/// A lock-free double buffer for compact entity replication.
/// This points into a `SharedArrayBuffer` allocated by the Main Thread.
pub struct SharedWorld {
    // Base of the shared region: `SabHeader` followed by buffers A and B.
    ptr: *mut u8,
    // True only when `new()` allocated the region itself; `Drop` frees and
    // deregisters the memory only in that case.
    owns_memory: bool,
}
104
105impl SharedWorld {
106 /// Initializes the `SharedWorld` from a raw memory pointer.
107 ///
108 /// # Safety
109 /// The pointer must remain valid for the lifetime of this object and must
110 /// point to a region of at least `SHARED_MEMORY_SIZE` bytes.
111 pub unsafe fn from_ptr(ptr: *mut u8) -> Self {
112 Self {
113 ptr,
114 owns_memory: false,
115 }
116 }
117
118 /// Creates a new `SharedWorld` by allocating its own memory (fallback/local use).
119 #[allow(clippy::missing_panics_doc)]
120 #[must_use]
121 pub fn new() -> Self {
122 let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
123 .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
124 let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
125
126 if ptr.is_null() {
127 std::alloc::handle_alloc_error(layout);
128 }
129
130 // Register the pointer for JS-boundary validation
131 get_registry()
132 .lock()
133 .expect("Registry mutex poisoned")
134 .insert(ptr as usize);
135
136 Self {
137 ptr,
138 owns_memory: true,
139 }
140 }
141
142 /// Validates if a raw pointer was registered by a living `SharedWorld` instance.
143 #[allow(clippy::missing_panics_doc)]
144 #[must_use]
145 pub fn is_valid(ptr: *mut u8) -> bool {
146 get_registry()
147 .lock()
148 .expect("Registry mutex poisoned")
149 .contains(&(ptr as usize))
150 }
151
152 /// Returns the raw pointer to the base of the shared world buffer.
153 #[must_use]
154 pub fn as_ptr(&self) -> *mut u8 {
155 self.ptr
156 }
157
158 #[allow(clippy::cast_ptr_alignment)]
159 fn header(&self) -> &SabHeader {
160 unsafe { &*(self.ptr.cast::<SabHeader>()) }
161 }
162
163 /// Returns the active buffer index (0 or 1).
164 #[must_use]
165 pub fn active_index(&self) -> u32 {
166 (self.header().state.load(Ordering::Acquire) & 0xFFFF_FFFF) as u32
167 }
168
169 /// Returns the entity count for the active buffer.
170 #[must_use]
171 pub fn entity_count(&self) -> u32 {
172 (self.header().state.load(Ordering::Acquire) >> 32) as u32
173 }
174
175 /// Returns the server tick for the active buffer.
176 #[must_use]
177 pub fn tick(&self) -> u64 {
178 self.header().tick.load(Ordering::Acquire)
179 }
180
181 /// Returns a slice of the entities in the buffer index i.
182 #[allow(clippy::cast_ptr_alignment)]
183 fn get_buffer(&self, idx: usize) -> &[SabSlot] {
184 let offset = core::mem::size_of::<SabHeader>()
185 + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
186 unsafe { core::slice::from_raw_parts(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES) }
187 }
188
189 /// Returns a mutable slice of the entities in the buffer index i.
190 #[allow(clippy::cast_ptr_alignment)]
191 fn get_buffer_mut(&mut self, idx: usize) -> &mut [SabSlot] {
192 let offset = core::mem::size_of::<SabHeader>()
193 + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
194 unsafe {
195 core::slice::from_raw_parts_mut(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES)
196 }
197 }
198
199 /// Returns the entities currently visible to readers.
200 ///
201 /// Both the active buffer index and the entity count are derived from a single
202 /// atomic load, so readers always see a consistent pair.
203 #[must_use]
204 pub fn get_read_buffer(&self) -> &[SabSlot] {
205 let state = self.header().state.load(Ordering::Acquire);
206 let active = (state & 0xFFFF_FFFF) as usize;
207 let count = ((state >> 32) as usize).min(MAX_ENTITIES);
208 &self.get_buffer(active)[..count]
209 }
210
211 /// Returns the buffer currently available for writing (inactive buffer).
212 #[must_use]
213 pub fn get_write_buffer(&mut self) -> &mut [SabSlot] {
214 let active = self.active_index() as usize;
215 let inactive = 1 - active;
216 self.get_buffer_mut(inactive)
217 }
218
219 /// Swaps the active buffer and updates the entity count and tick.
220 pub fn commit_write(&mut self, entity_count: u32, tick: u64) {
221 let active = self.active_index();
222 let next_active = 1 - active;
223
224 // Store tick first; readers only use it for display, not for buffer selection.
225 self.header().tick.store(tick, Ordering::Release);
226
227 // Pack entity_count (high 32 bits) and next_active flip_bit (low 32 bits) into
228 // a single u64 and publish with one Release store. This guarantees that any
229 // reader that observes the new flip_bit also observes the matching entity_count,
230 // eliminating the TOCTOU window of the previous three-store sequence.
231 let packed = (u64::from(entity_count) << 32) | u64::from(next_active);
232 self.header().state.store(packed, Ordering::Release);
233 }
234
235 /// Updates the room bounds using a seqlock so readers always see a consistent
236 /// rectangle. The sequence number is bumped to an odd value before writing and
237 /// back to an even value (with `Release` ordering) after, matching the acquire
238 /// fence in `get_room_bounds`.
239 pub fn set_room_bounds(&mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) {
240 let h = self.header();
241 let seq = h.room_bounds_seq.load(Ordering::Relaxed);
242 // Mark write in progress: odd sequence number.
243 h.room_bounds_seq
244 .store(seq.wrapping_add(1), Ordering::Relaxed);
245 core::sync::atomic::fence(Ordering::Release);
246 h.room_min_x.store(min_x.to_bits(), Ordering::Relaxed);
247 h.room_min_y.store(min_y.to_bits(), Ordering::Relaxed);
248 h.room_max_x.store(max_x.to_bits(), Ordering::Relaxed);
249 h.room_max_y.store(max_y.to_bits(), Ordering::Relaxed);
250 // Mark write complete: even sequence number, visible to readers.
251 h.room_bounds_seq
252 .store(seq.wrapping_add(2), Ordering::Release);
253 }
254
255 /// Reads the room bounds, retrying if a concurrent write is detected via the
256 /// seqlock. Guaranteed to return a consistent (non-torn) rectangle.
257 #[must_use]
258 pub fn get_room_bounds(&self) -> (f32, f32, f32, f32) {
259 let h = self.header();
260 loop {
261 let seq1 = h.room_bounds_seq.load(Ordering::Acquire);
262 if seq1 & 1 != 0 {
263 // Write in progress — spin.
264 core::hint::spin_loop();
265 continue;
266 }
267 let min_x = f32::from_bits(h.room_min_x.load(Ordering::Relaxed));
268 let min_y = f32::from_bits(h.room_min_y.load(Ordering::Relaxed));
269 let max_x = f32::from_bits(h.room_max_x.load(Ordering::Relaxed));
270 let max_y = f32::from_bits(h.room_max_y.load(Ordering::Relaxed));
271 core::sync::atomic::fence(Ordering::Acquire);
272 let seq2 = h.room_bounds_seq.load(Ordering::Relaxed);
273 if seq1 == seq2 {
274 return (min_x, min_y, max_x, max_y);
275 }
276 // Torn read — retry.
277 core::hint::spin_loop();
278 }
279 }
280}
281
282impl Drop for SharedWorld {
283 #[allow(clippy::missing_panics_doc)]
284 fn drop(&mut self) {
285 if self.owns_memory {
286 if let Ok(mut reg) = get_registry().lock() {
287 reg.remove(&(self.ptr as usize));
288 }
289
290 let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
291 .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
292
293 unsafe { std::alloc::dealloc(self.ptr, layout) };
294 }
295 }
296}
297
298impl Default for SharedWorld {
299 fn default() -> Self {
300 Self::new()
301 }
302}