Skip to main content

aetheris_client_wasm/
shared_world.rs

//! Lock-free double-buffered compact replication layout.
//!
//! This module implements the "Shared World" logic which allows the Game Worker
//! to write authoritative updates while the Render Worker reads a stable snapshot
//! without blocking, satisfying the zero-cost synchronization requirement of M360.
6
7use bytemuck::{Pod, Zeroable};
8use core::sync::atomic::{AtomicU64, Ordering};
9use std::collections::HashSet;
10use std::sync::{Mutex, OnceLock};
11
static VALID_POINTERS: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();

/// Returns the process-wide registry of base pointers currently owned by
/// living `SharedWorld` instances, lazily initializing it on first access.
fn get_registry() -> &'static Mutex<HashSet<usize>> {
    VALID_POINTERS.get_or_init(Mutex::default)
}
17
/// Maximum number of entities supported per buffer of the compact shared layout.
/// Two buffers are allocated (double buffering), so total slots = 16,384
/// (8,192 per buffer).
pub const MAX_ENTITIES: usize = 8192;
21
/// A single replicated entity state in the compact shared buffer (48 bytes).
/// Optimized for Void Rush (2D gameplay with 3D elevation).
///
/// `#[repr(C)]` fixes the field layout so the JS side can index the buffer by
/// byte offset; the trailing explicit `padding` field keeps the struct free of
/// implicit padding, which the `Pod` derive requires, and rounds the size to a
/// multiple of the 8-byte alignment imposed by `network_id`.
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
#[repr(C)]
pub struct SabSlot {
    /// Network-wide unique entity identifier.
    pub network_id: u64, // Offset 0, size 8
    /// World-space position X.
    pub x: f32, // Offset 8, size 4
    /// World-space position Y.
    pub y: f32, // Offset 12, size 4
    /// World-space position Z.
    pub z: f32, // Offset 16, size 4
    /// Orientation yaw (rotation around Z axis).
    pub rotation: f32, // Offset 20, size 4
    /// Velocity vector X.
    pub dx: f32, // Offset 24, size 4
    /// Velocity vector Y.
    pub dy: f32, // Offset 28, size 4
    /// Velocity vector Z.
    pub dz: f32, // Offset 32, size 4
    /// Current health points.
    pub hp: u16, // Offset 36, size 2
    /// Current shield points.
    pub shield: u16, // Offset 38, size 2
    /// Entity type identifier.
    pub entity_type: u16, // Offset 40, size 2
    /// Bitfield flags (Alive: 0, Visible: 1, `LocalPlayer`: 2, Interpolate: 3, ...).
    pub flags: u8, // Offset 42, size 1
    /// Explicit padding for 48-byte total size and 8-byte alignment.
    pub padding: [u8; 5], // Offset 43, size 5
}
54
/// The header for the `SharedArrayBuffer`.
///
/// `state` packs `entity_count` (high 32 bits) and `flip_bit` (low 32 bits) into a
/// single `AtomicU64` so that readers always observe a consistent pair with a single
/// acquire load, eliminating the TOCTOU window that existed when they were separate
/// `AtomicU32` fields.
#[derive(Debug)]
#[repr(C)]
pub struct SabHeader {
    /// Packed atomic state: `high 32 bits = entity_count`, `low 32 bits = flip_bit` (0 or 1).
    /// Updated with a single `Release` store in `commit_write`.
    pub state: AtomicU64, // Offset 0
    /// The latest server tick corresponding to the data in the active buffer.
    /// Stored before `state` in `commit_write`; used for display only, not for
    /// buffer selection.
    pub tick: AtomicU64, // Offset 8
}
70
/// Total size in bytes required for the compact replication layout:
/// 16 bytes (`SabHeader`) + 2 buffers × 8,192 slots × 48 bytes.
/// That is 768 KiB of slot data plus the 16-byte header — exactly 786,448 bytes
/// (documentation elsewhere rounds this to "768 KiB").
pub const SHARED_MEMORY_SIZE: usize =
    core::mem::size_of::<SabHeader>() + (core::mem::size_of::<SabSlot>() * MAX_ENTITIES * 2);
76
/// Returns the size in bytes required for the shared world buffer.
///
/// Exported to JavaScript (wasm32 builds only) so the Main Thread can allocate
/// a `SharedArrayBuffer` of exactly `SHARED_MEMORY_SIZE` bytes before handing
/// the pointer to the workers.
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn shared_world_size() -> usize {
    SHARED_MEMORY_SIZE
}
83
/// A lock-free double buffer for compact entity replication.
/// This points into a `SharedArrayBuffer` allocated by the Main Thread.
///
/// NOTE(review): holding a raw `*mut u8` makes this type `!Send`/`!Sync`;
/// presumably each worker constructs its own view over the same memory via
/// `from_ptr` — confirm against the worker setup code.
pub struct SharedWorld {
    /// Base address of the shared region (at least `SHARED_MEMORY_SIZE` bytes).
    ptr: *mut u8,
    /// `true` only for instances created by `new()`, which deallocate (and
    /// deregister) the region on drop; `false` for `from_ptr` views.
    owns_memory: bool,
}
90
impl SharedWorld {
    /// Initializes the `SharedWorld` from a raw memory pointer.
    ///
    /// Unlike [`SharedWorld::new`], this does NOT register `ptr` in the
    /// validity registry, so [`SharedWorld::is_valid`] returns `false` for it.
    ///
    /// # Safety
    /// The pointer must remain valid for the lifetime of this object and must
    /// point to a region of at least `SHARED_MEMORY_SIZE` bytes. It must also
    /// be at least 8-byte aligned: the header is read through a `&SabHeader`
    /// containing `AtomicU64` fields.
    pub unsafe fn from_ptr(ptr: *mut u8) -> Self {
        Self {
            ptr,
            owns_memory: false,
        }
    }

    /// Creates a new `SharedWorld` by allocating its own memory (fallback/local use).
    ///
    /// The region is zero-initialized, so the packed header starts out as
    /// `entity_count = 0`, `flip_bit = 0`, `tick = 0`.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn new() -> Self {
        let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
            .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
        // SAFETY: `layout` has non-zero size; a zeroed allocation doubles as
        // header initialization (all atomics read as 0).
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };

        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }

        // Register the pointer for JS-boundary validation
        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .insert(ptr as usize);

        Self {
            ptr,
            owns_memory: true,
        }
    }

    /// Validates if a raw pointer was registered by a living `SharedWorld` instance.
    ///
    /// Only pointers allocated via [`SharedWorld::new`] are tracked; views
    /// created with [`SharedWorld::from_ptr`] are never registered.
    #[allow(clippy::missing_panics_doc)]
    #[must_use]
    pub fn is_valid(ptr: *mut u8) -> bool {
        get_registry()
            .lock()
            .expect("Registry mutex poisoned")
            .contains(&(ptr as usize))
    }

    /// Returns the raw pointer to the base of the shared world buffer.
    #[must_use]
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr
    }

    /// Borrows the `SabHeader` that lives at offset 0 of the region.
    #[allow(clippy::cast_ptr_alignment)]
    fn header(&self) -> &SabHeader {
        // SAFETY: the constructor contracts guarantee `ptr` addresses at least
        // `SHARED_MEMORY_SIZE` bytes and is 8-byte aligned (`new` enforces it
        // via `Layout`; `from_ptr` callers must uphold it).
        unsafe { &*(self.ptr.cast::<SabHeader>()) }
    }

    /// Returns the active buffer index (0 or 1).
    #[must_use]
    pub fn active_index(&self) -> u32 {
        // flip_bit lives in the low 32 bits of the packed state word.
        (self.header().state.load(Ordering::Acquire) & 0xFFFF_FFFF) as u32
    }

    /// Returns the entity count for the active buffer.
    #[must_use]
    pub fn entity_count(&self) -> u32 {
        // entity_count lives in the high 32 bits of the packed state word.
        (self.header().state.load(Ordering::Acquire) >> 32) as u32
    }

    /// Returns the server tick for the active buffer.
    #[must_use]
    pub fn tick(&self) -> u64 {
        self.header().tick.load(Ordering::Acquire)
    }

    /// Returns a slice of the entities in the buffer index i.
    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer(&self, idx: usize) -> &[SabSlot] {
        // The two buffers are laid out back-to-back immediately after the header.
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        // SAFETY: for idx in {0, 1} the addressed range lies entirely within
        // the `SHARED_MEMORY_SIZE` region; `SabSlot` is `Pod`, so every byte
        // pattern is a valid value, and the offset preserves 8-byte alignment
        // (header is 16 bytes, slot stride is 48).
        unsafe { core::slice::from_raw_parts(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES) }
    }

    /// Returns a mutable slice of the entities in the buffer index i.
    #[allow(clippy::cast_ptr_alignment)]
    fn get_buffer_mut(&mut self, idx: usize) -> &mut [SabSlot] {
        let offset = core::mem::size_of::<SabHeader>()
            + (idx * MAX_ENTITIES * core::mem::size_of::<SabSlot>());
        // SAFETY: same bounds/alignment argument as `get_buffer`; `&mut self`
        // prevents aliasing mutable slices from this instance. NOTE(review):
        // cross-worker aliasing through other views of the same memory is not
        // prevented by the type system — relies on the flip-bit protocol.
        unsafe {
            core::slice::from_raw_parts_mut(self.ptr.add(offset).cast::<SabSlot>(), MAX_ENTITIES)
        }
    }

    /// Returns the entities currently visible to readers.
    ///
    /// Both the active buffer index and the entity count are derived from a single
    /// atomic load, so readers always see a consistent pair.
    #[must_use]
    pub fn get_read_buffer(&self) -> &[SabSlot] {
        let state = self.header().state.load(Ordering::Acquire);
        let active = (state & 0xFFFF_FFFF) as usize;
        // Defensive clamp: a corrupt count can never index past the buffer.
        let count = ((state >> 32) as usize).min(MAX_ENTITIES);
        &self.get_buffer(active)[..count]
    }

    /// Returns the buffer currently available for writing (inactive buffer).
    ///
    /// NOTE(review): assumes a single writer; if two writers could race between
    /// this call and `commit_write`, both would target the same buffer — confirm
    /// the Game Worker is the only writer.
    #[must_use]
    pub fn get_write_buffer(&mut self) -> &mut [SabSlot] {
        let active = self.active_index() as usize;
        let inactive = 1 - active;
        self.get_buffer_mut(inactive)
    }

    /// Swaps the active buffer and updates the entity count and tick.
    ///
    /// `entity_count` is the number of valid slots the writer filled in the
    /// (previously inactive) buffer being published; `tick` is the server tick
    /// those slots correspond to.
    pub fn commit_write(&mut self, entity_count: u32, tick: u64) {
        let active = self.active_index();
        let next_active = 1 - active;

        // Store tick first; readers only use it for display, not for buffer selection.
        self.header().tick.store(tick, Ordering::Release);

        // Pack entity_count (high 32 bits) and next_active flip_bit (low 32 bits) into
        // a single u64 and publish with one Release store. This guarantees that any
        // reader that observes the new flip_bit also observes the matching entity_count,
        // eliminating the TOCTOU window of the previous three-store sequence.
        let packed = (u64::from(entity_count) << 32) | u64::from(next_active);
        self.header().state.store(packed, Ordering::Release);
    }
}
221
222impl Drop for SharedWorld {
223    #[allow(clippy::missing_panics_doc)]
224    fn drop(&mut self) {
225        if self.owns_memory {
226            if let Ok(mut reg) = get_registry().lock() {
227                reg.remove(&(self.ptr as usize));
228            }
229
230            let layout = core::alloc::Layout::from_size_align(SHARED_MEMORY_SIZE, 8)
231                .expect("Invalid SHARED_MEMORY_SIZE or alignment constants");
232
233            unsafe { std::alloc::dealloc(self.ptr, layout) };
234        }
235    }
236}
237
impl Default for SharedWorld {
    /// Equivalent to [`SharedWorld::new`]: allocates and owns a zeroed region.
    fn default() -> Self {
        Self::new()
    }
}