// vyre_foundation/runtime/memory_model.rs

//! Substrate-neutral memory model contracts.

3/// Memory ordering attached to atomic and barrier operations.
4#[non_exhaustive]
5#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, serde::Deserialize, serde::Serialize)]
6pub enum MemoryOrdering {
7    /// No synchronization beyond atomicity of the operation.
8    Relaxed,
9    /// Subsequent reads observe writes released by another participant.
10    Acquire,
11    /// Prior writes become visible to acquiring participants.
12    Release,
13    /// Acquire and release semantics in one operation.
14    AcqRel,
15    /// Single total order across sequentially consistent operations
16    /// within the issuing thread's workgroup.
17    SeqCst,
18    /// Cross-grid synchronization. Every thread in the dispatch waits
19    /// here, and every prior write is globally visible after the
20    /// barrier returns. This is strictly stronger than `SeqCst`, which
21    /// only synchronizes within a workgroup. `GridSync` is required
22    /// when a fused kernel has an arm with divergent stores
23    /// (e.g. `if invocation_id == K { store ... }`) followed by an arm
24    /// that reads what was stored — without grid-level sync, threads
25    /// in non-K blocks observe stale state. Backends that lack a
26    /// native grid barrier (workgroup-only fences, no cooperative
27    /// launch) must lower this to a kernel-split: emit two separate
28    /// dispatches that share the underlying buffers.
29    GridSync,
30}
31
32impl MemoryOrdering {
33    /// Stable wire tag for this ordering.
34    #[must_use]
35    #[inline]
36    pub const fn wire_tag(self) -> u8 {
37        match self {
38            Self::Relaxed => 0,
39            Self::Acquire => 1,
40            Self::Release => 2,
41            Self::AcqRel => 3,
42            Self::SeqCst => 4,
43            Self::GridSync => 5,
44        }
45    }
46
47    /// Decode a stable wire tag.
48    ///
49    /// # Errors
50    ///
51    /// Returns an actionable error when `tag` is not assigned to a memory
52    /// ordering in this schema.
53    #[inline]
54    pub fn from_wire_tag(tag: u8) -> Result<Self, String> {
55        match tag {
56            0 => Ok(Self::Relaxed),
57            1 => Ok(Self::Acquire),
58            2 => Ok(Self::Release),
59            3 => Ok(Self::AcqRel),
60            4 => Ok(Self::SeqCst),
61            5 => Ok(Self::GridSync),
62            other => Err(format!(
63                "InvalidDiscriminant: memory ordering tag {other} is unknown. Fix: reserialize with a compatible VYRE wire schema."
64            )),
65        }
66    }
67
68    /// Whether this ordering is valid for an atomic RMW operation.
69    /// `GridSync` is barrier-only and not a valid atomic ordering.
70    #[must_use]
71    #[inline]
72    pub const fn is_valid_for_atomic_rmw(self) -> bool {
73        matches!(
74            self,
75            Self::Relaxed | Self::Acquire | Self::Release | Self::AcqRel | Self::SeqCst
76        )
77    }
78
79    /// Whether this ordering is valid for a barrier.
80    #[must_use]
81    #[inline]
82    pub const fn is_valid_for_barrier(self) -> bool {
83        matches!(
84            self,
85            Self::Acquire | Self::Release | Self::AcqRel | Self::SeqCst | Self::GridSync
86        )
87    }
88
89    /// Whether this ordering requires cross-grid synchronization.
90    /// Backends with a native grid barrier emit one instruction; backends
91    /// without must split the kernel.
92    #[must_use]
93    #[inline]
94    pub const fn requires_grid_sync(self) -> bool {
95        matches!(self, Self::GridSync)
96    }
97}
98
99impl Default for MemoryOrdering {
100    #[inline]
101    fn default() -> Self {
102        Self::SeqCst
103    }
104}
105
#[cfg(test)]
mod tests {
    use super::*;

    /// All six variants in declaration order. Used by the exhaustive
    /// tests below; must be kept in sync with the enum declaration.
    const ALL: [MemoryOrdering; 6] = [
        MemoryOrdering::Relaxed,
        MemoryOrdering::Acquire,
        MemoryOrdering::Release,
        MemoryOrdering::AcqRel,
        MemoryOrdering::SeqCst,
        MemoryOrdering::GridSync,
    ];

    #[test]
    fn default_is_seq_cst() {
        assert_eq!(MemoryOrdering::default(), MemoryOrdering::SeqCst);
    }

    #[test]
    fn all_variants_are_distinct() {
        for i in 0..ALL.len() {
            for j in (i + 1)..ALL.len() {
                assert_ne!(ALL[i], ALL[j]);
            }
        }
    }

    #[test]
    fn wire_tags_are_dense_and_round_trip() {
        // Wire tags must stay stable and dense: 0..=5 in declaration
        // order, and every variant must survive encode -> decode.
        for (expected_tag, variant) in ALL.iter().copied().enumerate() {
            assert_eq!(variant.wire_tag() as usize, expected_tag);
            assert_eq!(
                MemoryOrdering::from_wire_tag(variant.wire_tag()).unwrap(),
                variant
            );
        }
    }

    #[test]
    fn unknown_tags_are_rejected_with_actionable_error() {
        // Every unassigned tag is an error, and the error names both the
        // failure class and the offending tag.
        for tag in 6..=u8::MAX {
            let err = MemoryOrdering::from_wire_tag(tag).unwrap_err();
            assert!(err.contains("InvalidDiscriminant"), "missing class: {err}");
            assert!(err.contains(&tag.to_string()), "missing tag: {err}");
        }
    }

    #[test]
    fn grid_sync_round_trips() {
        let tag = MemoryOrdering::GridSync.wire_tag();
        assert_eq!(tag, 5);
        assert_eq!(
            MemoryOrdering::from_wire_tag(tag).unwrap(),
            MemoryOrdering::GridSync
        );
        assert!(MemoryOrdering::GridSync.is_valid_for_barrier());
        assert!(!MemoryOrdering::GridSync.is_valid_for_atomic_rmw());
        assert!(MemoryOrdering::GridSync.requires_grid_sync());
        assert!(!MemoryOrdering::SeqCst.requires_grid_sync());
    }

    #[test]
    fn validity_predicates_partition_consistently() {
        for v in ALL {
            // Every ordering is usable somewhere: for an atomic RMW, a
            // barrier, or both.
            assert!(v.is_valid_for_atomic_rmw() || v.is_valid_for_barrier());
            // requires_grid_sync is true exactly for GridSync.
            assert_eq!(v.requires_grid_sync(), v == MemoryOrdering::GridSync);
        }
        // GridSync is barrier-only; Relaxed is RMW-only.
        assert!(!MemoryOrdering::GridSync.is_valid_for_atomic_rmw());
        assert!(!MemoryOrdering::Relaxed.is_valid_for_barrier());
    }

    #[test]
    fn clone_eq() {
        let a = MemoryOrdering::AcqRel;
        let b = a;
        assert_eq!(a, b);
    }
}