// hopper_runtime/segment.rs
1//! Runtime-local segment primitive.
2//!
3//! `Segment` is the tiny memory-contract descriptor that every segment
4//! access routes through: `{offset, size}`, 8 bytes on 32-bit accounts,
5//! `Copy`, `const`-constructable, no strings, no extra fields. It is the
6//! runtime counterpart to `hopper_core::segment_map::StaticSegment`
//! (which carries a human-readable name for tooling); the runtime
//! never needs the name, so this primitive stays bare.
9//!
10//! # Design
11//!
12//! The finish-line audit was explicit: segment access must be
13//! compile-time enforced and runtime cheap. Every Hopper segment
14//! accessor should eventually lower to `ptr + const_offset -> cast`
15//! and nothing more. Using this primitive means:
16//!
17//! - macros emit `const BALANCE: Segment = Segment::body(0, 8);`
18//! - call sites read `account.segment_mut_const::<u64>(&mut b, BALANCE)?`
19//! - the compiler substitutes the constant, collapses the call chain,
20//! and on Solana SBF you see one register-add over `data_ptr`.
21//!
//! `Segment` never appears in an on-chain layout; it is a compile-time
23//! description only. Use `hopper_core::account::SegmentDescriptor` for
24//! bytes that travel on the wire.
25
26use crate::layout::HopperHeader;
27
/// Compile-time descriptor of a typed byte range inside an account.
///
/// Fields are `u32` because every Solana account is bounded by
/// `u32::MAX` in practice and we want the whole primitive to fit in a
/// single 64-bit register. `Copy` + `const`-constructable so macros can
/// emit these as `const` items with zero runtime cost.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct Segment {
    /// Absolute byte offset from the start of account data (i.e. it
    /// already includes the `HopperHeader::SIZE`-byte Hopper header).
    /// Stored absolute — rather than body-relative — because that is the
    /// form the access primitives consume, so no runtime addition is
    /// ever needed at the call site.
    pub offset: u32,
    /// Byte size of the segment.
    pub size: u32,
}
43
44impl Segment {
45 /// Construct a segment from an absolute offset (measured from the
46 /// start of account data, including the Hopper header).
47 #[inline(always)]
48 pub const fn new(offset: u32, size: u32) -> Self {
49 Self { offset, size }
50 }
51
52 /// Construct a segment from a body-relative offset (offset measured
53 /// past the 16-byte Hopper header). This is the form that macros
54 /// most often emit: `#[hopper::state]` computes field offsets
55 /// relative to the struct body, and body-relative is what
56 /// `SegmentMap::SEGMENTS` stores.
57 #[inline(always)]
58 pub const fn body(body_offset: u32, size: u32) -> Self {
59 Self {
60 offset: HopperHeader::SIZE as u32 + body_offset,
61 size,
62 }
63 }
64
65 /// One-past-the-end byte offset.
66 #[inline(always)]
67 pub const fn end(&self) -> u32 {
68 self.offset + self.size
69 }
70
71 /// Whether two segments share any byte.
72 #[inline(always)]
73 pub const fn overlaps(&self, other: &Segment) -> bool {
74 self.offset < other.end() && other.offset < self.end()
75 }
76
77 /// Whether this segment is contained fully within `container`.
78 #[inline(always)]
79 pub const fn contained_in(&self, container: &Segment) -> bool {
80 self.offset >= container.offset && self.end() <= container.end()
81 }
82}
83
84// ══════════════════════════════════════════════════════════════════════
85// TypedSegment<T, const OFFSET: u32>
86// ══════════════════════════════════════════════════════════════════════
87//
88// Where `Segment` carries `(offset, size)` at runtime, `TypedSegment`
89// folds **both** values into the type system: `T` determines the size
90// via `size_of::<T>()`, and `OFFSET` is a const generic. The struct
91// itself is a ZST, no memory at all. This is the finish-line audit's
92// "const-generic segments & compile-time offsets" innovation: at every
93// call site the compiler substitutes the literal offset and literal
94// size into the bounds check + pointer add, leaving pure
95// `ptr + constant` arithmetic in the emitted BPF.
96//
97// Use `TypedSegment` when you know the layout at compile time (i.e.
98// every `#[hopper::state]` field). Fall back to `Segment` when the
99// offset is data-dependent (e.g. a user-provided index into a fixed
100// array).
101
102/// Compile-time typed segment descriptor: `T` is the overlay type,
103/// `OFFSET` is the absolute byte offset from the start of account
104/// data. Zero-sized.
105///
106/// ```ignore
107/// // Matches Vault.balance at body offset 0, past the 16-byte header:
108/// const VAULT_BALANCE: TypedSegment<WireU64, { HopperHeader::SIZE as u32 }>
109/// = TypedSegment::new();
110///
111/// let bal = account.segment_ref_typed(&mut borrows, VAULT_BALANCE)?;
112/// ```
113#[derive(Copy, Clone, Debug, Default)]
114pub struct TypedSegment<T: crate::Pod, const OFFSET: u32> {
115 _marker: core::marker::PhantomData<fn() -> T>,
116}
117
118impl<T: crate::Pod, const OFFSET: u32> TypedSegment<T, OFFSET> {
119 /// Construct the marker. Runs entirely at compile time.
120 #[inline(always)]
121 pub const fn new() -> Self {
122 Self { _marker: core::marker::PhantomData }
123 }
124
125 /// The absolute byte offset of this segment (`OFFSET` const-generic).
126 #[inline(always)]
127 pub const fn offset() -> u32 {
128 OFFSET
129 }
130
131 /// The byte size of this segment (`size_of::<T>()`, folded at compile time).
132 #[inline(always)]
133 pub const fn size() -> u32 {
134 core::mem::size_of::<T>() as u32
135 }
136
137 /// One-past-the-end byte offset.
138 #[inline(always)]
139 pub const fn end() -> u32 {
140 OFFSET + core::mem::size_of::<T>() as u32
141 }
142
143 /// Lower to a runtime [`Segment`] when a heterogeneous collection
144 /// of segments is needed (e.g. a validation pass that iterates).
145 #[inline(always)]
146 pub const fn as_segment() -> Segment {
147 Segment::new(OFFSET, core::mem::size_of::<T>() as u32)
148 }
149}
150
// Compile-time proof that `TypedSegment` really is zero-sized: the
// build fails here if a sized field is ever added. (This is a plain
// const assertion, not an `unsafe` invariant — no `// SAFETY:` needed.)
const _: () = {
    assert!(
        core::mem::size_of::<TypedSegment<u64, 0>>() == 0,
        "TypedSegment must be zero-sized so it costs nothing to pass around",
    );
};
158
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn typed_segment_is_zero_sized() {
        assert_eq!(core::mem::size_of::<TypedSegment<u64, 16>>(), 0);
    }

    #[test]
    fn typed_segment_offset_and_size_fold() {
        // The const constructor must be usable in const context.
        const S: TypedSegment<u64, 16> = TypedSegment::new();
        let _ = S;
        // All three accessors read straight off the type parameters.
        assert_eq!(TypedSegment::<u64, 16>::offset(), 16);
        assert_eq!(TypedSegment::<u64, 16>::size(), 8);
        assert_eq!(TypedSegment::<u64, 16>::end(), 24);
    }

    #[test]
    fn typed_segment_lowers_to_runtime_segment() {
        const LOWERED: Segment = TypedSegment::<u64, 16>::as_segment();
        assert_eq!((LOWERED.offset, LOWERED.size), (16, 8));
    }

    #[test]
    fn body_adds_header() {
        let header = HopperHeader::SIZE as u32;
        let seg = Segment::body(0, 8);
        assert_eq!(seg.offset, header);
        assert_eq!(seg.size, 8);
        assert_eq!(seg.end(), header + 8);
    }

    #[test]
    fn overlaps_detects_shared_bytes() {
        let lo = Segment::new(0, 16);
        let mid = Segment::new(8, 16);
        let hi = Segment::new(16, 16);
        assert!(lo.overlaps(&mid));
        assert!(mid.overlaps(&hi));
        // Adjacent ranges touch but share no byte.
        assert!(!lo.overlaps(&hi));
    }

    #[test]
    fn contained_in_reports_proper_nesting() {
        let outer = Segment::new(0, 32);
        assert!(Segment::new(8, 8).contained_in(&outer));
        // An identical range counts as contained.
        assert!(Segment::new(0, 32).contained_in(&outer));
        // A range running past the end does not.
        assert!(!Segment::new(24, 16).contained_in(&outer));
    }
}
213}