// hopper_core/account/segment.rs
//! Segment table and segment slices for variable-length accounts.
//!
//! A segmented account has:
//! ```text
//! [Fixed Prefix][Segment Table][Segment 0 Data][Segment 1 Data]...
//! ```
//!
//! Each segment descriptor is 12 bytes:
//! ```text
//! [offset: u32][count: u16][capacity: u16][element_size: u16][flags: u16]
//! ```

use super::pod::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;
15
/// Size of one segment descriptor in bytes:
/// offset(4) + count(2) + capacity(2) + element_size(2) + flags(2).
pub const SEGMENT_DESC_SIZE: usize = 12;

/// Maximum number of segments per account.
///
/// NOTE(review): declared but not enforced anywhere in this file — confirm
/// that the table constructors (or their callers) actually cap segment counts.
pub const MAX_SEGMENTS: usize = 256;
21
/// A single segment descriptor.
///
/// Fields are raw little-endian byte arrays so the struct has size 12 and
/// alignment 1, making it safe to overlay on account data at any byte
/// offset (see the const assertions below, which the raw-pointer casts in
/// `SegmentTable`/`SegmentTableMut` rely on).
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct SegmentDescriptor {
    // Byte offset of this segment's data region within the account (LE u32).
    offset_bytes: [u8; 4],
    // Current element count (LE u16).
    count_bytes: [u8; 2],
    // Maximum element capacity (LE u16).
    capacity_bytes: [u8; 2],
    // Size of each element in bytes (LE u16).
    element_size_bytes: [u8; 2],
    // Flag bits; semantics are caller-defined (LE u16).
    flags_bytes: [u8; 2],
}

// Compile-time layout proof: size and alignment are load-bearing for the
// unsafe descriptor casts elsewhere in this module.
const _: () = assert!(core::mem::size_of::<SegmentDescriptor>() == SEGMENT_DESC_SIZE);
const _: () = assert!(core::mem::align_of::<SegmentDescriptor>() == 1);

// Bytemuck proof (Hopper Safety Audit Must-Fix #5).
#[cfg(feature = "hopper-native-backend")]
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Zeroable for SegmentDescriptor {}
#[cfg(feature = "hopper-native-backend")]
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Pod for SegmentDescriptor {}

// SAFETY: All fields are [u8; N], all bit patterns valid.
unsafe impl Pod for SegmentDescriptor {}
// Audit Step 5 seal: Hopper-authored primitive.
unsafe impl ::hopper_runtime::__sealed::HopperZeroCopySealed for SegmentDescriptor {}

impl FixedLayout for SegmentDescriptor {
    // Must stay in lockstep with the size_of assertion above.
    const SIZE: usize = SEGMENT_DESC_SIZE;
}
50
51impl SegmentDescriptor {
52    /// Data region byte offset within the account.
53    #[inline(always)]
54    pub fn offset(&self) -> u32 {
55        u32::from_le_bytes(self.offset_bytes)
56    }
57
58    /// Current element count.
59    #[inline(always)]
60    pub fn count(&self) -> u16 {
61        u16::from_le_bytes(self.count_bytes)
62    }
63
64    /// Maximum element capacity.
65    #[inline(always)]
66    pub fn capacity(&self) -> u16 {
67        u16::from_le_bytes(self.capacity_bytes)
68    }
69
70    /// Size of each element in bytes.
71    #[inline(always)]
72    pub fn element_size(&self) -> u16 {
73        u16::from_le_bytes(self.element_size_bytes)
74    }
75
76    /// Flags.
77    #[inline(always)]
78    pub fn flags(&self) -> u16 {
79        u16::from_le_bytes(self.flags_bytes)
80    }
81
82    /// Whether the segment is at capacity.
83    #[inline(always)]
84    pub fn is_full(&self) -> bool {
85        self.count() >= self.capacity()
86    }
87
88    /// Total data bytes used by this segment (count * element_size).
89    #[inline(always)]
90    pub fn data_len(&self) -> usize {
91        (self.count() as usize) * (self.element_size() as usize)
92    }
93
94    /// Total data bytes allocated (capacity * element_size).
95    #[inline(always)]
96    pub fn allocated_len(&self) -> usize {
97        (self.capacity() as usize) * (self.element_size() as usize)
98    }
99
100    /// Set the count.
101    #[inline(always)]
102    pub fn set_count(&mut self, count: u16) {
103        self.count_bytes = count.to_le_bytes();
104    }
105
106    /// Set the offset.
107    #[inline(always)]
108    pub fn set_offset(&mut self, offset: u32) {
109        self.offset_bytes = offset.to_le_bytes();
110    }
111
112    /// Set the capacity.
113    #[inline(always)]
114    pub fn set_capacity(&mut self, capacity: u16) {
115        self.capacity_bytes = capacity.to_le_bytes();
116    }
117
118    /// Set the element size.
119    #[inline(always)]
120    pub fn set_element_size(&mut self, size: u16) {
121        self.element_size_bytes = size.to_le_bytes();
122    }
123}
124
/// Read-only segment table.
pub struct SegmentTable<'a> {
    // Raw bytes starting at the table; may extend past the table itself.
    data: &'a [u8],
    // Number of descriptors; validated against `data.len()` at construction.
    count: usize,
}
130
131impl<'a> SegmentTable<'a> {
132    /// Parse a segment table from raw bytes.
133    #[inline]
134    pub fn from_bytes(data: &'a [u8], count: usize) -> Result<Self, ProgramError> {
135        if data.len() < count * SEGMENT_DESC_SIZE {
136            return Err(ProgramError::AccountDataTooSmall);
137        }
138        Ok(Self { data, count })
139    }
140
141    /// Number of segments.
142    #[inline(always)]
143    pub fn segment_count(&self) -> usize {
144        self.count
145    }
146
147    /// Get a descriptor by index.
148    #[inline(always)]
149    pub fn descriptor(&self, index: usize) -> Result<&SegmentDescriptor, ProgramError> {
150        if index >= self.count {
151            return Err(ProgramError::InvalidArgument);
152        }
153        let offset = index * SEGMENT_DESC_SIZE;
154        // SAFETY: Bounds checked. SegmentDescriptor is alignment-1.
155        Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const SegmentDescriptor) })
156    }
157}
158
/// Mutable segment table.
pub struct SegmentTableMut<'a> {
    // Raw bytes starting at the table; may extend past the table itself.
    data: &'a mut [u8],
    // Number of descriptors; validated against `data.len()` at construction.
    count: usize,
}
164
165impl<'a> SegmentTableMut<'a> {
166    /// Parse a mutable segment table from raw bytes.
167    #[inline]
168    pub fn from_bytes_mut(data: &'a mut [u8], count: usize) -> Result<Self, ProgramError> {
169        if data.len() < count * SEGMENT_DESC_SIZE {
170            return Err(ProgramError::AccountDataTooSmall);
171        }
172        Ok(Self { data, count })
173    }
174
175    /// Get a mutable descriptor by index.
176    #[inline(always)]
177    pub fn descriptor_mut(&mut self, index: usize) -> Result<&mut SegmentDescriptor, ProgramError> {
178        if index >= self.count {
179            return Err(ProgramError::InvalidArgument);
180        }
181        let offset = index * SEGMENT_DESC_SIZE;
182        // SAFETY: Bounds checked. SegmentDescriptor is alignment-1. Exclusive access.
183        Ok(unsafe { &mut *(self.data.as_mut_ptr().add(offset) as *mut SegmentDescriptor) })
184    }
185
186    /// Initialize segment descriptors with given specifications.
187    ///
188    /// `specs` is `(element_size, count, capacity)` per segment.
189    /// `data_start` is the byte offset where segment data begins.
190    #[inline]
191    pub fn init(&mut self, data_start: u32, specs: &[(u16, u16, u16)]) -> Result<(), ProgramError> {
192        if specs.len() > self.count {
193            return Err(ProgramError::InvalidArgument);
194        }
195
196        let mut current_offset = data_start;
197        for (i, &(element_size, count, capacity)) in specs.iter().enumerate() {
198            let desc = self.descriptor_mut(i)?;
199            desc.set_offset(current_offset);
200            desc.set_count(count);
201            desc.set_capacity(capacity);
202            desc.set_element_size(element_size);
203            current_offset += (capacity as u32) * (element_size as u32);
204        }
205        Ok(())
206    }
207}
208
/// Type-safe immutable slice over a segment's elements.
pub struct SegmentSlice<'a, T: Pod + FixedLayout> {
    // Account bytes starting at the segment's data offset; the slice runs
    // to the end of the account data, not just to the segment's end.
    data: &'a [u8],
    // Element count taken from the descriptor at construction.
    count: usize,
    // Zero-sized marker tying the element type to the slice.
    _phantom: core::marker::PhantomData<T>,
}
215
216impl<'a, T: Pod + FixedLayout> SegmentSlice<'a, T> {
217    /// Create from a segment descriptor and raw account data.
218    #[inline]
219    pub fn from_descriptor(
220        account_data: &'a [u8],
221        desc: &SegmentDescriptor,
222    ) -> Result<Self, ProgramError> {
223        let offset = desc.offset() as usize;
224        let count = desc.count() as usize;
225        let needed = offset + count * T::SIZE;
226        if needed > account_data.len() {
227            return Err(ProgramError::AccountDataTooSmall);
228        }
229        Ok(Self {
230            data: &account_data[offset..],
231            count,
232            _phantom: core::marker::PhantomData,
233        })
234    }
235
236    /// Number of elements.
237    #[inline(always)]
238    pub fn len(&self) -> usize {
239        self.count
240    }
241
242    /// Whether the segment slice is empty.
243    #[inline(always)]
244    pub fn is_empty(&self) -> bool {
245        self.count == 0
246    }
247
248    /// Read element at index.
249    #[inline(always)]
250    pub fn read(&self, index: usize) -> Result<T, ProgramError> {
251        if index >= self.count {
252            return Err(ProgramError::InvalidArgument);
253        }
254        let offset = index * T::SIZE;
255        // SAFETY: Bounds checked. T: Pod, alignment-1.
256        Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) })
257    }
258
259    /// Get element reference at index.
260    #[inline(always)]
261    pub fn get(&self, index: usize) -> Result<&T, ProgramError> {
262        if index >= self.count {
263            return Err(ProgramError::InvalidArgument);
264        }
265        let offset = index * T::SIZE;
266        // SAFETY: Bounds checked. T: Pod, alignment-1.
267        Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const T) })
268    }
269}
270
/// Type-safe mutable slice over a segment's elements.
pub struct SegmentSliceMut<'a, T: Pod + FixedLayout> {
    // Account bytes starting at the segment's data offset; the slice runs
    // to the end of the account data, not just to the segment's end.
    data: &'a mut [u8],
    // Element count taken from the descriptor at construction.
    count: usize,
    // Element capacity taken from the descriptor at construction.
    capacity: usize,
    // Zero-sized marker tying the element type to the slice.
    _phantom: core::marker::PhantomData<T>,
}
278
279impl<'a, T: Pod + FixedLayout> SegmentSliceMut<'a, T> {
280    /// Create from a segment descriptor and raw mutable account data.
281    #[inline]
282    pub fn from_descriptor(
283        account_data: &'a mut [u8],
284        desc: &SegmentDescriptor,
285    ) -> Result<Self, ProgramError> {
286        let offset = desc.offset() as usize;
287        let count = desc.count() as usize;
288        let capacity = desc.capacity() as usize;
289        let needed = offset + capacity * T::SIZE;
290        if needed > account_data.len() {
291            return Err(ProgramError::AccountDataTooSmall);
292        }
293        Ok(Self {
294            data: &mut account_data[offset..],
295            count,
296            capacity,
297            _phantom: core::marker::PhantomData,
298        })
299    }
300
301    /// Number of elements.
302    #[inline(always)]
303    pub fn len(&self) -> usize {
304        self.count
305    }
306
307    /// Whether the segment slice is empty.
308    #[inline(always)]
309    pub fn is_empty(&self) -> bool {
310        self.count == 0
311    }
312
313    /// Maximum capacity.
314    #[inline(always)]
315    pub fn capacity(&self) -> usize {
316        self.capacity
317    }
318
319    /// Read element at index (copy).
320    #[inline(always)]
321    pub fn read(&self, index: usize) -> Result<T, ProgramError> {
322        if index >= self.count {
323            return Err(ProgramError::InvalidArgument);
324        }
325        let offset = index * T::SIZE;
326        // SAFETY: This block is part of Hopper's audited zero-copy/backend boundary; surrounding checks and caller contracts uphold the required raw-pointer, layout, and aliasing invariants.
327        Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) })
328    }
329
330    /// Write element at index.
331    #[inline(always)]
332    pub fn write(&mut self, index: usize, value: T) -> Result<(), ProgramError> {
333        if index >= self.count {
334            return Err(ProgramError::InvalidArgument);
335        }
336        let offset = index * T::SIZE;
337        // SAFETY: Bounds checked. T: Pod. Exclusive access.
338        unsafe {
339            core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut T, value);
340        }
341        Ok(())
342    }
343
344    /// Swap two elements.
345    #[inline]
346    pub fn swap(&mut self, i: usize, j: usize) -> Result<(), ProgramError> {
347        if i >= self.count || j >= self.count {
348            return Err(ProgramError::InvalidArgument);
349        }
350        if i == j {
351            return Ok(());
352        }
353        let size = T::SIZE;
354        let oi = i * size;
355        let oj = j * size;
356        // Swap byte-by-byte to avoid alignment issues
357        for k in 0..size {
358            self.data.swap(oi + k, oj + k);
359        }
360        Ok(())
361    }
362}