// hopper_core/collections/fixed_vec.rs
//! Bounded dynamic array -- zero-copy `FixedVec`.
//!
//! Wire layout:
//! ```text
//! [count: u32 LE][element 0][element 1]...[element capacity-1]
//! ```
//!
//! The capacity is known at construction time from the byte slice size.
//! Elements must implement `Pod + FixedLayout`.

use crate::account::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;

/// Header size: 4 bytes for the count.
const HEADER_SIZE: usize = 4;

/// Bounded dynamic array overlaid on a byte slice.
///
/// Supports O(1) push, O(1) pop, O(1) swap_remove, O(1) index access.
/// No heap allocation.
pub struct FixedVec<'a, T: Pod + FixedLayout> {
    /// Backing bytes: `[count: u32 LE][element slots...]`.
    data: &'a mut [u8],
    /// Marker for the element type; no `T` value is stored inline.
    _phantom: core::marker::PhantomData<T>,
}
25
26impl<'a, T: Pod + FixedLayout> FixedVec<'a, T> {
27    /// Overlay a FixedVec on a mutable byte slice.
28    ///
29    /// The slice must be at least `HEADER_SIZE` bytes.
30    /// Capacity is `(data.len() - HEADER_SIZE) / T::SIZE`.
31    #[inline]
32    pub fn from_bytes(data: &'a mut [u8]) -> Result<Self, ProgramError> {
33        if data.len() < HEADER_SIZE {
34            return Err(ProgramError::AccountDataTooSmall);
35        }
36        Ok(Self {
37            data,
38            _phantom: core::marker::PhantomData,
39        })
40    }
41
42    /// Current number of elements.
43    #[inline(always)]
44    pub fn len(&self) -> usize {
45        let bytes = [self.data[0], self.data[1], self.data[2], self.data[3]];
46        u32::from_le_bytes(bytes) as usize
47    }
48
49    /// Maximum capacity.
50    #[inline(always)]
51    pub fn capacity(&self) -> usize {
52        (self.data.len() - HEADER_SIZE) / T::SIZE
53    }
54
55    /// Is the vec empty?
56    #[inline(always)]
57    pub fn is_empty(&self) -> bool {
58        self.len() == 0
59    }
60
61    /// Is the vec at capacity?
62    #[inline(always)]
63    pub fn is_full(&self) -> bool {
64        self.len() >= self.capacity()
65    }
66
67    /// Set the count (internal).
68    #[inline(always)]
69    fn set_len(&mut self, len: usize) {
70        let bytes = (len as u32).to_le_bytes();
71        self.data[0] = bytes[0];
72        self.data[1] = bytes[1];
73        self.data[2] = bytes[2];
74        self.data[3] = bytes[3];
75    }
76
77    /// Byte offset of element at `index`.
78    #[inline(always)]
79    fn element_offset(&self, index: usize) -> usize {
80        HEADER_SIZE + index * T::SIZE
81    }
82
83    /// Read element at index (copy).
84    #[inline]
85    pub fn get(&self, index: usize) -> Result<T, ProgramError> {
86        let len = self.len();
87        if index >= len {
88            return Err(ProgramError::InvalidArgument);
89        }
90        let offset = self.element_offset(index);
91        // SAFETY: Bounds checked. T: Pod, alignment-1.
92        Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) })
93    }
94
95    /// Get immutable reference to element at index.
96    #[inline]
97    pub fn get_ref(&self, index: usize) -> Result<&T, ProgramError> {
98        let len = self.len();
99        if index >= len {
100            return Err(ProgramError::InvalidArgument);
101        }
102        let offset = self.element_offset(index);
103        // SAFETY: Bounds checked. T: Pod, alignment-1.
104        Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const T) })
105    }
106
107    /// Set element at index.
108    #[inline]
109    pub fn set(&mut self, index: usize, value: T) -> Result<(), ProgramError> {
110        let len = self.len();
111        if index >= len {
112            return Err(ProgramError::InvalidArgument);
113        }
114        let offset = self.element_offset(index);
115        // SAFETY: Bounds checked. T: Pod. Exclusive access.
116        unsafe {
117            core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut T, value);
118        }
119        Ok(())
120    }
121
122    /// Push an element to the end. Returns error if at capacity.
123    #[inline]
124    pub fn push(&mut self, value: T) -> Result<(), ProgramError> {
125        let len = self.len();
126        if len >= self.capacity() {
127            return Err(ProgramError::AccountDataTooSmall);
128        }
129        let offset = self.element_offset(len);
130        // SAFETY: Bounds checked (len < capacity). T: Pod, alignment-1.
131        unsafe {
132            core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut T, value);
133        }
134        self.set_len(len + 1);
135        Ok(())
136    }
137
138    /// Pop the last element.
139    #[inline]
140    pub fn pop(&mut self) -> Result<T, ProgramError> {
141        let len = self.len();
142        if len == 0 {
143            return Err(ProgramError::InvalidArgument);
144        }
145        let offset = self.element_offset(len - 1);
146        // SAFETY: This block is part of Hopper's audited zero-copy/backend boundary; surrounding checks and caller contracts uphold the required raw-pointer, layout, and aliasing invariants.
147        let value =
148            unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) };
149        // Zero the removed slot for cleanliness.
150        for byte in &mut self.data[offset..offset + T::SIZE] {
151            *byte = 0;
152        }
153        self.set_len(len - 1);
154        Ok(value)
155    }
156
157    /// Remove element at index by swapping with the last element. O(1).
158    #[inline]
159    pub fn swap_remove(&mut self, index: usize) -> Result<T, ProgramError> {
160        let len = self.len();
161        if index >= len {
162            return Err(ProgramError::InvalidArgument);
163        }
164        let removed_offset = self.element_offset(index);
165        // SAFETY: This block is part of Hopper's audited zero-copy/backend boundary; surrounding checks and caller contracts uphold the required raw-pointer, layout, and aliasing invariants.
166        let removed = unsafe {
167            core::ptr::read_unaligned(self.data.as_ptr().add(removed_offset) as *const T)
168        };
169
170        let last_index = len - 1;
171        if index != last_index {
172            let last_offset = self.element_offset(last_index);
173            // Copy last element to removed position
174            let size = T::SIZE;
175            for i in 0..size {
176                self.data[removed_offset + i] = self.data[last_offset + i];
177            }
178            // Zero the old last slot
179            for byte in &mut self.data[last_offset..last_offset + size] {
180                *byte = 0;
181            }
182        } else {
183            // Zero the removed slot
184            for byte in &mut self.data[removed_offset..removed_offset + T::SIZE] {
185                *byte = 0;
186            }
187        }
188        self.set_len(last_index);
189        Ok(removed)
190    }
191
192    /// Clear all elements, setting count to 0.
193    #[inline]
194    pub fn clear(&mut self) {
195        let len = self.len();
196        let end = self.element_offset(len);
197        for byte in &mut self.data[HEADER_SIZE..end] {
198            *byte = 0;
199        }
200        self.set_len(0);
201    }
202
203    /// Compute the byte size needed for a FixedVec with the given capacity.
204    #[inline(always)]
205    pub const fn required_bytes(capacity: usize) -> usize {
206        HEADER_SIZE + capacity * T::SIZE
207    }
208}
209
#[cfg(test)]
mod tests {
    use super::*;
    use crate::abi::WireU64;

    #[test]
    fn push_pop_roundtrip() {
        // 4-byte header plus four 8-byte slots.
        let mut storage = [0u8; 4 + 8 * 4];
        let mut fv = FixedVec::<WireU64>::from_bytes(&mut storage).unwrap();
        assert_eq!(fv.len(), 0);
        assert_eq!(fv.capacity(), 4);

        for v in [10, 20] {
            fv.push(WireU64::new(v)).unwrap();
        }
        assert_eq!(fv.len(), 2);

        assert_eq!(fv.pop().unwrap().get(), 20);
        assert_eq!(fv.len(), 1);
        assert_eq!(fv.get(0).unwrap().get(), 10);
    }

    #[test]
    fn swap_remove_works() {
        let mut storage = [0u8; 4 + 8 * 4];
        let mut fv = FixedVec::<WireU64>::from_bytes(&mut storage).unwrap();
        for v in [100, 200, 300] {
            fv.push(WireU64::new(v)).unwrap();
        }

        // Removing index 0 swaps the tail element (300) into its place.
        assert_eq!(fv.swap_remove(0).unwrap().get(), 100);
        assert_eq!(fv.len(), 2);
        assert_eq!(fv.get(0).unwrap().get(), 300);
        assert_eq!(fv.get(1).unwrap().get(), 200);
    }

    #[test]
    fn full_returns_error() {
        // Room for exactly one element.
        let mut storage = [0u8; 4 + 8];
        let mut fv = FixedVec::<WireU64>::from_bytes(&mut storage).unwrap();
        fv.push(WireU64::new(1)).unwrap();
        assert!(fv.push(WireU64::new(2)).is_err());
    }
}