// hopper_core/collections/packed_map.rs
//! Packed key->value map for on-chain registries -- zero-copy, no allocation.
//!
//! Wire layout:
//! ```text
//! [count: u32 LE][entry 0: K+V][entry 1: K+V]...[entry capacity-1: K+V]
//! ```
//!
//! Keys are unique. Lookup is O(n) linear scan (optimal for small N < ~64).
//! For sorted key access, pair with `SortedVec` instead.
//!
//! ## Usage
//!
//! ```ignore
//! let mut map = PackedMap::<Address32, WireU64>::from_bytes(&mut data[offset..])?;
//! map.insert(key, value)?;
//! let balance = map.get(&key)?;
//! ```

use core::iter::FusedIterator;

use hopper_runtime::error::ProgramError;

use crate::account::{FixedLayout, Pod};

/// Size in bytes of the `count: u32` little-endian header that precedes the
/// packed entries.
const HEADER_SIZE: usize = 4;

24/// An entry in the packed map: key followed by value.
25#[repr(C)]
26#[derive(Clone, Copy)]
27#[allow(dead_code)]
28pub struct MapEntry<K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> {
29    pub key: K,
30    pub value: V,
31}
32
// Bytemuck proof (Hopper Safety Audit Must-Fix #5). `K` and `V` are
// both `hopper_native::Pod`-bounded, which transitively requires
// `bytemuck::Pod + Zeroable`, so `MapEntry<K, V>` is itself
// byte-compatible by construction.

// SAFETY: both fields are `Zeroable` (via their `Pod` bound), so the
// all-zero bit pattern is a valid `MapEntry`.
#[cfg(feature = "hopper-native-backend")]
unsafe impl<K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout>
    ::hopper_runtime::__hopper_native::bytemuck::Zeroable for MapEntry<K, V>
{
}

// SAFETY: `repr(C)` struct of `Pod` fields. NOTE(review): `bytemuck::Pod`
// additionally forbids interior padding; that holds only if `K`/`V` are
// alignment-1 wire types (all access in this module is via `read_unaligned`,
// which suggests they are) — confirm before introducing higher-alignment
// key or value types.
#[cfg(feature = "hopper-native-backend")]
unsafe impl<K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout>
    ::hopper_runtime::__hopper_native::bytemuck::Pod for MapEntry<K, V>
{
}

48// SAFETY: MapEntry is repr(C) of Pod types, alignment inherited.
49unsafe impl<K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> Pod for MapEntry<K, V> {}
50
51impl<K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> FixedLayout for MapEntry<K, V> {
52    const SIZE: usize = K::SIZE + V::SIZE;
53}
54
55/// Packed key->value map overlaid on a byte slice.
56///
57/// Supports O(n) lookup, O(1) insert (append), O(1) remove (swap-last).
58/// No heap allocation.
59pub struct PackedMap<'a, K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> {
60    data: &'a mut [u8],
61    _phantom: core::marker::PhantomData<(K, V)>,
62}
63
64impl<'a, K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> PackedMap<'a, K, V> {
65    const ENTRY_SIZE: usize = K::SIZE + V::SIZE;
66
67    /// Overlay a PackedMap on a mutable byte slice.
68    #[inline]
69    pub fn from_bytes(data: &'a mut [u8]) -> Result<Self, ProgramError> {
70        if data.len() < HEADER_SIZE {
71            return Err(ProgramError::AccountDataTooSmall);
72        }
73        Ok(Self {
74            data,
75            _phantom: core::marker::PhantomData,
76        })
77    }
78
79    /// Current number of entries.
80    #[inline(always)]
81    pub fn len(&self) -> usize {
82        u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]]) as usize
83    }
84
85    /// Maximum capacity.
86    #[inline(always)]
87    pub fn capacity(&self) -> usize {
88        (self.data.len() - HEADER_SIZE) / Self::ENTRY_SIZE
89    }
90
91    /// Is the map empty?
92    #[inline(always)]
93    pub fn is_empty(&self) -> bool {
94        self.len() == 0
95    }
96
97    /// Is the map full?
98    #[inline(always)]
99    pub fn is_full(&self) -> bool {
100        self.len() >= self.capacity()
101    }
102
103    #[inline(always)]
104    fn set_len(&mut self, count: usize) {
105        let bytes = (count as u32).to_le_bytes();
106        self.data[0] = bytes[0];
107        self.data[1] = bytes[1];
108        self.data[2] = bytes[2];
109        self.data[3] = bytes[3];
110    }
111
112    #[inline(always)]
113    fn entry_offset(index: usize) -> usize {
114        HEADER_SIZE + index * (K::SIZE + V::SIZE)
115    }
116
117    /// Read key at index.
118    #[inline]
119    fn read_key(&self, index: usize) -> K {
120        let offset = Self::entry_offset(index);
121        // SAFETY: Caller ensures index < len. K: Pod, alignment-1.
122        unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const K) }
123    }
124
125    /// Read value at index.
126    #[inline]
127    fn read_value(&self, index: usize) -> V {
128        let offset = Self::entry_offset(index) + K::SIZE;
129        // SAFETY: Caller ensures index < len. V: Pod, alignment-1.
130        unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const V) }
131    }
132
133    /// Write key at index.
134    #[inline]
135    fn write_key(&mut self, index: usize, key: K) {
136        let offset = Self::entry_offset(index);
137        // SAFETY: Caller ensures index < capacity. K: Pod, alignment-1.
138        unsafe { core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut K, key) }
139    }
140
141    /// Write value at index.
142    #[inline]
143    fn write_value(&mut self, index: usize, value: V) {
144        let offset = Self::entry_offset(index) + K::SIZE;
145        // SAFETY: Caller ensures index < capacity. V: Pod, alignment-1.
146        unsafe { core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut V, value) }
147    }
148
149    /// Find the index of a key, or None.
150    #[inline]
151    pub fn find(&self, key: &K) -> Option<usize> {
152        let len = self.len();
153        let mut i = 0;
154        while i < len {
155            if self.read_key(i) == *key {
156                return Some(i);
157            }
158            i += 1;
159        }
160        None
161    }
162
163    /// Get the value for a key, or error if not found.
164    #[inline]
165    pub fn get(&self, key: &K) -> Result<V, ProgramError> {
166        match self.find(key) {
167            Some(idx) => Ok(self.read_value(idx)),
168            None => Err(ProgramError::InvalidArgument),
169        }
170    }
171
172    /// Get a mutable reference to the value bytes for a key.
173    /// Returns the byte offset of the value within data for direct manipulation.
174    #[inline]
175    pub fn get_value_offset(&self, key: &K) -> Result<usize, ProgramError> {
176        match self.find(key) {
177            Some(idx) => Ok(Self::entry_offset(idx) + K::SIZE),
178            None => Err(ProgramError::InvalidArgument),
179        }
180    }
181
182    /// Insert or update a key-value pair.
183    ///
184    /// If the key already exists, updates the value and returns `true`.
185    /// If the key is new, appends and returns `false`.
186    /// Returns error if full and key doesn't exist.
187    #[inline]
188    pub fn insert(&mut self, key: K, value: V) -> Result<bool, ProgramError> {
189        // Check if key already exists
190        if let Some(idx) = self.find(&key) {
191            self.write_value(idx, value);
192            return Ok(true); // updated
193        }
194        // New entry
195        let len = self.len();
196        if len >= self.capacity() {
197            return Err(ProgramError::AccountDataTooSmall);
198        }
199        self.write_key(len, key);
200        self.write_value(len, value);
201        self.set_len(len + 1);
202        Ok(false) // inserted
203    }
204
205    /// Remove a key-value pair by swapping with the last entry. O(1).
206    ///
207    /// Returns the removed value, or error if key not found.
208    #[inline]
209    pub fn remove(&mut self, key: &K) -> Result<V, ProgramError> {
210        let idx = self.find(key).ok_or(ProgramError::InvalidArgument)?;
211        let value = self.read_value(idx);
212        let len = self.len();
213        let last = len - 1;
214
215        if idx != last {
216            // Swap with last entry
217            let last_key = self.read_key(last);
218            let last_value = self.read_value(last);
219            self.write_key(idx, last_key);
220            self.write_value(idx, last_value);
221        }
222
223        // Zero the removed last slot
224        let last_offset = Self::entry_offset(last);
225        let entry_end = last_offset + Self::ENTRY_SIZE;
226        for byte in &mut self.data[last_offset..entry_end] {
227            *byte = 0;
228        }
229
230        self.set_len(last);
231        Ok(value)
232    }
233
234    /// Check if the map contains a key.
235    #[inline(always)]
236    pub fn contains(&self, key: &K) -> bool {
237        self.find(key).is_some()
238    }
239
240    /// Clear all entries.
241    #[inline]
242    pub fn clear(&mut self) {
243        let len = self.len();
244        let end = Self::entry_offset(len);
245        for byte in &mut self.data[HEADER_SIZE..end] {
246            *byte = 0;
247        }
248        self.set_len(0);
249    }
250
251    /// Iterate over all entries (key, value) by index.
252    ///
253    /// Returns an iterator that yields (K, V) copies.
254    #[inline]
255    pub fn iter(&self) -> PackedMapIter<'_, K, V> {
256        PackedMapIter {
257            map: self,
258            index: 0,
259            len: self.len(),
260        }
261    }
262}
263
264/// Iterator over PackedMap entries.
265pub struct PackedMapIter<'a, K: Pod + FixedLayout + PartialEq, V: Pod + FixedLayout> {
266    map: &'a PackedMap<'a, K, V>,
267    index: usize,
268    len: usize,
269}
270
271impl<'a, K: Pod + FixedLayout + PartialEq + Copy, V: Pod + FixedLayout + Copy> Iterator
272    for PackedMapIter<'a, K, V>
273{
274    type Item = (K, V);
275
276    #[inline]
277    fn next(&mut self) -> Option<Self::Item> {
278        if self.index >= self.len {
279            return None;
280        }
281        let key = self.map.read_key(self.index);
282        let value = self.map.read_value(self.index);
283        self.index += 1;
284        Some((key, value))
285    }
286
287    #[inline(always)]
288    fn size_hint(&self) -> (usize, Option<usize>) {
289        let remaining = self.len - self.index;
290        (remaining, Some(remaining))
291    }
292}