1use super::segment_role::SegmentRole;
65#[cfg(feature = "collections")]
66use crate::collections::{FixedVec, Journal, RingBuffer, Slab, SlotMap, SortedVec};
67use hopper_runtime::error::ProgramError;
68
/// Four-byte identifier for a segment, derived from its name.
pub type SegmentId = [u8; 4];

/// Derives a [`SegmentId`] from `name` using the 32-bit FNV-1a hash,
/// serialized little-endian. The function is `const`, so identifiers can
/// be declared as compile-time constants (see the tests below).
#[inline(always)]
pub const fn segment_id(name: &str) -> SegmentId {
    // FNV-1a parameters: offset basis 0x811c_9dc5, prime 0x0100_0193.
    // A `while` loop is used because iterators are unavailable in const fns.
    let raw = name.as_bytes();
    let mut state: u32 = 0x811c_9dc5;
    let mut idx = 0;
    while idx < raw.len() {
        state = (state ^ raw[idx] as u32).wrapping_mul(0x0100_0193);
        idx += 1;
    }
    state.to_le_bytes()
}
87
/// On-chain footprint of one [`SegmentEntry`], in bytes.
pub const SEGMENT_ENTRY_SIZE: usize = 16;

/// Blocks mutable access to the segment (see `segment_data_mut`).
pub const SEG_FLAG_LOCKED: u16 = 1 << 0;
/// Marks the segment read-only; reversible via `unfreeze_segment`.
pub const SEG_FLAG_FROZEN: u16 = 1 << 1;
/// Marker flag; not consumed in this file — interpretation belongs to
/// higher-level code.
pub const SEG_FLAG_DYNAMIC: u16 = 1 << 2;

/// Directory record describing one segment inside an account's data.
/// Multi-byte fields are stored as little-endian byte arrays so the
/// struct has alignment 1 and can overlay raw account bytes at any
/// offset.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct SegmentEntry {
    /// Identifier produced by [`segment_id`].
    pub id: [u8; 4],
    /// Byte offset of the payload within the account (LE).
    offset_bytes: [u8; 4],
    /// Payload length in bytes (LE).
    size_bytes: [u8; 4],
    /// Bitwise OR of the `SEG_FLAG_*` constants (LE).
    flags_bytes: [u8; 2],
    /// Schema version of the segment's contents.
    pub version: u8,
    /// Padding; written as zero by `new` and `init`.
    pub _reserved: u8,
}

// Layout guarantees the raw-pointer overlays below depend on.
const _: () = assert!(core::mem::size_of::<SegmentEntry>() == SEGMENT_ENTRY_SIZE);
const _: () = assert!(core::mem::align_of::<SegmentEntry>() == 1);
112
113impl SegmentEntry {
114 #[inline(always)]
116 pub const fn new(id: SegmentId, offset: u32, size: u32, flags: u16, version: u8) -> Self {
117 Self {
118 id,
119 offset_bytes: offset.to_le_bytes(),
120 size_bytes: size.to_le_bytes(),
121 flags_bytes: flags.to_le_bytes(),
122 version,
123 _reserved: 0,
124 }
125 }
126
127 #[inline(always)]
129 pub fn offset(&self) -> u32 {
130 u32::from_le_bytes(self.offset_bytes)
131 }
132
133 #[inline(always)]
135 pub fn size(&self) -> u32 {
136 u32::from_le_bytes(self.size_bytes)
137 }
138
139 #[inline(always)]
141 pub fn flags(&self) -> u16 {
142 u16::from_le_bytes(self.flags_bytes)
143 }
144
145 #[inline(always)]
147 pub fn is_locked(&self) -> bool {
148 self.flags() & SEG_FLAG_LOCKED != 0
149 }
150
151 #[inline(always)]
153 pub fn is_frozen(&self) -> bool {
154 self.flags() & SEG_FLAG_FROZEN != 0
155 }
156
157 #[inline(always)]
159 pub fn role(&self) -> SegmentRole {
160 SegmentRole::from_flags(self.flags())
161 }
162
163 #[inline(always)]
165 pub fn set_offset(&mut self, offset: u32) {
166 self.offset_bytes = offset.to_le_bytes();
167 }
168
169 #[inline(always)]
171 pub fn set_size(&mut self, size: u32) {
172 self.size_bytes = size.to_le_bytes();
173 }
174
175 #[inline(always)]
177 pub fn set_flags(&mut self, flags: u16) {
178 self.flags_bytes = flags.to_le_bytes();
179 }
180}
181
/// Size in bytes of the registry header: a little-endian u16 segment
/// count followed by two reserved (zero) bytes.
pub const REGISTRY_HEADER_SIZE: usize = 4;

/// Upper bound on the number of entries a registry may declare.
pub const MAX_REGISTRY_SEGMENTS: usize = 16;

/// Read-only, zero-copy view of the segment registry embedded in an
/// account's data buffer.
pub struct SegmentRegistry<'a> {
    // Entire account buffer; entry/payload offsets are absolute within it.
    data: &'a [u8],
    // Number of entries, as read from the header.
    count: usize,
    // Absolute offset of entry 0 (REGISTRY_OFFSET + REGISTRY_HEADER_SIZE).
    entries_offset: usize,
}

/// Absolute offset where the registry begins: directly after the account
/// header.
pub const REGISTRY_OFFSET: usize = crate::account::HEADER_LEN;
203
204impl<'a> SegmentRegistry<'a> {
205 #[inline]
209 pub fn from_account(account_data: &'a [u8]) -> Result<Self, ProgramError> {
210 let start = REGISTRY_OFFSET;
211 if account_data.len() < start + REGISTRY_HEADER_SIZE {
212 return Err(ProgramError::AccountDataTooSmall);
213 }
214
215 let count = u16::from_le_bytes([account_data[start], account_data[start + 1]]) as usize;
216
217 if count > MAX_REGISTRY_SEGMENTS {
218 return Err(ProgramError::InvalidAccountData);
219 }
220
221 let entries_offset = start + REGISTRY_HEADER_SIZE;
222 let needed = entries_offset + count * SEGMENT_ENTRY_SIZE;
223 if account_data.len() < needed {
224 return Err(ProgramError::AccountDataTooSmall);
225 }
226
227 Ok(Self {
228 data: account_data,
229 count,
230 entries_offset,
231 })
232 }
233
234 #[inline(always)]
236 pub fn segment_count(&self) -> usize {
237 self.count
238 }
239
240 #[inline(always)]
242 pub fn data_region_offset(&self) -> usize {
243 self.entries_offset + self.count * SEGMENT_ENTRY_SIZE
244 }
245
246 #[inline]
248 pub fn entry(&self, index: usize) -> Result<&SegmentEntry, ProgramError> {
249 if index >= self.count {
250 return Err(ProgramError::InvalidArgument);
251 }
252 let offset = self.entries_offset + index * SEGMENT_ENTRY_SIZE;
253 Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const SegmentEntry) })
255 }
256
257 #[inline]
259 pub fn find(&self, id: &SegmentId) -> Result<(usize, &SegmentEntry), ProgramError> {
260 let mut i = 0;
261 while i < self.count {
262 let offset = self.entries_offset + i * SEGMENT_ENTRY_SIZE;
263 let entry = unsafe { &*(self.data.as_ptr().add(offset) as *const SegmentEntry) };
265 if entry.id == *id {
266 return Ok((i, entry));
267 }
268 i += 1;
269 }
270 Err(ProgramError::InvalidArgument)
271 }
272
273 #[inline]
275 pub fn segment_data(&self, id: &SegmentId) -> Result<&'a [u8], ProgramError> {
276 let (_, entry) = self.find(id)?;
277 let start = entry.offset() as usize;
278 let end = start + entry.size() as usize;
279 if end > self.data.len() {
280 return Err(ProgramError::AccountDataTooSmall);
281 }
282 Ok(&self.data[start..end])
283 }
284
285 #[inline]
287 pub fn segment_overlay<T: super::Pod + super::FixedLayout>(
288 &self,
289 id: &SegmentId,
290 ) -> Result<&'a T, ProgramError> {
291 let data = self.segment_data(id)?;
292 if data.len() < T::SIZE {
293 return Err(ProgramError::AccountDataTooSmall);
294 }
295 Ok(unsafe { &*(data.as_ptr() as *const T) })
297 }
298
299 #[inline]
301 pub fn iter(&self) -> SegmentIter<'a> {
302 SegmentIter {
303 data: self.data,
304 entries_offset: self.entries_offset,
305 count: self.count,
306 pos: 0,
307 }
308 }
309
310 #[inline(always)]
312 pub fn registry_size(&self) -> usize {
313 REGISTRY_HEADER_SIZE + self.count * SEGMENT_ENTRY_SIZE
314 }
315
316 #[inline]
320 pub const fn required_account_size(
321 header_size: usize,
322 segment_count: usize,
323 total_segment_data: usize,
324 ) -> usize {
325 header_size + REGISTRY_HEADER_SIZE + segment_count * SEGMENT_ENTRY_SIZE + total_segment_data
326 }
327}
328
/// Iterator over a registry's entry table, yielding `(index, &SegmentEntry)`.
pub struct SegmentIter<'a> {
    // Full account buffer the entries are overlaid on.
    data: &'a [u8],
    // Absolute byte offset of entry 0 within `data`.
    entries_offset: usize,
    // Total number of entries in the table.
    count: usize,
    // Next index to yield.
    pos: usize,
}
336
337impl<'a> Iterator for SegmentIter<'a> {
338 type Item = (usize, &'a SegmentEntry);
339
340 #[inline]
341 fn next(&mut self) -> Option<Self::Item> {
342 if self.pos >= self.count {
343 return None;
344 }
345 let idx = self.pos;
346 let offset = self.entries_offset + idx * SEGMENT_ENTRY_SIZE;
347 let entry = unsafe { &*(self.data.as_ptr().add(offset) as *const SegmentEntry) };
349 self.pos += 1;
350 Some((idx, entry))
351 }
352}
353
/// Mutable counterpart of [`SegmentRegistry`]: same layout assumptions,
/// but grants write access to entries and segment payloads.
pub struct SegmentRegistryMut<'a> {
    // Entire account buffer; entry/payload offsets are absolute within it.
    data: &'a mut [u8],
    // Number of entries, as read from the header.
    count: usize,
    // Absolute byte offset of entry 0 within `data`.
    entries_offset: usize,
}
362
363impl<'a> SegmentRegistryMut<'a> {
364 #[inline]
366 pub fn from_account_mut(account_data: &'a mut [u8]) -> Result<Self, ProgramError> {
367 let start = REGISTRY_OFFSET;
368 if account_data.len() < start + REGISTRY_HEADER_SIZE {
369 return Err(ProgramError::AccountDataTooSmall);
370 }
371
372 let count = u16::from_le_bytes([account_data[start], account_data[start + 1]]) as usize;
373
374 if count > MAX_REGISTRY_SEGMENTS {
375 return Err(ProgramError::InvalidAccountData);
376 }
377
378 let entries_offset = start + REGISTRY_HEADER_SIZE;
379 let needed = entries_offset + count * SEGMENT_ENTRY_SIZE;
380 if account_data.len() < needed {
381 return Err(ProgramError::AccountDataTooSmall);
382 }
383
384 Ok(Self {
385 data: account_data,
386 count,
387 entries_offset,
388 })
389 }
390
391 #[inline(always)]
393 pub fn segment_count(&self) -> usize {
394 self.count
395 }
396
397 #[inline]
402 pub fn init(data: &mut [u8], specs: &[(SegmentId, u32, u8)]) -> Result<(), ProgramError> {
403 let start = REGISTRY_OFFSET;
404 if specs.len() > MAX_REGISTRY_SEGMENTS {
405 return Err(ProgramError::InvalidArgument);
406 }
407
408 let n = specs.len();
410 let mut i = 0;
411 while i < n {
412 let mut j = i + 1;
413 while j < n {
414 if specs[i].0 == specs[j].0 {
415 return Err(ProgramError::InvalidArgument);
416 }
417 j += 1;
418 }
419 i += 1;
420 }
421
422 let count = specs.len();
423 let entries_offset = start + REGISTRY_HEADER_SIZE;
424 let data_region = entries_offset + count * SEGMENT_ENTRY_SIZE;
425
426 data[start] = (count & 0xFF) as u8;
428 data[start + 1] = ((count >> 8) & 0xFF) as u8;
429 data[start + 2] = 0; data[start + 3] = 0; let mut current_offset = data_region as u32;
434 for (i, &(id, size, version)) in specs.iter().enumerate() {
435 let entry = SegmentEntry::new(id, current_offset, size, 0, version);
436 let entry_offset = entries_offset + i * SEGMENT_ENTRY_SIZE;
437 let dst = &mut data[entry_offset..entry_offset + SEGMENT_ENTRY_SIZE];
439 unsafe {
441 core::ptr::copy_nonoverlapping(
442 &entry as *const SegmentEntry as *const u8,
443 dst.as_mut_ptr(),
444 SEGMENT_ENTRY_SIZE,
445 );
446 }
447 current_offset += size;
448 }
449
450 Ok(())
451 }
452
453 #[inline]
455 pub fn entry_mut(&mut self, index: usize) -> Result<&mut SegmentEntry, ProgramError> {
456 if index >= self.count {
457 return Err(ProgramError::InvalidArgument);
458 }
459 let offset = self.entries_offset + index * SEGMENT_ENTRY_SIZE;
460 Ok(unsafe { &mut *(self.data.as_mut_ptr().add(offset) as *mut SegmentEntry) })
462 }
463
464 #[inline]
466 pub fn find_mut(&mut self, id: &SegmentId) -> Result<(usize, &mut SegmentEntry), ProgramError> {
467 let mut i = 0;
468 while i < self.count {
469 let offset = self.entries_offset + i * SEGMENT_ENTRY_SIZE;
470 let entry = unsafe { &mut *(self.data.as_mut_ptr().add(offset) as *mut SegmentEntry) };
472 if entry.id == *id {
473 return Ok((i, entry));
474 }
475 i += 1;
476 }
477 Err(ProgramError::InvalidArgument)
478 }
479
480 #[inline]
486 pub fn segment_data_mut(&mut self, id: &SegmentId) -> Result<&mut [u8], ProgramError> {
487 let (_, entry) = self.find_mut(id)?;
488 if entry.is_locked() || entry.is_frozen() {
489 return Err(ProgramError::InvalidAccountData);
490 }
491 if entry.role().is_immutable_after_init() {
493 return Err(ProgramError::InvalidAccountData);
494 }
495 let start = entry.offset() as usize;
496 let size = entry.size() as usize;
497 let end = start + size;
498 if end > self.data.len() {
499 return Err(ProgramError::AccountDataTooSmall);
500 }
501 Ok(&mut self.data[start..end])
502 }
503
504 #[inline]
509 pub fn segment_data_mut_unchecked(
510 &mut self,
511 id: &SegmentId,
512 ) -> Result<&mut [u8], ProgramError> {
513 let (_, entry) = self.find_mut(id)?;
514 if entry.is_locked() || entry.is_frozen() {
515 return Err(ProgramError::InvalidAccountData);
516 }
517 let start = entry.offset() as usize;
518 let size = entry.size() as usize;
519 let end = start + size;
520 if end > self.data.len() {
521 return Err(ProgramError::AccountDataTooSmall);
522 }
523 Ok(&mut self.data[start..end])
524 }
525
526 #[inline]
530 pub fn segment_overlay_mut<T: super::Pod + super::FixedLayout>(
531 &mut self,
532 id: &SegmentId,
533 ) -> Result<&mut T, ProgramError> {
534 let data = self.segment_data_mut(id)?;
535 if data.len() < T::SIZE {
536 return Err(ProgramError::AccountDataTooSmall);
537 }
538 Ok(unsafe { &mut *(data.as_mut_ptr() as *mut T) })
540 }
541
542 #[cfg(feature = "collections")]
544 #[inline]
545 pub fn segment_fixed_vec<T: super::Pod + super::FixedLayout>(
546 &mut self,
547 id: &SegmentId,
548 ) -> Result<FixedVec<'_, T>, ProgramError> {
549 FixedVec::from_bytes(self.segment_data_mut(id)?)
550 }
551
552 #[cfg(feature = "collections")]
554 #[inline]
555 pub fn segment_sorted_vec<T: super::Pod + super::FixedLayout + Ord>(
556 &mut self,
557 id: &SegmentId,
558 ) -> Result<SortedVec<'_, T>, ProgramError> {
559 SortedVec::from_bytes(self.segment_data_mut(id)?)
560 }
561
562 #[cfg(feature = "collections")]
564 #[inline]
565 pub fn segment_ring_buffer<T: super::Pod + super::FixedLayout>(
566 &mut self,
567 id: &SegmentId,
568 ) -> Result<RingBuffer<'_, T>, ProgramError> {
569 RingBuffer::from_bytes(self.segment_data_mut(id)?)
570 }
571
572 #[cfg(feature = "collections")]
574 #[inline]
575 pub fn segment_slot_map<T: super::Pod + super::FixedLayout>(
576 &mut self,
577 id: &SegmentId,
578 ) -> Result<SlotMap<'_, T>, ProgramError> {
579 SlotMap::from_bytes(self.segment_data_mut(id)?)
580 }
581
582 #[cfg(feature = "collections")]
584 #[inline]
585 pub fn segment_journal<T: super::Pod + super::FixedLayout>(
586 &mut self,
587 id: &SegmentId,
588 ) -> Result<Journal<'_, T>, ProgramError> {
589 Journal::from_bytes_mut(self.segment_data_mut(id)?)
590 }
591
592 #[cfg(feature = "collections")]
594 #[inline]
595 pub fn segment_slab<T: super::Pod + super::FixedLayout>(
596 &mut self,
597 id: &SegmentId,
598 ) -> Result<Slab<'_, T>, ProgramError> {
599 Slab::from_bytes_mut(self.segment_data_mut(id)?)
600 }
601
602 #[inline]
604 pub fn freeze_segment(&mut self, id: &SegmentId) -> Result<(), ProgramError> {
605 let (_, entry) = self.find_mut(id)?;
606 let new_flags = entry.flags() | SEG_FLAG_FROZEN;
607 entry.set_flags(new_flags);
608 Ok(())
609 }
610
611 #[inline]
613 pub fn unfreeze_segment(&mut self, id: &SegmentId) -> Result<(), ProgramError> {
614 let (_, entry) = self.find_mut(id)?;
615 let new_flags = entry.flags() & !SEG_FLAG_FROZEN;
616 entry.set_flags(new_flags);
617 Ok(())
618 }
619
620 #[inline]
622 pub fn lock_segment(&mut self, id: &SegmentId) -> Result<(), ProgramError> {
623 let (_, entry) = self.find_mut(id)?;
624 let new_flags = entry.flags() | SEG_FLAG_LOCKED;
625 entry.set_flags(new_flags);
626 Ok(())
627 }
628}
629
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal 1-byte Pod element used to exercise the collection adapters.
    #[repr(C)]
    #[derive(Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd)]
    struct Entry8 {
        value: u8,
    }

    // bytemuck impls are only required when the native backend is enabled.
    #[cfg(feature = "hopper-native-backend")]
    unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Zeroable for Entry8 {}
    #[cfg(feature = "hopper-native-backend")]
    unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Pod for Entry8 {}
    // SAFETY: Entry8 is a single u8 — every bit pattern is a valid value.
    unsafe impl crate::account::Pod for Entry8 {}

    impl crate::account::FixedLayout for Entry8 {
        const SIZE: usize = 1;
    }

    #[cfg(feature = "collections")]
    #[test]
    fn segment_fixed_vec_adapter_exposes_vec_api() {
        const CORE: SegmentId = segment_id("core");
        // Account sized for exactly one 8-byte segment after the registry.
        let total = REGISTRY_OFFSET + REGISTRY_HEADER_SIZE + SEGMENT_ENTRY_SIZE + 8;
        let mut account = std::vec![0u8; total];

        SegmentRegistryMut::init(&mut account, &[(CORE, 8, 1)]).unwrap();

        // Push through the adapter, then read back via the same view.
        let mut registry = SegmentRegistryMut::from_account_mut(&mut account).unwrap();
        let mut values = registry.segment_fixed_vec::<Entry8>(&CORE).unwrap();
        values.push(Entry8 { value: 7 }).unwrap();
        values.push(Entry8 { value: 9 }).unwrap();

        assert_eq!(values.len(), 2);
        assert_eq!(values.get(0).unwrap().value, 7);
        assert_eq!(values.get(1).unwrap().value, 9);
    }

    #[cfg(feature = "collections")]
    #[test]
    fn segment_journal_adapter_exposes_journal_api() {
        const AUDIT: SegmentId = segment_id("audit");
        // Journal segment: its header plus room for 4 one-byte entries.
        let segment_bytes = crate::collections::JOURNAL_HEADER_SIZE + 4;
        let total = REGISTRY_OFFSET + REGISTRY_HEADER_SIZE + SEGMENT_ENTRY_SIZE + segment_bytes;
        let mut account = std::vec![0u8; total];

        SegmentRegistryMut::init(&mut account, &[(AUDIT, segment_bytes as u32, 1)]).unwrap();

        // Scope the mutable registry so the read-only view below can borrow.
        {
            let mut registry = SegmentRegistryMut::from_account_mut(&mut account).unwrap();
            let mut journal = registry.segment_journal::<Entry8>(&AUDIT).unwrap();
            journal.init(false);
            journal.append(Entry8 { value: 3 }).unwrap();
            journal.append(Entry8 { value: 4 }).unwrap();
        }

        // Entries written through the mutable adapter must be visible to a
        // read-only Journal constructed over the same segment bytes.
        let registry = SegmentRegistry::from_account(&account).unwrap();
        let bytes = registry.segment_data(&AUDIT).unwrap();
        let reader = crate::collections::Journal::<Entry8>::from_bytes(bytes).unwrap();
        assert_eq!(reader.entry_count(), 2);
        assert_eq!(reader.read(0).unwrap().value, 3);
        assert_eq!(reader.read(1).unwrap().value, 4);
    }
}