// jiminy_core/account/segment.rs
//! Zero-copy segmented account access.
//!
//! Extends the fixed-size `zero_copy_layout!` pattern with support for
//! **multiple variable-length arrays** within a single account. Each
//! array is called a *segment*.
//!
//! ## On-chain layout
//!
//! ```text
//! ┌──────────────┬──────────────────┬──────────────────────────┐
//! │ Fixed Prefix │  Segment Table   │     Segment Data         │
//! │  (N bytes)   │  (S × 8 bytes)   │  (variable per segment)  │
//! └──────────────┴──────────────────┴──────────────────────────┘
//! ```
//!
//! The fixed prefix is a standard `zero_copy_layout!` struct (including
//! the 16-byte `AccountHeader`). Immediately after it comes the segment
//! table: `S` entries of 8 bytes each, describing the offset, count, and
//! element size of each dynamic array. Segment data follows the table.
//!
//! ## Segment Descriptor (8 bytes)
//!
//! ```text
//! Byte   Field          Type      Description
//! ──────────────────────────────────────────────────────────
//! 0-3    offset         u32 LE    Byte offset from account start
//! 4-5    count          u16 LE    Number of elements
//! 6-7    element_size   u16 LE    Size of each element in bytes
//! ──────────────────────────────────────────────────────────
//! ```
//!
//! ## Usage
//!
//! ```rust,ignore
//! use jiminy_core::account::segment::*;
//!
//! // Read segment table from account data starting after the fixed prefix.
//! let table = SegmentTable::from_bytes(&data[prefix_len..], 2)?;
//! let desc = table.descriptor(0)?;
//!
//! // Get a typed zero-copy view of the segment.
//! let orders = SegmentSlice::<Order>::from_descriptor(&data, &desc)?;
//! for order in orders.iter() {
//!     // order: Order (by copy, alignment-safe)
//! }
//! ```
use pinocchio::error::ProgramError;

use super::pod::{FixedLayout, Pod};

/// Byte width of one on-wire segment descriptor.
pub const SEGMENT_DESC_SIZE: usize = 8;

/// Hard cap on the number of segments a single account may declare.
///
/// Keeps the table overhead bounded (8 × 8 = 64 bytes of table) and
/// keeps validation cheap.
pub const MAX_SEGMENTS: usize = 8;
61/// On-wire segment descriptor.
62///
63/// Each 8-byte entry describes one variable-length array within a
64/// segmented account. The descriptor lives in the segment table region,
65/// between the fixed prefix and the segment data.
66#[repr(C)]
67#[derive(Clone, Copy, Debug, PartialEq, Eq)]
68pub struct SegmentDescriptor {
69    /// Byte offset from the start of the account data to the first
70    /// element of this segment.
71    pub offset: [u8; 4],
72    /// Number of elements currently stored in this segment.
73    pub count: [u8; 2],
74    /// Size of each element in bytes.
75    pub element_size: [u8; 2],
76}
77
78// SAFETY: repr(C), Copy, all fields are byte arrays. All bit patterns valid.
79unsafe impl Pod for SegmentDescriptor {}
80
81impl FixedLayout for SegmentDescriptor {
82    const SIZE: usize = SEGMENT_DESC_SIZE;
83}
84
85const _: () = assert!(core::mem::size_of::<SegmentDescriptor>() == SEGMENT_DESC_SIZE);
86const _: () = assert!(core::mem::align_of::<SegmentDescriptor>() == 1);
87
88impl SegmentDescriptor {
89    /// Create a new descriptor.
90    #[inline(always)]
91    pub const fn new(offset: u32, count: u16, element_size: u16) -> Self {
92        Self {
93            offset: offset.to_le_bytes(),
94            count: count.to_le_bytes(),
95            element_size: element_size.to_le_bytes(),
96        }
97    }
98
99    /// Read the byte offset.
100    #[inline(always)]
101    pub const fn offset(&self) -> u32 {
102        u32::from_le_bytes(self.offset)
103    }
104
105    /// Read the element count.
106    #[inline(always)]
107    pub const fn count(&self) -> u16 {
108        u16::from_le_bytes(self.count)
109    }
110
111    /// Read the element size.
112    #[inline(always)]
113    pub const fn element_size(&self) -> u16 {
114        u16::from_le_bytes(self.element_size)
115    }
116
117    /// Total byte footprint of this segment's data (count × element_size).
118    #[inline(always)]
119    pub const fn data_len(&self) -> usize {
120        self.count() as usize * self.element_size() as usize
121    }
122
123    /// Byte range `[offset .. offset + data_len)`. Returns `None` on overflow.
124    #[inline(always)]
125    pub const fn byte_range(&self) -> Option<(usize, usize)> {
126        let start = self.offset() as usize;
127        let len = self.data_len();
128        match start.checked_add(len) {
129            Some(end) => Some((start, end)),
130            None => None,
131        }
132    }
133}
134
// ── Segment Table ────────────────────────────────────────────────────────────

/// Read-only view over the segment table region of an account.
///
/// The table begins at a caller-chosen offset (normally right after the
/// fixed prefix) and holds `segment_count` descriptors of
/// [`SEGMENT_DESC_SIZE`] bytes each.
pub struct SegmentTable<'a> {
    /// Exactly the table bytes (`segment_count × 8`).
    data: &'a [u8],
    /// Number of descriptors in the table.
    segment_count: usize,
}
148impl<'a> SegmentTable<'a> {
149    /// Parse a segment table from `data`.
150    ///
151    /// `data` should start at the first descriptor byte.
152    /// `segment_count` must be ≤ `MAX_SEGMENTS`.
153    #[inline(always)]
154    pub fn from_bytes(data: &'a [u8], segment_count: usize) -> Result<Self, ProgramError> {
155        if segment_count > MAX_SEGMENTS {
156            return Err(ProgramError::InvalidArgument);
157        }
158        let table_size = segment_count * SEGMENT_DESC_SIZE;
159        if data.len() < table_size {
160            return Err(ProgramError::AccountDataTooSmall);
161        }
162        Ok(Self {
163            data: &data[..table_size],
164            segment_count,
165        })
166    }
167
168    /// Number of segments in the table.
169    #[inline(always)]
170    pub fn len(&self) -> usize {
171        self.segment_count
172    }
173
174    /// Whether the table has no segments.
175    #[inline(always)]
176    pub fn is_empty(&self) -> bool {
177        self.segment_count == 0
178    }
179
180    /// Get the descriptor at `index`.
181    #[inline(always)]
182    pub fn descriptor(&self, index: usize) -> Result<SegmentDescriptor, ProgramError> {
183        if index >= self.segment_count {
184            return Err(ProgramError::InvalidArgument);
185        }
186        let start = index * SEGMENT_DESC_SIZE;
187        Ok(SegmentDescriptor {
188            offset: [
189                self.data[start],
190                self.data[start + 1],
191                self.data[start + 2],
192                self.data[start + 3],
193            ],
194            count: [self.data[start + 4], self.data[start + 5]],
195            element_size: [self.data[start + 6], self.data[start + 7]],
196        })
197    }
198
199    /// Validate that all segments are well-formed within `account_len` bytes.
200    ///
201    /// `min_offset` is the earliest byte at which segment data may start
202    /// (typically `DATA_START_OFFSET` - after the fixed prefix + table).
203    /// This prevents segment data from overlapping the fixed prefix or
204    /// the segment table itself.
205    ///
206    /// Checks:
207    /// - Element size matches `expected_sizes[i]` (if provided).
208    /// - Segment data fits within the account.
209    /// - No segment data starts before `min_offset`.
210    /// - No two segments overlap.
211    /// - All segments are ordered by offset.
212    #[inline]
213    pub fn validate(
214        &self,
215        account_len: usize,
216        expected_sizes: &[u16],
217        min_offset: usize,
218    ) -> Result<(), ProgramError> {
219        let mut prev_end: usize = min_offset;
220
221        for i in 0..self.segment_count {
222            let desc = self.descriptor(i)?;
223
224            // Element size must be non-zero.
225            if desc.element_size() == 0 {
226                return Err(ProgramError::InvalidAccountData);
227            }
228
229            // Check expected element size if provided.
230            if i < expected_sizes.len() && desc.element_size() != expected_sizes[i] {
231                return Err(ProgramError::InvalidAccountData);
232            }
233
234            // Compute byte range with overflow check.
235            let (start, end) = desc
236                .byte_range()
237                .ok_or(ProgramError::InvalidAccountData)?;
238
239            // Must fit within account data.
240            if end > account_len {
241                return Err(ProgramError::AccountDataTooSmall);
242            }
243
244            // Must be ordered and non-overlapping.
245            if start < prev_end {
246                return Err(ProgramError::InvalidAccountData);
247            }
248
249            prev_end = end;
250        }
251
252        Ok(())
253    }
254
255    /// Total byte size of the table itself (segment_count × 8).
256    #[inline(always)]
257    pub fn byte_len(&self) -> usize {
258        self.segment_count * SEGMENT_DESC_SIZE
259    }
260}
261
// ── Mutable Segment Table ────────────────────────────────────────────────────

/// Mutable view over the segment table region.
pub struct SegmentTableMut<'a> {
    /// Exactly the table bytes (`segment_count × 8`).
    data: &'a mut [u8],
    /// Number of descriptors in the table.
    segment_count: usize,
}
270impl<'a> SegmentTableMut<'a> {
271    /// Parse a mutable segment table from `data`.
272    #[inline(always)]
273    pub fn from_bytes(data: &'a mut [u8], segment_count: usize) -> Result<Self, ProgramError> {
274        if segment_count > MAX_SEGMENTS {
275            return Err(ProgramError::InvalidArgument);
276        }
277        let table_size = segment_count * SEGMENT_DESC_SIZE;
278        if data.len() < table_size {
279            return Err(ProgramError::AccountDataTooSmall);
280        }
281        Ok(Self {
282            data: &mut data[..table_size],
283            segment_count,
284        })
285    }
286
287    /// Read the descriptor at `index`.
288    #[inline(always)]
289    pub fn descriptor(&self, index: usize) -> Result<SegmentDescriptor, ProgramError> {
290        if index >= self.segment_count {
291            return Err(ProgramError::InvalidArgument);
292        }
293        let start = index * SEGMENT_DESC_SIZE;
294        Ok(SegmentDescriptor {
295            offset: [
296                self.data[start],
297                self.data[start + 1],
298                self.data[start + 2],
299                self.data[start + 3],
300            ],
301            count: [self.data[start + 4], self.data[start + 5]],
302            element_size: [self.data[start + 6], self.data[start + 7]],
303        })
304    }
305
306    /// Write a descriptor at `index`.
307    #[inline(always)]
308    pub fn set_descriptor(
309        &mut self,
310        index: usize,
311        desc: &SegmentDescriptor,
312    ) -> Result<(), ProgramError> {
313        if index >= self.segment_count {
314            return Err(ProgramError::InvalidArgument);
315        }
316        let start = index * SEGMENT_DESC_SIZE;
317        self.data[start..start + 4].copy_from_slice(&desc.offset);
318        self.data[start + 4..start + 6].copy_from_slice(&desc.count);
319        self.data[start + 6..start + 8].copy_from_slice(&desc.element_size);
320        Ok(())
321    }
322
323    /// Number of segments.
324    #[inline(always)]
325    pub fn len(&self) -> usize {
326        self.segment_count
327    }
328
329    /// Whether the table has no segments.
330    #[inline(always)]
331    pub fn is_empty(&self) -> bool {
332        self.segment_count == 0
333    }
334
335    /// Initialize the segment table with descriptors computed from
336    /// element sizes and initial counts.
337    ///
338    /// `specs` is a slice of `(element_size, initial_count)` pairs.
339    /// Offsets are computed automatically, starting at `data_start`
340    /// (typically `fixed_prefix_len + table_size`).
341    #[inline]
342    pub fn init(
343        data: &'a mut [u8],
344        data_start: u32,
345        specs: &[(u16, u16)],
346    ) -> Result<Self, ProgramError> {
347        let segment_count = specs.len();
348        if segment_count > MAX_SEGMENTS {
349            return Err(ProgramError::InvalidArgument);
350        }
351        let table_size = segment_count * SEGMENT_DESC_SIZE;
352        if data.len() < table_size {
353            return Err(ProgramError::AccountDataTooSmall);
354        }
355
356        let mut offset = data_start;
357        for (i, &(elem_size, count)) in specs.iter().enumerate() {
358            let start = i * SEGMENT_DESC_SIZE;
359            data[start..start + 4].copy_from_slice(&offset.to_le_bytes());
360            data[start + 4..start + 6].copy_from_slice(&count.to_le_bytes());
361            data[start + 6..start + 8].copy_from_slice(&elem_size.to_le_bytes());
362            // Advance offset with overflow check.
363            let seg_len = (count as u32)
364                .checked_mul(elem_size as u32)
365                .ok_or(ProgramError::ArithmeticOverflow)?;
366            offset = offset
367                .checked_add(seg_len)
368                .ok_or(ProgramError::ArithmeticOverflow)?;
369        }
370
371        Ok(Self {
372            data: &mut data[..table_size],
373            segment_count,
374        })
375    }
376}
377
378// ── Segment Slice (immutable) ────────────────────────────────────────────────
379
380/// Immutable zero-copy view over one segment's element array.
381///
382/// Similar to `ZeroCopySlice` but driven by a `SegmentDescriptor`
383/// rather than a length prefix. Elements are `Pod + FixedLayout`.
384pub struct SegmentSlice<'a, T: Pod + FixedLayout> {
385    data: &'a [u8],
386    count: u16,
387    _marker: core::marker::PhantomData<T>,
388}
389
390impl<'a, T: Pod + FixedLayout> SegmentSlice<'a, T> {
391    /// Create a segment view from a descriptor and the full account data.
392    ///
393    /// Validates that:
394    /// - `descriptor.element_size()` matches `T::SIZE`
395    /// - the segment's byte range fits within `account_data`
396    #[inline(always)]
397    pub fn from_descriptor(
398        account_data: &'a [u8],
399        descriptor: &SegmentDescriptor,
400    ) -> Result<Self, ProgramError> {
401        if descriptor.element_size() as usize != T::SIZE {
402            return Err(ProgramError::InvalidAccountData);
403        }
404        let (start, end) = descriptor
405            .byte_range()
406            .ok_or(ProgramError::InvalidAccountData)?;
407        if end > account_data.len() {
408            return Err(ProgramError::AccountDataTooSmall);
409        }
410        Ok(Self {
411            data: &account_data[start..end],
412            count: descriptor.count(),
413            _marker: core::marker::PhantomData,
414        })
415    }
416
417    /// Number of elements.
418    #[inline(always)]
419    pub fn len(&self) -> u16 {
420        self.count
421    }
422
423    /// Whether the segment is empty.
424    #[inline(always)]
425    pub fn is_empty(&self) -> bool {
426        self.count == 0
427    }
428
429    /// Get a reference to element at `index`.
430    #[inline(always)]
431    pub fn get(&self, index: u16) -> Result<&T, ProgramError> {
432        if index >= self.count {
433            return Err(ProgramError::InvalidArgument);
434        }
435        let offset = (index as usize) * T::SIZE;
436        #[cfg(target_os = "solana")]
437        {
438            // SAFETY: bounds checked above, alignment is 1 on SBF.
439            Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const T) })
440        }
441        #[cfg(not(target_os = "solana"))]
442        {
443            let ptr = self.data.as_ptr();
444            // SAFETY: bounds checked above. Alignment checked below.
445            if (unsafe { ptr.add(offset) } as usize) % core::mem::align_of::<T>() != 0 {
446                return Err(ProgramError::InvalidAccountData);
447            }
448            Ok(unsafe { &*(ptr.add(offset) as *const T) })
449        }
450    }
451
452    /// Read element at `index` by copy (alignment-safe on all targets).
453    #[inline(always)]
454    pub fn read(&self, index: u16) -> Result<T, ProgramError> {
455        if index >= self.count {
456            return Err(ProgramError::InvalidArgument);
457        }
458        let offset = (index as usize) * T::SIZE;
459        // SAFETY: bounds checked above.
460        Ok(unsafe {
461            core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T)
462        })
463    }
464
465    /// Iterate over all elements by copy.
466    #[inline(always)]
467    pub fn iter(&self) -> SegmentIter<'a, T> {
468        SegmentIter {
469            data: self.data,
470            index: 0,
471            count: self.count,
472            _marker: core::marker::PhantomData,
473        }
474    }
475
476    /// Raw byte slice of the segment data.
477    #[inline(always)]
478    pub fn as_bytes(&self) -> &[u8] {
479        self.data
480    }
481}
482
483// ── Segment Slice (mutable) ──────────────────────────────────────────────────
484
485/// Mutable zero-copy view over one segment's element array.
486pub struct SegmentSliceMut<'a, T: Pod + FixedLayout> {
487    data: &'a mut [u8],
488    count: u16,
489    _marker: core::marker::PhantomData<T>,
490}
491
492impl<'a, T: Pod + FixedLayout> SegmentSliceMut<'a, T> {
493    /// Create a mutable segment view from a descriptor and full account data.
494    #[inline(always)]
495    pub fn from_descriptor(
496        account_data: &'a mut [u8],
497        descriptor: &SegmentDescriptor,
498    ) -> Result<Self, ProgramError> {
499        if descriptor.element_size() as usize != T::SIZE {
500            return Err(ProgramError::InvalidAccountData);
501        }
502        let (start, end) = descriptor
503            .byte_range()
504            .ok_or(ProgramError::InvalidAccountData)?;
505        if end > account_data.len() {
506            return Err(ProgramError::AccountDataTooSmall);
507        }
508        Ok(Self {
509            data: &mut account_data[start..end],
510            count: descriptor.count(),
511            _marker: core::marker::PhantomData,
512        })
513    }
514
515    /// Number of elements.
516    #[inline(always)]
517    pub fn len(&self) -> u16 {
518        self.count
519    }
520
521    /// Whether the segment is empty.
522    #[inline(always)]
523    pub fn is_empty(&self) -> bool {
524        self.count == 0
525    }
526
527    /// Get a mutable reference to element at `index`.
528    #[inline(always)]
529    pub fn get_mut(&mut self, index: u16) -> Result<&mut T, ProgramError> {
530        if index >= self.count {
531            return Err(ProgramError::InvalidArgument);
532        }
533        let offset = (index as usize) * T::SIZE;
534        #[cfg(target_os = "solana")]
535        {
536            // SAFETY: bounds checked above, alignment is 1 on SBF.
537            Ok(unsafe { &mut *(self.data.as_mut_ptr().add(offset) as *mut T) })
538        }
539        #[cfg(not(target_os = "solana"))]
540        {
541            let ptr = self.data.as_mut_ptr();
542            if (unsafe { ptr.add(offset) } as usize) % core::mem::align_of::<T>() != 0 {
543                return Err(ProgramError::InvalidAccountData);
544            }
545            Ok(unsafe { &mut *(ptr.add(offset) as *mut T) })
546        }
547    }
548
549    /// Get an immutable reference to element at `index`.
550    #[inline(always)]
551    pub fn get(&self, index: u16) -> Result<&T, ProgramError> {
552        if index >= self.count {
553            return Err(ProgramError::InvalidArgument);
554        }
555        let offset = (index as usize) * T::SIZE;
556        #[cfg(target_os = "solana")]
557        {
558            Ok(unsafe { &*(self.data.as_ptr().add(offset) as *const T) })
559        }
560        #[cfg(not(target_os = "solana"))]
561        {
562            let ptr = self.data.as_ptr();
563            if (unsafe { ptr.add(offset) } as usize) % core::mem::align_of::<T>() != 0 {
564                return Err(ProgramError::InvalidAccountData);
565            }
566            Ok(unsafe { &*(ptr.add(offset) as *const T) })
567        }
568    }
569
570    /// Write a value at `index` via byte copy (alignment-safe).
571    #[inline(always)]
572    pub fn set(&mut self, index: u16, value: &T) -> Result<(), ProgramError> {
573        if index >= self.count {
574            return Err(ProgramError::InvalidArgument);
575        }
576        let offset = (index as usize) * T::SIZE;
577        let src = value as *const T as *const u8;
578        // SAFETY: bounds checked above, copy is non-overlapping.
579        unsafe {
580            core::ptr::copy_nonoverlapping(
581                src,
582                self.data.as_mut_ptr().add(offset),
583                T::SIZE,
584            );
585        }
586        Ok(())
587    }
588
589    /// Read element at `index` by copy (alignment-safe on all targets).
590    #[inline(always)]
591    pub fn read(&self, index: u16) -> Result<T, ProgramError> {
592        if index >= self.count {
593            return Err(ProgramError::InvalidArgument);
594        }
595        let offset = (index as usize) * T::SIZE;
596        Ok(unsafe {
597            core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T)
598        })
599    }
600
601    /// Raw byte slice of the segment data.
602    #[inline(always)]
603    pub fn as_bytes(&self) -> &[u8] {
604        self.data
605    }
606}
607
608// ── Segment Iterator ─────────────────────────────────────────────────────────
609
610/// Iterator over elements in a [`SegmentSlice`], yielding copies.
611pub struct SegmentIter<'a, T: Pod + FixedLayout> {
612    data: &'a [u8],
613    index: u16,
614    count: u16,
615    _marker: core::marker::PhantomData<T>,
616}
617
618impl<'a, T: Pod + FixedLayout> Iterator for SegmentIter<'a, T> {
619    type Item = T;
620
621    #[inline(always)]
622    fn next(&mut self) -> Option<Self::Item> {
623        if self.index >= self.count {
624            return None;
625        }
626        let offset = (self.index as usize) * T::SIZE;
627        self.index += 1;
628        // SAFETY: bounds checked by constructor + index < count.
629        Some(unsafe {
630            core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T)
631        })
632    }
633
634    #[inline(always)]
635    fn size_hint(&self) -> (usize, Option<usize>) {
636        let remaining = (self.count - self.index) as usize;
637        (remaining, Some(remaining))
638    }
639}
640
641impl<'a, T: Pod + FixedLayout> ExactSizeIterator for SegmentIter<'a, T> {}
642
643// ── Segment push / swap-remove ───────────────────────────────────────────────
644
645/// Push an element at the end of a segment.
646///
647/// Reads the current descriptor at `seg_index`, writes `value` after
648/// the last entry, then increments the descriptor count.
649///
650/// # Errors
651///
652/// - `AccountDataTooSmall` if the account data is too short for the new element.
653/// - `InvalidAccountData` if `T::SIZE` doesn't match the descriptor's element size.
654/// - `ArithmeticOverflow` if the count would exceed `u16::MAX`.
655#[inline]
656pub fn segment_push<T: Pod + FixedLayout>(
657    data: &mut [u8],
658    table_offset: usize,
659    segment_count: usize,
660    seg_index: usize,
661    value: &T,
662) -> Result<(), ProgramError> {
663    // Read descriptor and upper bound (scoped to release the shared borrow).
664    let (desc, upper_bound) = {
665        let table = SegmentTable::from_bytes(&data[table_offset..], segment_count)?;
666        let d = table.descriptor(seg_index)?;
667        // Upper bound: for the last segment, data.len(). For earlier
668        // segments, the next segment's offset. This prevents push from
669        // writing into an adjacent segment's region.
670        let ub = if seg_index + 1 < segment_count {
671            table.descriptor(seg_index + 1)?.offset() as usize
672        } else {
673            data.len()
674        };
675        (d, ub)
676    };
677
678    if desc.element_size() as usize != T::SIZE {
679        return Err(ProgramError::InvalidAccountData);
680    }
681
682    let current_count = desc.count();
683    let write_offset = desc.offset() as usize + (current_count as usize) * T::SIZE;
684    let write_end = write_offset + T::SIZE;
685
686    if write_end > upper_bound {
687        return Err(ProgramError::AccountDataTooSmall);
688    }
689
690    // Write the element bytes.
691    let src = value as *const T as *const u8;
692    // SAFETY: bounds checked above; copy is non-overlapping (new slot).
693    unsafe {
694        core::ptr::copy_nonoverlapping(src, data.as_mut_ptr().add(write_offset), T::SIZE);
695    }
696
697    // Increment the descriptor count.
698    let new_count = current_count
699        .checked_add(1)
700        .ok_or(ProgramError::ArithmeticOverflow)?;
701    let updated = SegmentDescriptor::new(desc.offset(), new_count, desc.element_size());
702    let mut table_mut = SegmentTableMut::from_bytes(&mut data[table_offset..], segment_count)?;
703    table_mut.set_descriptor(seg_index, &updated)?;
704
705    Ok(())
706}
707
708/// Remove element at `index` by swapping it with the last element.
709///
710/// Returns the removed element by copy. The last slot is zeroed.
711/// Order is **not** preserved (O(1) removal).
712///
713/// # Errors
714///
715/// - `InvalidArgument` if `index >= count`.
716/// - `InvalidAccountData` if `T::SIZE` doesn't match the descriptor's element size.
717#[inline]
718pub fn segment_swap_remove<T: Pod + FixedLayout>(
719    data: &mut [u8],
720    table_offset: usize,
721    segment_count: usize,
722    seg_index: usize,
723    index: u16,
724) -> Result<T, ProgramError> {
725    let desc = {
726        let table = SegmentTable::from_bytes(&data[table_offset..], segment_count)?;
727        table.descriptor(seg_index)?
728    };
729
730    if desc.element_size() as usize != T::SIZE {
731        return Err(ProgramError::InvalidAccountData);
732    }
733
734    let count = desc.count();
735    if index >= count {
736        return Err(ProgramError::InvalidArgument);
737    }
738
739    let base = desc.offset() as usize;
740    let target_offset = base + (index as usize) * T::SIZE;
741
742    // Read the element being removed (by copy).
743    // SAFETY: bounds guaranteed by descriptor validation.
744    let removed = unsafe {
745        core::ptr::read_unaligned(data.as_ptr().add(target_offset) as *const T)
746    };
747
748    let last_index = count - 1;
749    if index < last_index {
750        // Copy last element into the target slot.
751        let last_offset = base + (last_index as usize) * T::SIZE;
752        data.copy_within(last_offset..last_offset + T::SIZE, target_offset);
753    }
754
755    // Zero the now-unused last slot (compiles to sol_memset on SBF).
756    let last_offset = base + (last_index as usize) * T::SIZE;
757    data[last_offset..last_offset + T::SIZE].fill(0);
758
759    // Decrement the descriptor count.
760    let updated = SegmentDescriptor::new(desc.offset(), last_index, desc.element_size());
761    let mut table_mut = SegmentTableMut::from_bytes(&mut data[table_offset..], segment_count)?;
762    table_mut.set_descriptor(seg_index, &updated)?;
763
764    Ok(removed)
765}