// hopper_core/collections/ring_buffer.rs
use crate::account::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;

const RING_HEADER: usize = 8;

16pub struct RingBuffer<'a, T: Pod + FixedLayout> {
22 data: &'a mut [u8],
23 _phantom: core::marker::PhantomData<T>,
24}
25
26impl<'a, T: Pod + FixedLayout> RingBuffer<'a, T> {
27 #[inline]
29 pub fn from_bytes(data: &'a mut [u8]) -> Result<Self, ProgramError> {
30 if data.len() < RING_HEADER {
31 return Err(ProgramError::AccountDataTooSmall);
32 }
33 Ok(Self {
34 data,
35 _phantom: core::marker::PhantomData,
36 })
37 }
38
39 #[inline(always)]
41 pub fn capacity(&self) -> usize {
42 (self.data.len() - RING_HEADER) / T::SIZE
43 }
44
45 #[inline(always)]
47 pub fn count(&self) -> usize {
48 let bytes = [self.data[4], self.data[5], self.data[6], self.data[7]];
49 u32::from_le_bytes(bytes) as usize
50 }
51
52 #[inline(always)]
54 fn head(&self) -> usize {
55 let bytes = [self.data[0], self.data[1], self.data[2], self.data[3]];
56 u32::from_le_bytes(bytes) as usize
57 }
58
59 #[inline(always)]
61 fn set_head(&mut self, head: usize) {
62 let bytes = (head as u32).to_le_bytes();
63 self.data[0..4].copy_from_slice(&bytes);
64 }
65
66 #[inline(always)]
68 fn set_count(&mut self, count: usize) {
69 let bytes = (count as u32).to_le_bytes();
70 self.data[4..8].copy_from_slice(&bytes);
71 }
72
73 #[inline(always)]
75 fn slot_offset(&self, index: usize) -> usize {
76 RING_HEADER + index * T::SIZE
77 }
78
79 #[inline]
81 pub fn push(&mut self, value: T) -> Result<(), ProgramError> {
82 let cap = self.capacity();
83 if cap == 0 {
84 return Err(ProgramError::AccountDataTooSmall);
85 }
86
87 let head = self.head();
88 let offset = self.slot_offset(head);
89
90 unsafe {
92 core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut T, value);
93 }
94
95 let new_head = (head + 1) % cap;
96 self.set_head(new_head);
97
98 let count = self.count();
99 if count < cap {
100 self.set_count(count + 1);
101 }
102
103 Ok(())
104 }
105
106 #[inline]
108 pub fn get(&self, logical_index: usize) -> Result<T, ProgramError> {
109 let count = self.count();
110 if logical_index >= count {
111 return Err(ProgramError::InvalidArgument);
112 }
113 let cap = self.capacity();
114 let head = self.head();
115 let start = if head >= count {
117 head - count
118 } else {
119 cap - (count - head)
120 };
121 let physical = (start + logical_index) % cap;
122 let offset = self.slot_offset(physical);
123
124 Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) })
126 }
127
128 #[inline]
130 pub fn latest(&self) -> Result<T, ProgramError> {
131 let count = self.count();
132 if count == 0 {
133 return Err(ProgramError::InvalidArgument);
134 }
135 self.get(count - 1)
136 }
137
138 #[inline]
140 pub fn oldest(&self) -> Result<T, ProgramError> {
141 if self.count() == 0 {
142 return Err(ProgramError::InvalidArgument);
143 }
144 self.get(0)
145 }
146
147 #[inline(always)]
149 pub const fn required_bytes(capacity: usize) -> usize {
150 RING_HEADER + capacity * T::SIZE
151 }
152}
153
#[cfg(test)]
mod tests {
    use super::*;
    use crate::abi::WireU32;

    /// Fill a 3-slot ring without wrapping and read both ends back.
    #[test]
    fn ring_push_and_read() {
        let mut storage = [0u8; 8 + 4 * 3];
        let mut rb = RingBuffer::<WireU32>::from_bytes(&mut storage).unwrap();

        for v in [10, 20, 30] {
            rb.push(WireU32::new(v)).unwrap();
        }

        assert_eq!(rb.count(), 3);
        assert_eq!(rb.oldest().unwrap().get(), 10);
        assert_eq!(rb.latest().unwrap().get(), 30);
    }

    /// Overfill a 2-slot ring: the third push evicts the oldest element.
    #[test]
    fn ring_wraps_around() {
        let mut storage = [0u8; 8 + 4 * 2];
        let mut rb = RingBuffer::<WireU32>::from_bytes(&mut storage).unwrap();

        for v in [1, 2, 3] {
            rb.push(WireU32::new(v)).unwrap();
        }

        assert_eq!(rb.count(), 2);
        assert_eq!(rb.oldest().unwrap().get(), 2);
        assert_eq!(rb.latest().unwrap().get(), 3);
    }
}