// hopper_core/collections/ring_buffer.rs
//! Fixed-capacity circular buffer for journals, event logs, and queues.
//!
//! Wire layout:
//! ```text
//! [head: u32 LE][count: u32 LE][element 0][element 1]...[element capacity-1]
//! ```
//!
//! Elements wrap around when the buffer is full. Oldest elements are overwritten.

use crate::account::{FixedLayout, Pod};
use hopper_runtime::error::ProgramError;

/// Header: 4 bytes head + 4 bytes count = 8 bytes.
const RING_HEADER: usize = 8;

/// Fixed-capacity circular buffer overlaid on a byte slice.
///
/// - `push` always succeeds. When full, overwrites the oldest entry.
/// - Use for event journals, audit logs, price history, etc.
/// - O(1) push, O(1) read by logical index.
pub struct RingBuffer<'a, T: Pod + FixedLayout> {
    // Backing bytes: 8-byte header (head u32 LE, count u32 LE) followed by
    // `capacity` element slots of T::SIZE bytes each.
    data: &'a mut [u8],
    // Records the element type without storing a value (zero-sized).
    _phantom: core::marker::PhantomData<T>,
}
26impl<'a, T: Pod + FixedLayout> RingBuffer<'a, T> {
27    /// Overlay a RingBuffer on a mutable byte slice.
28    #[inline]
29    pub fn from_bytes(data: &'a mut [u8]) -> Result<Self, ProgramError> {
30        if data.len() < RING_HEADER {
31            return Err(ProgramError::AccountDataTooSmall);
32        }
33        Ok(Self {
34            data,
35            _phantom: core::marker::PhantomData,
36        })
37    }
38
39    /// Maximum capacity (number of elements).
40    #[inline(always)]
41    pub fn capacity(&self) -> usize {
42        (self.data.len() - RING_HEADER) / T::SIZE
43    }
44
45    /// Current number of elements (may be less than capacity if not yet full).
46    #[inline(always)]
47    pub fn count(&self) -> usize {
48        let bytes = [self.data[4], self.data[5], self.data[6], self.data[7]];
49        u32::from_le_bytes(bytes) as usize
50    }
51
52    /// Head pointer (index of the next write position).
53    #[inline(always)]
54    fn head(&self) -> usize {
55        let bytes = [self.data[0], self.data[1], self.data[2], self.data[3]];
56        u32::from_le_bytes(bytes) as usize
57    }
58
59    /// Set head.
60    #[inline(always)]
61    fn set_head(&mut self, head: usize) {
62        let bytes = (head as u32).to_le_bytes();
63        self.data[0..4].copy_from_slice(&bytes);
64    }
65
66    /// Set count.
67    #[inline(always)]
68    fn set_count(&mut self, count: usize) {
69        let bytes = (count as u32).to_le_bytes();
70        self.data[4..8].copy_from_slice(&bytes);
71    }
72
73    /// Byte offset of element at physical slot `index`.
74    #[inline(always)]
75    fn slot_offset(&self, index: usize) -> usize {
76        RING_HEADER + index * T::SIZE
77    }
78
79    /// Push an element. If the buffer is full, overwrites the oldest entry.
80    #[inline]
81    pub fn push(&mut self, value: T) -> Result<(), ProgramError> {
82        let cap = self.capacity();
83        if cap == 0 {
84            return Err(ProgramError::AccountDataTooSmall);
85        }
86
87        let head = self.head();
88        let offset = self.slot_offset(head);
89
90        // SAFETY: T: Pod, alignment-1, bounds ensured by capacity calculation.
91        unsafe {
92            core::ptr::write_unaligned(self.data.as_mut_ptr().add(offset) as *mut T, value);
93        }
94
95        let new_head = (head + 1) % cap;
96        self.set_head(new_head);
97
98        let count = self.count();
99        if count < cap {
100            self.set_count(count + 1);
101        }
102
103        Ok(())
104    }
105
106    /// Read the element at logical index (0 = oldest still in buffer).
107    #[inline]
108    pub fn get(&self, logical_index: usize) -> Result<T, ProgramError> {
109        let count = self.count();
110        if logical_index >= count {
111            return Err(ProgramError::InvalidArgument);
112        }
113        let cap = self.capacity();
114        let head = self.head();
115        // The oldest element is at (head - count) mod cap
116        let start = if head >= count {
117            head - count
118        } else {
119            cap - (count - head)
120        };
121        let physical = (start + logical_index) % cap;
122        let offset = self.slot_offset(physical);
123
124        // SAFETY: This block is part of Hopper's audited zero-copy/backend boundary; surrounding checks and caller contracts uphold the required raw-pointer, layout, and aliasing invariants.
125        Ok(unsafe { core::ptr::read_unaligned(self.data.as_ptr().add(offset) as *const T) })
126    }
127
128    /// Read the most recently pushed element.
129    #[inline]
130    pub fn latest(&self) -> Result<T, ProgramError> {
131        let count = self.count();
132        if count == 0 {
133            return Err(ProgramError::InvalidArgument);
134        }
135        self.get(count - 1)
136    }
137
138    /// Read the oldest element.
139    #[inline]
140    pub fn oldest(&self) -> Result<T, ProgramError> {
141        if self.count() == 0 {
142            return Err(ProgramError::InvalidArgument);
143        }
144        self.get(0)
145    }
146
147    /// Compute the byte size needed for a RingBuffer with the given capacity.
148    #[inline(always)]
149    pub const fn required_bytes(capacity: usize) -> usize {
150        RING_HEADER + capacity * T::SIZE
151    }
152}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::abi::WireU32;

    #[test]
    fn ring_push_and_read() {
        // Three u32 slots after the 8-byte header.
        let mut storage = [0u8; 8 + 4 * 3];
        let mut rb = RingBuffer::<WireU32>::from_bytes(&mut storage).unwrap();

        for value in [10, 20, 30] {
            rb.push(WireU32::new(value)).unwrap();
        }

        assert_eq!(rb.count(), 3);
        assert_eq!(rb.oldest().unwrap().get(), 10);
        assert_eq!(rb.latest().unwrap().get(), 30);
    }

    #[test]
    fn ring_wraps_around() {
        // Two slots: the third push must evict the first value.
        let mut storage = [0u8; 8 + 4 * 2];
        let mut rb = RingBuffer::<WireU32>::from_bytes(&mut storage).unwrap();

        for value in [1, 2, 3] {
            rb.push(WireU32::new(value)).unwrap();
        }

        assert_eq!(rb.count(), 2);
        assert_eq!(rb.oldest().unwrap().get(), 2);
        assert_eq!(rb.latest().unwrap().get(), 3);
    }
}