defmt_ringbuf/ring_buffer.rs

//! Ring buffer.

use core::{
    mem::MaybeUninit,
    ptr::{addr_of, addr_of_mut},
    sync::atomic::{AtomicBool, AtomicU32, AtomicU8, AtomicUsize, Ordering},
};

use cortex_m::Peripherals;

/// Ring buffer access methods.
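///
/// # Example
///
/// A minimal sketch of the intended call pattern; the buffer size and the
/// `demo` helper are illustrative, not part of this crate.
///
/// ```ignore
/// // Assuming `RingBuf` and `RingBuffer` are both in scope.
/// fn demo(buf: &mut RingBuffer<1024>) {
///     buf.write(b"hello");
///
///     let mut out = [0u8; 16];
///     let (n, lost) = buf.read(&mut out);
///     // `out[..n]` holds the oldest unread bytes; `lost` reports whether
///     // unread data was overwritten since the previous read.
///     let _ = (&out[..n], lost);
/// }
/// ```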
pub trait RingBuf {
    /// Writes into the ring buffer, overwriting the oldest unread data when
    /// the buffer is full.
    fn write(&mut self, data: &[u8]);

    /// Reads from the ring buffer.
    ///
    /// Returns the number of bytes read and whether unread data was lost
    /// since the last read.
    fn read(&mut self, data: &mut [u8]) -> (usize, bool);
}

/// Ring buffer.
#[repr(C)]
pub struct RingBuffer<const SIZE: usize> {
    /// Signature for validity check.
    signature: AtomicU32,
    /// Read position.
    read_pos: AtomicUsize,
    /// Write position.
    write_pos: AtomicUsize,
    /// Unread data overwritten?
    overwritten: AtomicBool,
    /// Buffer.
    buf: [AtomicU8; SIZE],
}

impl<const SIZE: usize> RingBuffer<SIZE> {
    /// Signature for validity check.
    const SIGNATURE: u32 = 0xb0ffe300;

    /// Initializes the ring buffer, keeping its data if it appears valid.
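    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the buffer lives in a RAM region that the
    /// startup code neither zeroes nor initializes, so that its contents
    /// survive a reset; the `.uninit.LOG` section name depends on the linker
    /// script and is an assumption here.
    ///
    /// ```ignore
    /// use core::mem::MaybeUninit;
    ///
    /// #[link_section = ".uninit.LOG"]
    /// static mut LOG: MaybeUninit<RingBuffer<1024>> = MaybeUninit::uninit();
    ///
    /// fn setup() -> &'static mut RingBuffer<1024> {
    ///     unsafe { RingBuffer::init(&mut LOG) }
    /// }
    /// ```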
    pub fn init(uninit: &mut MaybeUninit<Self>) -> &mut Self {
        unsafe {
            let mut scb = Peripherals::steal().SCB;
            let ptr = uninit.as_mut_ptr();

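            // The memory may still hold a ring buffer from before the last reset,
            // so read the header fields volatilely and validate them before
            // trusting the contents.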
            let signature = (addr_of!((*ptr).signature) as *const u32).read_volatile();
            let mut read_pos = (addr_of!((*ptr).read_pos) as *const usize).read_volatile();
            let mut write_pos = (addr_of!((*ptr).write_pos) as *const usize).read_volatile();
            let mut overwritten = (addr_of!((*ptr).overwritten) as *const u8).read_volatile();

            let valid = signature == Self::SIGNATURE
                && read_pos < SIZE
                && write_pos < SIZE
                && (overwritten == 0 || overwritten == 1);

            if !valid {
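                // Clear the signature first so that a reset during
                // re-initialization cannot leave stale contents that look valid.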
                addr_of_mut!((*ptr).signature).write_volatile(AtomicU32::new(0));
                scb.clean_dcache_by_ref(&(*ptr).signature);

                read_pos = 0;
                write_pos = 0;
                overwritten = 0;
            }

            for i in 0..SIZE {
                let b = if valid { (addr_of!((*ptr).buf[i]) as *const u8).read_volatile() } else { 0 };
                addr_of_mut!((*ptr).buf[i]).write_volatile(AtomicU8::new(b));
            }
            scb.clean_dcache_by_slice(&(*ptr).buf);

            addr_of_mut!((*ptr).read_pos).write_volatile(AtomicUsize::new(read_pos));
            scb.clean_dcache_by_ref(&(*ptr).read_pos);
            addr_of_mut!((*ptr).write_pos).write_volatile(AtomicUsize::new(write_pos));
            scb.clean_dcache_by_ref(&(*ptr).write_pos);
            addr_of_mut!((*ptr).overwritten).write_volatile(AtomicBool::new(overwritten != 0));
            scb.clean_dcache_by_ref(&(*ptr).overwritten);
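            // Write the signature last, once all other fields are consistent in RAM.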
            addr_of_mut!((*ptr).signature).write_volatile(AtomicU32::new(Self::SIGNATURE));
            scb.clean_dcache_by_ref(&(*ptr).signature);

            // SAFETY: all fields have been initialized with either default values or their previous contents.
            uninit.assume_init_mut()
        }
    }
}

impl<const SIZE: usize> RingBuf for RingBuffer<SIZE> {
    fn write(&mut self, mut data: &[u8]) {
        let mut scb = unsafe { Peripherals::steal().SCB };

        while !data.is_empty() {
            // Split data into part that fits remaining buffer.
            let write_pos = self.write_pos.load(Ordering::SeqCst);
            let to_end = SIZE - write_pos;
            let (part, rest) = data.split_at(to_end.min(data.len()));
            data = rest;

            // Calculate write boundaries.
            let from = write_pos;
            let to = write_pos + part.len();

            // Update read position if we overwrite unread data.
            let read_pos = self.read_pos.load(Ordering::SeqCst);
            if from < read_pos && to >= read_pos {
                self.overwritten.store(true, Ordering::SeqCst);

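                // Move the read position one byte past the written region; if it
                // landed exactly on the new write position, the buffer would
                // appear empty instead of full.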
                let mut new_read_pos = to + 1;
                if new_read_pos >= SIZE {
                    new_read_pos = 0;
                }
                self.read_pos.store(new_read_pos, Ordering::SeqCst);
                scb.clean_dcache_by_ref(&self.read_pos);
            }

            // Copy.
            for (dst, src) in self.buf[from..to].iter_mut().zip(part.iter()) {
                dst.store(*src, Ordering::SeqCst);
            }
            scb.clean_dcache_by_slice(&self.buf[from..to]);

            // Update write position.
            let new_write_pos = if to == SIZE { 0 } else { to };
            self.write_pos.store(new_write_pos, Ordering::SeqCst);
            scb.clean_dcache_by_ref(&self.write_pos);
        }
    }

    fn read(&mut self, data: &mut [u8]) -> (usize, bool) {
        let mut scb = unsafe { Peripherals::steal().SCB };

        let read_pos = self.read_pos.load(Ordering::SeqCst);
        let write_pos = self.write_pos.load(Ordering::SeqCst);
        let overwritten = self.overwritten.load(Ordering::SeqCst);

        // Calculate available data.
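        // When the data wraps around, only the run up to the end of the buffer is
        // returned; the caller picks up the wrapped part on its next call.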
        let avail = if read_pos > write_pos { SIZE - read_pos } else { write_pos - read_pos };
        let n = avail.min(data.len());

        // Copy.
        let from = read_pos;
        let to = read_pos + n;
        for (dst, src) in data[..n].iter_mut().zip(self.buf[from..to].iter()) {
            *dst = src.load(Ordering::SeqCst);
        }

        // Update read position and overwritten status.
        let new_read_pos = if to == SIZE { 0 } else { to };
        self.read_pos.store(new_read_pos, Ordering::SeqCst);
        scb.clean_dcache_by_ref(&self.read_pos);
        self.overwritten.store(false, Ordering::SeqCst);
        scb.clean_dcache_by_ref(&self.overwritten);

        (n, overwritten)
    }
}