rtt_target/
rtt.rs

1//! This module contains the implementation for the RTT protocol. It's not meant to be used directly
2//! in user code, and therefore mostly undocumented. The module is only public so that it can be
3//! accessed from the rtt_init! macro.
4
5use crate::ChannelMode;
6use core::cmp::min;
7use core::fmt;
8use core::ptr;
9use portable_atomic::{AtomicUsize, Ordering::SeqCst};
10
// Note: this is zero-initialized in the initialization macro, so the all-zeros bit pattern
// must be a valid (uninitialized) value for every field.
#[repr(C)]
pub struct RttHeader {
    id: [u8; 16],
    max_up_channels: usize,
    max_down_channels: usize,
    // Followed in memory by:
    // up_channels: [Channel; max_up_channels]
    // down_channels: [Channel; max_down_channels]
}

impl RttHeader {
    /// Initializes the control block header.
    ///
    /// # Safety
    ///
    /// The arguments must correspond to the sizes of the arrays that follow the header in memory.
    pub unsafe fn init(&mut self, max_up_channels: usize, max_down_channels: usize) {
        ptr::write_volatile(&mut self.max_up_channels, max_up_channels);
        ptr::write_volatile(&mut self.max_down_channels, max_down_channels);

        // The magic string is stored reversed in the constant so the firmware image itself
        // doesn't contain a "SEGGER RTT" match. It is also written to memory last byte first,
        // so the host cannot detect a half-initialized control block.
        const MAGIC_STR_BACKWARDS: &[u8; 16] = b"\0\0\0\0\0\0TTR REGGES";

        // Pair id[15] with the first constant byte, id[14] with the second, and so on;
        // this both un-reverses the string and writes the "SEGG.." prefix last.
        for (dst, &byte) in self.id.iter_mut().rev().zip(MAGIC_STR_BACKWARDS) {
            ptr::write_volatile(dst, byte);
        }
    }

    /// Returns the capacity of the up-channel array that follows the header.
    pub fn max_up_channels(&self) -> usize {
        self.max_up_channels
    }
}
46
// Note: this is zero-initialized in the initialization macro so all zeros must be a valid value.
// #[repr(C)] keeps the field order fixed so the layout matches what RTT host tools expect —
// presumably SEGGER's channel descriptor layout; confirm against the host-side definition.
#[repr(C)]
pub struct RttChannel {
    // Pointer to a null-terminated channel name string (set once in `init`)
    name: *const u8,
    // Pointer to the start of the ring buffer; null until `init` runs (see `is_initialized`)
    buffer: *mut u8,
    // Size of the ring buffer in bytes
    size: usize,
    // Write offset into the buffer; advanced by this side for up channels
    write: AtomicUsize,
    // Read offset into the buffer; advanced by `read()` for down channels
    // (for up channels, presumably advanced by the host — confirm)
    read: AtomicUsize,
    // Channel flags; the low two bits encode the ChannelMode (see `mode`/`set_mode`)
    flags: AtomicUsize,
}
57
impl RttChannel {
    /// Initializes the channel.
    ///
    /// # Safety
    ///
    /// The pointer arguments must point to a valid null-terminated name and writable buffer.
    pub unsafe fn init(&mut self, name: *const u8, mode: ChannelMode, buffer: *mut [u8]) {
        ptr::write_volatile(&mut self.name, name);
        ptr::write_volatile(&mut self.size, (*buffer).len());
        self.set_mode(mode);

        // Set buffer last as it can be used to detect if the channel has been initialized
        ptr::write_volatile(&mut self.buffer, buffer as *mut u8);
    }

    /// Returns true on a non-null value of the (raw) buffer pointer
    pub fn is_initialized(&self) -> bool {
        !self.buffer.is_null()
    }

    /// Returns the channel mode encoded in the low two bits of the flags word.
    /// The unassigned bit pattern 3 falls back to `NoBlockSkip`.
    pub(crate) fn mode(&self) -> ChannelMode {
        let mode = self.flags.load(SeqCst) & 3;

        match mode {
            0 => ChannelMode::NoBlockSkip,
            1 => ChannelMode::NoBlockTrim,
            2 => ChannelMode::BlockIfFull,
            _ => ChannelMode::NoBlockSkip,
        }
    }

    /// Stores `mode` in the low two bits of the flags word, preserving the remaining bits.
    // NOTE(review): the load/store pair is not a single atomic read-modify-write, so a
    // concurrent flags update could be lost — presumably acceptable here; confirm.
    pub(crate) fn set_mode(&self, mode: ChannelMode) {
        self.flags
            .store((self.flags.load(SeqCst) & !3) | mode as usize, SeqCst);
    }

    /// Reads up to `buf.len()` bytes out of the ring buffer into `buf`, returning the number
    /// of bytes copied. This method should only be called for down channels.
    pub(crate) fn read(&self, mut buf: &mut [u8]) -> usize {
        let (write, mut read) = self.read_pointers();

        let mut total = 0;

        // Read while buffer contains data and output buffer has space (maximum of two iterations)
        while !buf.is_empty() {
            let count = min(self.readable_contiguous(write, read), buf.len());
            if count == 0 {
                break;
            }

            unsafe {
                // `count` never exceeds the contiguous readable span starting at `read`,
                // so the copy stays inside the channel buffer.
                ptr::copy_nonoverlapping(self.buffer.add(read), buf.as_mut_ptr(), count);
            }

            total += count;
            read += count;

            if read >= self.size {
                // Wrap around to start
                read = 0;
            }

            buf = &mut buf[count..];
        }

        // Publish the new read position only after the data has been copied out.
        self.read.store(read, SeqCst);

        total
    }

    /// This method should only be called for up channels.
    pub(crate) fn writer(&self) -> RttWriter<'_> {
        RttWriter {
            chan: self,
            write: self.read_pointers().0,
            total: 0,
            state: WriteState::Writable,
        }
    }

    /// Gets the amount of contiguous data available for reading
    fn readable_contiguous(&self, write: usize, read: usize) -> usize {
        if read > write {
            // Data wraps around the end of the buffer: only the tail segment is contiguous;
            // the remainder is picked up on the next loop iteration after `read` wraps to 0.
            self.size - read
        } else {
            write - read
        }
    }

    /// Loads the write and read offsets, resetting both to zero when either is out of
    /// bounds (which indicates a corrupted control block).
    pub(crate) fn read_pointers(&self) -> (usize, usize) {
        let write = self.write.load(SeqCst);
        let read = self.read.load(SeqCst);

        if write >= self.size || read >= self.size {
            // Pointers have been corrupted. This doesn't happen in well-behaved programs, so
            // attempt to reset the buffer.

            self.write.store(0, SeqCst);
            self.read.store(0, SeqCst);
            return (0, 0);
        }

        (write, read)
    }
}
162
/// A cancellable write operation to an RTT channel.
pub(crate) struct RttWriter<'c> {
    // Channel being written to
    chan: &'c RttChannel,
    // Local, not-yet-committed write offset into the channel buffer
    write: usize,
    // Total number of bytes written by this operation so far
    total: usize,
    // Whether further writes are possible and whether the data will be committed
    state: WriteState,
}
170
/// Progress/outcome state of an [`RttWriter`] operation.
#[derive(Eq, PartialEq)]
enum WriteState {
    /// Operation can continue
    Writable,

    /// Buffer space ran out but the written data will still be committed
    Full,

    /// The operation failed and won't be committed, or it has already been committed.
    Finished,
}
182
183impl RttWriter<'_> {
184    pub fn write(&mut self, buf: &[u8]) {
185        self.write_with_mode(self.chan.mode(), buf);
186    }
187
188    pub fn write_with_mode(&mut self, mode: ChannelMode, mut buf: &[u8]) {
189        while self.state == WriteState::Writable && !buf.is_empty() {
190            let count = min(self.writable_contiguous(), buf.len());
191
192            if count == 0 {
193                // Buffer is full
194
195                match mode {
196                    ChannelMode::NoBlockSkip => {
197                        // Mark the entire operation as failed if even one part cannot be written in
198                        // full.
199                        self.state = WriteState::Finished;
200                        return;
201                    }
202
203                    ChannelMode::NoBlockTrim => {
204                        // If the buffer is full, write as much as possible (note: no return), and
205                        // mark the operation as full, which prevents further writes.
206                        self.state = WriteState::Full;
207                    }
208
209                    ChannelMode::BlockIfFull => {
210                        // Commit everything written so far and spin until more can be written
211                        self.chan.write.store(self.write, SeqCst);
212                        continue;
213                    }
214                }
215            }
216
217            unsafe {
218                ptr::copy_nonoverlapping(buf.as_ptr(), self.chan.buffer.add(self.write), count);
219            }
220
221            self.write += count;
222            self.total += count;
223
224            if self.write >= self.chan.size {
225                // Wrap around to start
226                self.write = 0;
227            }
228
229            buf = &buf[count..];
230        }
231    }
232
233    /// Gets the amount of contiguous space available for writing
234    fn writable_contiguous(&self) -> usize {
235        let read = self.chan.read_pointers().1;
236
237        if read > self.write {
238            read - self.write - 1
239        } else if read == 0 {
240            self.chan.size - self.write - 1
241        } else {
242            self.chan.size - self.write
243        }
244    }
245
246    pub fn is_failed(&self) -> bool {
247        self.state != WriteState::Finished
248    }
249
250    pub fn commit(mut self) -> usize {
251        self.commit_impl();
252
253        self.total
254    }
255
256    fn commit_impl(&mut self) {
257        match self.state {
258            WriteState::Finished => (),
259            WriteState::Full | WriteState::Writable => {
260                // Commit the write pointer so the host can see the new data
261                self.chan.write.store(self.write, SeqCst);
262                self.state = WriteState::Finished;
263            }
264        }
265    }
266}
267
impl Drop for RttWriter<'_> {
    fn drop(&mut self) {
        // Ensure any written data is committed even when the writer is dropped without an
        // explicit `commit()` call; `commit_impl` is a no-op if already finished.
        self.commit_impl();
    }
}
273
274impl fmt::Write for RttWriter<'_> {
275    fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
276        self.write(s.as_bytes());
277        Ok(())
278    }
279}