1use crate::ChannelMode;
6use core::cmp::min;
7use core::fmt;
8use core::ptr;
9use portable_atomic::{AtomicUsize, Ordering::SeqCst};
10
/// Header of the RTT control block that an external debug host locates by
/// scanning target memory. `#[repr(C)]` pins the field order and layout so
/// the in-memory image matches what host-side tooling expects.
#[repr(C)]
pub struct RttHeader {
    /// Identifier string; `init` fills it with `"SEGGER RTT"` plus NUL
    /// padding, written back-to-front (see `RttHeader::init`).
    id: [u8; 16],
    /// Number of up (target-to-host) channels.
    max_up_channels: usize,
    /// Number of down (host-to-target) channels.
    max_down_channels: usize,
}
21
impl RttHeader {
    /// Initializes the header in place using volatile writes.
    ///
    /// The channel counts are stored before the `id` magic string, and the
    /// string itself is filled from its last byte toward its first, so
    /// `id[0]` ('S') is the very last byte written. NOTE(review): presumably
    /// this ordering ensures a host scanning for the complete "SEGGER RTT"
    /// marker never matches a partially initialized control block — confirm
    /// against the RTT control-block specification.
    ///
    /// # Safety
    ///
    /// Must be called on a header that is already at its final memory
    /// address and is not concurrently accessed. All stores are volatile,
    /// so the compiler can neither elide them nor reorder them relative to
    /// one another.
    pub unsafe fn init(&mut self, max_up_channels: usize, max_down_channels: usize) {
        ptr::write_volatile(&mut self.max_up_channels, max_up_channels);
        ptr::write_volatile(&mut self.max_down_channels, max_down_channels);

        // "SEGGER RTT\0\0\0\0\0\0" spelled backwards; the loop below copies
        // it in reverse into `id`, producing the forward string.
        const MAGIC_STR_BACKWARDS: &[u8; 16] = b"\0\0\0\0\0\0TTR REGGES";

        for (idx, byte) in MAGIC_STR_BACKWARDS.into_iter().enumerate() {
            ptr::write_volatile(&mut self.id[15 - idx], *byte);
        }
    }

    /// Returns the up-channel count stored by `init`.
    pub fn max_up_channels(&self) -> usize {
        self.max_up_channels
    }
}
46
/// One RTT ring-buffer channel descriptor. `#[repr(C)]` fixes the layout,
/// which is shared with the external debug host.
#[repr(C)]
pub struct RttChannel {
    /// Pointer to the channel name. NOTE(review): presumably a
    /// NUL-terminated string read by the host — confirm at the call sites.
    name: *const u8,
    /// Start of the data buffer; null until `init` runs (this is what
    /// `is_initialized` checks).
    buffer: *mut u8,
    /// Buffer capacity in bytes.
    size: usize,
    /// Offset of the next byte to write into the buffer.
    write: AtomicUsize,
    /// Offset of the next byte to read from the buffer.
    read: AtomicUsize,
    /// Flag bits; the low two bits encode the `ChannelMode`
    /// (see `mode` / `set_mode`).
    flags: AtomicUsize,
}
57
impl RttChannel {
    /// Initializes the channel in place using volatile writes.
    ///
    /// The data-buffer pointer is deliberately stored last: a non-null
    /// `buffer` is what `is_initialized` treats as "this channel is ready",
    /// so name/size/mode are all in place before the channel becomes
    /// observable as initialized.
    ///
    /// # Safety
    ///
    /// `name` and `buffer` must remain valid for as long as the channel is
    /// in use, and the channel itself must already be at its final memory
    /// address.
    pub unsafe fn init(&mut self, name: *const u8, mode: ChannelMode, buffer: *mut [u8]) {
        ptr::write_volatile(&mut self.name, name);
        ptr::write_volatile(&mut self.size, (*buffer).len());
        self.set_mode(mode);

        // Written last: marks the channel as initialized.
        ptr::write_volatile(&mut self.buffer, buffer as *mut u8);
    }

    /// A channel counts as initialized once `init` has stored a non-null
    /// buffer pointer.
    pub fn is_initialized(&self) -> bool {
        !self.buffer.is_null()
    }

    /// Decodes the `ChannelMode` from the low two bits of `flags`.
    /// The unused encoding (3) falls back to `NoBlockSkip`.
    pub(crate) fn mode(&self) -> ChannelMode {
        let mode = self.flags.load(SeqCst) & 3;

        match mode {
            0 => ChannelMode::NoBlockSkip,
            1 => ChannelMode::NoBlockTrim,
            2 => ChannelMode::BlockIfFull,
            _ => ChannelMode::NoBlockSkip,
        }
    }

    /// Stores `mode` into the low two bits of `flags`, preserving the
    /// remaining bits.
    ///
    /// NOTE(review): this is a separate load followed by a store, not an
    /// atomic read-modify-write, so a concurrent update of `flags` (e.g. by
    /// the host) could be lost in between — confirm this race is acceptable.
    pub(crate) fn set_mode(&self, mode: ChannelMode) {
        self.flags
            .store((self.flags.load(SeqCst) & !3) | mode as usize, SeqCst);
    }

    /// Copies as many buffered bytes as fit into `buf`, handling the ring
    /// wrap-around, and returns the number of bytes copied. The updated
    /// read pointer is published once, after the loop.
    pub(crate) fn read(&self, mut buf: &mut [u8]) -> usize {
        let (write, mut read) = self.read_pointers();

        let mut total = 0;

        while !buf.is_empty() {
            // Largest chunk readable without crossing the buffer end.
            let count = min(self.readable_contiguous(write, read), buf.len());
            if count == 0 {
                break;
            }

            // SAFETY(review): `count` is bounded by the contiguous readable
            // region, so `buffer[read .. read + count]` stays in bounds —
            // relies on `read_pointers` having validated the offsets.
            unsafe {
                ptr::copy_nonoverlapping(self.buffer.add(read), buf.as_mut_ptr(), count);
            }

            total += count;
            read += count;

            // Wrap the read offset at the end of the ring buffer.
            if read >= self.size {
                read = 0;
            }

            buf = &mut buf[count..];
        }

        self.read.store(read, SeqCst);

        total
    }

    /// Creates a writer whose local cursor starts at the current
    /// (validated) write offset.
    pub(crate) fn writer(&self) -> RttWriter<'_> {
        RttWriter {
            chan: self,
            write: self.read_pointers().0,
            total: 0,
            state: WriteState::Writable,
        }
    }

    /// Number of bytes readable without wrapping past the end of the
    /// buffer, given a snapshot of both offsets.
    fn readable_contiguous(&self, write: usize, read: usize) -> usize {
        if read > write {
            self.size - read
        } else {
            write - read
        }
    }

    /// Loads `(write, read)`, resetting both to `(0, 0)` if either offset
    /// is out of bounds. This defends against a corrupted control block and
    /// also covers an uninitialized channel, where `size` is 0.
    pub(crate) fn read_pointers(&self) -> (usize, usize) {
        let write = self.write.load(SeqCst);
        let read = self.read.load(SeqCst);

        if write >= self.size || read >= self.size {
            self.write.store(0, SeqCst);
            self.read.store(0, SeqCst);
            return (0, 0);
        }

        (write, read)
    }
}
162
/// Writer that copies bytes into an `RttChannel` buffer but only publishes
/// the new write pointer when committed (explicitly or on drop).
pub(crate) struct RttWriter<'c> {
    /// Channel being written to.
    chan: &'c RttChannel,
    /// Local write cursor; not visible to the host until committed.
    write: usize,
    /// Total bytes copied into the buffer during this session.
    total: usize,
    /// Session progress; see `WriteState`.
    state: WriteState,
}
170
/// Progress of an in-flight `RttWriter` session.
#[derive(Eq, PartialEq)]
enum WriteState {
    /// More bytes can still be accepted.
    Writable,

    /// Buffer filled up under `NoBlockTrim`: the message was truncated, but
    /// what was written so far is still committed.
    Full,

    /// Session is over: either the commit already happened, or a
    /// `NoBlockSkip` write was skipped and nothing must be committed.
    Finished,
}
182
183impl RttWriter<'_> {
184 pub fn write(&mut self, buf: &[u8]) {
185 self.write_with_mode(self.chan.mode(), buf);
186 }
187
188 pub fn write_with_mode(&mut self, mode: ChannelMode, mut buf: &[u8]) {
189 while self.state == WriteState::Writable && !buf.is_empty() {
190 let count = min(self.writable_contiguous(), buf.len());
191
192 if count == 0 {
193 match mode {
196 ChannelMode::NoBlockSkip => {
197 self.state = WriteState::Finished;
200 return;
201 }
202
203 ChannelMode::NoBlockTrim => {
204 self.state = WriteState::Full;
207 }
208
209 ChannelMode::BlockIfFull => {
210 self.chan.write.store(self.write, SeqCst);
212 continue;
213 }
214 }
215 }
216
217 unsafe {
218 ptr::copy_nonoverlapping(buf.as_ptr(), self.chan.buffer.add(self.write), count);
219 }
220
221 self.write += count;
222 self.total += count;
223
224 if self.write >= self.chan.size {
225 self.write = 0;
227 }
228
229 buf = &buf[count..];
230 }
231 }
232
233 fn writable_contiguous(&self) -> usize {
235 let read = self.chan.read_pointers().1;
236
237 if read > self.write {
238 read - self.write - 1
239 } else if read == 0 {
240 self.chan.size - self.write - 1
241 } else {
242 self.chan.size - self.write
243 }
244 }
245
246 pub fn is_failed(&self) -> bool {
247 self.state != WriteState::Finished
248 }
249
250 pub fn commit(mut self) -> usize {
251 self.commit_impl();
252
253 self.total
254 }
255
256 fn commit_impl(&mut self) {
257 match self.state {
258 WriteState::Finished => (),
259 WriteState::Full | WriteState::Writable => {
260 self.chan.write.store(self.write, SeqCst);
262 self.state = WriteState::Finished;
263 }
264 }
265 }
266}
267
268impl Drop for RttWriter<'_> {
269 fn drop(&mut self) {
270 self.commit_impl();
271 }
272}
273
274impl fmt::Write for RttWriter<'_> {
275 fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
276 self.write(s.as_bytes());
277 Ok(())
278 }
279}