#![doc = include_str!("../README.md")]
#![no_std]

use core::{
    cell::{Cell, UnsafeCell},
    mem::MaybeUninit,
    sync::atomic::{AtomicBool, Ordering},
};
use rtrb::{
    Consumer, CopyToUninit, Producer, RingBuffer,
    chunks::{ChunkError, ReadChunk},
};

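/// Initializes the defmt logger with a ring buffer of `size` bytes and
/// returns the consumer half, from which the application drains the
/// encoded log frames.
///
/// A minimal usage sketch (not compiled here; how the drained bytes leave
/// the device, e.g. over a serial link, is up to the application):
///
/// ```ignore
/// let mut consumer = init(1024);
///
/// defmt::info!("hello");
///
/// // Periodically drain whatever has been encoded so far.
/// let n = consumer.slots();
/// if n > 0 {
///     if let Ok(chunk) = consumer.read_chunk(n) {
///         let (first, second) = chunk.as_slices();
///         // ...send `first` and `second` over the transport, then:
///         chunk.commit_all();
///     }
/// }
/// ```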
pub fn init(size: usize) -> Consumer<u8> {
    let (p, c) = RingBuffer::new(size);
    LOGGER.init_buf(p);
    c
}

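/// Like [`init`], but additionally stores the consumer half in a global, so
/// that it can later be reached through [`get_read_chunk`] instead of being
/// passed around by value.
///
/// A minimal usage sketch (not compiled here; same assumptions as [`init`]):
///
/// ```ignore
/// init_global(1024);
///
/// defmt::info!("hello");
///
/// // Later, from the single context that owns the draining side:
/// if let Some(chunk) = unsafe { get_read_chunk() } {
///     let (first, second) = chunk.as_slices();
///     // ...send `first` and `second` over the transport, then:
///     chunk.commit_all();
/// }
/// ```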
pub fn init_global(size: usize) {
    let (p, c) = RingBuffer::new(size);
    LOGGER.init_buf(p);
    CONSUMER.init_consumer(c);
}

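/// Returns a chunk covering all bytes currently available in the global
/// consumer, or `None` if the buffer is empty or no chunk could be read.
///
/// # Safety
///
/// [`init_global`] must have been called first, and this function must only
/// be called from one context at a time: the global consumer is not
/// synchronized, so concurrent calls would alias its mutable state.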
pub unsafe fn get_read_chunk() -> Option<ReadChunk<'static, u8>> {
    CONSUMER.read_chunk()
}

static LOGGER: LoggerRtrb = LoggerRtrb::new();
static CONSUMER: LoggerRtrbConsumer = LoggerRtrbConsumer::new();

struct LoggerRtrb {
    /// Set while the logger is acquired; guards against reentrant use.
    taken: AtomicBool,
    /// Restore state of the critical section held from `acquire` to `release`.
    cs_restore: Cell<critical_section::RestoreState>,
    /// defmt frame encoder.
    encoder: UnsafeCell<defmt::Encoder>,
    /// Producer half of the ring buffer; written once by `init_buf`.
    buf: UnsafeCell<MaybeUninit<Producer<u8>>>,
}

impl LoggerRtrb {
    const fn new() -> LoggerRtrb {
        LoggerRtrb {
            taken: AtomicBool::new(false),
            cs_restore: Cell::new(critical_section::RestoreState::invalid()),
            encoder: UnsafeCell::new(defmt::Encoder::new()),
            buf: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    fn init_buf(&self, buf: Producer<u8>) {
        unsafe { &mut *self.buf.get() }.write(buf);
    }

    #[inline]
    fn acquire(&self) {
        // Enter a critical section so that logging cannot be preempted.
        let restore = unsafe { critical_section::acquire() };

        if self.taken.load(Ordering::Relaxed) {
            panic!("logger taken reentrantly")
        }

        // Relaxed is fine: we are inside the critical section.
        self.taken.store(true, Ordering::Relaxed);

        // SAFETY: the interior state is only touched while `taken` is set
        // inside the critical section.
        unsafe {
            self.cs_restore.set(restore);
            let encoder = &mut *self.encoder.get();
            encoder.start_frame(|b| do_write(&self.buf, b));
        }
    }

    /// # Safety
    ///
    /// Must only be called between `acquire` and `release`.
    #[inline]
    unsafe fn write(&self, bytes: &[u8]) {
        let encoder = unsafe { &mut *self.encoder.get() };
        encoder.write(bytes, |b| do_write(&self.buf, b));
    }

    /// Flushing is a no-op: the producer side has no way to wait for the
    /// consumer to drain the ring buffer.
    #[inline]
    unsafe fn flush(&self) {}

    #[inline]
    unsafe fn release(&self) {
        // SAFETY: still inside the critical section entered by `acquire`.
        unsafe {
            let encoder = &mut *self.encoder.get();
            encoder.end_frame(|b| do_write(&self.buf, b));
            self.taken.store(false, Ordering::Relaxed);
            critical_section::release(self.cs_restore.get());
        }
    }
}

// SAFETY: the interior mutable state is only accessed while the critical
// section taken in `acquire` is held.
unsafe impl Sync for LoggerRtrb {}

fn do_write(buf: &UnsafeCell<MaybeUninit<Producer<u8>>>, bytes: &[u8]) {
    use ChunkError::TooFewSlots;

    let buf = unsafe { (&mut *buf.get()).assume_init_mut() };
    // Try to reserve space for all of `bytes`; if the ring buffer is nearly
    // full, fall back to the remaining space and silently drop the rest.
    let mut chunk = match buf.write_chunk_uninit(bytes.len()) {
        Ok(chunk) => chunk,
        Err(TooFewSlots(0)) => return,
        Err(TooFewSlots(n)) => match buf.write_chunk_uninit(n) {
            Ok(chunk) => chunk,
            _ => return,
        },
    };

    // The chunk may wrap around the end of the ring buffer, so copy in two
    // parts.
    let end = chunk.len();
    let (first, second) = chunk.as_mut_slices();
    let mid = first.len();
    bytes[..mid].copy_to_uninit(first);
    bytes[mid..end].copy_to_uninit(second);
    // SAFETY: all `end` slots of the chunk have just been initialized.
    unsafe { chunk.commit_all() };
}

/// Holder for the consumer half of the ring buffer, populated by
/// `init_global` and accessed through `get_read_chunk`.
struct LoggerRtrbConsumer {
    consumer: UnsafeCell<MaybeUninit<Consumer<u8>>>,
}

impl LoggerRtrbConsumer {
    pub const fn new() -> Self {
        Self {
            consumer: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    fn init_consumer(&self, consumer: Consumer<u8>) {
        unsafe { &mut *self.consumer.get() }.write(consumer);
    }

    fn read_chunk(&self) -> Option<ReadChunk<'_, u8>> {
        let consumer = unsafe { (&mut *self.consumer.get()).assume_init_mut() };
        // Read everything that is currently available, if anything.
        let n = consumer.slots();
        if n > 0 {
            return consumer.read_chunk(n).ok();
        }
        None
    }
}

// SAFETY: the consumer must only be used from a single context at a time;
// see the safety contract of `get_read_chunk`.
unsafe impl Sync for LoggerRtrbConsumer {}

#[defmt::global_logger]
struct Logger;

unsafe impl defmt::Logger for Logger {
    fn acquire() {
        LOGGER.acquire();
    }

    unsafe fn write(bytes: &[u8]) {
        unsafe {
            LOGGER.write(bytes);
        }
    }

    unsafe fn flush() {
        unsafe {
            LOGGER.flush();
        }
    }

    unsafe fn release() {
        unsafe {
            LOGGER.release();
        }
    }
}