1use core::sync::atomic::{AtomicBool, AtomicU8};
9
// One-shot guard shared by all logger flavors: the first successful init call
// (from uart_blocking or rb) flips this to true; every later init call
// returns early without re-installing a logger.
static LOGGER_INIT_DONE: AtomicBool = AtomicBool::new(false);

// Selector value: the critical-section protected blocking UART logger is active.
const LOG_SEL_LOCKED: u8 = 1;
// Selector value: the lock-free single-core UART logger is active.
const LOG_SEL_UNSAFE_SINGLE_CORE: u8 = 2;

// Which logger flavor was installed (0 = none yet).
// Written once during init; read by `uart_blocking::flush` to dispatch.
static LOG_SEL: AtomicU8 = AtomicU8::new(0);
17pub mod uart_blocking {
19 use super::*;
20 use core::cell::{Cell, RefCell, UnsafeCell};
21 use embedded_io::Write as _;
22
23 use cortex_ar::register::Cpsr;
24 use critical_section::Mutex;
25 use log::{LevelFilter, Log, set_logger, set_max_level};
26
27 use crate::uart::Uart;
28
29 pub struct UartLoggerBlocking(Mutex<RefCell<Option<Uart>>>);
30
31 unsafe impl Send for UartLoggerBlocking {}
32 unsafe impl Sync for UartLoggerBlocking {}
33
34 static UART_LOGGER_BLOCKING: UartLoggerBlocking =
35 UartLoggerBlocking(Mutex::new(RefCell::new(None)));
36
37 pub fn init_with_locks(uart: Uart, level: LevelFilter) {
45 if LOGGER_INIT_DONE.swap(true, core::sync::atomic::Ordering::Relaxed) {
46 return;
47 }
48 LOG_SEL.swap(LOG_SEL_LOCKED, core::sync::atomic::Ordering::Relaxed);
49 critical_section::with(|cs| {
50 let inner = UART_LOGGER_BLOCKING.0.borrow(cs);
51 inner.replace(Some(uart));
52 });
53 set_logger(&UART_LOGGER_BLOCKING).unwrap();
54 set_max_level(level);
56 }
57
58 impl log::Log for UartLoggerBlocking {
59 fn enabled(&self, _metadata: &log::Metadata) -> bool {
60 true
61 }
62
63 fn log(&self, record: &log::Record) {
64 critical_section::with(|cs| {
65 let mut opt_logger = self.0.borrow(cs).borrow_mut();
66 if opt_logger.is_none() {
67 return;
68 }
69 let logger = opt_logger.as_mut().unwrap();
70 writeln!(logger, "{} - {}\r", record.level(), record.args()).unwrap();
71 })
72 }
73
74 fn flush(&self) {
75 critical_section::with(|cs| {
76 let mut opt_logger = self.0.borrow(cs).borrow_mut();
77 if opt_logger.is_none() {
78 return;
79 }
80 let logger = opt_logger.as_mut().unwrap();
81 logger.flush().unwrap();
82 });
83 }
84 }
85
86 pub struct UartLoggerUnsafeSingleThread {
87 skip_in_isr: Cell<bool>,
88 uart: UnsafeCell<Option<Uart>>,
89 }
90
91 unsafe impl Send for UartLoggerUnsafeSingleThread {}
92 unsafe impl Sync for UartLoggerUnsafeSingleThread {}
93
94 static UART_LOGGER_UNSAFE_SINGLE_THREAD: UartLoggerUnsafeSingleThread =
95 UartLoggerUnsafeSingleThread {
96 skip_in_isr: Cell::new(false),
97 uart: UnsafeCell::new(None),
98 };
99
100 pub unsafe fn init_unsafe_single_core(uart: Uart, level: LevelFilter, skip_in_isr: bool) {
108 if LOGGER_INIT_DONE.swap(true, core::sync::atomic::Ordering::Relaxed) {
109 return;
110 }
111 LOG_SEL.swap(
112 LOG_SEL_UNSAFE_SINGLE_CORE,
113 core::sync::atomic::Ordering::Relaxed,
114 );
115 let opt_uart = unsafe { &mut *UART_LOGGER_UNSAFE_SINGLE_THREAD.uart.get() };
116 opt_uart.replace(uart);
117 UART_LOGGER_UNSAFE_SINGLE_THREAD
118 .skip_in_isr
119 .set(skip_in_isr);
120
121 set_logger(&UART_LOGGER_UNSAFE_SINGLE_THREAD).unwrap();
122 set_max_level(level); }
124
125 impl log::Log for UartLoggerUnsafeSingleThread {
126 fn enabled(&self, _metadata: &log::Metadata) -> bool {
127 true
128 }
129
130 fn log(&self, record: &log::Record) {
131 if self.skip_in_isr.get() {
132 match Cpsr::read().mode().unwrap() {
133 cortex_ar::register::cpsr::ProcessorMode::Fiq
134 | cortex_ar::register::cpsr::ProcessorMode::Irq => {
135 return;
136 }
137 _ => {}
138 }
139 }
140
141 let uart_mut = unsafe { &mut *self.uart.get() }.as_mut();
142 if uart_mut.is_none() {
143 return;
144 }
145 writeln!(
146 uart_mut.unwrap(),
147 "{} - {}\r",
148 record.level(),
149 record.args()
150 )
151 .unwrap();
152 }
153
154 fn flush(&self) {
155 let uart_mut = unsafe { &mut *self.uart.get() }.as_mut();
156 if uart_mut.is_none() {
157 return;
158 }
159 uart_mut.unwrap().flush().unwrap();
160 }
161 }
162
163 pub fn flush() {
165 match LOG_SEL.load(core::sync::atomic::Ordering::Relaxed) {
166 val if val == LOG_SEL_LOCKED => UART_LOGGER_BLOCKING.flush(),
167 val if val == LOG_SEL_UNSAFE_SINGLE_CORE => UART_LOGGER_UNSAFE_SINGLE_THREAD.flush(),
168 _ => (),
169 }
170 }
171}
172
173pub mod rb {
175 use core::cell::RefCell;
176 use core::fmt::Write as _;
177
178 use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
179 use log::{LevelFilter, set_logger, set_max_level};
180 use ringbuf::{
181 StaticRb,
182 traits::{Consumer, Producer},
183 };
184
185 pub struct Logger {
191 frame_queue: embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32>,
192 data_buf: critical_section::Mutex<RefCell<heapless::String<4096>>>,
193 ring_buf: critical_section::Mutex<RefCell<Option<StaticRb<u8, 4096>>>>,
194 }
195
196 unsafe impl Send for Logger {}
197 unsafe impl Sync for Logger {}
198
199 static LOGGER_RB: Logger = Logger {
200 frame_queue: embassy_sync::channel::Channel::new(),
201 data_buf: critical_section::Mutex::new(RefCell::new(heapless::String::new())),
202 ring_buf: critical_section::Mutex::new(RefCell::new(None)),
203 };
204
205 impl log::Log for Logger {
206 fn enabled(&self, _metadata: &log::Metadata) -> bool {
207 true
208 }
209
210 fn log(&self, record: &log::Record) {
211 critical_section::with(|cs| {
212 let ref_buf = self.data_buf.borrow(cs);
213 let mut buf = ref_buf.borrow_mut();
214 buf.clear();
215 let _ = writeln!(buf, "{} - {}\r", record.level(), record.args());
216 let rb_ref = self.ring_buf.borrow(cs);
217 let mut rb_opt = rb_ref.borrow_mut();
218 if rb_opt.is_none() {
219 panic!("log call on uninitialized logger");
220 }
221 rb_opt.as_mut().unwrap().push_slice(buf.as_bytes());
222 let _ = self.frame_queue.try_send(buf.len());
223 });
224 }
225
226 fn flush(&self) {
227 while !self.frame_queue().is_empty() {}
228 }
229 }
230
231 impl Logger {
232 pub fn frame_queue(
233 &self,
234 ) -> &embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32> {
235 &self.frame_queue
236 }
237 }
238
239 pub fn init(level: LevelFilter) {
240 if super::LOGGER_INIT_DONE.swap(true, core::sync::atomic::Ordering::Relaxed) {
241 return;
242 }
243 critical_section::with(|cs| {
244 let rb = StaticRb::<u8, 4096>::default();
245 let rb_ref = LOGGER_RB.ring_buf.borrow(cs);
246 rb_ref.borrow_mut().replace(rb);
247 });
248 set_logger(&LOGGER_RB).unwrap();
249 set_max_level(level); }
251
252 pub fn read_next_frame(frame_len: usize, buf: &mut [u8]) {
253 let read_len = core::cmp::min(frame_len, buf.len());
254 critical_section::with(|cs| {
255 let rb_ref = LOGGER_RB.ring_buf.borrow(cs);
256 let mut rb = rb_ref.borrow_mut();
257 rb.as_mut().unwrap().pop_slice(&mut buf[0..read_len]);
258 })
259 }
260
261 pub fn get_frame_queue()
262 -> &'static embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32> {
263 LOGGER_RB.frame_queue()
264 }
265}