funnel/
lib.rs

1//! A lock-free, wait-free, block-free logger for the ARM Cortex-M architecture
2//!
3//! (lock-free as in logging doesn't block interrupt handlers; wait-free as in there's no spinning
4//! (e.g. CAS loop) to get a handle; and block-free as in the logger never waits for an I/O transfer
5//! (e.g. ITM, UART, etc.) to complete)
6//!
7//! Status: ☢️ **Experimental** ☢️ (ALPHA PRE-RELEASE)
8//!
9//! **SUPER IMPORTANT** Using this crate in a threaded environment will result in an unsound
10//! program! You have been warned! Also, multi-core support has not been thought out at all so this
11//! is likely wrong when used in multi-core context.
12//!
13//! # Working principle
14//!
15//! There's one ring buffer per priority level. Logging from an interrupt / exception handler will
16//! simply write the message into one of these ring buffers. Thus logging is effectively 'I/O
17//! less' and as fast as a `memcpy`. Only the 'thread handler' (AKA `main` or `idle` in RTFM apps)
18//! can drain these ring buffers into an appropriate I/O sink (e.g. the ITM).
19//!
20//! Nothing is without trade-offs in this life; this logger uses plenty of static memory (i.e.
21//! RAM) in exchange for fast and predictable logging performance. Also, compared to loggers that
22//! directly do I/O this logger will, overall, spend more CPU cycles to log the same amount of
23//! data but most of the work will be done at the lowest priority making logging in interrupt
24//! handlers much faster.
25//!
26//! # Examples
27//!
28//! ## Usual setup
29//!
30//! Application crate:
31//!
32//! ``` ignore
33//! // aligned = "0.3.2"
34//! use aligned::Aligned;
35//! use cortex_m::itm;
36//!
37//! use funnel::{Drain, funnel, info, trace};
38//!
39//! // `NVIC_PRIO_BITS` is the number of priority bits supported by the device
40//! //
41//! // The `NVIC_PRIO_BITS` value can be a literal integer (e.g. `3`) or a path to a constant
42//! // (`stm32f103xx::NVIC_PRIO_BITS`)
43//! //
44//! // This macro call can only appear *once* in the dependency graph and *must* appear if any
45//! // of the `funnel` macros or the `Logger::get()` API is used anywhere in the dependency graph
46//! funnel!(NVIC_PRIO_BITS = 3, {
47//!      // syntax: $logical_priority : $ring_buffer_size_in_bytes
48//!      // to get better performance use sizes that are a power of 2
49//!      1: 32,
50//!      2: 64,
51//!
52//!      // not listing a priority here disables logging at that priority level
53//!      // entering the wrong NVIC_PRIO_BITS value will disable most loggers
54//! });
55//!
56//! #[entry]
57//! fn main() -> ! {
58//!     // ..
59//!     let mut itm: ITM = /* .. */;
60//!
61//!     let drains = Drain::get_all();
62//!
63//!     let mut buf = Aligned([0; 32]); // 4-byte aligned buffer
64//!     loop {
65//!         for (i, drain) in drains.iter().enumerate() {
66//!             'l: loop {
67//!                 let n = drain.read(&mut buf).len();
68//!
69//!                 // this drain is empty
70//!                 if n == 0 {
71//!                     break 'l;
72//!                 }
73//!
74//!                 // we need this coercion or the slicing below won't do the right thing
75//!                 let buf: &Aligned<_, [_]> = &buf;
76//!
77//!                 // will send data in 32-bit chunks
78//!                 itm::write_aligned(&mut itm.stim[i], &buf[..n]);
79//!             }
80//!         }
81//!     }
82//! }
83//!
84//! // logical_priority = 1 (nvic_priority = 224)
85//! #[interrupt]
86//! fn GPIOA() {
87//!     info!("GPIOA");
88//!     foo(0);
89//!     // ..
90//! }
91//!
92//! // logical_priority = 2 (nvic_priority = 192)
93//! #[interrupt]
94//! fn GPIOB() {
95//!     info!("GPIOB");
96//!     foo(1);
97//!     // ..
98//! }
99//!
100//! fn foo(x: i32) {
101//!     // this macro can appear in libraries
102//!     trace!("foo({})", x);
103//!     // ..
104//! }
105//! ```
106//!
107//! ## `Logger`
108//!
109//! The overhead of each macro call can be reduced using one of the `uwrite!` macros on a
110//! `Logger`. A `Logger` can only be obtained using the `Logger::get()` constructor.
111//!
112//! ``` ignore
113//! use funnel::{Logger, log_enabled};
114//!
115//! #[interrupt]
116//! fn GPIOC() {
117//!     if let Some(mut logger) = Logger::get() {
118//!          if log_enabled!(Info) {
119//!              uwriteln!(logger, "{}", 100).ok();
120//!              uwriteln!(logger, "{:?}", some_value).ok();
121//!          }
122//!     }
123//! }
124//! ```
125//!
126//! # Logging levels
127//!
128//! `funnel` supports 5 logging levels: Trace, Debug, Info, Warn and Error, sorted in increasing
129//! level of severity. Each of these logging levels has an associated logging macro: `trace!`,
130//! `debug!`, `info!`, `warn!` and `error!`.
131//!
132//! Logs of *lesser* severity can be *statically* disabled using one of these Cargo features.
133//!
134//! - `max_level_trace`
135//! - `max_level_debug`
136//! - `max_level_info`
137//! - `max_level_warn`
138//! - `max_level_error`
139//! - `max_level_off`
140//! - `release_max_level_trace`
141//! - `release_max_level_debug`
142//! - `release_max_level_info`
143//! - `release_max_level_warn`
144//! - `release_max_level_error`
145//! - `release_max_level_off`
146//!
147//! Enabling the `max_level_info` feature will disable the `Debug` and `Trace` logging levels;
148//! `max_level_off` will disable all logging levels. The `release_*` features apply when the
149//! application is compiled using the 'release' profile; the other features apply when the 'dev'
150//! profile is used. To check if a logging level is enabled or disabled in code use the
151//! `log_enabled!` macro.
152//!
153//! # Benchmarks
154//!
155//! Ran on Cortex-M3 core clocked at 8 MHz and configured with 0 Flash wait cycles.
156//!
157//! | Code                         | Cycles  |
158//! |------------------------------|---------|
159//! | `info!("")`                  | 36      |
160//! | `uwriteln!(logger, "")`      | 15      |
161//! | `drain("")`                  | 27      |
162//! | `info!("{}", S)`             | 331-369 |
163//! | `uwriteln!(logger, "{}", S)` | 308-346 |
164//! | `drain(S)`                   | 863-916 |
165//! | `iprintln!(_, "{}", S)`      | 1652    |
166//! | `info!("{}", N)`             | 348-383 |
167//! | `uwriteln!(logger, "{}", N)` | 329-364 |
168//! | `drain(N)`                   | 217-230 |
169//!
170//! Where `S` is a 45-byte long string, `N = usize::max_value()`, the `drain` function is
171//! `ptr::read_volatile`-ing each byte and the ITM was clocked at 2 MHz.
172//!
173//! # Potential improvements / alternatives
174//!
175//! Instead of draining the ring buffers at the lowest priority one could drain the buffers using
176//! the debugger using something like [SEGGER's Real Time Transfer][rtt] mechanism. The
177//! implementation would need to change to properly support this form of parallel draining.
178//!
179//! [rtt]: https://www.segger.com/products/debug-probes/j-link/technology/about-real-time-transfer/
180
181#![deny(missing_docs)]
182#![deny(warnings)]
183#![no_std]
184
185use core::{
186    cell::UnsafeCell,
187    cmp, ptr,
188    sync::atomic::{self, AtomicUsize, Ordering},
189};
190
191use ufmt::uWrite;
192
193/// Declares loggers for each priority level
194pub use cortex_m_funnel_macros::funnel;
195#[doc(hidden)]
196pub use ufmt::uwriteln;
197
/// IMPLEMENTATION DETAIL
// `static [mut]` variables cannot contain references to `static mut` variables so we lie about the
// `Sync`-ness of `Inner` to be able to put references to it in `static` variables. Only the
// `funnel!` macro uses this type -- end users will never see this type.
#[doc(hidden)]
#[repr(C)]
pub struct Inner<B>
where
    B: ?Sized,
{
    // free-running (wrapping) count of bytes produced; advanced only by `Logger::log`
    write: UnsafeCell<usize>,
    // free-running (wrapping) count of bytes consumed; advanced only by `Drain::read`
    read: UnsafeCell<usize>,
    // backing ring-buffer storage; concrete `[u8; N]` behind `funnel!`, accessed as unsized
    // `[u8]` through `Logger` / `Drain`
    buffer: UnsafeCell<B>,
}

// SAFETY(review): `Inner` is not actually thread-safe; this impl exists only so `funnel!` can
// place references to it in `static` variables. Soundness relies on the single-core,
// one-logger-per-priority-level model described in the crate docs (see the "SUPER IMPORTANT"
// warning above) -- do not use in a threaded environment.
unsafe impl<B> Sync for Inner<B> where B: ?Sized {}
214
215impl<B> Inner<B> {
216    // IMPLEMENTATION DETAIL
217    #[doc(hidden)]
218    pub const fn new(buffer: B) -> Self {
219        Self {
220            write: UnsafeCell::new(0),
221            read: UnsafeCell::new(0),
222            buffer: UnsafeCell::new(buffer),
223        }
224    }
225}
226
/// A logger tied to a particular priority level
// NOTE: NOT `Sync` or `Send` -- `log` assumes exclusive access to the `write` cursor, so a
// `Logger` must stay on the priority level that obtained it
#[repr(transparent)]
pub struct Logger {
    // `#[repr(transparent)]` makes `Logger` layout-compatible with this reference;
    // `Logger::get` relies on that to reinterpret the `funnel!`-generated function's return
    // value as `Option<Logger>`
    inner: &'static Inner<[u8]>,
}
233
impl Logger {
    /// Gets the `funnel` logger associated to the caller's priority level
    ///
    /// This returns `None` if no logger was associated to the priority level
    pub fn get() -> Option<Self> {
        // not a Cortex-M target (e.g. host-side unit tests): there are no loggers at all
        if cfg!(not(cortex_m)) {
            return None;
        }

        // logging is statically disabled for the active compilation profile
        if (cfg!(debug_assertions) && cfg!(feature = "max_level_off"))
            || cfg!(feature = "release_max_level_off")
        {
            return None;
        }

        // Cortex-M MMIO registers
        // SCB.ICSR = Interrupt Control and State Register; NVIC.IPR = Interrupt Priority Registers
        const SCB_ICSR: *const u32 = 0xE000_ED04 as *const u32;
        const NVIC_IPR: *const u32 = 0xE000_E400 as *const u32;

        extern "Rust" {
            // NOTE The expansion of `funnel!` declares `__funnel_logger` as a function with
            // signature `fn(u8) -> Option<&'static Inner<[u8]>>` so here we are implicitly
            // transmuting `&'static Inner<[u8]>` into `Logger` but this should be fine because
            // they are equivalent due to `#[repr(transparent)]`
            fn __funnel_logger(nvic_prio: u8) -> Option<Logger>;
        }

        unsafe {
            // the low bits of ICSR (VECTACTIVE) hold the number of the active exception
            // NOTE(review): VECTACTIVE is 9 bits wide; the `as u8` cast truncates vector numbers
            // above 255 (IRQs above 239) -- confirm devices with that many interrupts are out of
            // scope
            let icsr = SCB_ICSR.read_volatile() as u8;

            if icsr == 0 {
                // thread mode
                None
            } else if icsr < 16 {
                // TODO do something about exceptions -- NMI and HardFault are annoying because they
                // have exceptional priorities
                None
            } else {
                // assuming ARMv6-M (the lowest common denominator), IPR is *not* byte addressable
                // so we perform word-size reads
                let nr = icsr - 16;

                // NOTE `nr` will always be less than `256`
                let ipr = NVIC_IPR.add((nr >> 2) as usize).read_volatile();

                // each 32-bit IPR word packs four 8-bit priority fields; extract this IRQ's byte
                let nvic_prio = (ipr >> (8 * (nr % 4))) as u8;

                __funnel_logger(nvic_prio)
            }
        }
    }

    // Copies the bytes of `s` into the ring buffer; returns `Err(())` -- writing nothing -- if
    // the message does not currently fit in the buffer's free space
    //
    // This function is *non*-reentrant but `Logger` is `!Sync` so each `Logger`s is constrained to
    // a single priority level (therefore no preemption / overlap can occur on any single `Logger`
    // instance)
    fn log(&self, s: &str) -> Result<(), ()> {
        unsafe {
            // NOTE we use `UnsafeCell` instead of `AtomicUsize` because we want the unique
            // reference (`&mut-`) semantics; this logger has exclusive access to the `write`
            // pointer
            let write = &mut *self.inner.write.get();
            let buffer = &mut *self.inner.buffer.get();

            let input = s.as_bytes();

            let blen = buffer.len();
            let ilen = input.len();

            // a message longer than the whole buffer can never fit
            if ilen > blen {
                // early exit to hint the optimizer that `blen` can't be `0`
                return Err(());
            }

            // NOTE we use `UnsafeCell` instead of `AtomicUsize` because we want this operation to
            // return the same value when calling `log` consecutively
            let read = *self.inner.read.get();

            // `write - read` (wrapping) is the number of un-drained bytes; proceed only if the
            // message fits in the remaining capacity
            if blen >= ilen + (*write).wrapping_sub(read) {
                // FIXME (?) this is *not* always optimized to a right shift (`lsr`) when `blen` is
                // a power of 2 -- instead we get an `udiv` which is slower (?).
                let w = *write % blen;

                // NOTE we use `ptr::copy_nonoverlapping` instead of `copy_from_slice` to avoid
                // panicking branches
                if w + ilen > blen {
                    // the message straddles the physical end of the buffer: two memcpy-s
                    let mid = blen - w;
                    // buffer[w..].copy_from_slice(&input[..mid]);
                    ptr::copy_nonoverlapping(input.as_ptr(), buffer.as_mut_ptr().add(w), mid);
                    // buffer[..ilen - mid].copy_from_slice(&input[mid..]);
                    ptr::copy_nonoverlapping(
                        input.as_ptr().add(mid),
                        buffer.as_mut_ptr(),
                        ilen - mid,
                    );
                } else {
                    // single memcpy
                    // buffer[w..w + ilen].copy_from_slice(&input);
                    ptr::copy_nonoverlapping(input.as_ptr(), buffer.as_mut_ptr().add(w), ilen);
                }

                // publish the new bytes by advancing the (wrapping) write cursor *after* the copy
                *write = (*write).wrapping_add(ilen);

                Ok(())
            } else {
                // not enough free space: the message is dropped
                Err(())
            }
        }
    }
}
344
// Hooking into `ufmt` is what makes the `uwrite!` / `uwriteln!` macros (and thus the level
// macros) work on a `Logger`
impl uWrite for Logger {
    type Error = ();

    // delegate straight to the ring-buffer writer; `Err(())` means the message didn't fit
    fn write_str(&mut self, s: &str) -> Result<(), ()> {
        self.log(s)
    }
}
352
/// IMPLEMENTATION DETAIL; DO NOT USE
// Common expansion of the level macros: fetch the `Logger` for the caller's priority level (if
// one exists) and `uwriteln!` the message into it
#[doc(hidden)]
#[macro_export]
macro_rules! _flog {
    ($($tt:tt)*) => {{
        if let Some(mut logger) = $crate::Logger::get() {
            $crate::uwriteln!(logger, $($tt)*)
        } else {
            // no logger for this context (thread mode, exception, unlisted priority or
            // non-Cortex-M target): silently drop the message but still report success
            Ok(())
        }
    }};
}
365
/// IMPLEMENTATION DETAIL; DO NOT USE
// Severity levels. The variants are declared from most to least severe so the derived
// `PartialOrd` yields `Error < Warn < Info < Debug < Trace`; `is_enabled` relies on this
// ordering when comparing against the selected threshold
#[doc(hidden)]
#[derive(PartialEq, PartialOrd)]
pub enum Level {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}
376
377/// IMPLEMENTATION DETAIL; DO NOT USE
378#[doc(hidden)]
379pub fn is_enabled(lvl: Level) -> bool {
380    if let Some(threshold) = selected_log_level() {
381        lvl <= threshold
382    } else {
383        // off
384        false
385    }
386}
387
388fn selected_log_level() -> Option<Level> {
389    if cfg!(debug_assertions) {
390        // 'dev' profile
391        if cfg!(feature = "max_level_off") {
392            return None;
393        }
394
395        if cfg!(feature = "max_level_error") {
396            return Some(Level::Error);
397        }
398
399        if cfg!(feature = "max_level_warn") {
400            return Some(Level::Warn);
401        }
402
403        if cfg!(feature = "max_level_info") {
404            return Some(Level::Info);
405        }
406
407        if cfg!(feature = "max_level_debug") {
408            return Some(Level::Debug);
409        }
410
411        if cfg!(feature = "max_level_trace") {
412            return Some(Level::Trace);
413        }
414    } else {
415        if cfg!(feature = "release_max_level_off") {
416            return None;
417        }
418
419        if cfg!(feature = "release_max_level_error") {
420            return Some(Level::Error);
421        }
422
423        if cfg!(feature = "release_max_level_warn") {
424            return Some(Level::Warn);
425        }
426
427        if cfg!(feature = "release_max_level_info") {
428            return Some(Level::Info);
429        }
430
431        if cfg!(feature = "release_max_level_debug") {
432            return Some(Level::Debug);
433        }
434
435        if cfg!(feature = "release_max_level_trace") {
436            return Some(Level::Trace);
437        }
438    }
439
440    Some(Level::Trace)
441}
442
/// Returns `true` if the specified logging level is statically enabled
///
/// Valid arguments are: `Error`, `Warn`, `Info`, `Debug` and `Trace`
#[macro_export]
macro_rules! log_enabled {
    ($e:expr) => {{
        // paste the bare level name onto the hidden `Level` enum and compare it against the
        // threshold selected by the Cargo features
        $crate::is_enabled($crate::Level::$e)
    }}
}
452
/// Logs a string at the 'Error' logging level
///
/// Syntax matches `uwriteln!` minus the first argument. You need to depend on the `ufmt` crate to
/// use this macro.
///
/// NOTE a newline is always appended at the end
#[macro_export]
macro_rules! error {
    ($($tt:tt)*) => {{
        // `is_enabled` only inspects `cfg!` flags (which expand to boolean literals) so when
        // the level is statically disabled this branch folds to the `Ok(())` arm
        if $crate::is_enabled($crate::Level::Error) {
            $crate::_flog!($($tt)*)
        } else {
            Ok(())
        }
    }}
}
469
/// Logs a string at the 'Warn' logging level
///
/// Syntax matches `uwriteln!` minus the first argument. You need to depend on the `ufmt` crate to
/// use this macro.
///
/// NOTE a newline is always appended at the end
#[macro_export]
macro_rules! warn {
    ($($tt:tt)*) => {{
        // statically-disabled levels reduce to the `Ok(())` arm (see `is_enabled`)
        if $crate::is_enabled($crate::Level::Warn) {
            $crate::_flog!($($tt)*)
        } else {
            Ok(())
        }
    }}
}
486
/// Logs a string at the 'Info' logging level
///
/// Syntax matches `uwriteln!` minus the first argument. You need to depend on the `ufmt` crate to
/// use this macro.
///
/// NOTE a newline is always appended at the end
#[macro_export]
macro_rules! info {
    ($($tt:tt)*) => {{
        // statically-disabled levels reduce to the `Ok(())` arm (see `is_enabled`)
        if $crate::is_enabled($crate::Level::Info) {
            $crate::_flog!($($tt)*)
        } else {
            Ok(())
        }
    }}
}
503
/// Logs a string at the 'Debug' logging level
///
/// Syntax matches `uwriteln!` minus the first argument. You need to depend on the `ufmt` crate to
/// use this macro.
///
/// NOTE a newline is always appended at the end
#[macro_export]
macro_rules! debug {
    ($($tt:tt)*) => {{
        // statically-disabled levels reduce to the `Ok(())` arm (see `is_enabled`)
        if $crate::is_enabled($crate::Level::Debug) {
            $crate::_flog!($($tt)*)
        } else {
            Ok(())
        }
    }}
}
520
/// Logs a string at the 'Trace' logging level
///
/// Syntax matches `uwriteln!` minus the first argument. You need to depend on the `ufmt` crate to
/// use this macro.
///
/// NOTE a newline is always appended at the end
#[macro_export]
macro_rules! trace {
    ($($tt:tt)*) => {{
        // statically-disabled levels reduce to the `Ok(())` arm (see `is_enabled`)
        if $crate::is_enabled($crate::Level::Trace) {
            $crate::_flog!($($tt)*)
        } else {
            Ok(())
        }
    }}
}
537
/// A drain retrieves the data written into a `Logger`
// NOTE: NOT `Sync` or `Send` -- `read` assumes exclusive access to the `read` cursor for the
// duration of each call
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct Drain {
    // `#[repr(transparent)]` makes `Drain` layout-compatible with this reference;
    // `Drain::get_all` relies on that to reinterpret the slice returned by the
    // `funnel!`-generated function
    inner: &'static Inner<[u8]>,
}
545
546impl Drain {
547    /// The drain endpoint of each ring buffer, highest priority first
548    pub fn get_all() -> &'static [Self] {
549        if cfg!(not(cortex_m)) {
550            return &[];
551        }
552
553        if (cfg!(debug_assertions) && cfg!(feature = "max_level_off"))
554            || cfg!(feature = "release_max_level_off")
555        {
556            return &[];
557        }
558
559        // NOTE The expansion of `funnel!` declares `__funnel_drains` as a function with signature
560        // `fn() -> &'static [&'static Inner<[u8]>]` so here we are implicitly transmuting `&'static
561        // Inner<[u8]>` into `Drain` but this should be fine because they are equivalent due to
562        // `#[repr(transparent)]`
563        extern "Rust" {
564            fn __funnel_drains() -> &'static [Drain];
565        }
566
567        unsafe { __funnel_drains() }
568    }
569
570    /// Copies the contents of the `Logger` ring buffer into the given buffer
571    // NOTE this is basically `heapless::spsc::Consumer::dequeue`
572    pub fn read<'b>(&self, buf: &'b mut [u8]) -> &'b [u8] {
573        unsafe {
574            // NOTE we use `UnsafeCell` instead of `AtomicUsize` because we want the unique
575            // reference (`&mut-`) semantics; this drain has exclusive access to the `read`
576            // pointer for the duration of this function call
577            let readf = &mut *self.inner.read.get();
578            let writef: *const AtomicUsize = self.inner.write.get() as *const _;
579            let blen = (*self.inner.buffer.get()).len();
580            let p = (*self.inner.buffer.get()).as_ptr();
581
582            // early exit to hint the compiler that `n` is not `0`
583            if blen == 0 {
584                return &[];
585            }
586
587            let read = *readf;
588            // XXX on paper, this is insta-UB because `Logger::log` has a unique reference
589            // (`&mut-`) to the `write` field and this operation require a shared reference (`&-`)
590            // to the same field. At runtime, this load is atomic (happens in a single instruction)
591            // so any modification done by an interrupt handler (via `Logger::log`) can *not* result
592            // in a data race (e.g. torn read or write). To properly avoid any theoretical UB we
593            // would need to something like `atomic_load(a_raw_pointer_to_write)`, which exist but
594            // it's unstable (`intrinsics::atomic_load`), *plus* `&raw write` (RFC #2582), which has
595            // not been implemented. In practice, as long as this produces a fresh value each time
596            // is called (instead of cached on the stack) we should be fine.
597            let write = (*writef).load(Ordering::Relaxed);
598            atomic::compiler_fence(Ordering::Acquire); // ▼
599
600            if write > read {
601                // number of bytes to copy
602                let c = cmp::min(buf.len(), write.wrapping_sub(read));
603                // FIXME (?) this is *not* always optimized to a right shift (`lsr`) when `n` is
604                // a power of 2 -- instead we get an `udiv` which is slower.
605                let r = read % blen;
606
607                // NOTE we use `ptr::copy_nonoverlapping` instead of `copy_from_slice` to avoid
608                // panicking branches
609                if r + c > blen {
610                    // two memcpy-s
611                    let mid = blen - r;
612                    // buf[..mid].copy_from_slice(&buffer[r..]);
613                    ptr::copy_nonoverlapping(p.add(r), buf.as_mut_ptr(), mid);
614                    // buf[mid..mid + c].copy_from_slice(&buffer[..c - mid]);
615                    ptr::copy_nonoverlapping(p, buf.as_mut_ptr().add(mid), c - mid);
616                } else {
617                    // single memcpy
618                    // buf[..c].copy_from_slice(&buffer[r..r + c]);
619                    ptr::copy_nonoverlapping(p.add(r), buf.as_mut_ptr(), c);
620                }
621
622                atomic::compiler_fence(Ordering::Release); // ▲
623                *readf = (*readf).wrapping_add(c);
624
625                // &buf[..c]
626                buf.get_unchecked(..c)
627            } else {
628                &[]
629            }
630        }
631    }
632}
633
634impl Iterator for Drain {
635    type Item = u8;
636
637    fn next(&mut self) -> Option<u8> {
638        self.read(&mut [0]).first().cloned()
639    }
640}
641
#[cfg(test)]
mod tests {
    use super::{Drain, Inner, Logger};

    // a single `log` call lands at the start of a fresh (empty) buffer
    #[test]
    fn sanity() {
        static INNER: Inner<[u8; 32]> = Inner::new([0; 32]);

        let inner = &INNER;
        let m = "Hello, world!";
        let logger = Logger { inner };
        logger.log(m).unwrap();
        unsafe {
            assert!((*logger.inner.buffer.get()).starts_with(m.as_bytes()));
        }
    }

    // byte-by-byte draining via the `Iterator` implementation; interleaved log/drain calls
    #[test]
    fn drain() {
        static INNER: Inner<[u8; 32]> = Inner::new([0; 32]);

        let inner = &INNER;
        let logger = Logger { inner };
        let mut drain = Drain { inner };

        assert_eq!(drain.next(), None);

        logger.log("A").unwrap();
        assert_eq!(drain.next(), Some(b'A'));
        assert_eq!(drain.next(), None);

        logger.log("B").unwrap();
        assert_eq!(drain.next(), Some(b'B'));
        assert_eq!(drain.next(), None);

        logger.log("CD").unwrap();
        assert_eq!(drain.next(), Some(b'C'));
        assert_eq!(drain.next(), Some(b'D'));
        assert_eq!(drain.next(), None);
    }

    // `read` with a destination smaller than the message: data comes out in chunks, and the
    // second message exercises the buffer's physical wrap-around
    #[test]
    fn read() {
        static INNER: Inner<[u8; 16]> = Inner::new([0; 16]);

        let inner = &INNER;
        let logger = Logger { inner };
        let drain = Drain { inner };

        let mut buf = [0; 8];
        logger.log("Hello, world!").unwrap();
        assert_eq!(drain.read(&mut buf), b"Hello, w");
        assert_eq!(drain.read(&mut buf), b"orld!");
        assert_eq!(drain.read(&mut buf), b"");

        // NOTE the ring buffer will wrap around with this operation
        logger.log("Hello, world!").unwrap();
        assert_eq!(drain.read(&mut buf), b"Hello, w");
        assert_eq!(drain.read(&mut buf), b"orld!");
        assert_eq!(drain.read(&mut buf), b"");
    }

    // a write that starts near the physical end of the buffer must be split into the two
    // `memcpy`s of `Logger::log`'s straddling branch
    #[test]
    fn split_write() {
        const N: usize = 32;
        const M: usize = 24;
        static INNER: Inner<[u8; N]> = Inner::new([0; N]);

        let m = "Hello, world!";
        let inner = &INNER;
        unsafe {
            // fake read/write pointers
            *inner.read.get() = M;
            *inner.write.get() = M;

            let logger = Logger { inner };
            logger.log(m).unwrap();
            let m = m.as_bytes();
            let buffer = &*logger.inner.buffer.get();
            // first fragment fills the tail of the buffer ...
            assert_eq!(buffer[M..], m[..(N - M)]);
            // ... and the remainder wraps to the front
            assert_eq!(buffer[..(m.len() - (N - M))], m[(N - M)..]);
        }
    }

    // logging must keep working when the free-running cursors overflow `usize`
    #[test]
    fn wrap_around() {
        static INNER: Inner<[u8; 32]> = Inner::new([0; 32]);

        let m = "Hello, world!";
        let inner = &INNER;
        unsafe {
            // fake read/write pointers (one byte before the integer wrap-around point)
            *inner.read.get() = usize::max_value();
            *inner.write.get() = usize::max_value();

            let logger = Logger { inner };
            logger.log(m).unwrap();

            // first byte lands in the last slot, the rest wraps to the front of the buffer
            let buffer = &*logger.inner.buffer.get();
            assert_eq!(buffer.last(), Some(&b'H'));
            assert_eq!(buffer[..m.len() - 1], m.as_bytes()[1..]);
        }
    }
}