// ublox_sockets/ring_buffer.rs

use super::{Error, Result};
use core::cmp;

use heapless::Vec;

/// A ring buffer.
///
/// This ring buffer implementation provides several ways to interact with it:
///
///   * Enqueueing or dequeueing one element from the corresponding side of the buffer;
///   * Enqueueing or dequeueing a slice of elements from the corresponding side of the buffer;
///   * Accessing allocated and unallocated areas directly.
///
/// This implementation is suitable for both simple uses such as a FIFO queue
/// of UDP packets, and advanced ones such as a TCP reassembly buffer.
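///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// let mut rb: RingBuffer<u8, 4> = RingBuffer::new();
///
/// // Enqueue as much of the slice as fits and report how much was copied.
/// assert_eq!(rb.enqueue_slice(b"ab"), 2);
///
/// // Dequeue back into a caller-provided buffer.
/// let mut out = [0u8; 2];
/// assert_eq!(rb.dequeue_slice(&mut out), 2);
/// assert_eq!(&out, b"ab");
/// ```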
#[derive(Debug, Default)]
pub struct RingBuffer<T, const N: usize> {
    storage: Vec<T, N>,
    read_at: usize,
    length: usize,
}

impl<T: Default + Clone, const N: usize> RingBuffer<T, N> {
    /// Create an empty ring buffer.
    ///
    /// During creation, every element of the backing storage is initialized to its default value.
    pub fn new() -> RingBuffer<T, N> {
        let mut storage = Vec::new();
        storage.resize_default(N).ok();
        RingBuffer {
            storage,
            read_at: 0,
            length: 0,
        }
    }

    // Internal helper used by the `From<Vec<T, N>>` impl and by tests:
    // copies `slice` into the backing storage (up to the capacity), then
    // clears the buffer so its length is 0 again.
    fn from_slice(slice: &[T]) -> RingBuffer<T, N>
    where
        T: Copy + core::fmt::Debug,
    {
        let mut rb = RingBuffer::new();
        rb.enqueue_slice(slice);
        rb.clear();
        rb
    }

    /// Clear the ring buffer.
    pub fn clear(&mut self) {
        self.read_at = 0;
        self.length = 0;
    }

    /// Return the maximum number of elements the ring buffer can hold.
    pub fn capacity(&self) -> usize {
        self.storage.capacity()
    }

    /// Return the current number of elements in the ring buffer.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Return the number of elements that can be added to the ring buffer.
    pub fn window(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Return the largest number of elements that can be added to the buffer
    /// without wrapping around (i.e. in a single `enqueue_many` call).
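    ///
    /// A minimal sketch of the distinction from `window` (illustrative only):
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 4> = RingBuffer::new();
    /// rb.enqueue_slice(b"abc");
    /// rb.dequeue_many(2);
    /// // Three slots are free in total, but only one before the write
    /// // position wraps around to the start of the storage.
    /// assert_eq!(rb.window(), 3);
    /// assert_eq!(rb.contiguous_window(), 1);
    /// ```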
    pub fn contiguous_window(&self) -> usize {
        cmp::min(self.window(), self.capacity() - self.get_idx(self.len()))
    }

    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.window() == 0
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with an
    /// additional check to ensure that the capacity is not zero.
    fn get_idx(&self, idx: usize) -> usize {
        let capacity = self.capacity();
        if capacity > 0 {
            (self.read_at + idx) % capacity
        } else {
            0
        }
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with no
    /// additional checks to ensure the capacity is not zero.
    fn get_idx_unchecked(&self, idx: usize) -> usize {
        (self.read_at + idx) % self.capacity()
    }
}

/// This is the "discrete" ring buffer interface: it operates with single elements,
/// and boundary conditions (empty/full) are errors.
impl<T: Default + Clone, const N: usize> RingBuffer<T, N> {
    /// Call `f` with a single buffer element, and enqueue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is full.
    pub fn enqueue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
    where
        F: FnOnce(&'b mut T) -> Result<R>,
    {
        if self.is_full() {
            return Err(Error::Exhausted);
        }

        let index = self.get_idx_unchecked(self.length);
        match f(&mut self.storage[index]) {
            Ok(result) => {
                self.length += 1;
                Ok(result)
            }
            Err(error) => Err(error),
        }
    }

    /// Enqueue a single element into the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is full.
    ///
    /// This function is a shortcut for `ring_buf.enqueue_one_with(Ok)`.
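    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 2> = RingBuffer::new();
    /// *rb.enqueue_one().unwrap() = 1;
    /// *rb.enqueue_one().unwrap() = 2;
    /// // The buffer is now full, so a third enqueue fails.
    /// assert_eq!(rb.enqueue_one(), Err(Error::Exhausted));
    /// ```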
    pub fn enqueue_one(&mut self) -> Result<&mut T> {
        self.enqueue_one_with(Ok)
    }

    /// Call `f` with a single buffer element, and dequeue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
    where
        F: FnOnce(&'b mut T) -> Result<R>,
    {
        if self.is_empty() {
            return Err(Error::Exhausted);
        }

        let next_at = self.get_idx_unchecked(1);
        match f(&mut self.storage[self.read_at]) {
            Ok(result) => {
                self.length -= 1;
                self.read_at = next_at;
                Ok(result)
            }
            Err(error) => Err(error),
        }
    }

    /// Dequeue an element from the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is empty.
    ///
    /// This function is a shortcut for `ring_buf.dequeue_one_with(Ok)`.
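    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 2> = RingBuffer::new();
    /// // Dequeueing from an empty buffer is an error.
    /// assert_eq!(rb.dequeue_one(), Err(Error::Exhausted));
    /// *rb.enqueue_one().unwrap() = 7;
    /// assert_eq!(*rb.dequeue_one().unwrap(), 7);
    /// ```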
    pub fn dequeue_one(&mut self) -> Result<&mut T> {
        self.dequeue_one_with(Ok)
    }
}

/// This is the "continuous" ring buffer interface: it operates with element slices,
/// and boundary conditions (empty/full) simply result in empty slices.
impl<T: Default + core::fmt::Debug + Clone, const N: usize> RingBuffer<T, N> {
    /// Call `f` with the largest contiguous slice of unallocated buffer elements,
    /// and enqueue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
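    ///
    /// A minimal sketch (illustrative only): copy two bytes into the free
    /// space and report how many were written.
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 8> = RingBuffer::new();
    /// let (written, ()) = rb.enqueue_many_with(|buf| {
    ///     buf[..2].copy_from_slice(b"ab");
    ///     (2, ())
    /// });
    /// assert_eq!(written, 2);
    /// assert_eq!(rb.len(), 2);
    /// ```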
    pub fn enqueue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
    where
        F: FnOnce(&'b mut [T]) -> (usize, R),
    {
        if self.length == 0 {
            // Ring is currently empty. Reset `read_at` to optimize
            // for contiguous space.
            self.read_at = 0;
        }

        let write_at = self.get_idx(self.length);
        let max_size = self.contiguous_window();
        let (size, result) = f(&mut self.storage[write_at..write_at + max_size]);
        assert!(size <= max_size);
        self.length += size;
        (size, result)
    }

    /// Enqueue a slice of elements up to the given size into the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the free space in the buffer is not contiguous.
    pub fn enqueue_many(&mut self, size: usize) -> &mut [T] {
        self.enqueue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        })
        .1
    }

    /// Enqueue as many elements from the given slice into the buffer as possible,
    /// and return the number of elements that fit.
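    ///
    /// A minimal sketch (illustrative only): only as many elements as fit
    /// are copied, and the return value reports how many that was.
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 4> = RingBuffer::new();
    /// assert_eq!(rb.enqueue_slice(b"abcdef"), 4);
    /// assert!(rb.is_full());
    /// ```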
    pub fn enqueue_slice(&mut self, data: &[T]) -> usize
    where
        T: Copy,
    {
        let (size_1, data) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, &data[size..])
        });
        let (size_2, ()) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, ())
        });
        size_1 + size_2
    }

    /// Call `f` with the largest contiguous slice of allocated buffer elements,
    /// and dequeue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
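    ///
    /// A minimal sketch (illustrative only): inspect the readable data and
    /// consume only part of it.
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 8> = RingBuffer::new();
    /// rb.enqueue_slice(b"abcd");
    /// let (read, first) = rb.dequeue_many_with(|buf| (2, buf[0]));
    /// assert_eq!((read, first), (2, b'a'));
    /// assert_eq!(rb.len(), 2);
    /// ```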
    pub fn dequeue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
    where
        F: FnOnce(&'b mut [T]) -> (usize, R),
    {
        let capacity = self.capacity();
        let max_size = cmp::min(self.len(), capacity - self.read_at);
        let (size, result) = f(&mut self.storage[self.read_at..self.read_at + max_size]);
        assert!(size <= max_size);
        self.read_at = if capacity > 0 {
            (self.read_at + size) % capacity
        } else {
            0
        };
        self.length -= size;
        (size, result)
    }

    /// Call `f` with the allocated buffer elements as (up to) two slices: the
    /// contiguous part starting at the read position, and, if the allocated
    /// data wraps around the end of the storage, the remainder starting at
    /// index zero (`None` otherwise). Then dequeue the number of elements
    /// returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the total number of allocated elements.
    pub fn dequeue_many_with_wrapping<'b, R, F>(&'b mut self, f: F) -> (usize, R)
    where
        F: FnOnce(&'b [T], Option<&'b [T]>) -> (usize, R),
    {
        let capacity = self.capacity();
        let size1 = cmp::min(self.len(), capacity - self.read_at);
        let size2 = self.len() - size1;
        let (size, result) = if size2 != 0 {
            f(
                &self.storage[self.read_at..self.read_at + size1],
                Some(&self.storage[..size2]),
            )
        } else {
            f(&self.storage[self.read_at..self.read_at + size1], None)
        };

        assert!(size <= size1 + size2);
        self.read_at = if capacity > 0 {
            (self.read_at + size) % capacity
        } else {
            0
        };
        self.length -= size;
        (size, result)
    }

    /// Dequeue a slice of elements up to the given size from the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the allocated space in the buffer is not contiguous.
    pub fn dequeue_many(&mut self, size: usize) -> &mut [T] {
        self.dequeue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        })
        .1
    }

    /// Dequeue as many elements from the buffer into the given slice as possible,
    /// and return the number of elements that fit.
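    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 8> = RingBuffer::new();
    /// rb.enqueue_slice(b"abc");
    /// let mut out = [0u8; 8];
    /// let n = rb.dequeue_slice(&mut out);
    /// assert_eq!(&out[..n], b"abc");
    /// ```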
    pub fn dequeue_slice(&mut self, data: &mut [T]) -> usize
    where
        T: Copy,
    {
        let (size_1, data) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, &mut data[size..])
        });
        let (size_2, ()) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, ())
        });
        size_1 + size_2
    }
}

/// This is the "random access" ring buffer interface: it operates with element slices,
/// and allows accessing elements of the buffer that are not adjacent to its head or tail.
impl<T: Default + Clone, const N: usize> RingBuffer<T, N> {
    /// Return the largest contiguous slice of unallocated buffer elements starting
    /// at the given offset past the last allocated element, and up to the given size.
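    ///
    /// A minimal sketch (illustrative only): write into the free space first,
    /// then commit it with `enqueue_unallocated`.
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 8> = RingBuffer::new();
    /// rb.get_unallocated(0, 2).copy_from_slice(b"ab");
    /// rb.enqueue_unallocated(2);
    /// assert_eq!(rb.len(), 2);
    /// ```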
    pub fn get_unallocated(&mut self, offset: usize, mut size: usize) -> &mut [T] {
        let start_at = self.get_idx(self.length + offset);
        // We can't access past the end of the unallocated data.
        if offset > self.window() {
            return &mut [];
        }
        // We can't enqueue more than the available free space.
        let clamped_window = self.window() - offset;
        if size > clamped_window {
            size = clamped_window;
        }
        // We can't contiguously enqueue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end {
            size = until_end;
        }

        &mut self.storage[start_at..start_at + size]
    }

    /// Write as many elements from the given slice into unallocated buffer elements
    /// starting at the given offset past the last allocated element, and return
    /// the number of elements written.
    pub fn write_unallocated(&mut self, offset: usize, data: &[T]) -> usize
    where
        T: Copy,
    {
        let (size_1, offset, data) = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            (slice_len, offset + slice_len, &data[slice_len..])
        };
        let size_2 = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            slice_len
        };
        size_1 + size_2
    }

    /// Enqueue the given number of unallocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of unallocated elements.
    pub fn enqueue_unallocated(&mut self, count: usize) {
        assert!(count <= self.window());
        self.length += count;
    }

    /// Return the largest contiguous slice of allocated buffer elements starting
    /// at the given offset past the first allocated element, and up to the given size.
    pub fn get_allocated(&self, offset: usize, mut size: usize) -> &[T] {
        let start_at = self.get_idx(offset);
        // We can't read past the end of the allocated data.
        if offset > self.length {
            return &[];
        }
        // We can't read more than we have allocated.
        let clamped_length = self.length - offset;
        if size > clamped_length {
            size = clamped_length;
        }
        // We can't contiguously dequeue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end {
            size = until_end;
        }

        &self.storage[start_at..start_at + size]
    }

    /// Read as many elements from allocated buffer elements into the given slice
    /// starting at the given offset past the first allocated element, and return
    /// the number of elements read.
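    ///
    /// A minimal sketch (illustrative only): peek at queued data without
    /// dequeueing it, then release it with `dequeue_allocated`.
    ///
    /// ```ignore
    /// let mut rb: RingBuffer<u8, 8> = RingBuffer::new();
    /// rb.enqueue_slice(b"abcd");
    /// let mut out = [0u8; 2];
    /// assert_eq!(rb.read_allocated(2, &mut out), 2);
    /// assert_eq!(&out, b"cd");
    /// rb.dequeue_allocated(4);
    /// assert!(rb.is_empty());
    /// ```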
    pub fn read_allocated(&mut self, offset: usize, data: &mut [T]) -> usize
    where
        T: Copy,
    {
        let (size_1, offset, data) = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            (slice.len(), offset + slice.len(), &mut data[slice.len()..])
        };
        let size_2 = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            slice.len()
        };
        size_1 + size_2
    }

    /// Dequeue the given number of allocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of allocated elements.
    pub fn dequeue_allocated(&mut self, count: usize) {
        assert!(count <= self.len());
        self.length -= count;
        self.read_at = self.get_idx(count);
    }
}

impl<T: Default + core::fmt::Debug + Copy, const N: usize> From<Vec<T, N>> for RingBuffer<T, N> {
    fn from(slice: Vec<T, N>) -> RingBuffer<T, N> {
        RingBuffer::from_slice(slice.as_ref())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_length_changes() {
        let mut ring: RingBuffer<u8, 2> = RingBuffer::new();
        assert!(ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 0);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 2);

        ring.length = 1;
        assert!(!ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 1);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 1);

        ring.length = 2;
        assert!(!ring.is_empty());
        assert!(ring.is_full());
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 0);
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one_with() {
        let mut ring: RingBuffer<u8, 5> = RingBuffer::new();
        assert_eq!(
            ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );

        ring.enqueue_one_with(|e| Ok(e)).unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            ring.enqueue_one_with(|e| Ok(*e = i)).unwrap();
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(
            ring.enqueue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );

        for i in 0..5 {
            assert_eq!(ring.dequeue_one_with(|e| Ok(*e)).unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(
            ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one() {
        let mut ring: RingBuffer<u8, 5> = RingBuffer::new();
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));

        ring.enqueue_one().unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            *ring.enqueue_one().unwrap() = i;
            assert!(!ring.is_empty());
        }

        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one(), Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(*ring.dequeue_one().unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_many_with() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(
            ring.enqueue_many_with(|buf| {
                assert_eq!(buf.len(), 12);
                buf[0..2].copy_from_slice(b"ab");
                (2, true)
            }),
            (2, true)
        );
        assert_eq!(ring.len(), 2);
        assert_eq!(&ring.storage[..], b"ab..........");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 2);
            buf[0..4].copy_from_slice(b"cdXX");
            (2, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"abcdXX......");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 4);
            buf[0..4].copy_from_slice(b"efgh");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 8);
            buf[0..4].copy_from_slice(b"ijkl");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 4);
            buf[0..4].copy_from_slice(b"abcd");
            (4, ())
        });
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcd....ijkl");
    }

    #[test]
    fn test_buffer_enqueue_many() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        ring.enqueue_many(8).copy_from_slice(b"abcdefgh");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        ring.enqueue_many(8).copy_from_slice(b"ijkl");
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_enqueue_slice() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefgh"), 8);
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        assert_eq!(ring.enqueue_slice(b"ijklabcd"), 8);
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_dequeue_many_with() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(
            ring.dequeue_many_with(|buf| {
                assert_eq!(buf.len(), 12);
                assert_eq!(buf, b"abcdefghijkl");
                buf[..4].copy_from_slice(b"....");
                (4, true)
            }),
            (4, true)
        );
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"efghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"ijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"abcd");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_many_with_wrapping() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(
            ring.dequeue_many_with_wrapping(|a, b| {
                assert_eq!(a.len(), 12);
                assert_eq!(b, None);
                assert_eq!(a, b"abcdefghijkl");
                (4, true)
            }),
            (4, true)
        );
        assert_eq!(ring.len(), 8);
        assert_eq!(cmp::min(ring.len(), ring.capacity() - ring.read_at), 8);

        ring.dequeue_many_with_wrapping(|a, b| {
            assert_eq!(a, b"efghijkl");
            assert_eq!(b, None);
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(cmp::min(ring.len(), ring.capacity() - ring.read_at), 4);

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);
        assert_eq!(ring.read_at, 8);
        assert_eq!(cmp::min(ring.len(), ring.capacity() - ring.read_at), 4);

        ring.dequeue_many_with_wrapping(|a, b| {
            assert_eq!(a, b"ijkl");
            assert_eq!(b, Some(&b"abcd"[..]));
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(cmp::min(ring.len(), ring.capacity() - ring.read_at), 4);

        ring.dequeue_many_with_wrapping(|a, b| {
            assert_eq!(a, b"abcd");
            assert_eq!(b, None);
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(cmp::min(ring.len(), ring.capacity() - ring.read_at), 0);
    }

    #[test]
    fn test_buffer_dequeue_many() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"abcdefgh");
            buf.copy_from_slice(b"........");
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"ijkl");
            buf.copy_from_slice(b"....");
        }
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_slice() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"abcdefgh");
            assert_eq!(ring.len(), 4);
        }

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"ijklabcd");
            assert_eq!(ring.len(), 0);
        }
    }

    #[test]
    fn test_buffer_get_unallocated() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.get_unallocated(16, 4), b"");

        {
            let buf = ring.get_unallocated(0, 4);
            buf.copy_from_slice(b"abcd");
        }
        assert_eq!(&ring.storage[..], b"abcd........");

        ring.enqueue_many(4);
        assert_eq!(ring.len(), 4);

        {
            let buf = ring.get_unallocated(4, 8);
            buf.copy_from_slice(b"ijkl");
        }
        assert_eq!(&ring.storage[..], b"abcd....ijkl");

        ring.enqueue_many(8).copy_from_slice(b"EFGHIJKL");
        ring.dequeue_many(4).copy_from_slice(b"abcd");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdEFGHIJKL");

        {
            let buf = ring.get_unallocated(0, 8);
            buf.copy_from_slice(b"ABCD");
        }
        assert_eq!(&ring.storage[..], b"ABCDEFGHIJKL");
    }

    #[test]
    fn test_buffer_write_unallocated() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);
        ring.enqueue_many(6).copy_from_slice(b"abcdef");
        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");

        assert_eq!(ring.write_unallocated(0, b"ghi"), 3);
        assert_eq!(ring.get_unallocated(0, 3), b"ghi");

        assert_eq!(ring.write_unallocated(3, b"jklmno"), 6);
        assert_eq!(ring.get_unallocated(3, 3), b"jkl");

        assert_eq!(ring.write_unallocated(9, b"pqrstu"), 3);
        assert_eq!(ring.get_unallocated(9, 3), b"pqr");
    }

    #[test]
    fn test_buffer_get_allocated() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);

        assert_eq!(ring.get_allocated(16, 4), b"");
        assert_eq!(ring.get_allocated(0, 4), b"");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(0, 8), b"abcd");

        ring.enqueue_slice(b"efghijkl");
        ring.dequeue_many(4).copy_from_slice(b"....");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
    }

    #[test]
    fn test_buffer_read_allocated() {
        let mut ring: RingBuffer<u8, 12> = RingBuffer::from_slice(&[b'.'; 12]);
        ring.enqueue_many(12).copy_from_slice(b"abcdefghijkl");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(0, &mut data[..]), 6);
        assert_eq!(&data[..], b"abcdef");

        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");
        ring.enqueue_many(3).copy_from_slice(b"mno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(3, &mut data[..]), 6);
        assert_eq!(&data[..], b"jklmno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(6, &mut data[..]), 3);
        assert_eq!(&data[..], b"mno\x00\x00\x00");
    }

    // #[test]
    // fn test_buffer_with_no_capacity() {
    //     let mut no_capacity: RingBuffer<u8, 0> = RingBuffer::new();

    //     // Call all functions that calculate the remainder against rx_buffer.capacity()
    //     // with a backing storage with a length of 0.
    //     assert_eq!(no_capacity.get_unallocated(0, 0), &[]);
    //     assert_eq!(no_capacity.get_allocated(0, 0), &[]);
    //     no_capacity.dequeue_allocated(0);
    //     assert_eq!(no_capacity.enqueue_many(0), &[]);
    //     assert_eq!(no_capacity.enqueue_one(), Err(Error::Exhausted));
    //     assert_eq!(no_capacity.contiguous_window(), 0);
    // }

    /// Use the buffer a bit. Then empty it and put in an item of
    /// maximum size. By detecting a length of 0, the implementation
    /// can reset the current buffer position.
    #[test]
    fn test_buffer_write_wholly() {
        let mut ring: RingBuffer<u8, 8> = RingBuffer::from_slice(&[b'.'; 8]);
        ring.enqueue_many(2).copy_from_slice(b"xx");
        ring.enqueue_many(2).copy_from_slice(b"xx");
        assert_eq!(ring.len(), 4);
        ring.dequeue_many(4);
        assert_eq!(ring.len(), 0);

        let large = ring.enqueue_many(8);
        assert_eq!(large.len(), 8);
    }
}