ring_buffer/lib.rs
1/*!
2A queue that also allows direct index addressing.
3
This queue trades some features of VecDeque in the std library for the feature of direct
5indexing. For more information, see the struct.
6*/
7
8use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
9use std::cmp::{max, Ordering};
10use std::collections::VecDeque;
11use std::fmt::Debug;
12use std::hash::{Hash, Hasher};
13use std::iter::{FromIterator, FusedIterator, IntoIterator};
14use std::marker::PhantomData;
15use std::mem::{align_of, size_of};
16use std::ops::{Index, IndexMut};
17use std::ptr::NonNull;
18use std::slice;
19
20#[warn(missing_docs)]
21/**
22A queue that also allows direct index addressing.
23
24This queue trades some features from VecDeque in the std library for the feature of direct indexing.
25Instead of returning `()`, [`push_back`] returns the index of the new element in the buffer. Methods
26like [`get_absolute`] can then be used for element access.
27
28The index returned by [`push_back`] or used by [`get_absolute`] may not be the actual index but a
29higher number that gets wrapped around internally so the indices are still valid after elements get
30moved around by expanding or shrinking the container.
31
32The features not present in `RingBuffer` whereas present in `VecDeque` include:
33- Double ended functionality, including `push_front` and `pop_back`
34- `append`
35- `drain`
36- `insert`
37- `remove`
38- basically anything that messes with the position of elements in the buffer apart from `swap`
39
40[`push_back`]: Self::push_back
41[`get_absolute`]: Self::get_absolute
42*/
pub struct RingBuffer<T> {
    /// Pointer to the allocated array data.
    data: NonNull<T>,
    /// Tells the compiler the struct owns `T` values.
    _marker: PhantomData<T>,
    /// A bitfield signaling if a value got moved to this position by growing from a wrapped state.
    moved_value: Box<[u64]>,
    /// The count of elements currently saved in the buffer.
    len: usize,
    /// The number of elements that fit into the buffer before reallocation.
    capacity: usize,
    /// The next free element index, i.e. where the next `push_back` writes.
    head: usize,
    /// The index of the front element — the one `pop_front` reads next.
    tail: usize,
}
59
/// A direct index into the buffer, as returned by [`RingBuffer::push_back`].
#[derive(Clone, Copy, Debug)]
pub struct RingBufferIndex {
    /// The raw buffer index at the time of allocation.
    base_index: usize,
    /// An offset that is applied if the value got moved by growing the buffer.
    /// Zero when the element was pushed while the buffer was not wrapped.
    wrap_offset: usize,
}
68
69impl<T> RingBuffer<T> {
70 /// Creates an empty `RingBuffer`.
71 ///
72 /// # Example
73 ///
74 /// ```
75 /// # use ring_buffer::RingBuffer;
76 /// #
77 /// let buffer: RingBuffer<usize> = RingBuffer::new();
78 /// ```
79 pub fn new() -> RingBuffer<T> {
80 RingBuffer::<T>::with_capacity(0)
81 }
82
    /// Creates an empty `RingBuffer` with space for at least `capacity` elements.
    ///
    /// The actual capacity is rounded up to a power of two (minimum 2) so that
    /// index wrapping can be done with a simple bit mask.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let buffer: RingBuffer<usize> = RingBuffer::with_capacity(10);
    /// assert!(buffer.capacity() >= 10);
    /// ```
    pub fn with_capacity(capacity: usize) -> RingBuffer<T> {
        if size_of::<T>() > 0 {
            // Power-of-two capacity keeps `mask()` valid; a minimum of 2 avoids
            // a zero-sized allocation.
            let capacity = max(capacity.next_power_of_two(), 2);

            let layout =
                Layout::from_size_align(capacity * size_of::<T>(), align_of::<T>()).unwrap();
            let ptr = unsafe { alloc(layout) };
            if ptr.is_null() {
                handle_alloc_error(layout);
            }

            RingBuffer {
                data: NonNull::new(ptr).unwrap().cast(),
                _marker: PhantomData::<T>,
                // One bit per slot, rounded up to whole `u64` words.
                moved_value: vec![0; (capacity + u64::BITS as usize - 1) / u64::BITS as usize]
                    .into(),
                len: 0,
                capacity,
                head: 0,
                tail: 0,
            }
        } else {
            // Zero-sized types need no allocation; a dangling pointer suffices.
            // NOTE(review): this branch sizes `moved_value` from the *requested*
            // capacity while `capacity` is set to `usize::MAX`; `push_back`
            // indexes `moved_value[head / 64]`, which looks like it can go out
            // of bounds for ZSTs (e.g. requested capacity 0 yields an empty
            // bitfield) — verify the ZST path.
            RingBuffer {
                data: NonNull::dangling(),
                _marker: PhantomData::<T>,
                moved_value: vec![0; (capacity + u64::BITS as usize - 1) / u64::BITS as usize]
                    .into(),
                len: 0,
                capacity: usize::MAX,
                head: 0,
                tail: 0,
            }
        }
    }
127
128 /// Returns a pair of slices which contain, in order, the contents of the
129 /// `RingBuffer`.
130 ///
131 /// # Example
132 ///
133 /// ```
134 /// # use ring_buffer::RingBuffer;
135 /// #
136 /// let mut buffer = RingBuffer::new();
137 ///
138 /// buffer.push_back(41);
139 /// buffer.push_back(42);
140 /// buffer.push_back(1);
141 /// buffer.push_back(2);
142 ///
143 /// assert_eq!(buffer.len(), 4);
144 /// assert_eq!(buffer.capacity(), 4);
145 /// assert_eq!(buffer.front(), Some(&41));
146 /// assert_eq!(buffer.as_slices(), (&[41, 42, 1, 2][..], &[][..]));
147 ///
148 /// buffer.pop_front();
149 /// buffer.pop_front();
150 /// buffer.push_back(3);
151 /// buffer.push_back(4);
152 ///
153 /// assert_eq!(buffer.as_slices(), (&[1, 2][..], &[3, 4][..]));
154 /// ```
155 #[inline]
156 pub fn as_slices(&self) -> (&[T], &[T]) {
157 unsafe {
158 if self.wraps_around() {
159 (
160 slice::from_raw_parts(
161 self.data.as_ptr().add(self.tail),
162 self.capacity - self.tail,
163 ),
164 slice::from_raw_parts(self.data.as_ptr(), self.head),
165 )
166 } else {
167 let head_or_capacity = if self.head == 0 {
168 self.capacity
169 } else {
170 self.head
171 };
172
173 (
174 slice::from_raw_parts(
175 self.data.as_ptr().add(self.tail),
176 head_or_capacity - self.tail,
177 ),
178 &[],
179 )
180 }
181 }
182 }
183
184 /// Provides a reference to the back element of the queue, or `None` if the `RingBuffer` is
185 /// empty.
186 ///
187 /// # Example
188 ///
189 /// ```
190 /// # use ring_buffer::RingBuffer;
191 /// #
192 /// let mut buffer = RingBuffer::new();
193 /// assert_eq!(buffer.back(), None);
194 ///
195 /// buffer.push_back(1);
196 /// buffer.push_back(2);
197 /// assert_eq!(buffer.back(), Some(&2));
198 /// ```
199 #[inline]
200 pub fn back(&self) -> Option<&T> {
201 self.get_relative(self.len.wrapping_sub(1))
202 }
203
204 /// Provides a mutable reference to the back element of the queue, or `None` if the
205 /// `RingBuffer` is empty.
206 ///
207 /// # Example
208 ///
209 /// ```
210 /// # use ring_buffer::RingBuffer;
211 /// #
212 /// let mut buffer = RingBuffer::new();
213 /// assert_eq!(buffer.back(), None);
214 ///
215 /// buffer.push_back(1);
216 /// buffer.push_back(2);
217 /// if let Some(x) = buffer.back_mut() {
218 /// *x = 42;
219 /// }
220 /// assert_eq!(buffer.back(), Some(&42));
221 /// ```
222 #[inline]
223 pub fn back_mut(&mut self) -> Option<&mut T> {
224 self.get_relative_mut(self.len.wrapping_sub(1))
225 }
226
    /// Returns the absolute index of the back element of the queue, or `None` if the
    /// `RingBuffer` is empty.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// assert!(buffer.back_absolute_index().is_none());
    ///
    /// buffer.push_back(1);
    /// buffer.push_back(2);
    /// let back_index = buffer.back_absolute_index();
    ///
    /// assert!(back_index.is_some());
    /// assert_eq!(buffer.get_absolute(back_index.unwrap()), buffer.back());
    /// ```
    #[inline]
    pub fn back_absolute_index(&self) -> Option<RingBufferIndex> {
        if self.len > 0 {
            Some(RingBufferIndex {
                // `head` points one past the back element; step back with wrap.
                base_index: self.head.wrapping_sub(1) & self.mask(),
                // Record a capacity offset when the buffer wraps and `head`
                // itself has wrapped past zero, so the index stays valid if
                // the buffer later grows (which relocates wrapped elements).
                wrap_offset: self.capacity * (self.wraps_around() & (self.head != 0)) as usize,
            })
        } else {
            None
        }
    }
256
    /// Returns the number of elements the `RingBuffer` can hold without reallocating.
    /// For fast wrapping functionality, capacity is always a power of two.
    ///
    /// Note: for zero-sized `T` this returns `usize::MAX` (see `with_capacity`).
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let buffer: RingBuffer<usize> = RingBuffer::with_capacity(10);
    /// assert!(buffer.capacity() >= 16);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }
272
273 /// Clears the `RingBuffer`, removing all values.
274 ///
275 /// # Example
276 ///
277 /// ```
278 /// # use ring_buffer::RingBuffer;
279 /// #
280 /// let mut buffer = RingBuffer::new();
281 /// buffer.push_back(1);
282 /// buffer.clear();
283 /// assert!(buffer.is_empty());
284 /// ```
285 #[inline]
286 pub fn clear(&mut self) {
287 while self.len > 0 {
288 self.pop_front();
289 }
290 for field in self.moved_value.iter_mut() {
291 *field = 0;
292 }
293 self.len = 0;
294 self.head = 0;
295 self.tail = 0;
296 }
297
298 /// Returns `true` if the `RingBuffer` contains an element equal to the
299 /// given value.
300 ///
301 /// # Example
302 ///
303 /// ```
304 /// # use ring_buffer::RingBuffer;
305 /// #
306 /// let mut buffer = RingBuffer::new();
307 ///
308 /// buffer.push_back(0);
309 /// buffer.push_back(1);
310 ///
311 /// assert_eq!(buffer.contains(&1), true);
312 /// assert_eq!(buffer.contains(&42), false);
313 /// ```
314 #[inline]
315 pub fn contains(&self, x: &T) -> bool
316 where
317 T: PartialEq<T>,
318 {
319 let (a, b) = self.as_slices();
320 a.contains(x) || b.contains(x)
321 }
322
    /// Provides a reference to the front element of the queue, or `None` if the `RingBuffer` is
    /// empty.
    ///
    /// The front element is the one at relative index 0.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// assert_eq!(buffer.front(), None);
    ///
    /// buffer.push_back(1);
    /// buffer.push_back(2);
    /// assert_eq!(buffer.front(), Some(&1));
    /// ```
    #[inline]
    pub fn front(&self) -> Option<&T> {
        self.get_relative(0)
    }
342
    /// Provides a mutable reference to the front element of the queue, or `None` if the
    /// `RingBuffer` is empty.
    ///
    /// The front element is the one at relative index 0.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// assert_eq!(buffer.front_mut(), None);
    ///
    /// buffer.push_back(1);
    /// buffer.push_back(2);
    /// if let Some(x) = buffer.front_mut() {
    ///     *x = 42;
    /// }
    /// assert_eq!(buffer.front(), Some(&42));
    /// ```
    #[inline]
    pub fn front_mut(&mut self) -> Option<&mut T> {
        self.get_relative_mut(0)
    }
365
366 /// Returns the absolute index of the front element of the queue, or `None` if the
367 /// `RingBuffer` is empty.
368 ///
369 /// # Example
370 ///
371 /// ```
372 /// # use ring_buffer::RingBuffer;
373 /// #
374 /// let mut buffer = RingBuffer::new();
375 /// assert!(buffer.front_absolute_index().is_none());
376 ///
377 /// buffer.push_back(1);
378 /// buffer.push_back(2);
379 ///
380 /// let front_index = buffer.front_absolute_index();
381 ///
382 /// assert!(front_index.is_some());
383 /// assert_eq!(buffer.get_absolute(front_index.unwrap()), buffer.front());
384 /// ```
385 #[inline]
386 pub fn front_absolute_index(&self) -> Option<RingBufferIndex> {
387 if self.len > 0 {
388 Some(RingBufferIndex {
389 base_index: self.tail,
390 wrap_offset: 0,
391 })
392 } else {
393 None
394 }
395 }
396
397 /// Retrieves an element in the `RingBuffer` by absolute index.
398 ///
399 /// The index provided is the index used by the internal container.
400 /// This is designed to be used in conjunction with [`push_back`].
401 /// This also gets wrapped around, supporting expanding and shrinking the container without
402 /// breaking the access with absolute addresses.
403 ///
404 /// [`push_back`]: Self::push_back
405 ///
406 /// # Example
407 ///
408 /// ```
409 /// # use ring_buffer::RingBuffer;
410 /// #
411 /// let mut buffer = RingBuffer::new();
412 ///
413 /// let first = buffer.push_back(1);
414 /// buffer.push_back(2);
415 /// let second = buffer.push_back(42);
416 ///
417 /// assert_eq!(buffer.get_absolute(first), Some(&1));
418 /// assert_eq!(buffer.get_absolute(second), Some(&42));
419 /// ```
420 #[inline]
421 pub fn get_absolute(&self, index: RingBufferIndex) -> Option<&T> {
422 if self.is_index_in_range(index) {
423 unsafe { Some(&*self.data.as_ptr().add(self.resolve_index(index))) }
424 } else {
425 None
426 }
427 }
428
429 /// Retrieves a mutable element in the `RingBuffer` by absolute index.
430 ///
431 /// The index provided is the index used by the internal container.
432 /// This is designed to be used in conjunction with [`push_back`].
433 /// This also gets wrapped around, supporting expanding and shrinking the container without
434 /// breaking the access with absolute addresses.
435 ///
436 /// [`push_back`]: Self::push_back
437 ///
438 /// # Example
439 ///
440 /// ```
441 /// # use ring_buffer::RingBuffer;
442 /// #
443 /// let mut buffer = RingBuffer::new();
444 ///
445 /// buffer.push_back(1);
446 /// let the_answer = buffer.push_back(2);
447 /// buffer.push_back(3);
448 ///
449 /// assert_eq!(buffer.get_absolute(the_answer), Some(&2));
450 ///
451 /// if let Some(x) = buffer.get_absolute_mut(the_answer) {
452 /// *x = 42;
453 /// }
454 ///
455 /// assert_eq!(buffer.get_absolute(the_answer), Some(&42));
456 /// ```
457 #[inline]
458 pub fn get_absolute_mut(&mut self, index: RingBufferIndex) -> Option<&mut T> {
459 if self.is_index_in_range(index) {
460 unsafe { Some(&mut *self.data.as_ptr().add(self.resolve_index(index))) }
461 } else {
462 None
463 }
464 }
465
    /// Retrieves an element in the `RingBuffer` by absolute index without checking for validity.
    ///
    /// This does the same as [`get_absolute`], but skips any checks. Use this if you really need
    /// the performance and can guarantee the accessed element is valid.
    ///
    /// [`get_absolute`]: Self::get_absolute
    ///
    /// # Safety
    ///
    /// The caller is responsible for maintaining Rusts ownership guarantees.
    /// The validity of a buffer entry also isn't checked, so while being valid memory, the
    /// reference may not point to an object in valid state.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    ///
    /// let first = buffer.push_back(1);
    /// buffer.push_back(2);
    /// let second = buffer.push_back(42);
    ///
    /// unsafe {
    ///     assert_eq!(buffer.get_absolute_unchecked(first), &1);
    ///     assert_eq!(buffer.get_absolute_unchecked(second), &42);
    /// }
    /// ```
    #[inline]
    pub unsafe fn get_absolute_unchecked(&self, index: RingBufferIndex) -> &T {
        // SAFETY (caller): the resolved index must refer to a live element.
        &*self.data.as_ptr().add(self.resolve_index(index))
    }
499
    /// Retrieves a mutable element in the `RingBuffer` by absolute index without checking for
    /// validity.
    ///
    /// This does the same as [`get_absolute_mut`], but skips any checks. Use this if you really need
    /// the performance and can guarantee the accessed element is valid.
    ///
    /// [`get_absolute_mut`]: Self::get_absolute_mut
    ///
    /// # Safety
    ///
    /// The caller is responsible for maintaining Rusts ownership guarantees.
    /// The validity of a buffer entry also isn't checked, so while being valid memory, the
    /// reference may not point to an object in valid state.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    ///
    /// buffer.push_back(1);
    /// let the_answer = buffer.push_back(2);
    /// buffer.push_back(3);
    ///
    /// assert_eq!(buffer.get_absolute(the_answer), Some(&2));
    ///
    /// unsafe {
    ///     *buffer.get_absolute_mut_unchecked(the_answer) = 42;
    /// }
    ///
    /// assert_eq!(buffer.get_absolute(the_answer), Some(&42));
    /// ```
    #[inline]
    pub unsafe fn get_absolute_mut_unchecked(&mut self, index: RingBufferIndex) -> &mut T {
        // SAFETY (caller): the resolved index must refer to a live element.
        &mut *self.data.as_ptr().add(self.resolve_index(index))
    }
537
538 /// Retrieves an element in the `RingBuffer` by relative index.
539 ///
540 /// Element at index 0 is the front of the queue.
541 ///
542 /// # Example
543 ///
544 /// ```
545 /// # use ring_buffer::RingBuffer;
546 /// #
547 /// let mut buffer = RingBuffer::new();
548 /// buffer.push_back(3);
549 /// buffer.push_back(4);
550 /// buffer.push_back(5);
551 /// buffer.push_back(6);
552 /// assert_eq!(buffer.get_relative(1), Some(&4));
553 /// ```
554 pub fn get_relative(&self, index: usize) -> Option<&T> {
555 if index < self.len {
556 unsafe { Some(&*self.get_relative_pointer(index)) }
557 } else {
558 None
559 }
560 }
561
562 /// Retrieves an element in the `RingBuffer` mutably by relative index.
563 ///
564 /// Element at index 0 is the front of the queue.
565 ///
566 /// # Example
567 ///
568 /// ```
569 /// # use ring_buffer::RingBuffer;
570 /// #
571 /// let mut buffer = RingBuffer::new();
572 /// buffer.push_back(3);
573 /// buffer.push_back(4);
574 /// buffer.push_back(5);
575 /// buffer.push_back(6);
576 /// if let Some(elem) = buffer.get_relative_mut(1) {
577 /// *elem = 7;
578 /// }
579 ///
580 /// assert_eq!(buffer.get_relative(1), Some(&7));
581 /// ```
582 pub fn get_relative_mut(&mut self, index: usize) -> Option<&mut T> {
583 if index < self.len {
584 unsafe { Some(&mut *self.get_relative_pointer(index)) }
585 } else {
586 None
587 }
588 }
589
    /// Returns `true` if the `RingBuffer` holds no elements.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// assert!(buffer.is_empty());
    /// buffer.push_back(1);
    /// assert!(!buffer.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
606
    /// Returns `true` if the buffer is at full capacity, meaning the next
    /// `push_back` will trigger a reallocation.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::with_capacity(2);
    /// assert_eq!(buffer.capacity(), 2);
    /// assert!(!buffer.is_full());
    /// buffer.push_back(1);
    /// buffer.push_back(2);
    /// assert!(buffer.is_full());
    /// ```
    #[inline]
    pub fn is_full(&self) -> bool {
        self.len == self.capacity
    }
625
    /// Returns a front-to-back iterator.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// buffer.push_back(3);
    /// buffer.push_back(4);
    /// buffer.push_back(5);
    /// let b: &[_] = &[&3, &4, &5];
    /// let c: Vec<&usize> = buffer.iter().collect();
    /// assert_eq!(buffer.front(), Some(&3));
    /// assert_eq!(&c[..], b);
    /// ```
    #[inline]
    pub fn iter(&self) -> Iter<T> {
        Iter {
            // NOTE(review): this slice spans the entire capacity, including
            // slots that hold no initialized element; the iterator must only
            // read the `len` live positions starting at `tail` — confirm this
            // is sound for the element types used.
            buffer: unsafe { slice::from_raw_parts(self.data.as_ptr(), self.capacity) },
            len: self.len,
            index: self.tail,
        }
    }
650
    /// Returns a front-to-back iterator that returns mutable references.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// buffer.push_back(3);
    /// buffer.push_back(4);
    /// buffer.push_back(5);
    /// for num in buffer.iter_mut() {
    ///     *num = *num - 2;
    /// }
    /// let b: &[_] = &[&mut 1, &mut 2, &mut 3];
    /// assert_eq!(&buffer.iter_mut().collect::<Vec<&mut usize>>()[..], b);
    /// ```
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut<T> {
        IterMut {
            // NOTE(review): like `iter`, this slice covers the whole capacity
            // including uninitialized slots; the iterator must only touch the
            // `len` live positions starting at `tail`.
            buffer: unsafe { slice::from_raw_parts_mut(self.data.as_ptr(), self.capacity) },
            len: self.len,
            index: self.tail,
        }
    }
676
    /// Returns the number of elements currently stored in the `RingBuffer`.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// assert_eq!(buffer.len(), 0);
    /// buffer.push_back(1);
    /// assert_eq!(buffer.len(), 1);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
693
694 /// Removes the first element and returns it, or `None` if the `RingBuffer` is
695 /// empty.
696 ///
697 /// # Example
698 ///
699 /// ```
700 /// # use ring_buffer::RingBuffer;
701 /// #
702 /// let mut buffer = RingBuffer::new();
703 /// buffer.push_back(1);
704 /// buffer.push_back(2);
705 ///
706 /// assert_eq!(buffer.pop_front(), Some(1));
707 /// assert_eq!(buffer.pop_front(), Some(2));
708 /// assert_eq!(buffer.pop_front(), None);
709 /// ```
710 #[inline]
711 pub fn pop_front(&mut self) -> Option<T> {
712 if !self.is_empty() {
713 let return_value = unsafe { Some(self.data.as_ptr().add(self.tail).read()) };
714
715 self.tail = (self.tail + 1) & self.mask();
716 self.len -= 1;
717 return_value
718 } else {
719 None
720 }
721 }
722
    /// Appends an element to the back of the `RingBuffer` and returns its
    /// absolute index for later access via [`get_absolute`].
    ///
    /// [`get_absolute`]: Self::get_absolute
    ///
    /// # Examples
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// buffer.push_back(1);
    /// buffer.push_back(42);
    /// assert_eq!(42, *buffer.back().unwrap());
    /// ```
    #[inline]
    pub fn push_back(&mut self, new_element: T) -> RingBufferIndex {
        if self.is_full() {
            // Double the capacity; `grow` also un-wraps the contents.
            self.grow(self.capacity << 1);
        }

        unsafe {
            // SAFETY: after the possible grow, `head` is a free slot.
            self.data.as_ptr().add(self.head).write(new_element);
        }

        // Track in the bitfield whether this slot was written while the buffer
        // was wrapped; a wrapped write also records `capacity` as the offset a
        // later `grow` will relocate this element by. A non-wrapped write
        // clears any stale bit left over at this slot.
        let wrap_offset;
        let check_bit = 1 << (self.head & (u64::BITS as usize - 1));
        if self.wraps_around() {
            self.moved_value[self.head / u64::BITS as usize] |= check_bit;
            wrap_offset = self.capacity;
        } else {
            self.moved_value[self.head / u64::BITS as usize] &= u64::MAX ^ check_bit;
            wrap_offset = 0;
        }
        let base_index = self.head;
        self.head = (self.head + 1) & self.mask();
        self.len += 1;

        RingBufferIndex {
            base_index,
            wrap_offset,
        }
    }
763
764 /// Reserves capacity for at least `additional` more elements to be inserted in the given
765 /// `RingBuffer`. The collection may reserve more space to avoid frequent reallocations.
766 ///
767 /// # Panics
768 ///
769 /// Panics if the new capacity overflows `usize`.
770 ///
771 /// # Example
772 ///
773 /// ```
774 /// # use ring_buffer::RingBuffer;
775 /// #
776 /// let mut buffer: RingBuffer<usize> = vec![1].into_iter().collect();
777 /// buffer.reserve(10);
778 /// assert!(buffer.capacity() >= 11);
779 /// ```
780 pub fn reserve(&mut self, additional: usize) {
781 let new_capacity = self
782 .len
783 .checked_add(additional)
784 .and_then(|cap| cap.checked_next_power_of_two())
785 .expect("capacity overflow");
786 if new_capacity > self.capacity {
787 self.grow(new_capacity);
788 }
789 }
790
    /// Shrinks the capacity of the `RingBuffer` with a lower bound.
    ///
    /// The capacity will remain at least as large as the maximum of the length
    /// and the supplied value.
    ///
    /// Does nothing if the current capacity is smaller than the supplied
    /// minimum capacity.
    ///
    /// Also only shrinks if no element position needs to change as to preserve the indices of the
    /// allocated elements.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::with_capacity(15);
    /// buffer.extend(0..4);
    /// assert!(buffer.capacity() >= 15);
    /// assert!(!(buffer.capacity() < 6));
    /// buffer.shrink_to(6);
    /// assert!(buffer.capacity() >= 6);
    /// assert!(!(buffer.capacity() < 4));
    /// buffer.shrink_to(0);
    /// assert!(buffer.capacity() >= 4);
    /// ```
    pub fn shrink_to(&mut self, min_capacity: usize) {
        // Target capacity stays a power of two and large enough for the
        // current contents; never below 2.
        let new_capacity = max(
            2,
            max(
                min_capacity.next_power_of_two(),
                self.len.next_power_of_two(),
            ),
        );
        // Only shrink when the contents are contiguous and lie entirely within
        // the new, smaller range, so no element has to move and previously
        // handed-out absolute indices stay valid.
        if (new_capacity < self.capacity)
            & !self.wraps_around()
            & !self.is_empty()
            & (self.head != 0)
            & (self.head <= new_capacity)
        {
            unsafe {
                // SAFETY: same element type and alignment as the original,
                // larger allocation; size is a smaller power-of-two multiple.
                let layout = Layout::from_size_align_unchecked(
                    new_capacity * size_of::<T>(),
                    align_of::<T>(),
                );
                let new_ptr: *mut T = alloc(layout) as *mut T;
                if new_ptr.is_null() {
                    handle_alloc_error(layout);
                }

                // Copy the contiguous occupied region to the same offset in
                // the new allocation.
                self.data
                    .as_ptr()
                    .add(self.tail)
                    .copy_to(new_ptr.add(self.tail), self.len);

                dealloc(
                    self.data.as_ptr() as *mut u8,
                    Layout::from_size_align(self.capacity * size_of::<T>(), align_of::<T>())
                        .unwrap(),
                );

                self.data = NonNull::new_unchecked(new_ptr);
            }

            // NOTE(review): `take(new_capacity)` counts `u64` *words*, not
            // bits. It always keeps at least the ceil(new_capacity / 64)
            // words needed, but usually retains more than necessary —
            // confirm whether word-precise shrinking was intended.
            self.moved_value = self
                .moved_value
                .iter()
                .copied()
                .take(new_capacity)
                .collect();
            self.capacity = new_capacity;
            self.head &= self.mask();
            self.tail &= self.mask();
        }
    }
866
    /// Shrinks the capacity of the `RingBuffer` as much as possible.
    ///
    /// It will drop down as close as possible to the length but the allocator may still inform the
    /// `RingBuffer` that there is space for a few more elements.
    ///
    /// Equivalent to [`shrink_to`](Self::shrink_to) with a minimum capacity of zero.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::with_capacity(15);
    /// buffer.extend(0..4);
    /// assert!(buffer.capacity() >= 15);
    /// buffer.shrink_to_fit();
    /// assert!(buffer.capacity() >= 4);
    /// ```
    pub fn shrink_to_fit(&mut self) {
        self.shrink_to(0);
    }
886
887 /// Swaps elements at relative indices `i` and `j`.
888 ///
889 /// `i` and `j` may be equal.
890 ///
891 /// Element at index 0 is the front of the queue.
892 ///
893 /// # Panics
894 ///
895 /// Panics if either index is out of bounds.
896 ///
897 /// # Example
898 ///
899 /// ```
900 /// # use ring_buffer::RingBuffer;
901 /// #
902 /// let mut buffer = RingBuffer::new();
903 /// buffer.push_back(3);
904 /// buffer.push_back(4);
905 /// buffer.push_back(5);
906 /// buffer.push_back(6);
907 /// buffer.push_back(7);
908 ///
909 /// assert_eq!(buffer.get_relative(0), Some(&3));
910 /// assert_eq!(buffer.get_relative(2), Some(&5));
911 ///
912 /// buffer.swap_relative(0, 2);
913 ///
914 /// assert_eq!(buffer.get_relative(0), Some(&5));
915 /// assert_eq!(buffer.get_relative(2), Some(&3));
916 /// ```
917 pub fn swap_relative(&mut self, i: usize, j: usize) {
918 assert!(
919 (i < self.len) & (j < self.len),
920 "At least one index is out of bounds, i is {}, j is {}, length of buffer is {}",
921 i,
922 j,
923 self.len
924 );
925 unsafe {
926 self.get_relative_pointer(i)
927 .swap(self.get_relative_pointer(j));
928 }
929 }
930
931 /// Swaps elements at absolute indices `i` and `j`.
932 ///
933 /// `i` and `j` may be equal.
934 ///
935 /// This is designed to be used in conjunction with [`push_back`].
936 ///
937 /// [`push_back`]: Self::push_back
938 ///
939 /// # Panics
940 ///
941 /// Panics if either index is out of bounds.
942 ///
943 /// # Example
944 ///
945 /// ```
946 /// # use ring_buffer::RingBuffer;
947 /// #
948 /// let mut buffer = RingBuffer::new();
949 /// buffer.push_back(3);
950 /// let first = buffer.push_back(4);
951 /// buffer.push_back(5);
952 /// let second = buffer.push_back(6);
953 /// buffer.push_back(7);
954 ///
955 /// assert_eq!(buffer.get_absolute(first), Some(&4));
956 /// assert_eq!(buffer.get_absolute(second), Some(&6));
957 ///
958 /// buffer.swap_absolute(first, second);
959 ///
960 /// assert_eq!(buffer.get_absolute(first), Some(&6));
961 /// assert_eq!(buffer.get_absolute(second), Some(&4));
962 /// ```
963 pub fn swap_absolute(&mut self, i: RingBufferIndex, j: RingBufferIndex) {
964 assert!(
965 self.is_index_in_range(i) & self.is_index_in_range(j),
966 "At least one index is out of bounds, i is {}, j is {}, buffer is from {} to {}",
967 self.resolve_index(i),
968 self.resolve_index(j),
969 self.tail,
970 self.head
971 );
972 unsafe {
973 self.data
974 .as_ptr()
975 .add(self.resolve_index(i))
976 .swap(self.data.as_ptr().add(self.resolve_index(j)));
977 }
978 }
979
    /// Shortens the `RingBuffer` to at most `len` elements by popping from the
    /// *front*, keeping the most recently pushed elements.
    ///
    /// If `len` is greater than the `RingBuffer`'s current length, this has no
    /// effect.
    ///
    /// Note: unlike `VecDeque::truncate`, this drops the oldest (front)
    /// elements, as the example below shows.
    ///
    /// # Example
    ///
    /// ```
    /// # use ring_buffer::RingBuffer;
    /// #
    /// let mut buffer = RingBuffer::new();
    /// buffer.push_back(5);
    /// buffer.push_back(10);
    /// buffer.push_back(15);
    /// assert_eq!(buffer.len(), 3);
    /// buffer.truncate(1);
    /// assert_eq!(buffer.len(), 1);
    /// assert_eq!(buffer.get_relative(0), Some(&15));
    /// ```
    pub fn truncate(&mut self, len: usize) {
        // The range is evaluated once up front, so even though `pop_front`
        // decrements `self.len`, exactly `self.len - len` elements are removed.
        for _ in len..self.len {
            self.pop_front();
        }
    }
1004
    // private functions
    /// Tests if the given absolute index refers to a currently occupied slot.
    ///
    /// Works by locating the index relative to the free gap: when the contents
    /// wrap, the free region of size `capacity - len` starts at `head`, so any
    /// position at least that far from `head` is occupied; otherwise the
    /// occupied region is the `len` positions starting at `tail`.
    #[inline]
    fn is_index_in_range(&self, index: RingBufferIndex) -> bool {
        let base_index = self.resolve_index(index);
        if self.wraps_around() {
            base_index.wrapping_sub(self.head) >= (self.capacity - self.len)
        } else {
            base_index.wrapping_sub(self.tail) < self.len
        }
    }
1016
    /// Expands the container to a larger one and copies the data. Unwraps the `RingBuffer` if it
    /// was wrapped before.
    fn grow(&mut self, new_capacity: usize) {
        // Enlarge the moved-value bitfield, preserving the existing bits.
        let mut moved_value = vec![0; (new_capacity + u64::BITS as usize - 1) / u64::BITS as usize];
        moved_value[..(self.capacity + u64::BITS as usize - 1) / u64::BITS as usize]
            .copy_from_slice(&self.moved_value[..]);

        unsafe {
            // Zero-sized types report `capacity == usize::MAX` and must never
            // reach this point.
            assert_ne!(size_of::<T>(), 0, "capacity overflow");
            assert!(new_capacity.is_power_of_two());

            let layout =
                Layout::from_size_align_unchecked(new_capacity * size_of::<T>(), align_of::<T>());
            let new_ptr: *mut T = alloc(layout) as *mut T;
            if new_ptr.is_null() {
                handle_alloc_error(layout);
            }

            if self.wraps_around() {
                // Copy the rear segment `[tail, capacity)` to the same offset...
                self.data
                    .as_ptr()
                    .add(self.tail)
                    .copy_to(new_ptr.add(self.tail), self.capacity - self.tail);
                // ...then append the wrapped front segment `[0, head)` right
                // after it, which un-wraps the buffer in the new allocation.
                self.data
                    .as_ptr()
                    .copy_to(new_ptr.add(self.capacity), self.head);
                self.head += self.capacity;

                // Mark every relocated slot so absolute indices that carry a
                // `wrap_offset` still resolve to the new positions.
                for i in self.capacity..self.head {
                    let check_bit = 1 << (i & (u64::BITS as usize - 1));
                    moved_value[i / u64::BITS as usize] |= check_bit;
                }
            } else if !self.is_empty() {
                // Contiguous contents: copy them to the same offset.
                self.data
                    .as_ptr()
                    .add(self.tail)
                    .copy_to(new_ptr.add(self.tail), self.len);
            }

            dealloc(
                self.data.as_ptr() as *mut u8,
                Layout::from_size_align(self.capacity * size_of::<T>(), align_of::<T>()).unwrap(),
            );

            self.data = NonNull::new_unchecked(new_ptr);
        }

        self.capacity = new_capacity;
        self.moved_value = moved_value.into();
    }
1067
    /// A more convenient way to access a relative addressed element. Public access methods only
    /// need to add their sugar.
    ///
    /// Wraps `tail + index` with the capacity mask; the caller must ensure
    /// `index < len` for the returned pointer to address a live element.
    #[inline]
    unsafe fn get_relative_pointer(&self, index: usize) -> *mut T {
        self.data
            .as_ptr()
            .add(self.tail.wrapping_add(index) & self.mask())
    }
1076
    /// Bit mask used to wrap buffer indices; valid because `capacity` is kept
    /// a power of two. (For zero-sized `T`, `capacity` is `usize::MAX` — see
    /// `with_capacity` — so the mask is not a power-of-two mask there.)
    #[inline]
    fn mask(&self) -> usize {
        self.capacity - 1
    }
1082
    /// Converts a [`RingBufferIndex`] into its effective index.
    ///
    /// A stored `wrap_offset` is only applicable after the buffer has grown
    /// (then `wrap_offset < capacity`); it is applied when the bitfield marks
    /// the offset position as holding an element relocated by `grow`.
    #[inline]
    fn resolve_index(&self, index: RingBufferIndex) -> usize {
        if index.wrap_offset < self.capacity {
            let check_index = index.base_index + index.wrap_offset;
            let check_bit = 1 << (check_index as u64 & (u64::BITS as u64 - 1));
            let apply_offset = (self.moved_value[check_index / u64::BITS as usize] & check_bit) > 0;
            // Branch-free select: add the offset only when the moved bit is set.
            index.base_index + index.wrap_offset * apply_offset as usize
        } else {
            index.base_index
        }
    }
1095
    /// If the container wraps around the end of the internal array.
    ///
    /// On a non-empty buffer, `head <= tail` means the write position has
    /// wrapped past the end while the front has not, so the contents span the
    /// array boundary (this includes the completely full case `head == tail`).
    #[inline]
    fn wraps_around(&self) -> bool {
        !self.is_empty() & (self.head <= self.tail)
    }
1101}
1102
impl RingBufferIndex {
    /// Compares two indices for equality relative to `buffer`.
    ///
    /// Uses the buffer to resolve any aliasing that can occur because of a growing buffer and wrap-
    /// around, so two differently-stored indices referring to the same slot compare equal.
    #[inline]
    pub fn eq<T>(self, buffer: &RingBuffer<T>, other: Self) -> bool {
        buffer.resolve_index(self) == buffer.resolve_index(other)
    }
}
1111
1112impl<T> Clone for RingBuffer<T>
1113where
1114 T: Clone,
1115{
1116 fn clone(&self) -> RingBuffer<T> {
1117 self.iter().cloned().collect()
1118 }
1119}
1120
1121impl<T> Debug for RingBuffer<T>
1122where
1123 T: Debug,
1124{
1125 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1126 f.debug_list().entries(self).finish()
1127 }
1128}
1129
1130impl<T> Default for RingBuffer<T> {
1131 #[inline]
1132 fn default() -> RingBuffer<T> {
1133 RingBuffer::new()
1134 }
1135}
1136
impl<T> Drop for RingBuffer<T> {
    /// Drops every remaining element, then frees the backing allocation.
    fn drop(&mut self) {
        // Run each element's destructor by draining the queue front to back.
        while !self.is_empty() {
            self.pop_front();
        }

        // Zero-sized types never allocate, so there is nothing to deallocate for them.
        if size_of::<T>() > 0 {
            // SAFETY: the layout must mirror the one used when allocating
            // (`capacity * size_of::<T>()`, natural alignment). NOTE(review): assumes
            // `data` always points at a real allocation of exactly `capacity` slots —
            // confirm against the constructors and grow/shrink paths.
            unsafe {
                dealloc(
                    self.data.as_ptr() as *mut u8,
                    Layout::from_size_align_unchecked(
                        self.capacity * size_of::<T>(),
                        align_of::<T>(),
                    ),
                )
            }
        }
    }
}
1156
1157impl<T> Eq for RingBuffer<T> where T: Eq {}
1158
1159impl<T> Extend<T> for RingBuffer<T> {
1160 fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1161 for elem in iter.into_iter() {
1162 self.push_back(elem);
1163 }
1164 }
1165}
1166
1167impl<'a, T> Extend<&'a T> for RingBuffer<T>
1168where
1169 T: 'a + Copy,
1170{
1171 fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
1172 self.extend(iter.into_iter().cloned());
1173 }
1174}
1175
1176impl<T> From<Vec<T>> for RingBuffer<T> {
1177 // This can be optimized to avoid all the copying
1178 fn from(other: Vec<T>) -> Self {
1179 let mut buffer = RingBuffer::with_capacity(other.len());
1180 for elem in other.into_iter() {
1181 buffer.push_back(elem);
1182 }
1183 buffer
1184 }
1185}
1186
1187impl<T> Hash for RingBuffer<T>
1188where
1189 T: Hash,
1190{
1191 fn hash<H: Hasher>(&self, state: &mut H) {
1192 self.len.hash(state);
1193 self.head.hash(state);
1194 self.tail.hash(state);
1195 let (a, b) = self.as_slices();
1196 Hash::hash_slice(a, state);
1197 Hash::hash_slice(b, state);
1198 }
1199}
1200
1201impl<T> From<RingBuffer<T>> for VecDeque<T> {
1202 // This can be optimized to avoid all the copying
1203 fn from(buffer: RingBuffer<T>) -> Self {
1204 let mut vec = VecDeque::<T>::with_capacity(buffer.len());
1205 for elem in buffer.into_iter() {
1206 vec.push_back(elem);
1207 }
1208 vec
1209 }
1210}
1211
1212impl<T> FromIterator<T> for RingBuffer<T> {
1213 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> RingBuffer<T> {
1214 let iterator = iter.into_iter();
1215 let (bound, _) = iterator.size_hint();
1216 let mut ring_buffer = RingBuffer::<T>::with_capacity(bound);
1217 ring_buffer.extend(iterator);
1218 ring_buffer
1219 }
1220}
1221
1222impl<T> Index<RingBufferIndex> for RingBuffer<T> {
1223 type Output = T;
1224
1225 #[inline]
1226 fn index(&self, index: RingBufferIndex) -> &T {
1227 self.get_absolute(index).expect("Out of bounds access")
1228 }
1229}
1230
1231impl<T> IndexMut<RingBufferIndex> for RingBuffer<T> {
1232 #[inline]
1233 fn index_mut(&mut self, index: RingBufferIndex) -> &mut T {
1234 self.get_absolute_mut(index).expect("Out of bounds access")
1235 }
1236}
1237
1238impl<T> IntoIterator for RingBuffer<T> {
1239 type Item = T;
1240 type IntoIter = IntoIter<T>;
1241
1242 /// Consumes the `RingBuffer` into a front-to-back iterator yielding elements by
1243 /// value.
1244 fn into_iter(self) -> Self::IntoIter {
1245 IntoIter { inner: self }
1246 }
1247}
1248
1249impl<'a, T> IntoIterator for &'a RingBuffer<T> {
1250 type Item = &'a T;
1251 type IntoIter = Iter<'a, T>;
1252
1253 fn into_iter(self) -> Self::IntoIter {
1254 self.iter()
1255 }
1256}
1257
1258impl<'a, T> IntoIterator for &'a mut RingBuffer<T> {
1259 type Item = &'a mut T;
1260 type IntoIter = IterMut<'a, T>;
1261
1262 fn into_iter(self) -> Self::IntoIter {
1263 self.iter_mut()
1264 }
1265}
1266
1267impl<T> Ord for RingBuffer<T>
1268where
1269 T: Ord,
1270{
1271 fn cmp(&self, other: &RingBuffer<T>) -> Ordering {
1272 self.iter().cmp(other.iter())
1273 }
1274}
1275
1276impl<T> PartialEq<RingBuffer<T>> for RingBuffer<T>
1277where
1278 T: PartialEq,
1279{
1280 fn eq(&self, other: &RingBuffer<T>) -> bool {
1281 if (self.len == other.len) & (self.tail == other.tail) & (self.head == other.head) {
1282 let mut other_iter = other.iter();
1283 for elem in self.iter() {
1284 if elem != other_iter.next().unwrap() {
1285 return false;
1286 }
1287 }
1288 true
1289 } else {
1290 false
1291 }
1292 }
1293}
1294
1295impl<T> PartialOrd<RingBuffer<T>> for RingBuffer<T>
1296where
1297 T: PartialOrd,
1298{
1299 fn partial_cmp(&self, other: &RingBuffer<T>) -> Option<Ordering> {
1300 self.iter().partial_cmp(other.iter())
1301 }
1302}
1303
// SAFETY: the buffer exclusively owns its heap allocation (`data` plus `PhantomData<T>`
// ownership marker), so moving it to another thread is safe whenever `T: Send`.
// NOTE(review): relies on no API leaking the raw pointer to shared state — confirm.
unsafe impl<T> Send for RingBuffer<T> where T: Send {}
// SAFETY: shared access only hands out `&T`, so sharing the buffer between threads is
// safe exactly when `T: Sync`.
unsafe impl<T> Sync for RingBuffer<T> where T: Sync {}
1306
/// An iterator over the elements of a `RingBuffer`.
///
/// This `struct` is created by the [`iter`] method on [`RingBuffer`]. See its
/// documentation for more.
///
/// [`iter`]: crate::RingBuffer::iter
/// [`RingBuffer`]: crate::RingBuffer
pub struct Iter<'a, T: 'a> {
    /// The backing storage (presumably the full power-of-two capacity slice: `next`
    /// masks positions by `buffer.len() - 1` — TODO confirm at the construction site).
    buffer: &'a [T],
    /// How many elements are still left to yield.
    len: usize,
    /// Physical position of the next element to yield.
    index: usize,
}
1319
1320impl<T> Iter<'_, T> {
1321 fn as_slices(&self) -> (&[T], &[T]) {
1322 if (self.index + self.len) > self.buffer.len() {
1323 let remaining_len = self.index + self.len - self.buffer.len();
1324 (&self.buffer[self.index..], &self.buffer[..remaining_len])
1325 } else {
1326 (&self.buffer[self.index..self.index + self.len], &[])
1327 }
1328 }
1329}
1330
1331impl<T> Clone for Iter<'_, T> {
1332 fn clone(&self) -> Self {
1333 Iter {
1334 buffer: self.buffer,
1335 len: self.len,
1336 index: self.index,
1337 }
1338 }
1339}
1340
1341impl<T> Debug for Iter<'_, T>
1342where
1343 T: Debug,
1344{
1345 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1346 let (front, back) = self.as_slices();
1347 f.debug_tuple("Iter").field(&front).field(&back).finish()
1348 }
1349}
1350
1351impl<'a, T> Iterator for Iter<'a, T>
1352where
1353 T: 'a,
1354{
1355 type Item = &'a T;
1356
1357 #[inline]
1358 fn next(&mut self) -> Option<Self::Item> {
1359 if self.len > 0 {
1360 let current_index = self.index;
1361 self.index = (self.index + 1) & (self.buffer.len() - 1);
1362 self.len -= 1;
1363
1364 unsafe { Some(self.buffer.get_unchecked(current_index)) }
1365 } else {
1366 None
1367 }
1368 }
1369
1370 #[inline]
1371 fn size_hint(&self) -> (usize, Option<usize>) {
1372 (self.len, Some(self.len))
1373 }
1374}
1375
// `size_hint` is exact, so the iterator can report a precise `len()`.
impl<T> ExactSizeIterator for Iter<'_, T> {}

// Once `next` returns `None` (`len == 0`), it keeps returning `None` forever.
impl<T> FusedIterator for Iter<'_, T> {}
1379
/// A mutable iterator over the elements of a `RingBuffer`.
///
/// This `struct` is created by the [`iter_mut`] method on [`RingBuffer`]. See its
/// documentation for more.
///
/// [`iter_mut`]: crate::RingBuffer::iter_mut
/// [`RingBuffer`]: crate::RingBuffer
pub struct IterMut<'a, T: 'a> {
    /// The backing storage (presumably the full power-of-two capacity slice: `next`
    /// masks positions by `buffer.len() - 1` — TODO confirm at the construction site).
    buffer: &'a mut [T],
    /// How many elements are still left to yield.
    len: usize,
    /// Physical position of the next element to yield.
    index: usize,
}
1392
1393impl<T> IterMut<'_, T> {
1394 fn as_slices(&self) -> (&[T], &[T]) {
1395 if (self.index + self.len) > self.buffer.len() {
1396 let remaining_len = self.index + self.len - self.buffer.len();
1397 (&self.buffer[self.index..], &self.buffer[..remaining_len])
1398 } else {
1399 (&self.buffer[self.index..self.index + self.len], &[])
1400 }
1401 }
1402}
1403
1404impl<T> Debug for IterMut<'_, T>
1405where
1406 T: Debug,
1407{
1408 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1409 let (front, back) = self.as_slices();
1410 f.debug_tuple("IterMut").field(&front).field(&back).finish()
1411 }
1412}
1413
impl<'a, T> Iterator for IterMut<'a, T>
where
    T: 'a,
{
    type Item = &'a mut T;

    /// Yields a mutable reference to the next element in queue order, wrapping over
    /// the physical end of the storage.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.len > 0 {
            let current_index = self.index;
            // Power-of-two wrap over the backing storage.
            self.index = (self.index + 1) & (self.buffer.len() - 1);
            self.len -= 1;

            unsafe {
                let elem = self.buffer.get_unchecked_mut(current_index);
                // and now for some black magic
                // the std stuff does this too, but afaik this breaks the borrow checker
                // SAFETY(review): the raw-pointer round trip detaches the reference from
                // the `&mut self` borrow so it can live for 'a; this is sound only if
                // each position is yielded at most once, i.e. `len <= buffer.len()` at
                // construction — confirm that invariant holds.
                Some(&mut *(elem as *mut T))
            }
        } else {
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: the remaining count is tracked precisely.
        (self.len, Some(self.len))
    }
}
1443
// `size_hint` is exact, so the iterator can report a precise `len()`.
impl<T> ExactSizeIterator for IterMut<'_, T> {}

// Once `next` returns `None` (`len == 0`), it keeps returning `None` forever.
impl<T> FusedIterator for IterMut<'_, T> {}
1447
/// An owning iterator over the elements of a `RingBuffer`.
///
/// This `struct` is created by the [`into_iter`] method on [`RingBuffer`]. See its
/// documentation for more.
///
/// [`into_iter`]: crate::RingBuffer::into_iter
/// [`RingBuffer`]: crate::RingBuffer
#[derive(Clone)]
pub struct IntoIter<T> {
    /// The buffer being consumed; `next` pops its front element.
    inner: RingBuffer<T>,
}
1459
1460impl<T> Debug for IntoIter<T>
1461where
1462 T: Debug,
1463{
1464 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1465 let (front, back) = self.inner.as_slices();
1466 f.debug_tuple("OwnedIter")
1467 .field(&front)
1468 .field(&back)
1469 .finish()
1470 }
1471}
1472
1473impl<T> Iterator for IntoIter<T> {
1474 type Item = T;
1475
1476 #[inline]
1477 fn next(&mut self) -> Option<Self::Item> {
1478 self.inner.pop_front()
1479 }
1480
1481 #[inline]
1482 fn size_hint(&self) -> (usize, Option<usize>) {
1483 (self.inner.len, Some(self.inner.len))
1484 }
1485}
1486
// `size_hint` is exact, so the iterator can report a precise `len()`.
impl<T> ExactSizeIterator for IntoIter<T> {}

// Once the inner buffer is empty, `pop_front` keeps returning `None`.
impl<T> FusedIterator for IntoIter<T> {}
1490
1491#[cfg(test)]
1492mod tests {
1493 use crate::{RingBuffer, RingBufferIndex};
1494
    /// End-to-end check that absolute indices stay valid across growth, wrap-around and
    /// a reallocation that straightens out a wrapped buffer.
    #[test]
    fn access_and_reallocations() {
        let mut buffer = RingBuffer::<usize>::with_capacity(2);
        let mut indices = Vec::with_capacity(8);
        assert_eq!(buffer.capacity, 2, "Initialised with too much capacity");

        // Phase 1: Filling the buffer and trigger extensions
        for i in 1..=8 {
            indices.push(buffer.push_back(i));
        }
        // With no wrap-around yet, absolute indices resolve to plain positions 0..=7.
        assert!(indices[0].eq(
            &buffer,
            RingBufferIndex {
                base_index: 0,
                wrap_offset: 0
            }
        ));
        assert!(indices[7].eq(
            &buffer,
            RingBufferIndex {
                base_index: 7,
                wrap_offset: 0
            }
        ));
        assert_eq!(buffer.capacity, 8);
        assert_eq!(buffer.as_slices(), (&[1, 2, 3, 4, 5, 6, 7, 8][..], &[][..]));
        for i in 0..8 {
            assert_eq!(buffer.get_relative(i), Some(&(i + 1)));
            assert_eq!(buffer.get_absolute(indices[i]), Some(&(i + 1)));
        }

        // Phase 2: Wrap around and trigger extension
        indices.clear();
        // Record the expected indices of the surviving elements 5..=8 by hand while
        // popping the first four elements.
        for i in 1..=4 {
            indices.push(RingBufferIndex {
                base_index: i + 3,
                wrap_offset: 0,
            });
            assert_eq!(buffer.pop_front(), Some(i));
        }

        // These pushes reuse the freed slots at the start of the storage, wrapping.
        for i in 9..=12 {
            indices.push(buffer.push_back(i));
        }
        assert!(indices[0].eq(
            &buffer,
            RingBufferIndex {
                base_index: 4,
                wrap_offset: 0
            }
        ));
        assert!(indices[3].eq(
            &buffer,
            RingBufferIndex {
                base_index: 7,
                wrap_offset: 0
            }
        ));
        // The wrapped pushes carry a wrap offset equal to the capacity at push time.
        assert!(indices[4].eq(
            &buffer,
            RingBufferIndex {
                base_index: 0,
                wrap_offset: 8
            }
        ));
        assert!(indices[7].eq(
            &buffer,
            RingBufferIndex {
                base_index: 3,
                wrap_offset: 8
            }
        ));
        assert_eq!(buffer.capacity, 8);
        assert_eq!(
            buffer.as_slices(),
            (&[5, 6, 7, 8][..], &[9, 10, 11, 12][..])
        );

        // Growing a wrapped buffer relocates the wrapped run; old indices must still
        // resolve correctly afterwards.
        for i in 13..=16 {
            indices.push(buffer.push_back(i));
        }
        assert!(indices[8].eq(
            &buffer,
            RingBufferIndex {
                base_index: 12,
                wrap_offset: 0
            }
        ));
        assert!(indices[11].eq(
            &buffer,
            RingBufferIndex {
                base_index: 15,
                wrap_offset: 0
            }
        ));
        assert_eq!(buffer.capacity, 16);
        assert_eq!(
            buffer.as_slices(),
            (&[5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16][..], &[][..])
        );

        // Every element must be reachable both relatively and absolutely.
        for i in 0..12 {
            assert_eq!(buffer.get_relative(i), Some(&(i + 5)));
            assert_eq!(buffer.get_absolute(indices[i]), Some(&(i + 5)));
        }
    }
1601}