//! The `simple_ringbuf` crate provides a lightweight (zero-dependency) ring buffer backed by
//! a manually memory-managed buffer (using `unsafe`). Serialization support is optionally
//! available via the `serde` feature flag.
//!
//! The sole feature of this crate is the [`RingBuffer`] struct, a fixed-size collection
//! with an API somewhat similar to the standard Rust collections.
//!
//! The primary intention of this crate is to provide a cheap *fixed-size* collection for
//! buffering a finite horizon of data. It was originally written as an action-history log
//! for a bot, and suits similar concepts such as undo logs. In fact, the iterator
//! and `Index` implementations for this struct assume you want to iterate from the "top" of the
//! deque (newest to oldest), and they don't allow mutation of elements at this time (though
//! pushing and popping from both ends is supported).
//!
//! Most existing ring buffers for Rust, such as the [`ringbuf`] crate or the standard library's
//! own [`VecDeque`], are specialized for different uses and may fit your needs better.
//!
//! [`RingBuffer`]: struct.RingBuffer.html
//! [`ringbuf`]: https://crates.io/crates/ringbuf
//! [`VecDeque`]: https://doc.rust-lang.org/std/collections/struct.VecDeque.html
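//!
//! A minimal usage sketch (mirroring the behaviour documented on [`RingBuffer`]'s methods):
//!
//! ```rust
//! use simple_ringbuf::RingBuffer;
//!
//! // A three-entry history: once full, new pushes evict the oldest entry.
//! let mut history = RingBuffer::new(3);
//! history.push("open file");
//! history.push("edit line");
//! history.push("save");
//!
//! // Index 0 is always the most recent entry.
//! assert_eq!(history[0], "save");
//!
//! // Pushing past capacity returns the evicted (oldest) entry.
//! assert_eq!(history.push("close file"), Some("open file"));
//!
//! // Popping removes the newest entry, like an undo stack.
//! assert_eq!(history.pop(), Some("close file"));
//! ```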

mod impls;
pub mod iter;
mod raw;

use iter::RingBufIter;
use raw::RawRingBuffer;

use std::iter::{ExactSizeIterator, IntoIterator};
use std::ptr;

/// The side of the ring buffer we're operating on
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Side {
    Beginning,
    End,
}

/// A fixed-sized collection meant to hold a limited horizon of generic data.
/// When filled to capacity, older data will be overwritten.
///
/// Indexing and iterating over this collection will start from the *most recent*
/// element (assuming you primarily use [`push`]).
/// However, all iterators implement [`ExactSizeIterator`] and
/// [`DoubleEndedIterator`], so they can be trivially traversed backwards with
/// the [`rev`] method.
///
/// Be aware that the [`Eq`] and [`PartialEq`] implementations for this struct do not care
/// about buffer *rotation* (where in the ring a sequence starts), but they *do* check if two
/// buffers have the same capacity. So two buffers holding the sequence `[1,2,3]` will not compare
/// as equal if one can hold 10 elements and the other can hold 20. If you want to compare these, a
/// convenience method called [`elem_equal`] is provided.
///
///
/// [`DoubleEndedIterator`]: https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html
/// [`ExactSizeIterator`]: https://doc.rust-lang.org/std/iter/trait.ExactSizeIterator.html
/// [`rev`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.rev
/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html
/// [`elem_equal`]: struct.RingBuffer.html#method.elem_equal
/// [`push`]: struct.RingBuffer.html#method.push
pub struct RingBuffer<T> {
    buf: RawRingBuffer<T>,
    len: usize,
    // for ZSTs, this differs from the raw buffer's internal capacity
    cap: usize,
    begin: usize,
    end: usize,
}

impl<T> RingBuffer<T> {
    /// Returns a pointer to the head of the raw backing
    /// buffer
    fn ptr(&self) -> *mut T {
        self.buf.ptr.as_ptr()
    }

    /// The capacity of the buffer. This is the most
    /// data the buffer can store before it starts
    /// overwriting older entries.
    pub fn cap(&self) -> usize {
        self.cap
    }

    /// The length of the buffer. This is the number
    /// of elements currently stored. When `len` and [`cap`]
    /// are equal, the buffer will begin overwriting older
    /// entries.
    ///
    /// [`cap`]: struct.RingBuffer.html#method.cap
    pub fn len(&self) -> usize {
        self.len
    }

    /// Creates a new [`RingBuffer`] with the given capacity.
    /// The buffer will not grow without explicit calls to
    /// [`resize`].
    ///
    /// [`RingBuffer`]: struct.RingBuffer.html
    /// [`resize`]: struct.RingBuffer.html#method.resize
    pub fn new(cap: usize) -> Self {
        // `end` starts at `cap - 1` so the first push lands at index 0;
        // a zero capacity would underflow here (and be useless anyway).
        assert!(cap > 0, "RingBuffer capacity must be non-zero");
        RingBuffer {
            buf: RawRingBuffer::new(cap),
            cap,
            len: 0,
            begin: 0,
            end: cap - 1,
        }
    }

    /// Creates a new [`RingBuffer`] that *exactly* fits
    /// the data in the provided iterator. The iterator must report an exact
    /// size, otherwise we cannot infer the capacity.
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let buf = RingBuffer::from_exact_size_iter(vec![1,2,3]);
    /// assert!(buf.is_justified());
    ///
    /// let mut cmp = RingBuffer::new(3);
    /// cmp.push(1);
    /// cmp.push(2);
    /// cmp.push(3);
    ///
    /// assert_eq!(buf, cmp);
    /// ```
    ///
    /// [`RingBuffer`]: struct.RingBuffer.html
    pub fn from_exact_size_iter<I, U>(iter: I) -> Self
    where
        I: IntoIterator<Item = T, IntoIter = U>,
        U: ExactSizeIterator<Item = T>,
    {
        let iter = iter.into_iter();
        let cap = iter.len();
        let mut me = Self::new(cap);

        for el in iter {
            me.push(el);
        }

        me
    }

    /// Creates a new [`RingBuffer`] with capacity `cap` that fills itself with
    /// at *most* `cap` elements from the provided iterator. If any elements of the iterator are
    /// left over at the end, an iterator yielding them will be returned.
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let (buf, rem) = RingBuffer::from_iter_cap(vec![1,2,3], 2);
    /// assert!(buf.is_justified());
    ///
    /// let mut cmp = RingBuffer::new(2);
    /// cmp.push(1);
    /// cmp.push(2);
    ///
    /// assert_eq!(buf, cmp);
    ///
    /// assert!(rem.is_some());
    /// let rem = rem.unwrap();
    /// assert_eq!(vec![3], rem.collect::<Vec<_>>());
    /// ```
    ///
    /// [`RingBuffer`]: struct.RingBuffer.html
    pub fn from_iter_cap<I>(iter: I, cap: usize) -> (Self, Option<impl Iterator<Item = T>>)
    where
        I: IntoIterator<Item = T>,
    {
        let mut iter = iter.into_iter();
        let mut me = Self::new(cap);

        for el in iter.by_ref().take(cap) {
            me.push(el);
        }

        let next = iter.next();
        let remainder = if next.is_some() {
            Some(next.into_iter().chain(iter))
        } else {
            None
        };

        (me, remainder)
    }

    /// Pushes an element onto the collection. This is intended to be
    /// the primary means of using this as a log/undo buffer.
    ///
    /// If the buffer is full, this will overwrite the element at `buf[len-1]`, which is the "oldest"
    /// element if you generally use the collection as designed.
    ///
    /// If this method overwrites an existing element it will return it, otherwise it will
    /// return [`None`].
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(3);
    ///
    /// buf.push(1);
    /// buf.push(2);
    /// buf.push(3);
    ///
    /// assert_eq!(buf[0], 3);
    /// assert_eq!(buf[1], 2);
    /// assert_eq!(buf[2], 1);
    ///
    /// let el = buf.push(10);
    /// assert_eq!(el, Some(1));
    /// assert_eq!(buf[0], 10);
    /// assert_eq!(buf[1], 3);
    /// assert_eq!(buf[2], 2);
    /// ```
    ///
    /// [`push`]: struct.RingBuffer.html#method.push
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
    pub fn push(&mut self, elem: T) -> Option<T> {
        if self.is_full() {
            unsafe {
                // We're full, so overwrite the oldest member (buffer's current beginning)
                let offset = self.ptr().add(self.begin);

                let prev_val = ptr::read(offset);
                ptr::write(offset, elem);
                self.rotate(1, Side::End);

                Some(prev_val)
            }
        } else {
            let offset = (self.end + 1) % self.cap;
            unsafe {
                let offset = self.ptr().add(offset);
                ptr::write(offset, elem);
            }

            self.grow(1, Side::End);
            None
        }
    }

    /// Pushes an element as if it were the first element added. If the buffer
    /// is full, this will overwrite the element at `buf[0]`, which is the most
    /// recently added element (assuming you use the collection as designed,
    /// defaulting to [`push`] in most cases).
    ///
    /// This is not intended to be the primary method of adding data, but rather reinserting
    /// an element you've removed or otherwise rewinding history.
    ///
    /// If this method overwrites an existing element, it will return it. Otherwise it will
    /// return [`None`].
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(3);
    ///
    /// buf.push(1);
    /// buf.push(2);
    /// buf.push(3);
    ///
    /// assert_eq!(buf[0], 3);
    /// assert_eq!(buf[1], 2);
    /// assert_eq!(buf[2], 1);
    ///
    /// let el = buf.push(10);
    /// assert!(el.is_some());
    /// let el = el.unwrap();
    ///
    /// let el = buf.push_back(el);
    /// assert_eq!(el, Some(10));
    /// assert_eq!(buf[0], 3);
    /// assert_eq!(buf[1], 2);
    /// assert_eq!(buf[2], 1);
    /// ```
    /// [`push`]: struct.RingBuffer.html#method.push
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
    pub fn push_back(&mut self, elem: T) -> Option<T> {
        if self.is_full() {
            unsafe {
                let offset = self.ptr().add(self.end);

                let prev_val = ptr::read(offset);
                ptr::write(offset, elem);
                self.rotate(1, Side::Beginning);

                Some(prev_val)
            }
        } else {
            let offset = sub_rem(self.begin, 1, self.cap);

            unsafe {
                let offset = self.ptr().add(offset);
                ptr::write(offset, elem);
            }

            self.grow(1, Side::Beginning);
            None
        }
    }

    /// Removes the most recently added element from the collection and
    /// returns it, shrinking the buffer from the end.
    ///
    /// Note that if you've been using [`push_back`], this won't necessarily
    /// be the most recently added element, but rather the element at
    /// the [`end`] of the collection (i.e. at `buf[0]`).
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// buf.push(5);
    /// buf.push(10);
    /// buf.push(20);
    ///
    /// assert_eq!(buf.pop(), Some(20));
    /// assert_eq!(buf.len(), 2);
    /// ```
    /// [`push_back`]: struct.RingBuffer.html#method.push_back
    /// [`end`]: struct.RingBuffer.html#method.end
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }

        let elem = unsafe {
            let offset = self.ptr().add(self.end);
            ptr::read(offset)
        };

        self.shrink(1, Side::End);

        Some(elem)
    }

    /// Removes the oldest element from the collection and returns it,
    /// shrinking the buffer from the beginning.
    ///
    /// Note that if you've been using [`push_back`], this won't necessarily be the oldest
    /// element, but rather the element at the [`beginning`] of the collection (`buf[len-1]`).
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// buf.push(5);
    /// buf.push(10);
    /// buf.push(20);
    ///
    /// assert_eq!(buf.pop_oldest(), Some(5));
    /// assert_eq!(buf.len(), 2);
    /// ```
    ///
    /// [`push_back`]: struct.RingBuffer.html#method.push_back
    /// [`beginning`]: struct.RingBuffer.html#method.beginning
    pub fn pop_oldest(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }

        let elem = unsafe {
            let offset = self.ptr().add(self.begin);
            ptr::read(offset)
        };

        self.shrink(1, Side::Beginning);

        Some(elem)
    }

    /// Determines whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Determines whether the buffer is filled to capacity. If this is
    /// true, any subsequent pushes will overwrite an existing element.
    pub fn is_full(&self) -> bool {
        self.len == self.cap
    }

    /// Returns an iterator over this buffer.
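    ///
    /// All iterators over the buffer are [`ExactSizeIterator`]s and run newest-to-oldest,
    /// matching the indexing order. A small sketch (the element comparison assumes the
    /// iterator yields references, i.e. `&T`):
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(3);
    /// buf.push(1);
    /// buf.push(2);
    /// buf.push(3);
    ///
    /// assert_eq!(buf.iter().len(), buf.len());
    ///
    /// // Iteration order matches indexing order: most recent element first.
    /// for (i, el) in buf.iter().enumerate() {
    ///     assert_eq!(*el, buf[i]);
    /// }
    /// ```
    ///
    /// [`ExactSizeIterator`]: https://doc.rust-lang.org/std/iter/trait.ExactSizeIterator.html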
    pub fn iter(&self) -> RingBufIter<T> {
        RingBufIter::new(self)
    }

    /// Forces a buffer to be "justified".
    /// This means the first element is at the beginning of
    /// the actual underlying buffer. Once a buffer gets full, or if you frequently use [`pop_oldest`],
    /// this will rarely be true.
    ///
    /// Justifying a buffer is largely useful for internal operations such as [`resize`], but may
    /// be useful if you require the elements to be in a contiguous memory block for some reason.
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    ///
    /// buf.push(19);
    /// buf.push(26);
    /// buf.push(113);
    ///
    /// assert!(buf.is_justified());
    ///
    /// // Buf now has a gap
    /// buf.pop_oldest();
    /// assert!( !buf.is_justified() );
    ///
    /// buf.justify();
    /// assert!(buf.is_justified());
    /// ```
    ///
    /// [`resize`]: struct.RingBuffer.html#method.resize
    /// [`pop_oldest`]: struct.RingBuffer.html#method.pop_oldest
    pub fn justify(&mut self) {
        use std::mem;

        if self.len == 0 || self.begin == 0 {
            return;
        }

        if mem::size_of::<T>() == 0 {
            self.begin = 0;
            self.end = self.len - 1;
            return;
        }

        if self.end >= self.begin {
            unsafe {
                let src = self.buf.ptr.as_ptr().add(self.begin);
                let dst = self.buf.ptr.as_ptr();

                ptr::copy(src, dst, self.len);
                self.begin = 0;
                self.end = self.len - 1;
            }
        } else {
            unsafe {
                let ptr = self.buf.ptr.as_ptr();
                let tmp_buf = RawRingBuffer::new(self.end + 1);
                let tmp_ptr = tmp_buf.ptr.as_ptr();

                ptr::copy_nonoverlapping(ptr, tmp_ptr, tmp_buf.cap);

                let begin_size = self.cap - self.begin;
                let begin = ptr.add(self.begin);

                ptr::copy(begin, ptr, begin_size);

                let end_ptr = ptr.add(begin_size);
                ptr::copy_nonoverlapping(tmp_ptr, end_ptr, tmp_buf.cap);

                self.begin = 0;
                self.end = self.len - 1;
            }
        }
    }

    /// Determines whether the buffer is "contiguous", meaning
    /// the end of the buffer comes at or after the beginning in
    /// the internal memory layout, i.e. [`beginning`] <= [`end`].
    ///
    /// This likely isn't useful in most cases, but if you want to do unsafe hacky things, or
    /// want to make sure the buffer is contiguous before iterating (for cache reasons, say),
    /// you may decide to call this before deciding whether to call [`justify`]. (This likely
    /// won't have any appreciable effect on speed, especially while iterating in the default order.)
    ///
    /// An empty buffer is always contiguous.
    ///
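    /// A quick sketch (the values are chosen so the data wraps around the internal buffer):
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(3);
    /// buf.push(1);
    /// buf.push(2);
    /// buf.push(3);
    /// buf.push(4); // full: overwrites the oldest element and wraps
    ///
    /// assert!(!buf.is_contiguous());
    /// buf.justify();
    /// assert!(buf.is_contiguous());
    /// ```
    ///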
    /// [`beginning`]: struct.RingBuffer.html#method.beginning
    /// [`end`]: struct.RingBuffer.html#method.end
    /// [`justify`]: struct.RingBuffer.html#method.justify
    pub fn is_contiguous(&self) -> bool {
        self.len == 0 || self.begin <= self.end
    }

    /// Shows where in the internal buffer the current data begins;
    /// not particularly useful, but here for convenience. This will return
    /// [`None`] when the buffer is empty. Note that due to how indexing works,
    /// the element stored here is `buf[len-1]`, while `buf[0]` yields the newest element added.
    ///
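    /// An illustrative sketch of how the internal offset moves:
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// assert_eq!(buf.beginning(), None);
    ///
    /// buf.push(1);
    /// buf.push(2);
    /// assert_eq!(buf.beginning(), Some(0));
    ///
    /// // Popping the oldest element advances where the data begins.
    /// buf.pop_oldest();
    /// assert_eq!(buf.beginning(), Some(1));
    /// ```
    ///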
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
    pub fn beginning(&self) -> Option<usize> {
        if self.is_empty() {
            None
        } else {
            Some(self.begin)
        }
    }

    /// Shows where in the internal buffer the current data ends;
    /// not particularly useful, but here for convenience. This will return
    /// [`None`] when the buffer is empty. Note that due to how indexing works,
    /// the element stored here is `buf[0]`, while `buf[len-1]` yields the oldest element added.
    ///
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
    pub fn end(&self) -> Option<usize> {
        if self.is_empty() {
            None
        } else {
            Some(self.end)
        }
    }

    /// Determines whether the buffer is "justified",
    /// meaning the first element is at the beginning of
    /// the actual underlying buffer. Once a buffer gets full, or if you frequently use [`pop_oldest`],
    /// this will rarely be true.
    ///
    /// An empty buffer is always justified.
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    /// let mut buf = RingBuffer::new(5);
    ///
    /// buf.push(19);
    /// buf.push(26);
    /// buf.push(113);
    ///
    /// assert!(buf.is_justified());
    ///
    /// buf.pop();
    /// assert!(buf.is_justified());
    ///
    /// // Buf now has a gap
    /// buf.pop_oldest();
    /// assert!( !buf.is_justified() );
    ///
    /// // Empty buffer
    /// buf.pop();
    /// assert!(buf.is_justified());
    /// ```
    ///
    /// This is largely useful for internal operations such as [`resize`].
    /// If you *really* need to check for something that will aid, e.g., iteration
    /// speed, or you want to do unsafe hacky things, [`is_contiguous`] is probably
    /// closer to what you want.
    ///
    /// [`resize`]: struct.RingBuffer.html#method.resize
    /// [`pop_oldest`]: struct.RingBuffer.html#method.pop_oldest
    /// [`is_contiguous`]: struct.RingBuffer.html#method.is_contiguous
    pub fn is_justified(&self) -> bool {
        self.begin == 0
    }

    /// Resizes the buffer such that it can hold more or fewer total elements.
    /// This will panic if the new capacity is smaller than the current length.
    ///
    /// The new buffer will be [`justified`].
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// buf.push(2);
    /// buf.push(3);
    ///
    /// buf.resize(2);
    /// assert_eq!(buf.cap(), 2);
    ///
    /// buf.pop_oldest();
    /// buf.resize(9);
    ///
    /// assert_eq!(buf.cap(), 9);
    /// assert_eq!(buf.len(), 1);
    /// assert!(buf.is_justified());
    /// ```
    ///
    /// [`justified`]: struct.RingBuffer.html#method.justify
    pub fn resize(&mut self, new_cap: usize) {
        assert!(
            new_cap >= self.len,
            "New capacity {} is smaller than current buffer size {}",
            new_cap,
            self.len
        );

        self.justify();
        self.buf.resize(new_cap);
        self.cap = new_cap;
    }

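    /// Shrinks the capacity down to the current length, i.e. `resize(self.len())`.
    /// Like [`resize`], this leaves the buffer [`justified`].
    ///
    /// A small usage sketch:
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// buf.push(1);
    /// buf.push(2);
    ///
    /// buf.shrink_to_fit();
    /// assert_eq!(buf.cap(), 2);
    /// assert_eq!(buf.len(), 2);
    /// ```
    ///
    /// [`resize`]: struct.RingBuffer.html#method.resize
    /// [`justified`]: struct.RingBuffer.html#method.justify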
    pub fn shrink_to_fit(&mut self) {
        self.resize(self.len);
    }

    /// This method is *not* related to [`resize`]; instead it shrinks the *internal* length
    /// by some amount, from either side of the buffer, essentially "forgetting" a value
    /// (in all cases the element there should have been read out or dropped beforehand).
    ///
    /// [`resize`]: struct.RingBuffer.html#method.resize
    fn shrink(&mut self, n: usize, side: Side) {
        debug_assert!(n > 0, "Shrink by 0 (internal error)");
        debug_assert_ne!(
            self.len, 0,
            "Attempting to shrink 0-size buffer (internal error)"
        );
        debug_assert!(
            n <= self.len,
            "Attempting to shrink more than our current size (internal error)"
        );

        self.len -= n;

        // If we're empty, best to just reset to the top of the buffer
        if self.len == 0 {
            self.end = self.cap - 1;
            self.begin = 0;
        } else {
            match side {
                Side::End => {
                    self.end = sub_rem(self.end, n, self.cap);
                }
                Side::Beginning => self.begin = (self.begin + n) % self.cap,
            }
        }
    }

    /// Circularly calculates backwards from the buffer's end.
    fn offset_backwards(&self, n: usize) -> usize {
        sub_rem(self.end, n, self.cap)
    }

    /// This method is *not* related to [`resize`], instead it grows the *internal* length
    /// by some amount, up until it reaches the capacity.
    ///
    /// [`resize`]: struct.RingBuffer.html#method.resize
    fn grow(&mut self, n: usize, side: Side) {
        debug_assert!(n > 0, "Growth by 0 (internal error)");
        debug_assert!(
            self.len + n <= self.cap,
            "Growing buffer past capacity \
             (this should never appear externally, this means internally calls were not \
             properly split between grow and rotate, file a bug report)"
        );

        match side {
            Side::End => {
                self.end = (self.end + n) % self.cap;
                self.len += n;
            }
            Side::Beginning => {
                self.begin = sub_rem(self.begin, n, self.cap);
                self.len += n;
            }
        }
    }

    /// "Rotation" is an operation that just means we cycle the starting and ending
    /// points of the ring by so many steps to the left or right, without affecting the number of
    /// elements. This particular method is only used when the buffer is full (otherwise
    /// you get uninitialized or dropped values).
    fn rotate(&mut self, n: usize, side: Side) {
        debug_assert!(n > 0, "Rotation of 0 (internal error)");
        debug_assert!(self.is_full(), "Rotation when not full (internal error)");

        match side {
            Side::End => {
                self.end = (self.end + n) % self.cap;
                self.begin = (self.begin + n) % self.cap;
            }
            Side::Beginning => {
                self.begin = sub_rem(self.begin, n, self.cap);
                self.end = sub_rem(self.end, n, self.cap);
            }
        }
    }
}

impl<T> RingBuffer<T>
where
    T: PartialEq,
{
    /// Performs an element-wise comparison between two buffers, ignoring their capacities.
    /// If your buffers have the same capacity you probably just want `==`.
    ///
    /// ```rust
    /// use simple_ringbuf::RingBuffer;
    ///
    /// let mut buf = RingBuffer::new(5);
    /// buf.push(1);
    /// buf.push(2);
    /// buf.push(3);
    ///
    /// let mut buf2 = RingBuffer::new(10);
    /// buf2.push(1);
    /// buf2.push(2);
    /// buf2.push(3);
    ///
    /// assert_ne!(buf, buf2); // Not the same!
    /// assert!(buf.elem_equal(&buf2)); // This works though
    /// ```
    pub fn elem_equal(&self, other: &Self) -> bool {
        self.len == other.len && self.iter().zip(other.iter()).all(|(e1, e2)| e1 == e2)
    }
}

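/// Computes `(n - sub) mod div` without underflowing, assuming `n < div` and `sub <= div`
/// (see the debug assertions below). Used to step indices backwards around the ring.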
fn sub_rem(n: usize, sub: usize, div: usize) -> usize {
    debug_assert!(n < div, "n ({}) should already be modulo div ({})", n, div);
    debug_assert!(
        sub <= div,
        "sub_rem not meant to be used with sub ({}) > div ({})",
        sub,
        div
    );

    if sub <= n {
        n - sub
    } else {
        div - (sub - n)
    }
}

#[cfg(test)]
mod test {
    #[test]
    fn sub_rem() {
        use super::sub_rem;

        assert_eq!(sub_rem(4, 1, 5), 3);

        assert_eq!(sub_rem(0, 1, 5), 4);

        assert_eq!(sub_rem(2, 3, 5), 4);

        assert_eq!(sub_rem(3, 3, 5), 0);
    }
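
    #[test]
    fn push_pop_wraparound() {
        // A sanity-check sketch mirroring the documented `push`/`pop` behaviour:
        // once the buffer is full, `push` returns the evicted (oldest) value.
        use super::RingBuffer;

        let mut buf = RingBuffer::new(3);
        assert_eq!(buf.push(1), None);
        assert_eq!(buf.push(2), None);
        assert_eq!(buf.push(3), None);

        // Full: the oldest element (1) is overwritten and handed back.
        assert_eq!(buf.push(4), Some(1));

        // `pop` removes the newest element, `pop_oldest` the oldest remaining one.
        assert_eq!(buf.pop(), Some(4));
        assert_eq!(buf.pop_oldest(), Some(2));
        assert_eq!(buf.len(), 1);
    }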
}