kinesin_rdt/common/ring_buffer.rs

// TODO: fix documentation
#![allow(clippy::missing_safety_doc)]

use std::marker::PhantomData;
use std::mem::{size_of, MaybeUninit};
use std::ops::{Bound, Range, RangeBounds};
use std::{ptr, slice};

/// ring buffer supporting batch copy in/out
///
/// elements occupy positions `head..head + len` of the backing `Vec`,
/// wrapping around at `capacity`
///
/// safety: probably not
pub struct RingBuf<T> {
    // buf: [-----<(head)=====(head + len)>--]
    buf: Vec<T>,
    head: usize,
    len: usize,
}

/// an immutable element range of a RingBuf
pub struct RingBufSlice<'a, T> {
    buf: &'a RingBuf<T>,
    start: usize,
    end: usize,
}

/// a mutable element range of a RingBuf
pub struct RingBufSliceMut<'a, T> {
    buf: *mut RingBuf<T>,
    start: usize,
    end: usize,
    // ties the raw pointer to the lifetime of the RingBuf it came from, so
    // the borrow checker treats this struct as a borrow of the buffer
    _marker: PhantomData<&'a RingBuf<T>>,
}

/// draining iterator implementation
pub struct Drain<'a, T> {
    /// associated RingBuf
    buf: &'a mut RingBuf<T>,
    /// index of next front element
    front: usize,
    /// index of next back element
    back: usize,
    /// remaining elements in iterator
    remaining: usize,
    /// head pointer before Drain creation
    prev_head: usize,
    // /// drain type
    // op_type: DrainType,
}

impl<T> RingBuf<T> {
    /// ensure T is not something strange
    const fn ensure_type_ok() {
        assert!(size_of::<T>() > 0, "cannot deal with zero-sized types");
    }

    /// create new buffer
    pub fn new() -> RingBuf<T> {
        Self::ensure_type_ok();
        RingBuf {
            buf: Vec::new(),
            head: 0,
            len: 0,
        }
    }

    /// create new buffer with preallocated capacity
    #[allow(clippy::uninit_vec)] // does not allow access to uninitialized regions
    pub fn with_capacity(capacity: usize) -> RingBuf<T> {
        Self::ensure_type_ok();
        let mut vec = Vec::with_capacity(capacity);
        // safety: uninitialized bytes are not leaked
        unsafe { vec.set_len(vec.capacity()) };
        RingBuf {
            buf: vec,
            head: 0,
            len: 0,
        }
    }

    /// max capacity before reallocating
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }

    /// length of buffer
    pub fn len(&self) -> usize {
        self.len
    }

    /// whether buffer is empty
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// obtain pointer to backing buffer
    fn ptr(&self) -> *mut T {
        self.buf.as_ptr() as *mut _
    }

    /// obtain pointer to raw offset into backing buffer
    unsafe fn ptr_at(&self, offset: usize) -> *mut T {
        self.ptr().add(offset)
    }

    /// copy elements within backing buffer
    unsafe fn copy(&mut self, src: usize, dst: usize, count: usize) {
        ptr::copy(self.ptr_at(src), self.ptr_at(dst), count);
    }

    /// non-overlapping copy elements within backing buffer
    unsafe fn copy_nonoverlapping(&mut self, src: usize, dst: usize, count: usize) {
        ptr::copy_nonoverlapping(self.ptr_at(src), self.ptr_at(dst), count);
    }

    /// obtain slice to elements with raw index
    unsafe fn buf_slice_at(&self, range: Range<usize>) -> &[T] {
        slice::from_raw_parts(self.ptr_at(range.start), range.end - range.start)
    }

    /// obtain mutable slice to elements with raw index
    ///
    /// safety warning: will absolutely spit out aliasing mutable references if
    /// asked wrongly
    #[allow(clippy::mut_from_ref)]
    unsafe fn buf_slice_at_mut(&self, range: Range<usize>) -> &mut [T] {
        slice::from_raw_parts_mut(self.ptr_at(range.start), range.end - range.start)
    }

    /// determine if elements in backing buffer are in a contiguous segment
    pub fn is_contiguous(&self) -> bool {
        // the tail did not wrap iff head + len <= capacity, rearranged here to
        // avoid overflow
        self.head <= self.capacity() - self.len
    }

    /// get offset into backing buffer from element index
    #[inline]
    fn offset_of(&self, index: usize) -> usize {
        self.offset_of_explicit(self.head, index)
    }

    /// get offset into backing buffer from element index and explicit head index
    fn offset_of_explicit(&self, head: usize, index: usize) -> usize {
        // equivalent to (head + index) % capacity, written to avoid both
        // overflow and an integer division
        debug_assert!(index < self.capacity(), "index cannot exceed capacity");
        let remaining = self.capacity() - index;
        if head < remaining {
            // does not wrap
            head + index
        } else {
            // does wrap
            head - remaining
        }
    }

    /// get offset into backing buffer of backwards element index
    fn offset_of_reverse(&self, negative_index: usize) -> usize {
        // equivalent to (head - negative_index) mod capacity, same idea as above
        debug_assert!(
            negative_index < self.capacity(),
            "index cannot exceed capacity"
        );
        if self.head >= negative_index {
            // does not wrap
            self.head - negative_index
        } else {
            // does wrap
            self.head + (self.capacity() - negative_index)
        }
    }

    /// handle resize of backing buffer to a larger capacity
    unsafe fn handle_buf_expand(&mut self, old_capacity: usize) {
        let new_capacity = self.capacity();

        if self.head <= old_capacity - self.len {
            // was contiguous, do nothing
        } else {
            let head_segment_len = old_capacity - self.head;
            let tail_segment_len = self.len - head_segment_len;

            if head_segment_len > tail_segment_len
                && new_capacity - old_capacity >= tail_segment_len
            {
                // we can fit the tail segment after the head segment
                // from: [==>------<======-----]
                // to:   [---------<========>--]
                self.copy_nonoverlapping(0, old_capacity, tail_segment_len);
            } else {
                // copy head segment to the end
                // from: [========>----<====---]
                // to:   [========>-------<====]
                let new_head = new_capacity - head_segment_len;
                self.copy(self.head, new_head, head_segment_len);
                self.head = new_head;
            }
        }
    }

    /// realign all elements so they are contiguous at the beginning of the buffer
    pub fn realign(&mut self) {
        if self.head == 0 {
            // already aligned, nothing to do
            return;
        }

        unsafe {
            if self.is_contiguous() {
                // copy to start
                self.copy(self.head, 0, self.len);
            } else {
                // copy head end to start
                // from: [===>-----<=====]
                // to:   [===><=====-----]
                let head_segment_len = self.capacity() - self.head;
                let tail_segment_len = self.len - head_segment_len;
                self.copy(self.head, tail_segment_len, head_segment_len);

                // rotate segment
                // from: [===><=====-----]
                // to:   [<========>-----]
                let slice = self.buf_slice_at_mut(0..self.len);
                slice.rotate_left(tail_segment_len);
            }

            self.head = 0;
        }
    }

    /// reserve space for at least `count` more elements
    #[allow(clippy::uninit_vec)] // does not allow access to uninitialized regions
    pub fn reserve(&mut self, count: usize) {
        let desired_capacity = self.len.checked_add(count).expect("capacity overflow");
        if desired_capacity > self.capacity() {
            let old_capacity = self.capacity();
            self.buf.reserve(desired_capacity - old_capacity);
            unsafe {
                self.buf.set_len(self.buf.capacity());
                self.handle_buf_expand(old_capacity);
            }
        }
    }

    /// reserve space for exactly `count` more elements (see Vec::reserve_exact)
    pub fn reserve_exact(&mut self, count: usize) {
        let desired_capacity = self.len.checked_add(count).expect("capacity overflow");
        if desired_capacity > self.capacity() {
            let old_capacity = self.capacity();
            self.buf.reserve_exact(desired_capacity - old_capacity);
            unsafe {
                self.buf.set_len(self.buf.capacity());
                self.handle_buf_expand(old_capacity);
            }
        }
    }

    /// shrink backing buffer to given capacity
    pub fn shrink_to(&mut self, target_capacity: usize) {
        assert!(
            target_capacity <= self.capacity(),
            "cannot shrink to a greater capacity (old: {}, requested: {})",
            self.capacity(),
            target_capacity
        );

        // ensure elements are aligned to start
        self.realign();
        let requested_capacity = usize::max(target_capacity, self.len);
        let old_capacity = self.capacity();

        unsafe {
            // request shrink to size
            self.buf.set_len(requested_capacity);
            self.buf.shrink_to(requested_capacity);
            // ensure correct size
            let new_capacity = self.buf.capacity();
            self.buf.set_len(new_capacity);
            debug_assert!(
                new_capacity <= old_capacity,
                "Vec::shrink_to did not shrink?"
            );
        }
    }

    /// push one element to back of ring
    pub fn push_back(&mut self, val: T) {
        self.reserve(1);
        unsafe {
            // append to tail side
            let target = self.ptr_at(self.offset_of(self.len));
            ptr::write(target, val);
        }
        self.len += 1;
    }

    /// push one element to front of ring
    pub fn push_front(&mut self, val: T) {
        self.reserve(1);
        // append to head side
        let new_head = self.offset_of_reverse(1);
        unsafe {
            let target = self.ptr_at(new_head);
            ptr::write(target, val);
        }
        self.head = new_head;
        self.len += 1;
    }

    /// pop one element from back of ring
    pub fn pop_back(&mut self) -> Option<T> {
        if self.len == 0 {
            return None;
        }

        let out;
        unsafe {
            // last element lives at index len - 1, not len
            let target = self.ptr_at(self.offset_of(self.len - 1));
            out = ptr::read(target);
            self.len -= 1;
        }

        Some(out)
    }

    /// pop one element from front of ring
    pub fn pop_front(&mut self) -> Option<T> {
        if self.len == 0 {
            return None;
        }

        let out;
        unsafe {
            let target = self.ptr_at(self.offset_of(0));
            out = ptr::read(target);
            self.head = self.offset_of(1);
            self.len -= 1;
        }

        Some(out)
    }

    /// ensure provided index range is sane
    fn check_range(&self, range: &Range<usize>) {
        assert!(range.start <= range.end, "range cannot be reverse");
        assert!(range.start < self.len, "range start out of bounds");
        assert!(range.end <= self.len, "range end out of bounds");
    }

    /// map element index range to backing buffer range(s)
    #[inline]
    fn map_range(&self, range: Range<usize>) -> (Range<usize>, Option<Range<usize>>) {
        self.map_range_explicit(self.head, range)
    }

    /// map element index range to backing buffer range(s) with explicit head index
    fn map_range_explicit(
        &self,
        head: usize,
        range: Range<usize>,
    ) -> (Range<usize>, Option<Range<usize>>) {
        if range.start == range.end {
            // zero size range
            return (head..head, None);
        }
        let start = self.offset_of_explicit(head, range.start);
        let end = self.offset_of_explicit(head, range.end - 1);

        if end >= start {
            // range does not wrap
            (start..end + 1, None)
        } else {
            // range does wrap
            (start..self.capacity(), Some(0..end + 1))
        }
    }

    /// get immutable reference to range
    pub fn range(&self, range: Range<usize>) -> RingBufSlice<T> {
        self.check_range(&range);
        RingBufSlice {
            buf: self,
            start: range.start,
            end: range.end,
        }
    }

    /// get mutable reference to range
    pub fn range_mut(&mut self, range: Range<usize>) -> RingBufSliceMut<T> {
        self.check_range(&range);
        RingBufSliceMut {
            buf: self,
            start: range.start,
            end: range.end,
            _marker: PhantomData,
        }
    }

    /// get slice(s) corresponding to range
    unsafe fn range_to_slices(&self, range: Range<usize>) -> (&[T], Option<&[T]>) {
        let (a, b) = self.map_range(range);
        (self.buf_slice_at(a), b.map(|r| self.buf_slice_at(r)))
    }

    /// get mutable slice(s) corresponding to range
    ///
    /// safety warning: must ensure slices do not alias
    unsafe fn range_to_slices_mut(&mut self, range: Range<usize>) -> (&mut [T], Option<&mut [T]>) {
        let (a, b) = self.map_range(range);
        (
            self.buf_slice_at_mut(a),
            b.map(|r| self.buf_slice_at_mut(r)),
        )
    }

    /// obtain reference to element at provided index
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len {
            unsafe { Some(&*self.ptr_at(self.offset_of(index))) }
        } else {
            None
        }
    }

    /// obtain mutable reference to element at provided index
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index < self.len {
            unsafe { Some(&mut *self.ptr_at(self.offset_of(index))) }
        } else {
            None
        }
    }

    /// clear all elements
    pub fn clear(&mut self) {
        unsafe {
            // get active ranges
            let (a, b) = self.map_range(0..self.len);
            self.head = 0;
            self.len = 0;

            let slice_a: *mut [T] = self.buf_slice_at_mut(a);
            ptr::drop_in_place(slice_a);
            if let Some(b) = b {
                let slice_b: *mut [T] = self.buf_slice_at_mut(b);
                ptr::drop_in_place(slice_b);
            }
        }
    }

    /// remove range of elements from RingBuf and return iterator for those elements
    ///
    /// Currently only supports ranges that touch either the start or the end
    /// of the buffer; draining from the middle is unimplemented. Elements not
    /// yet yielded are dropped when the iterator is dropped.
    pub fn drain<R: RangeBounds<usize>>(&mut self, range: R) -> Drain<T> {
        let lower_bound = match range.start_bound() {
            Bound::Included(&start) => {
                assert!(start < self.len, "start index out of bounds");
                Some(start)
            }
            Bound::Excluded(&start) => {
                let start = start.checked_add(1).expect("start index out of bounds");
                assert!(start < self.len, "start index out of bounds");
                Some(start)
            }
            Bound::Unbounded => None,
        };
        let upper_bound = match range.end_bound() {
            Bound::Included(&end) => {
                let end = end.checked_add(1).expect("range out of bounds");
                assert!(end <= self.len, "end index out of bounds");
                Some(end)
            }
            Bound::Excluded(&end) => {
                assert!(end <= self.len, "end index out of bounds");
                Some(end)
            }
            Bound::Unbounded => None,
        };

        match (lower_bound, upper_bound) {
            // bounded range that touches the start
            (Some(0), Some(end)) => Drain::from_start(self, end),
            // bounded range that touches the end
            (Some(start), Some(end)) if end == self.len => Drain::to_end(self, start),
            (Some(_), Some(_)) => unimplemented!("drain from middle unimplemented"),
            // drain until end
            (Some(start), None) => Drain::to_end(self, start),
            // drain from start
            (None, Some(end)) => Drain::from_start(self, end),
            // drain everything
            (None, None) => Drain::from_start(self, self.len),
        }
    }
}
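
// convenience addition (not strictly necessary): RingBuf::new takes no
// arguments, so Default can simply delegate to it, which also keeps
// clippy::new_without_default quiet
impl<T> Default for RingBuf<T> {
    fn default() -> Self {
        Self::new()
    }
}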

impl<T: Clone> RingBuf<T> {
    /// append `count` elements at back by cloning
    pub fn fill_at_back(&mut self, count: usize, value: T) {
        self.reserve(count);
        let (a, b) = self.map_range(self.len..self.len + count);

        unsafe {
            // for_each is massively faster than a for loop here
            a.for_each(|i| ptr::write(self.ptr_at(i), value.clone()));
            if let Some(b) = b {
                b.for_each(|i| ptr::write(self.ptr_at(i), value.clone()));
            }
        }

        self.len += count;
    }

    /// prepend `count` elements at front by cloning
    pub fn fill_at_front(&mut self, count: usize, value: T) {
        self.reserve(count);
        let new_head = self.offset_of_reverse(count);
        let (a, b) = self.map_range_explicit(new_head, 0..count);

        unsafe {
            a.for_each(|i| ptr::write(self.ptr_at(i), value.clone()));
            if let Some(b) = b {
                b.for_each(|i| ptr::write(self.ptr_at(i), value.clone()));
            }
        }

        self.head = new_head;
        self.len += count;
    }
}

// this was a bad idea
impl<T: Copy> RingBuf<T> {
    /// copy elements from slice into buffer ranges
    unsafe fn copy_range_from_slice(
        &mut self,
        range_a: Range<usize>,
        range_b: Option<Range<usize>>,
        elements: &[T],
    ) {
        self.copy_range_from_ptr(range_a, range_b, elements.as_ptr(), elements.len());
    }

    /// copy elements from raw pointer into buffer ranges
    unsafe fn copy_range_from_ptr(
        &mut self,
        range_a: Range<usize>,
        range_b: Option<Range<usize>>,
        source: *const T,
        count: usize,
    ) {
        if let Some(b) = range_b {
            // split copy
            debug_assert_eq!(
                b.end - b.start + range_a.end - range_a.start,
                count,
                "range incorrect"
            );
            unsafe {
                // copy first range
                let dest_a = self.ptr_at(range_a.start);
                let length_a = range_a.end - range_a.start;
                ptr::copy_nonoverlapping(source, dest_a, length_a);
                // copy second range
                let dest_b = self.ptr_at(b.start);
                let length_b = count - length_a;
                ptr::copy_nonoverlapping(source.add(length_a), dest_b, length_b);
            }
        } else {
            // oneshot copy
            debug_assert_eq!(range_a.end - range_a.start, count, "range incorrect");
            unsafe {
                let dest = self.ptr_at(range_a.start);
                ptr::copy_nonoverlapping(source, dest, range_a.end - range_a.start);
            }
        }
    }

    /// copy elements from buffer ranges to slice
    unsafe fn copy_range_to_slice(
        &self,
        range_a: Range<usize>,
        range_b: Option<Range<usize>>,
        to_slice: &mut [T],
    ) {
        self.copy_range_to_ptr(range_a, range_b, to_slice.as_mut_ptr(), to_slice.len());
    }

    /// copy elements from buffer ranges to raw pointer
    unsafe fn copy_range_to_ptr(
        &self,
        range_a: Range<usize>,
        range_b: Option<Range<usize>>,
        dest: *mut T,
        count: usize,
    ) {
        if let Some(b) = range_b {
            // split copy
            debug_assert_eq!(
                b.end - b.start + range_a.end - range_a.start,
                count,
                "range incorrect"
            );
            unsafe {
                // copy first range
                let source_a = self.ptr_at(range_a.start) as *const T;
                let length_a = range_a.end - range_a.start;
                ptr::copy_nonoverlapping(source_a, dest, length_a);
                // copy second range
                let source_b = self.ptr_at(b.start) as *const T;
                let length_b = count - length_a;
                ptr::copy_nonoverlapping(source_b, dest.add(length_a), length_b);
            }
        } else {
            // oneshot copy
            debug_assert_eq!(range_a.end - range_a.start, count, "range incorrect");
            unsafe {
                let source = self.ptr_at(range_a.start) as *const T;
                ptr::copy_nonoverlapping(source, dest, range_a.end - range_a.start);
            }
        }
    }

    /// push contents of slice to back by copying
    pub fn push_back_copy_from_slice(&mut self, elements: &[T]) {
        self.reserve(elements.len());
        let (a, b) = self.map_range(self.len..self.len + elements.len());
        unsafe { self.copy_range_from_slice(a, b, elements) };
        self.len += elements.len();
    }

    /// push contents of slice to front by copying
    pub fn push_front_copy_from_slice(&mut self, elements: &[T]) {
        self.reserve(elements.len());
        let new_head = self.offset_of_reverse(elements.len());
        let (a, b) = self.map_range_explicit(new_head, 0..elements.len());
        unsafe { self.copy_range_from_slice(a, b, elements) };
        self.head = new_head;
        self.len += elements.len();
    }

    /// pop contents to slice from back by copying
    pub fn pop_back_copy_to_slice(&mut self, dest: &mut [T]) {
        assert!(dest.len() <= self.len, "destination slice too large");
        let new_len = self.len - dest.len();
        let (a, b) = self.map_range(new_len..self.len);
        self.len = new_len;
        unsafe { self.copy_range_to_slice(a, b, dest) };
    }

    /// pop contents to slice from front by copying
    pub fn pop_front_copy_to_slice(&mut self, dest: &mut [T]) {
        assert!(dest.len() <= self.len, "destination slice too large");
        let (a, b) = self.map_range(0..dest.len());
        self.head = self.offset_of(dest.len());
        self.len -= dest.len();
        unsafe { self.copy_range_to_slice(a, b, dest) };
    }
}

fn validate_subrange(r1: Range<usize>, r2: &Range<usize>) -> Range<usize> {
    assert!(r2.start <= r2.end, "range cannot be reverse");
    let new_start = r1.start.checked_add(r2.start).expect("start out of range");
    let new_end = r1.start.checked_add(r2.end).expect("end out of range");
    assert!(new_start < r1.end, "start out of range");
    assert!(new_end <= r1.end, "end out of range");
    new_start..new_end
}

impl<'a, T> RingBufSlice<'a, T> {
    /// get length of slice
    pub fn len(&self) -> usize {
        self.end - self.start
    }

    /// whether slice contains zero elements
    pub fn is_empty(&self) -> bool {
        self.end == self.start
    }

    /// get slices representing range
    pub fn as_slices(&self) -> (&'a [T], Option<&'a [T]>) {
        unsafe { self.buf.range_to_slices(self.start..self.end) }
    }

    /// get sub-range into range
    pub fn range(&self, range: Range<usize>) -> RingBufSlice<'a, T> {
        let new_range = validate_subrange(self.start..self.end, &range);
        RingBufSlice {
            buf: self.buf,
            start: new_range.start,
            end: new_range.end,
        }
    }
}

impl<'a, T: Copy> RingBufSlice<'a, T> {
    /// copy contents of range to a slice
    pub fn copy_to_slice(&self, slice: &mut [T]) {
        assert_eq!(self.len(), slice.len(), "length mismatch");
        unsafe {
            let (a, b) = self.buf.map_range(self.start..self.end);
            self.buf.copy_range_to_slice(a, b, slice);
        }
    }

    /// copy contents of range to a raw pointer
    pub unsafe fn copy_to_ptr(&self, dest: *mut T, count: usize) {
        assert_eq!(self.len(), count, "length mismatch");
        let (a, b) = self.buf.map_range(self.start..self.end);
        self.buf.copy_range_to_ptr(a, b, dest, count);
    }

    /// read elements as fixed length array
    pub fn read_fixed<const N: usize>(&self) -> [T; N] {
        unsafe {
            let mut out: MaybeUninit<[T; N]> = MaybeUninit::uninit();
            self.copy_to_ptr(out.as_mut_ptr() as *mut T, N);
            out.assume_init()
        }
    }
}

impl<'a, T> RingBufSliceMut<'a, T> {
    /// get length of slice
    pub fn len(&self) -> usize {
        self.end - self.start
    }

    /// whether slice contains zero elements
    pub fn is_empty(&self) -> bool {
        self.end == self.start
    }

    /// get slices representing range
    pub fn as_slices(&self) -> (&'a [T], Option<&'a [T]>) {
        unsafe { (*self.buf).range_to_slices(self.start..self.end) }
    }

    /// get mutable slices representing range
    pub fn as_mut_slices(&mut self) -> (&'a mut [T], Option<&'a mut [T]>) {
        unsafe { (*self.buf).range_to_slices_mut(self.start..self.end) }
    }

    /// get sub-range into range
    pub fn range(&self, range: Range<usize>) -> RingBufSlice<'a, T> {
        let new_range = validate_subrange(self.start..self.end, &range);
        unsafe {
            // safety: write operations cannot be performed on RingBufSlice
            RingBufSlice {
                buf: &*self.buf,
                start: new_range.start,
                end: new_range.end,
            }
        }
    }

    /// get mutable sub-range into range
    pub fn range_mut(&mut self, range: Range<usize>) -> RingBufSliceMut<'a, T> {
        let new_range = validate_subrange(self.start..self.end, &range);
        RingBufSliceMut {
            buf: self.buf,
            start: new_range.start,
            end: new_range.end,
            _marker: PhantomData,
        }
    }

    /// split into two mutable slices at index
    pub fn split_at_mut(self, index: usize) -> (RingBufSliceMut<'a, T>, RingBufSliceMut<'a, T>) {
        let split_index = index.checked_add(self.start).expect("index out of range");
        // splitting at the very end is allowed and yields an empty second half
        assert!(split_index <= self.end, "split index out of range");

        (
            RingBufSliceMut {
                buf: self.buf,
                start: self.start,
                end: split_index,
                _marker: PhantomData,
            },
            RingBufSliceMut {
                buf: self.buf,
                start: split_index,
                end: self.end,
                _marker: PhantomData,
            },
        )
    }
}

impl<'a, T: Copy> RingBufSliceMut<'a, T> {
    /// copy contents of range to a slice
    pub fn copy_to_slice(&self, slice: &mut [T]) {
        assert_eq!(self.len(), slice.len(), "length mismatch");
        unsafe {
            let (a, b) = (*self.buf).map_range(self.start..self.end);
            (*self.buf).copy_range_to_slice(a, b, slice);
        }
    }

    /// copy contents of range to a raw pointer
    pub unsafe fn copy_to_ptr(&self, dest: *mut T, count: usize) {
        assert_eq!(self.len(), count, "length mismatch");
        let (a, b) = (*self.buf).map_range(self.start..self.end);
        (*self.buf).copy_range_to_ptr(a, b, dest, count);
    }

    /// read elements as fixed length array
    pub fn read_fixed<const N: usize>(&self) -> [T; N] {
        unsafe {
            let mut out: MaybeUninit<[T; N]> = MaybeUninit::uninit();
            self.copy_to_ptr(out.as_mut_ptr() as *mut T, N);
            out.assume_init()
        }
    }

    /// copy contents of a slice to the range
    pub fn copy_from_slice(&mut self, slice: &[T]) {
        assert_eq!(self.len(), slice.len(), "length mismatch");
        unsafe {
            let (a, b) = (*self.buf).map_range(self.start..self.end);
            (*self.buf).copy_range_from_slice(a, b, slice);
        }
    }

    /// copy contents of a raw pointer to the range
    pub unsafe fn copy_from_ptr(&mut self, src: *const T, count: usize) {
        assert_eq!(self.len(), count, "length mismatch");
        unsafe {
            let (a, b) = (*self.buf).map_range(self.start..self.end);
            (*self.buf).copy_range_from_ptr(a, b, src, count);
        }
    }
}

impl<T> Drop for RingBuf<T> {
    fn drop(&mut self) {
        unsafe {
            // ensure Vec does not drop garbage
            self.realign();
            self.buf.set_len(self.len);
        }
    }
}

impl<'a, T> Drain<'a, T> {
    /// create a Drain for the range [0, until)
    fn from_start(buf: &'a mut RingBuf<T>, until: usize) -> Drain<'a, T> {
        let prev_head = buf.head;
        // advance head past the drained elements; `until` is an element count,
        // so it must be mapped to a buffer offset rather than used directly.
        // if the buffer is drained completely, reset head to the start
        let new_head = if until == buf.len { 0 } else { buf.offset_of(until) };
        let drain = Drain {
            buf,
            front: 0,
            back: until,
            remaining: until,
            prev_head,
        };
        drain.buf.head = new_head;
        drain.buf.len -= until;
        drain
    }

    /// create a Drain for the range [starting_from, buf.len)
    fn to_end(buf: &'a mut RingBuf<T>, starting_from: usize) -> Drain<'a, T> {
        let prev_head = buf.head;
        let back = buf.len;
        let remaining = buf.len - starting_from;
        let drain = Drain {
            buf,
            front: starting_from,
            back,
            remaining,
            prev_head,
        };
        // only the first starting_from elements remain in the buffer
        drain.buf.len = starting_from;
        drain
    }
}

impl<'a, T> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        if self.remaining == 0 {
            // nothing to drop
            return;
        }

        unsafe {
            // drop everything remaining in iterator
            let (a, b) = self
                .buf
                .map_range_explicit(self.prev_head, self.front..self.back);
            let slice_a: *mut [T] = self.buf.buf_slice_at_mut(a);
            ptr::drop_in_place(slice_a);
            if let Some(b) = b {
                let slice_b: *mut [T] = self.buf.buf_slice_at_mut(b);
                ptr::drop_in_place(slice_b);
            }
        }
    }
}

impl<'a, T> Iterator for Drain<'a, T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            None
        } else {
            unsafe {
                let element = self
                    .buf
                    .ptr_at(self.buf.offset_of_explicit(self.prev_head, self.front));
                self.front += 1;
                self.remaining -= 1;
                Some(ptr::read(element))
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            None
        } else {
            unsafe {
                self.back -= 1;
                self.remaining -= 1;
                let element = self
                    .buf
                    .ptr_at(self.buf.offset_of_explicit(self.prev_head, self.back));
                Some(ptr::read(element))
            }
        }
    }
}

impl<'a, T> ExactSizeIterator for Drain<'a, T> {}

#[cfg(test)]
mod test {
    // DISCLAIMER: this "test suite" is in absolutely no way exhaustive and
    // should not in any way, shape, or form serve as reassurance that the
    // RingBuf implementation above is safe for anything at all

    use super::*;

    #[test]
    fn new() {
        let mut buf: RingBuf<String> = RingBuf::new();
        buf.push_back("world".into());
        buf.push_back("!".into());
        buf.push_front(", ".into());
        buf.push_front("Hello".into());

        assert_eq!(buf.pop_front(), Some("Hello".into()));
        assert_eq!(buf.pop_front(), Some(", ".into()));
        assert_eq!(buf.pop_front(), Some("world".into()));
        assert_eq!(buf.pop_front(), Some("!".into()));
        assert_eq!(buf.pop_front(), None);
    }
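
    // added sanity check (not from the original suite): pop_back should
    // return the last element, including after the ring has wrapped
    #[test]
    fn push_pop_wraparound() {
        let mut buf: RingBuf<u32> = RingBuf::with_capacity(4);
        buf.push_back(1);
        buf.push_back(2);
        // move head forward so subsequent pushes wrap
        assert_eq!(buf.pop_front(), Some(1));
        buf.push_back(3);
        buf.push_back(4);
        buf.push_back(5); // wraps into the slot freed by pop_front
        assert_eq!(buf.len(), 4);

        assert_eq!(buf.pop_back(), Some(5));
        assert_eq!(buf.pop_back(), Some(4));
        assert_eq!(buf.pop_front(), Some(2));
        assert_eq!(buf.pop_front(), Some(3));
        assert_eq!(buf.pop_back(), None);
    }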

    #[test]
    fn copy_around_slices() {
        let mut buf: RingBuf<u8> = RingBuf::new();
        buf.push_back_copy_from_slice(&[5, 6, 7, 8, 9, 10, 11]);
        buf.push_front_copy_from_slice(&[0, 1, 2, 3, 4]);
        assert_eq!(buf.get(3), Some(&3));
        assert_eq!(buf.get(7), Some(&7));

        let sliced = buf.range(3..6);
        let mut dest = [0u8; 3];
        sliced.copy_to_slice(&mut dest);
        assert_eq!(dest, [3, 4, 5]);

        let mut dest = [0u8; 6];
        buf.pop_front_copy_to_slice(&mut dest);
        assert_eq!(dest, [0, 1, 2, 3, 4, 5]);
        buf.pop_back_copy_to_slice(&mut dest);
        assert_eq!(dest, [6, 7, 8, 9, 10, 11]);
    }
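
    // added sanity check (not from the original suite): the Clone-based fill
    // methods should behave like repeated push_back/push_front
    #[test]
    fn fill_clone() {
        let mut buf: RingBuf<String> = RingBuf::new();
        buf.fill_at_back(3, "b".to_string());
        buf.fill_at_front(2, "a".to_string());

        assert_eq!(buf.len(), 5);
        assert_eq!(buf.get(0), Some(&"a".to_string()));
        assert_eq!(buf.get(1), Some(&"a".to_string()));
        assert_eq!(buf.get(2), Some(&"b".to_string()));
        assert_eq!(buf.get(4), Some(&"b".to_string()));
        assert_eq!(buf.pop_back(), Some("b".to_string()));
        assert_eq!(buf.pop_front(), Some("a".to_string()));
        assert_eq!(buf.len(), 3);
    }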

    #[test]
    fn copy_from_slice() {
        let mut buf: RingBuf<u8> = RingBuf::new();
        buf.push_back_copy_from_slice(&[0u8; 4096]);

        let mut slice_mut = buf.range_mut(1024..2048);
        slice_mut.copy_from_slice(&[1u8; 1024]);

        buf.drain(..1024);

        assert_eq!(buf.get(1024), Some(&0));
        assert_eq!(buf.get(512), Some(&1));

        buf.push_front_copy_from_slice(&[2u8; 1024]);

        assert_eq!(buf.get(512), Some(&2));
        assert_eq!(buf.get(1536), Some(&1));
        assert_eq!(buf.get(3072), Some(&0));
    }
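
    // added regression check (not from the original suite): draining from the
    // start must advance head by an element count mapped through the previous
    // head, so drain twice with head already shifted
    #[test]
    fn drain_with_shifted_head() {
        let mut buf: RingBuf<u32> = RingBuf::new();
        for i in 0..8 {
            buf.push_back(i);
        }

        let a: Vec<u32> = buf.drain(..3).collect();
        assert_eq!(a, [0, 1, 2]);
        // head is now nonzero; from_start must map through it
        let b: Vec<u32> = buf.drain(..3).collect();
        assert_eq!(b, [3, 4, 5]);
        assert_eq!(buf.len(), 2);
        assert_eq!(buf.get(0), Some(&6));
        assert_eq!(buf.get(1), Some(&7));
    }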

    #[test]
    fn drain() {
        let mut buf: RingBuf<String> = RingBuf::new();
        buf.push_back("Hello, ".into());
        buf.push_back("world!".into());

        for i in 0..10 {
            buf.push_back(i.to_string());
        }

        let a: Vec<String> = buf.drain(..2).collect();
        assert_eq!(a.join(""), "Hello, world!");
        let b: Vec<String> = buf.drain(..).collect();
        assert_eq!(b.join(""), "0123456789");
        assert_eq!(buf.len(), 0);
    }
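
    // added sanity check (not from the original suite): drain(n..) should
    // leave the first n elements intact and support double-ended iteration
    #[test]
    fn drain_to_end() {
        let mut buf: RingBuf<String> = RingBuf::new();
        for i in 0..6 {
            buf.push_back(i.to_string());
        }

        let mut drain = buf.drain(2..);
        assert_eq!(drain.next(), Some("2".to_string()));
        assert_eq!(drain.next_back(), Some("5".to_string()));
        // remaining drained elements ("3", "4") are dropped with the iterator
        drop(drain);

        assert_eq!(buf.len(), 2);
        assert_eq!(buf.pop_front(), Some("0".to_string()));
        assert_eq!(buf.pop_front(), Some("1".to_string()));
    }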

    #[test]
    fn range_as_slices() {
        let mut data = [0u8; 256];
        for (i, v) in data.iter_mut().enumerate() {
            *v = i as u8;
        }
        let mut buf: RingBuf<u8> = RingBuf::with_capacity(data.len());
        buf.push_back_copy_from_slice(&data[24..]);
        buf.push_front_copy_from_slice(&data[..24]);
        let mut range = buf.range_mut(0..buf.len());
        let (a, b) = range.as_mut_slices();
        assert_eq!(a, &mut data[..24]);
        assert_eq!(b, Some(&mut data[24..]));

        let range2 = buf.range(0..4);
        assert_eq!(range2.read_fixed::<4>(), [0, 1, 2, 3]);
    }
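
    // added sanity check (not from the original suite): sub-ranges and
    // split_at_mut address elements relative to the parent range
    #[test]
    fn subrange_and_split() {
        let mut buf: RingBuf<u8> = RingBuf::new();
        buf.push_back_copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7]);

        let slice = buf.range(2..8);
        let sub = slice.range(1..4); // elements 3, 4, 5 of the buffer
        assert_eq!(sub.read_fixed::<3>(), [3, 4, 5]);

        let slice_mut = buf.range_mut(0..8);
        let (mut left, mut right) = slice_mut.split_at_mut(4);
        left.copy_from_slice(&[10, 11, 12, 13]);
        right.copy_from_slice(&[14, 15, 16, 17]);
        assert_eq!(buf.get(0), Some(&10));
        assert_eq!(buf.get(7), Some(&17));
    }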

    #[test]
    fn shrink() {
        let mut buf: RingBuf<u8> = RingBuf::with_capacity(256);
        buf.push_back_copy_from_slice(&[5u8; 64]);
        buf.push_front_copy_from_slice(&[6u8; 32]);
        buf.shrink_to(buf.len());
        assert_eq!(buf.get(0), Some(&6));
        assert_eq!(buf.get(16), Some(&6));
        assert_eq!(buf.get(48), Some(&5));
        assert_eq!(buf.get(95), Some(&5));
    }
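
    // added sanity check (not from the original suite): realign must restore
    // front-to-back element order after the buffer has wrapped
    #[test]
    fn realign_wrapped() {
        let mut buf: RingBuf<u8> = RingBuf::with_capacity(8);
        buf.push_back_copy_from_slice(&[3, 4, 5]);
        buf.push_front_copy_from_slice(&[0, 1, 2]);
        assert!(!buf.is_contiguous());

        buf.realign();
        assert!(buf.is_contiguous());
        let mut dest = [0u8; 6];
        buf.pop_front_copy_to_slice(&mut dest);
        assert_eq!(dest, [0, 1, 2, 3, 4, 5]);
    }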
}