generic_arraydeque/drain.rs
1use core::{
2 fmt,
3 iter::FusedIterator,
4 marker::PhantomData,
5 mem::{self, size_of},
6 ops::{Range, RangeBounds},
7 ptr::{self, NonNull},
8};
9
10use super::{ArrayLength, GenericArrayDeque};
11
impl<T, N: ArrayLength> GenericArrayDeque<T, N> {
    /// Removes the specified range from the deque in bulk, returning all
    /// removed elements as an iterator. If the iterator is dropped before
    /// being fully consumed, it drops the remaining removed elements.
    ///
    /// The returned iterator keeps a mutable borrow on the queue to optimize
    /// its implementation.
    ///
    /// # Panics
    ///
    /// Panics if the range has `start_bound > end_bound`, or, if the range is
    /// bounded on either end and past the length of the deque.
    ///
    /// # Leaking
    ///
    /// If the returned iterator goes out of scope without being dropped (due to
    /// [`mem::forget`], for example), the deque may have lost and leaked
    /// elements arbitrarily, including elements outside the range.
    ///
    /// # Examples
    ///
    /// This method mirrors the semantics of the standard library's
    /// `VecDeque::drain`, demonstrated here:
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut deque: VecDeque<_> = [1, 2, 3].into();
    /// let drained = deque.drain(2..).collect::<VecDeque<_>>();
    /// assert_eq!(drained, [3]);
    /// assert_eq!(deque, [1, 2]);
    ///
    /// // A full range clears all contents, like `clear()` does
    /// deque.drain(..);
    /// assert!(deque.is_empty());
    /// ```
    #[inline]
    pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, N>
    where
        R: RangeBounds<usize>,
    {
        // Memory safety
        //
        // When the Drain is first created, the source deque is shortened to
        // make sure no uninitialized or moved-from elements are accessible at
        // all if the Drain's destructor never gets to run.
        //
        // Drain will ptr::read out the values to remove.
        // When finished, the remaining data will be copied back to cover the hole,
        // and the head/tail values will be restored correctly.
        //
        // `super::range` validates the bounds against `..self.len` (panicking
        // on an invalid range), which is what upholds `Drain::new`'s safety
        // contract below.
        let Range { start, end } = super::range(range, ..self.len);
        let drain_start = start;
        let drain_len = end - start;

        // The deque's elements are parted into three segments:
        // * 0 -> drain_start
        // * drain_start -> drain_start+drain_len
        // * drain_start+drain_len -> self.len
        //
        // H = self.head; T = self.head+self.len; t = drain_start+drain_len; h = drain_start
        //
        // We store drain_start as self.len, and drain_len and self.len as
        // drain_len and orig_len respectively on the Drain. This also
        // truncates the effective array such that if the Drain is leaked, we
        // have forgotten about the potentially moved values after the start of
        // the drain.
        //
        //        H   h   t   T
        // [. . . o o x x o o . . .]
        //
        // "forget" about the values after the start of the drain until after
        // the drain is complete and the Drain destructor is run.

        // SAFETY: `drain_start..drain_start + drain_len` was validated above
        // to lie within the initialized elements (`..self.len`).
        unsafe { Drain::new(self, drain_start, drain_len) }
    }
}
87
/// A draining iterator over the elements of a `GenericArrayDeque`.
///
/// This `struct` is created by the [`drain`] method on [`GenericArrayDeque`].
/// See its documentation for more.
///
/// [`drain`]: GenericArrayDeque::drain
pub struct Drain<'a, T, N: ArrayLength> {
    // We can't just use a &mut GenericArrayDeque<T, N>, as that would make Drain
    // invariant over T and we want it to be covariant instead
    deque: NonNull<GenericArrayDeque<T, N>>,
    // Length of the drained range; drain_start itself is stored in deque.len
    // (see `Drain::new`).
    drain_len: usize,
    // Logical index of the next front element to yield — an index into the
    // logical array, not the physical one (always lies in [0..deque.len))
    idx: usize,
    // number of elements remaining in the deque after dropping the drain
    new_len: usize,
    // number of drained elements not yet yielded (and still owed a drop)
    remaining: usize,
    // Needed to make Drain covariant over T
    _marker: PhantomData<&'a T>,
}
108
impl<'a, T, N: ArrayLength> Drain<'a, T, N> {
    /// Creates a `Drain` over `deque[drain_start..drain_start + drain_len]`.
    ///
    /// Truncates `deque.len` down to `drain_start` so that, if the returned
    /// `Drain` is leaked, the deque simply forgets the drained elements and
    /// everything after them instead of exposing moved-from values. The
    /// original length is recoverable as `new_len + drain_len`.
    ///
    /// # Safety
    ///
    /// `drain_start + drain_len` must not exceed `deque.len`, i.e. the
    /// described range must cover only initialized elements.
    pub(super) unsafe fn new(
        deque: &'a mut GenericArrayDeque<T, N>,
        drain_start: usize,
        drain_len: usize,
    ) -> Self {
        // From here on, `deque.len` doubles as the stored `drain_start`.
        let orig_len = mem::replace(&mut deque.len, drain_start);
        let new_len = orig_len - drain_len;
        Drain {
            deque: NonNull::from(deque),
            drain_len,
            idx: drain_start,
            new_len,
            remaining: drain_len,
            _marker: PhantomData,
        }
    }

    // Returns the not-yet-drained elements as (front, back) raw slices of the
    // physical buffer. Only returns pointers to the slices, as that's all we
    // need to drop them. May only be called if `self.remaining != 0`.
    unsafe fn as_slices(&mut self) -> (*mut [T], *mut [T]) {
        unsafe {
            let deque = self.deque.as_mut();

            // We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
            let logical_remaining_range = self.idx..self.idx + self.remaining;

            // SAFETY: `logical_remaining_range` represents the
            // range into the logical buffer of elements that
            // haven't been drained yet, so they're all initialized,
            // and `slice::range(start..end, end) == start..end`,
            // so the preconditions for `slice_ranges` are met.
            let (a_range, b_range) =
                deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
            (
                deque.buffer_range_mut(a_range),
                deque.buffer_range_mut(b_range),
            )
        }
    }
}
150
151impl<T: fmt::Debug, N: ArrayLength> fmt::Debug for Drain<'_, T, N> {
152 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
153 f.debug_tuple("Drain")
154 .field(&self.drain_len)
155 .field(&self.idx)
156 .field(&self.new_len)
157 .field(&self.remaining)
158 .finish()
159 }
160}
161
// SAFETY: `Drain` owns its `NonNull<GenericArrayDeque<T, N>>` purely as a
// covariant stand-in for the `&'a mut GenericArrayDeque<T, N>` it was built
// from (see the field comment on `deque`), so it may be shared/sent across
// threads exactly when that mutable borrow could be: when both `T` and `N`
// are `Sync`/`Send` respectively.
unsafe impl<T: Sync, N: ArrayLength + Sync> Sync for Drain<'_, T, N> {}
unsafe impl<T: Send, N: ArrayLength + Send> Send for Drain<'_, T, N> {}
164
impl<T, N: ArrayLength> Drop for Drain<'_, T, N> {
    fn drop(&mut self) {
        // Panic-safety guard: its own `Drop` runs even if dropping one of the
        // drained elements below panics, so the deque is always restored to a
        // coherent state (remaining drained elements dropped, hole closed,
        // head/len fixed up).
        struct DropGuard<'r, 'a, T, N: ArrayLength>(&'r mut Drain<'a, T, N>);

        let guard = DropGuard(self);

        if mem::needs_drop::<T>() && guard.0.remaining != 0 {
            unsafe {
                // SAFETY: We just checked that `self.remaining != 0`.
                let (front, back) = guard.0.as_slices();
                // since idx is a logical index, we don't need to worry about wrapping.
                // Advance past `front` first so that, if dropping `front`
                // panics, the guard re-derives slices covering only what is
                // still live.
                guard.0.idx += PtrLen::len(front);
                guard.0.remaining -= PtrLen::len(front);

                ptr::drop_in_place(front);
                guard.0.remaining = 0;
                ptr::drop_in_place(back);
            }
        }

        // Dropping `guard` handles moving the remaining elements into place.
        impl<T, N: ArrayLength> Drop for DropGuard<'_, '_, T, N> {
            #[inline]
            fn drop(&mut self) {
                // Reached with `remaining != 0` only if a drop above panicked;
                // drop whatever drained elements are still live.
                if mem::needs_drop::<T>() && self.0.remaining != 0 {
                    unsafe {
                        // SAFETY: We just checked that `self.remaining != 0`.
                        let (front, back) = self.0.as_slices();
                        ptr::drop_in_place(front);
                        ptr::drop_in_place(back);
                    }
                }

                let source_deque = unsafe { self.0.deque.as_mut() };

                let drain_len = self.0.drain_len;
                let new_len = self.0.new_len;

                if size_of::<T>() == 0 {
                    // no need to copy around any memory if T is a ZST
                    source_deque.len = new_len;
                    return;
                }

                let head_len = source_deque.len; // #elements in front of the drain
                let tail_len = new_len - head_len; // #elements behind the drain

                // Next, we will fill the hole left by the drain with as few writes as possible.
                // The code below handles the following control flow and reduces the amount of
                // branches under the assumption that `head_len == 0 || tail_len == 0`, i.e.
                // draining at the front or at the back of the deque is especially common.
                //
                // H = "head index" = `deque.head`
                // h = elements in front of the drain
                // d = elements in the drain
                // t = elements behind the drain
                //
                // Note that the buffer may wrap at any point and the wrapping is handled by
                // `wrap_copy` and `to_physical_idx`.
                //
                // Case 1: if `head_len == 0 && tail_len == 0`
                // Everything was drained, reset the head index back to 0.
                //             H
                // [ . . . . . d d d d . . . . . ]
                //   H
                // [ . . . . . . . . . . . . . . ]
                //
                // Case 2: else if `tail_len == 0`
                // Don't move data or the head index.
                //       H
                // [ . . . h h h h d d d d . . . ]
                //       H
                // [ . . . h h h h . . . . . . . ]
                //
                // Case 3: else if `head_len == 0`
                // Don't move data, but move the head index.
                //       H
                // [ . . . d d d d t t t t . . . ]
                //               H
                // [ . . . . . . . t t t t . . . ]
                //
                // Case 4: else if `tail_len <= head_len`
                // Move data, but not the head index.
                //     H
                // [ . . h h h h d d d d t t . . ]
                //     H
                // [ . . h h h h t t . . . . . . ]
                //
                // Case 5: else
                // Move data and the head index.
                //     H
                // [ . . h h d d d d t t t t . . ]
                //             H
                // [ . . . . . . h h t t t t . . ]

                // When draining at the front (`.drain(..n)`) or at the back (`.drain(n..)`),
                // we don't need to copy any data. The number of elements copied would be 0.
                if head_len != 0 && tail_len != 0 {
                    join_head_and_tail_wrapping(source_deque, drain_len, head_len, tail_len);
                    // Marking this function as cold helps LLVM to eliminate it entirely if
                    // this branch is never taken.
                    // We use `#[cold]` instead of `#[inline(never)]`, because inlining this
                    // function into the general case (`.drain(n..m)`) is fine.
                    // See `tests/codegen-llvm/vecdeque-drain.rs` for a test.
                    #[cold]
                    fn join_head_and_tail_wrapping<T, N: ArrayLength>(
                        source_deque: &mut GenericArrayDeque<T, N>,
                        drain_len: usize,
                        head_len: usize,
                        tail_len: usize,
                    ) {
                        // Pick whether to move the head or the tail here,
                        // whichever is shorter (ties move the tail, keeping
                        // the head index fixed — Case 4 above).
                        let (src, dst, len);
                        if head_len < tail_len {
                            src = source_deque.head;
                            dst = source_deque.to_physical_idx(drain_len);
                            len = head_len;
                        } else {
                            src = source_deque.to_physical_idx(head_len + drain_len);
                            dst = source_deque.to_physical_idx(head_len);
                            len = tail_len;
                        };

                        unsafe {
                            source_deque.wrap_copy(src, dst, len);
                        }
                    }
                }

                if new_len == 0 {
                    // Special case: If the entire deque was drained, reset the head back to 0,
                    // like `.clear()` does.
                    source_deque.head = 0;
                } else if head_len < tail_len {
                    // If we moved the head above (Cases 3 and 5), then we need
                    // to adjust the head index here.
                    source_deque.head = source_deque.to_physical_idx(drain_len);
                }
                source_deque.len = new_len;
            }
        }
    }
}
307
308impl<T, N: ArrayLength> Iterator for Drain<'_, T, N> {
309 type Item = T;
310
311 #[inline]
312 fn next(&mut self) -> Option<T> {
313 if self.remaining == 0 {
314 return None;
315 }
316 let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
317 self.idx += 1;
318 self.remaining -= 1;
319 Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
320 }
321
322 #[inline]
323 fn size_hint(&self) -> (usize, Option<usize>) {
324 let len = self.remaining;
325 (len, Some(len))
326 }
327}
328
329impl<T, N: ArrayLength> DoubleEndedIterator for Drain<'_, T, N> {
330 #[inline]
331 fn next_back(&mut self) -> Option<T> {
332 if self.remaining == 0 {
333 return None;
334 }
335 self.remaining -= 1;
336 let wrapped_idx = unsafe {
337 self
338 .deque
339 .as_ref()
340 .to_physical_idx(self.idx + self.remaining)
341 };
342 Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
343 }
344}
345
// `size_hint` returns `(remaining, Some(remaining))`, an exact count, so the
// `ExactSizeIterator` contract holds.
impl<T, N: ArrayLength> ExactSizeIterator for Drain<'_, T, N> {}

// `next` keeps returning `None` once `remaining` reaches zero, so the
// iterator is trivially fused.
impl<T, N: ArrayLength> FusedIterator for Drain<'_, T, N> {}
349
/// Compatibility shim for `<*mut [T]>::len`, which was only stabilized in
/// Rust 1.79. Lets the `Drop` code above query the length of the raw slices
/// returned by `Drain::as_slices` on older toolchains.
trait PtrLen {
    #[allow(unstable_name_collisions)]
    /// Returns the number of elements in the slice pointed to.
    ///
    /// # Safety
    /// - The pointer must be initialized and valid for reads.
    unsafe fn len(self) -> usize;
}

// On 1.79+ this delegates to the now-stable inherent `<*mut [T]>::len`.
#[rustversion::since(1.79)]
impl<T> PtrLen for *mut [T] {
    #[allow(unstable_name_collisions)]
    #[cfg_attr(not(tarpaulin), inline(always))]
    unsafe fn len(self) -> usize {
        <*mut [T]>::len(self)
    }
}

// Before 1.79 we must materialize a shared reference to read the length,
// which is why the trait method requires a valid, initialized pointer.
#[rustversion::before(1.79)]
impl<T> PtrLen for *mut [T] {
    #[allow(unstable_name_collisions)]
    #[cfg_attr(not(tarpaulin), inline(always))]
    unsafe fn len(self) -> usize {
        (&*self).len()
    }
}
374
#[cfg(test)]
mod tests {
    use crate::{typenum::U8, GenericArrayDeque};
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Counts `DropSpy` drops; only the drop-count test resets and reads it.
    static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);

    // Zero-sized sentinel whose drop bumps `DROP_COUNTER`.
    #[derive(Debug)]
    struct DropSpy;

    impl Drop for DropSpy {
        fn drop(&mut self) {
            DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
        }
    }

    #[test]
    fn drain_removes_requested_range() {
        let mut deque = GenericArrayDeque::<_, U8>::new();
        for value in 0..6 {
            assert!(deque.push_back(value).is_none());
        }

        // Pull the middle range out and record what the iterator yields.
        let mut drained = [0; 2];
        let mut count = 0;
        deque.drain(2..4).for_each(|value| {
            drained[count] = value;
            count += 1;
        });
        assert_eq!(&drained[..count], &[2, 3]);

        // The survivors close the gap while keeping their order.
        assert_eq!(deque.len(), 4);
        assert_eq!(deque[0], 0);
        assert_eq!(deque[1], 1);
        assert_eq!(deque[2], 4);
        assert_eq!(deque[3], 5);
    }

    #[test]
    fn drain_iterator_supports_double_ended_iteration() {
        let mut deque = GenericArrayDeque::<_, U8>::new();
        for value in 0..5 {
            assert!(deque.push_back(value).is_none());
        }

        {
            // Alternate ends: back first, then the front twice.
            let mut drain = deque.drain(1..4);
            assert_eq!(drain.next_back(), Some(3));
            assert_eq!(drain.next(), Some(1));
            assert_eq!(drain.next(), Some(2));
            assert_eq!(drain.next(), None);
        }
        assert_eq!(deque.len(), 2);
        assert_eq!(deque[0], 0);
        assert_eq!(deque[1], 4);
    }

    #[test]
    fn dropping_drain_drops_remaining_elements() {
        DROP_COUNTER.store(0, Ordering::SeqCst);
        {
            let mut deque = GenericArrayDeque::<_, U8>::new();
            for _ in 0..4 {
                assert!(deque.push_back(DropSpy).is_none());
            }
            // Discarding the drain immediately drops its two elements here...
            let _ = deque.drain(1..3);
            assert_eq!(deque.len(), 2);
        }
        // ...and the two survivors drop with the deque itself.
        assert_eq!(DROP_COUNTER.load(Ordering::SeqCst), 4);
    }
}
443}