// griddle/raw/mod.rs

/// Number of leftover elements carried over from the old table on each insert
/// while an incremental resize is in progress.
///
/// Kept small in test/miri builds so that resizes stay "in progress" long
/// enough for tests to exercise the split-table state.
#[cfg(any(test, miri))]
pub(crate) const R: usize = 4;
/// See above; larger in release builds so resizes finish in fewer inserts.
#[cfg(not(any(test, miri)))]
pub(crate) const R: usize = 8;
5
6use core::iter::FusedIterator;
7use core::mem;
8use hashbrown::{raw, TryReserveError};
9
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub(crate) struct Bucket<T> {
    /// The underlying `hashbrown` bucket this handle refers to.
    pub(crate) bucket: raw::Bucket<T>,
    /// `true` if the bucket lives in the current (main) table; `false` if it
    /// lives in the old, pre-resize table and will eventually be carried over.
    pub(crate) in_main: bool,
}
19
20impl<T> Clone for Bucket<T> {
21    #[cfg_attr(feature = "inline-more", inline)]
22    fn clone(&self) -> Self {
23        Bucket {
24            bucket: self.bucket.clone(),
25            in_main: self.in_main,
26        }
27    }
28}
29
impl<T> Bucket<T> {
    /// Returns true if this bucket is in the "old" table and will be moved.
    ///
    /// Elements in the old table are carried over to the main table as
    /// inserts happen, at which point handles pointing at them become stale.
    pub(crate) fn will_move(&self) -> bool {
        !self.in_main
    }
}
36
/// Deref to the underlying `hashbrown` bucket so callers can use its API
/// (e.g. `as_ref`/`as_mut`) directly on our wrapper.
impl<T> core::ops::Deref for Bucket<T> {
    type Target = raw::Bucket<T>;
    fn deref(&self) -> &Self::Target {
        &self.bucket
    }
}
43
/// A raw hash table with an unsafe API.
///
/// This is a wrapper around [`hashbrown::raw::RawTable`] that also implements incremental
/// resizing. When you interact with this API, keep in mind that there may be two backing tables,
/// and a lookup may return a reference to _either_. Eventually, entries in the old table will be
/// reclaimed, which invalidates any references to them.
pub(crate) struct RawTable<T> {
    /// The main table, where new inserts land.
    table: raw::RawTable<T>,
    /// The old table from an in-progress incremental resize, if any; its
    /// elements are gradually moved into `table`.
    leftovers: Option<OldTable<T>>,
}
54
55impl<T> RawTable<T> {
56    /// Creates a new empty hash table without allocating any memory.
57    ///
58    /// In effect this returns a table with exactly 1 bucket. However we can
59    /// leave the data pointer dangling since that bucket is never written to
60    /// due to our load factor forcing us to always have at least 1 free bucket.
61    #[cfg_attr(feature = "inline-more", inline)]
62    pub(crate) const fn new() -> Self {
63        Self {
64            table: raw::RawTable::new(),
65            leftovers: None,
66        }
67    }
68
69    /// Allocates a new hash table with at least enough capacity for inserting
70    /// the given number of elements without reallocating.
71    pub(crate) fn with_capacity(capacity: usize) -> Self {
72        Self {
73            table: raw::RawTable::with_capacity(capacity),
74            leftovers: None,
75        }
76    }
77
78    /// Erases an element from the table, dropping it in place.
79    #[cfg_attr(feature = "inline-more", inline)]
80    pub(crate) unsafe fn erase(&mut self, item: Bucket<T>) {
81        if item.in_main {
82            self.table.erase(item.bucket);
83        } else if let Some(ref mut lo) = self.leftovers {
84            lo.items.reflect_remove(&item.bucket);
85            lo.table.erase(item.bucket);
86        } else {
87            unreachable!("invalid bucket state");
88        }
89    }
90
91    /// Removes an element from the table, returning it.
92    #[cfg_attr(feature = "inline-more", inline)]
93    pub(crate) unsafe fn remove(&mut self, item: Bucket<T>) -> T {
94        if item.in_main {
95            self.table.remove(item.bucket).0
96        } else if let Some(ref mut lo) = self.leftovers {
97            lo.items.reflect_remove(&item.bucket);
98            let (v, _) = lo.table.remove(item.bucket);
99
100            if lo.table.len() == 0 {
101                let _ = self.leftovers.take();
102            }
103
104            v
105        } else {
106            unreachable!("invalid bucket state");
107        }
108    }
109
110    /// Finds and removes an element from the table, returning it.
111    #[cfg_attr(feature = "inline-more", inline)]
112    pub(crate) fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
113        // Avoid `Option::map` because it bloats LLVM IR.
114        match self.find(hash, eq) {
115            Some(bucket) => Some(unsafe { self.remove(bucket) }),
116            None => None,
117        }
118    }
119
120    /// Removes all elements from the table without freeing the backing memory.
121    #[cfg_attr(feature = "inline-more", inline)]
122    pub(crate) fn clear(&mut self) {
123        let _ = self.leftovers.take();
124        self.table.clear();
125    }
126
127    /// Shrinks the table so that it fits as close to `min_size` elements as possible.
128    ///
129    /// In reality, the table may end up larger than `min_size`, as must be able to hold all the
130    /// current elements, as well as some additional elements due to incremental resizing.
131    #[cfg_attr(feature = "inline-more", inline)]
132    pub(crate) fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
133        // Calculate the minimal number of elements that we need to reserve
134        // space for.
135        let mut need = self.table.len();
136        // We need to make sure that we never have to resize while there
137        // are still leftovers.
138        if let Some(ref lo) = self.leftovers {
139            // We need to move another lo.table.len() items.
140            need += lo.table.len();
141            // We move R items on each insert.
142            // That means we need to accomodate another
143            // lo.table.len() / R (rounded up) inserts to move them all.
144            need += (lo.table.len() + R - 1) / R;
145        }
146        let min_size = usize::max(need, min_size);
147        self.table.shrink_to(min_size, hasher);
148    }
149
150    /// Ensures that at least `additional` items can be inserted into the table
151    /// without reallocation.
152    ///
153    /// While we try to make this incremental where possible, it may require all-at-once resizing.
154    #[cfg_attr(feature = "inline-more", inline)]
155    pub(crate) fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
156        let need = self.leftovers.as_ref().map_or(0, |t| t.table.len()) + additional;
157        if self.table.capacity() - self.table.len() > need {
158            // We can accommodate the additional items without resizing, so all is well.
159            if cfg!(debug_assertions) {
160                let buckets = self.table.buckets();
161                self.table.reserve(need, |_| unreachable!());
162                assert_eq!(
163                    buckets,
164                    self.table.buckets(),
165                    "resize despite sufficient capacity"
166                );
167            } else {
168                self.table.reserve(need, |_| unreachable!());
169            }
170        } else if self.leftovers.is_some() {
171            // We probably have to resize, but we already have leftovers!
172            //
173            // Here, we're sort of stuck — we can't do this fully incrementally, because we'd need
174            // to keep _three_ tables: the current leftovers, the current table (which would become
175            // the new leftovers), _and_ the new, resized table.
176            //
177            // We do the best we can, which is to carry over all the current leftovers, and _then_
178            // do an incremental resize. This at least moves only the current leftovers, rather
179            // than the current full set of elements.
180            self.carry_all(hasher);
181            self.grow(additional);
182        } else {
183            // We probably have to resize, but since we don't have any leftovers, we can do it
184            // incrementally.
185            self.grow(additional);
186        }
187    }
188
189    /// Tries to ensure that at least `additional` items can be inserted into
190    /// the table without reallocation.
191    ///
192    /// While we try to make this incremental where possible, it may require all-at-once resizing.
193    #[cfg_attr(feature = "inline-more", inline)]
194    pub(crate) fn try_reserve(
195        &mut self,
196        additional: usize,
197        hasher: impl Fn(&T) -> u64,
198    ) -> Result<(), TryReserveError> {
199        let need = self.leftovers.as_ref().map_or(0, |t| t.table.len()) + additional;
200        if self.table.capacity() - self.table.len() > need {
201            // we can accommodate the additional items without resizing, so all good
202            if cfg!(debug_assertions) {
203                let buckets = self.table.buckets();
204                self.table
205                    .try_reserve(need, |_| unreachable!())
206                    .expect("resize despite sufficient capacity");
207                assert_eq!(
208                    buckets,
209                    self.table.buckets(),
210                    "resize despite sufficient capacity"
211                );
212            } else {
213                self.table
214                    .try_reserve(need, |_| unreachable!())
215                    .expect("resize despite sufficient capacity");
216            }
217            Ok(())
218        } else if self.leftovers.is_some() {
219            self.carry_all(hasher);
220            self.try_grow(additional, true)
221        } else {
222            self.try_grow(additional, true)
223        }
224    }
225
226    /// Inserts a new element into the table, and returns its raw bucket.
227    ///
228    /// This does not check if the given element already exists in the table.
229    #[cfg_attr(feature = "inline-more", inline)]
230    pub(crate) fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
231        if self.table.capacity() == self.table.len() {
232            assert!(self.leftovers.is_none());
233            // Even though this _may_ succeed without growing due to tombstones, handling
234            // that case is convoluted, so we just assume this would grow the map.
235            self.grow(1);
236            return self.insert(hash, value, hasher);
237        }
238
239        // SAFETY: unknown requirements, but _was_ safe
240        unsafe { self.insert_no_grow(hash, value, hasher) }
241    }
242
243    /// Inserts a new element into the table, and returns a mutable reference to it.
244    ///
245    /// This does not check if the given element already exists in the table.
246    #[cfg_attr(feature = "inline-more", inline)]
247    pub(crate) fn insert_entry(
248        &mut self,
249        hash: u64,
250        value: T,
251        hasher: impl Fn(&T) -> u64,
252    ) -> &mut T {
253        unsafe { self.insert(hash, value, hasher).as_mut() }
254    }
255
    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    ///
    /// Note that unlike `hashbrown::RawTable::insert_no_grow`, this _does_ take a `hasher`.
    /// This is because while the insert won't grow the table, it may need to carry over some
    /// elements from the pre-resize table to the current table, which requires re-hashing.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn insert_no_grow(
        &mut self,
        hash: u64,
        value: T,
        hasher: impl Fn(&T) -> u64,
    ) -> Bucket<T> {
        // SAFETY: unknown requirements, but mirrored to caller
        let bucket = unsafe { self.table.insert_no_grow(hash, value) };

        if self.leftovers.is_some() {
            // Also carry some items over: every insert chips away at an
            // in-progress resize by moving up to `R` elements.
            self.carry(hasher);
        }

        // New elements always land in the main table.
        Bucket {
            bucket,
            in_main: true,
        }
    }
285
    /// Temporary removes a bucket, applying the given function to the removed
    /// element and optionally put back the returned value in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        if bucket.in_main {
            self.table.replace_bucket_with(bucket.bucket, f)
        } else if let Some(ref mut lo) = self.leftovers {
            // Borrow only the cached carry iterator, so the closure below can
            // update it while `lo.table` is independently borrowed.
            let items = &mut lo.items;
            let b = bucket.bucket.clone();
            lo.table.replace_bucket_with(b, move |t| {
                let v = f(t);
                if v.is_none() {
                    // The element was not put back, so the bucket is now
                    // empty; make sure the carry iterator never yields it.
                    items.reflect_remove(&bucket.bucket);
                }
                v
            })
        } else {
            unreachable!("invalid bucket state");
        }
    }
313
314    /// Searches for an element in the table.
315    #[inline]
316    pub(crate) fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
317        let e = self.table.find(hash, &mut eq);
318        if let Some(bucket) = e {
319            return Some(Bucket {
320                bucket,
321                in_main: true,
322            });
323        }
324
325        if let Some(OldTable { ref table, .. }) = self.leftovers {
326            table.find(hash, eq).map(|bucket| Bucket {
327                bucket,
328                in_main: false,
329            })
330        } else {
331            None
332        }
333    }
334
335    /// Gets a reference to an element in the table.
336    #[inline]
337    pub(crate) fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
338        // Avoid `Option::map` because it bloats LLVM IR.
339        match self.find(hash, eq) {
340            Some(bucket) => Some(unsafe { bucket.as_ref() }),
341            None => None,
342        }
343    }
344
345    /// Gets a mutable reference to an element in the table.
346    #[inline]
347    pub(crate) fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
348        // Avoid `Option::map` because it bloats LLVM IR.
349        match self.find(hash, eq) {
350            Some(bucket) => Some(unsafe { bucket.as_mut() }),
351            None => None,
352        }
353    }
354
    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    ///
    /// Only the main table's capacity is reported; leftovers from an
    /// in-progress resize are carried into the main table, so its capacity is
    /// the meaningful bound.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn capacity(&self) -> usize {
        self.table.capacity()
    }
363
364    /// Returns the number of elements in the table.
365    #[cfg_attr(feature = "inline-more", inline)]
366    pub(crate) fn len(&self) -> usize {
367        self.table.len() + self.leftovers.as_ref().map_or(0, |t| t.table.len())
368    }
369
    /// Returns the number of buckets in the table.
    ///
    /// Test-only; counts only the main table's buckets, not any leftovers.
    #[cfg(test)]
    pub(crate) fn buckets(&self) -> usize {
        self.table.buckets()
    }
375
    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    ///
    /// The iterator yields main-table elements first, then any elements that
    /// still sit in the old (pre-resize) table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn iter(&self) -> RawIter<T> {
        RawIter {
            table: self.table.iter(),
            // Clone the cached leftover cursor so iterating does not disturb
            // the carry state.
            leftovers: self.leftovers.as_ref().map(|lo| lo.items.clone()),
        }
    }
387
388    /// Returns an iterator which removes all elements from the table without
389    /// freeing the memory.
390    #[cfg_attr(feature = "inline-more", inline)]
391    pub(crate) fn drain(&mut self) -> RawDrain<'_, T> {
392        RawDrain {
393            table: self.table.drain(),
394            leftovers: self
395                .leftovers
396                .take()
397                .map(|lo| unsafe { lo.table.into_iter_from(lo.items) }),
398        }
399    }
400
    /// Returns an iterator which consumes all elements from the table.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    pub(crate) unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
        RawIntoIter {
            // The caller's main-half cursor pairs with our main table...
            table: self.table.into_iter_from(iter.table),
            // ...while the leftover table (if any) is consumed from its own
            // cached cursor (`lo.items`), not from `iter.leftovers`.
            leftovers: self.leftovers.map(|lo| lo.table.into_iter_from(lo.items)),
        }
    }
413}
414
415fn and_carry_with_hasher<T: Clone>(
416    table: &mut raw::RawTable<T>,
417    leftovers: &Option<OldTable<T>>,
418    hasher: impl Fn(&T) -> u64,
419) {
420    if let Some(lo) = leftovers {
421        for e in lo.items.clone() {
422            let v = unsafe { e.as_ref() };
423            let hash = hasher(v);
424            table.insert(hash, v.clone(), &hasher);
425        }
426    }
427}
428
429impl<T: Clone> RawTable<T> {
430    /// Variant of `clone_from` to use when a hasher is available.
431    pub(crate) fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
432        let _ = self.leftovers.take();
433        self.table.clone_from_with_hasher(&source.table, &hasher);
434        // Since we're doing the work of cloning anyway, we might as well carry the leftovers.
435        and_carry_with_hasher(&mut self.table, &source.leftovers, hasher);
436    }
437
438    /// Variant of `clone` to use when a hasher is available.
439    pub(crate) fn clone_with_hasher(&self, hasher: impl Fn(&T) -> u64) -> Self {
440        let mut table = self.table.clone();
441        // Since we're doing the work of cloning anyway, we might as well carry the leftovers.
442        and_carry_with_hasher(&mut table, &self.leftovers, hasher);
443        RawTable {
444            table,
445            leftovers: None,
446        }
447    }
448}
449
450impl<T> RawTable<T> {
    /// Infallibly starts an incremental resize sized for `extra` additional
    /// elements.
    #[cold]
    #[inline(never)]
    fn grow(&mut self, extra: usize) {
        if self.try_grow(extra, false).is_err() {
            // SAFETY: with `fallible = false`, `try_grow` allocates via
            // `with_capacity` (never the fallible `try_with_capacity` path)
            // and always returns `Ok`, so this branch cannot be reached.
            unsafe { core::hint::unreachable_unchecked() };
        }
    }
458
459    #[cold]
460    fn try_grow(&mut self, extra: usize, fallible: bool) -> Result<(), TryReserveError> {
461        debug_assert!(self.leftovers.is_none());
462
463        // We need to grow the table by at least a factor of (R + 1)/R to ensure that
464        // the new table won't _also_ grow while we're still moving items from the old
465        // one.
466        //
467        // Here's how we get to len * (R + 1)/R:
468        //  - We need to move another len items
469        let need = self.table.len();
470        //  - We move R items on each insert, so to move len items takes
471        //    len / R inserts (rounded up!)
472        //  - Since we want to round up, we pull the old +R-1 trick
473        let inserts = (self.table.len() + R - 1) / R;
474        //  - That's len + len/R
475        //    Which is == R*len/R + len/R
476        //    Which is == ((R+1)*len)/R
477        //    Which is == len * (R+1)/R
478        //  - We don't actually use that formula because of integer division.
479        //
480        // We also need to make sure we can fit the additional capacity required for `extra`.
481        // Normally, that'll be handled by `inserts`, but not always!
482        let add = usize::max(extra, inserts);
483        let new_table = if fallible {
484            raw::RawTable::try_with_capacity(need + inserts + add)?
485        } else {
486            raw::RawTable::with_capacity(need + inserts + add)
487        };
488        let old_table = mem::replace(&mut self.table, new_table);
489        if old_table.len() != 0 {
490            let old_table_items = unsafe { old_table.iter() };
491            self.leftovers = Some(OldTable {
492                table: old_table,
493                items: old_table_items,
494            });
495        }
496        Ok(())
497    }
498
    /// Finishes an in-progress incremental resize by moving _all_ remaining
    /// leftover elements into the main table at once.
    ///
    /// Uses the growing `insert` on the main table, so it cannot overflow it.
    #[cold]
    #[inline(never)]
    pub(crate) fn carry_all(&mut self, hasher: impl Fn(&T) -> u64) {
        if let Some(ref mut lo) = self.leftovers {
            // It is safe to continue to access this iterator because:
            //  - we have not de-allocated the table it points into
            //  - we have not grown or shrunk the table it points into
            //
            // NOTE: Calling next here could be expensive, as the iter needs to search for the
            // next non-empty bucket. as the map grows in size, that search time will increase
            // linearly.
            while let Some(e) = lo.items.next() {
                // We need to remove the item in this bucket from the old map
                // to the resized map, without shrinking the old map.
                let (value, _) = unsafe { lo.table.remove(e) };
                let hash = hasher(&value);
                self.table.insert(hash, value, &hasher);
            }
            // The resize is finally fully complete.
            let _ = self.leftovers.take();
        }
    }
521
    /// Advances an in-progress incremental resize by carrying up to `R`
    /// leftover elements from the old table into the main table.
    ///
    /// Called by `insert_no_grow` on every insert while leftovers exist; the
    /// main table's sizing (see `try_grow`) guarantees the `insert_no_grow`
    /// calls below have room.
    #[cold]
    #[inline(never)]
    pub(crate) fn carry(&mut self, hasher: impl Fn(&T) -> u64) {
        if let Some(ref mut lo) = self.leftovers {
            for _ in 0..R {
                // It is safe to continue to access this iterator because:
                //  - we have not de-allocated the table it points into
                //  - we have not grown or shrunk the table it points into
                //
                // NOTE: Calling next here could be expensive, as the iter needs to search for the
                // next non-empty bucket. as the map grows in size, that search time will increase
                // linearly.
                if let Some(e) = lo.items.next() {
                    // We need to remove the item in this bucket from the old map
                    // to the resized map, without shrinking the old map.
                    let (value, _) = unsafe { lo.table.remove(e) };
                    let hash = hasher(&value);
                    // SAFETY: unknown requirements, but _was_ safe
                    unsafe { self.table.insert_no_grow(hash, value) };
                } else {
                    // The resize is finally fully complete.
                    let _ = self.leftovers.take();
                    return;
                }
            }

            if lo.table.len() == 0 {
                // The resize is finally fully complete.
                let _ = self.leftovers.take();
            }
        }
    }
554
    /// Returns true while an incremental resize is in progress, i.e. while
    /// some elements still live in the old, pre-resize table.
    pub(crate) fn is_split(&self) -> bool {
        self.leftovers.is_some()
    }
558
    /// Direct access to the main (current) table, for tests and the `rayon`
    /// integration.
    #[cfg(any(test, feature = "rayon"))]
    pub(crate) fn main(&self) -> &raw::RawTable<T> {
        &self.table
    }
563
    /// Direct access to the old, pre-resize table (if a resize is in
    /// progress), for tests and the `rayon` integration.
    #[cfg(any(test, feature = "rayon"))]
    pub(crate) fn leftovers(&self) -> Option<&raw::RawTable<T>> {
        self.leftovers.as_ref().map(|lo| &lo.table)
    }
568}
569
impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        // SAFETY: `iter` requires the table to outlive the iterator; here the
        // iterator is immediately handed back to `into_iter_from`, which
        // takes ownership of the table, so the pairing is valid.
        unsafe {
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
582
/// The previous table kept alive during an incremental resize, together with
/// a cursor over the elements that still need to be carried over.
struct OldTable<T> {
    /// The pre-resize table; shrinks as elements are moved to the main table.
    table: raw::RawTable<T>,

    // We cache an iterator over the old table's buckets so we don't need to do a linear search
    // across buckets we know are empty each time we want to move more items.
    items: raw::RawIter<T>,
}
590
/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields bucket is unspecified and may
///   change in the future.
pub(crate) struct RawIter<T> {
    /// Cursor over the main table's buckets; exhausted first.
    table: raw::RawIter<T>,
    /// Cursor over the old table's buckets, if a resize is in progress.
    leftovers: Option<raw::RawIter<T>>,
}
607
608impl<T> Clone for RawIter<T> {
609    #[cfg_attr(feature = "inline-more", inline)]
610    fn clone(&self) -> Self {
611        Self {
612            table: self.table.clone(),
613            leftovers: self.leftovers.clone(),
614        }
615    }
616}
617
618impl<T> Iterator for RawIter<T> {
619    type Item = Bucket<T>;
620
621    #[cfg_attr(feature = "inline-more", inline)]
622    fn next(&mut self) -> Option<Self::Item> {
623        let leftovers = &mut self.leftovers;
624        self.table
625            .next()
626            .map(|bucket| Bucket {
627                bucket,
628                in_main: true,
629            })
630            .or_else(|| {
631                leftovers.as_mut()?.next().map(|bucket| Bucket {
632                    bucket,
633                    in_main: false,
634                })
635            })
636    }
637
638    #[cfg_attr(feature = "inline-more", inline)]
639    fn size_hint(&self) -> (usize, Option<usize>) {
640        let (mut lo, mut hi) = self.table.size_hint();
641        if let Some(ref left) = self.leftovers {
642            let (lo2, hi2) = left.size_hint();
643            lo += lo2;
644            if let (Some(ref mut hi), Some(hi2)) = (&mut hi, hi2) {
645                *hi += hi2;
646            }
647        }
648        (lo, hi)
649    }
650}
651
// NOTE(review): `ExactSizeIterator` relies on both inner `raw::RawIter`
// halves reporting exact sizes (inherited from hashbrown) — confirm upstream.
// Fusedness follows from both halves being fused once exhausted.
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
654
/// Iterator which consumes a table and returns elements.
pub(crate) struct RawIntoIter<T> {
    /// Consuming iterator over the main table.
    table: raw::RawIntoIter<T>,
    /// Consuming iterator over the old, pre-resize table, if any; drained
    /// first and dropped as soon as it is exhausted.
    leftovers: Option<raw::RawIntoIter<T>>,
}
660
661impl<T> RawIntoIter<T> {
662    /// Returns a by-reference iterator over the remaining items of this iterator.
663    #[cfg_attr(feature = "inline-more", inline)]
664    pub(crate) fn iter(&self) -> RawIter<T> {
665        RawIter {
666            table: self.table.iter(),
667            leftovers: self.leftovers.as_ref().map(|lo| lo.iter()),
668        }
669    }
670}
671
672impl<T> Iterator for RawIntoIter<T> {
673    type Item = T;
674
675    #[cfg_attr(feature = "inline-more", inline)]
676    fn next(&mut self) -> Option<T> {
677        if let Some(ref mut lo) = self.leftovers {
678            if let Some(e) = lo.next() {
679                return Some(e);
680            }
681            // Done with leftovers.
682            let _ = self.leftovers.take();
683        }
684        self.table.next()
685    }
686
687    #[cfg_attr(feature = "inline-more", inline)]
688    fn size_hint(&self) -> (usize, Option<usize>) {
689        self.iter().size_hint()
690    }
691}
692
// NOTE(review): exactness relies on the inner hashbrown iterators reporting
// exact sizes — confirm upstream. Fusedness follows from `next` never
// yielding again after both halves are exhausted.
impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}
695
/// Iterator which consumes elements without freeing the table storage.
pub(crate) struct RawDrain<'a, T> {
    /// Draining iterator over the main table (storage is retained).
    table: raw::RawDrain<'a, T>,
    /// Consuming iterator over the detached old table, if a resize was in
    /// progress when `drain` was called; drained first.
    leftovers: Option<raw::RawIntoIter<T>>,
}
701
702impl<T> RawDrain<'_, T> {
703    /// Returns a by-reference iterator over the remaining items of this iterator.
704    #[cfg_attr(feature = "inline-more", inline)]
705    pub(crate) fn iter(&self) -> RawIter<T> {
706        RawIter {
707            table: self.table.iter(),
708            leftovers: self.leftovers.as_ref().map(|lo| lo.iter()),
709        }
710    }
711}
712
// NOTE(review): the body is intentionally empty — the inner `raw::RawDrain`
// and `raw::RawIntoIter` drop their remaining elements themselves. The impl
// appears to exist only for parity with hashbrown's API; confirm before
// removing it.
impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {}
}
717
718impl<T> Iterator for RawDrain<'_, T> {
719    type Item = T;
720
721    #[cfg_attr(feature = "inline-more", inline)]
722    fn next(&mut self) -> Option<T> {
723        if let Some(ref mut lo) = self.leftovers {
724            if let Some(e) = lo.next() {
725                return Some(e);
726            }
727            // Done with leftovers.
728            let _ = self.leftovers.take();
729        }
730        self.table.next()
731    }
732
733    #[cfg_attr(feature = "inline-more", inline)]
734    fn size_hint(&self) -> (usize, Option<usize>) {
735        let (mut lo, mut hi) = self.table.size_hint();
736        if let Some(ref left) = self.leftovers {
737            let (lo2, hi2) = left.size_hint();
738            lo += lo2;
739            if let (Some(ref mut hi), Some(hi2)) = (&mut hi, hi2) {
740                *hi += hi2;
741            }
742        }
743        (lo, hi)
744    }
745}
746
// NOTE(review): exactness relies on the inner hashbrown iterators reporting
// exact sizes — confirm upstream. Fusedness follows from `next` staying
// `None` once both halves are exhausted.
impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}