// hashbrown_tstd/raw/mod.rs
//
// Raw hash table implementation (control-byte/group probing design).
1use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
2use crate::scopeguard::guard;
3use crate::CollectionAllocErr;
4use core::alloc::Layout;
5use core::hint;
6use core::iter::FusedIterator;
7use core::marker::PhantomData;
8use core::mem;
9use core::mem::ManuallyDrop;
10use core::ptr::NonNull;
11
cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    //
    // Note the `not(miri)` below: the SSE2 path is explicitly disabled when
    // running under Miri, which then uses the generic implementation instead.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        // Portable fallback used on all other targets (and under Miri).
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}
34
35mod bitmask;
36
37use self::bitmask::BitMask;
38use self::imp::Group;
39
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
// Stable fallbacks: identity functions with the same signatures, so call
// sites compile unchanged but carry no branch-weight information.
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    b
}
54
/// Returns the number of `T` elements between `from` and `to`.
///
/// # Safety
/// Both pointers must point into the same allocation with `to >= from`, and
/// `T` must not be zero-sized (callers such as `bucket_index` handle the ZST
/// case separately).
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
/// Stable fallback for `offset_from`: the distance is computed with integer
/// arithmetic on the raw addresses instead of the nightly-only intrinsic.
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}
65
/// Whether memory allocation errors should return an error or abort.
///
/// `Fallible` is used by the `try_*` entry points, which surface failures as
/// `CollectionAllocErr`; `Infallible` is used by the panicking entry points.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}
72
73impl Fallibility {
74    /// Error to return on capacity overflow.
75    #[cfg_attr(feature = "inline-more", inline)]
76    fn capacity_overflow(self) -> CollectionAllocErr {
77        match self {
78            Fallibility::Fallible => CollectionAllocErr::CapacityOverflow,
79            Fallibility::Infallible => panic!("Hash table capacity overflow"),
80        }
81    }
82
83    /// Error to return on allocation error.
84    #[cfg_attr(feature = "inline-more", inline)]
85    fn alloc_err(self, layout: Layout) -> CollectionAllocErr {
86        match self {
87            Fallibility::Fallible => CollectionAllocErr::AllocErr { layout },
88            Fallibility::Infallible => handle_alloc_error(layout),
89        }
90    }
91}
92
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    // A full byte holds a 7-bit hash fragment, so viewed as a signed byte it
    // is non-negative; EMPTY and DELETED both have the sign bit set.
    (ctrl as i8) >= 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    (ctrl as i8) < 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    // EMPTY (0xff) has its low bit set; DELETED (0x80) does not.
    ctrl & 1 != 0
}
117
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Take the 7 most significant *useful* bits of the hash. Some hashers
    // (such as FxHash) only produce a usize worth of entropy, leaving the
    // upper 32 bits zeroed on 32-bit targets, so the shift is based on the
    // narrower of the two widths.
    let effective_bytes = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    ((hash >> (effective_bytes * 8 - 7)) & 0x7f) as u8 // truncation
}
137
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    // Mask used to wrap positions back into the table (bucket count - 1).
    bucket_mask: usize,
    // Current position in the probe sequence.
    pos: usize,
    // Current stride; grows by Group::WIDTH on every step.
    stride: usize,
}
152
153impl Iterator for ProbeSeq {
154    type Item = usize;
155
156    #[inline]
157    fn next(&mut self) -> Option<usize> {
158        // We should have found an empty bucket by now and ended the probe.
159        debug_assert!(
160            self.stride <= self.bucket_mask,
161            "Went past end of probe sequence"
162        );
163
164        let result = self.pos;
165        self.stride += Group::WIDTH;
166        self.pos += self.stride;
167        self.pos &= self.bucket_mask;
168        Some(result)
169    }
170}
171
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    // Small tables only need a single spare slot; larger tables keep 1/8 of
    // the buckets empty (87.5% maximum load).
    //
    // Be careful when modifying this: calculate_layout relies on the
    // overflow check performed here.
    let adjusted_cap = match cap {
        0..=7 => cap + 1,
        _ => cap.checked_mul(8)? / 7,
    };

    // Any overflow was caught by the checked_mul above, and rounding the
    // divided value up to a power of two cannot overflow either (the
    // division shrank it first).
    Some(adjusted_cap.next_power_of_two())
}
196
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[cfg_attr(feature = "inline-more", inline)]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    match bucket_mask {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        0..=7 => bucket_mask,
        // For larger tables we reserve 12.5% of the slots as empty.
        mask => (mask + 1) / 8 * 7,
    }
}
210
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the buckets in the allocation.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;

    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };

    // `extend` yields the combined layout plus the offset of `data` within
    // it — exactly the (layout, bucket-offset) pair callers need.
    ctrl.extend(data).ok()
}
235
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the buckets in the allocation.
///
/// Stable counterpart of the nightly version above; returns `None` on
/// overflow.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Manual layout calculation since Layout methods are not yet stable.
    // The control bytes (buckets + Group::WIDTH of them) come first; the
    // bucket array starts at the next boundary aligned for both T and Group.
    let data_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    let data_offset = (buckets + Group::WIDTH).checked_add(data_align - 1)? & !(data_align - 1);
    let len = data_offset.checked_add(mem::size_of::<T>().checked_mul(buckets)?)?;

    Some((
        unsafe { Layout::from_size_align_unchecked(len, data_align) },
        data_offset,
    ))
}
253
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Using *const for variance: this keeps Bucket<T> covariant in T, which
    // a *mut T field would forbid.
    ptr: *const T,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}
267
268impl<T> Clone for Bucket<T> {
269    #[cfg_attr(feature = "inline-more", inline)]
270    fn clone(&self) -> Self {
271        Self { ptr: self.ptr }
272    }
273}
274
impl<T> Bucket<T> {
    /// Creates a `Bucket` for the element at `index` relative to `base`.
    ///
    /// For ZSTs the index itself is stored in the pointer (there is no real
    /// element address), so that `bucket_index` and `erase` still work.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: *const T, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            index as *const T
        } else {
            base.add(index)
        };
        Self { ptr }
    }
    /// Returns a raw pointer to the element.
    ///
    /// For ZSTs the stored value is an index rather than an address, so an
    /// arbitrary well-aligned pointer is returned instead.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr as *mut T
        }
    }
    /// Returns a bucket `offset` elements after this one.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn add(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // ZST buckets store an index, so "advancing" is plain addition.
            (self.ptr as usize + offset) as *const T
        } else {
            self.ptr.add(offset)
        };
        Self { ptr }
    }
    /// Drops the element in place without touching the bucket's memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    /// Moves the element out of the bucket; the bucket's contents must not
    /// be used again afterwards.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    /// Writes `val` into the bucket without dropping any previous contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    /// Returns a shared reference with a caller-chosen lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    /// Returns a mutable reference with a caller-chosen lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    /// Copies the element from `other` into this bucket (bitwise; nothing is
    /// dropped and `other` is left untouched).
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
328
/// A raw hash table with an unsafe API.
///
/// Control bytes and buckets share a single allocation; `ctrl` points at its
/// start and `data` at the bucket array within it (see `new_uninitialized`).
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // Pointer to the array of control bytes
    ctrl: NonNull<u8>,

    // Pointer to the array of buckets
    data: NonNull<T>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}
350
351impl<T> RawTable<T> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new() -> Self {
        Self {
            data: NonNull::dangling(),
            // Be careful to cast the entire slice to a raw pointer: the
            // shared static empty group serves as the control-byte array,
            // so no allocation is needed for an empty table.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty().as_ptr() as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            marker: PhantomData,
        }
    }
369
    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    ///
    /// # Safety
    /// `buckets` must be a non-zero power of two, and the caller must
    /// initialize the control bytes before the table is used.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        buckets: usize,
        fallability: Fallibility,
    ) -> Result<Self, CollectionAllocErr> {
        debug_assert!(buckets.is_power_of_two());
        // One allocation holds the control bytes followed by the buckets;
        // calculate_layout also yields the offset of the bucket array.
        let (layout, data_offset) =
            calculate_layout::<T>(buckets).ok_or_else(|| fallability.capacity_overflow())?;
        let ctrl = NonNull::new(alloc(layout)).ok_or_else(|| fallability.alloc_err(layout))?;
        let data = NonNull::new_unchecked(ctrl.as_ptr().add(data_offset) as *mut T);
        Ok(Self {
            data,
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
        })
    }
392
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    ///
    /// A zero capacity yields the allocation-free singleton table; otherwise
    /// every control byte of the new table is initialized to EMPTY.
    fn try_with_capacity(
        capacity: usize,
        fallability: Fallibility,
    ) -> Result<Self, CollectionAllocErr> {
        if capacity == 0 {
            Ok(Self::new())
        } else {
            unsafe {
                let buckets =
                    capacity_to_buckets(capacity).ok_or_else(|| fallability.capacity_overflow())?;
                let result = Self::new_uninitialized(buckets, fallability)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }
412
413    /// Allocates a new hash table with at least enough capacity for inserting
414    /// the given number of elements without reallocating.
415    pub fn with_capacity(capacity: usize) -> Self {
416        Self::try_with_capacity(capacity, Fallibility::Infallible)
417            .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() })
418    }
419
    /// Deallocates the table without dropping any entries.
    ///
    /// # Safety
    /// The table must actually own an allocation (i.e. not be the empty
    /// singleton) and must not be used afterwards.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        // Recomputing the layout cannot fail here: it succeeded when the
        // table was allocated with the same bucket count.
        let (layout, _) =
            calculate_layout::<T>(self.buckets()).unwrap_or_else(|| hint::unreachable_unchecked());
        dealloc(self.ctrl.as_ptr(), layout);
    }
427
    /// Returns the index of a bucket from a `Bucket`.
    ///
    /// For ZSTs the bucket stores the index directly (see `Bucket`), so no
    /// pointer arithmetic is needed.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            bucket.ptr as usize
        } else {
            offset_from(bucket.ptr, self.data.as_ptr())
        }
    }
437
    /// Returns a pointer to a control byte.
    ///
    /// # Safety
    /// `index` must be less than `self.num_ctrl_bytes()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }
444
    /// Returns a pointer to an element in the table.
    ///
    /// # Safety
    /// `index` must be less than the bucket count, and the table must not be
    /// the unallocated empty singleton (`bucket_mask == 0`).
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data.as_ptr(), index)
    }
452
    /// Erases an element from the table without dropping it.
    ///
    /// # Safety
    /// `item` must refer to a full bucket of this table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();

        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            // Safe to mark the bucket EMPTY outright, which also returns a
            // slot to the insertion budget.
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }
479
    /// Returns an iterator for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            bucket_mask: self.bucket_mask,
            // Start at the bucket selected by the primary hash, wrapped
            // into the table's range.
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }
493
    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    ///
    /// # Safety
    /// `index` must be less than the bucket count.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     Real    |             Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
521
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        for pos in self.probe_seq(hash) {
            unsafe {
                let group = Group::load(self.ctrl(pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    } else {
                        return result;
                    }
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }
559
    /// Marks all table buckets as empty without dropping their contents.
    ///
    /// The backing allocation (if any) is kept; `items` is zeroed and
    /// `growth_left` is reset to the table's full capacity.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                // Bulk-reset every control byte (including the replicated
                // trailing group) to EMPTY.
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }
571
    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic
        let self_ = guard(self, |self_| self_.clear_no_drop());

        // Only walk the elements when T has drop glue; otherwise resetting
        // the control bytes (done by the guard) is all that is needed.
        if mem::needs_drop::<T>() {
            unsafe {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
    }
586
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            *self = Self::new();
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity(min_size)
            } else {
                // Rehash into a freshly allocated, smaller table.
                self.resize(min_size, hasher, Fallibility::Infallible)
                    .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
            }
        }
    }
618
    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    ///
    /// Capacity overflow / allocation failure panics or aborts (infallible).
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.growth_left {
            // Infallible mode never returns Err (it panics/aborts instead),
            // so the error branch is unreachable.
            self.reserve_rehash(additional, hasher, Fallibility::Infallible)
                .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
        }
    }
628
629    /// Tries to ensure that at least `additional` items can be inserted into
630    /// the table without reallocation.
631    #[cfg_attr(feature = "inline-more", inline)]
632    pub fn try_reserve(
633        &mut self,
634        additional: usize,
635        hasher: impl Fn(&T) -> u64,
636    ) -> Result<(), CollectionAllocErr> {
637        if additional > self.growth_left {
638            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
639        } else {
640            Ok(())
641        }
642    }
643
    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// Either rehashes in place (when plenty of capacity is merely locked up
    /// by DELETED entries) or grows the allocation.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), CollectionAllocErr> {
        let new_items = self
            .items
            .checked_add(additional)
            .ok_or_else(|| fallability.capacity_overflow())?;

        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallability,
            )
        }
    }
674
    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // Bulk convert all full control bytes to DELETED, and all DELETED
            // control bytes to EMPTY. This effectively frees up all buckets
            // containing a DELETED entry.
            for i in (0..self.buckets()).step_by(Group::WIDTH) {
                let group = Group::load_aligned(self.ctrl(i));
                let group = group.convert_special_to_empty_and_full_to_deleted();
                group.store_aligned(self.ctrl(i));
            }

            // Fix up the trailing control bytes. See the comments in set_ctrl
            // for the handling of tables smaller than the group width.
            if self.buckets() < Group::WIDTH {
                self.ctrl(0)
                    .copy_to(self.ctrl(Group::WIDTH), self.buckets());
            } else {
                self.ctrl(0)
                    .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
            }

            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // element since we lost their hash and have no way of recovering it
            // without risking another panic.
            let mut guard = guard(self, |self_| {
                if mem::needs_drop::<T>() {
                    // Anything still marked DELETED was not rehashed; drop it.
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }
                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    let probe_index = |pos: usize| {
                        (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                            / Group::WIDTH
                    };
                    if likely(probe_index(i) == probe_index(new_i)) {
                        guard.set_ctrl(i, h2(hash));
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = *guard.ctrl(new_i);
                    guard.set_ctrl(new_i, h2(hash));

                    if prev_ctrl == EMPTY {
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.set_ctrl(i, EMPTY);
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }

            // All elements rehashed without a panic; recompute the insertion
            // budget and defuse the cleanup guard.
            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }
773
    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            debug_assert!(self.items <= capacity);

            // Allocate and initialize the new table.
            let mut new_table = Self::try_with_capacity(capacity, fallability)?;
            new_table.growth_left -= self.items;
            new_table.items = self.items;

            // The hash function may panic, in which case we simply free the new
            // table without dropping any elements that may have been copied into
            // it.
            //
            // This guard is also used to free the old table on success, see
            // the comment at the bottom of this function.
            let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
                if !new_table.is_empty_singleton() {
                    new_table.free_buckets();
                }
            });

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let index = new_table.find_insert_slot(hash);
                new_table.set_ctrl(index, h2(hash));
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(self, &mut new_table);

            Ok(())
        }
    }
825
    /// Inserts a new element into the table.
    ///
    /// This does not check if the given element already exists in the table.
    ///
    /// Returns the bucket the value was written to. May grow (and rehash) the
    /// table via `hasher` if no free capacity remains.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.ctrl(index);
            if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
                // No growth budget left and the found slot is truly EMPTY
                // (not DELETED): grow, then search again — `reserve`
                // rehashes, invalidating the previous index. Note that the
                // resize only happens when `old_ctrl` is EMPTY, so the
                // accounting below stays correct after the re-probe.
                self.reserve(1, hasher);
                index = self.find_insert_slot(hash);
            }

            let bucket = self.bucket(index);
            // Only filling an EMPTY slot consumes growth budget; reusing a
            // DELETED tombstone leaves the EMPTY count unchanged.
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }
851
    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rustc-internal-api")]
    pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        unsafe {
            let index = self.find_insert_slot(hash);
            let bucket = self.bucket(index);

            // If we are replacing a DELETED entry then we don't need to update
            // the load counter.
            // NOTE(review): if the caller violates the capacity precondition,
            // this subtraction underflows `growth_left` — there is no check.
            let old_ctrl = *self.ctrl(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;

            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }
875
    /// Searches for an element in the table.
    ///
    /// `hash` is the element's full hash; `eq` compares a candidate bucket
    /// against the key being looked up. Returns the first matching bucket,
    /// or `None` once the probe sequence hits an EMPTY slot.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            // Probe one Group of control bytes at a time.
            for pos in self.probe_seq(hash) {
                let group = Group::load(self.ctrl(pos));
                // Candidates are slots whose control byte matches the H2
                // fragment of the hash; `eq` disambiguates collisions.
                for bit in group.match_byte(h2(hash)) {
                    // Mask so the index wraps around the table.
                    let index = (pos + bit) & self.bucket_mask;
                    let bucket = self.bucket(index);
                    if likely(eq(bucket.as_ref())) {
                        return Some(bucket);
                    }
                }
                // An EMPTY slot ends the probe sequence: the element would
                // have been inserted there if it were present.
                if likely(group.match_empty().any_bit_set()) {
                    return None;
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }
898
    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        // `growth_left` is the remaining insert budget before a resize.
        self.items + self.growth_left
    }
907
    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.items
    }
913
    /// Returns the number of buckets in the table.
    ///
    /// Always a power of two, since `bucket_mask` is `buckets - 1`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }
919
    /// Returns the number of control bytes in the table.
    ///
    /// One byte per bucket plus `Group::WIDTH` trailing bytes, so that a
    /// full `Group` can be loaded starting at any bucket index (see the
    /// probing loops, which load whole groups).
    #[cfg_attr(feature = "inline-more", inline)]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }
925
    /// Returns whether this table points to the empty singleton with a capacity
    /// of 0.
    ///
    /// Such a table owns no heap allocation, so it must never be freed.
    #[cfg_attr(feature = "inline-more", inline)]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }
932
    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        // Cover the whole table starting at bucket 0; `items` lets the
        // iterator report an exact length.
        let data = Bucket::from_base_index(self.data.as_ptr(), 0);
        RawIter {
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
            items: self.items,
        }
    }
945
    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory. It is up to the caller to ensure that the `RawTable`
    /// outlives the `RawDrain`. Because we cannot make the `next` method unsafe
    /// on the `RawDrain`, we have to make the `drain` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
        // Struct fields are evaluated in source order: the iterator is
        // captured first, then the table is moved out of `self` (leaving an
        // empty table behind in case the drain is leaked), and finally the
        // original location is recorded so Drop can move the table back.
        // The iterator remains valid because moving the `RawTable` value
        // does not move its heap allocation.
        RawDrain {
            iter: self.iter(),
            table: ManuallyDrop::new(mem::replace(self, Self::new())),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }
959
    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    ///
    /// Returns `None` for the unallocated empty singleton.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.is_empty_singleton() {
            None
        } else {
            // The layout was computed successfully when the table was
            // allocated, so recomputing it cannot fail here.
            let (layout, _) = calculate_layout::<T>(self.buckets())
                .unwrap_or_else(|| unsafe { hint::unreachable_unchecked() });
            Some((self.ctrl.cast(), layout))
        };
        // Suppress Drop: ownership of the buffer passes to the caller.
        mem::forget(self);
        alloc
    }
974}
975
// SAFETY: `RawTable` owns its elements outright, so sending or sharing the
// table across threads is equivalent to sending/sharing `T` values.
unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}
978
impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            // Nothing allocated: just hand out another empty-singleton table.
            Self::new()
        } else {
            unsafe {
                // ManuallyDrop prevents a double-free: if cloning panics, the
                // on-panic closure below frees the buckets, and the new
                // table's own Drop must not run as well.
                let mut new_table = ManuallyDrop::new(
                    Self::new_uninitialized(self.buckets(), Fallibility::Infallible)
                        .unwrap_or_else(|_| hint::unreachable_unchecked()),
                );

                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });

                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new();
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }

                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    // Overwrite `*self` without running its destructor; the
                    // old buckets were already freed just above.
                    (self as *mut Self).write(
                        Self::new_uninitialized(source.buckets(), Fallibility::Infallible)
                            .unwrap_or_else(|_| hint::unreachable_unchecked()),
                    );
                }

                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
1033
/// Specialization of `clone_from` for `Copy` types
///
/// `on_panic` runs if cloning an element panics, letting the caller choose
/// between freeing the buckets (`clone`) and resetting the control bytes
/// (`clone_from`).
trait RawTableClone {
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone> RawTableClone for RawTable<T> {
    // On nightly this is a `default` method so the `T: Copy` impl below can
    // specialize it with a straight memcpy.
    #[cfg(feature = "nightly")]
    #[cfg_attr(feature = "inline-more", inline)]
    default unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
        self.clone_from_impl(source, on_panic);
    }

    // Without specialization, always clone element by element.
    #[cfg(not(feature = "nightly"))]
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
        self.clone_from_impl(source, on_panic);
    }
}
#[cfg(feature = "nightly")]
impl<T: Copy> RawTableClone for RawTable<T> {
    // `Copy` types cannot panic while being duplicated, so `_on_panic` is
    // unused and both control bytes and buckets are copied with raw memcpys.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data
            .as_ptr()
            .copy_to_nonoverlapping(self.data.as_ptr(), self.buckets());

        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}
1067
1068impl<T: Clone> RawTable<T> {
1069    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
1070    #[cfg_attr(feature = "inline-more", inline)]
1071    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
1072        // Copy the control bytes unchanged. We do this in a single pass
1073        source
1074            .ctrl(0)
1075            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
1076
1077        // The cloning of elements may panic, in which case we need
1078        // to make sure we drop only the elements that have been
1079        // cloned so far.
1080        let mut guard = guard((0, &mut *self), |(index, self_)| {
1081            if mem::needs_drop::<T>() {
1082                for i in 0..=*index {
1083                    if is_full(*self_.ctrl(i)) {
1084                        self_.bucket(i).drop();
1085                    }
1086                }
1087            }
1088
1089            // Depending on whether we were called from clone or clone_from, we
1090            // either need to free the memory for the destination table or just
1091            // clear the control bytes.
1092            on_panic(self_);
1093        });
1094
1095        for from in source.iter() {
1096            let index = source.bucket_index(&from);
1097            let to = guard.1.bucket(index);
1098            to.write(from.as_ref().clone());
1099
1100            // Update the index in case we need to unwind.
1101            guard.0 = index;
1102        }
1103
1104        // Successfully cloned all items, no need to clean up.
1105        mem::forget(guard);
1106
1107        self.items = source.items;
1108        self.growth_left = source.growth_left;
1109    }
1110
1111    /// Variant of `clone_from` to use when a hasher is available.
1112    #[cfg(any(feature = "nightly", feature = "raw"))]
1113    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
1114        // If we have enough capacity in the table, just clear it and insert
1115        // elements one by one. We don't do this if we have the same number of
1116        // buckets as the source since we can just copy the contents directly
1117        // in that case.
1118        if self.buckets() != source.buckets()
1119            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
1120        {
1121            self.clear();
1122
1123            let guard_self = guard(&mut *self, |self_| {
1124                // Clear the partially copied table if a panic occurs, otherwise
1125                // items and growth_left will be out of sync with the contents
1126                // of the table.
1127                self_.clear();
1128            });
1129
1130            unsafe {
1131                for item in source.iter() {
1132                    // This may panic.
1133                    let item = item.as_ref().clone();
1134                    let hash = hasher(&item);
1135
1136                    // We can use a simpler version of insert() here since:
1137                    // - there are no DELETED entries.
1138                    // - we know there is enough space in the table.
1139                    // - all elements are unique.
1140                    let index = guard_self.find_insert_slot(hash);
1141                    guard_self.set_ctrl(index, h2(hash));
1142                    guard_self.bucket(index).write(item);
1143                }
1144            }
1145
1146            // Successfully cloned all items, no need to clean up.
1147            mem::forget(guard_self);
1148
1149            self.items = source.items;
1150            self.growth_left -= source.items;
1151        } else {
1152            self.clone_from(source);
1153        }
1154    }
1155}
1156
#[cfg(feature = "nightly")]
// `#[may_dangle]` (dropck eyepatch): promises the destructor only drops its
// `T`s and never otherwise inspects them, so `T` may contain references that
// no longer live when the table itself is dropped.
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and holds no elements.
        if !self.is_empty_singleton() {
            unsafe {
                // Drop every remaining element, then release the buckets.
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
// Stable fallback: identical logic, but without the dropck eyepatch.
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and holds no elements.
        if !self.is_empty_singleton() {
            unsafe {
                // Drop every remaining element, then release the buckets.
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
1189
impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            // Build the iterator first, then strip the table down to its raw
            // allocation. `into_alloc` forgets `self`, so the elements and
            // the buffer are now owned (and eventually freed) by the
            // returned `RawIntoIter`.
            let iter = self.iter();
            let alloc = self.into_alloc();
            RawIntoIter {
                iter,
                alloc,
                marker: PhantomData,
            }
        }
    }
}
1207
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes.
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range. Iteration stops
    // once `next_ctrl` reaches this.
    end: *const u8,
}
1225
impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        // One past the last control byte in this range.
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }

    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                // The tail starts `mid` bytes past the next group boundary;
                // the data pointer advances by the same amount plus the
                // current group that the head keeps.
                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.add(Group::WIDTH).add(mid),
                    len - mid,
                );
                debug_assert_eq!(self.data.add(Group::WIDTH).add(mid).ptr, tail.data.ptr);
                debug_assert_eq!(self.end, tail.end);
                // Truncate the head so the two halves do not overlap.
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                (self, Some(tail))
            }
        }
    }
}
1289
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
// SAFETY: `RawIterRange` itself only holds raw pointers; the wrapping
// iterator types (e.g. `RawIntoIter`) carry the `T: Send`/`T: Sync` bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
1294
1295impl<T> Clone for RawIterRange<T> {
1296    #[cfg_attr(feature = "inline-more", inline)]
1297    fn clone(&self) -> Self {
1298        Self {
1299            data: self.data.clone(),
1300            next_ctrl: self.next_ctrl,
1301            current_group: self.current_group,
1302            end: self.end,
1303        }
1304    }
1305}
1306
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // Yield the full buckets of the current group one bit at a
                // time, clearing each bit as it is consumed.
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.add(index));
                }

                // Current group exhausted; stop once the range is done.
                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.add(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}
1344
// `next` keeps returning `None` once `next_ctrl` reaches `end`, so the
// iterator is fused.
impl<T> FusedIterator for RawIterRange<T> {}
1346
/// Iterator which returns a raw pointer to every full bucket in the table.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    // Number of elements not yet yielded; kept in sync by `next` so that
    // `size_hint` (and thus `ExactSizeIterator`) is exact.
    items: usize,
}
1352
1353impl<T> Clone for RawIter<T> {
1354    #[cfg_attr(feature = "inline-more", inline)]
1355    fn clone(&self) -> Self {
1356        Self {
1357            iter: self.iter.clone(),
1358            items: self.items,
1359        }
1360    }
1361}
1362
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            // Keep the remaining-count in sync for `size_hint`.
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: `items` counts exactly the buckets not yet yielded.
        (self.items, Some(self.items))
    }
}
1385
// `items` makes `size_hint` exact, and the inner range iterator is fused.
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
1388
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    // Iterator over the full buckets of the detached allocation.
    iter: RawIter<T>,
    // The raw allocation backing the buckets (`None` for the empty
    // singleton); freed in `drop` after remaining elements are dropped.
    alloc: Option<(NonNull<u8>, Layout)>,
    // Marks logical ownership of `T` for variance and drop-check purposes.
    marker: PhantomData<T>,
}
1395
impl<T> RawIntoIter<T> {
    /// Returns a snapshot iterator over the elements not yet consumed.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
1402
// SAFETY: `RawIntoIter` owns the remaining elements and their allocation,
// so its thread-safety follows `T`'s.
unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}
1405
#[cfg(feature = "nightly")]
// `#[may_dangle]`: the destructor only drops its `T`s, never reads them.
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
// Stable fallback: identical logic, but without the dropck eyepatch.
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
1444
1445impl<T> Iterator for RawIntoIter<T> {
1446    type Item = T;
1447
1448    #[cfg_attr(feature = "inline-more", inline)]
1449    fn next(&mut self) -> Option<T> {
1450        unsafe { Some(self.iter.next()?.read()) }
1451    }
1452
1453    #[cfg_attr(feature = "inline-more", inline)]
1454    fn size_hint(&self) -> (usize, Option<usize>) {
1455        self.iter.size_hint()
1456    }
1457}
1458
// Delegates to the inner `RawIter`, which is exact-size and fused.
impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}
1461
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    // Iterator over the full buckets of the moved-out table.
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    orig_table: NonNull<RawTable<T>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}
1476
impl<T> RawDrain<'_, T> {
    /// Returns a snapshot iterator over the elements not yet drained.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
1483
// SAFETY: `RawDrain` owns the drained table and its elements for its
// lifetime, so its thread-safety follows `T`'s.
unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}
1486
impl<T> Drop for RawDrain<'_, T> {
    // Drops the undrained elements, then restores the (now empty) table to
    // its original location so the drained `RawTable` remains valid.
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
1509
1510impl<T> Iterator for RawDrain<'_, T> {
1511    type Item = T;
1512
1513    #[cfg_attr(feature = "inline-more", inline)]
1514    fn next(&mut self) -> Option<T> {
1515        unsafe {
1516            let item = self.iter.next()?;
1517            Some(item.read())
1518        }
1519    }
1520
1521    #[cfg_attr(feature = "inline-more", inline)]
1522    fn size_hint(&self) -> (usize, Option<usize>) {
1523        self.iter.size_hint()
1524    }
1525}
1526
// Delegates to the inner `RawIter`, which is exact-size and fused.
impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}