rune_alloc/hashbrown/raw/mod.rs

use core::alloc::Layout;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::{hint, ptr};

use crate::hashbrown::scopeguard::{guard, ScopeGuard};

use crate::alloc::{Allocator, Global, SizedTypeProperties};
use crate::clone::TryClone;
#[cfg(rune_nightly)]
use crate::clone::TryCopy;
use crate::error::{CustomError, Error};
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
use crate::hint::{likely, unlikely};
use crate::ptr::invalid_mut;

#[cfg(test)]
use crate::testing::*;

use super::{EqFn, ErrorOrInsertSlot, HasherFn};

cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
        mod neon;
        use neon as imp;
    } else {
        mod generic;
        use generic as imp;
    }
}

mod bitmask;

use self::bitmask::BitMaskIter;
use self::imp::Group;

#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}

/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
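
// A quick sketch of the control-byte encoding above (0x3a stands in for an
// arbitrary h2 value):
//
//     assert!(is_special(EMPTY) && special_is_empty(EMPTY));
//     assert!(is_special(DELETED) && !special_is_empty(DELETED));
//     assert!(is_full(0x3a)); // any byte with the top bit clear marks a full bucket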

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

// Constant used by the h2 function to grab the top 7 bits of the hash.
const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
    mem::size_of::<usize>()
} else {
    mem::size_of::<u64>()
};

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    // So we use the MIN_HASH_LEN constant to handle this.
    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
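
// A worked sketch, assuming a 64-bit platform (MIN_HASH_LEN = 8): `h2` shifts
// the hash right by 8 * 8 - 7 = 57 bits and keeps the top 7 bits:
//
//     assert_eq!(h2(u64::MAX), 0x7f);
//     assert_eq!(h2(1 << 57), 0x01);
//     assert_eq!(h2(0), 0x00);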

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}
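
// A worked sketch of the probe sequence, assuming Group::WIDTH = 16 (the SSE2
// case) and 64 buckets (bucket_mask = 63), starting from pos = 0:
//
//     stride:  0   16   32   48
//     pos:     0   16   48   (48 + 48) & 63 = 32
//
// i.e. the group starts 0, 16, 48, 32 are each visited exactly once.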

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    if cap < 8 {
        // We don't bother with a table size of 2 buckets since that can only
        // hold a single element. Instead we skip directly to a 4 bucket table
        // which can hold 3 elements.
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
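
// A few worked values for the mapping above:
//
//     capacity_to_buckets(3)  == Some(4)
//     capacity_to_buckets(5)  == Some(8)
//     capacity_to_buckets(12) == Some(16) // 12 * 8 / 7 = 13, rounded up to 16
//     capacity_to_buckets(28) == Some(32) // 28 * 8 / 7 = 32, already a power of two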

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
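
// A few worked values for the inverse direction:
//
//     bucket_mask_to_capacity(3)  == 3  // 4 buckets, 1 kept empty
//     bucket_mask_to_capacity(7)  == 7  // 8 buckets, 1 kept empty
//     bucket_mask_to_capacity(63) == 56 // 64 buckets at 87.5% load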

/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
/// while keeping the rest of `calculate_layout_for` independent of `T`
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    const fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: if layout.align() > Group::WIDTH {
                layout.align()
            } else {
                Group::WIDTH
            },
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        // We need an additional check to ensure that the allocation doesn't
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
        if len > isize::MAX as usize - (ctrl_align - 1) {
            return None;
        }

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
}
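
// A worked sketch of `calculate_layout_for`, assuming T = (u64, u64)
// (size 16, align 8) and Group::WIDTH = 16, so `TableLayout { size: 16,
// ctrl_align: 16 }`. For 8 buckets:
//
//     ctrl_offset = round_up(16 * 8, 16)     = 128
//     len         = 128 + (8 + Group::WIDTH) = 152
//
// i.e. a single 152-byte allocation: the first 128 bytes hold the data, the
// tail holds the control bytes (one per bucket plus a trailing group).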

/// A reference to an empty bucket into which an element can be inserted.
pub struct InsertSlot {
    index: usize,
}

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually it is a pointer to the next element rather than to the element
    // itself, which is needed to maintain pointer arithmetic invariants;
    // keeping a direct pointer to the element introduces difficulty.
    // Using `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[inline]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    /// Creates a [`Bucket`] that contains a pointer to the data.
    /// The pointer calculation is performed by calculating the
    /// offset from the given `base` pointer (convenience for
    /// `base.as_ptr().sub(index)`).
    ///
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// If the `T` is a ZST, then we instead track the index of the element
    /// in the table so that `erase` works properly (return
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the `base` pointer must not be `dangling` and must point to the
    ///   end of the first `value element` from the `data part` of the table, i.e.
    ///   must be the pointer that is returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    ///   must be no greater than the number returned by the function
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    /// must be no greater than the number returned by the function
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
        // the data part of the table (we start counting from "0", so that
        // in the expression T[last], the "last" index is actually one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
        //
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
        //                   points here in the data part of the table
        //                   (to the start of T1)
        //                        |
        //                        |        `base: NonNull<T>` must point here
        //                        |         (to the end of T0 or to the start of C0)
        //                        v         v
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
        //                           ^
        //                           `from_base_index(base, 1)` returns a pointer
        //                           that points here in the data part of the table
        //                           (to the end of T1)
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
        // or metadata for data.
        let ptr = if T::IS_ZST {
            // won't overflow because index must be less than length (bucket_mask)
            // and bucket_mask is guaranteed to be less than `isize::MAX`
            // (see TableLayout::calculate_layout_for method)
            invalid_mut(index + 1)
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    /// Calculates the index of a [`Bucket`] as the distance between two pointers
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
    /// The returned value is in units of T: the distance in bytes divided by
    /// [`core::mem::size_of::<T>()`].
    ///
    /// If the `T` is a ZST, then we return the index of the element in
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
    ///
    /// This function is the inverse of [`from_base_index`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
    ///
    /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
    /// method, as well as for the correct logic of the work of this crate, the
    /// following rules are necessary and sufficient:
    ///
    /// * the pointer contained in `base` must not be `dangling` and must point to the
    ///   end of the first `element` from the `data part` of the table, i.e.
    ///   must be a pointer that is returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `self` also must not contain a dangling pointer;
    ///
    /// * both `self` and `base` must be created from the same [`RawTable`]
    ///   (or [`RawTableInner`]).
    ///
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTableInner`]: RawTableInner
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
    #[inline]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
        // `element` in the data part of the table (we start counting from "0", so
        // that in the expression T[last], the "last" index actually is one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
        // For example for the 5th element in the table the calculation is performed like this:
        //
        //                        mem::size_of::<T>()
        //                          |
        //                          |         `self = from_base_index(base, 5)` that returns pointer
        //                          |         that points here in the data part of the table
        //                          |         (to the end of T5)
        //                          |           |                    `base: NonNull<T>` must point here
        //                          v           |                    (to the end of T0 or to the start of C0)
        //                        /???\         v                      v
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
        //                                      \__________  __________/
        //                                                 \/
        //                                     `bucket.to_base_index(base)` = 5
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
        if T::IS_ZST {
            // this can not be UB
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }

    /// Acquires the underlying raw pointer `*mut T` to `data`.
    ///
    /// # Note
    ///
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
    /// for properly dropping the data we also need to clear the `data` control bytes. If we
    /// drop the data but do not clear the `data control byte`, it leads to a double drop when
    /// [`RawTable`] goes out of scope.
    ///
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
    /// will not re-evaluate where the new value should go, meaning the value may become
    /// "lost" if its location does not reflect its state.
    ///
    /// [`RawTable`]: crate::hashbrown::raw::RawTable
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    ///
    /// # Examples
    ///
    /// ```
    /// use core::hash::{BuildHasher, Hash};
    /// use core::convert::Infallible;
    ///
    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value = ("a", 100);
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), val: &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, &val.0)));
    ///
    /// let bucket: Bucket<(&str, i32)> = table.find(&mut (), hash, |_: &mut (), (k1, _): &(&str, _)| Ok::<_, Infallible>(k1 == &value.0)).unwrap().unwrap();
    ///
    /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        if T::IS_ZST {
            // Just return an arbitrary ZST pointer which is properly aligned
            // invalid pointer is good enough for ZST
            invalid_mut(mem::align_of::<T>())
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }

    /// Creates a new [`Bucket`] that is offset from `self` by the given
    /// `offset`. The pointer calculation is performed by calculating the
    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
    /// This function is used for iterators.
    ///
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
    /// rules of [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the pointer contained in `self` must not be `dangling`;
    ///
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if T::IS_ZST {
            // invalid pointer is good enough for ZST
            invalid_mut(self.ptr.as_ptr() as usize + offset)
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
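
    // A sketch of the reversed layout described above: for a non-ZST `T`,
    // `next_n(1)` moves one `T` *down* in memory, which is the bucket at the
    // next index. Given some valid `table: RawTable<T>` with `index + 1`
    // still in range:
    //
    //     unsafe {
    //         assert_eq!(
    //             table.bucket(index).next_n(1).as_ptr(),
    //             table.bucket(index + 1).as_ptr(),
    //         );
    //     }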

    /// Executes the destructor (if any) of the pointed-to `data`.
    ///
    /// # Safety
    ///
    /// See [`ptr::drop_in_place`] for safety concerns.
    ///
    /// You should use [`RawTable::erase`] instead of this function,
    /// or be careful with calling this function directly, because for
    /// properly dropping the data we also need to clear the `data` control bytes.
    /// If we drop the data but do not erase the `data control byte`, it leads to
    /// a double drop when [`RawTable`] goes out of scope.
    ///
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    /// Reads the `value` from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// See [`ptr::read`] for safety concerns.
    ///
    /// You should use [`RawTable::remove`] instead of this function,
    /// or be careful with calling this function directly, because the compiler
    /// calls the destructor of the read `value` when it goes out of scope. This
    /// can cause a double drop when [`RawTable`] goes out of scope, because the
    /// `data control byte` has not been erased.
    ///
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
    #[inline]
    pub(crate) unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    /// Overwrites a memory location with the given `value` without reading
    /// or dropping the old value (like the [`ptr::write`] function).
    ///
    /// # Safety
    ///
    /// See [`ptr::write`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if their location
    /// does not reflect their state.
    ///
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub(crate) unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    /// Returns a shared immutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_ref`] for safety concerns.
    ///
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
    ///
    /// # Examples
    ///
    /// ```
    /// use core::hash::{BuildHasher, Hash};
    /// use core::convert::Infallible;
    ///
    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), (val, _): &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, val))).unwrap();
    ///
    /// let bucket: Bucket<(&str, String)> = table.find(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap().unwrap();
    ///
    /// assert_eq!(
    ///     unsafe { bucket.as_ref() },
    ///     &("A pony", "is a small horse".to_owned())
    /// );
    /// ```
    #[inline]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    /// Returns a unique mutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_mut`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if their location
    /// does not reflect their state.
    ///
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    ///
    /// # Examples
    ///
    /// ```
    /// use core::hash::{BuildHasher, Hash};
    /// use core::convert::Infallible;
    ///
    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, k))).unwrap();
    ///
    /// let bucket: Bucket<(&str, String)> = table.find(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap().unwrap();
    ///
    /// unsafe {
    ///     bucket
    ///         .as_mut()
    ///         .1
    ///         .push_str(" less than 147 cm at the withers")
    /// };
    /// assert_eq!(
    ///     unsafe { bucket.as_ref() },
    ///     &(
    ///         "A pony",
    ///         "is a small horse less than 147 cm at the withers".to_owned()
    ///     )
    /// );
    /// # Ok::<_, rune::alloc::Error>(())
    /// ```
    #[inline]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }

    /// Copies `size_of<T>` bytes from `other` to `self`. The source
    /// and destination may *not* overlap.
    ///
    /// # Safety
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns.
    ///
    /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
    /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
    /// in the region beginning at `*self` and the region beginning at `*other` can
    /// [violate memory safety].
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if their location
    /// does not reflect their state.
    ///
    /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
    /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}

/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator = Global> {
    table: RawTableInner,
    alloc: A,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,
}

impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc: Global,
            marker: PhantomData,
        }
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
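    ///
    /// # Examples
    ///
    /// A minimal sketch; the element type is chosen only for illustration:
    ///
    /// ```
    /// use rune::alloc::hashbrown::raw::RawTable;
    ///
    /// let table = RawTable::<(&str, i32)>::try_with_capacity(10)?;
    /// // The table can now accept at least 10 insertions without reallocating.
    /// # let _ = table;
    /// # Ok::<_, rune::alloc::Error>(())
    /// ```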
    pub fn try_with_capacity(capacity: usize) -> Result<Self, Error> {
        Self::try_with_capacity_in(capacity, Global)
    }
}

impl<T, A: Allocator> RawTable<T, A> {
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();

    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    pub const fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(alloc: A, buckets: usize) -> Result<Self, Error> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(&alloc, Self::TABLE_LAYOUT, buckets)?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, Error> {
        Ok(Self {
            table: RawTableInner::try_with_capacity(&alloc, Self::TABLE_LAYOUT, capacity)?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Returns a reference to the underlying allocator.
    #[inline]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns a pointer to one past the last element of the data table.
    #[inline]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
    }

    /// Returns a pointer to the start of the data table.
    #[inline]
    #[cfg(any(feature = "raw", rune_nightly))]
    pub unsafe fn data_start(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
    }

    /// Returns information about the memory allocated by the table.
    ///
    /// `RawTable` allocates a single memory block to store both data and metadata.
    /// This function returns the allocation size and alignment and a pointer to the beginning of the area.
    /// These are the arguments which will be passed to `dealloc` when the table is dropped.
    ///
    /// This function might be useful for memory profiling.
    #[inline]
    pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
        // SAFETY: We use the same `table_layout` that was used to allocate
        // this table.
        unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[inline]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    #[inline]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index);
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Finds and erases an element from the table, dropping it in place.
    /// Returns `true` if an element was found.
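    ///
    /// # Examples
    ///
    /// A minimal usage sketch, modeled on the `Bucket` examples above (the
    /// `ahash`-based hasher is only an illustrative choice):
    ///
    /// ```
    /// use core::hash::{BuildHasher, Hash};
    /// use core::convert::Infallible;
    ///
    /// use rune::alloc::hashbrown::raw::RawTable;
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value = ("a", 100);
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), val: &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, &val.0))).unwrap();
    ///
    /// // The first call erases the element, the second finds nothing.
    /// assert!(table.erase_entry(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap());
    /// assert!(!table.erase_entry(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap());
    /// ```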
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn erase_entry<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        hash: u64,
        eq: impl EqFn<C, T, E>,
    ) -> Result<bool, E> {
        // Avoid `Option::map` because it bloats LLVM IR.
        if let Some(bucket) = self.find(cx, hash, eq)? {
            unsafe {
                self.erase(bucket);
            }
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Removes an element from the table, returning it.
    ///
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
        self.erase_no_drop(&item);
        (
            item.read(),
            InsertSlot {
                index: self.bucket_index(&item),
            },
        )
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        hash: u64,
        eq: impl EqFn<C, T, E>,
    ) -> Result<Option<T>, E> {
        // Avoid `Option::map` because it bloats LLVM IR.
        Ok(match self.find(cx, hash, eq)? {
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
            None => None,
        })
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop();
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        if self.is_empty() {
            // Special case empty table to avoid surprising O(capacity) time.
            return;
        }
        // Ensure that the table is reset even if one of the drops panic
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
            // even in case of panic during the dropping of the elements so
            // that there will be no double drop of the elements.
            self_.table.drop_elements::<T>();
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        min_size: usize,
        hasher: impl HasherFn<C, T, E>,
    ) -> Result<(), CustomError<E>> {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.table.items, min_size);
        if min_size == 0 {
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
            unsafe {
                // SAFETY:
                // 1. We call the function only once;
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                //    and [`TableLayout`] that were used to allocate this table.
                // 3. If any elements' drop function panics, then there will only be a memory leak,
                //    because we have replaced the inner table with a new one.
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
            }
            return Ok(());
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return Ok(()),
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.table.items == 0 {
                let new_inner =
                    RawTableInner::try_with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size)?;
                let mut old_inner = mem::replace(&mut self.table, new_inner);
                unsafe {
                    // SAFETY:
                    // 1. We call the function only once;
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                    //    and [`TableLayout`] that were used to allocate this table.
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
                    //    because we have replaced the inner table with a new one.
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
                }
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                unsafe {
                    // SAFETY:
                    // 1. We know for sure that `min_size >= self.table.items`.
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
                    //    we never exposed RawTable::new_uninitialized in a public API.
                    self.resize(cx, min_size, hasher)?;
                }
            }
        }

        Ok(())
    }

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        additional: usize,
        hasher: impl HasherFn<C, T, E>,
    ) -> Result<(), CustomError<E>> {
        if unlikely(additional > self.table.growth_left) {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            unsafe {
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
                // bytes since we never exposed RawTable::new_uninitialized in a public API.
                self.reserve_rehash(cx, additional, hasher)?;
            }
        }

        Ok(())
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        additional: usize,
        hasher: impl HasherFn<C, T, E>,
    ) -> Result<(), CustomError<E>> {
        if additional > self.table.growth_left {
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
            // bytes since we never exposed RawTable::new_uninitialized in a public API.
            unsafe { self.reserve_rehash(cx, additional, hasher) }
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[cold]
    #[inline(never)]
    unsafe fn reserve_rehash<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        additional: usize,
        hasher: impl HasherFn<C, T, E>,
    ) -> Result<(), CustomError<E>> {
        unsafe {
            // SAFETY:
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
            //    [`TableLayout`] that were used to allocate this table.
            // 2. The `drop` function is the actual drop function of the elements stored in
            //    the table.
            // 3. The caller ensures that the control bytes of the `RawTableInner`
            //    are already initialized.
            self.table.reserve_rehash_inner(
                cx,
                &self.alloc,
                additional,
                &|cx, table, index| hasher.hash(cx, table.bucket::<T>(index).as_ref()),
                Self::TABLE_LAYOUT,
                if T::NEEDS_DROP {
                    Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
                } else {
                    None
                },
            )
        }
    }

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// The caller of this function must ensure that `capacity >= self.table.items`,
    /// otherwise:
    ///
    /// * If `self.table.items != 0`, calling this function with `capacity`
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
    ///
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
    ///   `self.table.items > capacity_to_buckets(capacity)`,
    ///   calling this function results in [`undefined behavior`].
    ///
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
    ///   `self.table.items > capacity_to_buckets(capacity)`,
    ///   calling this function will never return (it will go into an
    ///   infinite loop).
    ///
    /// See [`RawTableInner::find_insert_slot`] for more information.
    ///
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    unsafe fn resize<C: ?Sized, E>(
        &mut self,
        cx: &mut C,
        capacity: usize,
        hasher: impl HasherFn<C, T, E>,
    ) -> Result<(), CustomError<E>> {
        // SAFETY:
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
        //    [`TableLayout`] that were used to allocate this table.
        // 3. The caller ensures that the control bytes of the `RawTableInner`
        //    are already initialized.
        self.table.resize_inner(
            cx,
            &self.alloc,
            capacity,
            &move |cx, table, index| hasher.hash(cx, table.bucket::<T>(index).as_ref()),
            Self::TABLE_LAYOUT,
        )
    }

    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.
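    ///
    /// # Examples
    ///
    /// A minimal usage sketch, modeled on the `Bucket` examples above (the
    /// `ahash`-based hasher is only an illustrative choice):
    ///
    /// ```
    /// use core::hash::{BuildHasher, Hash};
    /// use core::convert::Infallible;
    ///
    /// use rune::alloc::hashbrown::raw::RawTable;
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value = ("a", 100);
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// let bucket = table.insert(&mut (), hash, value, |_: &mut (), val: &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, &val.0))).unwrap();
    ///
    /// assert_eq!(unsafe { bucket.as_ref() }, &("a", 100));
    /// ```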
1144    #[cfg_attr(feature = "inline-more", inline)]
1145    pub fn insert<C: ?Sized, E>(
1146        &mut self,
1147        cx: &mut C,
1148        hash: u64,
1149        value: T,
1150        hasher: impl HasherFn<C, T, E>,
1151    ) -> Result<Bucket<T>, CustomError<E>> {
1152        unsafe {
1153            let mut slot = self.table.find_insert_slot(hash);
1154
1155            // We can avoid growing the table once we have reached our load
1156            // factor if we are replacing a tombstone. This works since the
1157            // number of EMPTY slots does not change in this case.
1158            let old_ctrl = *self.table.ctrl(slot.index);
1159            if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
1160                self.reserve(cx, 1, hasher)?;
1161                slot = self.table.find_insert_slot(hash);
1162            }
1163
1164            Ok(self.insert_in_slot(hash, slot, value))
1165        }
1166    }
1167
1168    /// Attempts to insert a new element without growing the table and return its raw bucket.
1169    ///
1170    /// Returns an `Err` containing the given element if inserting it would require growing the
1171    /// table.
1172    ///
1173    /// This does not check if the given element already exists in the table.
1174    #[cfg_attr(feature = "inline-more", inline)]
1175    pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
1176        unsafe {
1177            match self.table.prepare_insert_no_grow(hash) {
1178                Ok(index) => {
1179                    let bucket = self.bucket(index);
1180                    bucket.write(value);
1181                    Ok(bucket)
1182                }
1183                Err(()) => Err(value),
1184            }
1185        }
1186    }
1187
1188    /// Inserts a new element into the table, and returns a mutable reference to it.
1189    ///
1190    /// This does not check if the given element already exists in the table.
1191    #[cfg_attr(feature = "inline-more", inline)]
1192    pub fn insert_entry<C: ?Sized, E>(
1193        &mut self,
1194        cx: &mut C,
1195        hash: u64,
1196        value: T,
1197        hasher: impl HasherFn<C, T, E>,
1198    ) -> Result<&mut T, CustomError<E>> {
1199        Ok(unsafe { self.insert(cx, hash, value, hasher)?.as_mut() })
1200    }
1201
1202    /// Inserts a new element into the table, without growing the table.
1203    ///
1204    /// There must be enough space in the table to insert the new element.
1205    ///
1206    /// This does not check if the given element already exists in the table.
1207    #[cfg_attr(feature = "inline-more", inline)]
1208    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
1209    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1210        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1211        let bucket = self.table.bucket(index);
1212
1213        // If we are replacing a DELETED entry then we don't need to update
1214        // the load counter.
1215        self.table.growth_left -= special_is_empty(old_ctrl) as usize;
1216
1217        bucket.write(value);
1218        self.table.items += 1;
1219        bucket
1220    }
1221
1222    /// Temporary removes a bucket, applying the given function to the removed
1223    /// element and optionally put back the returned value in the same bucket.
1224    ///
1225    /// Returns `true` if the bucket still contains an element
1226    ///
1227    /// This does not check if the given bucket is actually occupied.
1228    #[cfg_attr(feature = "inline-more", inline)]
1229    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1230    where
1231        F: FnOnce(T) -> Option<T>,
1232    {
1233        let index = self.bucket_index(&bucket);
1234        let old_ctrl = *self.table.ctrl(index);
1235        debug_assert!(self.is_bucket_full(index));
1236        let old_growth_left = self.table.growth_left;
1237        let item = self.remove(bucket).0;
1238        if let Some(new_item) = f(item) {
1239            self.table.growth_left = old_growth_left;
1240            self.table.set_ctrl(index, old_ctrl);
1241            self.table.items += 1;
1242            self.bucket(index).write(new_item);
1243            true
1244        } else {
1245            false
1246        }
1247    }
1248
1249    /// Searches for an element in the table. If the element is not found,
1250    /// returns `Err` with the position of a slot where an element with the
1251    /// same hash could be inserted.
1252    ///
1253    /// This function may resize the table if additional space is required for
1254    /// inserting an element.
1255    #[inline]
1256    pub fn find_or_find_insert_slot<C: ?Sized, E>(
1257        &mut self,
1258        cx: &mut C,
1259        hash: u64,
1260        eq: impl EqFn<C, T, E>,
1261        hasher: impl HasherFn<C, T, E>,
1262    ) -> Result<Bucket<T>, ErrorOrInsertSlot<E>> {
1263        self.reserve(cx, 1, hasher)?;
1264
1265        let index = self
1266            .table
1267            .find_or_find_insert_slot_inner(cx, hash, &|cx, index| unsafe {
1268                eq.eq(cx, self.bucket(index).as_ref())
1269            })?;
1270
1271        Ok(unsafe { self.bucket(index) })
1272    }
1273
1274    /// Inserts a new element into the table in the given slot, and returns its
1275    /// raw bucket.
1276    ///
1277    /// # Safety
1278    ///
1279    /// `slot` must point to a slot previously returned by
1280    /// `find_or_find_insert_slot`, and no mutation of the table must have
1281    /// occurred since that call.
1282    #[inline]
1283    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1284        let old_ctrl = *self.table.ctrl(slot.index);
1285        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1286
1287        let bucket = self.bucket(slot.index);
1288        bucket.write(value);
1289        bucket
1290    }
1291
1292    /// Searches for an element in the table.
1293    #[inline]
1294    pub fn find<C: ?Sized, E>(
1295        &self,
1296        cx: &mut C,
1297        hash: u64,
1298        eq: impl EqFn<C, T, E>,
1299    ) -> Result<Option<Bucket<T>>, E> {
1300        let result = self.table.find_inner(cx, hash, &|cx, index| unsafe {
1301            eq.eq(cx, self.bucket(index).as_ref())
1302        })?;
1303
1304        // Avoid `Option::map` because it bloats LLVM IR.
1305        Ok(match result {
1306            Some(index) => Some(unsafe { self.bucket(index) }),
1307            None => None,
1308        })
1309    }
1310
1311    /// Gets a reference to an element in the table.
1312    #[inline]
1313    pub fn get<C: ?Sized, E>(
1314        &self,
1315        cx: &mut C,
1316        hash: u64,
1317        eq: impl EqFn<C, T, E>,
1318    ) -> Result<Option<&T>, E> {
1319        // Avoid `Option::map` because it bloats LLVM IR.
1320        Ok(match self.find(cx, hash, eq)? {
1321            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1322            None => None,
1323        })
1324    }
1325
1326    /// Gets a mutable reference to an element in the table.
1327    #[inline]
1328    pub fn get_mut<C: ?Sized, E>(
1329        &mut self,
1330        cx: &mut C,
1331        hash: u64,
1332        eq: impl EqFn<C, T, E>,
1333    ) -> Result<Option<&mut T>, E> {
1334        // Avoid `Option::map` because it bloats LLVM IR.
1335        Ok(match self.find(cx, hash, eq)? {
1336            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1337            None => None,
1338        })
1339    }
1340
1341    /// Attempts to get mutable references to `N` entries in the table at once.
1342    ///
1343    /// Returns an array of length `N` with the results of each query.
1344    ///
1345    /// At most one mutable reference will be returned to any entry. `None` will be
1346    /// returned if any of the hashes are duplicates, or if any hash is not found.
1347    ///
1348    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1349    /// the `i`th key to be looked up.
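    ///
    /// A minimal sketch of a two-key lookup (not compiled here). The table is
    /// assumed to store `(K, V)` pairs, `hash_a` / `hash_b` / `keys` are
    /// hypothetical, and the context type is `()` with an infallible `eq`:
    ///
    /// ```ignore
    /// let found = table
    ///     .get_many_mut(&mut (), [hash_a, hash_b], |_, i, entry: &(K, V)| {
    ///         // `i` selects which of the two lookups this candidate belongs to.
    ///         Ok::<_, core::convert::Infallible>(entry.0 == keys[i])
    ///     })
    ///     .unwrap();
    ///
    /// if let Some([a, b]) = found {
    ///     // Two distinct entries were found; swap their values.
    ///     core::mem::swap(&mut a.1, &mut b.1);
    /// }
    /// ```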
1350    pub fn get_many_mut<C: ?Sized, E, const N: usize>(
1351        &mut self,
1352        cx: &mut C,
1353        hashes: [u64; N],
1354        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1355    ) -> Result<Option<[&'_ mut T; N]>, E> {
1356        unsafe {
1357            let ptrs = match self.get_many_mut_pointers(cx, hashes, eq)? {
1358                Some(ptrs) => ptrs,
1359                None => return Ok(None),
1360            };
1361
1362            for (i, &cur) in ptrs.iter().enumerate() {
1363                if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
1364                    return Ok(None);
1365                }
1366            }
1367            // All buckets are distinct from all previous buckets, so we're clear to return the
1368            // result of the lookup.
1369
1370            // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1371            Ok(Some(mem::transmute_copy(&ptrs)))
1372        }
1373    }
1374
1375    pub unsafe fn get_many_unchecked_mut<C: ?Sized, E, const N: usize>(
1376        &mut self,
1377        cx: &mut C,
1378        hashes: [u64; N],
1379        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1380    ) -> Result<Option<[&'_ mut T; N]>, E> {
1381        let ptrs = match self.get_many_mut_pointers(cx, hashes, eq)? {
1382            Some(ptrs) => ptrs,
1383            None => return Ok(None),
1384        };
1385
1386        Ok(Some(mem::transmute_copy(&ptrs)))
1387    }
1388
1389    unsafe fn get_many_mut_pointers<C: ?Sized, E, const N: usize>(
1390        &mut self,
1391        cx: &mut C,
1392        hashes: [u64; N],
1393        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1394    ) -> Result<Option<[*mut T; N]>, E> {
1395        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
1396        let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
1397        let outs_ptr = outs.as_mut_ptr();
1398
1399        for (i, &hash) in hashes.iter().enumerate() {
1400            let cur = match self.find(cx, hash, |cx: &mut C, k: &T| eq(cx, i, k))? {
1401                Some(cur) => cur,
1402                None => return Ok(None),
1403            };
1404            *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
1405        }
1406
1407        // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1408        Ok(Some(outs.assume_init()))
1409    }
1410
1411    /// Returns the number of elements the map can hold without reallocating.
1412    ///
1413    /// This number is a lower bound; the table might be able to hold
1414    /// more, but is guaranteed to be able to hold at least this many.
1415    #[inline]
1416    pub fn capacity(&self) -> usize {
1417        self.table.items + self.table.growth_left
1418    }
1419
1420    /// Returns the number of elements in the table.
1421    #[inline]
1422    pub fn len(&self) -> usize {
1423        self.table.items
1424    }
1425
1426    /// Returns `true` if the table contains no elements.
1427    #[inline]
1428    pub fn is_empty(&self) -> bool {
1429        self.len() == 0
1430    }
1431
1432    /// Returns the number of buckets in the table.
1433    #[inline]
1434    pub fn buckets(&self) -> usize {
1435        self.table.bucket_mask + 1
1436    }
1437
1438    /// Checks whether the bucket at `index` is full.
1439    ///
1440    /// # Safety
1441    ///
1442    /// The caller must ensure `index` is less than the number of buckets.
1443    #[inline]
1444    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1445        self.table.is_bucket_full(index)
1446    }
1447
1448    /// Returns an iterator over every element in the table. It is up to
1449    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1450    /// Because we cannot make the `next` method unsafe on the `RawIter`
1451    /// struct, we have to make the `iter` method unsafe.
1452    ///
1453    /// # Safety
1454    ///
1455    /// Caller must ensure that the raw iterator doesn't outlive `self`.
1456    #[inline]
1457    pub unsafe fn iter(&self) -> RawIter<T> {
1458        // SAFETY:
1459        // 1. The caller must uphold the safety contract for `iter` method.
1460        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1461        //    we never exposed RawTable::new_uninitialized in a public API.
1462        self.table.iter()
1463    }
1464
1465    /// Returns an iterator over occupied buckets that could match a given hash.
1466    ///
1467    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1468    /// return items that have a hash value different than the one provided. You
1469    /// should always validate the returned values before using them.
1470    ///
1471    /// It is up to the caller to ensure that the `RawTable` outlives the
1472    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1473    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1474    #[cfg_attr(feature = "inline-more", inline)]
1475    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1476        RawIterHash::new(self, hash)
1477    }
1478
1479    /// Returns an iterator which removes all elements from the table without
1480    /// freeing the memory.
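    ///
    /// A small usage sketch (the `recycle` call is a stand-in), assuming
    /// `RawDrain` yields the removed elements as an `Iterator<Item = T>`, as in
    /// upstream hashbrown:
    ///
    /// ```ignore
    /// for value in table.drain() {
    ///     // Each `value` is moved out of the table; the allocation is kept.
    ///     recycle(value);
    /// }
    /// assert!(table.is_empty());
    /// ```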
1481    #[cfg_attr(feature = "inline-more", inline)]
1482    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1483        unsafe {
1484            let iter = self.iter();
1485            self.drain_iter_from(iter)
1486        }
1487    }
1488
1489    /// Returns an iterator which removes all elements from the table without
1490    /// freeing the memory.
1491    ///
1492    /// Iteration starts at the provided iterator's current location.
1493    ///
1494    /// It is up to the caller to ensure that the iterator is valid for this
1495    /// `RawTable` and covers all items that remain in the table.
1496    #[cfg_attr(feature = "inline-more", inline)]
1497    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1498        debug_assert_eq!(iter.len(), self.len());
1499        RawDrain {
1500            iter,
1501            table: mem::replace(&mut self.table, RawTableInner::NEW),
1502            orig_table: NonNull::from(&mut self.table),
1503            marker: PhantomData,
1504        }
1505    }
1506
1507    /// Returns an iterator which consumes all elements from the table.
1508    ///
1509    /// Iteration starts at the provided iterator's current location.
1510    ///
1511    /// It is up to the caller to ensure that the iterator is valid for this
1512    /// `RawTable` and covers all items that remain in the table.
1513    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1514        debug_assert_eq!(iter.len(), self.len());
1515
1516        let allocation = self.into_allocation();
1517        RawIntoIter {
1518            iter,
1519            allocation,
1520            marker: PhantomData,
1521        }
1522    }
1523
1524    /// Converts the table into a raw allocation. The contents of the table
1525    /// should be dropped using a `RawIter` before freeing the allocation.
1526    #[cfg_attr(feature = "inline-more", inline)]
1527    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1528        let alloc = if self.table.is_empty_singleton() {
1529            None
1530        } else {
1531            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1532            let (layout, ctrl_offset) =
1533                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1534                    Some(lco) => lco,
1535                    None => unsafe { hint::unreachable_unchecked() },
1536                };
1537            Some((
1538                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
1539                layout,
1540                unsafe { ptr::read(&self.alloc) },
1541            ))
1542        };
1543        mem::forget(self);
1544        alloc
1545    }
1546}
1547
1548unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1549where
1550    T: Send,
1551    A: Send,
1552{
1553}
1554unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1555where
1556    T: Sync,
1557    A: Sync,
1558{
1559}
1560
1561impl RawTableInner {
1562    const NEW: Self = RawTableInner::new();
1563
1564    /// Creates a new empty hash table without allocating any memory.
1565    ///
1566    /// In effect this returns a table with exactly 1 bucket. However we can
1567    /// leave the data pointer dangling since that bucket is never accessed
1568    /// due to our load factor forcing us to always have at least 1 free bucket.
1569    #[inline]
1570    const fn new() -> Self {
1571        Self {
1572            // Be careful to cast the entire slice to a raw pointer.
1573            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
1574            bucket_mask: 0,
1575            items: 0,
1576            growth_left: 0,
1577        }
1578    }
1579}
1580
1581impl RawTableInner {
1582    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1583    /// The control bytes and buckets are left uninitialized.
1584    ///
1585    /// # Safety
1586    ///
1587    /// The caller of this function must ensure that `buckets` is a power of two,
1588    /// and must also initialize all `self.bucket_mask + 1 + Group::WIDTH` control
1589    /// bytes with the [`EMPTY`] byte.
1590    ///
1591    /// See also [`Allocator`] API for other safety concerns.
1592    ///
1593    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1594    #[cfg_attr(feature = "inline-more", inline)]
1595    unsafe fn new_uninitialized<A>(
1596        alloc: &A,
1597        table_layout: TableLayout,
1598        buckets: usize,
1599    ) -> Result<Self, Error>
1600    where
1601        A: Allocator,
1602    {
1603        debug_assert!(buckets.is_power_of_two());
1604
1605        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1606        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1607            Some(lco) => lco,
1608            None => return Err(Error::CapacityOverflow),
1609        };
1610
1611        let ptr: NonNull<u8> = alloc.allocate(layout)?.cast();
1612
1613        // SAFETY: the allocation above succeeded, so `ptr` is non-null and `ctrl_offset` is in bounds.
1614        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1615        Ok(Self {
1616            ctrl,
1617            bucket_mask: buckets - 1,
1618            items: 0,
1619            growth_left: bucket_mask_to_capacity(buckets - 1),
1620        })
1621    }
1622
1623    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1624    /// capacity for inserting the given number of elements without reallocating.
1625    ///
1626    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1627    #[inline]
1628    fn fallible_with_capacity<A>(
1629        alloc: &A,
1630        table_layout: TableLayout,
1631        capacity: usize,
1632    ) -> Result<Self, Error>
1633    where
1634        A: Allocator,
1635    {
1636        if capacity == 0 {
1637            Ok(Self::NEW)
1638        } else {
1639            // SAFETY: We checked that we could successfully allocate the new table, and then
1640            // initialized all control bytes with the constant `EMPTY` byte.
1641            unsafe {
1642                let buckets = capacity_to_buckets(capacity).ok_or(Error::CapacityOverflow)?;
1643
1644                let result = Self::new_uninitialized(alloc, table_layout, buckets)?;
1645                // SAFETY: We checked that the table is allocated and therefore the table already has
1646                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1647                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1648                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
1649
1650                Ok(result)
1651            }
1652        }
1653    }
1654
1655    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1656    /// the given number of elements without reallocating.
1657    ///
1658    /// Returns an error if the required capacity exceeds what can be represented,
1659    /// or if the allocation itself fails. This is a thin wrapper around
1660    /// [`fallible_with_capacity`].
1661    ///
1662    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1663    ///
1664    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1665    fn try_with_capacity<A>(
1666        alloc: &A,
1667        table_layout: TableLayout,
1668        capacity: usize,
1669    ) -> Result<Self, Error>
1670    where
1671        A: Allocator,
1672    {
1673        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1674        Self::fallible_with_capacity(alloc, table_layout, capacity)
1675    }
1676
1677    /// Fixes up an insertion slot due to false positives for groups smaller than the group width.
1678    /// This must only be used on insertion slots found by `find_insert_slot_in_group`.
1679    #[inline]
1680    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1681        // In tables smaller than the group width
1682        // (self.buckets() < Group::WIDTH), trailing control
1683        // bytes outside the range of the table are filled with
1684        // EMPTY entries. These will unfortunately trigger a
1685        // match, but once masked may point to a full bucket that
1686        // is already occupied. We detect this situation here and
1687        // perform a second scan starting at the beginning of the
1688        // table. This second scan is guaranteed to find an empty
1689        // slot (due to the load factor) before hitting the trailing
1690        // control bytes (containing EMPTY).
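        //
        // A concrete (hypothetical) instance, assuming 4 buckets and
        // Group::WIDTH == 8, with real control bytes [FULL, EMPTY, FULL, FULL]:
        // probing from pos == 2 loads
        // [FULL, FULL, EMPTY, EMPTY, EMPTY, EMPTY, FULL, EMPTY]
        // (the last two bytes being the replicated copies of buckets 0 and 1).
        // The lowest empty/deleted bit is 2, and (2 + 2) & bucket_mask == 0,
        // which is a FULL bucket. The aligned re-scan of group 0 below then
        // finds bit 1, i.e. the genuinely empty bucket 1.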
1691        if unlikely(self.is_bucket_full(index)) {
1692            debug_assert!(self.bucket_mask < Group::WIDTH);
1693            // SAFETY:
1694            //
1695            // * We are in range and `ptr = self.ctrl(0)` is valid for reads
1696            //   and properly aligned, because the table is already allocated
1697            //   (see `TableLayout::calculate_layout_for` and `ptr::read`);
1698            //
1699            // * For tables larger than the group width (self.buckets() >= Group::WIDTH),
1700            //   we will never end up in the given branch, since
1701            //   `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` cannot
1702            //   return a full bucket index. For tables smaller than the group width, calling the
1703            //   `unwrap_unchecked` function is also
1704            //   safe, as the trailing control bytes outside the range of the table are filled
1705            //   with EMPTY bytes, so this second scan either finds an empty slot (due to the
1706            //   load factor) or hits the trailing control bytes (containing EMPTY).
1707            index = Group::load_aligned(self.ctrl(0))
1708                .match_empty_or_deleted()
1709                .lowest_set_bit()
1710                .unwrap_unchecked();
1711        }
1712        InsertSlot { index }
1713    }
1714
1715    /// Finds the position to insert something in a group.
1716    /// This may have false positives and must be fixed up with `fix_insert_slot` before it's used.
1717    #[inline]
1718    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1719        let bit = group.match_empty_or_deleted().lowest_set_bit();
1720
1721        if likely(bit.is_some()) {
1722            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1723        } else {
1724            None
1725        }
1726    }
1727
1728    /// Searches for an element in the table, or a potential slot where that element could be
1729    /// inserted.
1730    ///
1731    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1732    /// eliminated by LLVM optimizations.
1733    #[inline]
1734    fn find_or_find_insert_slot_inner<C: ?Sized, E>(
1735        &self,
1736        cx: &mut C,
1737        hash: u64,
1738        eq: &dyn Fn(&mut C, usize) -> Result<bool, E>,
1739    ) -> Result<usize, ErrorOrInsertSlot<E>> {
1740        let mut insert_slot = None;
1741
1742        let h2_hash = h2(hash);
1743        let mut probe_seq = self.probe_seq(hash);
1744
1745        loop {
1746            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1747
1748            for bit in group.match_byte(h2_hash) {
1749                let index = (probe_seq.pos + bit) & self.bucket_mask;
1750
1751                if likely(eq(cx, index).map_err(CustomError::Custom)?) {
1752                    return Ok(index);
1753                }
1754            }
1755
1756            // We didn't find the element we were looking for in the group, try to get an
1757            // insertion slot from the group if we don't have one yet.
1758            if likely(insert_slot.is_none()) {
1759                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1760            }
1761
1762            // Only stop the search if the group contains at least one empty element.
1763            // Otherwise, the element that we are looking for might be in a following group.
1764            if likely(group.match_empty().any_bit_set()) {
1765                // We must have found an insert slot by now, since the current group contains at
1766                // least one empty element. For tables smaller than the group width, there will
1767                // still be an empty element in the current (and only) group due to the load factor.
1768                unsafe {
1769                    return Err(ErrorOrInsertSlot::InsertSlot(
1770                        self.fix_insert_slot(insert_slot.unwrap_unchecked()),
1771                    ));
1772                }
1773            }
1774
1775            probe_seq.move_next(self.bucket_mask);
1776        }
1777    }
1778
1779    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1780    /// element and sets the hash for that slot. Returns an index of that slot and the
1781    /// old control byte stored in the found index.
1782    ///
1783    /// This function does not check if the given element exists in the table. Also,
1784    /// this function does not check if there is enough space in the table to insert
1785    /// a new element, so the caller must ensure that the table has at least 1
1786    /// empty or deleted `bucket`, or this function will never return (it will go into
1787    /// an infinite loop).
1788    ///
1789    /// This function does not make any changes to the `data` parts of the table,
1790    /// or any changes to the `items` or `growth_left` fields of the table.
1791    ///
1792    /// # Safety
1793    ///
1794    /// The safety rules are directly derived from the safety rule for the
1795    /// [`RawTableInner::set_ctrl_h2`] methods. Thus, in order to uphold the safety
1796    /// contracts for that method, as well as for the correct logic of the work of this
1797    /// crate, you must observe the following rules when calling this function:
1798    ///
1799    /// * The [`RawTableInner`] has already been allocated;
1800    ///
1801    /// * The caller of this function must ensure that the "data" parts of the table
1802    ///   will have an entry in the returned index (matching the given hash) right
1803    ///   after calling this function.
1804    ///
1805    /// Calling this function on a table that has not been allocated results in
1806    /// [`undefined behavior`].
1807    ///
1808    /// The caller must independently increase the `items` field of the table, and also,
1809    /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
1810    /// field, and do not change it if the old control byte was [`DELETED`].
1811    ///
1812    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
1813    /// or saving an `element` from / into the [`RawTable`] / [`RawTableInner`].
1814    ///
1815    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1816    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1817    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1818    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
1819    #[inline]
1820    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
1821        let index: usize = self.find_insert_slot(hash).index;
1822        // SAFETY:
1823        // 1. The `find_insert_slot` function either returns an `index` less than or
1824        //    equal to `self.bucket_mask = self.buckets() - 1` of the table, or never
1825        //    returns if it cannot find an empty or deleted slot.
1826        // 2. The caller of this function guarantees that the table has already been
1827        //    allocated
1828        let old_ctrl = *self.ctrl(index);
1829        self.set_ctrl_h2(index, hash);
1830        (index, old_ctrl)
1831    }
1832
1833    /// Searches for an empty or deleted bucket which is suitable for inserting
1834    /// a new element, returning the `index` for the new [`Bucket`].
1835    ///
1836    /// This function does not make any changes to the `data` part of the table, or any
1837    /// changes to the `items` or `growth_left` field of the table.
1838    ///
1839    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1840    /// will never return (will go into an infinite loop) for tables larger than the group
1841    /// width, or return an index outside of the table's index range if the table is
1842    /// smaller than the group width.
1843    ///
1844    /// # Note
1845    ///
1846    /// Calling this function is always safe, but attempting to write data at
1847    /// the index returned by this function when the table is less than the group width
1848    /// and if there was not at least one empty bucket in the table will cause immediate
1849    /// [`undefined behavior`]. This is because in this case the function will return
1850    /// `self.bucket_mask + 1` as an index due to the trailing EMPTY control bytes outside
1851    /// the table range.
1852    ///
1853    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1854    #[inline]
1855    fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1856        let mut probe_seq = self.probe_seq(hash);
1857        loop {
1858            // SAFETY:
1859            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1860            //   of the table due to masking with `self.bucket_mask` and also because the number of
1861            //   buckets is a power of two (see comment for masking below).
1862            //
1863            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1864            //   call `Group::load` due to the extended control bytes range, which is
1865            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1866            //   byte will never be read for the allocated table);
1867            //
1868            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1869            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1870            //   bytes, which is safe (see RawTableInner::new_in).
1871            unsafe {
1872                let group = Group::load(self.ctrl(probe_seq.pos));
1873                let index = self.find_insert_slot_in_group(&group, &probe_seq);
1874
1875                if likely(index.is_some()) {
1876                    return self.fix_insert_slot(index.unwrap_unchecked());
1877                }
1878            }
1879            probe_seq.move_next(self.bucket_mask);
1880        }
1881    }
1882
1883    /// Searches for an element in a table, returning the `index` of the found element.
1884    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1885    /// eliminated by LLVM optimizations.
1886    ///
1887    /// This function does not make any changes to the `data` part of the table, or any
1888    /// changes to the `items` or `growth_left` field of the table.
1889    ///
1890    /// The table must have at least 1 empty `bucket`, otherwise, if the
1891    /// `eq: &dyn Fn(&mut C, usize) -> Result<bool, E>` function never returns `Ok(true)`,
1892    /// this function will also never return (it will go into an infinite loop).
1893    #[inline(always)]
1894    fn find_inner<C: ?Sized, E>(
1895        &self,
1896        cx: &mut C,
1897        hash: u64,
1898        eq: &dyn Fn(&mut C, usize) -> Result<bool, E>,
1899    ) -> Result<Option<usize>, E> {
1900        let h2_hash = h2(hash);
1901        let mut probe_seq = self.probe_seq(hash);
1902
1903        loop {
1904            // SAFETY:
1905            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1906            //   of the table due to masking with `self.bucket_mask`.
1907            //
1908            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1909            //   call `Group::load` due to the extended control bytes range, which is
1910            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1911            //   byte will never be read for the allocated table);
1912            //
1913            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1914            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1915            //   bytes, which is safe (see RawTableInner::new_in).
1916            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1917
1918            for bit in group.match_byte(h2_hash) {
1919                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1920                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1921                let index = (probe_seq.pos + bit) & self.bucket_mask;
1922
1923                if likely(eq(cx, index)?) {
1924                    return Ok(Some(index));
1925                }
1926            }
1927
1928            if likely(group.match_empty().any_bit_set()) {
1929                return Ok(None);
1930            }
1931
1932            probe_seq.move_next(self.bucket_mask);
1933        }
1934    }
1935
1936    /// Prepares for rehashing data in place (that is, without allocating new memory).
1937    /// Converts the control bytes of all full buckets to `DELETED`, and all `DELETED`
1938    /// control bytes to `EMPTY`, i.e. performs the following conversion:
1939    ///
1940    /// - `EMPTY` control bytes   -> `EMPTY`;
1941    /// - `DELETED` control bytes -> `EMPTY`;
1942    /// - `FULL` control bytes    -> `DELETED`.
1943    ///
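    /// Per control byte, and given that `EMPTY == 0xFF` and `DELETED == 0x80`, the
    /// conversion is equivalent to the following sketch (illustrative only; the
    /// real conversion operates on a whole [`Group`] at a time):
    ///
    /// ```ignore
    /// fn convert(ctrl: u8) -> u8 {
    ///     // special (top bit set) -> EMPTY, full (top bit clear) -> DELETED
    ///     if ctrl & 0x80 != 0 { EMPTY } else { DELETED }
    /// }
    /// ```
    ///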
1944    /// This function does not make any changes to the `data` parts of the table,
1945    /// or any changes to the `items` or `growth_left` fields of the table.
1946    ///
1947    /// # Safety
1948    ///
1949    /// You must observe the following safety rules when calling this function:
1950    ///
1951    /// * The [`RawTableInner`] has already been allocated;
1952    ///
1953    /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
1954    ///   bytes when re-inserting them into their ideal position (which was impossible
1955    ///   to do during the first insert due to tombstones). If the caller does not do
1956    ///   this, then calling this function may result in a memory leak.
1957    ///
1958    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1959    ///   calling this function results in [`undefined behavior`].
1960    ///
1961    /// Calling this function on a table that has not been allocated results in
1962    /// [`undefined behavior`].
1963    ///
1964    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
1965    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
1966    ///
1967    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1968    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1969    #[allow(clippy::mut_mut)]
1970    #[inline]
1971    unsafe fn prepare_rehash_in_place(&mut self) {
1972        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1973        // This effectively frees up all buckets containing a DELETED entry.
1974        //
1975        // SAFETY:
1976        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1977        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1978        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1979        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1980        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1981        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1982        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1983            let group = Group::load_aligned(self.ctrl(i));
1984            let group = group.convert_special_to_empty_and_full_to_deleted();
1985            group.store_aligned(self.ctrl(i));
1986        }
1987
1988        // Fix up the trailing control bytes. See the comments in set_ctrl
1989        // for the handling of tables smaller than the group width.
1990        //
1991        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1992        // has already been allocated
1993        if unlikely(self.buckets() < Group::WIDTH) {
1994            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1995            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1996            // `Group::WIDTH` is safe
1997            self.ctrl(0)
1998                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1999        } else {
2000            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
2001            // control bytes, so copying `Group::WIDTH` bytes with offset equal
2002            // to `self.buckets() == self.bucket_mask + 1` is safe
2003            self.ctrl(0)
2004                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2005        }
2006    }
2007
2008    /// Returns an iterator over every element in the table.
2009    ///
2010    /// # Safety
2011    ///
2012    /// If any of the following conditions are violated, the result
2013    /// is [`undefined behavior`]:
2014    ///
2015    /// * The caller has to ensure that the `RawTableInner` outlives the
2016    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2017    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2018    ///
2019    /// * The [`RawTableInner`] must have properly initialized control bytes.
2020    ///
2021    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2022    #[inline]
2023    unsafe fn iter<T>(&self) -> RawIter<T> {
2024        // SAFETY:
2025        // 1. Since the caller of this function ensures that the control bytes
2026        //    are properly initialized and `self.data_end()` points to the start
2027        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2028        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2029        //    control bytes.
2030        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2031        //    equal to zero).
2032        // 3. We pass the exact value of buckets of the table to the function.
2033        //
2034        //                         `ctrl` points here (to the start
2035        //                         of the first control byte `CT0`)
2036        //                          ∨
2037        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2038        //                           \________  ________/
2039        //                                    \/
2040        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2041        //
2042        // where: T0...T_n  - our stored data;
2043        //        CT0...CT_n - control bytes or metadata for `data`.
2044        let data = Bucket::from_base_index(self.data_end(), 0);
2045        RawIter {
2046            // SAFETY: See explanation above
2047            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2048            items: self.items,
2049        }
2050    }
2051
2052    /// Executes the destructors (if any) of the values stored in the table.
2053    ///
2054    /// # Note
2055    ///
2056    /// This function does not erase the control bytes of the table and does
2057    /// not make any changes to the `items` or `growth_left` fields of the
2058    /// table. If necessary, the caller of this function must manually set
2059    /// up these table fields, for example using the [`clear_no_drop`] function.
2060    ///
2061    /// Be careful when calling this function: the drop implementations of
2062    /// the elements can panic, and this can leave the table in an inconsistent
2063    /// state.
2064    ///
2065    /// # Safety
2066    ///
2067    /// If `T` is a type that should be dropped and **the table is not empty**,
2068    /// calling this function more than once results in [`undefined behavior`].
2069    ///
2070    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2071    /// calling this function may result in [`undefined behavior`].
2072    ///
2073    /// It is safe to call this function on a table that has not been allocated,
2074    /// on a table with uninitialized control bytes, and on a table with no actual
2075    /// data but with `Full` control bytes if `self.items == 0`.
2076    ///
2077    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2078    /// about properly removing or saving an `element` from / into the [`RawTable`] /
2079    /// [`RawTableInner`].
2080    ///
2081    /// [`Bucket::drop`]: Bucket::drop
2082    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2083    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2084    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2085    unsafe fn drop_elements<T>(&mut self) {
2086        // Check that `self.items != 0`. Protects against the possibility
2087        // of creating an iterator on a table with uninitialized control bytes.
2088        if T::NEEDS_DROP && self.items != 0 {
2089            // SAFETY: We know for sure that RawTableInner will outlive the
2090            // returned `RawIter` iterator, and the caller of this function
2091            // must uphold the safety contract for `drop_elements` method.
2092            for item in self.iter::<T>() {
2093                // SAFETY: The caller must uphold the safety contract for
2094                // `drop_elements` method.
2095                item.drop();
2096            }
2097        }
2098    }
2099
2100    /// Executes the destructors (if any) of the values stored in the table and then
2101    /// deallocates the table.
2102    ///
2103    /// # Note
2104    ///
2105    /// Calling this function automatically makes invalid (dangling) all instances of
2106    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2107    ///
2108    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2109    /// fields of the table. If necessary, the caller of this function must manually set
2110    /// up these table fields.
2111    ///
2112    /// # Safety
2113    ///
2114    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2115    ///
2116    /// * Calling this function more than once;
2117    ///
2118    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2119    ///   to allocate this table.
2120    ///
2121    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2122    ///   was used to allocate this table.
2123    ///
2124    /// The caller of this function should pay attention to the possibility of the
2125    /// elements' drop function panicking, because if it does:
2126    ///
2127    ///    * the table may be left in an inconsistent state;
2128    ///
2129    ///    * the memory is never deallocated, so a memory leak may occur.
2130    ///
2131    /// Attempting to use (dereference) the `ctrl` field of the table after calling this
2132    /// function results in [`undefined behavior`].
2133    ///
2134    /// It is safe to call this function on a table that has not been allocated,
2135    /// on a table with uninitialized control bytes, and on a table with no actual
2136    /// data but with `Full` control bytes if `self.items == 0`.
2137    ///
2138    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2139    /// for more information.
2140    ///
2141    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2142    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2143    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2144    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2145        if !self.is_empty_singleton() {
2146            unsafe {
2147                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2148                self.drop_elements::<T>();
2149                // SAFETY:
2150                // 1. We have checked that our table is allocated.
2151                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2152                self.free_buckets(alloc, table_layout);
2153            }
2154        }
2155    }
2156
2157    #[inline]
2158    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2159        debug_assert_ne!(self.bucket_mask, 0);
2160        debug_assert!(index < self.buckets());
2161        Bucket::from_base_index(self.data_end(), index)
2162    }
2163
2164    #[inline]
2165    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2166        debug_assert_ne!(self.bucket_mask, 0);
2167        debug_assert!(index < self.buckets());
2168        let base: *mut u8 = self.data_end().as_ptr();
2169        base.sub((index + 1) * size_of)
2170    }
2171
2172    #[inline]
2173    unsafe fn data_end<T>(&self) -> NonNull<T> {
2174        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
2175    }
2176
2177    /// Returns an iterator-like object for a probe sequence on the table.
2178    ///
2179    /// This iterator never terminates, but is guaranteed to visit each bucket
2180    /// group exactly once. The loop using `probe_seq` must terminate upon
2181    /// reaching a group containing an empty bucket.
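    ///
    /// Assuming `ProbeSeq::move_next` advances by an increasing multiple of
    /// `Group::WIDTH` (triangular probing, as in upstream hashbrown), the probe
    /// positions visited for a given hash are, roughly:
    ///
    /// ```ignore
    /// let mut pos = h1(hash) & bucket_mask;
    /// let mut stride = 0;
    /// loop {
    ///     // visit the group of buckets starting at `pos` ...
    ///     stride += Group::WIDTH;
    ///     pos = (pos + stride) & bucket_mask;
    /// }
    /// ```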
2182    #[inline]
2183    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2184        ProbeSeq {
2185            pos: h1(hash) & self.bucket_mask,
2186            stride: 0,
2187        }
2188    }
2189
2190    /// Returns the index of a bucket into which a value can be inserted if there is enough room
2191    /// in the table, otherwise returns an error.
2192    #[inline]
2193    unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
2194        let index = self.find_insert_slot(hash).index;
2195        let old_ctrl = *self.ctrl(index);
2196        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
2197            Err(())
2198        } else {
2199            self.record_item_insert_at(index, old_ctrl, hash);
2200            Ok(index)
2201        }
2202    }
2203
2204    #[inline]
2205    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
2206        self.growth_left -= usize::from(special_is_empty(old_ctrl));
2207        self.set_ctrl_h2(index, hash);
2208        self.items += 1;
2209    }
2210
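    /// Checks whether the two bucket indices `i` and `new_i` fall into the same
    /// `Group::WIDTH`-sized window, measured from the probe start of `hash`.
    ///
    /// With hypothetical numbers (`bucket_mask == 15`, `Group::WIDTH == 8`, probe
    /// start 6): indices 7 and 13 both map to group 0 (`1 / 8` and `7 / 8`), while
    /// index 14 maps to group 1 (`8 / 8`), so only the first pair is considered to
    /// be in the same group.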
2211    #[inline]
2212    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2213        let probe_seq_pos = self.probe_seq(hash).pos;
2214        let probe_index =
2215            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
2216        probe_index(i) == probe_index(new_i)
2217    }
2218
2219    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2220    /// the end of the array.
2221    ///
2222    /// This function does not make any changes to the `data` parts of the table,
2223    /// or any changes to the `items` or `growth_left` fields of the table.
2224    ///
2225    /// # Safety
2226    ///
2227    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2228    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2229    /// following rules when calling this function:
2230    ///
2231    /// * The [`RawTableInner`] has already been allocated;
2232    ///
2233    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2234    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2235    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2236    ///
2237    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2238    ///
2239    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2240    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2241    ///
2242    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2243    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2244    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2245    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2246    #[inline]
2247    unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
2248        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
2249        self.set_ctrl(index, h2(hash));
2250    }
2251
2252    /// Replaces the hash in the control byte at the given index with the provided one,
2253    /// and possibly also replicates the new control byte at the end of the array of control
2254    /// bytes, returning the old control byte.
2255    ///
2256    /// This function does not make any changes to the `data` parts of the table,
2257    /// or any changes to the `items` or `growth_left` fields of the table.
2258    ///
2259    /// # Safety
2260    ///
2261    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
2262    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2263    /// methods, you must observe the following rules when calling this function:
2264    ///
2265    /// * The [`RawTableInner`] has already been allocated;
2266    ///
2267    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2268    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2269    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2270    ///
2271    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2272    ///
2273    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2274    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2275    ///
2276    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2277    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2278    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2279    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2280    #[inline]
2281    unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
2282        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
2283        let prev_ctrl = *self.ctrl(index);
2284        self.set_ctrl_h2(index, hash);
2285        prev_ctrl
2286    }
2287
2288    /// Sets a control byte, and possibly also the replicated control byte at
2289    /// the end of the array.
2290    ///
2291    /// This function does not make any changes to the `data` parts of the table,
2292    /// or any changes to the `items` or `growth_left` fields of the table.
2293    ///
2294    /// # Safety
2295    ///
2296    /// You must observe the following safety rules when calling this function:
2297    ///
2298    /// * The [`RawTableInner`] has already been allocated;
2299    ///
2300    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2301    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2302    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2303    ///
2304    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2305    ///
2306    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2307    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2308    ///
2309    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2310    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2311    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2312    #[inline]
2313    unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
2314        // Replicate the first Group::WIDTH control bytes at the end of
2315        // the array without using a branch. If the table is smaller than
2316        // the group width (self.buckets() < Group::WIDTH), then
2317        // `index2 = Group::WIDTH + index`; otherwise `index2` is:
2318        //
2319        // - If index >= Group::WIDTH then index == index2.
2320        // - Otherwise index2 == self.bucket_mask + 1 + index.
2321        //
2322        // The very last replicated control byte is never actually read because
2323        // we mask the initial index for unaligned loads, but we write it
2324        // anyway because it makes the set_ctrl implementation simpler.
2325        //
2326        // If there are fewer buckets than Group::WIDTH then this code will
2327        // replicate the buckets at the end of the trailing group. For example
2328        // with 2 buckets and a group size of 4, the control bytes will look
2329        // like this:
2330        //
2331        //     Real    |             Replicated
2332        // ---------------------------------------------
2333        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2334        // ---------------------------------------------
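        //
        // Working through the arithmetic below for that 2-bucket, width-4 example
        // (bucket_mask == 1): index 0 gives ((0 - 4) & 1) + 4 == 4 and index 1
        // gives ((1 - 4) & 1) + 4 == 5 (using wrapping subtraction), i.e. exactly
        // the two replicated slots at the end of the control byte array.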
2335
2336        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2337        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2338        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2339
2340        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2341        *self.ctrl(index) = ctrl;
2342        *self.ctrl(index2) = ctrl;
2343    }
2344
2345    /// Returns a pointer to a control byte.
2346    ///
2347    /// # Safety
2348    ///
2349    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2350    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2351    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2352    /// returns a one-past-the-end pointer of the allocation, which is useless on its own.
2353    ///
2354    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2355    /// table that has not been allocated results in [`Undefined Behavior`].
2356    ///
2357    /// So to satisfy both requirements you should always follow the rule that
2358    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2359    ///
2360    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2361    /// for read-only purposes.
2362    ///
2363    /// See also the [`Bucket::as_ptr()`] method, for more information about properly removing
2364    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2365    ///
2366    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2367    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2368    #[inline]
2369    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
2370        debug_assert!(index < self.num_ctrl_bytes());
2371        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2372        self.ctrl.as_ptr().add(index)
2373    }
2374
2375    #[inline]
2376    fn buckets(&self) -> usize {
2377        self.bucket_mask + 1
2378    }
2379
2380    /// Checks whether the bucket at `index` is full.
2381    ///
2382    /// # Safety
2383    ///
2384    /// The caller must ensure `index` is less than the number of buckets.
2385    #[inline]
2386    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2387        debug_assert!(index < self.buckets());
2388        is_full(*self.ctrl(index))
2389    }
2390
2391    #[inline]
2392    fn num_ctrl_bytes(&self) -> usize {
2393        self.bucket_mask + 1 + Group::WIDTH
2394    }
2395
2396    #[inline]
2397    fn is_empty_singleton(&self) -> bool {
2398        self.bucket_mask == 0
2399    }
2400
2401    /// Attempts to allocate a new hash table with at least enough capacity
2402    /// for inserting the given number of elements without reallocating,
2403    /// and returns it inside a `ScopeGuard` to protect against panics in the hash
2404    /// function.
2405    ///
2406    /// # Note
2407    ///
2408    /// It is recommended (but not required):
2409    ///
2410    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2411    ///
2412    /// * That the `alloc` be the same [`Allocator`] as the `Allocator` used
2413    ///   to allocate this table.
2414    ///
2415    /// * That the `table_layout` be the same [`TableLayout`] as the `TableLayout` used
2416    ///   to allocate this table.
2417    ///
2418    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2419    /// this table, then using `mem::swap` with the `self` and the new table returned
2420    /// by this function results in [`undefined behavior`].
2421    ///
2422    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2423    #[allow(clippy::mut_mut)]
2424    #[inline]
2425    fn prepare_resize<'a, A>(
2426        &self,
2427        alloc: &'a A,
2428        table_layout: TableLayout,
2429        capacity: usize,
2430    ) -> Result<ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, Error>
2431    where
2432        A: Allocator,
2433    {
2434        debug_assert!(self.items <= capacity);
2435
2436        // Allocate and initialize the new table.
2437        let new_table = RawTableInner::fallible_with_capacity(alloc, table_layout, capacity)?;
2438
2439        // The hash function may panic, in which case we simply free the new
2440        // table without dropping any elements that may have been copied into
2441        // it.
2442        //
2443        // This guard is also used to free the old table on success, see
2444        // the comment at the bottom of this function.
2445        Ok(guard(new_table, move |self_| {
2446            if !self_.is_empty_singleton() {
2447                // SAFETY:
2448                // 1. We have checked that our table is allocated.
2449                // 2. We know for sure that the `alloc` and `table_layout` matches the
2450                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2451                unsafe { self_.free_buckets(alloc, table_layout) };
2452            }
2453        }))
2454    }
2455
2456    /// Reserves or rehashes to make room for `additional` more elements.
2457    ///
2458    /// This uses dynamic dispatch to reduce the amount of
2459    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2460    ///
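    /// In rough pseudocode, the policy implemented below is:
    ///
    /// ```ignore
    /// let new_items = items + additional; // error on overflow
    /// let full_capacity = bucket_mask_to_capacity(bucket_mask);
    /// if new_items <= full_capacity / 2 {
    ///     // plenty of capacity is locked up in DELETED entries: rehash in place
    /// } else {
    ///     // resize so that at least max(new_items, full_capacity + 1) elements fit
    /// }
    /// ```
    ///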
2461    /// # Safety
2462    ///
2463    /// If any of the following conditions are violated, the result is
2464    /// [`undefined behavior`]:
2465    ///
2466    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2467    ///   to allocate this table.
2468    ///
2469    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2470    ///   used to allocate this table.
2471    ///
2472    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2473    ///   the elements stored in the table.
2474    ///
2475    /// * The [`RawTableInner`] must have properly initialized control bytes.
2476    ///
2477    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2478    #[allow(clippy::inline_always)]
2479    #[inline(always)]
2480    unsafe fn reserve_rehash_inner<C: ?Sized, E, A>(
2481        &mut self,
2482        cx: &mut C,
2483        alloc: &A,
2484        additional: usize,
2485        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2486        layout: TableLayout,
2487        drop: Option<fn(*mut u8)>,
2488    ) -> Result<(), CustomError<E>>
2489    where
2490        A: Allocator,
2491    {
2492        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2493        let new_items = match self.items.checked_add(additional) {
2494            Some(new_items) => new_items,
2495            None => return Err(CustomError::from(Error::CapacityOverflow)),
2496        };
2497        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2498        if new_items <= full_capacity / 2 {
2499            // Rehash in-place without re-allocating if we have plenty of spare
2500            // capacity that is locked up due to DELETED entries.
2501
2502            // SAFETY:
2503            // 1. We know for sure that [`RawTableInner`] has already been allocated
2504            //    (since new_items <= full_capacity / 2);
2505            // 2. The caller ensures that `drop` function is the actual drop function of
2506            //    the elements stored in the table.
2507            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2508            //    used to allocate this table.
2509            // 4. The caller ensures that the control bytes of the `RawTableInner`
2510            //    are already initialized.
2511            self.rehash_in_place(cx, hasher, layout.size, drop)
2512                .map_err(CustomError::Custom)?;
2513            Ok(())
2514        } else {
2515            // Otherwise, conservatively resize to at least the next size up
2516            // to avoid churning deletes into frequent rehashes.
2517            //
2518            // SAFETY:
2519            // 1. We know for sure that `capacity >= self.items`.
2520            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2521            //    [`TableLayout`] that were used to allocate this table.
2522            // 3. The caller ensures that the control bytes of the `RawTableInner`
2523            //    are already initialized.
2524            self.resize_inner(
2525                cx,
2526                alloc,
2527                usize::max(new_items, full_capacity + 1),
2528                hasher,
2529                layout,
2530            )
2531        }
2532    }
2533
    /// Returns an iterator over the indices of full buckets in the table.
2535    ///
2536    /// # Safety
2537    ///
2538    /// Behavior is undefined if any of the following conditions are violated:
2539    ///
2540    /// * The caller has to ensure that the `RawTableInner` outlives the
2541    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2542    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2543    ///   `full_buckets_indices` method unsafe.
2544    ///
2545    /// * The [`RawTableInner`] must have properly initialized control bytes.
2546    #[inline(always)]
2547    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2548        // SAFETY:
        // 1. The caller of this function ensures that the control bytes are
        //    properly initialized and `self.ctrl(0)` points to the start of the
        //    array of control bytes. Therefore `ctrl` is valid for reads, properly
        //    aligned to `Group::WIDTH`, and points to properly initialized control
        //    bytes.
2554        // 2. The value of `items` is equal to the amount of data (values) added
2555        //    to the table.
2556        //
2557        //                         `ctrl` points here (to the start
2558        //                         of the first control byte `CT0`)
2559        //                          ∨
2560        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2561        //                           \________  ________/
2562        //                                    \/
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2564        //
2565        // where: T0...T_n  - our stored data;
2566        //        CT0...CT_n - control bytes or metadata for `data`.
2567        let ctrl = NonNull::new_unchecked(self.ctrl(0));
2568
2569        FullBucketsIndices {
2570            // Load the first group
2571            // SAFETY: See explanation above.
2572            current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
2573            group_first_index: 0,
2574            ctrl,
2575            items: self.items,
2576        }
2577    }
2578
2579    /// Allocates a new table of a different size and moves the contents of the
2580    /// current table into it.
2581    ///
2582    /// This uses dynamic dispatch to reduce the amount of
2583    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2584    ///
2585    /// # Safety
2586    ///
2587    /// If any of the following conditions are violated, the result is
2588    /// [`undefined behavior`]:
2589    ///
2590    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2591    ///   to allocate this table;
2592    ///
2593    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2594    ///   used to allocate this table;
2595    ///
2596    /// * The [`RawTableInner`] must have properly initialized control bytes.
2597    ///
2598    /// The caller of this function must ensure that `capacity >= self.items`
2599    /// otherwise:
2600    ///
    /// * If `self.items != 0`, calling this function with `capacity == 0`
    ///   results in [`undefined behavior`].
    ///
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
    ///   results in [`undefined behavior`].
    ///
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
    ///   `self.items > capacity_to_buckets(capacity)`, this function will
    ///   never return (it will go into an infinite loop).
2611    ///
    /// Note: It is recommended (but not required) that the new table's `capacity`
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
    /// more information.
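    ///
    /// For example, the caller `reserve_rehash_inner` always passes
    /// `usize::max(new_items, full_capacity + 1)` as `capacity`, which satisfies
    /// this requirement because `new_items >= self.items`.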
2616    ///
2617    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2618    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2619    #[allow(clippy::inline_always)]
2620    #[inline(always)]
2621    unsafe fn resize_inner<C: ?Sized, E, A>(
2622        &mut self,
2623        cx: &mut C,
2624        alloc: &A,
2625        capacity: usize,
2626        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2627        layout: TableLayout,
2628    ) -> Result<(), CustomError<E>>
2629    where
2630        A: Allocator,
2631    {
2632        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2633        // that were used to allocate this table.
2634        let mut new_table = self.prepare_resize(alloc, layout, capacity)?;
2635
2636        // SAFETY: We know for sure that RawTableInner will outlive the
2637        // returned `FullBucketsIndices` iterator, and the caller of this
2638        // function ensures that the control bytes are properly initialized.
2639        for full_byte_index in self.full_buckets_indices() {
2640            // This may panic.
2641            let hash = hasher(cx, self, full_byte_index).map_err(CustomError::Custom)?;
2642
2643            // We can use a simpler version of insert() here since:
2644            // - there are no DELETED entries.
2645            // - we know there is enough space in the table.
2646            // - all elements are unique.
2647            //
2648            // SAFETY:
2649            // 1. The caller of this function guarantees that `capacity > 0`
2650            //    so `new_table` must already have some allocated memory.
2651            // 2. We set `growth_left` and `items` fields of the new table
2652            //    after the loop.
2653            // 3. We insert into the table, at the returned index, the data
2654            //    matching the given hash immediately after calling this function.
2655            let (new_index, _) = new_table.prepare_insert_slot(hash);
2656
2657            // SAFETY:
2658            //
2659            // * `src` is valid for reads of `layout.size` bytes, since the
2660            //   table is alive and the `full_byte_index` is guaranteed to be
2661            //   within bounds (see `FullBucketsIndices::next_impl`);
2662            //
2663            // * `dst` is valid for writes of `layout.size` bytes, since the
2664            //   caller ensures that `table_layout` matches the [`TableLayout`]
            //   that was used to allocate the old table and we have the `new_index`
2666            //   returned by `prepare_insert_slot`.
2667            //
2668            // * Both `src` and `dst` are properly aligned.
2669            //
            // * Both `src` and `dst` point to different regions of memory.
2671            ptr::copy_nonoverlapping(
2672                self.bucket_ptr(full_byte_index, layout.size),
2673                new_table.bucket_ptr(new_index, layout.size),
2674                layout.size,
2675            );
2676        }
2677
2678        // The hash function didn't panic, so we can safely set the
2679        // `growth_left` and `items` fields of the new table.
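        // For example (illustrative sizes): moving 20 items into a fresh 64-bucket
        // table with capacity 56 leaves `growth_left == 56 - 20 == 36`.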
2680        new_table.growth_left -= self.items;
2681        new_table.items = self.items;
2682
2683        // We successfully copied all elements without panicking. Now replace
2684        // self with the new table. The old table will have its memory freed but
2685        // the items will not be dropped (since they have been moved into the
2686        // new table).
2687        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2688        // that was used to allocate this table.
2689        mem::swap(self, &mut new_table);
2690
2691        Ok(())
2692    }
2693
2694    /// Rehashes the contents of the table in place (i.e. without changing the
2695    /// allocation).
2696    ///
    /// If `hasher` panics then some of the table's contents may be lost.
2698    ///
2699    /// This uses dynamic dispatch to reduce the amount of
2700    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2701    ///
2702    /// # Safety
2703    ///
2704    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2705    ///
2706    /// * The `size_of` must be equal to the size of the elements stored in the table;
2707    ///
2708    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2709    ///   the elements stored in the table.
2710    ///
2711    /// * The [`RawTableInner`] has already been allocated;
2712    ///
2713    /// * The [`RawTableInner`] must have properly initialized control bytes.
2714    ///
2715    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2716    #[allow(clippy::inline_always)]
2717    #[cfg_attr(feature = "inline-more", inline(always))]
2718    #[cfg_attr(not(feature = "inline-more"), inline)]
2719    unsafe fn rehash_in_place<C: ?Sized, E>(
2720        &mut self,
2721        cx: &mut C,
2722        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2723        size_of: usize,
2724        drop: Option<fn(*mut u8)>,
2725    ) -> Result<(), E> {
2726        // If the hash function panics then properly clean up any elements
2727        // that we haven't rehashed yet. We unfortunately can't preserve the
        // elements since we have lost their hashes and have no way of recovering
        // them without risking another panic.
2730        self.prepare_rehash_in_place();
2731
2732        let mut guard = guard(self, move |self_| {
2733            if let Some(drop) = drop {
2734                for i in 0..self_.buckets() {
2735                    if *self_.ctrl(i) == DELETED {
2736                        self_.set_ctrl(i, EMPTY);
2737                        drop(self_.bucket_ptr(i, size_of));
2738                        self_.items -= 1;
2739                    }
2740                }
2741            }
2742            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2743        });
2744
2745        // At this point, DELETED elements are elements that we haven't
2746        // rehashed yet. Find them and re-insert them at their ideal
2747        // position.
2748        'outer: for i in 0..guard.buckets() {
2749            if *guard.ctrl(i) != DELETED {
2750                continue;
2751            }
2752
2753            let i_p = guard.bucket_ptr(i, size_of);
2754
2755            'inner: loop {
2756                // Hash the current item
2757                let hash = hasher(cx, *guard, i)?;
2758
2759                // Search for a suitable place to put it
2760                let new_i = guard.find_insert_slot(hash).index;
2761
2762                // Probing works by scanning through all of the control
2763                // bytes in groups, which may not be aligned to the group
2764                // size. If both the new and old position fall within the
2765                // same unaligned group, then there is no benefit in moving
2766                // it and we can just continue to the next item.
2767                if likely(guard.is_in_same_group(i, new_i, hash)) {
2768                    guard.set_ctrl_h2(i, hash);
2769                    continue 'outer;
2770                }
2771
2772                let new_i_p = guard.bucket_ptr(new_i, size_of);
2773
2774                // We are moving the current item to a new position. Write
2775                // our H2 to the control byte of the new position.
2776                let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
2777                if prev_ctrl == EMPTY {
2778                    guard.set_ctrl(i, EMPTY);
2779                    // If the target slot is empty, simply move the current
2780                    // element into the new slot and clear the old control
2781                    // byte.
2782                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2783                    continue 'outer;
2784                } else {
2785                    // If the target slot is occupied, swap the two elements
2786                    // and then continue processing the element that we just
2787                    // swapped into the old slot.
2788                    debug_assert_eq!(prev_ctrl, DELETED);
2789                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2790                    continue 'inner;
2791                }
2792            }
2793        }
2794
2795        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2796
2797        mem::forget(guard);
2798        Ok(())
2799    }
2800
2801    /// Deallocates the table without dropping any entries.
2802    ///
2803    /// # Note
2804    ///
    /// This function must be called only after [`drop_elements`](RawTable::drop_elements),
    /// otherwise it can lead to a memory leak. Calling this function also invalidates
    /// (leaves dangling) all [`Bucket`] instances of the table as well as the table's
    /// `ctrl` field.
2809    ///
2810    /// # Safety
2811    ///
2812    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2813    ///
2814    /// * The [`RawTableInner`] has already been allocated;
2815    ///
2816    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2817    ///   to allocate this table.
2818    ///
2819    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2820    ///   to allocate this table.
2821    ///
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2823    ///
2824    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2825    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2826    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2827    #[inline]
2828    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2829    where
2830        A: Allocator,
2831    {
2832        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2833        // method.
2834        let (ptr, layout) = self.allocation_info(table_layout);
2835        alloc.deallocate(ptr, layout);
2836    }
2837
2838    /// Returns a pointer to the allocated memory and the layout that was used to
2839    /// allocate the table.
2840    ///
2841    /// # Safety
2842    ///
2843    /// Caller of this function must observe the following safety rules:
2844    ///
2845    /// * The [`RawTableInner`] has already been allocated, otherwise
2846    ///   calling this function results in [`undefined behavior`]
2847    ///
2848    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2849    ///   that was used to allocate this table. Failure to comply with this condition
2850    ///   may result in [`undefined behavior`].
2851    ///
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2853    ///
2854    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2855    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2856    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2857    #[inline]
2858    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
2859        debug_assert!(
2860            !self.is_empty_singleton(),
2861            "this function can only be called on non-empty tables"
2862        );
2863
2864        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
2865        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
2866            Some(lco) => lco,
2867            None => unsafe { hint::unreachable_unchecked() },
2868        };
2869        (
2870            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
2871            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
2872            layout,
2873        )
2874    }
2875
2876    /// Returns a pointer to the allocated memory and the layout that was used to
    /// allocate the table. If the [`RawTableInner`] has not been allocated, this
    /// function returns a dangling pointer and a `()` (unit) layout.
2879    ///
2880    /// # Safety
2881    ///
2882    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2883    /// that was used to allocate this table. Failure to comply with this condition
2884    /// may result in [`undefined behavior`].
2885    ///
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2887    ///
2888    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2889    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2890    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2891    unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
2892        if self.is_empty_singleton() {
2893            (NonNull::dangling(), Layout::new::<()>())
2894        } else {
2895            // SAFETY:
2896            // 1. We have checked that our table is allocated.
2897            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
2898            // that was used to allocate this table.
2899            unsafe { self.allocation_info(table_layout) }
2900        }
2901    }
2902
2903    /// Marks all table buckets as empty without dropping their contents.
2904    #[inline]
2905    fn clear_no_drop(&mut self) {
2906        if !self.is_empty_singleton() {
2907            unsafe {
2908                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
2909            }
2910        }
2911        self.items = 0;
2912        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
2913    }
2914
    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket is
    /// no longer reported as full, decreases the table's `items` count and, if possible,
    /// increases `self.growth_left`.
2918    ///
2919    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
2920    /// does not make any changes to the `data` parts of the table. The caller of this
2921    /// function must take care to properly drop the `data`, otherwise calling this
2922    /// function may result in a memory leak.
2923    ///
2924    /// # Safety
2925    ///
2926    /// You must observe the following safety rules when calling this function:
2927    ///
2928    /// * The [`RawTableInner`] has already been allocated;
2929    ///
    /// * The control byte at the given `index` must denote a full bucket;
2931    ///
2932    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2933    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2934    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2935    ///
2936    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2937    ///
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
    /// functions is likely to result in [`undefined behavior`] due to the subtraction overflowing
    /// (`self.items -= 1` overflows when `self.items == 0`).
2941    ///
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2944    ///
2945    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2946    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2947    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2948    #[inline]
2949    unsafe fn erase(&mut self, index: usize) {
2950        debug_assert!(self.is_bucket_full(index));
2951
2952        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
2953        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
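        // For example, assuming `Group::WIDTH == 16` (as in the SSE2 implementation)
        // and 32 buckets (`self.bucket_mask == 31`): for `index == 3`,
        // `3.wrapping_sub(16) & 31 == 19`, which is exactly `(3 - 16) mod 32`.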
2954        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
2955        // SAFETY:
2956        // - The caller must uphold the safety contract for `erase` method;
2957        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
2958        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
2959        let empty_after = Group::load(self.ctrl(index)).match_empty();
2960
2961        // Inserting and searching in the map is performed by two key functions:
2962        //
2963        // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
2964        //   slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
2965        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
2966        //   and so on until it has gone through all the groups in the control bytes.
2967        //
2968        // - The `find_inner` function that looks for the index of the desired element by looking
2969        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
2970        //   there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
2971        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
2972        //   jumps further, and if it does not find the desired element and again there is no `EMPTY`
2973        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
2974        //   finds the desired element or hits an `EMPTY` slot/byte.
2975        //
2976        // Accordingly, this leads to two consequences:
2977        //
2978        // - The map must have `EMPTY` slots (bytes);
2979        //
2980        // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
2981        //   function may stumble upon an `EMPTY` byte before finding the desired element and stop
2982        //   searching.
2983        //
2984        // Thus it is necessary to check all bytes after and before the erased element. If we are in
2985        // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
2986        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
2987        // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
2988        // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
2989        // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
2990        //
        // Finally, for tables smaller than the group width (`self.buckets() < Group::WIDTH`) we have
        // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, so given
        // all of the above, such tables cannot have `DELETED` bytes.
2994        //
2995        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
2996        // `trailing_zeros` refers to the bytes at the beginning of a group.
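        // Illustrative sketch, assuming `Group::WIDTH == 16`: if the 10 bytes just
        // before the erased index and the 6 bytes starting at it (including the
        // erased byte itself) are all `FULL`/`DELETED`, then
        // `leading_zeros + trailing_zeros == 10 + 6 >= 16` and the byte must become
        // `DELETED`; any shorter run means a probe would have hit an `EMPTY` byte
        // anyway, so the byte can become `EMPTY` and `growth_left` is incremented.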
2997        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
2998            DELETED
2999        } else {
3000            self.growth_left += 1;
3001            EMPTY
3002        };
3003        // SAFETY: the caller must uphold the safety contract for `erase` method.
3004        self.set_ctrl(index, ctrl);
3005        self.items -= 1;
3006    }
3007}
3008
3009impl<T, A: Allocator + Clone> TryClone for RawTable<T, A>
3010where
3011    T: TryClone,
3012{
3013    fn try_clone(&self) -> Result<Self, Error> {
3014        if self.table.is_empty_singleton() {
3015            Ok(Self::new_in(self.alloc.clone()))
3016        } else {
3017            unsafe {
3018                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3019                //
3020                // SAFETY: This is safe as we are taking the size of an already allocated table
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3023                let mut new_table =
3024                    Self::new_uninitialized(self.alloc.clone(), self.table.buckets())?;
3025
3026                // Cloning elements may fail (the clone function may panic). But we don't
3027                // need to worry about uninitialized control bits, since:
3028                // 1. The number of items (elements) in the table is zero, which means that
                //    the control bits will not be read by the `Drop` implementation.
3030                // 2. The `clone_from_spec` method will first copy all control bits from
3031                //    `self` (thus initializing them). But this will not affect the `Drop`
3032                //    function, since the `clone_from_spec` function sets `items` only after
                //    successfully cloning all elements.
3034                new_table.clone_from_spec(self)?;
3035                Ok(new_table)
3036            }
3037        }
3038    }
3039
3040    fn try_clone_from(&mut self, source: &Self) -> Result<(), Error> {
3041        if source.table.is_empty_singleton() {
3042            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3043            unsafe {
3044                // SAFETY:
3045                // 1. We call the function only once;
3046                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3047                //    and [`TableLayout`] that were used to allocate this table.
3048                // 3. If any elements' drop function panics, then there will only be a memory leak,
3049                //    because we have replaced the inner table with a new one.
3050                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3051            }
3052        } else {
3053            unsafe {
                // Make sure that if any panic occurs, we clear the table and
3055                // leave it in an empty state.
3056                let mut self_ = guard(self, |self_| {
3057                    self_.clear_no_drop();
3058                });
3059
3060                // First, drop all our elements without clearing the control
3061                // bytes. If this panics then the scope guard will clear the
3062                // table, leaking any elements that were not dropped yet.
3063                //
3064                // This leak is unavoidable: we can't try dropping more elements
3065                // since this could lead to another panic and abort the process.
3066                //
                // SAFETY: If something goes wrong we clear our table right after
3068                // dropping the elements, so there is no double drop, since `items`
3069                // will be equal to zero.
3070                self_.table.drop_elements::<T>();
3071
3072                // If necessary, resize our table to match the source.
3073                if self_.buckets() != source.buckets() {
3074                    let new_inner = RawTableInner::new_uninitialized(
3075                        &self_.alloc,
3076                        Self::TABLE_LAYOUT,
3077                        source.buckets(),
3078                    )?;
                    // Replace the old inner with the new uninitialized one. This is ok, since if
                    // something goes wrong the `ScopeGuard` will initialize all control bytes and
                    // leave an empty table.
3081                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3082                    if !old_inner.is_empty_singleton() {
3083                        // SAFETY:
3084                        // 1. We have checked that our table is allocated.
3085                        // 2. We know for sure that `alloc` and `table_layout` matches
3086                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3087                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3088                    }
3089                }
3090
3091                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3092                // inside the `clone_from_impl` function will take care of that, dropping all
3093                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3094                self_.clone_from_spec(source)?;
3095
3096                // Disarm the scope guard if cloning was successful.
3097                ScopeGuard::into_inner(self_);
3098            }
3099        }
3100
3101        Ok(())
3102    }
3103}
3104
3105#[cfg(test)]
3106impl<T, A: Allocator + Clone> Clone for RawTable<T, A>
3107where
3108    T: TryClone,
3109{
3110    fn clone(&self) -> Self {
3111        self.try_clone().abort()
3112    }
3113
3114    fn clone_from(&mut self, source: &Self) {
3115        self.try_clone_from(source).abort()
3116    }
3117}
3118
3119/// Specialization of `clone_from` for `Copy` types
3120trait RawTableClone {
3121    unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error>;
3122}
3123impl<T: TryClone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3124    default_fn! {
3125        #[cfg_attr(feature = "inline-more", inline)]
3126        unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error> {
3127            self.clone_from_impl(source)
3128        }
3129    }
3130}
3131#[cfg(rune_nightly)]
3132impl<T: TryCopy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3133    #[cfg_attr(feature = "inline-more", inline)]
3134    unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error> {
3135        source
3136            .table
3137            .ctrl(0)
3138            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3139        source
3140            .data_start()
3141            .as_ptr()
3142            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3143
3144        self.table.items = source.table.items;
3145        self.table.growth_left = source.table.growth_left;
3146        Ok(())
3147    }
3148}
3149
3150impl<T: TryClone, A: Allocator + Clone> RawTable<T, A> {
3151    /// Common code for clone and clone_from. Assumes:
3152    /// - `self.buckets() == source.buckets()`.
3153    /// - Any existing elements have been dropped.
3154    /// - The control bytes are not initialized yet.
3155    #[cfg_attr(feature = "inline-more", inline)]
3156    unsafe fn clone_from_impl(&mut self, source: &Self) -> Result<(), Error> {
3157        // Copy the control bytes unchanged. We do this in a single pass
3158        source
3159            .table
3160            .ctrl(0)
3161            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3162
3163        // The cloning of elements may panic, in which case we need
3164        // to make sure we drop only the elements that have been
3165        // cloned so far.
3166        let mut guard = guard((0, &mut *self), |(index, self_)| {
3167            if T::NEEDS_DROP {
3168                for i in 0..*index {
3169                    if self_.is_bucket_full(i) {
3170                        self_.bucket(i).drop();
3171                    }
3172                }
3173            }
3174        });
3175
3176        for from in source.iter() {
3177            let index = source.bucket_index(&from);
3178            let to = guard.1.bucket(index);
3179            to.write(from.as_ref().try_clone()?);
3180
3181            // Update the index in case we need to unwind.
3182            guard.0 = index + 1;
3183        }
3184
3185        // Successfully cloned all items, no need to clean up.
3186        mem::forget(guard);
3187
3188        self.table.items = source.table.items;
3189        self.table.growth_left = source.table.growth_left;
3190        Ok(())
3191    }
3192
3193    /// Variant of `clone_from` to use when a hasher is available.
3194    pub fn clone_from_with_hasher<C: ?Sized, E>(
3195        &mut self,
3196        cx: &mut C,
3197        source: &Self,
3198        hasher: impl HasherFn<C, T, E>,
3199    ) -> Result<(), CustomError<E>> {
3200        // If we have enough capacity in the table, just clear it and insert
3201        // elements one by one. We don't do this if we have the same number of
3202        // buckets as the source since we can just copy the contents directly
3203        // in that case.
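        // For example (illustrative sizes): a destination with 64 buckets (capacity 56)
        // cloning from a source with 32 buckets and 20 items takes this path, while a
        // destination that also has 32 buckets falls through to `try_clone_from` below,
        // which copies the control bytes wholesale.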
3204        if self.table.buckets() != source.table.buckets()
3205            && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
3206        {
3207            self.clear();
3208
3209            let mut guard_self = guard(&mut *self, |self_| {
3210                // Clear the partially copied table if a panic occurs, otherwise
3211                // items and growth_left will be out of sync with the contents
3212                // of the table.
3213                self_.clear();
3214            });
3215
3216            unsafe {
3217                for item in source.iter() {
3218                    // This may panic.
3219                    let item = item.as_ref().try_clone()?;
3220                    let hash = hasher.hash(cx, &item).map_err(CustomError::Custom)?;
3221
3222                    // We can use a simpler version of insert() here since:
3223                    // - there are no DELETED entries.
3224                    // - we know there is enough space in the table.
3225                    // - all elements are unique.
3226                    let (index, _) = guard_self.table.prepare_insert_slot(hash);
3227                    guard_self.bucket(index).write(item);
3228                }
3229            }
3230
3231            // Successfully cloned all items, no need to clean up.
3232            mem::forget(guard_self);
3233
3234            self.table.items = source.table.items;
3235            self.table.growth_left -= source.table.items;
3236        } else {
3237            self.try_clone_from(source)?;
3238        }
3239
3240        Ok(())
3241    }
3242}
3243
3244impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3245    #[inline]
3246    fn default() -> Self {
3247        Self::new_in(Default::default())
3248    }
3249}
3250
3251#[cfg(rune_nightly)]
3252unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3253    #[cfg_attr(feature = "inline-more", inline)]
3254    fn drop(&mut self) {
3255        unsafe {
3256            // SAFETY:
3257            // 1. We call the function only once;
3258            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3259            //    and [`TableLayout`] that were used to allocate this table.
3260            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3261            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3262            //    so there won't be any table left in an inconsistent state.
3263            self.table
3264                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3265        }
3266    }
3267}
3268#[cfg(not(rune_nightly))]
3269impl<T, A: Allocator> Drop for RawTable<T, A> {
3270    #[cfg_attr(feature = "inline-more", inline)]
3271    fn drop(&mut self) {
3272        unsafe {
3273            // SAFETY:
3274            // 1. We call the function only once;
3275            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3276            //    and [`TableLayout`] that were used to allocate this table.
3277            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3278            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3279            //    so there won't be any table left in an inconsistent state.
3280            self.table
3281                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3282        }
3283    }
3284}
3285
3286impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3287    type Item = T;
3288    type IntoIter = RawIntoIter<T, A>;
3289
3290    #[cfg_attr(feature = "inline-more", inline)]
3291    fn into_iter(self) -> RawIntoIter<T, A> {
3292        unsafe {
3293            let iter = self.iter();
3294            self.into_iter_from(iter)
3295        }
3296    }
3297}
3298
3299/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3300/// not track an item count.
3301pub(crate) struct RawIterRange<T> {
3302    // Mask of full buckets in the current group. Bits are cleared from this
3303    // mask as each element is processed.
3304    current_group: BitMaskIter,
3305
3306    // Pointer to the buckets for the current group.
3307    data: Bucket<T>,
3308
    // Pointer to the next group of control bytes.
    // Must be aligned to the group size.
3311    next_ctrl: *const u8,
3312
3313    // Pointer one past the last control byte of this range.
3314    end: *const u8,
3315}
3316
3317impl<T> RawIterRange<T> {
3318    /// Returns a `RawIterRange` covering a subset of a table.
3319    ///
3320    /// # Safety
3321    ///
3322    /// If any of the following conditions are violated, the result is
3323    /// [`undefined behavior`]:
3324    ///
3325    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3326    ///
3327    /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
3328    ///
3329    /// * `ctrl` must point to the array of properly initialized control bytes;
3330    ///
3331    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3332    ///
3333    /// * the value of `len` must be less than or equal to the number of table buckets,
3334    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3335    ///   must be positive.
3336    ///
3337    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3338    ///   byte past the end of the same [allocated table].
3339    ///
3340    /// * The `len` must be a power of two.
3341    ///
3342    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3343    #[cfg_attr(feature = "inline-more", inline)]
3344    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3345        debug_assert_ne!(len, 0);
3346        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3347        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3348        let end = ctrl.add(len);
3349
3350        // Load the first group and advance ctrl to point to the next group
3351        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3352        let current_group = Group::load_aligned(ctrl).match_full();
3353        let next_ctrl = ctrl.add(Group::WIDTH);
3354
3355        Self {
3356            current_group: current_group.into_iter(),
3357            data,
3358            next_ctrl,
3359            end,
3360        }
3361    }
3362
3363    /// Splits a `RawIterRange` into two halves.
3364    ///
    /// The second half is `None` if the remaining range is smaller than or equal
    /// to the group width.
3367    #[cfg_attr(feature = "inline-more", inline)]
3368    #[cfg(feature = "rayon")]
3369    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3370        unsafe {
3371            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
3373                // is the last one.
3374                (self, None)
3375            } else {
3376                // len is the remaining number of elements after the group that
3377                // we are currently processing. It must be a multiple of the
3378                // group size (small tables are caught by the check above).
3379                let len = offset_from(self.end, self.next_ctrl);
3380                debug_assert_eq!(len % Group::WIDTH, 0);
3381
3382                // Split the remaining elements into two halves, but round the
3383                // midpoint down in case there is an odd number of groups
3384                // remaining. This ensures that:
3385                // - The tail is at least 1 group long.
3386                // - The split is roughly even considering we still have the
3387                //   current group to process.
3388                let mid = (len / 2) & !(Group::WIDTH - 1);
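                // For example, assuming `Group::WIDTH == 16` and `len == 48` (three
                // remaining groups): `mid == (48 / 2) & !15 == 16`, so the head keeps
                // the current group plus one more group and the tail covers the
                // remaining 32 control bytes.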
3389
3390                let tail = Self::new(
3391                    self.next_ctrl.add(mid),
3392                    self.data.next_n(Group::WIDTH).next_n(mid),
3393                    len - mid,
3394                );
3395                debug_assert_eq!(
3396                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3397                    tail.data.ptr
3398                );
3399                debug_assert_eq!(self.end, tail.end);
3400                self.end = self.next_ctrl.add(mid);
3401                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3402                (self, Some(tail))
3403            }
3404        }
3405    }
3406
3407    /// # Safety
    /// If `DO_CHECK_PTR_RANGE` is false, the caller must ensure that we never try to
    /// iterate after yielding all elements.
3410    #[cfg_attr(feature = "inline-more", inline)]
3411    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3412        loop {
3413            if let Some(index) = self.current_group.next() {
3414                return Some(self.data.next_n(index));
3415            }
3416
3417            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3418                return None;
3419            }
3420
3421            // We might read past self.end up to the next group boundary,
3422            // but this is fine because it only occurs on tables smaller
3423            // than the group size where the trailing control bytes are all
3424            // EMPTY. On larger tables self.end is guaranteed to be aligned
3425            // to the group size (since tables are power-of-two sized).
3426            self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3427            self.data = self.data.next_n(Group::WIDTH);
3428            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3429        }
3430    }
3431}
3432
3433// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3434// in the actual iterator implementations determine the real Send/Sync bounds.
3435unsafe impl<T> Send for RawIterRange<T> {}
3436unsafe impl<T> Sync for RawIterRange<T> {}
3437
3438impl<T> Clone for RawIterRange<T> {
3439    #[cfg_attr(feature = "inline-more", inline)]
3440    fn clone(&self) -> Self {
3441        Self {
3442            data: self.data.clone(),
3443            next_ctrl: self.next_ctrl,
3444            current_group: self.current_group,
3445            end: self.end,
3446        }
3447    }
3448}
3449
3450impl<T> Iterator for RawIterRange<T> {
3451    type Item = Bucket<T>;
3452
3453    #[cfg_attr(feature = "inline-more", inline)]
3454    fn next(&mut self) -> Option<Bucket<T>> {
3455        unsafe {
3456            // SAFETY: We set checker flag to true.
3457            self.next_impl::<true>()
3458        }
3459    }
3460
3461    #[inline]
3462    fn size_hint(&self) -> (usize, Option<usize>) {
3463        // We don't have an item count, so just guess based on the range size.
3464        let remaining_buckets = if self.end > self.next_ctrl {
3465            unsafe { offset_from(self.end, self.next_ctrl) }
3466        } else {
3467            0
3468        };
3469
3470        // Add a group width to include the group we are currently processing.
3471        (0, Some(Group::WIDTH + remaining_buckets))
3472    }
3473}
3474
3475impl<T> FusedIterator for RawIterRange<T> {}
3476
3477/// Iterator which returns a raw pointer to every full bucket in the table.
3478///
3479/// For maximum flexibility this iterator is not bound by a lifetime, but you
3480/// must observe several rules when using it:
3481/// - You must not free the hash table while iterating (including via growing/shrinking).
3482/// - It is fine to erase a bucket that has been yielded by the iterator.
3483/// - Erasing a bucket that has not yet been yielded by the iterator may still
3484///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3485/// - It is unspecified whether an element inserted after the iterator was
3486///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
3488///   change in the future.
3489pub struct RawIter<T> {
3490    pub(crate) iter: RawIterRange<T>,
3491    items: usize,
3492}
3493
3494impl<T> RawIter<T> {
3495    /// Refresh the iterator so that it reflects a removal from the given bucket.
3496    ///
3497    /// For the iterator to remain valid, this method must be called once
3498    /// for each removed bucket before `next` is called again.
3499    ///
3500    /// This method should be called _before_ the removal is made. It is not necessary to call this
3501    /// method if you are removing an item that this iterator yielded in the past.
3502    pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
3503        self.reflect_toggle_full(b, false);
3504    }
3505
3506    /// Refresh the iterator so that it reflects an insertion into the given bucket.
3507    ///
3508    /// For the iterator to remain valid, this method must be called once
3509    /// for each insert before `next` is called again.
3510    ///
3511    /// This method does not guarantee that an insertion of a bucket with a greater
3512    /// index than the last one yielded will be reflected in the iterator.
3513    ///
3514    /// This method should be called _after_ the given insert is made.
3515    pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
3516        self.reflect_toggle_full(b, true);
3517    }
3518
3519    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
3520    unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
3521        if b.as_ptr() > self.iter.data.as_ptr() {
3522            // The iterator has already passed the bucket's group.
3523            // So the toggle isn't relevant to this iterator.
3524            return;
3525        }
3526
3527        if self.iter.next_ctrl < self.iter.end
3528            && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
3529        {
3530            // The iterator has not yet reached the bucket's group.
3531            // We don't need to reload anything, but we do need to adjust the item count.
3532
3533            if cfg!(debug_assertions) {
3534                // Double-check that the user isn't lying to us by checking the bucket state.
3535                // To do that, we need to find its control byte. We know that self.iter.data is
3536                // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
3537                let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
3538                let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
3539                // This method should be called _before_ a removal, or _after_ an insert,
3540                // so in both cases the ctrl byte should indicate that the bucket is full.
3541                assert!(is_full(*ctrl));
3542            }
3543
3544            if is_insert {
3545                self.items += 1;
3546            } else {
3547                self.items -= 1;
3548            }
3549
3550            return;
3551        }
3552
3553        // The iterator is at the bucket group that the toggled bucket is in.
3554        // We need to do two things:
3555        //
3556        //  - Determine if the iterator already yielded the toggled bucket.
3557        //    If it did, we're done.
3558        //  - Otherwise, update the iterator cached group so that it won't
3559        //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
3560        //    We'll also need to update the item count accordingly.
3561        if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
3562            let next_bucket = self.iter.data.next_n(index);
3563            if b.as_ptr() > next_bucket.as_ptr() {
3564                // The toggled bucket is "before" the bucket the iterator would yield next. We
3565                // therefore don't need to do anything --- the iterator has already passed the
3566                // bucket in question.
3567                //
3568                // The item count must already be correct, since a removal or insert "prior" to
3569                // the iterator's position wouldn't affect the item count.
3570            } else {
3571                // The removed bucket is an upcoming bucket. We need to make sure it does _not_
3572                // get yielded, and also that it's no longer included in the item count.
3573                //
3574                // NOTE: We can't just reload the group here, both since that might reflect
3575                // inserts we've already passed, and because that might inadvertently unset the
3576                // bits for _other_ removals. If we do that, we'd have to also decrement the
3577                // item count for those other bits that we unset. But the presumably subsequent
3578                // call to reflect for those buckets might _also_ decrement the item count.
3579                // Instead, we _just_ flip the bit for the particular bucket the caller asked
3580                // us to reflect.
3581                let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
3582                let was_full = self.iter.current_group.flip(our_bit);
3583                debug_assert_ne!(was_full, is_insert);
3584
3585                if is_insert {
3586                    self.items += 1;
3587                } else {
3588                    self.items -= 1;
3589                }
3590
3591                if cfg!(debug_assertions) {
3592                    if b.as_ptr() == next_bucket.as_ptr() {
3593                        // The removed bucket should no longer be next
3594                        debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
3595                    } else {
3596                        // We should not have changed what bucket comes next.
3597                        debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
3598                    }
3599                }
3600            }
3601        } else {
3602            // We must have already iterated past the removed item.
3603        }
3604    }
3605
3606    unsafe fn drop_elements(&mut self) {
3607        if T::NEEDS_DROP && self.items != 0 {
3608            for item in self {
3609                item.drop();
3610            }
3611        }
3612    }
3613}
3614
3615impl<T> Clone for RawIter<T> {
3616    #[cfg_attr(feature = "inline-more", inline)]
3617    fn clone(&self) -> Self {
3618        Self {
3619            iter: self.iter.clone(),
3620            items: self.items,
3621        }
3622    }
3623}
3624
3625impl<T> Iterator for RawIter<T> {
3626    type Item = Bucket<T>;
3627
3628    #[cfg_attr(feature = "inline-more", inline)]
3629    fn next(&mut self) -> Option<Bucket<T>> {
3630        // Inner iterator iterates over buckets
3631        // so it can do unnecessary work if we already yielded all items.
3632        if self.items == 0 {
3633            return None;
3634        }
3635
3636        let nxt = unsafe {
3637            // SAFETY: We check number of items to yield using `items` field.
3638            self.iter.next_impl::<false>()
3639        };
3640
3641        debug_assert!(nxt.is_some());
3642        self.items -= 1;
3643
3644        nxt
3645    }
3646
3647    #[inline]
3648    fn size_hint(&self) -> (usize, Option<usize>) {
3649        (self.items, Some(self.items))
3650    }
3651}
3652
3653impl<T> ExactSizeIterator for RawIter<T> {}
3654impl<T> FusedIterator for RawIter<T> {}
3655
/// Iterator which returns the index of every full bucket in the table.
3657///
3658/// For maximum flexibility this iterator is not bound by a lifetime, but you
3659/// must observe several rules when using it:
3660/// - You must not free the hash table while iterating (including via growing/shrinking).
3661/// - It is fine to erase a bucket that has been yielded by the iterator.
3662/// - Erasing a bucket that has not yet been yielded by the iterator may still
3663///   result in the iterator yielding index of that bucket.
3664/// - It is unspecified whether an element inserted after the iterator was
3665///   created will be yielded by that iterator.
3666/// - The order in which the iterator yields indices of the buckets is unspecified
3667///   and may change in the future.
3668pub(crate) struct FullBucketsIndices {
3669    // Mask of full buckets in the current group. Bits are cleared from this
3670    // mask as each element is processed.
3671    current_group: BitMaskIter,
3672
    // Index of the first byte of the current group, relative to the start
    // of the control bytes.
3675    group_first_index: usize,
3676
    // Pointer to the current group of control bytes.
    // Must be aligned to the group size (Group::WIDTH).
3679    ctrl: NonNull<u8>,
3680
3681    // Number of elements in the table.
3682    items: usize,
3683}
3684
3685impl FullBucketsIndices {
3686    /// Advances the iterator and returns the next value.
3687    ///
3688    /// # Safety
3689    ///
3690    /// If any of the following conditions are violated, the result is
3691    /// [`Undefined Behavior`]:
3692    ///
3693    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3694    ///   i.e. table outlives the `FullBucketsIndices`;
3695    ///
    /// * The caller must never try to iterate after all the elements have been yielded.
3697    ///
3698    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3699    #[inline(always)]
3700    unsafe fn next_impl(&mut self) -> Option<usize> {
3701        loop {
3702            if let Some(index) = self.current_group.next() {
3703                // The returned `self.group_first_index + index` will always
3704                // be in the range `0..self.buckets()`. See explanation below.
3705                return Some(self.group_first_index + index);
3706            }
3707
3708            // SAFETY: The caller of this function ensures that:
3709            //
3710            // 1. It never tries to iterate after getting all the elements;
            // 2. The table is alive and has not been moved;
3712            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3713            //
3714            // Taking the above into account, we always stay within the bounds, because:
3715            //
3716            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3717            //    we will never end up in the given branch, since we should have already
3718            //    yielded all the elements of the table.
3719            //
            // 2. For tables larger than the group width: the number of buckets is a
            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
            //    Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we
            //    start from the beginning of the array of control bytes and never try to
            //    iterate after getting all the elements, the last `self.ctrl` will point
            //    at index `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
            //    will always contain indices within the range `0..Group::WIDTH`, and the
            //    subsequent `self.group_first_index + index` will always return a number
            //    less than `self.buckets()`.
3729            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3730
3731            // SAFETY: See explanation above.
3732            self.current_group = Group::load_aligned(self.ctrl.as_ptr())
3733                .match_full()
3734                .into_iter();
3735            self.group_first_index += Group::WIDTH;
3736        }
3737    }
3738}
3739
3740impl Iterator for FullBucketsIndices {
3741    type Item = usize;
3742
3743    /// Advances the iterator and returns the next value. It is up to
3744    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3745    /// because we cannot make the `next` method unsafe.
3746    #[inline(always)]
3747    fn next(&mut self) -> Option<usize> {
3748        // Return if we already yielded all items.
3749        if self.items == 0 {
3750            return None;
3751        }
3752
3753        let nxt = unsafe {
3754            // SAFETY:
            // 1. We check the number of items to yield using the `items` field.
3756            // 2. The caller ensures that the table is alive and has not moved.
3757            self.next_impl()
3758        };
3759
3760        debug_assert!(nxt.is_some());
3761        self.items -= 1;
3762
3763        nxt
3764    }
3765
3766    #[inline(always)]
3767    fn size_hint(&self) -> (usize, Option<usize>) {
3768        (self.items, Some(self.items))
3769    }
3770}
3771
3772impl ExactSizeIterator for FullBucketsIndices {}
3773impl FusedIterator for FullBucketsIndices {}
3774
3775/// Iterator which consumes a table and returns elements.
3776pub struct RawIntoIter<T, A: Allocator = Global> {
3777    iter: RawIter<T>,
3778    allocation: Option<(NonNull<u8>, Layout, A)>,
3779    marker: PhantomData<T>,
3780}
3781
3782impl<T, A: Allocator> RawIntoIter<T, A> {
3783    #[cfg_attr(feature = "inline-more", inline)]
3784    pub fn iter(&self) -> RawIter<T> {
3785        self.iter.clone()
3786    }
3787}
3788
3789unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3790where
3791    T: Send,
3792    A: Send,
3793{
3794}
3795unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3796where
3797    T: Sync,
3798    A: Sync,
3799{
3800}
3801
3802#[cfg(rune_nightly)]
3803unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3804    #[cfg_attr(feature = "inline-more", inline)]
3805    fn drop(&mut self) {
3806        unsafe {
3807            // Drop all remaining elements
3808            self.iter.drop_elements();
3809
3810            // Free the table
3811            if let Some((ptr, layout, ref alloc)) = self.allocation {
3812                alloc.deallocate(ptr, layout);
3813            }
3814        }
3815    }
3816}
3817#[cfg(not(rune_nightly))]
3818impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3819    #[cfg_attr(feature = "inline-more", inline)]
3820    fn drop(&mut self) {
3821        unsafe {
3822            // Drop all remaining elements
3823            self.iter.drop_elements();
3824
3825            // Free the table
3826            if let Some((ptr, layout, ref alloc)) = self.allocation {
3827                alloc.deallocate(ptr, layout);
3828            }
3829        }
3830    }
3831}
3832
3833impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3834    type Item = T;
3835
3836    #[cfg_attr(feature = "inline-more", inline)]
3837    fn next(&mut self) -> Option<T> {
3838        unsafe { Some(self.iter.next()?.read()) }
3839    }
3840
3841    #[inline]
3842    fn size_hint(&self) -> (usize, Option<usize>) {
3843        self.iter.size_hint()
3844    }
3845}
3846
3847impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3848impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3849
3850/// Iterator which consumes elements without freeing the table storage.
3851pub struct RawDrain<'a, T, A: Allocator = Global> {
3852    iter: RawIter<T>,
3853
3854    // The table is moved into the iterator for the duration of the drain. This
3855    // ensures that an empty table is left if the drain iterator is leaked
3856    // without dropping.
3857    table: RawTableInner,
3858    orig_table: NonNull<RawTableInner>,
3859
3860    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3861    // covariant over T.
3862    marker: PhantomData<&'a RawTable<T, A>>,
3863}
3864
3865impl<T, A: Allocator> RawDrain<'_, T, A> {
3866    #[cfg_attr(feature = "inline-more", inline)]
3867    pub fn iter(&self) -> RawIter<T> {
3868        self.iter.clone()
3869    }
3870}
3871
3872unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3873where
3874    T: Send,
3875    A: Send,
3876{
3877}
3878unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3879where
3880    T: Sync,
3881    A: Sync,
3882{
3883}
3884
3885impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3886    #[cfg_attr(feature = "inline-more", inline)]
3887    fn drop(&mut self) {
3888        unsafe {
3889            // Drop all remaining elements. Note that this may panic.
3890            self.iter.drop_elements();
3891
3892            // Reset the contents of the table now that all elements have been
3893            // dropped.
3894            self.table.clear_no_drop();
3895
3896            // Move the now empty table back to its original location.
3897            self.orig_table
3898                .as_ptr()
3899                .copy_from_nonoverlapping(&self.table, 1);
3900        }
3901    }
3902}
3903
3904impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3905    type Item = T;
3906
3907    #[cfg_attr(feature = "inline-more", inline)]
3908    fn next(&mut self) -> Option<T> {
3909        unsafe {
3910            let item = self.iter.next()?;
3911            Some(item.read())
3912        }
3913    }
3914
3915    #[inline]
3916    fn size_hint(&self) -> (usize, Option<usize>) {
3917        self.iter.size_hint()
3918    }
3919}
3920
3921impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
3922impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
3923
3924/// Iterator over occupied buckets that could match a given hash.
3925///
3926/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
/// items that have a hash value different from the one provided. You should
3928/// always validate the returned values before using them.
3929///
3930/// For maximum flexibility this iterator is not bound by a lifetime, but you
3931/// must observe several rules when using it:
3932/// - You must not free the hash table while iterating (including via growing/shrinking).
3933/// - It is fine to erase a bucket that has been yielded by the iterator.
3934/// - Erasing a bucket that has not yet been yielded by the iterator may still
3935///   result in the iterator yielding that bucket.
3936/// - It is unspecified whether an element inserted after the iterator was
3937///   created will be yielded by that iterator.
3938/// - The order in which the iterator yields buckets is unspecified and may
3939///   change in the future.
3940pub struct RawIterHash<T> {
3941    inner: RawIterHashInner,
3942    _marker: PhantomData<T>,
3943}
3944
3945struct RawIterHashInner {
3946    // See `RawTableInner`'s corresponding fields for details.
3947    // We can't store a `*const RawTableInner` as it would get
3948    // invalidated by the user calling `&mut` methods on `RawTable`.
3949    bucket_mask: usize,
3950    ctrl: NonNull<u8>,
3951
3952    // The top 7 bits of the hash.
3953    h2_hash: u8,
3954
3955    // The sequence of groups to probe in the search.
3956    probe_seq: ProbeSeq,
3957
3958    group: Group,
3959
3960    // The elements within the group with a matching h2-hash.
3961    bitmask: BitMaskIter,
3962}
3963
3964impl<T> RawIterHash<T> {
3965    #[cfg_attr(feature = "inline-more", inline)]
3966    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
3967        RawIterHash {
3968            inner: RawIterHashInner::new(&table.table, hash),
3969            _marker: PhantomData,
3970        }
3971    }
3972}
3973impl RawIterHashInner {
3974    #[cfg_attr(feature = "inline-more", inline)]
3975    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
3976        let h2_hash = h2(hash);
3977        let probe_seq = table.probe_seq(hash);
3978        let group = Group::load(table.ctrl(probe_seq.pos));
3979        let bitmask = group.match_byte(h2_hash).into_iter();
3980
3981        RawIterHashInner {
3982            bucket_mask: table.bucket_mask,
3983            ctrl: table.ctrl,
3984            h2_hash,
3985            probe_seq,
3986            group,
3987            bitmask,
3988        }
3989    }
3990}
3991
3992impl<T> Iterator for RawIterHash<T> {
3993    type Item = Bucket<T>;
3994
3995    fn next(&mut self) -> Option<Bucket<T>> {
3996        unsafe {
3997            match self.inner.next() {
3998                Some(index) => {
3999                    // Can't use `RawTable::bucket` here as we don't have
4000                    // an actual `RawTable` reference to use.
4001                    debug_assert!(index <= self.inner.bucket_mask);
4002                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4003                    Some(bucket)
4004                }
4005                None => None,
4006            }
4007        }
4008    }
4009}
4010
4011impl Iterator for RawIterHashInner {
4012    type Item = usize;
4013
4014    fn next(&mut self) -> Option<Self::Item> {
4015        unsafe {
4016            loop {
4017                if let Some(bit) = self.bitmask.next() {
4018                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4019                    return Some(index);
4020                }
4021                if likely(self.group.match_empty().any_bit_set()) {
4022                    return None;
4023                }
4024                self.probe_seq.move_next(self.bucket_mask);
4025
4026                // Can't use `RawTableInner::ctrl` here as we don't have
4027                // an actual `RawTableInner` reference to use.
4028                let index = self.probe_seq.pos;
4029                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4030                let group_ctrl = self.ctrl.as_ptr().add(index);
4031
4032                self.group = Group::load(group_ctrl);
4033                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
4034            }
4035        }
4036    }
4037}
4038
4039#[cfg(test)]
4040mod test_map {
4041    use super::*;
4042
4043    use crate::alloc::into_ok;
4044    use crate::alloc::Global;
4045    use core::convert::Infallible;
4046
4047    fn rehash_in_place<T>(
4048        table: &mut RawTable<T>,
4049        hasher: impl Fn(&mut (), &T) -> Result<u64, Infallible>,
4050    ) {
4051        unsafe {
4052            into_ok(table.table.rehash_in_place(
4053                &mut (),
4054                &move |cx, table, index| hasher(cx, table.bucket::<T>(index).as_ref()),
4055                mem::size_of::<T>(),
4056                if mem::needs_drop::<T>() {
4057                    Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
4058                } else {
4059                    None
4060                },
4061            ));
4062        }
4063    }
4064
4065    #[test]
4066    fn rehash() {
4067        let mut table = RawTable::new();
4068        let hasher = |_: &mut (), i: &u64| Ok(*i);
4069        for i in 0..100 {
4070            table.insert(&mut (), i, i, hasher).abort();
4071        }
4072
4073        for i in 0..100 {
4074            unsafe {
4075                assert_eq!(
4076                    into_ok(table.find(&mut (), i, |_: &mut (), x: &u64| Ok(*x == i)))
4077                        .map(|b| b.read()),
4078                    Some(i)
4079                );
4080            }
4081            assert!(
4082                into_ok(table.find(&mut (), i + 100, |_: &mut (), x: &u64| Ok(*x == i + 100)))
4083                    .is_none()
4084            );
4085        }
4086
4087        rehash_in_place(&mut table, hasher);
4088
4089        for i in 0..100 {
4090            unsafe {
4091                assert_eq!(
4092                    into_ok(table.find(&mut (), i, |_: &mut (), x: &u64| Ok(*x == i)))
4093                        .map(|b| b.read()),
4094                    Some(i)
4095                );
4096            }
4097            assert!(
4098                into_ok(table.find(&mut (), i + 100, |_: &mut (), x: &u64| Ok(*x == i + 100)))
4099                    .is_none()
4100            );
4101        }
4102    }
4103
4104    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4105    /// AN UNINITIALIZED TABLE DURING THE DROP
4106    #[test]
4107    fn test_drop_uninitialized() {
4108        use ::rust_alloc::vec::Vec;
4109
4110        let table = unsafe {
            // SAFETY: `buckets` is a power of two and we're not
            // trying to actually use the returned RawTable.
4113            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8).unwrap()
4114        };
4115        drop(table);
4116    }
4117
4118    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4119    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4120    #[test]
4121    fn test_drop_zero_items() {
4122        use ::rust_alloc::vec::Vec;
4123        unsafe {
            // SAFETY: `buckets` is a power of two and we're not
            // trying to actually use the returned RawTable.
4126            let table = RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8).unwrap();
4127
4128            // WE SIMULATE, AS IT WERE, A FULL TABLE.
4129
            // SAFETY: We checked that the table is allocated, and therefore the table already has
            // `self.bucket_mask + 1 + Group::WIDTH` control bytes (see TableLayout::calculate_layout_for),
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4133            table
4134                .table
4135                .ctrl(0)
4136                .write_bytes(EMPTY, table.table.num_ctrl_bytes());
4137
4138            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4139            table.table.ctrl(0).write_bytes(0, table.capacity());
4140
4141            // Fix up the trailing control bytes. See the comments in set_ctrl
4142            // for the handling of tables smaller than the group width.
4143            if table.buckets() < Group::WIDTH {
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
                // so copying `self.buckets() == self.bucket_mask + 1` bytes at an offset of
                // `Group::WIDTH` is safe.
4147                table
4148                    .table
4149                    .ctrl(0)
4150                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4151            } else {
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control
                // bytes, so copying `Group::WIDTH` bytes at an offset of
                // `self.buckets() == self.bucket_mask + 1` is safe.
4155                table
4156                    .table
4157                    .ctrl(0)
4158                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4159            }
4160            drop(table);
4161        }
4162    }
4163
    /// CHECKING THAT A PANIC DURING `clone_from` DOES NOT CAUSE DOUBLE DROPS
    /// AND LEAVES THE DESTINATION TABLE EMPTY.
4166    #[test]
4167    fn test_catch_panic_clone_from() {
4168        use crate::alloc::{AllocError, Allocator};
4169        use ::rust_alloc::sync::Arc;
4170        use ::rust_alloc::vec::Vec;
4171        use core::sync::atomic::{AtomicI8, Ordering};
4172        use std::thread;
4173
4174        struct MyAllocInner {
4175            drop_count: Arc<AtomicI8>,
4176        }
4177
4178        #[derive(Clone)]
4179        struct MyAlloc {
4180            _inner: Arc<MyAllocInner>,
4181        }
4182
4183        impl Drop for MyAllocInner {
4184            fn drop(&mut self) {
4185                std::println!("MyAlloc freed.");
4186                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4187            }
4188        }
4189
4190        unsafe impl Allocator for MyAlloc {
4191            fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
4192                let g = Global;
4193                g.allocate(layout)
4194            }
4195
4196            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4197                let g = Global;
4198                g.deallocate(ptr, layout)
4199            }
4200        }
4201
4202        const DISARMED: bool = false;
4203        const ARMED: bool = true;
4204
4205        struct CheckedCloneDrop {
4206            panic_in_clone: bool,
4207            dropped: bool,
4208            need_drop: Vec<u64>,
4209        }
4210
4211        impl TryClone for CheckedCloneDrop {
4212            fn try_clone(&self) -> Result<Self, Error> {
4213                if self.panic_in_clone {
4214                    panic!("panic in clone")
4215                }
4216                Ok(Self {
4217                    panic_in_clone: self.panic_in_clone,
4218                    dropped: self.dropped,
4219                    need_drop: self.need_drop.clone(),
4220                })
4221            }
4222        }
4223
4224        impl Drop for CheckedCloneDrop {
4225            fn drop(&mut self) {
4226                if self.dropped {
4227                    panic!("double drop");
4228                }
4229                self.dropped = true;
4230            }
4231        }
4232
4233        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4234
4235        let mut table = RawTable::new_in(MyAlloc {
4236            _inner: Arc::new(MyAllocInner {
4237                drop_count: dropped.clone(),
4238            }),
4239        });
4240
4241        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4242            let idx = idx as u64;
4243            table
4244                .insert(
4245                    &mut (),
4246                    idx,
4247                    (
4248                        idx,
4249                        CheckedCloneDrop {
4250                            panic_in_clone,
4251                            dropped: false,
4252                            need_drop: ::rust_alloc::vec![idx],
4253                        },
4254                    ),
4255                    |_: &mut (), (k, _): &(u64, _)| Ok::<_, Infallible>(*k),
4256                )
4257                .abort();
4258        }
4259
4260        assert_eq!(table.len(), 7);
4261
4262        thread::scope(|s| {
4263            let result = s.spawn(|| {
4264                let armed_flags = [
4265                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4266                ];
4267                let mut scope_table = RawTable::new_in(MyAlloc {
4268                    _inner: Arc::new(MyAllocInner {
4269                        drop_count: dropped.clone(),
4270                    }),
4271                });
4272                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4273                    let idx = idx as u64;
4274                    scope_table
4275                        .insert(
4276                            &mut (),
4277                            idx,
4278                            (
4279                                idx,
4280                                CheckedCloneDrop {
4281                                    panic_in_clone,
4282                                    dropped: false,
4283                                    need_drop: ::rust_alloc::vec![idx + 100],
4284                                },
4285                            ),
4286                            |_: &mut (), (k, _): &(u64, _)| Ok::<_, Infallible>(*k),
4287                        )
4288                        .abort();
4289                }
4290                table.clone_from(&scope_table);
4291            });
4292            assert!(result.join().is_err());
4293        });
4294
4295        // Let's check that all iterators work fine and do not return elements
4296        // (especially `RawIterRange`, which does not depend on the number of
4297        // elements in the table, but looks directly at the control bytes)
4298        //
4299        // SAFETY: We know for sure that `RawTable` will outlive
4300        // the returned `RawIter / RawIterRange` iterator.
4301        assert_eq!(table.len(), 0);
4302        assert_eq!(unsafe { table.iter().count() }, 0);
4303        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4304
4305        for idx in 0..table.buckets() {
4306            let idx = idx as u64;
4307            assert!(
4308                into_ok(table.find(&mut (), idx, |_: &mut (), (k, _): &(u64, _)| Ok(*k == idx)))
4309                    .is_none(),
4310                "Index: {idx}"
4311            );
4312        }
4313
4314        // All allocator clones should already be dropped.
4315        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4316    }
4317}