// kovan_map/hopscotch.rs
//! Lock-Free Growing/Shrinking Hopscotch Hash Map
//!
//! # Features
//!
//! - **Robust Concurrency**: Uses Copy-on-Move displacement to prevent Use-After-Free.
//! - **Safe Resizing**: Uses a lightweight resize lock to prevent lost updates during migration.
//! - **Memory reclamation**: Uses Kovan.
//! - **Clone Support**: Supports V: Clone (e.g., `Arc<T>`) instead of just Copy.
10extern crate alloc;
11
12use alloc::boxed::Box;
13use alloc::vec::Vec;
14use core::borrow::Borrow;
15use core::hash::{BuildHasher, Hash};
16use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
17use foldhash::fast::FixedState;
18use kovan::{Atomic, Shared, pin, retire};
19
/// Neighborhood size (H parameter in hopscotch hashing): each home bucket
/// tracks occupancy of the next H slots in a 32-bit bitmap (`hop_info`),
/// so this must not exceed 32.
const NEIGHBORHOOD_SIZE: usize = 32;

/// Initial capacity used by `with_hasher`/`new` (rounded up to a power of two).
const INITIAL_CAPACITY: usize = 64;

/// Load factor threshold for growing (75%)
const GROW_THRESHOLD: f64 = 0.75;

/// Load factor threshold for shrinking (25%)
const SHRINK_THRESHOLD: f64 = 0.25;

/// Minimum capacity to prevent excessive shrinking
const MIN_CAPACITY: usize = 64;

/// Maximum probe distance before giving up and resizing
const MAX_PROBE_DISTANCE: usize = 512;
37
/// A bucket in the hopscotch hash table.
///
/// `hop_info` is only meaningful for "home" buckets (index < capacity):
/// bit `i` set means slot `home + i` holds an entry whose key hashes here.
struct Bucket<K, V> {
    /// Bitmap indicating which of the next H slots contain items that hash to this bucket
    hop_info: AtomicU32,
    /// The actual key-value slot at this position (null = empty)
    slot: Atomic<Entry<K, V>>,
}
45
/// An immutable key/value record stored behind an `Atomic` pointer.
///
/// The hash is cached so lookups can reject mismatches without a full key
/// comparison, and so displacement/migration can recompute the home bucket.
/// `Clone` is required by the Copy-on-Move displacement scheme.
#[derive(Clone)]
struct Entry<K, V> {
    hash: u64,
    key: K,
    value: V,
}
53
/// The hash table structure.
///
/// `buckets` holds `capacity + NEIGHBORHOOD_SIZE` elements: the trailing
/// padding lets neighborhood scans run without per-access bounds checks.
/// `mask == capacity - 1` (capacity is always a power of two).
struct Table<K, V> {
    buckets: Box<[Bucket<K, V>]>,
    capacity: usize,
    mask: usize,
}
60
61impl<K, V> Table<K, V> {
62    fn new(capacity: usize) -> Self {
63        let capacity = capacity.next_power_of_two().max(MIN_CAPACITY);
64        // We add padding to the array so we don't have to check bounds constantly
65        // during neighborhood scans.
66        let mut buckets = Vec::with_capacity(capacity + NEIGHBORHOOD_SIZE);
67
68        for _ in 0..(capacity + NEIGHBORHOOD_SIZE) {
69            buckets.push(Bucket {
70                hop_info: AtomicU32::new(0),
71                slot: Atomic::null(),
72            });
73        }
74
75        Self {
76            buckets: buckets.into_boxed_slice(),
77            capacity,
78            mask: capacity - 1,
79        }
80    }
81
82    #[inline(always)]
83    fn bucket_index(&self, hash: u64) -> usize {
84        (hash as usize) & self.mask
85    }
86
87    #[inline(always)]
88    fn get_bucket(&self, idx: usize) -> &Bucket<K, V> {
89        // SAFETY: Internal indices are calculated via mask or bounded offset loops.
90        // The buckets array has padding to handle overflow up to NEIGHBORHOOD_SIZE.
91        unsafe { self.buckets.get_unchecked(idx) }
92    }
93}
94
/// A concurrent, lock-free hash map based on Hopscotch Hashing.
///
/// Readers never block. Writers spin only while a resize/clear holds the
/// `resizing` flag. Entry memory is reclaimed lazily via kovan `retire`.
pub struct HopscotchMap<K: 'static, V: 'static, S = FixedState> {
    /// Current table; swapped atomically when a resize completes.
    table: Atomic<Table<K, V>>,
    /// Approximate element count (updated with Relaxed ordering).
    count: AtomicUsize,
    /// Prevents concurrent writes during resize migration to avoid lost updates
    resizing: AtomicBool,
    hasher: S,
}
103
#[cfg(feature = "std")]
impl<K, V> HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates an empty map with the default capacity and default hasher.
    pub fn new() -> Self {
        Self::with_hasher(FixedState::default())
    }

    /// Creates an empty map sized for `capacity` entries, using the default hasher.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_hasher(capacity, FixedState::default())
    }
}
120
121impl<K, V, S> HopscotchMap<K, V, S>
122where
123    K: Hash + Eq + Clone + 'static,
124    V: Clone + 'static,
125    S: BuildHasher,
126{
127    /// Creates a new `HopscotchMap` with the specified hasher and default capacity.
128    pub fn with_hasher(hasher: S) -> Self {
129        Self::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
130    }
131
132    /// Creates a new `HopscotchMap` with the specified capacity and hasher.
133    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
134        let table = Table::new(capacity);
135        Self {
136            table: Atomic::new(Box::into_raw(Box::new(table))),
137            count: AtomicUsize::new(0),
138            resizing: AtomicBool::new(false),
139            hasher,
140        }
141    }
142
    /// Returns the number of elements in the map.
    ///
    /// Under concurrent mutation this is a best-effort snapshot (the counter
    /// is maintained with Relaxed ordering).
    pub fn len(&self) -> usize {
        self.count.load(Ordering::Relaxed)
    }
147
148    /// Returns `true` if the map contains no elements.
149    pub fn is_empty(&self) -> bool {
150        self.len() == 0
151    }
152
153    /// Returns the current capacity of the map.
154    pub fn capacity(&self) -> usize {
155        let guard = pin();
156        let table_ptr = self.table.load(Ordering::Acquire, &guard);
157        unsafe { (*table_ptr.as_raw()).capacity }
158    }
159
    /// Spins until any in-progress resize (or `clear`) releases the
    /// `resizing` flag. Called by writers before touching the table.
    #[inline]
    fn wait_for_resize(&self) {
        while self.resizing.load(Ordering::Acquire) {
            core::hint::spin_loop();
        }
    }
166
167    /// Returns the value corresponding to the key.
168    pub fn get<Q>(&self, key: &Q) -> Option<V>
169    where
170        K: Borrow<Q>,
171        Q: Hash + Eq + ?Sized,
172    {
173        let hash = self.hasher.hash_one(key);
174        let guard = pin();
175        let table_ptr = self.table.load(Ordering::Acquire, &guard);
176        let table = unsafe { &*table_ptr.as_raw() };
177
178        let bucket_idx = table.bucket_index(hash);
179        let bucket = table.get_bucket(bucket_idx);
180
181        let hop_info = bucket.hop_info.load(Ordering::Acquire);
182
183        if hop_info == 0 {
184            return None;
185        }
186
187        for offset in 0..NEIGHBORHOOD_SIZE {
188            if hop_info & (1 << offset) != 0 {
189                let slot_idx = bucket_idx + offset;
190                let slot_bucket = table.get_bucket(slot_idx);
191                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
192
193                if !entry_ptr.is_null() {
194                    let entry = unsafe { &*entry_ptr.as_raw() };
195                    if entry.hash == hash && entry.key.borrow() == key {
196                        return Some(entry.value.clone());
197                    }
198                }
199            }
200        }
201
202        None
203    }
204
205    /// Inserts a key-value pair into the map.
206    pub fn insert(&self, key: K, value: V) -> Option<V> {
207        self.insert_impl(key, value, false)
208    }
209
    /// Shared implementation for `insert`, `insert_if_absent` and
    /// `get_or_insert`.
    ///
    /// Retries until the write lands in a stable table: if a resize starts or
    /// completes while we were writing, the write may have been missed by the
    /// migration, so the whole attempt is redone against the current table.
    fn insert_impl(&self, key: K, value: V, only_if_absent: bool) -> Option<V> {
        let hash = self.hasher.hash_one(&key);

        loop {
            self.wait_for_resize();

            let guard = pin();
            let table_ptr = self.table.load(Ordering::Acquire, &guard);
            // SAFETY: the guard keeps the table allocation alive.
            let table = unsafe { &*table_ptr.as_raw() };

            // A resize may have begun between wait_for_resize() and the load
            // above; restart so we don't write into a table under migration.
            if self.resizing.load(Ordering::Acquire) {
                continue;
            }

            // Note: We pass clones to try_insert if we loop here, but try_insert consumes them.
            // Since `insert_impl` owns `key` and `value`, we must clone them for the call
            // because `try_insert` might return `Retry` (looping again).
            match self.try_insert(
                table,
                hash,
                key.clone(),
                value.clone(),
                only_if_absent,
                &guard,
            ) {
                InsertResult::Success(old_val) => {
                    // CRITICAL FIX: If a resize started while we were inserting, our update
                    // might have been missed by the migration.
                    // We must retry to ensure we write to the new table.
                    // We check BOTH the resizing flag (active resize) AND the table pointer (completed resize).
                    if self.resizing.load(Ordering::SeqCst)
                        || self.table.load(Ordering::SeqCst, &guard) != table_ptr
                    {
                        continue;
                    }

                    if old_val.is_none() {
                        // Fresh insertion: bump the count and grow when the
                        // load factor crosses the threshold.
                        let new_count = self.count.fetch_add(1, Ordering::Relaxed) + 1;
                        let current_capacity = table.capacity;
                        let load_factor = new_count as f64 / current_capacity as f64;

                        if load_factor > GROW_THRESHOLD {
                            // Unpin before resizing so reclamation can proceed.
                            drop(guard);
                            self.try_resize(current_capacity * 2);
                        }
                    }
                    return old_val;
                }
                InsertResult::Exists(existing_val) => {
                    // `only_if_absent` was set and the key was already present.
                    return Some(existing_val);
                }
                InsertResult::NeedResize => {
                    // No usable slot within the neighborhood: grow, then retry.
                    let current_capacity = table.capacity;
                    drop(guard);
                    self.try_resize(current_capacity * 2);
                    continue;
                }
                InsertResult::Retry => {
                    // Lost a CAS race; retry against a fresh snapshot.
                    continue;
                }
            }
        }
    }
274
275    /// Returns the value corresponding to the key, or inserts the given value if the key is not present.
276    pub fn get_or_insert(&self, key: K, value: V) -> V {
277        match self.insert_impl(key, value.clone(), true) {
278            Some(existing) => existing,
279            None => value,
280        }
281    }
282
283    /// Insert a key-value pair only if the key does not exist.
284    /// Returns `None` if inserted, `Some(existing_value)` if the key already exists.
285    pub fn insert_if_absent(&self, key: K, value: V) -> Option<V> {
286        self.insert_impl(key, value, true)
287    }
288
289    /// Removes a key from the map, returning the value at the key if the key was previously in the map.
290    pub fn remove<Q>(&self, key: &Q) -> Option<V>
291    where
292        K: Borrow<Q>,
293        Q: Hash + Eq + ?Sized,
294    {
295        let hash = self.hasher.hash_one(key);
296
297        loop {
298            self.wait_for_resize();
299
300            let guard = pin();
301            let table_ptr = self.table.load(Ordering::Acquire, &guard);
302            let table = unsafe { &*table_ptr.as_raw() };
303
304            if self.resizing.load(Ordering::Acquire) {
305                continue;
306            }
307
308            let bucket_idx = table.bucket_index(hash);
309            let bucket = table.get_bucket(bucket_idx);
310
311            let hop_info = bucket.hop_info.load(Ordering::Acquire);
312            if hop_info == 0 {
313                return None;
314            }
315
316            for offset in 0..NEIGHBORHOOD_SIZE {
317                if hop_info & (1 << offset) != 0 {
318                    let slot_idx = bucket_idx + offset;
319                    let slot_bucket = table.get_bucket(slot_idx);
320                    let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
321
322                    if !entry_ptr.is_null() {
323                        let entry = unsafe { &*entry_ptr.as_raw() };
324                        if entry.hash == hash && entry.key.borrow() == key {
325                            let old_value = entry.value.clone();
326
327                            match slot_bucket.slot.compare_exchange(
328                                entry_ptr,
329                                unsafe { Shared::from_raw(core::ptr::null_mut()) },
330                                Ordering::Release,
331                                Ordering::Relaxed,
332                                &guard,
333                            ) {
334                                Ok(_) => {
335                                    let mask = !(1u32 << offset);
336                                    bucket.hop_info.fetch_and(mask, Ordering::Release);
337
338                                    retire(entry_ptr.as_raw());
339
340                                    let new_count = self.count.fetch_sub(1, Ordering::Relaxed) - 1;
341                                    let current_capacity = table.capacity;
342                                    let load_factor = new_count as f64 / current_capacity as f64;
343
344                                    if load_factor < SHRINK_THRESHOLD
345                                        && current_capacity > MIN_CAPACITY
346                                    {
347                                        drop(guard);
348                                        self.try_resize(current_capacity / 2);
349                                    }
350
351                                    return Some(old_value);
352                                }
353                                Err(_) => {
354                                    break;
355                                }
356                            }
357                        }
358                    }
359                }
360            }
361            return None;
362        }
363    }
364
    /// Clears the map, removing all key-value pairs.
    ///
    /// Takes the `resizing` flag for the whole sweep, so no writer (or other
    /// resize) can race the wipe. Readers stay lock-free and may observe a
    /// partially cleared map while this runs.
    pub fn clear(&self) {
        // Acquire the exclusive writer gate (same flag resize uses).
        while self
            .resizing
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the guard keeps the table allocation alive.
        let table = unsafe { &*table_ptr.as_raw() };

        // Sweep every slot, including the NEIGHBORHOOD_SIZE padding slots.
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            // CAS to null so a racing reader never observes a freed entry;
            // only the CAS winner retires the node.
            if !entry_ptr.is_null()
                && bucket
                    .slot
                    .compare_exchange(
                        entry_ptr,
                        unsafe { Shared::from_raw(core::ptr::null_mut()) },
                        Ordering::Release,
                        Ordering::Relaxed,
                        &guard,
                    )
                    .is_ok()
            {
                retire(entry_ptr.as_raw());
            }

            // hop_info is only meaningful for home buckets (index < capacity).
            if i < table.capacity {
                let b = table.get_bucket(i);
                b.hop_info.store(0, Ordering::Release);
            }
        }

        self.count.store(0, Ordering::Release);
        // Release the writer gate.
        self.resizing.store(false, Ordering::Release);
    }
407
    /// Attempts one insert against a single table snapshot.
    ///
    /// Three phases: (1) update in place if the key already exists within the
    /// neighborhood, (2) claim any free slot inside the neighborhood, (3)
    /// hopscotch displacement to bubble a distant free slot closer. Returns
    /// `Retry` on any lost CAS race and `NeedResize` when no slot can be
    /// brought within the neighborhood.
    fn try_insert(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        only_if_absent: bool,
        guard: &kovan::Guard,
    ) -> InsertResult<V> {
        let bucket_idx = table.bucket_index(hash);
        let bucket = table.get_bucket(bucket_idx);

        // 1. Check if key exists (Update)
        let hop_info = bucket.hop_info.load(Ordering::Acquire);
        for offset in 0..NEIGHBORHOOD_SIZE {
            if hop_info & (1 << offset) != 0 {
                let slot_idx = bucket_idx + offset;
                let slot_bucket = table.get_bucket(slot_idx);
                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entries are retired, not freed, while pinned.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    if entry.hash == hash && entry.key == key {
                        if only_if_absent {
                            return InsertResult::Exists(entry.value.clone());
                        }

                        let old_value = entry.value.clone();
                        // Clone key and value because if CAS fails, we retry, and we are inside a loop.
                        // We cannot move out of `key` or `value` inside a loop.
                        let new_entry = Box::into_raw(Box::new(Entry {
                            hash,
                            key: key.clone(),
                            value: value.clone(),
                        }));

                        // Swap the entire entry node (copy-on-write update).
                        match slot_bucket.slot.compare_exchange(
                            entry_ptr,
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // Old node may still be read by peers: defer free.
                                retire(entry_ptr.as_raw());
                                return InsertResult::Success(Some(old_value));
                            }
                            Err(_) => {
                                // Our node was never published: free it directly.
                                drop(unsafe { Box::from_raw(new_entry) });
                                return InsertResult::Retry;
                            }
                        }
                    }
                }
            }
        }

        // 2. Find empty slot
        for offset in 0..NEIGHBORHOOD_SIZE {
            let slot_idx = bucket_idx + offset;
            if slot_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                return InsertResult::NeedResize;
            }

            let slot_bucket = table.get_bucket(slot_idx);
            let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

            if entry_ptr.is_null() {
                // Clone key and value. If CAS fails, we continue the loop, so we need the originals
                // for the next iteration.
                let new_entry = Box::into_raw(Box::new(Entry {
                    hash,
                    key: key.clone(),
                    value: value.clone(),
                }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        // Publish membership in the home bucket's bitmap.
                        bucket.hop_info.fetch_or(1u32 << offset, Ordering::Release);
                        return InsertResult::Success(None);
                    }
                    Err(_) => {
                        // Slot was taken concurrently; free our unpublished
                        // node and try the next offset.
                        drop(unsafe { Box::from_raw(new_entry) });
                        continue;
                    }
                }
            }
        }

        // 3. Try displacement
        match self.try_find_closer_slot(table, bucket_idx, guard) {
            Some(final_offset) if final_offset < NEIGHBORHOOD_SIZE => {
                let slot_idx = bucket_idx + final_offset;
                let slot_bucket = table.get_bucket(slot_idx);

                // Displacement reported this slot free; confirm before CASing.
                let curr = slot_bucket.slot.load(Ordering::Relaxed, guard);
                if !curr.is_null() {
                    return InsertResult::Retry;
                }

                // This is the final attempt in this function. We can move key/value here
                // because previous usages were clones.
                let new_entry = Box::into_raw(Box::new(Entry { hash, key, value }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        bucket
                            .hop_info
                            .fetch_or(1u32 << final_offset, Ordering::Release);
                        InsertResult::Success(None)
                    }
                    Err(_) => {
                        drop(unsafe { Box::from_raw(new_entry) });
                        InsertResult::Retry
                    }
                }
            }
            _ => InsertResult::NeedResize,
        }
    }
540
541    fn try_find_closer_slot(
542        &self,
543        table: &Table<K, V>,
544        bucket_idx: usize,
545        guard: &kovan::Guard,
546    ) -> Option<usize> {
547        for probe_offset in NEIGHBORHOOD_SIZE..MAX_PROBE_DISTANCE {
548            let probe_idx = bucket_idx + probe_offset;
549            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
550                return None;
551            }
552
553            let probe_bucket = table.get_bucket(probe_idx);
554            let entry_ptr = probe_bucket.slot.load(Ordering::Acquire, guard);
555
556            if entry_ptr.is_null() {
557                return self.try_move_closer(table, bucket_idx, probe_idx, guard);
558            }
559        }
560        None
561    }
562
    /// Bubbles the empty slot at `empty_idx` backwards until it lies within
    /// `target_idx`'s neighborhood, relocating intermediate entries.
    ///
    /// Uses Copy-on-Move: an entry is first *copied* into the hole, and only
    /// then is its old slot CASed to null — so a concurrent reader always
    /// finds the entry in at least one slot and never touches a freed node.
    fn try_move_closer(
        &self,
        table: &Table<K, V>,
        target_idx: usize,
        empty_idx: usize,
        guard: &kovan::Guard,
    ) -> Option<usize> {
        let mut current_empty = empty_idx;

        // Repeat until the hole is within NEIGHBORHOOD_SIZE of the target.
        while current_empty > target_idx + NEIGHBORHOOD_SIZE - 1 {
            let mut moved = false;

            // Examine entries just below the hole, closest-first, that could
            // legally hop into it.
            for offset in 1..NEIGHBORHOOD_SIZE.min(current_empty - target_idx) {
                let candidate_idx = current_empty - offset;
                let candidate_bucket = table.get_bucket(candidate_idx);
                let entry_ptr = candidate_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: the guard protects the entry from reclamation.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    let entry_home = table.bucket_index(entry.hash);

                    // A move is legal only if the hole still falls inside the
                    // candidate entry's own home neighborhood.
                    if entry_home <= candidate_idx && current_empty < entry_home + NEIGHBORHOOD_SIZE
                    {
                        // Copy-on-Move for safety
                        let new_entry = Box::into_raw(Box::new(entry.clone()));
                        let empty_bucket = table.get_bucket(current_empty);

                        match empty_bucket.slot.compare_exchange(
                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // Copy published; now unlink the original slot.
                                match candidate_bucket.slot.compare_exchange(
                                    entry_ptr,
                                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                    Ordering::Release,
                                    Ordering::Relaxed,
                                    guard,
                                ) {
                                    Ok(_) => {
                                        // Update the home bucket's bitmap:
                                        // clear the old offset, set the new one.
                                        let old_offset = candidate_idx - entry_home;
                                        let new_offset = current_empty - entry_home;

                                        let home_bucket = table.get_bucket(entry_home);
                                        home_bucket
                                            .hop_info
                                            .fetch_and(!(1u32 << old_offset), Ordering::Release);
                                        home_bucket
                                            .hop_info
                                            .fetch_or(1u32 << new_offset, Ordering::Release);

                                        retire(entry_ptr.as_raw());
                                        // The hole has moved down to the
                                        // candidate's old position.
                                        current_empty = candidate_idx;
                                        moved = true;
                                        break;
                                    }
                                    Err(_) => {
                                        // Original changed under us: best-effort
                                        // CAS the published copy back to null,
                                        // then free it.
                                        // NOTE(review): if this CAS-back fails,
                                        // another thread already modified the
                                        // slot; freeing `new_entry` directly
                                        // (rather than via `retire`) assumes no
                                        // reader still holds it — verify.
                                        let _ = empty_bucket.slot.compare_exchange(
                                            unsafe { Shared::from_raw(new_entry) },
                                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                            Ordering::Release,
                                            Ordering::Relaxed,
                                            guard,
                                        );
                                        unsafe { drop(Box::from_raw(new_entry)) };
                                        continue;
                                    }
                                }
                            }
                            Err(_) => {
                                // Someone else claimed the hole; our copy was
                                // never published, so free it directly.
                                unsafe { drop(Box::from_raw(new_entry)) };
                                continue;
                            }
                        }
                    }
                }
            }

            if !moved {
                // No candidate could hop: give up (caller resizes instead).
                return None;
            }
        }

        // Report the hole's final offset if it landed inside the neighborhood.
        if current_empty >= target_idx && current_empty < target_idx + NEIGHBORHOOD_SIZE {
            Some(current_empty - target_idx)
        } else {
            None
        }
    }
655
    /// Places one migrated entry into the not-yet-published resize target.
    ///
    /// Called only by the thread holding the `resizing` flag, and the new
    /// table is not visible to anyone else yet — hence the plain `store` and
    /// `Relaxed` / non-atomic read-modify-write of `hop_info`.
    /// Returns `false` when the first free slot past the home bucket lies
    /// outside the neighborhood (no displacement is attempted during
    /// migration); the caller aborts and retries with a bigger table.
    fn insert_into_new_table(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        guard: &kovan::Guard,
    ) -> bool {
        let bucket_idx = table.bucket_index(hash);

        // Linear probe from the home bucket for the first empty slot.
        for probe_offset in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let probe_idx = bucket_idx + probe_offset;
            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                break;
            }

            let probe_bucket = table.get_bucket(probe_idx);
            let slot_ptr = probe_bucket.slot.load(Ordering::Relaxed, guard);

            if slot_ptr.is_null() {
                let offset_from_home = probe_idx - bucket_idx;

                if offset_from_home < NEIGHBORHOOD_SIZE {
                    let new_entry = Box::into_raw(Box::new(Entry { hash, key, value }));
                    probe_bucket
                        .slot
                        .store(unsafe { Shared::from_raw(new_entry) }, Ordering::Release);

                    // Single-writer: load/modify/store of hop_info cannot race.
                    let bucket = table.get_bucket(bucket_idx);
                    let mut hop = bucket.hop_info.load(Ordering::Relaxed);
                    hop |= 1u32 << offset_from_home;
                    bucket.hop_info.store(hop, Ordering::Relaxed);
                    return true;
                } else {
                    // First hole is beyond the neighborhood: migration fails.
                    return false;
                }
            }
        }
        false
    }
696
697    fn try_resize(&self, new_capacity: usize) {
698        if self
699            .resizing
700            .compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
701            .is_err()
702        {
703            return;
704        }
705
706        let new_capacity = new_capacity.next_power_of_two().max(MIN_CAPACITY);
707        let guard = pin();
708        let old_table_ptr = self.table.load(Ordering::Acquire, &guard);
709        let old_table = unsafe { &*old_table_ptr.as_raw() };
710
711        if old_table.capacity == new_capacity {
712            self.resizing.store(false, Ordering::Release);
713            return;
714        }
715
716        let new_table = Box::into_raw(Box::new(Table::new(new_capacity)));
717        let new_table_ref = unsafe { &*new_table };
718
719        let mut success = true;
720
721        for i in 0..(old_table.capacity + NEIGHBORHOOD_SIZE) {
722            let bucket = old_table.get_bucket(i);
723            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);
724
725            if !entry_ptr.is_null() {
726                let entry = unsafe { &*entry_ptr.as_raw() };
727                if !self.insert_into_new_table(
728                    new_table_ref,
729                    entry.hash,
730                    entry.key.clone(),
731                    entry.value.clone(),
732                    &guard,
733                ) {
734                    success = false;
735                    break;
736                }
737            }
738        }
739
740        if success {
741            match self.table.compare_exchange(
742                old_table_ptr,
743                unsafe { Shared::from_raw(new_table) },
744                Ordering::Release,
745                Ordering::Relaxed,
746                &guard,
747            ) {
748                Ok(_) => {
749                    retire(old_table_ptr.as_raw());
750                }
751                Err(_) => {
752                    success = false;
753                }
754            }
755        }
756
757        if !success {
758            for i in 0..(new_table_ref.capacity + NEIGHBORHOOD_SIZE) {
759                let bucket = new_table_ref.get_bucket(i);
760                let entry_ptr = bucket.slot.load(Ordering::Relaxed, &guard);
761                if !entry_ptr.is_null() {
762                    unsafe {
763                        drop(Box::from_raw(entry_ptr.as_raw()));
764                    }
765                }
766            }
767            unsafe {
768                drop(Box::from_raw(new_table));
769            }
770        }
771
772        self.resizing.store(false, Ordering::Release);
773    }
774    /// Returns an iterator over the map entries.
775    pub fn iter(&self) -> HopscotchIter<'_, K, V, S> {
776        let guard = pin();
777        let table_ptr = self.table.load(Ordering::Acquire, &guard);
778        let _table = unsafe { &*table_ptr.as_raw() };
779        HopscotchIter {
780            map: self,
781            bucket_idx: 0,
782            guard,
783        }
784    }
785
    /// Returns an iterator over the map keys (a thin wrapper over [`Self::iter`]
    /// that discards values).
    pub fn keys(&self) -> HopscotchKeys<'_, K, V, S> {
        HopscotchKeys { iter: self.iter() }
    }
790
    /// Returns a reference to the map's `BuildHasher`.
    pub fn hasher(&self) -> &S {
        &self.hasher
    }
795}
796
/// Iterator over `HopscotchMap` entries.
///
/// Holds a kovan guard for its entire lifetime so every entry it may visit
/// is protected from reclamation while iteration is in progress.
pub struct HopscotchIter<'a, K: 'static, V: 'static, S> {
    map: &'a HopscotchMap<K, V, S>,
    /// Next raw slot index to examine (spans capacity + padding slots).
    bucket_idx: usize,
    guard: kovan::Guard,
}
803
impl<'a, K, V, S> Iterator for HopscotchIter<'a, K, V, S>
where
    K: Clone,
    V: Clone,
{
    type Item = (K, V);

    /// Advances to the next occupied slot and clones out its key/value.
    ///
    /// NOTE(review): the table pointer is re-loaded on each call; if a resize
    /// swaps tables mid-iteration, `bucket_idx` continues against the *new*
    /// table, so entries can be skipped or yielded twice — confirm this
    /// weak-consistency contract is intended.
    fn next(&mut self) -> Option<Self::Item> {
        let table_ptr = self.map.table.load(Ordering::Acquire, &self.guard);
        // SAFETY: `self.guard` keeps the table allocation alive.
        let table = unsafe { &*table_ptr.as_raw() };

        while self.bucket_idx < table.buckets.len() {
            let bucket = table.get_bucket(self.bucket_idx);
            self.bucket_idx += 1;

            let entry_ptr = bucket.slot.load(Ordering::Acquire, &self.guard);
            if !entry_ptr.is_null() {
                // SAFETY: non-null entries are retired, not freed, while pinned.
                let entry = unsafe { &*entry_ptr.as_raw() };
                return Some((entry.key.clone(), entry.value.clone()));
            }
        }
        None
    }
}
828
/// Iterator over `HopscotchMap` keys; wraps [`HopscotchIter`] and drops values.
pub struct HopscotchKeys<'a, K: 'static, V: 'static, S> {
    iter: HopscotchIter<'a, K, V, S>,
}
833
834impl<'a, K, V, S> Iterator for HopscotchKeys<'a, K, V, S>
835where
836    K: Clone,
837    V: Clone,
838{
839    type Item = K;
840
841    fn next(&mut self) -> Option<Self::Item> {
842        self.iter.next().map(|(k, _)| k)
843    }
844}
845
// Enables `for (k, v) in &map { ... }` by delegating to `HopscotchMap::iter`.
impl<'a, K, V, S> IntoIterator for &'a HopscotchMap<K, V, S>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
    S: BuildHasher,
{
    type Item = (K, V);
    type IntoIter = HopscotchIter<'a, K, V, S>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
859
/// Outcome of a single `try_insert` attempt against one table snapshot.
enum InsertResult<V> {
    /// Inserted (`None`) or updated in place (`Some(previous_value)`).
    Success(Option<V>),
    /// `only_if_absent` was set and the key already existed.
    Exists(V),
    /// No free slot reachable within the neighborhood; caller should grow.
    NeedResize,
    /// A CAS lost a race; caller should retry with a fresh snapshot.
    Retry,
}
866
#[cfg(feature = "std")]
impl<K, V> Default for HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Equivalent to [`HopscotchMap::new`].
    fn default() -> Self {
        Self::new()
    }
}
877
878unsafe impl<K: Send, V: Send, S: Send> Send for HopscotchMap<K, V, S> {}
879unsafe impl<K: Sync, V: Sync, S: Sync> Sync for HopscotchMap<K, V, S> {}
880
impl<K, V, S> Drop for HopscotchMap<K, V, S> {
    fn drop(&mut self) {
        // SAFETY: `drop(&mut self)` guarantees exclusive ownership — no concurrent
        // readers can exist. Free nodes immediately instead of deferring via `retire()`.
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        let table = unsafe { &*table_ptr.as_raw() };

        // Free every remaining entry node (including any in padding slots).
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                unsafe {
                    drop(Box::from_raw(entry_ptr.as_raw()));
                }
            }
        }

        // Finally free the table allocation itself.
        unsafe {
            drop(Box::from_raw(table_ptr.as_raw()));
        }
    }
}
905
#[cfg(test)]
mod tests {
    use super::*;

    /// Basic insert/get round trip plus a miss.
    #[test]
    fn test_insert_and_get() {
        let map = HopscotchMap::new();
        assert_eq!(map.insert(1, 100), None);
        assert_eq!(map.get(&1), Some(100));
        assert_eq!(map.get(&2), None);
    }

    /// Inserting past the initial capacity must trigger growth without
    /// losing any entries.
    #[test]
    fn test_growing() {
        let map = HopscotchMap::with_capacity(32);
        for i in 0..100 {
            map.insert(i, i * 2);
        }
        for i in 0..100 {
            assert_eq!(map.get(&i), Some(i * 2));
        }
    }

    /// Four threads insert disjoint key ranges; afterwards every key must be
    /// present with its expected value.
    #[test]
    fn test_concurrent() {
        use alloc::sync::Arc;
        extern crate std;
        use std::thread;

        let map = Arc::new(HopscotchMap::with_capacity(64));
        let mut handles = alloc::vec::Vec::new();

        for thread_id in 0..4 {
            let map_clone = Arc::clone(&map);
            let handle = thread::spawn(move || {
                for i in 0..1000 {
                    // Disjoint per-thread key ranges: no cross-thread overwrites.
                    let key = thread_id * 1000 + i;
                    map_clone.insert(key, key * 2);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        for thread_id in 0..4 {
            for i in 0..1000 {
                let key = thread_id * 1000 + i;
                assert_eq!(map.get(&key), Some(key * 2));
            }
        }
    }
}