// kovan_map/hopscotch.rs
1//! Lock-Free Growing/Shrinking Hopscotch Hash Map
2//!
3//! # Features
4//!
5//! - **Robust Concurrency**: Uses Copy-on-Move displacement to prevent Use-After-Free.
6//! - **Safe Resizing**: Uses a lightweight resize lock to prevent lost updates during migration.
7//! - **Memory reclamation**: Uses Kovan.
8//! - **Clone Support**: Supports V: Clone (e.g., `Arc<T>`) instead of just Copy.
9
10extern crate alloc;
11
12use alloc::boxed::Box;
13use alloc::vec::Vec;
14use core::borrow::Borrow;
15use core::hash::{BuildHasher, Hash};
16use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
17use foldhash::fast::FixedState;
18use kovan::{Atomic, RetiredNode, Shared, pin, retire};
19
/// Neighborhood size (H parameter in hopscotch hashing).
/// An entry always resides within H slots of its home bucket; 32 matches the
/// width of the `AtomicU32` hop bitmap (one bit per neighborhood slot).
const NEIGHBORHOOD_SIZE: usize = 32;

/// Initial capacity used by `HopscotchMap::new` / `with_hasher`.
const INITIAL_CAPACITY: usize = 64;

/// Load factor threshold for growing (75%): insert triggers a grow-resize
/// when count/capacity exceeds this.
const GROW_THRESHOLD: f64 = 0.75;

/// Load factor threshold for shrinking (25%): remove triggers a shrink-resize
/// when count/capacity falls below this (and capacity > MIN_CAPACITY).
const SHRINK_THRESHOLD: f64 = 0.25;

/// Minimum capacity to prevent excessive shrinking.
const MIN_CAPACITY: usize = 64;

/// Maximum linear probe distance during displacement search before giving up
/// and resizing instead.
const MAX_PROBE_DISTANCE: usize = 512;
37
/// A bucket in the hopscotch hash table.
struct Bucket<K, V> {
    /// Bitmap over the next H slots: bit `i` set means the slot at
    /// `home + i` holds an entry whose hash maps to *this* bucket.
    hop_info: AtomicU32,
    /// The actual key-value slot stored at this array position
    /// (null when empty).
    slot: Atomic<Entry<K, V>>,
}
45
/// An entry in the hash table: cached hash plus the owned key/value pair.
// NOTE(review): `repr(C)` keeps `retired` as the first field — presumably so
// kovan can treat a `*mut Entry` as a `*mut RetiredNode` during reclamation;
// confirm against kovan's retire() contract.
#[repr(C)]
struct Entry<K, V> {
    retired: RetiredNode,
    hash: u64,
    key: K,
    value: V,
}
54
55impl<K: Clone, V: Clone> Clone for Entry<K, V> {
56    fn clone(&self) -> Self {
57        Self {
58            retired: RetiredNode::new(),
59            hash: self.hash,
60            key: self.key.clone(),
61            value: self.value.clone(),
62        }
63    }
64}
65
/// The hash table structure: a padded bucket array plus cached geometry.
// NOTE(review): `repr(C)` keeps `retired` first, matching `Entry` — presumably
// required by kovan's reclamation scheme; confirm.
#[repr(C)]
struct Table<K, V> {
    retired: RetiredNode,
    /// `capacity + NEIGHBORHOOD_SIZE` buckets; the trailing padding lets
    /// neighborhood scans run without bounds checks.
    buckets: Box<[Bucket<K, V>]>,
    /// Logical capacity (always a power of two, >= MIN_CAPACITY).
    capacity: usize,
    /// `capacity - 1`, used to reduce hashes to bucket indices.
    mask: usize,
}
74
75impl<K, V> Table<K, V> {
76    fn new(capacity: usize) -> Self {
77        let capacity = capacity.next_power_of_two().max(MIN_CAPACITY);
78        // We add padding to the array so we don't have to check bounds constantly
79        // during neighborhood scans.
80        let mut buckets = Vec::with_capacity(capacity + NEIGHBORHOOD_SIZE);
81
82        for _ in 0..(capacity + NEIGHBORHOOD_SIZE) {
83            buckets.push(Bucket {
84                hop_info: AtomicU32::new(0),
85                slot: Atomic::null(),
86            });
87        }
88
89        Self {
90            retired: RetiredNode::new(),
91            buckets: buckets.into_boxed_slice(),
92            capacity,
93            mask: capacity - 1,
94        }
95    }
96
97    #[inline(always)]
98    fn bucket_index(&self, hash: u64) -> usize {
99        (hash as usize) & self.mask
100    }
101
102    #[inline(always)]
103    fn get_bucket(&self, idx: usize) -> &Bucket<K, V> {
104        // SAFETY: Internal indices are calculated via mask or bounded offset loops.
105        // The buckets array has padding to handle overflow up to NEIGHBORHOOD_SIZE.
106        unsafe { self.buckets.get_unchecked(idx) }
107    }
108}
109
/// A concurrent, lock-free hash map based on Hopscotch Hashing.
pub struct HopscotchMap<K: 'static, V: 'static, S = FixedState> {
    /// Current table; swapped atomically on resize and reclaimed via kovan.
    table: Atomic<Table<K, V>>,
    /// Approximate element count (updated with Relaxed ordering).
    count: AtomicUsize,
    /// Prevents concurrent writes during resize migration to avoid lost updates.
    /// Writers spin on this flag; the resizer/`clear` own it exclusively.
    resizing: AtomicBool,
    /// Hash builder used for all key hashing.
    hasher: S,
}
118
// NOTE(review): these convenience constructors are std-gated even though they
// only touch `FixedState::default()` — presumably a crate feature-policy
// choice; confirm whether they could be available under no_std too.
#[cfg(feature = "std")]
impl<K, V> HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates a new `HopscotchMap` with default capacity and hasher.
    pub fn new() -> Self {
        Self::with_hasher(FixedState::default())
    }

    /// Creates a new `HopscotchMap` with the specified capacity and default hasher.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_hasher(capacity, FixedState::default())
    }
}
135
136impl<K, V, S> HopscotchMap<K, V, S>
137where
138    K: Hash + Eq + Clone + 'static,
139    V: Clone + 'static,
140    S: BuildHasher,
141{
    /// Creates a new `HopscotchMap` with the specified hasher and the
    /// default capacity (`INITIAL_CAPACITY`).
    pub fn with_hasher(hasher: S) -> Self {
        Self::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
    }
146
147    /// Creates a new `HopscotchMap` with the specified capacity and hasher.
148    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
149        let table = Table::new(capacity);
150        Self {
151            table: Atomic::new(Box::into_raw(Box::new(table))),
152            count: AtomicUsize::new(0),
153            resizing: AtomicBool::new(false),
154            hasher,
155        }
156    }
157
    /// Returns the number of elements in the map.
    /// The count is maintained with Relaxed atomics, so under concurrent
    /// mutation the value is a best-effort snapshot.
    pub fn len(&self) -> usize {
        self.count.load(Ordering::Relaxed)
    }
162
    /// Returns `true` if the map contains no elements (per the same
    /// best-effort count as [`len`]).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
167
    /// Returns the current capacity of the map.
    /// Pins a guard so the table cannot be reclaimed while we read it.
    pub fn capacity(&self) -> usize {
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the pinned guard keeps the table alive for this read.
        unsafe { (*table_ptr.as_raw()).capacity }
    }
174
    /// Spin until no resize (or `clear`) holds the `resizing` flag.
    /// Writers call this before touching the table so their updates are not
    /// lost to a concurrent migration.
    #[inline]
    fn wait_for_resize(&self) {
        while self.resizing.load(Ordering::Acquire) {
            core::hint::spin_loop();
        }
    }
181
182    /// Returns the value corresponding to the key.
183    pub fn get<Q>(&self, key: &Q) -> Option<V>
184    where
185        K: Borrow<Q>,
186        Q: Hash + Eq + ?Sized,
187    {
188        let hash = self.hasher.hash_one(key);
189        let guard = pin();
190        let table_ptr = self.table.load(Ordering::Acquire, &guard);
191        let table = unsafe { &*table_ptr.as_raw() };
192
193        let bucket_idx = table.bucket_index(hash);
194        let bucket = table.get_bucket(bucket_idx);
195
196        let hop_info = bucket.hop_info.load(Ordering::Acquire);
197
198        if hop_info == 0 {
199            return None;
200        }
201
202        for offset in 0..NEIGHBORHOOD_SIZE {
203            if hop_info & (1 << offset) != 0 {
204                let slot_idx = bucket_idx + offset;
205                let slot_bucket = table.get_bucket(slot_idx);
206                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
207
208                if !entry_ptr.is_null() {
209                    let entry = unsafe { &*entry_ptr.as_raw() };
210                    if entry.hash == hash && entry.key.borrow() == key {
211                        return Some(entry.value.clone());
212                    }
213                }
214            }
215        }
216
217        None
218    }
219
    /// Inserts a key-value pair into the map.
    /// Returns the previous value if the key was already present.
    pub fn insert(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, false)
    }
224
    /// Shared implementation behind `insert` / `insert_if_absent`.
    ///
    /// Retries until the write demonstrably landed in a table that was not
    /// concurrently resized; `key`/`value` are cloned per attempt because
    /// `try_insert` consumes them and may request a retry.
    fn insert_impl(&self, key: K, value: V, only_if_absent: bool) -> Option<V> {
        let hash = self.hasher.hash_one(&key);

        loop {
            self.wait_for_resize();

            let guard = pin();
            let table_ptr = self.table.load(Ordering::Acquire, &guard);
            // SAFETY: the pinned guard keeps the table alive.
            let table = unsafe { &*table_ptr.as_raw() };

            // A resize may have begun between `wait_for_resize` and the table
            // load; restart rather than write into a table being migrated.
            if self.resizing.load(Ordering::Acquire) {
                continue;
            }

            // Note: We pass clones to try_insert if we loop here, but try_insert consumes them.
            // Since `insert_impl` owns `key` and `value`, we must clone them for the call
            // because `try_insert` might return `Retry` (looping again).
            match self.try_insert(
                table,
                hash,
                key.clone(),
                value.clone(),
                only_if_absent,
                &guard,
            ) {
                InsertResult::Success(old_val) => {
                    // CRITICAL FIX: If a resize started while we were inserting, our update
                    // might have been missed by the migration.
                    // We must retry to ensure we write to the new table.
                    // We check BOTH the resizing flag (active resize) AND the table pointer (completed resize).
                    if self.resizing.load(Ordering::SeqCst)
                        || self.table.load(Ordering::SeqCst, &guard) != table_ptr
                    {
                        continue;
                    }

                    if old_val.is_none() {
                        // New key: bump the count and grow if we crossed the
                        // load-factor threshold.
                        let new_count = self.count.fetch_add(1, Ordering::Relaxed) + 1;
                        let current_capacity = table.capacity;
                        let load_factor = new_count as f64 / current_capacity as f64;

                        if load_factor > GROW_THRESHOLD {
                            // Unpin before resizing so the old table can be
                            // reclaimed once migration finishes.
                            drop(guard);
                            self.try_resize(current_capacity * 2);
                        }
                    }
                    return old_val;
                }
                InsertResult::Exists(existing_val) => {
                    // only_if_absent hit an existing key.
                    return Some(existing_val);
                }
                InsertResult::NeedResize => {
                    // Neighborhood is full and displacement failed: grow.
                    let current_capacity = table.capacity;
                    drop(guard);
                    self.try_resize(current_capacity * 2);
                    continue;
                }
                InsertResult::Retry => {
                    // Lost a CAS race; try again from the top.
                    continue;
                }
            }
        }
    }
289
    /// Returns the value corresponding to the key, or inserts the given value if the key is not present.
    ///
    /// When multiple threads call this concurrently for the same key, all callers
    /// are guaranteed to receive the same value (the one visible in the map).
    pub fn get_or_insert(&self, key: K, value: V) -> V {
        // Fast path: key already exists — no clone, no insert.
        if let Some(v) = self.get(&key) {
            return v;
        }
        // Slow path: try to insert, then read back for concurrent consistency.
        // The hop_info update is non-atomic with the slot CAS, so two threads can
        // both "win" the insert. Reading back ensures all callers agree on one value.
        //
        // Note: `expect` has zero overhead vs `unwrap` on the success path — the
        // static string literal is only materialized in the panic (unreachable) path.
        let _ = self.insert_impl(key.clone(), value, true);
        self.get(&key).expect("key was just inserted")
    }
308
    /// Insert a key-value pair only if the key does not exist.
    /// Returns `None` if inserted, `Some(existing_value)` if the key already exists.
    pub fn insert_if_absent(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, true)
    }
314
315    /// Removes a key from the map, returning the value at the key if the key was previously in the map.
316    pub fn remove<Q>(&self, key: &Q) -> Option<V>
317    where
318        K: Borrow<Q>,
319        Q: Hash + Eq + ?Sized,
320    {
321        let hash = self.hasher.hash_one(key);
322
323        loop {
324            self.wait_for_resize();
325
326            let guard = pin();
327            let table_ptr = self.table.load(Ordering::Acquire, &guard);
328            let table = unsafe { &*table_ptr.as_raw() };
329
330            if self.resizing.load(Ordering::Acquire) {
331                continue;
332            }
333
334            let bucket_idx = table.bucket_index(hash);
335            let bucket = table.get_bucket(bucket_idx);
336
337            let hop_info = bucket.hop_info.load(Ordering::Acquire);
338            if hop_info == 0 {
339                return None;
340            }
341
342            for offset in 0..NEIGHBORHOOD_SIZE {
343                if hop_info & (1 << offset) != 0 {
344                    let slot_idx = bucket_idx + offset;
345                    let slot_bucket = table.get_bucket(slot_idx);
346                    let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
347
348                    if !entry_ptr.is_null() {
349                        let entry = unsafe { &*entry_ptr.as_raw() };
350                        if entry.hash == hash && entry.key.borrow() == key {
351                            let old_value = entry.value.clone();
352
353                            match slot_bucket.slot.compare_exchange(
354                                entry_ptr,
355                                unsafe { Shared::from_raw(core::ptr::null_mut()) },
356                                Ordering::Release,
357                                Ordering::Relaxed,
358                                &guard,
359                            ) {
360                                Ok(_) => {
361                                    let mask = !(1u32 << offset);
362                                    bucket.hop_info.fetch_and(mask, Ordering::Release);
363
364                                    unsafe { retire(entry_ptr.as_raw()) };
365
366                                    let new_count = self.count.fetch_sub(1, Ordering::Relaxed) - 1;
367                                    let current_capacity = table.capacity;
368                                    let load_factor = new_count as f64 / current_capacity as f64;
369
370                                    if load_factor < SHRINK_THRESHOLD
371                                        && current_capacity > MIN_CAPACITY
372                                    {
373                                        drop(guard);
374                                        self.try_resize(current_capacity / 2);
375                                    }
376
377                                    return Some(old_value);
378                                }
379                                Err(_) => {
380                                    break;
381                                }
382                            }
383                        }
384                    }
385                }
386            }
387            return None;
388        }
389    }
390
    /// Clears the map, removing all key-value pairs.
    ///
    /// Acquires the `resizing` flag for exclusive write access (writers spin
    /// on it), CAS-empties every slot, zeroes the hop bitmaps, and resets the
    /// count before releasing the flag.
    pub fn clear(&self) {
        // Take the resize lock; spin if a resize or another clear holds it.
        while self
            .resizing
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the pinned guard keeps the table alive.
        let table = unsafe { &*table_ptr.as_raw() };

        // Walk the full padded array, including the overflow region.
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            // CAS (not store) so a racing reader/remover cannot make us
            // retire an entry someone else already unlinked.
            if !entry_ptr.is_null()
                && bucket
                    .slot
                    .compare_exchange(
                        entry_ptr,
                        unsafe { Shared::from_raw(core::ptr::null_mut()) },
                        Ordering::Release,
                        Ordering::Relaxed,
                        &guard,
                    )
                    .is_ok()
            {
                // SAFETY: we won the CAS and solely own the unlinked entry.
                unsafe { retire(entry_ptr.as_raw()) };
            }

            // Hop bitmaps only exist for the first `capacity` home buckets.
            if i < table.capacity {
                let b = table.get_bucket(i);
                b.hop_info.store(0, Ordering::Release);
            }
        }

        self.count.store(0, Ordering::Release);
        // Release the resize lock last so writers see a fully cleared table.
        self.resizing.store(false, Ordering::Release);
    }
433
    /// One attempt at inserting (or updating) `key` into `table`.
    ///
    /// Three phases: (1) update-in-place if the key is already in the
    /// neighborhood, (2) claim an empty in-neighborhood slot, (3) displace
    /// an entry to free one (hopscotch move). Returns `Retry` on any lost
    /// CAS race and `NeedResize` when no slot can be produced.
    fn try_insert(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        only_if_absent: bool,
        guard: &kovan::Guard,
    ) -> InsertResult<V> {
        let bucket_idx = table.bucket_index(hash);
        let bucket = table.get_bucket(bucket_idx);

        // 1. Check if key exists (Update)
        let hop_info = bucket.hop_info.load(Ordering::Acquire);
        for offset in 0..NEIGHBORHOOD_SIZE {
            if hop_info & (1 << offset) != 0 {
                let slot_idx = bucket_idx + offset;
                let slot_bucket = table.get_bucket(slot_idx);
                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entry observed under the pinned guard.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    if entry.hash == hash && entry.key == key {
                        if only_if_absent {
                            return InsertResult::Exists(entry.value.clone());
                        }

                        let old_value = entry.value.clone();
                        // Clone key and value because if CAS fails, we retry, and we are inside a loop.
                        // We cannot move out of `key` or `value` inside a loop.
                        let new_entry = Box::into_raw(Box::new(Entry {
                            retired: RetiredNode::new(),
                            hash,
                            key: key.clone(),
                            value: value.clone(),
                        }));

                        // Swap the old entry for the replacement atomically.
                        match slot_bucket.slot.compare_exchange(
                            entry_ptr,
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // SAFETY: CAS won; the old entry is unlinked
                                // and solely ours to retire.
                                unsafe { retire(entry_ptr.as_raw()) };
                                return InsertResult::Success(Some(old_value));
                            }
                            Err(_) => {
                                // Our boxed entry never became visible; drop
                                // it directly (no deferred reclamation needed).
                                drop(unsafe { Box::from_raw(new_entry) });
                                return InsertResult::Retry;
                            }
                        }
                    }
                }
            }
        }

        // 2. Find empty slot
        for offset in 0..NEIGHBORHOOD_SIZE {
            let slot_idx = bucket_idx + offset;
            if slot_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                return InsertResult::NeedResize;
            }

            let slot_bucket = table.get_bucket(slot_idx);
            let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

            if entry_ptr.is_null() {
                // Clone key and value. If CAS fails, we continue the loop, so we need the originals
                // for the next iteration.
                let new_entry = Box::into_raw(Box::new(Entry {
                    retired: RetiredNode::new(),
                    hash,
                    key: key.clone(),
                    value: value.clone(),
                }));

                // Claim the empty slot: null -> new_entry.
                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        // Publish the slot in the home bucket's bitmap.
                        // (Not atomic with the CAS above — readers tolerate
                        // the brief window; see get_or_insert.)
                        bucket.hop_info.fetch_or(1u32 << offset, Ordering::Release);
                        return InsertResult::Success(None);
                    }
                    Err(_) => {
                        // Slot taken meanwhile; free our box and keep probing.
                        drop(unsafe { Box::from_raw(new_entry) });
                        continue;
                    }
                }
            }
        }

        // 3. Try displacement
        match self.try_find_closer_slot(table, bucket_idx, guard) {
            Some(final_offset) if final_offset < NEIGHBORHOOD_SIZE => {
                let slot_idx = bucket_idx + final_offset;
                let slot_bucket = table.get_bucket(slot_idx);

                // The freed slot may already have been reused; bail and retry.
                let curr = slot_bucket.slot.load(Ordering::Relaxed, guard);
                if !curr.is_null() {
                    return InsertResult::Retry;
                }

                // This is the final attempt in this function. We can move key/value here
                // because previous usages were clones.
                let new_entry = Box::into_raw(Box::new(Entry {
                    retired: RetiredNode::new(),
                    hash,
                    key,
                    value,
                }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        bucket
                            .hop_info
                            .fetch_or(1u32 << final_offset, Ordering::Release);
                        InsertResult::Success(None)
                    }
                    Err(_) => {
                        drop(unsafe { Box::from_raw(new_entry) });
                        InsertResult::Retry
                    }
                }
            }
            _ => InsertResult::NeedResize,
        }
    }
573
574    fn try_find_closer_slot(
575        &self,
576        table: &Table<K, V>,
577        bucket_idx: usize,
578        guard: &kovan::Guard,
579    ) -> Option<usize> {
580        for probe_offset in NEIGHBORHOOD_SIZE..MAX_PROBE_DISTANCE {
581            let probe_idx = bucket_idx + probe_offset;
582            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
583                return None;
584            }
585
586            let probe_bucket = table.get_bucket(probe_idx);
587            let entry_ptr = probe_bucket.slot.load(Ordering::Acquire, guard);
588
589            if entry_ptr.is_null() {
590                return self.try_move_closer(table, bucket_idx, probe_idx, guard);
591            }
592        }
593        None
594    }
595
    /// Hopscotch displacement: repeatedly moves an entry from just below the
    /// empty slot into it (Copy-on-Move), walking the vacancy toward
    /// `target_idx` until it lies within the target's neighborhood.
    ///
    /// Returns `Some(offset)` (relative to `target_idx`) of the final empty
    /// slot, or `None` when no candidate could legally move.
    fn try_move_closer(
        &self,
        table: &Table<K, V>,
        target_idx: usize,
        empty_idx: usize,
        guard: &kovan::Guard,
    ) -> Option<usize> {
        let mut current_empty = empty_idx;

        // Loop until the vacancy is inside [target_idx, target_idx + H).
        while current_empty > target_idx + NEIGHBORHOOD_SIZE - 1 {
            let mut moved = false;

            // Candidates sit at most H-1 slots below the vacancy (an entry
            // may only move within its own home neighborhood).
            for offset in 1..NEIGHBORHOOD_SIZE.min(current_empty - target_idx) {
                let candidate_idx = current_empty - offset;
                let candidate_bucket = table.get_bucket(candidate_idx);
                let entry_ptr = candidate_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entry observed under the pinned guard.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    let entry_home = table.bucket_index(entry.hash);

                    // Legal move only if the vacancy is still inside the
                    // candidate's home neighborhood.
                    if entry_home <= candidate_idx && current_empty < entry_home + NEIGHBORHOOD_SIZE
                    {
                        // Copy-on-Move for safety
                        let new_entry = Box::into_raw(Box::new(entry.clone()));
                        let empty_bucket = table.get_bucket(current_empty);

                        // Step 1: publish the copy into the empty slot.
                        match empty_bucket.slot.compare_exchange(
                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // Step 2: unlink the original from its slot.
                                match candidate_bucket.slot.compare_exchange(
                                    entry_ptr,
                                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                    Ordering::Release,
                                    Ordering::Relaxed,
                                    guard,
                                ) {
                                    Ok(_) => {
                                        // Step 3: flip the home bucket's
                                        // bitmap bits (old slot off, new on).
                                        let old_offset = candidate_idx - entry_home;
                                        let new_offset = current_empty - entry_home;

                                        let home_bucket = table.get_bucket(entry_home);
                                        home_bucket
                                            .hop_info
                                            .fetch_and(!(1u32 << old_offset), Ordering::Release);
                                        home_bucket
                                            .hop_info
                                            .fetch_or(1u32 << new_offset, Ordering::Release);

                                        // SAFETY: the original is unlinked
                                        // and solely ours to retire.
                                        unsafe { retire(entry_ptr.as_raw()) };
                                        current_empty = candidate_idx;
                                        moved = true;
                                        break;
                                    }
                                    Err(_) => {
                                        // Attempt to revert the insertion of new_entry.
                                        match empty_bucket.slot.compare_exchange(
                                            unsafe { Shared::from_raw(new_entry) },
                                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                            Ordering::Release,
                                            Ordering::Relaxed,
                                            guard,
                                        ) {
                                            Ok(_) => {
                                                // Revert succeeded: no other thread touched it.
                                                // Safe to drop immediately.
                                                unsafe { drop(Box::from_raw(new_entry)) };
                                            }
                                            Err(_) => {
                                                // Displacement already happened here...
                                                // Too late, so we just drop it. Another thread found new_entry,
                                                // displaced it, and replaced it with something else.
                                                // This means new_entry is now part of the blocks
                                                // and was already "retired" by the other thread,
                                                // OR it was just moved. In either case, we must
                                                // let the reclamation system handle it to avoid a
                                                // double free.
                                                unsafe { retire(new_entry) };
                                            }
                                        }
                                        continue;
                                    }
                                }
                            }
                            Err(_) => {
                                // Vacancy was claimed by someone else; the
                                // copy was never visible — drop it directly.
                                unsafe { drop(Box::from_raw(new_entry)) };
                                continue;
                            }
                        }
                    }
                }
            }

            // No candidate could move this round: give up.
            if !moved {
                return None;
            }
        }

        // Report the vacancy's offset if it landed inside the neighborhood.
        if current_empty >= target_idx && current_empty < target_idx + NEIGHBORHOOD_SIZE {
            Some(current_empty - target_idx)
        } else {
            None
        }
    }
705
    /// Migration helper: inserts one entry into a not-yet-published table.
    ///
    /// Called only from `try_resize` while the `resizing` flag is held and
    /// the new table is still private, so plain `store`s (no CAS) suffice.
    /// Returns `false` when the entry cannot be placed within its
    /// neighborhood, which aborts the resize.
    fn insert_into_new_table(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        guard: &kovan::Guard,
    ) -> bool {
        let bucket_idx = table.bucket_index(hash);

        for probe_offset in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let probe_idx = bucket_idx + probe_offset;
            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                break;
            }

            let probe_bucket = table.get_bucket(probe_idx);
            let slot_ptr = probe_bucket.slot.load(Ordering::Relaxed, guard);

            if slot_ptr.is_null() {
                let offset_from_home = probe_idx - bucket_idx;

                // Only in-neighborhood placement is allowed; a first-free
                // slot beyond H means this migration attempt fails.
                // NOTE(review): no displacement is attempted here — a resize
                // can therefore fail spuriously on clustered keys; the caller
                // handles that by rolling back. Confirm this is intended.
                if offset_from_home < NEIGHBORHOOD_SIZE {
                    let new_entry = Box::into_raw(Box::new(Entry {
                        retired: RetiredNode::new(),
                        hash,
                        key,
                        value,
                    }));
                    probe_bucket
                        .slot
                        .store(unsafe { Shared::from_raw(new_entry) }, Ordering::Release);

                    let bucket = table.get_bucket(bucket_idx);
                    bucket
                        .hop_info
                        .fetch_or(1u32 << offset_from_home, Ordering::Relaxed);
                    return true;
                } else {
                    return false;
                }
            }
        }
        false
    }
751
    /// Attempts to swap in a table of `new_capacity`, migrating all entries.
    ///
    /// Single-resizer protocol: whoever wins the `resizing` CAS performs the
    /// whole migration while writers spin; losers simply return. On any
    /// failure the private new table is torn down and the old one stays.
    fn try_resize(&self, new_capacity: usize) {
        // Claim exclusive resize rights; if someone else is resizing, let
        // them finish — our trigger condition will be re-evaluated later.
        if self
            .resizing
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
            .is_err()
        {
            return;
        }

        let new_capacity = new_capacity.next_power_of_two().max(MIN_CAPACITY);
        let guard = pin();
        let old_table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the pinned guard keeps the old table alive.
        let old_table = unsafe { &*old_table_ptr.as_raw() };

        // Rounding may have made the resize a no-op.
        if old_table.capacity == new_capacity {
            self.resizing.store(false, Ordering::Release);
            return;
        }

        let new_table = Box::into_raw(Box::new(Table::new(new_capacity)));
        // SAFETY: we just allocated it and it is not yet shared.
        let new_table_ref = unsafe { &*new_table };

        let mut success = true;

        // Copy every live entry; writers are parked on the resizing flag, so
        // the old table is effectively frozen for this scan.
        for i in 0..(old_table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = old_table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                let entry = unsafe { &*entry_ptr.as_raw() };
                if !self.insert_into_new_table(
                    new_table_ref,
                    entry.hash,
                    entry.key.clone(),
                    entry.value.clone(),
                    &guard,
                ) {
                    success = false;
                    break;
                }
            }
        }

        if success {
            // Publish the new table; the old one goes to kovan.
            match self.table.compare_exchange(
                old_table_ptr,
                unsafe { Shared::from_raw(new_table) },
                Ordering::Release,
                Ordering::Relaxed,
                &guard,
            ) {
                Ok(_) => {
                    unsafe { retire(old_table_ptr.as_raw()) };
                }
                Err(_) => {
                    success = false;
                }
            }
        }

        // Roll back: the new table was never published, so its entries and
        // the table itself can be dropped directly (no deferred reclamation).
        if !success {
            for i in 0..(new_table_ref.capacity + NEIGHBORHOOD_SIZE) {
                let bucket = new_table_ref.get_bucket(i);
                let entry_ptr = bucket.slot.load(Ordering::Relaxed, &guard);
                if !entry_ptr.is_null() {
                    unsafe {
                        drop(Box::from_raw(entry_ptr.as_raw()));
                    }
                }
            }
            unsafe {
                drop(Box::from_raw(new_table));
            }
        }

        // Release writers.
        self.resizing.store(false, Ordering::Release);
    }
829    /// Returns an iterator over the map entries.
830    pub fn iter(&self) -> HopscotchIter<'_, K, V, S> {
831        let guard = pin();
832        let table_ptr = self.table.load(Ordering::Acquire, &guard);
833        let _table = unsafe { &*table_ptr.as_raw() };
834        HopscotchIter {
835            map: self,
836            bucket_idx: 0,
837            guard,
838        }
839    }
840
841    /// Returns an iterator over the map keys.
842    pub fn keys(&self) -> HopscotchKeys<'_, K, V, S> {
843        HopscotchKeys { iter: self.iter() }
844    }
845
    /// Get the underlying hasher.
    ///
    /// Returns a reference to the `BuildHasher` this map was constructed
    /// with; useful for creating another map with the same hashing behavior.
    pub fn hasher(&self) -> &S {
        &self.hasher
    }
850}
851
/// Iterator over HopscotchMap entries.
///
/// Holds a kovan guard for its whole lifetime so the entries it reads cannot
/// be reclaimed while iteration is in progress. Items are clones of the
/// stored key/value pairs; concurrent insertions or removals may or may not
/// be observed by an in-flight iterator.
pub struct HopscotchIter<'a, K: 'static, V: 'static, S> {
    /// The map being iterated.
    map: &'a HopscotchMap<K, V, S>,
    /// Index of the next bucket slot to examine.
    bucket_idx: usize,
    /// Pin guard protecting loaded entries for the iterator's lifetime.
    guard: kovan::Guard,
}
858
859impl<'a, K, V, S> Iterator for HopscotchIter<'a, K, V, S>
860where
861    K: Clone,
862    V: Clone,
863{
864    type Item = (K, V);
865
866    fn next(&mut self) -> Option<Self::Item> {
867        let table_ptr = self.map.table.load(Ordering::Acquire, &self.guard);
868        let table = unsafe { &*table_ptr.as_raw() };
869
870        while self.bucket_idx < table.buckets.len() {
871            let bucket = table.get_bucket(self.bucket_idx);
872            self.bucket_idx += 1;
873
874            let entry_ptr = bucket.slot.load(Ordering::Acquire, &self.guard);
875            if !entry_ptr.is_null() {
876                let entry = unsafe { &*entry_ptr.as_raw() };
877                return Some((entry.key.clone(), entry.value.clone()));
878            }
879        }
880        None
881    }
882}
883
/// Iterator over HopscotchMap keys.
///
/// Thin adapter over [`HopscotchIter`] that yields only the key of each entry.
pub struct HopscotchKeys<'a, K: 'static, V: 'static, S> {
    /// Underlying entry iterator; keys are extracted from its `(K, V)` items.
    iter: HopscotchIter<'a, K, V, S>,
}
888
889impl<'a, K, V, S> Iterator for HopscotchKeys<'a, K, V, S>
890where
891    K: Clone,
892    V: Clone,
893{
894    type Item = K;
895
896    fn next(&mut self) -> Option<Self::Item> {
897        self.iter.next().map(|(k, _)| k)
898    }
899}
900
901impl<'a, K, V, S> IntoIterator for &'a HopscotchMap<K, V, S>
902where
903    K: Hash + Eq + Clone + 'static,
904    V: Clone + 'static,
905    S: BuildHasher,
906{
907    type Item = (K, V);
908    type IntoIter = HopscotchIter<'a, K, V, S>;
909
910    fn into_iter(self) -> Self::IntoIter {
911        self.iter()
912    }
913}
914
/// Internal outcome of a single low-level insert attempt.
///
/// NOTE(review): the consumer of this enum is not visible in this chunk;
/// the variant semantics below are inferred from the names — confirm
/// against the insert loop.
enum InsertResult<V> {
    /// Insert completed; carries the previous value for the key, if any.
    Success(Option<V>),
    /// The key appears to already exist; carries its current value.
    Exists(V),
    /// No free slot could be claimed — the table must be resized first.
    NeedResize,
    /// Transient contention; the caller should retry the attempt.
    Retry,
}
921
#[cfg(feature = "std")]
impl<K, V> Default for HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates an empty map with the default `FixedState` hasher;
    /// equivalent to [`HopscotchMap::new`].
    fn default() -> Self {
        HopscotchMap::new()
    }
}
932
// SAFETY: Sending the map transfers ownership of every stored K and V and of
// the hasher S to the receiving thread, so `Send` requires only that K, V,
// and S are themselves `Send`.
unsafe impl<K: Send, V: Send, S: Send> Send for HopscotchMap<K, V, S> {}
// SAFETY: Shared references allow moving K and V across threads (via insert/remove),
// so K and V must be Send in addition to Sync.
unsafe impl<K: Send + Sync, V: Send + Sync, S: Send + Sync> Sync for HopscotchMap<K, V, S> {}
937
impl<K, V, S> Drop for HopscotchMap<K, V, S> {
    /// Frees the current table and every live entry it holds.
    fn drop(&mut self) {
        // SAFETY: `drop(&mut self)` guarantees exclusive ownership — no concurrent
        // readers can exist. Free nodes immediately instead of deferring via `retire()`.
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        let table = unsafe { &*table_ptr.as_raw() };

        // Walk every slot, including the NEIGHBORHOOD_SIZE overflow region
        // past `capacity`, and drop each live entry directly.
        // NOTE(review): entries removed before this point were handed to
        // `retire()`; their reclamation is kovan's responsibility, not ours.
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                unsafe {
                    drop(Box::from_raw(entry_ptr.as_raw()));
                }
            }
        }

        // Finally release the table allocation itself.
        unsafe {
            drop(Box::from_raw(table_ptr.as_raw()));
        }
    }
}
962
#[cfg(test)]
mod tests {
    use super::*;

    /// Basic single-threaded insert/lookup round-trip.
    #[test]
    fn test_insert_and_get() {
        let m = HopscotchMap::new();
        assert_eq!(m.insert(1, 100), None);
        assert_eq!(m.get(&1), Some(100));
        assert_eq!(m.get(&2), None);
    }

    /// Inserting well past the initial capacity forces at least one grow.
    #[test]
    fn test_growing() {
        let m = HopscotchMap::with_capacity(32);
        for key in 0..100 {
            m.insert(key, key * 2);
        }
        for key in 0..100 {
            assert_eq!(m.get(&key), Some(key * 2));
        }
    }

    /// Four threads insert disjoint key ranges concurrently; all entries
    /// must be present afterwards.
    #[test]
    fn test_concurrent() {
        use alloc::sync::Arc;
        extern crate std;
        use std::thread;

        let map = Arc::new(HopscotchMap::with_capacity(64));

        let handles: alloc::vec::Vec<_> = (0..4)
            .map(|tid| {
                let shared = Arc::clone(&map);
                thread::spawn(move || {
                    for i in 0..1000 {
                        let key = tid * 1000 + i;
                        shared.insert(key, key * 2);
                    }
                })
            })
            .collect();

        for h in handles {
            h.join().unwrap();
        }

        for tid in 0..4 {
            for i in 0..1000 {
                let key = tid * 1000 + i;
                assert_eq!(map.get(&key), Some(key * 2));
            }
        }
    }

    /// Inserters and removers race over the same key ranges; any value
    /// observed (during removal or afterwards) must be the inserted one.
    #[test]
    fn test_concurrent_insert_and_remove() {
        use alloc::sync::Arc;
        extern crate std;
        use std::thread;

        let map = Arc::new(HopscotchMap::with_capacity(64));

        // Phase 1: pre-populate so removers have something to delete.
        for tid in 0..4u64 {
            for i in 0..500u64 {
                let key = tid * 1000 + i;
                map.insert(key, key * 3);
            }
        }

        // Inserters re-insert the same ranges…
        let inserters: alloc::vec::Vec<_> = (0..4u64)
            .map(|tid| {
                let shared = Arc::clone(&map);
                thread::spawn(move || {
                    for i in 0..500u64 {
                        let key = tid * 1000 + i;
                        shared.insert(key, key * 3);
                    }
                })
            })
            .collect();

        // …while removers race them on the identical key space.
        let removers: alloc::vec::Vec<_> = (0..4u64)
            .map(|tid| {
                let shared = Arc::clone(&map);
                thread::spawn(move || {
                    for i in 0..500u64 {
                        let key = tid * 1000 + i;
                        if let Some(val) = shared.remove(&key) {
                            // A removed value must be the one inserted.
                            assert_eq!(val, key * 3);
                        }
                    }
                })
            })
            .collect();

        for h in inserters.into_iter().chain(removers) {
            h.join().unwrap();
        }

        // Whatever survived the race must carry the correct value.
        for tid in 0..4u64 {
            for i in 0..500u64 {
                let key = tid * 1000 + i;
                if let Some(val) = map.get(&key) {
                    assert_eq!(val, key * 3);
                }
            }
        }
    }
}