Skip to main content

kovan_map/
hopscotch.rs

1//! Lock-Free Growing/Shrinking Hopscotch Hash Map
2//!
3//! # Features
4//!
5//! - **Robust Concurrency**: Uses Copy-on-Move displacement to prevent Use-After-Free.
6//! - **Safe Resizing**: Uses a lightweight resize lock to prevent lost updates during migration.
7//! - **Memory reclamation**: Uses Kovan.
8//! - **Clone Support**: Supports V: Clone (e.g., `Arc<T>`) instead of just Copy.
9
10extern crate alloc;
11
12use alloc::boxed::Box;
13use alloc::vec::Vec;
14use core::borrow::Borrow;
15use core::hash::{BuildHasher, Hash};
16use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
17use foldhash::fast::FixedState;
18use kovan::{Atomic, RetiredNode, Shared, pin, retire};
19
/// Neighborhood size (H parameter in hopscotch hashing).
/// Every entry is kept within H slots of its home bucket, so lookups scan at
/// most H slots. 32 matches the width of the `hop_info` bitmap (`AtomicU32`).
const NEIGHBORHOOD_SIZE: usize = 32;

/// Initial capacity (rounded up to a power of two by `Table::new`).
const INITIAL_CAPACITY: usize = 64;

/// Load factor threshold for growing (75%).
const GROW_THRESHOLD: f64 = 0.75;

/// Load factor threshold for shrinking (25%).
const SHRINK_THRESHOLD: f64 = 0.25;

/// Minimum capacity to prevent excessive shrinking.
const MIN_CAPACITY: usize = 64;

/// Maximum probe distance (in slots past the home bucket) to search for an
/// empty slot before giving up and resizing.
const MAX_PROBE_DISTANCE: usize = 512;
37
/// A bucket in the hopscotch hash table.
struct Bucket<K, V> {
    /// Bitmap indicating which of the next H slots contain items that hash to this bucket.
    /// Bit `i` set means slot `home + i` holds an entry whose home bucket is this one.
    hop_info: AtomicU32,
    /// The actual key-value slot at this position (null pointer when empty).
    slot: Atomic<Entry<K, V>>,
}
45
/// An entry in the hash table.
///
/// NOTE(review): `#[repr(C)]` appears to pin `retired` as the first field so a
/// pointer to `Entry` can double as a pointer to its `RetiredNode` for kovan's
/// `retire()` — confirm against kovan's documented contract.
#[repr(C)]
struct Entry<K, V> {
    // Intrusive node used by kovan's deferred reclamation.
    retired: RetiredNode,
    // Cached hash of `key`; lets lookups pre-filter before full key equality.
    hash: u64,
    key: K,
    value: V,
}
54
impl<K: Clone, V: Clone> Clone for Entry<K, V> {
    /// Clones key/value/hash but gives the copy a *fresh* `RetiredNode`:
    /// retirement state must never be shared between two live allocations.
    /// Used by the Copy-on-Move displacement path.
    fn clone(&self) -> Self {
        Self {
            retired: RetiredNode::new(),
            hash: self.hash,
            key: self.key.clone(),
            value: self.value.clone(),
        }
    }
}
65
/// One fixed-capacity generation of the hash table; resize swaps whole tables.
#[repr(C)]
struct Table<K, V> {
    // Intrusive node so whole tables can be retired through kovan on resize.
    retired: RetiredNode,
    // `capacity + NEIGHBORHOOD_SIZE` buckets; the tail padding lets
    // neighborhood scans run without per-access bounds checks.
    buckets: Box<[Bucket<K, V>]>,
    // Logical capacity; always a power of two (see `Table::new`).
    capacity: usize,
    // `capacity - 1`, used to map hashes to home buckets.
    mask: usize,
}
74
75impl<K, V> Table<K, V> {
76    fn new(capacity: usize) -> Self {
77        let capacity = capacity.next_power_of_two().max(MIN_CAPACITY);
78        // We add padding to the array so we don't have to check bounds constantly
79        // during neighborhood scans.
80        let mut buckets = Vec::with_capacity(capacity + NEIGHBORHOOD_SIZE);
81
82        for _ in 0..(capacity + NEIGHBORHOOD_SIZE) {
83            buckets.push(Bucket {
84                hop_info: AtomicU32::new(0),
85                slot: Atomic::null(),
86            });
87        }
88
89        Self {
90            retired: RetiredNode::new(),
91            buckets: buckets.into_boxed_slice(),
92            capacity,
93            mask: capacity - 1,
94        }
95    }
96
97    #[inline(always)]
98    fn bucket_index(&self, hash: u64) -> usize {
99        (hash as usize) & self.mask
100    }
101
102    #[inline(always)]
103    fn get_bucket(&self, idx: usize) -> &Bucket<K, V> {
104        // SAFETY: Internal indices are calculated via mask or bounded offset loops.
105        // The buckets array has padding to handle overflow up to NEIGHBORHOOD_SIZE.
106        unsafe { self.buckets.get_unchecked(idx) }
107    }
108}
109
/// A concurrent, lock-free hash map based on Hopscotch Hashing.
pub struct HopscotchMap<K: 'static, V: 'static, S = FixedState> {
    // Current table generation; swapped atomically on resize.
    table: Atomic<Table<K, V>>,
    // Approximate element count, maintained with relaxed atomics.
    count: AtomicUsize,
    /// Prevents concurrent writes during resize migration to avoid lost updates.
    resizing: AtomicBool,
    // Hash builder used for all key hashing.
    hasher: S,
}
118
#[cfg(feature = "std")]
impl<K, V> HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates a new `HopscotchMap` with default capacity and hasher.
    pub fn new() -> Self {
        Self::with_hasher(FixedState::default())
    }

    /// Creates a new `HopscotchMap` with the specified capacity and default hasher.
    ///
    /// The capacity is rounded up to a power of two, floored at `MIN_CAPACITY`.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_hasher(capacity, FixedState::default())
    }
}
135
136impl<K, V, S> HopscotchMap<K, V, S>
137where
138    K: Hash + Eq + Clone + 'static,
139    V: Clone + 'static,
140    S: BuildHasher,
141{
    /// Creates a new `HopscotchMap` with the specified hasher and default capacity
    /// (`INITIAL_CAPACITY`).
    pub fn with_hasher(hasher: S) -> Self {
        Self::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
    }
146
    /// Creates a new `HopscotchMap` with the specified capacity and hasher.
    ///
    /// `capacity` is rounded up by `Table::new` to a power of two (min `MIN_CAPACITY`).
    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
        let table = Table::new(capacity);
        Self {
            // Ownership of the heap-allocated table transfers to the `Atomic`;
            // old generations are reclaimed via `retire` on resize.
            table: Atomic::new(Box::into_raw(Box::new(table))),
            count: AtomicUsize::new(0),
            resizing: AtomicBool::new(false),
            hasher,
        }
    }
157
    /// Returns the number of elements in the map.
    ///
    /// The counter is maintained with relaxed atomics, so under concurrent
    /// mutation the value is a best-effort approximation.
    pub fn len(&self) -> usize {
        self.count.load(Ordering::Relaxed)
    }
162
    /// Returns `true` if the map contains no elements (same caveats as [`len`]).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
167
    /// Returns the current capacity of the map (the active table's logical size).
    pub fn capacity(&self) -> usize {
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the pin guard keeps the table alive for the duration of the read.
        unsafe { (*table_ptr.as_raw()).capacity }
    }
174
    /// Spin until any in-progress resize has finished.
    ///
    /// NOTE(review): this is a blocking spin-wait, so writers are not strictly
    /// lock-free while a resize/`clear` holds the `resizing` flag.
    #[inline]
    fn wait_for_resize(&self) {
        while self.resizing.load(Ordering::Acquire) {
            core::hint::spin_loop();
        }
    }
181
182    /// Returns the value corresponding to the key.
183    pub fn get<Q>(&self, key: &Q) -> Option<V>
184    where
185        K: Borrow<Q>,
186        Q: Hash + Eq + ?Sized,
187    {
188        let hash = self.hasher.hash_one(key);
189        let guard = pin();
190        let table_ptr = self.table.load(Ordering::Acquire, &guard);
191        let table = unsafe { &*table_ptr.as_raw() };
192
193        let bucket_idx = table.bucket_index(hash);
194        let bucket = table.get_bucket(bucket_idx);
195
196        let hop_info = bucket.hop_info.load(Ordering::Acquire);
197
198        if hop_info == 0 {
199            return None;
200        }
201
202        for offset in 0..NEIGHBORHOOD_SIZE {
203            if hop_info & (1 << offset) != 0 {
204                let slot_idx = bucket_idx + offset;
205                let slot_bucket = table.get_bucket(slot_idx);
206                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
207
208                if !entry_ptr.is_null() {
209                    let entry = unsafe { &*entry_ptr.as_raw() };
210                    if entry.hash == hash && entry.key.borrow() == key {
211                        return Some(entry.value.clone());
212                    }
213                }
214            }
215        }
216
217        None
218    }
219
    /// Inserts a key-value pair into the map.
    ///
    /// Returns the previous value if the key was already present, else `None`.
    pub fn insert(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, false)
    }
224
    /// Core insert loop shared by `insert`, `insert_if_absent` and `get_or_insert`.
    ///
    /// Retries until `try_insert` reports a terminal outcome, coordinating with
    /// resizes: writes are held off while `resizing` is set, and a successful
    /// write is re-validated against both the flag and the table pointer so an
    /// update cannot be lost to a concurrent migration.
    fn insert_impl(&self, key: K, value: V, only_if_absent: bool) -> Option<V> {
        let hash = self.hasher.hash_one(&key);

        loop {
            self.wait_for_resize();

            let guard = pin();
            let table_ptr = self.table.load(Ordering::Acquire, &guard);
            // SAFETY: protected by the pin guard.
            let table = unsafe { &*table_ptr.as_raw() };

            // Re-check after loading: a resize may have begun between
            // `wait_for_resize` and the table load.
            if self.resizing.load(Ordering::Acquire) {
                continue;
            }

            // Note: We pass clones to try_insert if we loop here, but try_insert consumes them.
            // Since `insert_impl` owns `key` and `value`, we must clone them for the call
            // because `try_insert` might return `Retry` (looping again).
            match self.try_insert(
                table,
                hash,
                key.clone(),
                value.clone(),
                only_if_absent,
                &guard,
            ) {
                InsertResult::Success(old_val) => {
                    // CRITICAL FIX: If a resize started while we were inserting, our update
                    // might have been missed by the migration.
                    // We must retry to ensure we write to the new table.
                    // We check BOTH the resizing flag (active resize) AND the table pointer (completed resize).
                    if self.resizing.load(Ordering::SeqCst)
                        || self.table.load(Ordering::SeqCst, &guard) != table_ptr
                    {
                        continue;
                    }

                    if old_val.is_none() {
                        // Fresh insertion: bump the count and grow if past the
                        // load-factor threshold.
                        let new_count = self.count.fetch_add(1, Ordering::Relaxed) + 1;
                        let current_capacity = table.capacity;
                        let load_factor = new_count as f64 / current_capacity as f64;

                        if load_factor > GROW_THRESHOLD {
                            // Unpin before resizing so our guard doesn't delay
                            // reclamation of the old table.
                            drop(guard);
                            self.try_resize(current_capacity * 2);
                        }
                    }
                    return old_val;
                }
                InsertResult::Exists(existing_val) => {
                    // only_if_absent path: key already present, report its value.
                    return Some(existing_val);
                }
                InsertResult::NeedResize => {
                    // Neighborhood full and displacement failed: grow and retry.
                    let current_capacity = table.capacity;
                    drop(guard);
                    self.try_resize(current_capacity * 2);
                    continue;
                }
                InsertResult::Retry => {
                    continue;
                }
            }
        }
    }
289
290    /// Returns the value corresponding to the key, or inserts the given value if the key is not present.
291    ///
292    /// When multiple threads call this concurrently for the same key, all callers
293    /// are guaranteed to receive the same value (the one visible in the map).
294    pub fn get_or_insert(&self, key: K, value: V) -> V {
295        // Fast path: key already exists — no clone, no insert.
296        if let Some(v) = self.get(&key) {
297            return v;
298        }
299        // Slow path: try to insert, then read back for concurrent consistency.
300        // The hop_info update is non-atomic with the slot CAS, so two threads can
301        // both "win" the insert. Reading back ensures all callers agree on one value.
302        //
303        // Note: `expect` has zero overhead vs `unwrap` on the success path — the
304        // static string literal is only materialized in the panic (unreachable) path.
305        let _ = self.insert_impl(key.clone(), value, true);
306        self.get(&key).expect("key was just inserted")
307    }
308
    /// Insert a key-value pair only if the key does not exist.
    /// Returns `None` if inserted, `Some(existing_value)` if the key already exists.
    pub fn insert_if_absent(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, true)
    }
314
315    /// Removes a key from the map, returning the value at the key if the key was previously in the map.
316    pub fn remove<Q>(&self, key: &Q) -> Option<V>
317    where
318        K: Borrow<Q>,
319        Q: Hash + Eq + ?Sized,
320    {
321        let hash = self.hasher.hash_one(key);
322
323        loop {
324            self.wait_for_resize();
325
326            let guard = pin();
327            let table_ptr = self.table.load(Ordering::Acquire, &guard);
328            let table = unsafe { &*table_ptr.as_raw() };
329
330            if self.resizing.load(Ordering::Acquire) {
331                continue;
332            }
333
334            let bucket_idx = table.bucket_index(hash);
335            let bucket = table.get_bucket(bucket_idx);
336
337            let hop_info = bucket.hop_info.load(Ordering::Acquire);
338            if hop_info == 0 {
339                return None;
340            }
341
342            for offset in 0..NEIGHBORHOOD_SIZE {
343                if hop_info & (1 << offset) != 0 {
344                    let slot_idx = bucket_idx + offset;
345                    let slot_bucket = table.get_bucket(slot_idx);
346                    let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
347
348                    if !entry_ptr.is_null() {
349                        let entry = unsafe { &*entry_ptr.as_raw() };
350                        if entry.hash == hash && entry.key.borrow() == key {
351                            let old_value = entry.value.clone();
352
353                            match slot_bucket.slot.compare_exchange(
354                                entry_ptr,
355                                unsafe { Shared::from_raw(core::ptr::null_mut()) },
356                                Ordering::Release,
357                                Ordering::Relaxed,
358                                &guard,
359                            ) {
360                                Ok(_) => {
361                                    let mask = !(1u32 << offset);
362                                    bucket.hop_info.fetch_and(mask, Ordering::Release);
363
364                                    retire(entry_ptr.as_raw());
365
366                                    let new_count = self.count.fetch_sub(1, Ordering::Relaxed) - 1;
367                                    let current_capacity = table.capacity;
368                                    let load_factor = new_count as f64 / current_capacity as f64;
369
370                                    if load_factor < SHRINK_THRESHOLD
371                                        && current_capacity > MIN_CAPACITY
372                                    {
373                                        drop(guard);
374                                        self.try_resize(current_capacity / 2);
375                                    }
376
377                                    return Some(old_value);
378                                }
379                                Err(_) => {
380                                    break;
381                                }
382                            }
383                        }
384                    }
385                }
386            }
387            return None;
388        }
389    }
390
    /// Clears the map, removing all key-value pairs.
    ///
    /// Takes the `resizing` flag as an exclusive writer "lock" so inserts,
    /// removes and resizes are held off while the table is wiped.
    pub fn clear(&self) {
        // Spin until we win the flag (writers and resizers also contend on it).
        while self
            .resizing
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: protected by the pin guard.
        let table = unsafe { &*table_ptr.as_raw() };

        // Walk every slot including the neighborhood padding at the tail.
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            // CAS each occupied slot to null; if a racing writer (already past
            // its resizing check) beat us, the CAS fails and we skip retiring
            // that pointer — the winner is responsible for it.
            if !entry_ptr.is_null()
                && bucket
                    .slot
                    .compare_exchange(
                        entry_ptr,
                        unsafe { Shared::from_raw(core::ptr::null_mut()) },
                        Ordering::Release,
                        Ordering::Relaxed,
                        &guard,
                    )
                    .is_ok()
            {
                retire(entry_ptr.as_raw());
            }

            // Only real (non-padding) buckets own a hop bitmap; reset those.
            if i < table.capacity {
                let b = table.get_bucket(i);
                b.hop_info.store(0, Ordering::Release);
            }
        }

        self.count.store(0, Ordering::Release);
        // Release the writer "lock".
        self.resizing.store(false, Ordering::Release);
    }
433
    /// One attempt to insert or update `(key, value)` in `table`.
    ///
    /// Three phases:
    /// 1. scan the home neighborhood for an existing key (update / exists path),
    /// 2. claim the first empty slot inside the neighborhood,
    /// 3. otherwise free a closer slot via hopscotch displacement.
    ///
    /// Returns `Retry` on lost CAS races and `NeedResize` when no slot can be
    /// made available; `insert_impl` reacts to both.
    fn try_insert(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        only_if_absent: bool,
        guard: &kovan::Guard,
    ) -> InsertResult<V> {
        let bucket_idx = table.bucket_index(hash);
        let bucket = table.get_bucket(bucket_idx);

        // 1. Check if key exists (Update)
        let hop_info = bucket.hop_info.load(Ordering::Acquire);
        for offset in 0..NEIGHBORHOOD_SIZE {
            if hop_info & (1 << offset) != 0 {
                let slot_idx = bucket_idx + offset;
                let slot_bucket = table.get_bucket(slot_idx);
                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entry pointers are valid while pinned.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    if entry.hash == hash && entry.key == key {
                        if only_if_absent {
                            return InsertResult::Exists(entry.value.clone());
                        }

                        let old_value = entry.value.clone();
                        // Clone key and value because if CAS fails, we retry, and we are inside a loop.
                        // We cannot move out of `key` or `value` inside a loop.
                        let new_entry = Box::into_raw(Box::new(Entry {
                            retired: RetiredNode::new(),
                            hash,
                            key: key.clone(),
                            value: value.clone(),
                        }));

                        // Replace the whole entry (copy-on-write style update).
                        match slot_bucket.slot.compare_exchange(
                            entry_ptr,
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // Old entry may still be read by pinned readers:
                                // defer reclamation.
                                retire(entry_ptr.as_raw());
                                return InsertResult::Success(Some(old_value));
                            }
                            Err(_) => {
                                // Never published — safe to free immediately.
                                drop(unsafe { Box::from_raw(new_entry) });
                                return InsertResult::Retry;
                            }
                        }
                    }
                }
            }
        }

        // 2. Find empty slot
        for offset in 0..NEIGHBORHOOD_SIZE {
            let slot_idx = bucket_idx + offset;
            if slot_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                return InsertResult::NeedResize;
            }

            let slot_bucket = table.get_bucket(slot_idx);
            let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

            if entry_ptr.is_null() {
                // Clone key and value. If CAS fails, we continue the loop, so we need the originals
                // for the next iteration.
                let new_entry = Box::into_raw(Box::new(Entry {
                    retired: RetiredNode::new(),
                    hash,
                    key: key.clone(),
                    value: value.clone(),
                }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        // Publish the slot in the home bucket's bitmap. NOTE:
                        // this bitmap update is not atomic with the slot CAS —
                        // callers (e.g. get_or_insert) compensate by re-reading.
                        bucket.hop_info.fetch_or(1u32 << offset, Ordering::Release);
                        return InsertResult::Success(None);
                    }
                    Err(_) => {
                        drop(unsafe { Box::from_raw(new_entry) });
                        continue;
                    }
                }
            }
        }

        // 3. Try displacement
        match self.try_find_closer_slot(table, bucket_idx, guard) {
            Some(final_offset) if final_offset < NEIGHBORHOOD_SIZE => {
                let slot_idx = bucket_idx + final_offset;
                let slot_bucket = table.get_bucket(slot_idx);

                // The freed slot may already have been stolen by another writer.
                let curr = slot_bucket.slot.load(Ordering::Relaxed, guard);
                if !curr.is_null() {
                    return InsertResult::Retry;
                }

                // This is the final attempt in this function. We can move key/value here
                // because previous usages were clones.
                let new_entry = Box::into_raw(Box::new(Entry {
                    retired: RetiredNode::new(),
                    hash,
                    key,
                    value,
                }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        bucket
                            .hop_info
                            .fetch_or(1u32 << final_offset, Ordering::Release);
                        InsertResult::Success(None)
                    }
                    Err(_) => {
                        drop(unsafe { Box::from_raw(new_entry) });
                        InsertResult::Retry
                    }
                }
            }
            _ => InsertResult::NeedResize,
        }
    }
573
574    fn try_find_closer_slot(
575        &self,
576        table: &Table<K, V>,
577        bucket_idx: usize,
578        guard: &kovan::Guard,
579    ) -> Option<usize> {
580        for probe_offset in NEIGHBORHOOD_SIZE..MAX_PROBE_DISTANCE {
581            let probe_idx = bucket_idx + probe_offset;
582            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
583                return None;
584            }
585
586            let probe_bucket = table.get_bucket(probe_idx);
587            let entry_ptr = probe_bucket.slot.load(Ordering::Acquire, guard);
588
589            if entry_ptr.is_null() {
590                return self.try_move_closer(table, bucket_idx, probe_idx, guard);
591            }
592        }
593        None
594    }
595
    /// Repeatedly moves the empty slot at `empty_idx` closer to `target_idx`
    /// by relocating an intervening entry into it (hopscotch displacement,
    /// done as Copy-on-Move so readers never see a torn entry).
    ///
    /// Returns `Some(offset)` once the empty slot lies within `target_idx`'s
    /// neighborhood, `None` if no movable candidate exists.
    fn try_move_closer(
        &self,
        table: &Table<K, V>,
        target_idx: usize,
        empty_idx: usize,
        guard: &kovan::Guard,
    ) -> Option<usize> {
        let mut current_empty = empty_idx;

        // Loop until the empty slot is inside [target_idx, target_idx + H).
        while current_empty > target_idx + NEIGHBORHOOD_SIZE - 1 {
            let mut moved = false;

            // Consider entries just below the empty slot, closest-first would be
            // offset 1; a candidate may only move if the empty slot still lies
            // within the candidate's own home neighborhood.
            for offset in 1..NEIGHBORHOOD_SIZE.min(current_empty - target_idx) {
                let candidate_idx = current_empty - offset;
                let candidate_bucket = table.get_bucket(candidate_idx);
                let entry_ptr = candidate_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entry pointers are valid while pinned.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    let entry_home = table.bucket_index(entry.hash);

                    if entry_home <= candidate_idx && current_empty < entry_home + NEIGHBORHOOD_SIZE
                    {
                        // Copy-on-Move for safety
                        let new_entry = Box::into_raw(Box::new(entry.clone()));
                        let empty_bucket = table.get_bucket(current_empty);

                        // Step 1: publish the copy into the empty slot.
                        match empty_bucket.slot.compare_exchange(
                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                // Step 2: unlink the original from its old slot.
                                match candidate_bucket.slot.compare_exchange(
                                    entry_ptr,
                                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                    Ordering::Release,
                                    Ordering::Relaxed,
                                    guard,
                                ) {
                                    Ok(_) => {
                                        // Step 3: flip the home bucket's bitmap
                                        // bits from the old offset to the new one.
                                        let old_offset = candidate_idx - entry_home;
                                        let new_offset = current_empty - entry_home;

                                        let home_bucket = table.get_bucket(entry_home);
                                        home_bucket
                                            .hop_info
                                            .fetch_and(!(1u32 << old_offset), Ordering::Release);
                                        home_bucket
                                            .hop_info
                                            .fetch_or(1u32 << new_offset, Ordering::Release);

                                        retire(entry_ptr.as_raw());
                                        current_empty = candidate_idx;
                                        moved = true;
                                        break;
                                    }
                                    Err(_) => {
                                        // Original changed under us: roll back the
                                        // published copy and try another candidate.
                                        let _ = empty_bucket.slot.compare_exchange(
                                            unsafe { Shared::from_raw(new_entry) },
                                            unsafe { Shared::from_raw(core::ptr::null_mut()) },
                                            Ordering::Release,
                                            Ordering::Relaxed,
                                            guard,
                                        );
                                        // NOTE(review): the copy was briefly
                                        // published, so a pinned reader could in
                                        // principle still hold it; `retire` here
                                        // would be safer than an immediate drop —
                                        // verify against kovan's guarantees.
                                        unsafe { drop(Box::from_raw(new_entry)) };
                                        continue;
                                    }
                                }
                            }
                            Err(_) => {
                                // Empty slot stolen first; the copy was never
                                // published, immediate free is fine.
                                unsafe { drop(Box::from_raw(new_entry)) };
                                continue;
                            }
                        }
                    }
                }
            }

            if !moved {
                // No candidate could be displaced this round: give up.
                return None;
            }
        }

        if current_empty >= target_idx && current_empty < target_idx + NEIGHBORHOOD_SIZE {
            Some(current_empty - target_idx)
        } else {
            None
        }
    }
688
    /// Inserts an entry into a not-yet-published table during migration.
    ///
    /// Only called from `try_resize` while the `resizing` flag excludes other
    /// writers, so plain stores / relaxed loads suffice. Uses linear probing
    /// with no displacement; returns `false` if the entry cannot land within
    /// its neighborhood, which makes the whole migration fail (caller retries
    /// with a different capacity later).
    fn insert_into_new_table(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        guard: &kovan::Guard,
    ) -> bool {
        let bucket_idx = table.bucket_index(hash);

        for probe_offset in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let probe_idx = bucket_idx + probe_offset;
            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                break;
            }

            let probe_bucket = table.get_bucket(probe_idx);
            let slot_ptr = probe_bucket.slot.load(Ordering::Relaxed, guard);

            if slot_ptr.is_null() {
                let offset_from_home = probe_idx - bucket_idx;

                if offset_from_home < NEIGHBORHOOD_SIZE {
                    let new_entry = Box::into_raw(Box::new(Entry {
                        retired: RetiredNode::new(),
                        hash,
                        key,
                        value,
                    }));
                    probe_bucket
                        .slot
                        .store(unsafe { Shared::from_raw(new_entry) }, Ordering::Release);

                    // Exclusive access: a plain read-modify-write of the bitmap
                    // is fine here.
                    let bucket = table.get_bucket(bucket_idx);
                    let mut hop = bucket.hop_info.load(Ordering::Relaxed);
                    hop |= 1u32 << offset_from_home;
                    bucket.hop_info.store(hop, Ordering::Relaxed);
                    return true;
                } else {
                    // First free slot is outside the neighborhood — no
                    // displacement during migration, so report failure.
                    return false;
                }
            }
        }
        false
    }
734
    /// Migrates the map to a table of `new_capacity` (rounded to a power of
    /// two, floored at `MIN_CAPACITY`).
    ///
    /// Single-resizer: whoever wins the `resizing` CAS performs the migration;
    /// losers return immediately and rely on the winner. While the flag is set,
    /// writers spin in `wait_for_resize`. On any failure the half-built table
    /// and its entries are torn down and the old table stays current.
    fn try_resize(&self, new_capacity: usize) {
        if self
            .resizing
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
            .is_err()
        {
            // Someone else is already resizing; let them handle it.
            return;
        }

        let new_capacity = new_capacity.next_power_of_two().max(MIN_CAPACITY);
        let guard = pin();
        let old_table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: protected by the pin guard.
        let old_table = unsafe { &*old_table_ptr.as_raw() };

        // A concurrent grow/shrink may already have produced this capacity.
        if old_table.capacity == new_capacity {
            self.resizing.store(false, Ordering::Release);
            return;
        }

        let new_table = Box::into_raw(Box::new(Table::new(new_capacity)));
        let new_table_ref = unsafe { &*new_table };

        let mut success = true;

        // Copy every live entry (including ones in the padding slots) into the
        // new table. Entries are cloned, not moved, so readers of the old table
        // stay valid throughout.
        for i in 0..(old_table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = old_table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                let entry = unsafe { &*entry_ptr.as_raw() };
                if !self.insert_into_new_table(
                    new_table_ref,
                    entry.hash,
                    entry.key.clone(),
                    entry.value.clone(),
                    &guard,
                ) {
                    success = false;
                    break;
                }
            }
        }

        if success {
            // Publish the new generation; retire the old one for deferred free.
            match self.table.compare_exchange(
                old_table_ptr,
                unsafe { Shared::from_raw(new_table) },
                Ordering::Release,
                Ordering::Relaxed,
                &guard,
            ) {
                Ok(_) => {
                    retire(old_table_ptr.as_raw());
                }
                Err(_) => {
                    success = false;
                }
            }
        }

        if !success {
            // The new table was never published: free its entries and the
            // table itself immediately (no reader can hold them).
            for i in 0..(new_table_ref.capacity + NEIGHBORHOOD_SIZE) {
                let bucket = new_table_ref.get_bucket(i);
                let entry_ptr = bucket.slot.load(Ordering::Relaxed, &guard);
                if !entry_ptr.is_null() {
                    unsafe {
                        drop(Box::from_raw(entry_ptr.as_raw()));
                    }
                }
            }
            unsafe {
                drop(Box::from_raw(new_table));
            }
        }

        self.resizing.store(false, Ordering::Release);
    }
812    /// Returns an iterator over the map entries.
813    pub fn iter(&self) -> HopscotchIter<'_, K, V, S> {
814        let guard = pin();
815        let table_ptr = self.table.load(Ordering::Acquire, &guard);
816        let _table = unsafe { &*table_ptr.as_raw() };
817        HopscotchIter {
818            map: self,
819            bucket_idx: 0,
820            guard,
821        }
822    }
823
824    /// Returns an iterator over the map keys.
825    pub fn keys(&self) -> HopscotchKeys<'_, K, V, S> {
826        HopscotchKeys { iter: self.iter() }
827    }
828
    /// Get the underlying hasher.
    ///
    /// Borrows the `BuildHasher` this map was constructed with; useful for
    /// building sibling maps that hash identically.
    pub fn hasher(&self) -> &S {
        &self.hasher
    }
833}
834
/// Iterator over HopscotchMap entries.
///
/// Holds a `kovan` guard for its whole lifetime so entries loaded from the
/// table cannot be reclaimed while the iterator clones them out.
pub struct HopscotchIter<'a, K: 'static, V: 'static, S> {
    /// The map being iterated.
    map: &'a HopscotchMap<K, V, S>,
    /// Index of the next bucket to examine (advances monotonically).
    bucket_idx: usize,
    /// Pin that protects loaded entries for the iterator's lifetime.
    guard: kovan::Guard,
}
841
842impl<'a, K, V, S> Iterator for HopscotchIter<'a, K, V, S>
843where
844    K: Clone,
845    V: Clone,
846{
847    type Item = (K, V);
848
849    fn next(&mut self) -> Option<Self::Item> {
850        let table_ptr = self.map.table.load(Ordering::Acquire, &self.guard);
851        let table = unsafe { &*table_ptr.as_raw() };
852
853        while self.bucket_idx < table.buckets.len() {
854            let bucket = table.get_bucket(self.bucket_idx);
855            self.bucket_idx += 1;
856
857            let entry_ptr = bucket.slot.load(Ordering::Acquire, &self.guard);
858            if !entry_ptr.is_null() {
859                let entry = unsafe { &*entry_ptr.as_raw() };
860                return Some((entry.key.clone(), entry.value.clone()));
861            }
862        }
863        None
864    }
865}
866
/// Iterator over HopscotchMap keys.
///
/// Wraps [`HopscotchIter`] and yields only the key of each entry.
pub struct HopscotchKeys<'a, K: 'static, V: 'static, S> {
    /// Underlying entry iterator whose values are discarded.
    iter: HopscotchIter<'a, K, V, S>,
}
871
872impl<'a, K, V, S> Iterator for HopscotchKeys<'a, K, V, S>
873where
874    K: Clone,
875    V: Clone,
876{
877    type Item = K;
878
879    fn next(&mut self) -> Option<Self::Item> {
880        self.iter.next().map(|(k, _)| k)
881    }
882}
883
884impl<'a, K, V, S> IntoIterator for &'a HopscotchMap<K, V, S>
885where
886    K: Hash + Eq + Clone + 'static,
887    V: Clone + 'static,
888    S: BuildHasher,
889{
890    type Item = (K, V);
891    type IntoIter = HopscotchIter<'a, K, V, S>;
892
893    fn into_iter(self) -> Self::IntoIter {
894        self.iter()
895    }
896}
897
/// Outcome of one low-level insertion attempt into a specific table (internal).
enum InsertResult<V> {
    /// Insertion succeeded; carries the value it replaced, if any.
    // NOTE(review): payload semantics inferred from the shape (`Option<V>`,
    // matching a map-style `insert` return) — confirm against the insert path.
    Success(Option<V>),
    /// The key was already present; carries a value — presumably the existing
    /// one. Verify against the caller that consumes this variant.
    Exists(V),
    /// No free slot within range (cf. `MAX_PROBE_DISTANCE`); caller should
    /// resize the table and retry.
    NeedResize,
    /// Transient contention (e.g. a lost CAS); caller should retry as-is.
    Retry,
}
904
#[cfg(feature = "std")]
impl<K, V> Default for HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates an empty map with the default `FixedState` hasher; equivalent
    /// to [`HopscotchMap::new`]. Only available with the `std` feature.
    fn default() -> Self {
        Self::new()
    }
}
915
// SAFETY: the map is sendable/shareable when its keys, values, and hasher are.
// NOTE(review): for a concurrent map whose read operations clone values out
// and which may retire (and later drop) entries on arbitrary threads, the
// `Sync` impl arguably needs `K: Send + Sync, V: Send + Sync` rather than
// `Sync` alone — confirm against the reclamation path before relying on this.
unsafe impl<K: Send, V: Send, S: Send> Send for HopscotchMap<K, V, S> {}
unsafe impl<K: Sync, V: Sync, S: Sync> Sync for HopscotchMap<K, V, S> {}
918
impl<K, V, S> Drop for HopscotchMap<K, V, S> {
    /// Frees every live entry and then the table itself, bypassing the
    /// deferred-reclamation path since no other thread can hold a reference.
    fn drop(&mut self) {
        // SAFETY: `drop(&mut self)` guarantees exclusive ownership — no concurrent
        // readers can exist. Free nodes immediately instead of deferring via `retire()`.
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        let table = unsafe { &*table_ptr.as_raw() };

        // Walk all buckets; the bound matches the table's allocation size used
        // by the other full-table scans in this file (capacity + NEIGHBORHOOD_SIZE).
        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                // SAFETY: exclusive access (see above); each non-null slot holds
                // a Box-allocated `Entry` we now uniquely own.
                unsafe {
                    drop(Box::from_raw(entry_ptr.as_raw()));
                }
            }
        }

        // SAFETY: all entries were freed above; the table allocation itself is
        // Box-allocated and uniquely owned here.
        unsafe {
            drop(Box::from_raw(table_ptr.as_raw()));
        }
    }
}
943
#[cfg(test)]
mod tests {
    use super::*;

    /// Basic single-threaded insert/lookup round trip.
    #[test]
    fn test_insert_and_get() {
        let map = HopscotchMap::new();
        assert!(map.insert(1, 100).is_none());
        assert_eq!(map.get(&1), Some(100));
        assert_eq!(map.get(&2), None);
    }

    /// Inserting well past the initial capacity must trigger growth without
    /// losing any entries.
    #[test]
    fn test_growing() {
        let map = HopscotchMap::with_capacity(32);
        for key in 0..100 {
            map.insert(key, key * 2);
        }
        for key in 0..100 {
            assert_eq!(map.get(&key), Some(key * 2));
        }
    }

    /// Four writer threads insert disjoint key ranges concurrently; every
    /// entry must be visible after all threads join.
    #[test]
    fn test_concurrent() {
        use alloc::sync::Arc;
        extern crate std;
        use std::thread;

        let map = Arc::new(HopscotchMap::with_capacity(64));

        let handles: alloc::vec::Vec<_> = (0..4)
            .map(|tid| {
                let writer = Arc::clone(&map);
                thread::spawn(move || {
                    for i in 0..1000 {
                        let key = tid * 1000 + i;
                        writer.insert(key, key * 2);
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        for tid in 0..4 {
            for i in 0..1000 {
                let key = tid * 1000 + i;
                assert_eq!(map.get(&key), Some(key * 2));
            }
        }
    }
}