1extern crate alloc;
11
12use alloc::boxed::Box;
13use alloc::vec::Vec;
14use core::borrow::Borrow;
15use core::hash::{BuildHasher, Hash};
16use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
17use foldhash::fast::FixedState;
18use kovan::{Atomic, Shared, pin, retire};
19
/// Size of each bucket's neighborhood: an entry whose home bucket is `i` is
/// always stored within `NEIGHBORHOOD_SIZE` slots of `i` (hopscotch
/// invariant). Must not exceed 32 because `hop_info` is an `AtomicU32` bitmap.
const NEIGHBORHOOD_SIZE: usize = 32;

/// Default capacity used by `new()` / `with_hasher()`.
const INITIAL_CAPACITY: usize = 64;

/// Load factor above which the table grows (capacity doubles).
const GROW_THRESHOLD: f64 = 0.75;

/// Load factor below which the table shrinks (capacity halves), floored at
/// `MIN_CAPACITY`.
const SHRINK_THRESHOLD: f64 = 0.25;

/// Smallest capacity ever used; also the floor applied in `Table::new` after
/// rounding up to a power of two.
const MIN_CAPACITY: usize = 64;

/// Upper bound on the linear probe used by `try_find_closer_slot` when
/// searching for an empty slot to walk back toward a full neighborhood.
const MAX_PROBE_DISTANCE: usize = 512;
37
/// One slot of the table.
struct Bucket<K, V> {
    // Bitmap: bit `i` set means the slot at `self_index + i` holds an entry
    // whose home bucket is this one (hopscotch neighborhood bookkeeping).
    hop_info: AtomicU32,
    // The entry stored in this slot, or null. Reclaimed via `retire`.
    slot: Atomic<Entry<K, V>>,
}
45
/// Immutable key/value pair stored behind an `Atomic` pointer. Entries are
/// replaced wholesale (never mutated in place) so readers can clone safely.
#[derive(Clone)]
struct Entry<K, V> {
    // Cached full hash; lets lookups skip key comparison on a mismatch.
    hash: u64,
    key: K,
    value: V,
}
53
/// A fixed-size bucket array. `buckets.len() == capacity + NEIGHBORHOOD_SIZE`
/// so the neighborhood of the last home bucket never needs to wrap around.
struct Table<K, V> {
    buckets: Box<[Bucket<K, V>]>,
    // Number of home buckets (always a power of two, >= MIN_CAPACITY).
    capacity: usize,
    // `capacity - 1`; maps a hash to its home bucket by masking.
    mask: usize,
}
60
impl<K, V> Table<K, V> {
    /// Allocates a table with `capacity` home buckets (rounded up to a power
    /// of two, floored at `MIN_CAPACITY`) plus `NEIGHBORHOOD_SIZE` trailing
    /// overflow buckets so neighborhoods never wrap.
    fn new(capacity: usize) -> Self {
        let capacity = capacity.next_power_of_two().max(MIN_CAPACITY);
        let mut buckets = Vec::with_capacity(capacity + NEIGHBORHOOD_SIZE);

        for _ in 0..(capacity + NEIGHBORHOOD_SIZE) {
            buckets.push(Bucket {
                hop_info: AtomicU32::new(0),
                slot: Atomic::null(),
            });
        }

        Self {
            buckets: buckets.into_boxed_slice(),
            capacity,
            mask: capacity - 1,
        }
    }

    /// Maps a hash to its home bucket index (power-of-two masking).
    #[inline(always)]
    fn bucket_index(&self, hash: u64) -> usize {
        (hash as usize) & self.mask
    }

    /// Unchecked bucket access.
    #[inline(always)]
    fn get_bucket(&self, idx: usize) -> &Bucket<K, V> {
        // SAFETY: callers index either with `home + offset` where
        // `home <= capacity - 1` and `offset < NEIGHBORHOOD_SIZE`, or after an
        // explicit bound check against `capacity + NEIGHBORHOOD_SIZE`, which
        // equals `buckets.len()` exactly.
        unsafe { self.buckets.get_unchecked(idx) }
    }
}
94
/// Lock-free concurrent hopscotch hash map. Readers never block; writers
/// coordinate resizes through the `resizing` flag, and memory is reclaimed
/// via `kovan`'s pin/retire scheme.
pub struct HopscotchMap<K: 'static, V: 'static, S = FixedState> {
    // Current table; swapped atomically on resize, old table `retire`d.
    table: Atomic<Table<K, V>>,
    // Approximate live-entry count (updated with Relaxed ordering).
    count: AtomicUsize,
    // True while a resize (or `clear`) holds exclusive writer access.
    resizing: AtomicBool,
    hasher: S,
}
103
#[cfg(feature = "std")]
impl<K, V> HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Creates an empty map with the default capacity and hasher.
    pub fn new() -> Self {
        Self::with_hasher(FixedState::default())
    }

    /// Creates an empty map sized for roughly `capacity` entries before
    /// growing (rounded up to a power of two, floored at `MIN_CAPACITY`).
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_hasher(capacity, FixedState::default())
    }
}
120
impl<K, V, S> HopscotchMap<K, V, S>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
    S: BuildHasher,
{
    /// Creates an empty map with `INITIAL_CAPACITY` and the given hasher.
    pub fn with_hasher(hasher: S) -> Self {
        Self::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
    }
131
    /// Creates an empty map with the given capacity hint and hasher.
    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
        let table = Table::new(capacity);
        Self {
            table: Atomic::new(Box::into_raw(Box::new(table))),
            count: AtomicUsize::new(0),
            resizing: AtomicBool::new(false),
            hasher,
        }
    }
142
    /// Returns the approximate number of entries. This is a Relaxed load and
    /// may lag behind concurrent insertions and removals.
    pub fn len(&self) -> usize {
        self.count.load(Ordering::Relaxed)
    }

    /// Returns true if `len()` is zero (same weak-consistency caveat).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
152
    /// Returns the current table capacity (home-bucket count).
    pub fn capacity(&self) -> usize {
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the table pointer stays valid while `guard` pins us.
        unsafe { (*table_ptr.as_raw()).capacity }
    }
159
    /// Spins until any in-progress resize/clear releases the flag.
    /// NOTE(review): pure spin with no backoff; the flag can be re-acquired
    /// immediately after this returns, so callers re-check it after loading
    /// the table.
    #[inline]
    fn wait_for_resize(&self) {
        while self.resizing.load(Ordering::Acquire) {
            core::hint::spin_loop();
        }
    }
166
    /// Looks up `key` and returns a clone of its value, if present.
    ///
    /// Lock-free: pins a guard, snapshots the current table, and scans only
    /// the slots flagged in the home bucket's `hop_info` bitmap.
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        let hash = self.hasher.hash_one(key);
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the table stays alive while `guard` is pinned.
        let table = unsafe { &*table_ptr.as_raw() };

        let bucket_idx = table.bucket_index(hash);
        let bucket = table.get_bucket(bucket_idx);

        let hop_info = bucket.hop_info.load(Ordering::Acquire);

        // Empty neighborhood: the key cannot be present.
        if hop_info == 0 {
            return None;
        }

        for offset in 0..NEIGHBORHOOD_SIZE {
            if hop_info & (1 << offset) != 0 {
                let slot_idx = bucket_idx + offset;
                let slot_bucket = table.get_bucket(slot_idx);
                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);

                // The slot may have been emptied after `hop_info` was read.
                if !entry_ptr.is_null() {
                    // SAFETY: non-null entries remain valid under the guard.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    // Compare the cached hash first to skip most key
                    // comparisons.
                    if entry.hash == hash && entry.key.borrow() == key {
                        return Some(entry.value.clone());
                    }
                }
            }
        }

        None
    }
204
    /// Inserts `key -> value`, returning the previous value if the key was
    /// already present (the old entry is replaced).
    pub fn insert(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, false)
    }
209
    /// Shared implementation for `insert` / `insert_if_absent`.
    ///
    /// Retries until the insert lands in a table that is still current and no
    /// resize raced with it; triggers a grow when the load factor exceeds
    /// `GROW_THRESHOLD`.
    fn insert_impl(&self, key: K, value: V, only_if_absent: bool) -> Option<V> {
        let hash = self.hasher.hash_one(&key);

        loop {
            self.wait_for_resize();

            let guard = pin();
            let table_ptr = self.table.load(Ordering::Acquire, &guard);
            // SAFETY: the table stays alive while `guard` is pinned.
            let table = unsafe { &*table_ptr.as_raw() };

            // A resize may have started between `wait_for_resize` and the
            // table load; start over if so.
            if self.resizing.load(Ordering::Acquire) {
                continue;
            }

            match self.try_insert(
                table,
                hash,
                key.clone(),
                value.clone(),
                only_if_absent,
                &guard,
            ) {
                InsertResult::Success(old_val) => {
                    // Re-validate: if a resize started or the table was
                    // swapped after our CAS, the write may have landed in a
                    // dying table — redo it against the current one.
                    if self.resizing.load(Ordering::SeqCst)
                        || self.table.load(Ordering::SeqCst, &guard) != table_ptr
                    {
                        continue;
                    }

                    if old_val.is_none() {
                        let new_count = self.count.fetch_add(1, Ordering::Relaxed) + 1;
                        let current_capacity = table.capacity;
                        let load_factor = new_count as f64 / current_capacity as f64;

                        if load_factor > GROW_THRESHOLD {
                            // Unpin first so the old table can be reclaimed.
                            drop(guard);
                            self.try_resize(current_capacity * 2);
                        }
                    }
                    return old_val;
                }
                InsertResult::Exists(existing_val) => {
                    return Some(existing_val);
                }
                InsertResult::NeedResize => {
                    let current_capacity = table.capacity;
                    drop(guard);
                    self.try_resize(current_capacity * 2);
                    continue;
                }
                InsertResult::Retry => {
                    continue;
                }
            }
        }
    }
274
275 pub fn get_or_insert(&self, key: K, value: V) -> V {
277 match self.insert_impl(key, value.clone(), true) {
278 Some(existing) => existing,
279 None => value,
280 }
281 }
282
    /// Inserts only when `key` is absent. Returns the existing value
    /// (leaving it untouched) when present, `None` when the insert happened.
    pub fn insert_if_absent(&self, key: K, value: V) -> Option<V> {
        self.insert_impl(key, value, true)
    }
288
289 pub fn remove<Q>(&self, key: &Q) -> Option<V>
291 where
292 K: Borrow<Q>,
293 Q: Hash + Eq + ?Sized,
294 {
295 let hash = self.hasher.hash_one(key);
296
297 loop {
298 self.wait_for_resize();
299
300 let guard = pin();
301 let table_ptr = self.table.load(Ordering::Acquire, &guard);
302 let table = unsafe { &*table_ptr.as_raw() };
303
304 if self.resizing.load(Ordering::Acquire) {
305 continue;
306 }
307
308 let bucket_idx = table.bucket_index(hash);
309 let bucket = table.get_bucket(bucket_idx);
310
311 let hop_info = bucket.hop_info.load(Ordering::Acquire);
312 if hop_info == 0 {
313 return None;
314 }
315
316 for offset in 0..NEIGHBORHOOD_SIZE {
317 if hop_info & (1 << offset) != 0 {
318 let slot_idx = bucket_idx + offset;
319 let slot_bucket = table.get_bucket(slot_idx);
320 let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, &guard);
321
322 if !entry_ptr.is_null() {
323 let entry = unsafe { &*entry_ptr.as_raw() };
324 if entry.hash == hash && entry.key.borrow() == key {
325 let old_value = entry.value.clone();
326
327 match slot_bucket.slot.compare_exchange(
328 entry_ptr,
329 unsafe { Shared::from_raw(core::ptr::null_mut()) },
330 Ordering::Release,
331 Ordering::Relaxed,
332 &guard,
333 ) {
334 Ok(_) => {
335 let mask = !(1u32 << offset);
336 bucket.hop_info.fetch_and(mask, Ordering::Release);
337
338 retire(entry_ptr.as_raw());
339
340 let new_count = self.count.fetch_sub(1, Ordering::Relaxed) - 1;
341 let current_capacity = table.capacity;
342 let load_factor = new_count as f64 / current_capacity as f64;
343
344 if load_factor < SHRINK_THRESHOLD
345 && current_capacity > MIN_CAPACITY
346 {
347 drop(guard);
348 self.try_resize(current_capacity / 2);
349 }
350
351 return Some(old_value);
352 }
353 Err(_) => {
354 break;
355 }
356 }
357 }
358 }
359 }
360 }
361 return None;
362 }
363 }
364
    /// Removes all entries.
    ///
    /// Acquires the `resizing` flag for exclusive writer access (spinning
    /// until it wins), nulls every slot via CAS and retires the removed
    /// entries, clears every `hop_info` bitmap, then resets the count.
    pub fn clear(&self) {
        while self
            .resizing
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the table stays alive while `guard` is pinned.
        let table = unsafe { &*table_ptr.as_raw() };

        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            // CAS rather than a plain store: a remove that was already in
            // flight may have emptied the slot and retired the entry itself.
            if !entry_ptr.is_null()
                && bucket
                    .slot
                    .compare_exchange(
                        entry_ptr,
                        unsafe { Shared::from_raw(core::ptr::null_mut()) },
                        Ordering::Release,
                        Ordering::Relaxed,
                        &guard,
                    )
                    .is_ok()
            {
                retire(entry_ptr.as_raw());
            }

            // Only home buckets (the first `capacity`) carry hop bitmaps.
            if i < table.capacity {
                let b = table.get_bucket(i);
                b.hop_info.store(0, Ordering::Release);
            }
        }

        self.count.store(0, Ordering::Release);
        self.resizing.store(false, Ordering::Release);
    }
407
    /// One insertion attempt against a specific table snapshot.
    ///
    /// Three phases: (1) scan the neighborhood for an existing key and
    /// replace its entry; (2) claim any empty slot already inside the
    /// neighborhood; (3) otherwise displace entries hopscotch-style
    /// (`try_find_closer_slot`) to open a slot within range. Returns
    /// `NeedResize` when no in-range slot can be obtained.
    fn try_insert(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        only_if_absent: bool,
        guard: &kovan::Guard,
    ) -> InsertResult<V> {
        let bucket_idx = table.bucket_index(hash);
        let bucket = table.get_bucket(bucket_idx);

        // Phase 1: look for an existing entry with this key.
        let hop_info = bucket.hop_info.load(Ordering::Acquire);
        for offset in 0..NEIGHBORHOOD_SIZE {
            if hop_info & (1 << offset) != 0 {
                let slot_idx = bucket_idx + offset;
                let slot_bucket = table.get_bucket(slot_idx);
                let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

                if !entry_ptr.is_null() {
                    // SAFETY: non-null entries remain valid under the guard.
                    let entry = unsafe { &*entry_ptr.as_raw() };
                    if entry.hash == hash && entry.key == key {
                        if only_if_absent {
                            return InsertResult::Exists(entry.value.clone());
                        }

                        // Replace the entire entry so readers never observe
                        // a half-updated value.
                        let old_value = entry.value.clone();
                        let new_entry = Box::into_raw(Box::new(Entry {
                            hash,
                            key: key.clone(),
                            value: value.clone(),
                        }));

                        match slot_bucket.slot.compare_exchange(
                            entry_ptr,
                            unsafe { Shared::from_raw(new_entry) },
                            Ordering::Release,
                            Ordering::Relaxed,
                            guard,
                        ) {
                            Ok(_) => {
                                retire(entry_ptr.as_raw());
                                return InsertResult::Success(Some(old_value));
                            }
                            Err(_) => {
                                // Never published; safe to free immediately.
                                drop(unsafe { Box::from_raw(new_entry) });
                                return InsertResult::Retry;
                            }
                        }
                    }
                }
            }
        }

        // Phase 2: claim an empty slot inside the neighborhood.
        for offset in 0..NEIGHBORHOOD_SIZE {
            let slot_idx = bucket_idx + offset;
            if slot_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                return InsertResult::NeedResize;
            }

            let slot_bucket = table.get_bucket(slot_idx);
            let entry_ptr = slot_bucket.slot.load(Ordering::Acquire, guard);

            if entry_ptr.is_null() {
                let new_entry = Box::into_raw(Box::new(Entry {
                    hash,
                    key: key.clone(),
                    value: value.clone(),
                }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        // Set the bitmap bit after the slot CAS so readers
                        // that see the bit also see the entry.
                        bucket.hop_info.fetch_or(1u32 << offset, Ordering::Release);
                        return InsertResult::Success(None);
                    }
                    Err(_) => {
                        // Lost the race for this slot; try the next one.
                        drop(unsafe { Box::from_raw(new_entry) });
                        continue;
                    }
                }
            }
        }

        // Phase 3: neighborhood full — displace entries to make room.
        match self.try_find_closer_slot(table, bucket_idx, guard) {
            Some(final_offset) if final_offset < NEIGHBORHOOD_SIZE => {
                let slot_idx = bucket_idx + final_offset;
                let slot_bucket = table.get_bucket(slot_idx);

                // The freed slot may have been taken since the displacement.
                let curr = slot_bucket.slot.load(Ordering::Relaxed, guard);
                if !curr.is_null() {
                    return InsertResult::Retry;
                }

                let new_entry = Box::into_raw(Box::new(Entry { hash, key, value }));

                match slot_bucket.slot.compare_exchange(
                    unsafe { Shared::from_raw(core::ptr::null_mut()) },
                    unsafe { Shared::from_raw(new_entry) },
                    Ordering::Release,
                    Ordering::Relaxed,
                    guard,
                ) {
                    Ok(_) => {
                        bucket
                            .hop_info
                            .fetch_or(1u32 << final_offset, Ordering::Release);
                        InsertResult::Success(None)
                    }
                    Err(_) => {
                        // Never published; safe to free immediately.
                        drop(unsafe { Box::from_raw(new_entry) });
                        InsertResult::Retry
                    }
                }
            }
            _ => InsertResult::NeedResize,
        }
    }
540
    /// Linear-probes beyond the neighborhood (up to `MAX_PROBE_DISTANCE`) for
    /// an empty slot, then tries to walk that hole back into range via
    /// `try_move_closer`. Returns the resulting in-neighborhood offset.
    fn try_find_closer_slot(
        &self,
        table: &Table<K, V>,
        bucket_idx: usize,
        guard: &kovan::Guard,
    ) -> Option<usize> {
        for probe_offset in NEIGHBORHOOD_SIZE..MAX_PROBE_DISTANCE {
            let probe_idx = bucket_idx + probe_offset;
            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                return None;
            }

            let probe_bucket = table.get_bucket(probe_idx);
            let entry_ptr = probe_bucket.slot.load(Ordering::Acquire, guard);

            if entry_ptr.is_null() {
                return self.try_move_closer(table, bucket_idx, probe_idx, guard);
            }
        }
        None
    }
562
563 fn try_move_closer(
564 &self,
565 table: &Table<K, V>,
566 target_idx: usize,
567 empty_idx: usize,
568 guard: &kovan::Guard,
569 ) -> Option<usize> {
570 let mut current_empty = empty_idx;
571
572 while current_empty > target_idx + NEIGHBORHOOD_SIZE - 1 {
573 let mut moved = false;
574
575 for offset in 1..NEIGHBORHOOD_SIZE.min(current_empty - target_idx) {
576 let candidate_idx = current_empty - offset;
577 let candidate_bucket = table.get_bucket(candidate_idx);
578 let entry_ptr = candidate_bucket.slot.load(Ordering::Acquire, guard);
579
580 if !entry_ptr.is_null() {
581 let entry = unsafe { &*entry_ptr.as_raw() };
582 let entry_home = table.bucket_index(entry.hash);
583
584 if entry_home <= candidate_idx && current_empty < entry_home + NEIGHBORHOOD_SIZE
585 {
586 let new_entry = Box::into_raw(Box::new(entry.clone()));
588 let empty_bucket = table.get_bucket(current_empty);
589
590 match empty_bucket.slot.compare_exchange(
591 unsafe { Shared::from_raw(core::ptr::null_mut()) },
592 unsafe { Shared::from_raw(new_entry) },
593 Ordering::Release,
594 Ordering::Relaxed,
595 guard,
596 ) {
597 Ok(_) => {
598 match candidate_bucket.slot.compare_exchange(
599 entry_ptr,
600 unsafe { Shared::from_raw(core::ptr::null_mut()) },
601 Ordering::Release,
602 Ordering::Relaxed,
603 guard,
604 ) {
605 Ok(_) => {
606 let old_offset = candidate_idx - entry_home;
607 let new_offset = current_empty - entry_home;
608
609 let home_bucket = table.get_bucket(entry_home);
610 home_bucket
611 .hop_info
612 .fetch_and(!(1u32 << old_offset), Ordering::Release);
613 home_bucket
614 .hop_info
615 .fetch_or(1u32 << new_offset, Ordering::Release);
616
617 retire(entry_ptr.as_raw());
618 current_empty = candidate_idx;
619 moved = true;
620 break;
621 }
622 Err(_) => {
623 let _ = empty_bucket.slot.compare_exchange(
624 unsafe { Shared::from_raw(new_entry) },
625 unsafe { Shared::from_raw(core::ptr::null_mut()) },
626 Ordering::Release,
627 Ordering::Relaxed,
628 guard,
629 );
630 unsafe { drop(Box::from_raw(new_entry)) };
631 continue;
632 }
633 }
634 }
635 Err(_) => {
636 unsafe { drop(Box::from_raw(new_entry)) };
637 continue;
638 }
639 }
640 }
641 }
642 }
643
644 if !moved {
645 return None;
646 }
647 }
648
649 if current_empty >= target_idx && current_empty < target_idx + NEIGHBORHOOD_SIZE {
650 Some(current_empty - target_idx)
651 } else {
652 None
653 }
654 }
655
    /// Inserts into a not-yet-published table during a resize.
    ///
    /// The new table is private to the resizing thread, so plain stores and
    /// Relaxed loads suffice. Returns false when the entry cannot be placed
    /// within its neighborhood (this simplified rehash does no displacement),
    /// in which case the caller aborts the resize.
    fn insert_into_new_table(
        &self,
        table: &Table<K, V>,
        hash: u64,
        key: K,
        value: V,
        guard: &kovan::Guard,
    ) -> bool {
        let bucket_idx = table.bucket_index(hash);

        for probe_offset in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let probe_idx = bucket_idx + probe_offset;
            if probe_idx >= table.capacity + NEIGHBORHOOD_SIZE {
                break;
            }

            let probe_bucket = table.get_bucket(probe_idx);
            let slot_ptr = probe_bucket.slot.load(Ordering::Relaxed, guard);

            if slot_ptr.is_null() {
                let offset_from_home = probe_idx - bucket_idx;

                if offset_from_home < NEIGHBORHOOD_SIZE {
                    let new_entry = Box::into_raw(Box::new(Entry { hash, key, value }));
                    probe_bucket
                        .slot
                        .store(unsafe { Shared::from_raw(new_entry) }, Ordering::Release);

                    // Plain read-modify-write is fine: no other thread can
                    // touch this table yet.
                    let bucket = table.get_bucket(bucket_idx);
                    let mut hop = bucket.hop_info.load(Ordering::Relaxed);
                    hop |= 1u32 << offset_from_home;
                    bucket.hop_info.store(hop, Ordering::Relaxed);
                    return true;
                } else {
                    // First free slot is outside the neighborhood; give up.
                    return false;
                }
            }
        }
        false
    }
696
    /// Attempts to replace the table with one of `new_capacity`.
    ///
    /// Only one resizer runs at a time (the `resizing` flag); losers return
    /// immediately. Copies (clones) every live entry into a fresh table,
    /// publishes it with a CAS, and retires the old table. On any failure the
    /// unpublished new table and its entries are freed immediately.
    fn try_resize(&self, new_capacity: usize) {
        if self
            .resizing
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
            .is_err()
        {
            // Someone else is already resizing; let them finish.
            return;
        }

        let new_capacity = new_capacity.next_power_of_two().max(MIN_CAPACITY);
        let guard = pin();
        let old_table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: the table stays alive while `guard` is pinned.
        let old_table = unsafe { &*old_table_ptr.as_raw() };

        // Rounding may collapse a grow/shrink into a no-op.
        if old_table.capacity == new_capacity {
            self.resizing.store(false, Ordering::Release);
            return;
        }

        let new_table = Box::into_raw(Box::new(Table::new(new_capacity)));
        // SAFETY: just allocated above; exclusively ours until published.
        let new_table_ref = unsafe { &*new_table };

        let mut success = true;

        // NOTE(review): entries are cloned while other writers may still be
        // mutating the old table, and the old table's Entry allocations are
        // never retired here (only the Table itself is) — looks like a leak
        // unless kovan reclaims them transitively; confirm.
        for i in 0..(old_table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = old_table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                // SAFETY: non-null entries remain valid under the guard.
                let entry = unsafe { &*entry_ptr.as_raw() };
                if !self.insert_into_new_table(
                    new_table_ref,
                    entry.hash,
                    entry.key.clone(),
                    entry.value.clone(),
                    &guard,
                ) {
                    success = false;
                    break;
                }
            }
        }

        if success {
            match self.table.compare_exchange(
                old_table_ptr,
                unsafe { Shared::from_raw(new_table) },
                Ordering::Release,
                Ordering::Relaxed,
                &guard,
            ) {
                Ok(_) => {
                    // Deferred: readers may still be scanning the old table.
                    retire(old_table_ptr.as_raw());
                }
                Err(_) => {
                    success = false;
                }
            }
        }

        if !success {
            // The new table was never published, so its entries have no
            // concurrent readers and can be freed immediately.
            for i in 0..(new_table_ref.capacity + NEIGHBORHOOD_SIZE) {
                let bucket = new_table_ref.get_bucket(i);
                let entry_ptr = bucket.slot.load(Ordering::Relaxed, &guard);
                if !entry_ptr.is_null() {
                    unsafe {
                        drop(Box::from_raw(entry_ptr.as_raw()));
                    }
                }
            }
            unsafe {
                drop(Box::from_raw(new_table));
            }
        }

        self.resizing.store(false, Ordering::Release);
    }
    /// Returns a weakly-consistent iterator over clones of the entries;
    /// concurrent inserts/removes may or may not be observed.
    pub fn iter(&self) -> HopscotchIter<'_, K, V, S> {
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // Dereference only to validate the pointer here; the iterator
        // reloads the table on every `next()`.
        let _table = unsafe { &*table_ptr.as_raw() };
        HopscotchIter {
            map: self,
            bucket_idx: 0,
            guard,
        }
    }
785
    /// Returns an iterator over clones of the keys (same weak consistency
    /// as `iter`).
    pub fn keys(&self) -> HopscotchKeys<'_, K, V, S> {
        HopscotchKeys { iter: self.iter() }
    }
790
    /// Returns a reference to the map's hasher.
    pub fn hasher(&self) -> &S {
        &self.hasher
    }
}
796
/// Weakly-consistent map iterator. Holds a pin guard for its whole lifetime
/// so the entry pointers it reads stay valid.
pub struct HopscotchIter<'a, K: 'static, V: 'static, S> {
    map: &'a HopscotchMap<K, V, S>,
    // Next slot index to examine (spans home + overflow buckets).
    bucket_idx: usize,
    guard: kovan::Guard,
}
803
impl<'a, K, V, S> Iterator for HopscotchIter<'a, K, V, S>
where
    K: Clone,
    V: Clone,
{
    type Item = (K, V);

    /// Scans forward to the next occupied slot and clones its entry.
    ///
    /// NOTE(review): the table pointer is reloaded on every call, but
    /// `bucket_idx` is not reset if a resize swaps the table mid-iteration,
    /// so the position then refers to a different table — confirm this
    /// weak-consistency trade-off is intended.
    fn next(&mut self) -> Option<Self::Item> {
        let table_ptr = self.map.table.load(Ordering::Acquire, &self.guard);
        // SAFETY: the table stays alive while `self.guard` is pinned.
        let table = unsafe { &*table_ptr.as_raw() };

        while self.bucket_idx < table.buckets.len() {
            let bucket = table.get_bucket(self.bucket_idx);
            self.bucket_idx += 1;

            let entry_ptr = bucket.slot.load(Ordering::Acquire, &self.guard);
            if !entry_ptr.is_null() {
                // SAFETY: non-null entries remain valid under the guard.
                let entry = unsafe { &*entry_ptr.as_raw() };
                return Some((entry.key.clone(), entry.value.clone()));
            }
        }
        None
    }
}
828
/// Key iterator; thin adapter over `HopscotchIter` that discards values.
pub struct HopscotchKeys<'a, K: 'static, V: 'static, S> {
    iter: HopscotchIter<'a, K, V, S>,
}
833
834impl<'a, K, V, S> Iterator for HopscotchKeys<'a, K, V, S>
835where
836 K: Clone,
837 V: Clone,
838{
839 type Item = K;
840
841 fn next(&mut self) -> Option<Self::Item> {
842 self.iter.next().map(|(k, _)| k)
843 }
844}
845
/// Enables `for (k, v) in &map` via `HopscotchMap::iter`.
impl<'a, K, V, S> IntoIterator for &'a HopscotchMap<K, V, S>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
    S: BuildHasher,
{
    type Item = (K, V);
    type IntoIter = HopscotchIter<'a, K, V, S>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
859
/// Outcome of a single `try_insert` attempt.
enum InsertResult<V> {
    // Inserted (or replaced); carries the previous value if one existed.
    Success(Option<V>),
    // `only_if_absent` was set and the key was present; carries its value.
    Exists(V),
    // No slot could be opened within the neighborhood; grow the table.
    NeedResize,
    // Lost a race; retry against a fresh table snapshot.
    Retry,
}
866
#[cfg(feature = "std")]
impl<K, V> Default for HopscotchMap<K, V, FixedState>
where
    K: Hash + Eq + Clone + 'static,
    V: Clone + 'static,
{
    /// Equivalent to `HopscotchMap::new()`.
    fn default() -> Self {
        Self::new()
    }
}
877
878unsafe impl<K: Send, V: Send, S: Send> Send for HopscotchMap<K, V, S> {}
879unsafe impl<K: Sync, V: Sync, S: Sync> Sync for HopscotchMap<K, V, S> {}
880
impl<K, V, S> Drop for HopscotchMap<K, V, S> {
    /// Retires every remaining entry and then the table itself. `retire`
    /// (rather than an immediate drop) is used because threads that pinned
    /// earlier may still hold references until their guards are released.
    fn drop(&mut self) {
        let guard = pin();
        let table_ptr = self.table.load(Ordering::Acquire, &guard);
        // SAFETY: we own the map exclusively; the table pointer is valid.
        let table = unsafe { &*table_ptr.as_raw() };

        for i in 0..(table.capacity + NEIGHBORHOOD_SIZE) {
            let bucket = table.get_bucket(i);
            let entry_ptr = bucket.slot.load(Ordering::Acquire, &guard);

            if !entry_ptr.is_null() {
                retire(entry_ptr.as_raw());
            }
        }

        retire(table_ptr.as_raw());
    }
}
899
#[cfg(test)]
mod tests {
    use super::*;

    // Basic single-threaded insert/lookup, including a missing key.
    #[test]
    fn test_insert_and_get() {
        let map = HopscotchMap::new();
        assert_eq!(map.insert(1, 100), None);
        assert_eq!(map.get(&1), Some(100));
        assert_eq!(map.get(&2), None);
    }

    // Inserting past the initial capacity must trigger growth without
    // losing any entries.
    #[test]
    fn test_growing() {
        let map = HopscotchMap::with_capacity(32);
        for i in 0..100 {
            map.insert(i, i * 2);
        }
        for i in 0..100 {
            assert_eq!(map.get(&i), Some(i * 2));
        }
    }

    // Four threads insert disjoint key ranges concurrently; every entry
    // must be visible after all threads join.
    #[test]
    fn test_concurrent() {
        use alloc::sync::Arc;
        extern crate std;
        use std::thread;

        let map = Arc::new(HopscotchMap::with_capacity(64));
        let mut handles = alloc::vec::Vec::new();

        for thread_id in 0..4 {
            let map_clone = Arc::clone(&map);
            let handle = thread::spawn(move || {
                for i in 0..1000 {
                    let key = thread_id * 1000 + i;
                    map_clone.insert(key, key * 2);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        for thread_id in 0..4 {
            for i in 0..1000 {
                let key = thread_id * 1000 + i;
                assert_eq!(map.get(&key), Some(key * 2));
            }
        }
    }
}