scc/hash_index.rs
1//! [`HashIndex`] is a read-optimized asynchronous/concurrent hash map.
2
3use std::collections::hash_map::RandomState;
4use std::fmt::{self, Debug};
5use std::hash::{BuildHasher, Hash};
6use std::iter::FusedIterator;
7use std::ops::{Deref, RangeInclusive};
8use std::panic::UnwindSafe;
9use std::ptr;
10use std::sync::atomic::AtomicU8;
11#[cfg(not(feature = "loom"))]
12use std::sync::atomic::AtomicUsize;
13use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
14
15#[cfg(feature = "loom")]
16use loom::sync::atomic::AtomicUsize;
17use sdd::{AtomicRaw, Epoch, Owned, RawPtr};
18
19use super::hash_table::bucket::{Bucket, EntryPtr, INDEX};
20use super::hash_table::bucket_array::BucketArray;
21use super::hash_table::{HashTable, LockedBucket};
22use super::utils::{deref_unchecked, fake_ref, get_owned};
23use super::{Equivalent, Guard};
24
/// Scalable asynchronous/concurrent hash index.
///
/// [`HashIndex`] is an asynchronous/concurrent hash map data structure optimized for parallel read
/// operations. The key characteristics of [`HashIndex`] are similar to those of
/// [`HashMap`](super::HashMap) except its read operations are lock-free.
///
/// ## The key differences between [`HashIndex`] and [`HashMap`](crate::HashMap).
///
/// * Lock-free read: read and scan operations are never blocked and do not modify shared data.
/// * Immutability: the data in the container is immutable until it becomes unreachable.
/// * Linearizability: linearizability of read operations relies on the CPU architecture.
///
/// ## The key statistics for [`HashIndex`]
///
/// * The expected size of metadata for a single key-value pair: 2 bytes.
/// * The expected number of atomic write operations required for an operation on a single key: 2.
/// * The expected number of atomic variables accessed during a single key operation: 2.
/// * The number of entries managed by a single bucket without a linked list: 32.
/// * The expected maximum linked list length when a resize is triggered: log(capacity) / 8.
///
/// ## Unwind safety
///
/// [`HashIndex`] is impervious to out-of-memory errors and panics in user-specified code under one
/// condition: `H::Hasher::hash`, `K::drop` and `V::drop` must not panic.
pub struct HashIndex<K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The current bucket array; null until the first allocation (see
    // `with_capacity_and_hasher`, which defers allocation when `capacity == 0`).
    bucket_array: AtomicRaw<BucketArray<K, V, (), INDEX>>,
    // Hasher used to derive 64-bit hash values for keys.
    build_hasher: H,
    // Chain of retired bucket arrays awaiting reclamation in `dealloc_garbage`.
    garbage_chain: AtomicRaw<BucketArray<K, V, (), INDEX>>,
    // Epoch recorded when the garbage chain was populated; initialized to `u8::MAX`,
    // which presumably is not a valid `Epoch` encoding (`Epoch::try_from` guards its
    // use in `dealloc_garbage`) — acts as an "unset" sentinel.
    garbage_epoch: AtomicU8,
    // Lower bound on the capacity; temporarily raised while a `Reserve` is alive.
    minimum_capacity: AtomicUsize,
}
59
/// [`Entry`] represents a single entry in a [`HashIndex`].
///
/// Both variants own a locked bucket (see [`OccupiedEntry`] and [`VacantEntry`]), so an
/// [`Entry`] retains exclusive access to the key's bucket until it is consumed or dropped.
pub enum Entry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    /// An occupied entry.
    Occupied(OccupiedEntry<'h, K, V, H>),
    /// A vacant entry.
    Vacant(VacantEntry<'h, K, V, H>),
}
70
/// [`OccupiedEntry`] is a view into an occupied entry in a [`HashIndex`].
pub struct OccupiedEntry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The `HashIndex` that owns the entry.
    hashindex: &'h HashIndex<K, V, H>,
    // The exclusively locked bucket containing the entry.
    locked_bucket: LockedBucket<K, V, (), INDEX>,
    // Cursor identifying the entry slot within the locked bucket.
    entry_ptr: EntryPtr<K, V, INDEX>,
}
80
/// [`VacantEntry`] is a view into a vacant entry in a [`HashIndex`].
pub struct VacantEntry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The `HashIndex` the key would be inserted into.
    hashindex: &'h HashIndex<K, V, H>,
    // The key to insert, moved in by the `entry_*` methods.
    key: K,
    // The precomputed hash of `key`, avoiding a second hash on insertion.
    hash: u64,
    // The exclusively locked bucket where the key would be inserted; holding it keeps
    // a subsequent insertion race-free.
    locked_bucket: LockedBucket<K, V, (), INDEX>,
}
91
/// [`Reserve`] keeps the capacity of the associated [`HashIndex`] higher than a certain level.
///
/// The [`HashIndex`] does not shrink the capacity below the reserved capacity.
pub struct Reserve<'h, K, V, H = RandomState>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    // The `HashIndex` whose minimum capacity is being held up.
    hashindex: &'h HashIndex<K, V, H>,
    // The amount of additional capacity actually reserved (reported by
    // `reserve_capacity`); presumably released again when the `Reserve` drops.
    additional: usize,
}
103
/// An iterator over the entries of a [`HashIndex`].
///
/// An [`Iter`] iterates over all the entries that survive the [`Iter`].
pub struct Iter<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The `HashIndex` being iterated over.
    hashindex: &'h HashIndex<K, V, H>,
    // The bucket array currently being traversed, if any.
    bucket_array: Option<&'h BucketArray<K, V, (), INDEX>>,
    // Index of the current bucket within the bucket array.
    index: usize,
    // The bucket currently being scanned, if any.
    bucket: Option<&'h Bucket<K, V, (), INDEX>>,
    // Cursor pointing at the current entry within the current bucket.
    entry_ptr: EntryPtr<K, V, INDEX>,
    // Guard keeping the observed buckets reachable for the iterator's lifetime.
    guard: &'h Guard,
}
118
impl<K, V, H> HashIndex<K, V, H>
where
    H: BuildHasher,
{
    /// Creates an empty [`HashIndex`] with the given [`BuildHasher`].
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let hashindex: HashIndex<u64, u32, RandomState> =
    ///     HashIndex::with_hasher(RandomState::new());
    /// ```
    #[cfg(not(feature = "loom"))]
    #[inline]
    pub const fn with_hasher(build_hasher: H) -> Self {
        Self {
            // No bucket array is allocated until it is actually needed.
            bucket_array: AtomicRaw::null(),
            build_hasher,
            garbage_chain: AtomicRaw::null(),
            // `u8::MAX` marks "no garbage epoch recorded"; see `dealloc_garbage`.
            garbage_epoch: AtomicU8::new(u8::MAX),
            minimum_capacity: AtomicUsize::new(0),
        }
    }

    /// Creates an empty [`HashIndex`] with the given [`BuildHasher`].
    ///
    /// Non-`const` twin of the method above for the `loom` feature — loom's `AtomicUsize` is
    /// presumably not `const`-constructible, hence the duplicated body.
    #[cfg(feature = "loom")]
    #[inline]
    pub fn with_hasher(build_hasher: H) -> Self {
        Self {
            bucket_array: AtomicRaw::null(),
            build_hasher,
            garbage_chain: AtomicRaw::null(),
            garbage_epoch: AtomicU8::new(u8::MAX),
            minimum_capacity: AtomicUsize::new(0),
        }
    }

    /// Creates an empty [`HashIndex`] with the specified capacity and [`BuildHasher`].
    ///
    /// The actual capacity is equal to or greater than `capacity` unless it is greater than
    /// `1 << (usize::BITS - 1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let hashindex: HashIndex<u64, u32, RandomState> =
    ///     HashIndex::with_capacity_and_hasher(1000, RandomState::new());
    ///
    /// let result = hashindex.capacity();
    /// assert_eq!(result, 1024);
    /// ```
    #[inline]
    pub fn with_capacity_and_hasher(capacity: usize, build_hasher: H) -> Self {
        let (array, minimum_capacity) = if capacity == 0 {
            // Zero capacity: defer allocation of the bucket array entirely.
            (AtomicRaw::null(), AtomicUsize::new(0))
        } else {
            let bucket_array = unsafe {
                Owned::new_with_unchecked(|| {
                    BucketArray::<K, V, (), INDEX>::new(capacity, AtomicRaw::null())
                })
            };
            // Record the actual slot count (which may exceed `capacity` due to
            // rounding) as the minimum capacity.
            let minimum_capacity = bucket_array.num_slots();
            (
                AtomicRaw::new(bucket_array.into_raw()),
                AtomicUsize::new(minimum_capacity),
            )
        };
        Self {
            bucket_array: array,
            build_hasher,
            garbage_chain: AtomicRaw::null(),
            garbage_epoch: AtomicU8::new(u8::MAX),
            minimum_capacity,
        }
    }

    /// Deallocates garbage bucket arrays if they are unreachable.
    fn dealloc_garbage(&self, force_collect: bool) {
        let guard = Guard::new();
        let head_ptr = self.garbage_chain.load(Acquire, &guard);
        if head_ptr.is_null() {
            // Nothing has been retired: nothing to do.
            return;
        }
        // Collect when explicitly forced, or when the recorded epoch belongs to a
        // different generation than the current one — i.e., readers that could still
        // reference the retired arrays have presumably all finished.
        if force_collect
            || Epoch::try_from(self.garbage_epoch.load(Acquire))
                .is_ok_and(|e| !e.in_same_generation(guard.epoch()))
        {
            // Detach the whole chain with a single CAS; only the winning thread
            // proceeds, so each retired array is dropped exactly once.
            if self
                .garbage_chain
                .compare_exchange(head_ptr, RawPtr::null(), Acquire, Relaxed, &guard)
                .is_ok()
            {
                // Walk the chain, unlinking each array before dropping it in place.
                let mut ptr = head_ptr;
                while let Some(garbage_bucket_array) = get_owned(ptr) {
                    ptr = garbage_bucket_array.linked_array_var().swap(
                        RawPtr::null(),
                        Acquire,
                        &guard,
                    );
                    // SAFETY: the successful CAS above transferred exclusive
                    // ownership of the detached chain to this thread.
                    unsafe { garbage_bucket_array.drop_in_place() };
                }
            }
        } else {
            // Too early to reclaim: flag the guard so collection is retried later.
            guard.set_has_garbage();
        }
    }
}
232
233impl<K, V, H> HashIndex<K, V, H>
234where
235 K: Eq + Hash,
236 H: BuildHasher,
237{
    /// Temporarily increases the minimum capacity of the [`HashIndex`].
    ///
    /// A [`Reserve`] is returned if the [`HashIndex`] could increase the minimum capacity while
    /// the increased capacity is not exclusively owned by the returned [`Reserve`], allowing
    /// others to benefit from it. The memory for the additional space may not be immediately
    /// allocated if the [`HashIndex`] is empty or currently being resized, however once the memory
    /// is reserved eventually, the capacity will not shrink below the additional capacity until
    /// the returned [`Reserve`] is dropped.
    ///
    /// # Errors
    ///
    /// Returns `None` if the additional capacity could not be reserved, e.g., when an excessively
    /// large number is given.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<usize, usize> = HashIndex::with_capacity(1000);
    /// assert_eq!(hashindex.capacity(), 1024);
    ///
    /// let reserved = hashindex.reserve(10000);
    /// assert!(reserved.is_some());
    /// assert_eq!(hashindex.capacity(), 16384);
    ///
    /// assert!(hashindex.reserve(usize::MAX).is_none());
    /// assert_eq!(hashindex.capacity(), 16384);
    ///
    /// for i in 0..16 {
    ///     assert!(hashindex.insert_sync(i, i).is_ok());
    /// }
    /// drop(reserved);
    ///
    /// assert_eq!(hashindex.capacity(), 1024);
    /// ```
    #[inline]
    pub fn reserve(&self, additional_capacity: usize) -> Option<Reserve<'_, K, V, H>> {
        // `reserve_capacity` reports how much capacity was actually reserved;
        // zero signals that the request was rejected.
        let additional = self.reserve_capacity(additional_capacity);
        if additional == 0 {
            None
        } else {
            // The returned `Reserve` keeps the reservation alive until dropped.
            Some(Reserve {
                hashindex: self,
                additional,
            })
        }
    }
285
    /// Gets the entry associated with the given key in the map for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<char, u32> = HashIndex::default();
    ///
    /// let future_entry = hashindex.entry_async('b');
    /// ```
    #[inline]
    pub async fn entry_async(&self, key: K) -> Entry<'_, K, V, H> {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(&key);
        // Acquire exclusive access to the key's bucket, awaiting if contended.
        let locked_bucket = self.writer_async(hash).await;
        let entry_ptr = locked_bucket.search(&key, hash);
        if entry_ptr.is_valid() {
            // Key present: expose the slot through an `OccupiedEntry`.
            Entry::Occupied(OccupiedEntry {
                hashindex: self,
                locked_bucket,
                entry_ptr,
            })
        } else {
            // Key absent: keep the bucket locked inside the `VacantEntry` so a
            // follow-up insertion cannot race with other writers.
            let vacant_entry = VacantEntry {
                hashindex: self,
                key,
                hash,
                locked_bucket,
            };
            Entry::Vacant(vacant_entry)
        }
    }
319
320 /// Gets the entry associated with the given key in the map for in-place manipulation.
321 ///
322 /// # Examples
323 ///
324 /// ```
325 /// use scc::HashIndex;
326 ///
327 /// let hashindex: HashIndex<char, u32> = HashIndex::default();
328 ///
329 /// for ch in "a short treatise on fungi".chars() {
330 /// unsafe {
331 /// hashindex.entry_sync(ch).and_modify(|counter| *counter += 1).or_insert(1);
332 /// }
333 /// }
334 ///
335 /// assert_eq!(hashindex.peek_with(&'s', |_, v| *v), Some(2));
336 /// assert_eq!(hashindex.peek_with(&'t', |_, v| *v), Some(3));
337 /// assert!(hashindex.peek_with(&'y', |_, v| *v).is_none());
338 /// ```
339 #[inline]
340 pub fn entry_sync(&self, key: K) -> Entry<'_, K, V, H> {
341 let hash = self.hash(&key);
342 let locked_bucket = self.writer_sync(hash);
343 let entry_ptr = locked_bucket.search(&key, hash);
344 if entry_ptr.is_valid() {
345 Entry::Occupied(OccupiedEntry {
346 hashindex: self,
347 locked_bucket,
348 entry_ptr,
349 })
350 } else {
351 let vacant_entry = VacantEntry {
352 hashindex: self,
353 key,
354 hash,
355 locked_bucket,
356 };
357 Entry::Vacant(vacant_entry)
358 }
359 }
360
    /// Tries to get the entry associated with the given key in the map for in-place manipulation.
    ///
    /// Returns `None` if the entry could not be locked.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<usize, usize> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(0, 1).is_ok());
    /// assert!(hashindex.try_entry(0).is_some());
    /// ```
    #[inline]
    pub fn try_entry(&self, key: K) -> Option<Entry<'_, K, V, H>> {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(&key);
        let guard = Guard::new();
        // Non-blocking lock attempt: `None` propagates when the bucket cannot be
        // locked immediately.
        let locked_bucket = self.try_reserve_bucket(hash, &guard)?;
        let entry_ptr = locked_bucket.search(&key, hash);
        if entry_ptr.is_valid() {
            // Key present: hand out an exclusive view of the occupied slot.
            Some(Entry::Occupied(OccupiedEntry {
                hashindex: self,
                locked_bucket,
                entry_ptr,
            }))
        } else {
            // Key absent: the locked bucket travels inside the `VacantEntry`.
            Some(Entry::Vacant(VacantEntry {
                hashindex: self,
                key,
                hash,
                locked_bucket,
            }))
        }
    }
397
398 /// Begins iterating over entries by getting the first occupied entry.
399 ///
400 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next_async`] can act as
401 /// a mutable iterator over entries.
402 ///
403 /// # Examples
404 ///
405 /// ```
406 /// use scc::HashIndex;
407 ///
408 /// let hashindex: HashIndex<char, u32> = HashIndex::default();
409 ///
410 /// let future_entry = hashindex.begin_async();
411 /// ```
412 #[inline]
413 pub async fn begin_async(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
414 self.any_async(|_, _| true).await
415 }
416
417 /// Begins iterating over entries by getting the first occupied entry.
418 ///
419 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next_sync`] can act as a
420 /// mutable iterator over entries.
421 ///
422 /// # Examples
423 ///
424 /// ```
425 /// use scc::HashIndex;
426 ///
427 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
428 ///
429 /// assert!(hashindex.insert_sync(1, 0).is_ok());
430 ///
431 /// let mut first_entry = hashindex.begin_sync().unwrap();
432 /// unsafe {
433 /// *first_entry.get_mut() = 2;
434 /// }
435 ///
436 /// assert!(first_entry.next_sync().is_none());
437 /// assert_eq!(hashindex.peek_with(&1, |_, v| *v), Some(2));
438 /// ```
439 #[inline]
440 pub fn begin_sync(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
441 self.any_sync(|_, _| true)
442 }
443
    /// Finds any entry satisfying the supplied predicate for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// let future_entry = hashindex.any_async(|k, _| *k == 2);
    /// ```
    #[inline]
    pub async fn any_async<P: FnMut(&K, &V) -> bool>(
        &self,
        mut pred: P,
    ) -> Option<OccupiedEntry<'_, K, V, H>> {
        // Opportunistically reclaim retired bucket arrays before taking locks.
        self.reclaim_memory();
        let mut entry = None;
        // Visit buckets with exclusive access; returning `false` from the closure
        // stops the traversal once a matching entry has been captured.
        self.for_each_writer_async(0, 0, |locked_bucket, _| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&locked_bucket.writer) {
                let (k, v) = locked_bucket.entry(&entry_ptr);
                if pred(k, v) {
                    // Move the locked bucket into the `OccupiedEntry` so the caller
                    // retains exclusive access to the found entry.
                    entry = Some(OccupiedEntry {
                        hashindex: self,
                        locked_bucket,
                        entry_ptr,
                    });
                    return false;
                }
            }
            // No match in this bucket; keep scanning.
            true
        })
        .await;
        entry
    }
480
    /// Finds any entry satisfying the supplied predicate for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 3).is_ok());
    ///
    /// let mut entry = hashindex.any_sync(|k, _| *k == 2).unwrap();
    /// assert_eq!(*entry.get(), 3);
    /// ```
    #[inline]
    pub fn any_sync<P: FnMut(&K, &V) -> bool>(
        &self,
        mut pred: P,
    ) -> Option<OccupiedEntry<'_, K, V, H>> {
        // Opportunistically reclaim retired bucket arrays before taking locks.
        self.reclaim_memory();
        let mut entry = None;
        let guard = Guard::new();
        // Visit buckets with exclusive access; returning `false` from the closure
        // stops the traversal once a matching entry has been captured.
        self.for_each_writer_sync(0, 0, &guard, |locked_bucket, _| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&locked_bucket.writer) {
                let (k, v) = locked_bucket.entry(&entry_ptr);
                if pred(k, v) {
                    // Move the locked bucket into the `OccupiedEntry` so the caller
                    // retains exclusive access to the found entry.
                    entry = Some(OccupiedEntry {
                        hashindex: self,
                        locked_bucket,
                        entry_ptr,
                    });
                    return false;
                }
            }
            // No match in this bucket; keep scanning.
            true
        });
        entry
    }
521
    /// Inserts a key-value pair into the [`HashIndex`].
    ///
    /// # Note
    ///
    /// If the key exists, the value is *not* updated.
    ///
    /// # Errors
    ///
    /// Returns an error containing the supplied key-value pair if the key exists.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// ```
    #[inline]
    pub async fn insert_async(&self, key: K, val: V) -> Result<(), (K, V)> {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(&key);
        let locked_bucket = self.writer_async(hash).await;
        if locked_bucket.search(&key, hash).is_valid() {
            // Duplicate key: hand the pair back to the caller untouched.
            Err((key, val))
        } else {
            locked_bucket.insert(hash, (key, val));
            Ok(())
        }
    }
552
553 /// Inserts a key-value pair into the [`HashIndex`].
554 ///
555 /// # Note
556 ///
557 /// If the key exists, the value is *not* updated.
558 ///
559 /// # Errors
560 ///
561 /// Returns an error along with the supplied key-value pair if the key exists.
562 ///
563 /// # Examples
564 ///
565 /// ```
566 /// use scc::HashIndex;
567 ///
568 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
569 ///
570 /// assert!(hashindex.insert_sync(1, 0).is_ok());
571 /// assert_eq!(hashindex.insert_sync(1, 1).unwrap_err(), (1, 1));
572 /// ```
573 #[inline]
574 pub fn insert_sync(&self, key: K, val: V) -> Result<(), (K, V)> {
575 self.reclaim_memory();
576 let hash = self.hash(&key);
577 let locked_bucket = self.writer_sync(hash);
578 if locked_bucket.search(&key, hash).is_valid() {
579 Err((key, val))
580 } else {
581 locked_bucket.insert(hash, (key, val));
582 Ok(())
583 }
584 }
585
586 /// Removes a key-value pair if the key exists.
587 ///
588 /// Returns `false` if the key does not exist.
589 ///
590 /// Returns `true` if the key existed and the condition was met after marking the entry
591 /// unreachable; the memory will be reclaimed later.
592 ///
593 /// # Examples
594 ///
595 /// ```
596 /// use scc::HashIndex;
597 ///
598 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
599 /// let future_insert = hashindex.insert_async(11, 17);
600 /// let future_remove = hashindex.remove_async(&11);
601 /// ```
602 #[inline]
603 pub async fn remove_async<Q>(&self, key: &Q) -> bool
604 where
605 Q: Equivalent<K> + Hash + ?Sized,
606 {
607 self.remove_if_async(key, |_| true).await
608 }
609
610 /// Removes a key-value pair if the key exists.
611 ///
612 /// Returns `false` if the key does not exist.
613 ///
614 /// Returns `true` if the key existed and the condition was met after marking the entry
615 /// unreachable; the memory will be reclaimed later.
616 ///
617 /// # Examples
618 ///
619 /// ```
620 /// use scc::HashIndex;
621 ///
622 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
623 ///
624 /// assert!(!hashindex.remove_sync(&1));
625 /// assert!(hashindex.insert_sync(1, 0).is_ok());
626 /// assert!(hashindex.remove_sync(&1));
627 /// ```
628 #[inline]
629 pub fn remove_sync<Q>(&self, key: &Q) -> bool
630 where
631 Q: Equivalent<K> + Hash + ?Sized,
632 {
633 self.remove_if_sync(key, |_| true)
634 }
635
    /// Removes a key-value pair if the key exists and the given condition is met.
    ///
    /// Returns `false` if the key does not exist or the condition was not met.
    ///
    /// Returns `true` if the key existed and the condition was met after marking the entry
    /// unreachable; the memory will be reclaimed later.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// let future_remove = hashindex.remove_if_async(&11, |_| true);
    /// ```
    #[inline]
    pub async fn remove_if_async<Q, F: FnOnce(&V) -> bool>(&self, key: &Q, condition: F) -> bool
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(key);
        // `None` means there is no bucket to lock for this hash — presumably the
        // table holds nothing for it — so there is nothing to remove.
        let Some(mut locked_bucket) = self.optional_writer_async(hash).await else {
            return false;
        };
        let mut entry_ptr = locked_bucket.search(key, hash);
        // Remove only when the key exists and the caller-supplied condition holds.
        if entry_ptr.is_valid() && condition(locked_bucket.entry_mut(&mut entry_ptr).1) {
            // The entry is only marked removed; memory is reclaimed later, once no
            // reader can still observe it.
            locked_bucket.mark_removed(self, &mut entry_ptr);
            true
        } else {
            false
        }
    }
670
    /// Removes a key-value pair if the key exists and the given condition is met.
    ///
    /// Returns `false` if the key does not exist or the condition was not met.
    ///
    /// Returns `true` if the key existed and the condition was met after marking the entry
    /// unreachable; the memory will be reclaimed later.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(!hashindex.remove_if_sync(&1, |v| *v == 1));
    /// assert!(hashindex.remove_if_sync(&1, |v| *v == 0));
    /// ```
    #[inline]
    pub fn remove_if_sync<Q, F: FnOnce(&V) -> bool>(&self, key: &Q, condition: F) -> bool
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(key);
        // `None` means there is no bucket to lock for this hash — presumably the
        // table holds nothing for it — so there is nothing to remove.
        let Some(mut locked_bucket) = self.optional_writer_sync(hash) else {
            return false;
        };
        let mut entry_ptr = locked_bucket.search(key, hash);
        // Remove only when the key exists and the caller-supplied condition holds.
        if entry_ptr.is_valid() && condition(locked_bucket.entry_mut(&mut entry_ptr).1) {
            // The entry is only marked removed; memory is reclaimed later, once no
            // reader can still observe it.
            locked_bucket.mark_removed(self, &mut entry_ptr);
            true
        } else {
            false
        }
    }
707
    /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
    ///
    /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
    /// use [`peek`](Self::peek) if read-only access is sufficient.
    ///
    /// Returns `None` if the key does not exist.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// let future_get = hashindex.get_async(&11);
    /// ```
    #[inline]
    pub async fn get_async<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(key);
        // `None` from `optional_writer_async` means there is no bucket for the hash,
        // hence the key cannot exist.
        let locked_bucket = self.optional_writer_async(hash).await?;
        let entry_ptr = locked_bucket.search(key, hash);
        if entry_ptr.is_valid() {
            // Key present: keep the bucket locked inside the `OccupiedEntry`.
            return Some(OccupiedEntry {
                hashindex: self,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }
742
    /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
    ///
    /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
    /// use [`peek`](Self::peek) if read-only access is sufficient.
    ///
    /// Returns `None` if the key does not exist.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.get_sync(&1).is_none());
    /// assert!(hashindex.insert_sync(1, 10).is_ok());
    /// assert_eq!(*hashindex.get_sync(&1).unwrap().get(), 10);
    /// assert_eq!(*hashindex.get_sync(&1).unwrap(), 10);
    /// ```
    #[inline]
    pub fn get_sync<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays before taking a lock.
        self.reclaim_memory();
        let hash = self.hash(key);
        // `None` from `optional_writer_sync` means there is no bucket for the hash,
        // hence the key cannot exist.
        let locked_bucket = self.optional_writer_sync(hash)?;
        let entry_ptr = locked_bucket.search(key, hash);
        if entry_ptr.is_valid() {
            // Key present: keep the bucket locked inside the `OccupiedEntry`.
            return Some(OccupiedEntry {
                hashindex: self,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }
780
    /// Returns a guarded reference to the value for the specified key without acquiring locks.
    ///
    /// Returns `None` if the key does not exist.
    ///
    /// # Note
    ///
    /// The returned reference may point to an old snapshot of the value if the entry has recently
    /// been relocated due to resizing. This means that the effects of interior mutability, e.g.,
    /// `Mutex<T>` or `UnsafeCell<T>`, may not be observable later. Use [`HashIndex::read_async`] or
    /// [`HashIndex::read_sync`] if the value needs to be updated through interior mutability.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::{Guard, HashIndex};
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 10).is_ok());
    ///
    /// let guard = Guard::new();
    /// let value_ref = hashindex.peek(&1, &guard).unwrap();
    /// assert_eq!(*value_ref, 10);
    /// ```
    #[inline]
    pub fn peek<'h, Q>(&'h self, key: &Q, guard: &'h Guard) -> Option<&'h V>
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        // Lock-free search; the returned reference is kept alive by the caller's guard.
        self.peek_entry(key, guard).map(|(_, v)| v)
    }
813
    /// Peeks a key-value pair without acquiring locks.
    ///
    /// Returns `None` if the key does not exist.
    ///
    /// # Note
    ///
    /// The reference passed to the closure may point to an old snapshot of the value if the entry
    /// has recently been relocated due to resizing. This means that the effects of interior
    /// mutability, e.g., `Mutex<T>` or `UnsafeCell<T>`, may not be observable later. Use
    /// [`HashIndex::read_async`] or [`HashIndex::read_sync`] if the value needs to be updated
    /// through interior mutability.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.peek_with(&1, |_, v| *v).is_none());
    /// assert!(hashindex.insert_sync(1, 10).is_ok());
    /// assert_eq!(hashindex.peek_with(&1, |_, v| *v).unwrap(), 10);
    /// ```
    #[inline]
    pub fn peek_with<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        // A short-lived guard suffices: the closure consumes the references before
        // the guard is dropped at the end of this scope.
        let guard = Guard::new();
        self.peek_entry(key, &guard).map(|(k, v)| reader(k, v))
    }
846
847 /// Reads a key-value pair.
848 ///
849 /// Returns `None` if the key does not exist.
850 ///
851 /// # Note
852 ///
853 /// This method guarantees that the closure reads the latest snapshot of the value by acquiring
854 /// a shared lock. If lock-free read-only access to entry is required, consider using
855 /// [`HashIndex::peek`] or [`HashIndex::peek_with`].
856 ///
857 /// ```
858 /// use scc::HashIndex;
859 ///
860 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
861 /// let future_insert = hashindex.insert_async(11, 17);
862 /// let future_read = hashindex.read_async(&11, |_, v| *v);
863 /// ```
864 #[inline]
865 pub async fn read_async<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
866 where
867 Q: Equivalent<K> + Hash + ?Sized,
868 {
869 self.reader_async(key, reader).await
870 }
871
872 /// Reads a key-value pair.
873 ///
874 /// Returns `None` if the key does not exist.
875 ///
876 /// # Note
877 ///
878 /// This method guarantees that the closure reads the latest snapshot of the value by acquiring
879 /// a shared lock. If lock-free read-only access to entry is required, consider using
880 /// [`HashIndex::peek`] or [`HashIndex::peek_with`].
881 ///
882 /// # Examples
883 ///
884 /// ```
885 /// use scc::HashIndex;
886 ///
887 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
888 ///
889 /// assert!(hashindex.read_sync(&1, |_, v| *v).is_none());
890 /// assert!(hashindex.insert_sync(1, 10).is_ok());
891 /// assert_eq!(hashindex.read_sync(&1, |_, v| *v).unwrap(), 10);
892 /// ```
893 #[inline]
894 pub fn read_sync<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
895 where
896 Q: Equivalent<K> + Hash + ?Sized,
897 {
898 self.reader_sync(key, reader)
899 }
900
901 /// Returns `true` if the [`HashIndex`] contains a value for the specified key.
902 ///
903 /// # Examples
904 ///
905 /// ```
906 /// use scc::HashIndex;
907 ///
908 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
909 ///
910 /// assert!(!hashindex.contains(&1));
911 /// assert!(hashindex.insert_sync(1, 0).is_ok());
912 /// assert!(hashindex.contains(&1));
913 /// ```
914 #[inline]
915 pub fn contains<Q>(&self, key: &Q) -> bool
916 where
917 Q: Equivalent<K> + Hash + ?Sized,
918 {
919 self.peek_with(key, |_, _| ()).is_some()
920 }
921
    /// Iterates over entries asynchronously for reading.
    ///
    /// Stops iterating when the closure returns `false`, and this method also returns `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u64> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    ///
    /// async {
    ///     let result = hashindex.iter_async(|k, v| {
    ///         false
    ///     }).await;
    ///     assert!(!result);
    /// };
    /// ```
    #[inline]
    pub async fn iter_async<F: FnMut(&K, &V) -> bool>(&self, mut f: F) -> bool {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        let mut result = true;
        // Visit buckets with shared access; returning `false` from the inner closure
        // aborts the traversal, and the aborted state is propagated via `result`.
        self.for_each_reader_async(|reader, data_block| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&reader) {
                if !f(entry_ptr.key(data_block), entry_ptr.val(data_block)) {
                    result = false;
                    return false;
                }
            }
            true
        })
        .await;
        result
    }
959
    /// Iterates over entries synchronously for reading.
    ///
    /// Stops iterating when the closure returns `false`, and this method also returns `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u64> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 1).is_ok());
    ///
    /// let mut acc = 0_u64;
    /// let result = hashindex.iter_sync(|k, v| {
    ///     acc += *k;
    ///     acc += *v;
    ///     true
    /// });
    ///
    /// assert!(result);
    /// assert_eq!(acc, 4);
    /// ```
    #[inline]
    pub fn iter_sync<F: FnMut(&K, &V) -> bool>(&self, mut f: F) -> bool {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        let mut result = true;
        let guard = Guard::new();
        // Visit buckets with shared access; returning `false` from the inner closure
        // aborts the traversal, and the aborted state is propagated via `result`.
        self.for_each_reader_sync(&guard, |reader, data_block| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&reader) {
                if !f(entry_ptr.key(data_block), entry_ptr.val(data_block)) {
                    result = false;
                    return false;
                }
            }
            true
        });
        result
    }
1001
    /// Retains the entries specified by the predicate.
    ///
    /// Entries that have existed since the invocation of the method are guaranteed to be visited
    /// if they are not removed, however the same entry can be visited more than once if the
    /// [`HashIndex`] gets resized by another thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// let future_insert = hashindex.insert_async(1, 0);
    /// let future_retain = hashindex.retain_async(|k, v| *k == 1);
    /// ```
    #[inline]
    pub async fn retain_async<F: FnMut(&K, &V) -> bool>(&self, mut pred: F) {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        self.for_each_writer_async(0, 0, |mut locked_bucket, removed| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&locked_bucket.writer) {
                let (k, v) = locked_bucket.entry_mut(&mut entry_ptr);
                if !pred(k, v) {
                    // NOTE(review): a fresh `Guard` is created for every removed
                    // entry (unlike `retain_sync`, which reuses one guard); hoisting
                    // a single guard may be cheaper — confirm guard lifetime rules
                    // before changing.
                    locked_bucket
                        .writer
                        .mark_removed(&mut entry_ptr, &Guard::new());
                    // Inform the driver that this bucket lost entries.
                    *removed = true;
                }
            }
            // Always continue to the next bucket.
            true
        })
        .await;
    }
1036
    /// Retains the entries specified by the predicate.
    ///
    /// Entries that have existed since the invocation of the method are guaranteed to be visited
    /// if they are not removed, however the same entry can be visited more than once if the
    /// [`HashIndex`] gets resized by another thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 1).is_ok());
    /// assert!(hashindex.insert_sync(3, 2).is_ok());
    ///
    /// hashindex.retain_sync(|k, v| *k == 1 && *v == 0);
    ///
    /// assert!(hashindex.contains(&1));
    /// assert!(!hashindex.contains(&2));
    /// assert!(!hashindex.contains(&3));
    /// ```
    #[inline]
    pub fn retain_sync<F: FnMut(&K, &V) -> bool>(&self, mut pred: F) {
        // Opportunistically reclaim retired bucket arrays.
        self.reclaim_memory();
        let guard = Guard::new();
        self.for_each_writer_sync(0, 0, &guard, |mut locked_bucket, removed| {
            let mut entry_ptr = EntryPtr::null();
            while entry_ptr.find_next(&locked_bucket.writer) {
                let (k, v) = locked_bucket.entry_mut(&mut entry_ptr);
                if !pred(k, v) {
                    // Rejected entries are only marked removed; memory is reclaimed
                    // later once no reader can observe them.
                    locked_bucket.writer.mark_removed(&mut entry_ptr, &guard);
                    // Inform the driver that this bucket lost entries.
                    *removed = true;
                }
            }
            // Always continue to the next bucket.
            true
        });
    }
1076
1077 /// Clears the [`HashIndex`] by removing all key-value pairs.
1078 ///
1079 /// # Examples
1080 ///
1081 /// ```
1082 /// use scc::HashIndex;
1083 ///
1084 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1085 ///
1086 /// let future_insert = hashindex.insert_async(1, 0);
1087 /// let future_retain = hashindex.clear_async();
1088 /// ```
1089 pub async fn clear_async(&self) {
1090 self.retain_async(|_, _| false).await;
1091 }
1092
1093 /// Clears the [`HashIndex`] by removing all key-value pairs.
1094 ///
1095 /// # Examples
1096 ///
1097 /// ```
1098 /// use scc::HashIndex;
1099 ///
1100 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1101 ///
1102 /// assert!(hashindex.insert_sync(1, 0).is_ok());
1103 /// hashindex.clear_sync();
1104 ///
1105 /// assert!(!hashindex.contains(&1));
1106 /// ```
1107 pub fn clear_sync(&self) {
1108 self.retain_sync(|_, _| false);
1109 }
1110
1111 /// Returns the number of entries in the [`HashIndex`].
1112 ///
1113 /// It reads the entire metadata area of the bucket array to calculate the number of valid
1114 /// entries, making its time complexity `O(N)`. Furthermore, it may overcount entries if an old
1115 /// bucket array has yet to be dropped.
1116 ///
1117 /// # Examples
1118 ///
1119 /// ```
1120 /// use scc::HashIndex;
1121 ///
1122 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1123 ///
1124 /// assert!(hashindex.insert_sync(1, 0).is_ok());
1125 /// assert_eq!(hashindex.len(), 1);
1126 /// ```
1127 #[inline]
1128 pub fn len(&self) -> usize {
1129 self.num_entries(&Guard::new())
1130 }
1131
1132 /// Returns `true` if the [`HashIndex`] is empty.
1133 ///
1134 /// # Examples
1135 ///
1136 /// ```
1137 /// use scc::HashIndex;
1138 ///
1139 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1140 ///
1141 /// assert!(hashindex.is_empty());
1142 /// assert!(hashindex.insert_sync(1, 0).is_ok());
1143 /// assert!(!hashindex.is_empty());
1144 /// ```
1145 #[inline]
1146 pub fn is_empty(&self) -> bool {
1147 !self.has_entry(&Guard::new())
1148 }
1149
1150 /// Returns the capacity of the [`HashIndex`].
1151 ///
1152 /// # Examples
1153 ///
1154 /// ```
1155 /// use scc::HashIndex;
1156 ///
1157 /// let hashindex_default: HashIndex<u64, u32> = HashIndex::default();
1158 /// assert_eq!(hashindex_default.capacity(), 0);
1159 ///
1160 /// assert!(hashindex_default.insert_sync(1, 0).is_ok());
1161 /// assert_eq!(hashindex_default.capacity(), 64);
1162 ///
1163 /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1000);
1164 /// assert_eq!(hashindex.capacity(), 1024);
1165 /// ```
1166 #[inline]
1167 pub fn capacity(&self) -> usize {
1168 self.num_slots(&Guard::new())
1169 }
1170
1171 /// Returns the current capacity range of the [`HashIndex`].
1172 ///
1173 /// # Examples
1174 ///
1175 /// ```
1176 /// use scc::HashIndex;
1177 ///
1178 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1179 ///
1180 /// assert_eq!(hashindex.capacity_range(), 0..=(1_usize << (usize::BITS - 2)));
1181 ///
1182 /// let reserved = hashindex.reserve(1000);
1183 /// assert_eq!(hashindex.capacity_range(), 1000..=(1_usize << (usize::BITS - 2)));
1184 /// ```
1185 #[inline]
1186 pub fn capacity_range(&self) -> RangeInclusive<usize> {
1187 self.minimum_capacity.load(Relaxed)..=self.maximum_capacity()
1188 }
1189
1190 /// Returns the index of the bucket that may contain the key.
1191 ///
1192 /// The method returns the index of the bucket associated with the key. The number of buckets
1193 /// can be calculated by dividing the capacity by `32`.
1194 ///
1195 /// # Examples
1196 ///
1197 /// ```
1198 /// use scc::HashIndex;
1199 ///
1200 /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1024);
1201 ///
1202 /// let bucket_index = hashindex.bucket_index(&11);
1203 /// assert!(bucket_index < hashindex.capacity() / 32);
1204 /// ```
1205 #[inline]
1206 pub fn bucket_index<Q>(&self, key: &Q) -> usize
1207 where
1208 Q: Equivalent<K> + Hash + ?Sized,
1209 {
1210 let hash = self.hash(key);
1211 self.calculate_bucket_index(hash)
1212 }
1213
1214 /// Returns an [`Iter`].
1215 ///
1216 /// It is guaranteed to go through all the key-value pairs pertaining in the [`HashIndex`]
1217 /// at the moment, however the same key-value pair can be visited more than once if the
1218 /// [`HashIndex`] is being resized.
1219 ///
1220 /// It requires the user to supply a reference to a [`Guard`].
1221 ///
1222 /// # Examples
1223 ///
1224 /// ```
1225 /// use scc::{Guard, HashIndex};
1226 ///
1227 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1228 ///
1229 /// assert!(hashindex.insert_sync(1, 0).is_ok());
1230 ///
1231 /// let guard = Guard::new();
1232 ///
1233 /// let mut iter = hashindex.iter(&guard);
1234 /// let entry_ref = iter.next().unwrap();
1235 /// assert_eq!(iter.next(), None);
1236 ///
1237 /// for iter in hashindex.iter(&guard) {
1238 /// assert_eq!(iter, (&1, &0));
1239 /// }
1240 /// ```
1241 #[inline]
1242 pub fn iter<'h>(&'h self, guard: &'h Guard) -> Iter<'h, K, V, H> {
1243 Iter {
1244 hashindex: self,
1245 bucket_array: None,
1246 index: 0,
1247 bucket: None,
1248 entry_ptr: EntryPtr::null(),
1249 guard,
1250 }
1251 }
1252
    /// Reclaims memory by dropping all garbage bucket arrays if they are unreachable.
    #[inline]
    fn reclaim_memory(&self) {
        // Cheap fast path: only attempt deallocation when the garbage chain is non-empty.
        if !self.garbage_chain.is_null(Acquire) {
            self.dealloc_garbage(false);
        }
    }
1260}
1261
1262impl<K, V, H> Clone for HashIndex<K, V, H>
1263where
1264 K: Clone + Eq + Hash,
1265 V: Clone,
1266 H: BuildHasher + Clone,
1267{
1268 #[inline]
1269 fn clone(&self) -> Self {
1270 let self_clone = Self::with_capacity_and_hasher(self.capacity(), self.hasher().clone());
1271 for (k, v) in self.iter(&Guard::new()) {
1272 let _result = self_clone.insert_sync(k.clone(), v.clone());
1273 }
1274 self_clone
1275 }
1276}
1277
1278impl<K, V, H> Debug for HashIndex<K, V, H>
1279where
1280 K: Debug + Eq + Hash,
1281 V: Debug,
1282 H: BuildHasher,
1283{
1284 #[inline]
1285 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1286 self.reclaim_memory();
1287 let guard = Guard::new();
1288 f.debug_map().entries(self.iter(&guard)).finish()
1289 }
1290}
1291
1292impl<K, V> HashIndex<K, V, RandomState>
1293where
1294 K: Eq + Hash,
1295{
1296 /// Creates an empty default [`HashIndex`].
1297 ///
1298 /// # Examples
1299 ///
1300 /// ```
1301 /// use scc::HashIndex;
1302 ///
1303 /// let hashindex: HashIndex<u64, u32> = HashIndex::new();
1304 ///
1305 /// let result = hashindex.capacity();
1306 /// assert_eq!(result, 0);
1307 /// ```
1308 #[inline]
1309 #[must_use]
1310 pub fn new() -> Self {
1311 Self::default()
1312 }
1313
1314 /// Creates an empty [`HashIndex`] with the specified capacity.
1315 ///
1316 /// The actual capacity is equal to or greater than `capacity` unless it is greater than
1317 /// `1 << (usize::BITS - 1)`.
1318 ///
1319 /// # Examples
1320 ///
1321 /// ```
1322 /// use scc::HashIndex;
1323 ///
1324 /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1000);
1325 ///
1326 /// let result = hashindex.capacity();
1327 /// assert_eq!(result, 1024);
1328 /// ```
1329 #[inline]
1330 #[must_use]
1331 pub fn with_capacity(capacity: usize) -> Self {
1332 Self::with_capacity_and_hasher(capacity, RandomState::new())
1333 }
1334}
1335
impl<K, V, H> Default for HashIndex<K, V, H>
where
    H: BuildHasher + Default,
{
    /// Creates an empty default [`HashIndex`].
    ///
    /// The initial capacity is `0`; bucket memory is allocated lazily (the example below and the
    /// one for [`HashIndex::capacity`] show the capacity becoming `64` only after the first
    /// insertion).
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// let result = hashindex.capacity();
    /// assert_eq!(result, 0);
    /// ```
    #[inline]
    fn default() -> Self {
        Self::with_hasher(H::default())
    }
}
1359
impl<K, V, H> Drop for HashIndex<K, V, H>
where
    H: BuildHasher,
{
    #[inline]
    fn drop(&mut self) {
        if let Some(bucket_array) = get_owned(self.bucket_array.load(Acquire, fake_ref(self))) {
            unsafe {
                // The entire array does not need to wait for an epoch change as no references will
                // remain outside the lifetime of the `HashIndex`.
                bucket_array.drop_in_place();
            }
        }
        // Drop any bucket arrays still chained in the garbage list.
        self.dealloc_garbage(true);
        // NOTE(review): the repeated `accelerate` calls appear to push the global epoch forward
        // so that deferred reclamation completes promptly — confirm against the `sdd::Guard`
        // documentation.
        for _ in 0..4 {
            Guard::new().accelerate();
        }
    }
}
1379
1380impl<K, V, H> FromIterator<(K, V)> for HashIndex<K, V, H>
1381where
1382 K: Eq + Hash,
1383 H: BuildHasher + Default,
1384{
1385 #[inline]
1386 fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
1387 let into_iter = iter.into_iter();
1388 let hashindex = Self::with_capacity_and_hasher(
1389 Self::capacity_from_size_hint(into_iter.size_hint()),
1390 H::default(),
1391 );
1392 into_iter.for_each(|e| {
1393 let _result = hashindex.insert_sync(e.0, e.1);
1394 });
1395 hashindex
1396 }
1397}
1398
impl<K, V, H> HashTable<K, V, H, (), INDEX> for HashIndex<K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    /// Returns a reference to the [`BuildHasher`] instance.
    #[inline]
    fn hasher(&self) -> &H {
        &self.build_hasher
    }

    /// Pushes a retired bucket array onto the garbage chain for deferred reclamation.
    #[inline]
    fn defer_reclaim(&self, bucket_array: Owned<BucketArray<K, V, (), INDEX>>, guard: &Guard) {
        guard.accelerate();
        // Drop any garbage that has already become unreachable before adding more.
        self.reclaim_memory();
        // Record the epoch at which the array was retired.
        self.garbage_epoch.swap(u8::from(guard.epoch()), Release);
        let bucket_array_ptr = bucket_array.into_raw();
        // Make the retired array the new head of the garbage chain; if the chain was empty
        // there is nothing left to link.
        let Some(prev_head) = get_owned(self.garbage_chain.swap(bucket_array_ptr, AcqRel, guard))
        else {
            return;
        };
        // The bucket array will be dropped when the epoch enters the next generation.
        if let Some(bucket_array) = deref_unchecked(bucket_array_ptr) {
            bucket_array
                .linked_array_var()
                .swap(prev_head.into_raw(), Release, guard);
        }
    }

    /// Returns a reference to the atomic pointer to the current bucket array.
    #[inline]
    fn bucket_array_var(&self) -> &AtomicRaw<BucketArray<K, V, (), INDEX>> {
        &self.bucket_array
    }

    /// Returns a reference to the minimum-capacity counter.
    #[inline]
    fn minimum_capacity_var(&self) -> &AtomicUsize {
        &self.minimum_capacity
    }
}
1437
1438impl<K, V, H> PartialEq for HashIndex<K, V, H>
1439where
1440 K: Eq + Hash,
1441 V: PartialEq,
1442 H: BuildHasher,
1443{
1444 #[inline]
1445 fn eq(&self, other: &Self) -> bool {
1446 self.reclaim_memory();
1447 let guard = Guard::new();
1448 if !self
1449 .iter(&guard)
1450 .any(|(k, v)| other.peek_with(k, |_, ov| v == ov) != Some(true))
1451 {
1452 return !other
1453 .iter(&guard)
1454 .any(|(k, v)| self.peek_with(k, |_, sv| v == sv) != Some(true));
1455 }
1456 false
1457 }
1458}
1459
1460impl<'h, K, V, H> Entry<'h, K, V, H>
1461where
1462 K: Eq + Hash,
1463 H: BuildHasher,
1464{
1465 /// Ensures a value is in the entry by inserting the supplied instance if empty.
1466 ///
1467 /// # Examples
1468 ///
1469 /// ```
1470 /// use scc::HashIndex;
1471 ///
1472 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1473 ///
1474 /// hashindex.entry_sync(3).or_insert(7);
1475 /// assert_eq!(hashindex.peek_with(&3, |_, v| *v), Some(7));
1476 /// ```
1477 #[inline]
1478 pub fn or_insert(self, val: V) -> OccupiedEntry<'h, K, V, H> {
1479 self.or_insert_with(|| val)
1480 }
1481
1482 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1483 ///
1484 /// # Examples
1485 ///
1486 /// ```
1487 /// use scc::HashIndex;
1488 ///
1489 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1490 ///
1491 /// hashindex.entry_sync(19).or_insert_with(|| 5);
1492 /// assert_eq!(hashindex.peek_with(&19, |_, v| *v), Some(5));
1493 /// ```
1494 #[inline]
1495 pub fn or_insert_with<F: FnOnce() -> V>(self, constructor: F) -> OccupiedEntry<'h, K, V, H> {
1496 self.or_insert_with_key(|_| constructor())
1497 }
1498
1499 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1500 ///
1501 /// The reference to the moved key is provided, therefore cloning or copying the key is
1502 /// unnecessary.
1503 ///
1504 /// # Examples
1505 ///
1506 /// ```
1507 /// use scc::HashIndex;
1508 ///
1509 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1510 ///
1511 /// hashindex.entry_sync(11).or_insert_with_key(|k| if *k == 11 { 7 } else { 3 });
1512 /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), Some(7));
1513 /// ```
1514 #[inline]
1515 pub fn or_insert_with_key<F: FnOnce(&K) -> V>(
1516 self,
1517 constructor: F,
1518 ) -> OccupiedEntry<'h, K, V, H> {
1519 match self {
1520 Self::Occupied(o) => o,
1521 Self::Vacant(v) => {
1522 let val = constructor(v.key());
1523 v.insert_entry(val)
1524 }
1525 }
1526 }
1527
1528 /// Returns a reference to the key of this entry.
1529 ///
1530 /// # Examples
1531 ///
1532 /// ```
1533 /// use scc::HashIndex;
1534 ///
1535 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1536 /// assert_eq!(hashindex.entry_sync(31).key(), &31);
1537 /// ```
1538 #[inline]
1539 pub fn key(&self) -> &K {
1540 match self {
1541 Self::Occupied(o) => o.key(),
1542 Self::Vacant(v) => v.key(),
1543 }
1544 }
1545
1546 /// Provides in-place mutable access to an occupied entry.
1547 ///
1548 /// # Safety
1549 ///
1550 /// The caller has to make sure that there are no readers of the entry, e.g., a reader keeping
1551 /// a reference to the entry via [`HashIndex::iter`], [`HashIndex::peek`], or
1552 /// [`HashIndex::peek_with`], unless an instance of `V` can be safely read when there is a
1553 /// single writer, e.g., `V = [u8; 32]`.
1554 ///
1555 /// # Examples
1556 ///
1557 /// ```
1558 /// use scc::HashIndex;
1559 ///
1560 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1561 ///
1562 /// unsafe {
1563 /// hashindex.entry_sync(37).and_modify(|v| { *v += 1 }).or_insert(47);
1564 /// }
1565 /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(47));
1566 ///
1567 /// unsafe {
1568 /// hashindex.entry_sync(37).and_modify(|v| { *v += 1 }).or_insert(3);
1569 /// }
1570 /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(48));
1571 /// ```
1572 #[inline]
1573 #[must_use]
1574 pub unsafe fn and_modify<F>(self, f: F) -> Self
1575 where
1576 F: FnOnce(&mut V),
1577 {
1578 unsafe {
1579 match self {
1580 Self::Occupied(mut o) => {
1581 f(o.get_mut());
1582 Self::Occupied(o)
1583 }
1584 Self::Vacant(_) => self,
1585 }
1586 }
1587 }
1588}
1589
1590impl<'h, K, V, H> Entry<'h, K, V, H>
1591where
1592 K: Eq + Hash,
1593 V: Default,
1594 H: BuildHasher,
1595{
1596 /// Ensures a value is in the entry by inserting the default value if empty.
1597 ///
1598 /// # Examples
1599 ///
1600 /// ```
1601 /// use scc::HashIndex;
1602 ///
1603 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1604 /// hashindex.entry_sync(11).or_default();
1605 /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), Some(0));
1606 /// ```
1607 #[inline]
1608 pub fn or_default(self) -> OccupiedEntry<'h, K, V, H> {
1609 match self {
1610 Self::Occupied(o) => o,
1611 Self::Vacant(v) => v.insert_entry(Default::default()),
1612 }
1613 }
1614}
1615
1616impl<K, V, H> Debug for Entry<'_, K, V, H>
1617where
1618 K: Debug + Eq + Hash,
1619 V: Debug,
1620 H: BuildHasher,
1621{
1622 #[inline]
1623 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1624 match self {
1625 Self::Vacant(v) => f.debug_tuple("Entry").field(v).finish(),
1626 Self::Occupied(o) => f.debug_tuple("Entry").field(o).finish(),
1627 }
1628 }
1629}
1630
impl<'h, K, V, H> OccupiedEntry<'h, K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    /// Gets a reference to the key in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert_eq!(hashindex.entry_sync(29).or_default().key(), &29);
    /// ```
    #[inline]
    #[must_use]
    pub fn key(&self) -> &K {
        // `entry` yields the `(key, value)` pair; expose the key half.
        self.locked_bucket.entry(&self.entry_ptr).0
    }

    /// Marks that the entry is removed from the [`HashIndex`].
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry_sync(11).or_insert(17);
    ///
    /// if let Entry::Occupied(o) = hashindex.entry_sync(11) {
    ///     o.remove_entry();
    /// };
    /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), None);
    /// ```
    #[inline]
    pub fn remove_entry(mut self) {
        // The entry is only marked removed; the memory is reclaimed later once no reader can
        // reach it.
        self.locked_bucket
            .mark_removed(self.hashindex, &mut self.entry_ptr);
    }

    /// Gets a reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry_sync(19).or_insert(11);
    ///
    /// if let Entry::Occupied(o) = hashindex.entry_sync(19) {
    ///     assert_eq!(o.get(), &11);
    /// };
    /// ```
    #[inline]
    #[must_use]
    pub fn get(&self) -> &V {
        // `entry` yields the `(key, value)` pair; expose the value half.
        self.locked_bucket.entry(&self.entry_ptr).1
    }

    /// Gets a mutable reference to the value in the entry.
    ///
    /// # Safety
    ///
    /// The caller has to make sure that there are no readers of the entry, e.g., a reader keeping
    /// a reference to the entry via [`HashIndex::iter`], [`HashIndex::peek`], or
    /// [`HashIndex::peek_with`], unless an instance of `V` can be safely read when there is a
    /// single writer, e.g., `V = [u8; 32]`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry_sync(37).or_insert(11);
    ///
    /// if let Entry::Occupied(mut o) = hashindex.entry_sync(37) {
    ///     // Safety: `u32` can be safely read while being modified.
    ///     unsafe { *o.get_mut() += 18; }
    ///     assert_eq!(*o.get(), 29);
    /// }
    ///
    /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(29));
    /// ```
    #[inline]
    pub unsafe fn get_mut(&mut self) -> &mut V {
        self.locked_bucket.entry_mut(&mut self.entry_ptr).1
    }

    /// Gets the next closest occupied entry after removing the entry.
    ///
    /// [`HashIndex::begin_async`] and this method together enable the [`OccupiedEntry`] to
    /// effectively act as a mutable iterator over entries. The method never acquires more than one
    /// lock even when it searches other buckets for the next closest occupied entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 0).is_ok());
    ///
    /// let second_entry_future = hashindex.begin_sync().unwrap().remove_and_async();
    /// ```
    #[inline]
    pub async fn remove_and_async(self) -> Option<OccupiedEntry<'h, K, V, H>> {
        let hashindex = self.hashindex;
        // Work on a clone of the cursor so it can be advanced past the removed entry.
        let mut entry_ptr = self.entry_ptr.clone();
        let guard = Guard::new();
        self.locked_bucket
            .writer
            .mark_removed(&mut entry_ptr, &guard);
        // Record that the bucket now contains removed entries awaiting cleanup.
        self.locked_bucket.set_has_garbage(&guard);
        if let Some(locked_bucket) = self
            .locked_bucket
            .next_async(hashindex, &mut entry_ptr)
            .await
        {
            return Some(OccupiedEntry {
                hashindex,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }

    /// Gets the next closest occupied entry after removing the entry.
    ///
    /// [`HashIndex::begin_sync`] and this method together enable the [`OccupiedEntry`] to
    /// effectively act as a mutable iterator over entries. The method never acquires more than one
    /// lock even when it searches other buckets for the next closest occupied entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 0).is_ok());
    ///
    /// let first_entry = hashindex.begin_sync().unwrap();
    /// let first_key = *first_entry.key();
    /// let second_entry = first_entry.remove_and_sync().unwrap();
    /// assert_eq!(hashindex.len(), 1);
    ///
    /// let second_key = *second_entry.key();
    ///
    /// assert!(second_entry.remove_and_sync().is_none());
    /// assert_eq!(first_key + second_key, 3);
    /// assert_eq!(hashindex.len(), 0);
    /// ```
    #[inline]
    #[must_use]
    pub fn remove_and_sync(self) -> Option<Self> {
        let hashindex = self.hashindex;
        // Work on a clone of the cursor so it can be advanced past the removed entry.
        let mut entry_ptr = self.entry_ptr.clone();
        let guard = Guard::new();
        self.locked_bucket
            .writer
            .mark_removed(&mut entry_ptr, &guard);
        // Record that the bucket now contains removed entries awaiting cleanup.
        self.locked_bucket.set_has_garbage(&guard);
        if let Some(locked_bucket) = self.locked_bucket.next_sync(hashindex, &mut entry_ptr) {
            return Some(OccupiedEntry {
                hashindex,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }

    /// Gets the next closest occupied entry.
    ///
    /// [`HashIndex::begin_async`] and this method together enable the [`OccupiedEntry`] to
    /// effectively act as a mutable iterator over entries. The method never acquires more than one
    /// lock even when it searches other buckets for the next closest occupied entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 0).is_ok());
    ///
    /// let second_entry_future = hashindex.begin_sync().unwrap().next_async();
    /// ```
    #[inline]
    pub async fn next_async(self) -> Option<OccupiedEntry<'h, K, V, H>> {
        let hashindex = self.hashindex;
        let mut entry_ptr = self.entry_ptr.clone();
        if let Some(locked_bucket) = self
            .locked_bucket
            .next_async(hashindex, &mut entry_ptr)
            .await
        {
            return Some(OccupiedEntry {
                hashindex,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }

    /// Gets the next closest occupied entry.
    ///
    /// [`HashIndex::begin_sync`] and this method together enable the [`OccupiedEntry`] to
    /// effectively act as a mutable iterator over entries. The method never acquires more than one
    /// lock even when it searches other buckets for the next closest occupied entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert_sync(1, 0).is_ok());
    /// assert!(hashindex.insert_sync(2, 0).is_ok());
    ///
    /// let first_entry = hashindex.begin_sync().unwrap();
    /// let first_key = *first_entry.key();
    /// let second_entry = first_entry.next_sync().unwrap();
    /// let second_key = *second_entry.key();
    ///
    /// assert!(second_entry.next_sync().is_none());
    /// assert_eq!(first_key + second_key, 3);
    /// ```
    #[inline]
    #[must_use]
    pub fn next_sync(self) -> Option<Self> {
        let hashindex = self.hashindex;
        let mut entry_ptr = self.entry_ptr.clone();
        if let Some(locked_bucket) = self.locked_bucket.next_sync(hashindex, &mut entry_ptr) {
            return Some(OccupiedEntry {
                hashindex,
                locked_bucket,
                entry_ptr,
            });
        }
        None
    }
}
1897
impl<K, V, H> OccupiedEntry<'_, K, V, H>
where
    K: Clone + Eq + Hash,
    H: BuildHasher,
{
    /// Updates the entry by inserting a new entry and marking the existing entry removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry_sync(37).or_insert(11);
    ///
    /// if let Entry::Occupied(mut o) = hashindex.entry_sync(37) {
    ///     o.update(29);
    ///     assert_eq!(o.get(), &29);
    /// }
    ///
    /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(29));
    /// ```
    #[inline]
    pub fn update(&mut self, val: V) {
        // The new entry is inserted into the same locked bucket under the same partial hash
        // before the old one is marked removed.
        // NOTE(review): presumably this ordering ensures concurrent readers never observe the
        // key as absent during the update — confirm against the bucket implementation.
        let key = self.key().clone();
        let partial_hash = self.entry_ptr.partial_hash(&self.locked_bucket.writer);
        let entry_ptr = self
            .locked_bucket
            .insert(u64::from(partial_hash), (key, val));
        let guard = Guard::new();
        self.locked_bucket
            .writer
            .mark_removed(&mut self.entry_ptr, &guard);
        self.locked_bucket.set_has_garbage(&guard);
        // Point this `OccupiedEntry` at the freshly inserted entry.
        self.entry_ptr = entry_ptr;
    }
}
1937
1938impl<K, V, H> Debug for OccupiedEntry<'_, K, V, H>
1939where
1940 K: Debug + Eq + Hash,
1941 V: Debug,
1942 H: BuildHasher,
1943{
1944 #[inline]
1945 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1946 f.debug_struct("OccupiedEntry")
1947 .field("key", self.key())
1948 .field("value", self.get())
1949 .finish_non_exhaustive()
1950 }
1951}
1952
1953impl<K, V, H> Deref for OccupiedEntry<'_, K, V, H>
1954where
1955 K: Debug + Eq + Hash,
1956 V: Debug,
1957 H: BuildHasher,
1958{
1959 type Target = V;
1960
1961 #[inline]
1962 fn deref(&self) -> &Self::Target {
1963 self.get()
1964 }
1965}
1966
1967impl<'h, K, V, H> VacantEntry<'h, K, V, H>
1968where
1969 K: Eq + Hash,
1970 H: BuildHasher,
1971{
1972 /// Gets a reference to the key.
1973 ///
1974 /// # Examples
1975 ///
1976 /// ```
1977 /// use scc::HashIndex;
1978 ///
1979 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1980 /// assert_eq!(hashindex.entry_sync(11).key(), &11);
1981 /// ```
1982 #[inline]
1983 pub fn key(&self) -> &K {
1984 &self.key
1985 }
1986
1987 /// Takes ownership of the key.
1988 ///
1989 /// # Examples
1990 ///
1991 /// ```
1992 /// use scc::HashIndex;
1993 /// use scc::hash_index::Entry;
1994 ///
1995 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1996 ///
1997 /// if let Entry::Vacant(v) = hashindex.entry_sync(17) {
1998 /// assert_eq!(v.into_key(), 17);
1999 /// };
2000 /// ```
2001 #[inline]
2002 pub fn into_key(self) -> K {
2003 self.key
2004 }
2005
2006 /// Sets the value of the entry with its key, and returns an [`OccupiedEntry`].
2007 ///
2008 /// # Examples
2009 ///
2010 /// ```
2011 /// use scc::HashIndex;
2012 /// use scc::hash_index::Entry;
2013 ///
2014 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
2015 ///
2016 /// if let Entry::Vacant(o) = hashindex.entry_sync(19) {
2017 /// o.insert_entry(29);
2018 /// }
2019 ///
2020 /// assert_eq!(hashindex.peek_with(&19, |_, v| *v), Some(29));
2021 /// ```
2022 #[inline]
2023 pub fn insert_entry(self, val: V) -> OccupiedEntry<'h, K, V, H> {
2024 let entry_ptr = self.locked_bucket.insert(self.hash, (self.key, val));
2025 OccupiedEntry {
2026 hashindex: self.hashindex,
2027 locked_bucket: self.locked_bucket,
2028 entry_ptr,
2029 }
2030 }
2031}
2032
2033impl<K, V, H> Debug for VacantEntry<'_, K, V, H>
2034where
2035 K: Debug + Eq + Hash,
2036 V: Debug,
2037 H: BuildHasher,
2038{
2039 #[inline]
2040 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2041 f.debug_tuple("VacantEntry").field(self.key()).finish()
2042 }
2043}
2044
2045impl<K, V, H> Reserve<'_, K, V, H>
2046where
2047 K: Eq + Hash,
2048 H: BuildHasher,
2049{
2050 /// Returns the number of reserved slots.
2051 #[inline]
2052 #[must_use]
2053 pub fn additional_capacity(&self) -> usize {
2054 self.additional
2055 }
2056}
2057
2058impl<K, V, H> AsRef<HashIndex<K, V, H>> for Reserve<'_, K, V, H>
2059where
2060 K: Eq + Hash,
2061 H: BuildHasher,
2062{
2063 #[inline]
2064 fn as_ref(&self) -> &HashIndex<K, V, H> {
2065 self.hashindex
2066 }
2067}
2068
2069impl<K, V, H> Debug for Reserve<'_, K, V, H>
2070where
2071 K: Eq + Hash,
2072 H: BuildHasher,
2073{
2074 #[inline]
2075 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2076 f.debug_tuple("Reserve").field(&self.additional).finish()
2077 }
2078}
2079
impl<K, V, H> Deref for Reserve<'_, K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    type Target = HashIndex<K, V, H>;

    /// Dereferences to the [`HashIndex`] that holds the reservation.
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.hashindex
    }
}
2092
impl<K, V, H> Drop for Reserve<'_, K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    #[inline]
    fn drop(&mut self) {
        // Give back the reserved slots by lowering the minimum-capacity counter.
        let result = self
            .hashindex
            .minimum_capacity
            .fetch_sub(self.additional, Relaxed);
        debug_assert!(result >= self.additional);

        // With the reservation released, the table may now be eligible for shrinking.
        let guard = Guard::new();
        if let Some(current_array) = self.hashindex.bucket_array(&guard) {
            self.try_shrink(current_array, 0, &guard);
        }
    }
}
2112
// Manual implementation: `#[derive(Clone)]` would add `K: Clone`, `V: Clone`, and `H: Clone`
// bounds that are unnecessary here — every field is either a shared reference, a `usize`, or an
// `EntryPtr`.
impl<K, V, H> Clone for Iter<'_, K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    #[inline]
    fn clone(&self) -> Self {
        Self {
            hashindex: self.hashindex,
            bucket_array: self.bucket_array,
            index: self.index,
            bucket: self.bucket,
            entry_ptr: self.entry_ptr.clone(),
            guard: self.guard,
        }
    }
}
2130
2131impl<'h, K, V, H> Iter<'h, K, V, H>
2132where
2133 K: Eq + Hash,
2134 H: BuildHasher,
2135{
2136 /// Starts iteration.
2137 fn start(&mut self) -> Option<&'h BucketArray<K, V, (), INDEX>> {
2138 // Start scanning.
2139 let current_array = self.hashindex.bucket_array(self.guard)?;
2140 let array = if let Some(old_array) = current_array.linked_array(self.guard) {
2141 old_array
2142 } else {
2143 current_array
2144 };
2145 self.bucket_array.replace(array);
2146 self.bucket.replace(array.bucket(0));
2147 Some(array)
2148 }
2149}
2150
2151impl<K, V, H> Debug for Iter<'_, K, V, H>
2152where
2153 K: Eq + Hash,
2154 H: BuildHasher,
2155{
2156 #[inline]
2157 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2158 f.debug_struct("Iter")
2159 .field("bucket_index", &self.index)
2160 .field("position_in_bucket", &self.entry_ptr.pos())
2161 .finish()
2162 }
2163}
2164
2165impl<'h, K, V, H> Iterator for Iter<'h, K, V, H>
2166where
2167 K: Eq + Hash,
2168 H: BuildHasher,
2169{
2170 type Item = (&'h K, &'h V);
2171
2172 #[inline]
2173 fn next(&mut self) -> Option<Self::Item> {
2174 let mut array = if let Some(array) = self.bucket_array {
2175 array
2176 } else {
2177 self.start()?
2178 };
2179
2180 // Move to the next entry.
2181 loop {
2182 if let Some(bucket) = self.bucket {
2183 // Move to the next entry in the bucket.
2184 if self.entry_ptr.find_next(bucket) {
2185 let k = self.entry_ptr.key(array.data_block(self.index));
2186 let v = self.entry_ptr.val(array.data_block(self.index));
2187 return Some((k, v));
2188 }
2189 }
2190 self.entry_ptr = EntryPtr::null();
2191
2192 if self.index + 1 == array.len() {
2193 // Move to a newer bucket array.
2194 self.index = 0;
2195 let current_array = self.hashindex.bucket_array(self.guard)?;
2196 if self.bucket_array.is_some_and(|a| ptr::eq(a, current_array)) {
2197 // Finished scanning.
2198 self.bucket.take();
2199 break;
2200 }
2201
2202 if let Some(old_array) = current_array.linked_array(self.guard) {
2203 if self.bucket_array.is_some_and(|a| ptr::eq(a, old_array)) {
2204 // Start scanning the current array.
2205 array = current_array;
2206 } else {
2207 array = old_array;
2208 }
2209 } else {
2210 array = current_array;
2211 }
2212
2213 self.bucket_array.replace(array);
2214 self.bucket.replace(array.bucket(0));
2215 } else {
2216 self.index += 1;
2217 self.bucket.replace(array.bucket(self.index));
2218 }
2219 }
2220 None
2221 }
2222}
2223
// NOTE(review): `FusedIterator` requires `next` to keep returning `None` once it has returned
// `None`; verify that `Iter::next` cannot resume yielding entries after the scan has finished.
impl<K, V, H> FusedIterator for Iter<'_, K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
}
2230
// `Iter` holds shared references, a bucket index, and an `EntryPtr` (see `HashIndex::iter`), so
// it is declared unwind-safe whenever its type parameters are.
impl<K, V, H> UnwindSafe for Iter<'_, K, V, H>
where
    K: Eq + Hash + UnwindSafe,
    V: UnwindSafe,
    H: BuildHasher + UnwindSafe,
{
}