scc/hash_map.rs
1//! [`HashMap`] is a concurrent hash map.
2
3use std::collections::hash_map::RandomState;
4use std::fmt::{self, Debug};
5use std::hash::{BuildHasher, Hash};
6use std::mem::replace;
7use std::ops::{Deref, DerefMut, RangeInclusive};
8#[cfg(not(feature = "loom"))]
9use std::sync::atomic::AtomicUsize;
10use std::sync::atomic::Ordering::Relaxed;
11
12#[cfg(feature = "loom")]
13use loom::sync::atomic::AtomicUsize;
14use sdd::{AtomicShared, Guard, Shared, Tag};
15
16use super::Equivalent;
17use super::hash_table::bucket::{EntryPtr, MAP};
18use super::hash_table::bucket_array::BucketArray;
19use super::hash_table::{HashTable, LockedBucket};
20
21/// Scalable concurrent hash map.
22///
23/// [`HashMap`] is a concurrent hash map data structure optimized for highly concurrent workloads.
24/// [`HashMap`] has a dynamically sized array of buckets where a bucket is a fixed-size hash table
25/// with linear probing that can be expanded by allocating a linked list of smaller buckets when it
26/// is full.
27///
28/// ## The key features of [`HashMap`]
29///
30/// * Non-sharded: data is stored in a single array of entry buckets.
31/// * Non-blocking resizing: resizing does not block other threads or tasks.
32/// * Automatic resizing: grows or shrinks as needed.
33/// * Incremental resizing: entries in the old bucket array are incrementally relocated.
34/// * No busy waiting: no spin locks or hot loops to wait for desired resources.
35/// * Linearizability: [`HashMap`] manipulation methods are linearizable.
36///
37/// ## The key statistics for [`HashMap`]
38///
39/// * The expected size of metadata for a single entry: 2 bytes.
40/// * The expected number of atomic write operations required for an operation on a single key: 2.
41/// * The expected number of atomic variables accessed during a single key operation: 2.
42/// * The number of entries managed by a single bucket without a linked list: 32.
43/// * The expected maximum linked list length when a resize is triggered: log(capacity) / 8.
44///
45/// ## Locking behavior
46///
47/// ### Bucket access
48///
49/// Bucket arrays are protected by [`sdd`], thus allowing lock-free access to them.
50///
51/// ### Entry access
52///
53/// Each read/write access to an entry is serialized by the read-write lock in the bucket containing
/// the entry. There are no container-level locks; therefore, the larger the [`HashMap`] gets, the
/// lower the chance that the bucket-level lock will be contended.
56///
57/// ### Resize
58///
59/// Resizing of the [`HashMap`] is non-blocking and lock-free; resizing does not block any other
60/// read/write access to the [`HashMap`] or resizing attempts. Resizing is analogous to pushing a
61/// new bucket array into a lock-free stack. Each entry in the old bucket array will be
62/// incrementally relocated to the new bucket array upon future access to the [`HashMap`], and the old
63/// bucket array is dropped when it becomes empty and unreachable.
64///
65/// ### Synchronous methods in an asynchronous code block
66///
67/// It is generally not recommended to use blocking methods, such as [`HashMap::insert_sync`], in an
68/// asynchronous code block or [`poll`](std::future::Future::poll), since it may lead to deadlocks
69/// or performance degradation.
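///
/// The example below is a minimal sketch of the preferred pattern, restricted to the asynchronous
/// methods documented on this type (`insert_async` and `read_async`); the future is only
/// constructed here, not executed:
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::default();
///
/// // Inside an asynchronous context, prefer the `*_async` methods over their `*_sync`
/// // counterparts.
/// let future = async {
///     assert!(hashmap.insert_async(1, 10).await.is_ok());
///     assert_eq!(hashmap.read_async(&1, |_, v| *v).await, Some(10));
/// };
/// ```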
70///
71/// ## Unwind safety
72///
73/// [`HashMap`] is impervious to out-of-memory errors and panics in user-specified code under one
74/// condition: `H::Hasher::hash`, `K::drop` and `V::drop` must not panic.
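///
/// # Examples
///
/// A brief synchronous usage sketch, restricted to methods documented on this type
/// (`insert_sync`, `entry_sync`, `read_sync`, and `remove_sync`):
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::default();
///
/// assert!(hashmap.insert_sync(1, 10).is_ok());
/// hashmap.entry_sync(1).and_modify(|v| *v += 1).or_insert(0);
/// assert_eq!(hashmap.read_sync(&1, |_, v| *v), Some(11));
/// assert_eq!(hashmap.remove_sync(&1), Some((1, 11)));
/// ```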
75pub struct HashMap<K, V, H = RandomState>
76where
77 H: BuildHasher,
78{
79 bucket_array: AtomicShared<BucketArray<K, V, (), MAP>>,
80 minimum_capacity: AtomicUsize,
81 build_hasher: H,
82}
83
84/// [`Entry`] represents a single entry in a [`HashMap`].
85pub enum Entry<'h, K, V, H = RandomState>
86where
87 H: BuildHasher,
88{
89 /// An occupied entry.
90 Occupied(OccupiedEntry<'h, K, V, H>),
91 /// A vacant entry.
92 Vacant(VacantEntry<'h, K, V, H>),
93}
94
95/// [`OccupiedEntry`] is a view into an occupied entry in a [`HashMap`].
96pub struct OccupiedEntry<'h, K, V, H = RandomState>
97where
98 H: BuildHasher,
99{
100 hashmap: &'h HashMap<K, V, H>,
101 locked_bucket: LockedBucket<K, V, (), MAP>,
102 entry_ptr: EntryPtr<K, V, MAP>,
103}
104
105/// [`VacantEntry`] is a view into a vacant entry in a [`HashMap`].
106pub struct VacantEntry<'h, K, V, H = RandomState>
107where
108 H: BuildHasher,
109{
110 hashmap: &'h HashMap<K, V, H>,
111 key: K,
112 hash: u64,
113 locked_bucket: LockedBucket<K, V, (), MAP>,
114}
115
/// [`ConsumableEntry`] is a view into an occupied entry encountered while iterating over the
/// entries of a [`HashMap`].
118pub struct ConsumableEntry<'b, K, V> {
119 /// Holds an exclusive lock on the entry bucket.
120 locked_bucket: &'b mut LockedBucket<K, V, (), MAP>,
121 /// Pointer to the entry.
122 entry_ptr: &'b mut EntryPtr<K, V, MAP>,
    /// Tracks whether an entry has been removed through this view.
124 remove_probe: &'b mut bool,
125}
126
127/// [`ReplaceResult`] is the result type of the [`HashMap::replace_async`] and
128/// [`HashMap::replace_sync`] methods.
129pub enum ReplaceResult<'h, K, V, H = RandomState>
130where
131 H: BuildHasher,
132{
    /// The key was replaced; contains the [`OccupiedEntry`] and the previous key.
134 Replaced(OccupiedEntry<'h, K, V, H>, K),
135 /// The key did not exist in the [`HashMap`].
136 ///
137 /// An [`OccupiedEntry`] can be created from the [`VacantEntry`].
138 NotReplaced(VacantEntry<'h, K, V, H>),
139}
140
141/// [`Reserve`] keeps the capacity of the associated [`HashMap`] higher than a certain level.
142///
143/// The [`HashMap`] does not shrink the capacity below the reserved capacity.
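///
/// # Examples
///
/// A short sketch mirroring the [`HashMap::reserve`] example:
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::with_capacity(1000);
/// assert_eq!(hashmap.capacity(), 1024);
///
/// let reserve = hashmap.reserve(10000);
/// assert!(reserve.is_some());
/// assert_eq!(hashmap.capacity(), 16384);
///
/// // The capacity does not shrink below the reserved level until `reserve` is dropped.
/// drop(reserve);
/// assert_eq!(hashmap.capacity(), 1024);
/// ```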
144pub struct Reserve<'h, K, V, H = RandomState>
145where
146 K: Eq + Hash,
147 H: BuildHasher,
148{
149 hashmap: &'h HashMap<K, V, H>,
150 additional: usize,
151}
152
153impl<K, V, H> HashMap<K, V, H>
154where
155 H: BuildHasher,
156{
157 /// Creates an empty [`HashMap`] with the given [`BuildHasher`].
158 ///
159 /// # Examples
160 ///
161 /// ```
162 /// use scc::HashMap;
163 /// use std::collections::hash_map::RandomState;
164 ///
165 /// let hashmap: HashMap<u64, u32, RandomState> = HashMap::with_hasher(RandomState::new());
166 /// ```
167 #[cfg(not(feature = "loom"))]
168 #[inline]
169 pub const fn with_hasher(build_hasher: H) -> Self {
170 Self {
171 bucket_array: AtomicShared::null(),
172 minimum_capacity: AtomicUsize::new(0),
173 build_hasher,
174 }
175 }
176
177 /// Creates an empty [`HashMap`] with the given [`BuildHasher`].
178 #[cfg(feature = "loom")]
179 #[inline]
180 pub fn with_hasher(build_hasher: H) -> Self {
181 Self {
182 bucket_array: AtomicShared::null(),
183 minimum_capacity: AtomicUsize::new(0),
184 build_hasher,
185 }
186 }
187
188 /// Creates an empty [`HashMap`] with the specified capacity and [`BuildHasher`].
189 ///
190 /// The actual capacity is equal to or greater than `capacity` unless it is greater than
191 /// `1 << (usize::BITS - 1)`.
192 ///
193 /// # Examples
194 ///
195 /// ```
196 /// use scc::HashMap;
197 /// use std::collections::hash_map::RandomState;
198 ///
199 /// let hashmap: HashMap<u64, u32, RandomState> =
200 /// HashMap::with_capacity_and_hasher(1000, RandomState::new());
201 ///
202 /// let result = hashmap.capacity();
203 /// assert_eq!(result, 1024);
204 /// ```
205 #[inline]
206 pub fn with_capacity_and_hasher(capacity: usize, build_hasher: H) -> Self {
207 let (array, minimum_capacity) = if capacity == 0 {
208 (AtomicShared::null(), AtomicUsize::new(0))
209 } else {
210 let array = unsafe {
211 Shared::new_unchecked(BucketArray::<K, V, (), MAP>::new(
212 capacity,
213 AtomicShared::null(),
214 ))
215 };
216 let minimum_capacity = array.num_slots();
217 (
218 AtomicShared::from(array),
219 AtomicUsize::new(minimum_capacity),
220 )
221 };
222 Self {
223 bucket_array: array,
224 minimum_capacity,
225 build_hasher,
226 }
227 }
228}
229
230impl<K, V, H> HashMap<K, V, H>
231where
232 K: Eq + Hash,
233 H: BuildHasher,
234{
235 /// Temporarily increases the minimum capacity of the [`HashMap`] to prevent shrinking.
236 ///
237 /// A [`Reserve`] is returned if the [`HashMap`] can increase the minimum capacity. The
238 /// increased capacity is not exclusively owned by the returned [`Reserve`], allowing others to
239 /// benefit from it. The memory for the additional space may not be immediately allocated if
    /// the [`HashMap`] is empty or currently being resized; however, once the memory is allocated,
    /// the capacity will not shrink below the reserved capacity until the returned
    /// [`Reserve`] is dropped.
243 ///
244 /// # Errors
245 ///
    /// Returns `None` if an excessively large additional capacity is requested.
247 ///
248 /// # Examples
249 ///
250 /// ```
251 /// use scc::HashMap;
252 ///
253 /// let hashmap: HashMap<usize, usize> = HashMap::with_capacity(1000);
254 /// assert_eq!(hashmap.capacity(), 1024);
255 ///
256 /// let reserved = hashmap.reserve(10000);
257 /// assert!(reserved.is_some());
258 /// assert_eq!(hashmap.capacity(), 16384);
259 ///
260 /// assert!(hashmap.reserve(usize::MAX).is_none());
261 /// assert_eq!(hashmap.capacity(), 16384);
262 ///
263 /// for i in 0..16 {
264 /// assert!(hashmap.insert_sync(i, i).is_ok());
265 /// }
266 /// drop(reserved);
267 ///
268 /// assert_eq!(hashmap.capacity(), 1024);
269 /// ```
270 #[inline]
271 pub fn reserve(&self, additional_capacity: usize) -> Option<Reserve<'_, K, V, H>> {
272 let additional = self.reserve_capacity(additional_capacity);
273 if additional == 0 {
274 None
275 } else {
276 Some(Reserve {
277 hashmap: self,
278 additional,
279 })
280 }
281 }
282
283 /// Gets the entry associated with the given key in the map for in-place manipulation.
284 ///
285 /// # Examples
286 ///
287 /// ```
288 /// use scc::HashMap;
289 ///
290 /// let hashmap: HashMap<char, u32> = HashMap::default();
291 ///
292 /// let future_entry = hashmap.entry_async('b');
293 /// ```
294 #[inline]
295 pub async fn entry_async(&self, key: K) -> Entry<'_, K, V, H> {
296 let hash = self.hash(&key);
297 let locked_bucket = self.writer_async(hash).await;
298 let entry_ptr = locked_bucket.search(&key, hash);
299 if entry_ptr.is_valid() {
300 Entry::Occupied(OccupiedEntry {
301 hashmap: self,
302 locked_bucket,
303 entry_ptr,
304 })
305 } else {
306 let vacant_entry = VacantEntry {
307 hashmap: self,
308 key,
309 hash,
310 locked_bucket,
311 };
312 Entry::Vacant(vacant_entry)
313 }
314 }
315
316 /// Gets the entry associated with the given key in the map for in-place manipulation.
317 ///
318 /// # Examples
319 ///
320 /// ```
321 /// use scc::HashMap;
322 ///
323 /// let hashmap: HashMap<char, u32> = HashMap::default();
324 ///
325 /// for ch in "a short treatise on fungi".chars() {
326 /// hashmap.entry_sync(ch).and_modify(|counter| *counter += 1).or_insert(1);
327 /// }
328 ///
329 /// assert_eq!(hashmap.read_sync(&'s', |_, v| *v), Some(2));
330 /// assert_eq!(hashmap.read_sync(&'t', |_, v| *v), Some(3));
331 /// assert!(hashmap.read_sync(&'y', |_, v| *v).is_none());
332 /// ```
333 #[inline]
334 pub fn entry_sync(&self, key: K) -> Entry<'_, K, V, H> {
335 let hash = self.hash(&key);
336 let locked_bucket = self.writer_sync(hash);
337 let entry_ptr = locked_bucket.search(&key, hash);
338 if entry_ptr.is_valid() {
339 Entry::Occupied(OccupiedEntry {
340 hashmap: self,
341 locked_bucket,
342 entry_ptr,
343 })
344 } else {
345 let vacant_entry = VacantEntry {
346 hashmap: self,
347 key,
348 hash,
349 locked_bucket,
350 };
351 Entry::Vacant(vacant_entry)
352 }
353 }
354
355 /// Tries to get the entry associated with the given key in the map for in-place manipulation.
356 ///
357 /// Returns `None` if the entry could not be locked.
358 ///
359 /// # Examples
360 ///
361 /// ```
362 /// use scc::HashMap;
363 ///
364 /// let hashmap: HashMap<usize, usize> = HashMap::default();
365 ///
366 /// assert!(hashmap.insert_sync(0, 1).is_ok());
367 /// assert!(hashmap.try_entry(0).is_some());
368 /// ```
369 #[inline]
370 pub fn try_entry(&self, key: K) -> Option<Entry<'_, K, V, H>> {
371 let hash = self.hash(&key);
372 let guard = Guard::new();
373 let locked_bucket = self.try_reserve_bucket(hash, &guard)?;
374 let entry_ptr = locked_bucket.search(&key, hash);
375 if entry_ptr.is_valid() {
376 Some(Entry::Occupied(OccupiedEntry {
377 hashmap: self,
378 locked_bucket,
379 entry_ptr,
380 }))
381 } else {
382 Some(Entry::Vacant(VacantEntry {
383 hashmap: self,
384 key,
385 hash,
386 locked_bucket,
387 }))
388 }
389 }
390
391 /// Begins iterating over entries by getting the first occupied entry.
392 ///
393 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next_async`] can act as
394 /// a mutable iterator over entries.
395 ///
396 /// # Examples
397 ///
398 /// ```
399 /// use scc::HashMap;
400 ///
401 /// let hashmap: HashMap<char, u32> = HashMap::default();
402 ///
403 /// let future_entry = hashmap.begin_async();
404 /// ```
405 #[inline]
406 pub async fn begin_async(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
407 self.any_async(|_, _| true).await
408 }
409
410 /// Begins iterating over entries by getting the first occupied entry.
411 ///
412 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next_sync`] can act as a
413 /// mutable iterator over entries.
414 ///
415 /// # Examples
416 ///
417 /// ```
418 /// use scc::HashMap;
419 ///
420 /// let hashmap: HashMap<u64, u32> = HashMap::default();
421 ///
422 /// assert!(hashmap.insert_sync(1, 0).is_ok());
423 ///
424 /// let mut first_entry = hashmap.begin_sync().unwrap();
425 /// *first_entry.get_mut() = 2;
426 ///
427 /// assert!(first_entry.next_sync().is_none());
428 /// assert_eq!(hashmap.read_sync(&1, |_, v| *v), Some(2));
429 /// ```
430 #[inline]
431 pub fn begin_sync(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
432 self.any_sync(|_, _| true)
433 }
434
435 /// Finds any entry satisfying the supplied predicate for in-place manipulation.
436 ///
437 /// # Examples
438 ///
439 /// ```
440 /// use scc::HashMap;
441 ///
442 /// let hashmap: HashMap<u64, u32> = HashMap::default();
443 ///
444 /// let future_entry = hashmap.any_async(|k, _| *k == 2);
445 /// ```
446 #[inline]
447 pub async fn any_async<P: FnMut(&K, &V) -> bool>(
448 &self,
449 mut pred: P,
450 ) -> Option<OccupiedEntry<'_, K, V, H>> {
451 let mut entry = None;
452 self.for_each_writer_async(0, 0, |locked_bucket, _| {
453 let mut entry_ptr = EntryPtr::null();
454 while entry_ptr.move_to_next(&locked_bucket.writer) {
455 let (k, v) = locked_bucket.entry(&entry_ptr);
456 if pred(k, v) {
457 entry = Some(OccupiedEntry {
458 hashmap: self,
459 locked_bucket,
460 entry_ptr,
461 });
462 return true;
463 }
464 }
465 false
466 })
467 .await;
468 entry
469 }
470
471 /// Finds any entry satisfying the supplied predicate for in-place manipulation.
472 ///
473 /// # Examples
474 ///
475 /// ```
476 /// use scc::HashMap;
477 ///
478 /// let hashmap: HashMap<u64, u32> = HashMap::default();
479 ///
480 /// assert!(hashmap.insert_sync(1, 0).is_ok());
481 /// assert!(hashmap.insert_sync(2, 3).is_ok());
482 ///
483 /// let mut entry = hashmap.any_sync(|k, _| *k == 2).unwrap();
484 /// assert_eq!(*entry.get(), 3);
485 /// ```
486 #[inline]
487 pub fn any_sync<P: FnMut(&K, &V) -> bool>(
488 &self,
489 mut pred: P,
490 ) -> Option<OccupiedEntry<'_, K, V, H>> {
491 let mut entry = None;
492 let guard = Guard::new();
493 self.for_each_writer_sync(0, 0, &guard, |locked_bucket, _| {
494 let mut entry_ptr = EntryPtr::null();
495 while entry_ptr.move_to_next(&locked_bucket.writer) {
496 let (k, v) = locked_bucket.entry(&entry_ptr);
497 if pred(k, v) {
498 entry = Some(OccupiedEntry {
499 hashmap: self,
500 locked_bucket,
501 entry_ptr,
502 });
503 return true;
504 }
505 }
506 false
507 });
508 entry
509 }
510
511 /// Inserts a key-value pair into the [`HashMap`].
512 ///
513 /// # Note
514 ///
515 /// If the key exists, the value is *not* updated. [`upsert_async`](Self::upsert_async)
516 /// provides a way to update the value if the key exists.
517 ///
518 /// # Errors
519 ///
520 /// Returns an error containing the supplied key-value pair if the key exists.
521 ///
522 /// # Examples
523 ///
524 /// ```
525 /// use scc::HashMap;
526 ///
527 /// let hashmap: HashMap<u64, u32> = HashMap::default();
528 /// let future_insert = hashmap.insert_async(11, 17);
529 /// ```
530 #[inline]
531 pub async fn insert_async(&self, key: K, val: V) -> Result<(), (K, V)> {
532 let hash = self.hash(&key);
533 let locked_bucket = self.writer_async(hash).await;
534 if locked_bucket.search(&key, hash).is_valid() {
535 Err((key, val))
536 } else {
537 locked_bucket.insert(hash, (key, val));
538 Ok(())
539 }
540 }
541
542 /// Inserts a key-value pair into the [`HashMap`].
543 ///
544 /// # Note
545 ///
546 /// If the key exists, the value is *not* updated. [`upsert_sync`](Self::upsert_sync)
547 /// provides a way to update the value if the key exists.
548 ///
549 /// # Errors
550 ///
551 /// Returns an error containing the supplied key-value pair if the key exists.
552 ///
553 /// # Examples
554 ///
555 /// ```
556 /// use scc::HashMap;
557 ///
558 /// let hashmap: HashMap<u64, u32> = HashMap::default();
559 ///
560 /// assert!(hashmap.insert_sync(1, 0).is_ok());
561 /// assert_eq!(hashmap.insert_sync(1, 1).unwrap_err(), (1, 1));
562 /// ```
563 #[inline]
564 pub fn insert_sync(&self, key: K, val: V) -> Result<(), (K, V)> {
565 let hash = self.hash(&key);
566 let locked_bucket = self.writer_sync(hash);
567 if locked_bucket.search(&key, hash).is_valid() {
568 Err((key, val))
569 } else {
570 locked_bucket.insert(hash, (key, val));
571 Ok(())
572 }
573 }
574
575 /// Upserts a key-value pair into the [`HashMap`].
576 ///
577 /// Returns the old value if the [`HashMap`] has this key present, or returns `None`.
578 ///
579 /// # Examples
580 ///
581 /// ```
582 /// use scc::HashMap;
583 ///
584 /// let hashmap: HashMap<u64, u32> = HashMap::default();
585 /// let future_upsert = hashmap.upsert_async(11, 17);
586 /// ```
587 #[inline]
588 pub async fn upsert_async(&self, key: K, val: V) -> Option<V> {
589 match self.entry_async(key).await {
590 Entry::Occupied(mut o) => Some(replace(o.get_mut(), val)),
591 Entry::Vacant(v) => {
592 v.insert_entry(val);
593 None
594 }
595 }
596 }
597
598 /// Upserts a key-value pair into the [`HashMap`].
599 ///
600 /// Returns the old value if the [`HashMap`] has this key present, or returns `None`.
601 ///
602 /// # Examples
603 ///
604 /// ```
605 /// use scc::HashMap;
606 ///
607 /// let hashmap: HashMap<u64, u32> = HashMap::default();
608 ///
609 /// assert!(hashmap.upsert_sync(1, 0).is_none());
610 /// assert_eq!(hashmap.upsert_sync(1, 1).unwrap(), 0);
611 /// assert_eq!(hashmap.read_sync(&1, |_, v| *v).unwrap(), 1);
612 /// ```
613 #[inline]
614 pub fn upsert_sync(&self, key: K, val: V) -> Option<V> {
615 match self.entry_sync(key) {
616 Entry::Occupied(mut o) => Some(replace(o.get_mut(), val)),
617 Entry::Vacant(v) => {
618 v.insert_entry(val);
619 None
620 }
621 }
622 }
623
624 /// Updates an existing key-value pair in-place.
625 ///
626 /// Returns `None` if the key does not exist.
627 ///
628 /// # Examples
629 ///
630 /// ```
631 /// use scc::HashMap;
632 ///
633 /// let hashmap: HashMap<u64, u32> = HashMap::default();
634 ///
635 /// assert!(hashmap.insert_sync(1, 0).is_ok());
636 /// let future_update = hashmap.update_async(&1, |_, v| { *v = 2; *v });
637 /// ```
638 #[inline]
639 pub async fn update_async<Q, U, R>(&self, key: &Q, updater: U) -> Option<R>
640 where
641 Q: Equivalent<K> + Hash + ?Sized,
642 U: FnOnce(&K, &mut V) -> R,
643 {
644 let hash = self.hash(key);
645 let mut locked_bucket = self.optional_writer_async(hash).await?;
646 let mut entry_ptr = locked_bucket.search(key, hash);
647 if entry_ptr.is_valid() {
648 let (k, v) = locked_bucket.entry_mut(&mut entry_ptr);
649 Some(updater(k, v))
650 } else {
651 None
652 }
653 }
654
655 /// Updates an existing key-value pair in-place.
656 ///
657 /// Returns `None` if the key does not exist.
658 ///
659 /// # Examples
660 ///
661 /// ```
662 /// use scc::HashMap;
663 ///
664 /// let hashmap: HashMap<u64, u32> = HashMap::default();
665 ///
666 /// assert!(hashmap.update_sync(&1, |_, _| true).is_none());
667 /// assert!(hashmap.insert_sync(1, 0).is_ok());
668 /// assert_eq!(hashmap.update_sync(&1, |_, v| { *v = 2; *v }).unwrap(), 2);
669 /// assert_eq!(hashmap.read_sync(&1, |_, v| *v).unwrap(), 2);
670 /// ```
671 #[inline]
672 pub fn update_sync<Q, U, R>(&self, key: &Q, updater: U) -> Option<R>
673 where
674 Q: Equivalent<K> + Hash + ?Sized,
675 U: FnOnce(&K, &mut V) -> R,
676 {
677 let hash = self.hash(key);
678 let mut locked_bucket = self.optional_writer_sync(hash)?;
679 let mut entry_ptr = locked_bucket.search(key, hash);
680 if entry_ptr.is_valid() {
681 let (k, v) = locked_bucket.entry_mut(&mut entry_ptr);
682 Some(updater(k, v))
683 } else {
684 None
685 }
686 }
687
688 /// Adds a key to the [`HashMap`], replacing the existing key, if any, that is equal to the
689 /// given one.
690 ///
691 /// # Examples
692 ///
693 /// ```
694 /// use std::cmp::{Eq, PartialEq};
695 /// use std::hash::{Hash, Hasher};
696 ///
697 /// use scc::HashMap;
698 /// use scc::hash_map::ReplaceResult;
699 ///
700 /// #[derive(Debug)]
701 /// struct MaybeEqual(u64, u64);
702 ///
703 /// impl Eq for MaybeEqual {}
704 ///
705 /// impl Hash for MaybeEqual {
706 /// fn hash<H: Hasher>(&self, state: &mut H) {
707 /// // Do not read `self.1`.
708 /// self.0.hash(state);
709 /// }
710 /// }
711 ///
712 /// impl PartialEq for MaybeEqual {
713 /// fn eq(&self, other: &Self) -> bool {
714 /// // Do not compare `self.1`.
715 /// self.0 == other.0
716 /// }
717 /// }
718 ///
719 /// let hashmap: HashMap<MaybeEqual, usize> = HashMap::default();
720 ///
721 /// async {
722 /// let ReplaceResult::NotReplaced(v) = hashmap.replace_async(MaybeEqual(11, 7)).await else {
723 /// unreachable!();
724 /// };
725 /// drop(v.insert_entry(17));
726 /// let ReplaceResult::Replaced(_, k) = hashmap.replace_async(MaybeEqual(11, 11)).await else {
727 /// unreachable!();
728 /// };
729 /// assert_eq!(k.1, 7);
730 /// };
731 /// ```
732 #[inline]
733 pub async fn replace_async(&self, key: K) -> ReplaceResult<'_, K, V, H> {
734 let hash = self.hash(&key);
735 let locked_bucket = self.writer_async(hash).await;
736 let mut entry_ptr = locked_bucket.search(&key, hash);
737 if entry_ptr.is_valid() {
738 let prev_key = replace(
739 &mut entry_ptr
740 .get_mut(locked_bucket.data_block, &locked_bucket.writer)
741 .0,
742 key,
743 );
744 ReplaceResult::Replaced(
745 OccupiedEntry {
746 hashmap: self,
747 locked_bucket,
748 entry_ptr,
749 },
750 prev_key,
751 )
752 } else {
753 ReplaceResult::NotReplaced(VacantEntry {
754 hashmap: self,
755 key,
756 hash,
757 locked_bucket,
758 })
759 }
760 }
761
762 /// Adds a key to the [`HashMap`], replacing the existing key, if any, that is equal to the
763 /// given one.
764 ///
765 /// # Examples
766 ///
767 /// ```
768 /// use std::cmp::{Eq, PartialEq};
769 /// use std::hash::{Hash, Hasher};
770 ///
771 /// use scc::HashMap;
772 /// use scc::hash_map::ReplaceResult;
773 ///
774 /// #[derive(Debug)]
775 /// struct MaybeEqual(u64, u64);
776 ///
777 /// impl Eq for MaybeEqual {}
778 ///
779 /// impl Hash for MaybeEqual {
780 /// fn hash<H: Hasher>(&self, state: &mut H) {
781 /// // Do not read `self.1`.
782 /// self.0.hash(state);
783 /// }
784 /// }
785 ///
786 /// impl PartialEq for MaybeEqual {
787 /// fn eq(&self, other: &Self) -> bool {
788 /// // Do not compare `self.1`.
789 /// self.0 == other.0
790 /// }
791 /// }
792 ///
793 /// let hashmap: HashMap<MaybeEqual, usize> = HashMap::default();
794 ///
795 /// let ReplaceResult::NotReplaced(v) = hashmap.replace_sync(MaybeEqual(11, 7)) else {
796 /// unreachable!();
797 /// };
798 /// drop(v.insert_entry(17));
799 /// let ReplaceResult::Replaced(_, k) = hashmap.replace_sync(MaybeEqual(11, 11)) else {
800 /// unreachable!();
801 /// };
802 /// assert_eq!(k.1, 7);
803 /// ```
804 #[inline]
805 pub fn replace_sync(&self, key: K) -> ReplaceResult<'_, K, V, H> {
806 let hash = self.hash(&key);
807 let locked_bucket = self.writer_sync(hash);
808 let mut entry_ptr = locked_bucket.search(&key, hash);
809 if entry_ptr.is_valid() {
810 let prev_key = replace(
811 &mut entry_ptr
812 .get_mut(locked_bucket.data_block, &locked_bucket.writer)
813 .0,
814 key,
815 );
816 ReplaceResult::Replaced(
817 OccupiedEntry {
818 hashmap: self,
819 locked_bucket,
820 entry_ptr,
821 },
822 prev_key,
823 )
824 } else {
825 ReplaceResult::NotReplaced(VacantEntry {
826 hashmap: self,
827 key,
828 hash,
829 locked_bucket,
830 })
831 }
832 }
833
834 /// Removes a key-value pair if the key exists.
835 ///
836 /// Returns `None` if the key does not exist.
837 ///
838 /// # Examples
839 ///
840 /// ```
841 /// use scc::HashMap;
842 ///
843 /// let hashmap: HashMap<u64, u32> = HashMap::default();
844 /// let future_insert = hashmap.insert_async(11, 17);
845 /// let future_remove = hashmap.remove_async(&11);
846 /// ```
847 #[inline]
848 pub async fn remove_async<Q>(&self, key: &Q) -> Option<(K, V)>
849 where
850 Q: Equivalent<K> + Hash + ?Sized,
851 {
852 self.remove_if_async(key, |_| true).await
853 }
854
855 /// Removes a key-value pair if the key exists.
856 ///
857 /// Returns `None` if the key does not exist.
858 ///
859 /// # Examples
860 ///
861 /// ```
862 /// use scc::HashMap;
863 ///
864 /// let hashmap: HashMap<u64, u32> = HashMap::default();
865 ///
866 /// assert!(hashmap.remove_sync(&1).is_none());
867 /// assert!(hashmap.insert_sync(1, 0).is_ok());
868 /// assert_eq!(hashmap.remove_sync(&1).unwrap(), (1, 0));
869 /// ```
870 #[inline]
871 pub fn remove_sync<Q>(&self, key: &Q) -> Option<(K, V)>
872 where
873 Q: Equivalent<K> + Hash + ?Sized,
874 {
875 self.remove_if_sync(key, |_| true)
876 }
877
878 /// Removes a key-value pair if the key exists and the given condition is met.
879 ///
880 /// Returns `None` if the key does not exist or the condition was not met.
881 ///
882 /// # Examples
883 ///
884 /// ```
885 /// use scc::HashMap;
886 ///
887 /// let hashmap: HashMap<u64, u32> = HashMap::default();
888 /// let future_insert = hashmap.insert_async(11, 17);
889 /// let future_remove = hashmap.remove_if_async(&11, |_| true);
890 /// ```
891 #[inline]
892 pub async fn remove_if_async<Q, F: FnOnce(&mut V) -> bool>(
893 &self,
894 key: &Q,
895 condition: F,
896 ) -> Option<(K, V)>
897 where
898 Q: Equivalent<K> + Hash + ?Sized,
899 {
900 let hash = self.hash(key);
901 let mut locked_bucket = self.optional_writer_async(hash).await?;
902 let mut entry_ptr = locked_bucket.search(key, hash);
903 if entry_ptr.is_valid() && condition(&mut locked_bucket.entry_mut(&mut entry_ptr).1) {
904 Some(locked_bucket.remove(self, &mut entry_ptr))
905 } else {
906 None
907 }
908 }
909
910 /// Removes a key-value pair if the key exists and the given condition is met.
911 ///
912 /// Returns `None` if the key does not exist or the condition was not met.
913 ///
914 /// # Examples
915 ///
916 /// ```
917 /// use scc::HashMap;
918 ///
919 /// let hashmap: HashMap<u64, u32> = HashMap::default();
920 ///
921 /// assert!(hashmap.insert_sync(1, 0).is_ok());
922 /// assert!(hashmap.remove_if_sync(&1, |v| { *v += 1; false }).is_none());
923 /// assert_eq!(hashmap.remove_if_sync(&1, |v| *v == 1).unwrap(), (1, 1));
924 /// ```
925 #[inline]
926 pub fn remove_if_sync<Q, F: FnOnce(&mut V) -> bool>(
927 &self,
928 key: &Q,
929 condition: F,
930 ) -> Option<(K, V)>
931 where
932 Q: Equivalent<K> + Hash + ?Sized,
933 {
934 let hash = self.hash(key);
935 let mut locked_bucket = self.optional_writer_sync(hash)?;
936 let mut entry_ptr = locked_bucket.search(key, hash);
937 if entry_ptr.is_valid() && condition(&mut locked_bucket.entry_mut(&mut entry_ptr).1) {
938 Some(locked_bucket.remove(self, &mut entry_ptr))
939 } else {
940 None
941 }
942 }
943
944 /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
945 ///
946 /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
947 /// use [`read_async`](Self::read_async) if read-only access is sufficient.
948 ///
949 /// Returns `None` if the key does not exist.
950 ///
951 /// # Examples
952 ///
953 /// ```
954 /// use scc::HashMap;
955 ///
956 /// let hashmap: HashMap<u64, u32> = HashMap::default();
957 /// let future_insert = hashmap.insert_async(11, 17);
958 /// let future_get = hashmap.get_async(&11);
959 /// ```
960 #[inline]
961 pub async fn get_async<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
962 where
963 Q: Equivalent<K> + Hash + ?Sized,
964 {
965 let hash = self.hash(key);
966 let locked_bucket = self.optional_writer_async(hash).await?;
967 let entry_ptr = locked_bucket.search(key, hash);
968 if entry_ptr.is_valid() {
969 return Some(OccupiedEntry {
970 hashmap: self,
971 locked_bucket,
972 entry_ptr,
973 });
974 }
975 None
976 }
977
978 /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
979 ///
980 /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
981 /// use [`read_sync`](Self::read_sync) if read-only access is sufficient.
982 ///
983 /// Returns `None` if the key does not exist.
984 ///
985 /// # Examples
986 ///
987 /// ```
988 /// use scc::HashMap;
989 ///
990 /// let hashmap: HashMap<u64, u32> = HashMap::default();
991 ///
992 /// assert!(hashmap.get_sync(&1).is_none());
993 /// assert!(hashmap.insert_sync(1, 10).is_ok());
994 /// assert_eq!(*hashmap.get_sync(&1).unwrap().get(), 10);
995 ///
996 /// *hashmap.get_sync(&1).unwrap() = 11;
997 /// assert_eq!(*hashmap.get_sync(&1).unwrap(), 11);
998 /// ```
999 #[inline]
1000 pub fn get_sync<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
1001 where
1002 Q: Equivalent<K> + Hash + ?Sized,
1003 {
1004 let hash = self.hash(key);
1005 let locked_bucket = self.optional_writer_sync(hash)?;
1006 let entry_ptr = locked_bucket.search(key, hash);
1007 if entry_ptr.is_valid() {
1008 return Some(OccupiedEntry {
1009 hashmap: self,
1010 locked_bucket,
1011 entry_ptr,
1012 });
1013 }
1014 None
1015 }
1016
1017 /// Reads a key-value pair.
1018 ///
1019 /// Returns `None` if the key does not exist.
1020 ///
1021 /// # Examples
1022 ///
1023 /// ```
1024 /// use scc::HashMap;
1025 ///
1026 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1027 /// let future_insert = hashmap.insert_async(11, 17);
1028 /// let future_read = hashmap.read_async(&11, |_, v| *v);
1029 /// ```
1030 #[inline]
1031 pub async fn read_async<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
1032 where
1033 Q: Equivalent<K> + Hash + ?Sized,
1034 {
1035 self.reader_async(key, reader).await
1036 }
1037
1038 /// Reads a key-value pair.
1039 ///
1040 /// Returns `None` if the key does not exist.
1041 ///
1042 /// # Examples
1043 ///
1044 /// ```
1045 /// use scc::HashMap;
1046 ///
1047 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1048 ///
1049 /// assert!(hashmap.read_sync(&1, |_, v| *v).is_none());
1050 /// assert!(hashmap.insert_sync(1, 10).is_ok());
1051 /// assert_eq!(hashmap.read_sync(&1, |_, v| *v).unwrap(), 10);
1052 /// ```
1053 #[inline]
1054 pub fn read_sync<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
1055 where
1056 Q: Equivalent<K> + Hash + ?Sized,
1057 {
1058 self.reader_sync(key, reader)
1059 }
1060
1061 /// Returns `true` if the [`HashMap`] contains a value for the specified key.
1062 ///
1063 /// # Examples
1064 ///
1065 /// ```
1066 /// use scc::HashMap;
1067 ///
1068 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1069 ///
1070 /// let future_contains = hashmap.contains_async(&1);
1071 /// ```
1072 #[inline]
1073 pub async fn contains_async<Q>(&self, key: &Q) -> bool
1074 where
1075 Q: Equivalent<K> + Hash + ?Sized,
1076 {
1077 self.reader_async(key, |_, _| ()).await.is_some()
1078 }
1079
1080 /// Returns `true` if the [`HashMap`] contains a value for the specified key.
1081 ///
1082 /// # Examples
1083 ///
1084 /// ```
1085 /// use scc::HashMap;
1086 ///
1087 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1088 ///
1089 /// assert!(!hashmap.contains_sync(&1));
1090 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1091 /// assert!(hashmap.contains_sync(&1));
1092 /// ```
1093 #[inline]
1094 pub fn contains_sync<Q>(&self, key: &Q) -> bool
1095 where
1096 Q: Equivalent<K> + Hash + ?Sized,
1097 {
1098 self.read_sync(key, |_, _| ()).is_some()
1099 }
1100
1101 /// Iterates over entries asynchronously for reading.
1102 ///
1103 /// Stops iterating when the closure returns `false`, and this method also returns `false`.
1104 ///
1105 /// # Examples
1106 ///
1107 /// ```
1108 /// use scc::HashMap;
1109 ///
1110 /// let hashmap: HashMap<u64, u64> = HashMap::default();
1111 ///
1112 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1113 ///
1114 /// async {
1115 /// let result = hashmap.iter_async(|k, v| {
1116 /// false
1117 /// }).await;
1118 /// assert!(!result);
1119 /// };
1120 /// ```
1121 #[inline]
1122 pub async fn iter_async<F: FnMut(&K, &V) -> bool>(&self, mut f: F) -> bool {
1123 let mut result = true;
1124 self.for_each_reader_async(|reader, data_block| {
1125 let mut entry_ptr = EntryPtr::null();
1126 while entry_ptr.move_to_next(&reader) {
1127 let (k, v) = entry_ptr.get(data_block);
1128 if !f(k, v) {
1129 result = false;
1130 return false;
1131 }
1132 }
1133 true
1134 })
1135 .await;
1136 result
1137 }
1138
1139 /// Iterates over entries synchronously for reading.
1140 ///
1141 /// Stops iterating when the closure returns `false`, and this method also returns `false`.
1142 ///
1143 /// # Examples
1144 ///
1145 /// ```
1146 /// use scc::HashMap;
1147 ///
1148 /// let hashmap: HashMap<u64, u64> = HashMap::default();
1149 ///
1150 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1151 /// assert!(hashmap.insert_sync(2, 1).is_ok());
1152 ///
1153 /// let mut acc = 0_u64;
1154 /// let result = hashmap.iter_sync(|k, v| {
1155 /// acc += *k;
1156 /// acc += *v;
1157 /// true
1158 /// });
1159 ///
1160 /// assert!(result);
1161 /// assert_eq!(acc, 4);
1162 /// ```
1163 #[inline]
1164 pub fn iter_sync<F: FnMut(&K, &V) -> bool>(&self, mut f: F) -> bool {
1165 let mut result = true;
1166 let guard = Guard::new();
1167 self.for_each_reader_sync(&guard, |reader, data_block| {
1168 let mut entry_ptr = EntryPtr::null();
1169 while entry_ptr.move_to_next(&reader) {
1170 let (k, v) = entry_ptr.get(data_block);
1171 if !f(k, v) {
1172 result = false;
1173 return false;
1174 }
1175 }
1176 true
1177 });
1178 result
1179 }
1180
1181 /// Iterates over entries asynchronously for modification.
1182 ///
1183 /// Stops iterating when the closure returns `false`, and this method also returns `false`.
1184 ///
1185 /// # Examples
1186 ///
1187 /// ```
1188 /// use scc::HashMap;
1189 ///
1190 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1191 ///
1192 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1193 /// assert!(hashmap.insert_sync(2, 1).is_ok());
1194 ///
1195 /// async {
1196 /// let result = hashmap.iter_mut_async(|entry| {
1197 /// if entry.0 == 1 {
1198 /// entry.consume();
1199 /// return false;
1200 /// }
1201 /// true
1202 /// }).await;
1203 ///
1204 /// assert!(!result);
1205 /// assert_eq!(hashmap.len(), 1);
1206 /// };
1207 /// ```
1208 #[inline]
1209 pub async fn iter_mut_async<F: FnMut(ConsumableEntry<'_, K, V>) -> bool>(
1210 &self,
1211 mut f: F,
1212 ) -> bool {
1213 let mut result = true;
1214 self.for_each_writer_async(0, 0, |mut locked_bucket, removed| {
1215 let mut entry_ptr = EntryPtr::null();
1216 while entry_ptr.move_to_next(&locked_bucket.writer) {
1217 let consumable_entry = ConsumableEntry {
1218 locked_bucket: &mut locked_bucket,
1219 entry_ptr: &mut entry_ptr,
1220 remove_probe: removed,
1221 };
1222 if !f(consumable_entry) {
1223 result = false;
1224 return true;
1225 }
1226 }
1227 false
1228 })
1229 .await;
1230 result
1231 }
1232
1233 /// Iterates over entries synchronously for modification.
1234 ///
1235 /// Stops iterating when the closure returns `false`, and this method also returns `false`.
1236 ///
1237 /// # Examples
1238 ///
1239 /// ```
1240 /// use scc::HashMap;
1241 ///
1242 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1243 ///
1244 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1245 /// assert!(hashmap.insert_sync(2, 1).is_ok());
1246 /// assert!(hashmap.insert_sync(3, 2).is_ok());
1247 ///
1248 /// let result = hashmap.iter_mut_sync(|entry| {
1249 /// if entry.0 == 1 {
1250 /// entry.consume();
1251 /// return false;
1252 /// }
1253 /// true
1254 /// });
1255 ///
1256 /// assert!(!result);
1257 /// assert!(!hashmap.contains_sync(&1));
1258 /// assert_eq!(hashmap.len(), 2);
1259 /// ```
1260 #[inline]
1261 pub fn iter_mut_sync<F: FnMut(ConsumableEntry<'_, K, V>) -> bool>(&self, mut f: F) -> bool {
1262 let mut result = true;
1263 let guard = Guard::new();
1264 self.for_each_writer_sync(0, 0, &guard, |mut locked_bucket, removed| {
1265 let mut entry_ptr = EntryPtr::null();
1266 while entry_ptr.move_to_next(&locked_bucket.writer) {
1267 let consumable_entry = ConsumableEntry {
1268 locked_bucket: &mut locked_bucket,
1269 entry_ptr: &mut entry_ptr,
1270 remove_probe: removed,
1271 };
1272 if !f(consumable_entry) {
1273 result = false;
1274 return true;
1275 }
1276 }
1277 false
1278 });
1279 result
1280 }
1281
1282 /// Retains the entries specified by the predicate.
1283 ///
1284 /// This method allows the predicate closure to modify the value field.
1285 ///
    /// Entries that have existed since the invocation of the method are guaranteed to be visited
    /// unless they are removed; however, the same entry can be visited more than once if the
    /// [`HashMap`] is resized by another thread.
1289 ///
1290 /// # Examples
1291 ///
1292 /// ```
1293 /// use scc::HashMap;
1294 ///
1295 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1296 ///
1297 /// let future_insert = hashmap.insert_async(1, 0);
1298 /// let future_retain = hashmap.retain_async(|k, v| *k == 1);
1299 /// ```
1300 #[inline]
1301 pub async fn retain_async<F: FnMut(&K, &mut V) -> bool>(&self, mut pred: F) {
1302 self.iter_mut_async(|mut e| {
1303 let (k, v) = &mut *e;
1304 if !pred(k, v) {
1305 drop(e.consume());
1306 }
1307 true
1308 })
1309 .await;
1310 }
1311
1312 /// Retains the entries specified by the predicate.
1313 ///
1314 /// This method allows the predicate closure to modify the value field.
1315 ///
    /// Entries that have existed since the invocation of the method are guaranteed to be visited
    /// unless they are removed; however, the same entry can be visited more than once if the
    /// [`HashMap`] is resized by another thread.
1319 ///
1320 /// # Examples
1321 ///
1322 /// ```
1323 /// use scc::HashMap;
1324 ///
1325 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1326 ///
1327 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1328 /// assert!(hashmap.insert_sync(2, 1).is_ok());
1329 /// assert!(hashmap.insert_sync(3, 2).is_ok());
1330 ///
1331 /// hashmap.retain_sync(|k, v| *k == 1 && *v == 0);
1332 ///
1333 /// assert!(hashmap.contains_sync(&1));
1334 /// assert!(!hashmap.contains_sync(&2));
1335 /// assert!(!hashmap.contains_sync(&3));
1336 /// ```
1337 #[inline]
1338 pub fn retain_sync<F: FnMut(&K, &mut V) -> bool>(&self, mut pred: F) {
1339 self.iter_mut_sync(|mut e| {
1340 let (k, v) = &mut *e;
1341 if !pred(k, v) {
1342 drop(e.consume());
1343 }
1344 true
1345 });
1346 }
1347
1348 /// Clears the [`HashMap`] by removing all key-value pairs.
1349 ///
1350 /// # Examples
1351 ///
1352 /// ```
1353 /// use scc::HashMap;
1354 ///
1355 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1356 ///
1357 /// let future_insert = hashmap.insert_async(1, 0);
1358 /// let future_clear = hashmap.clear_async();
1359 /// ```
1360 #[inline]
1361 pub async fn clear_async(&self) {
1362 self.retain_async(|_, _| false).await;
1363 }
1364
1365 /// Clears the [`HashMap`] by removing all key-value pairs.
1366 ///
1367 /// # Examples
1368 ///
1369 /// ```
1370 /// use scc::HashMap;
1371 ///
1372 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1373 ///
1374 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1375 /// hashmap.clear_sync();
1376 ///
1377 /// assert!(!hashmap.contains_sync(&1));
1378 /// ```
1379 #[inline]
1380 pub fn clear_sync(&self) {
1381 self.retain_sync(|_, _| false);
1382 }
1383
1384 /// Returns the number of entries in the [`HashMap`].
1385 ///
1386 /// It reads the entire metadata area of the bucket array to calculate the number of valid
1387 /// entries, making its time complexity `O(N)`. Furthermore, it may overcount entries if an old
1388 /// bucket array has not yet been dropped.
1389 ///
1390 /// # Examples
1391 ///
1392 /// ```
1393 /// use scc::HashMap;
1394 ///
1395 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1396 ///
1397 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1398 /// assert_eq!(hashmap.len(), 1);
1399 /// ```
1400 #[inline]
1401 pub fn len(&self) -> usize {
1402 self.num_entries(&Guard::new())
1403 }
1404
1405 /// Returns `true` if the [`HashMap`] is empty.
1406 ///
1407 /// # Examples
1408 ///
1409 /// ```
1410 /// use scc::HashMap;
1411 ///
1412 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1413 ///
1414 /// assert!(hashmap.is_empty());
1415 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1416 /// assert!(!hashmap.is_empty());
1417 /// ```
1418 #[inline]
1419 pub fn is_empty(&self) -> bool {
1420 !self.has_entry(&Guard::new())
1421 }
1422
1423 /// Returns the capacity of the [`HashMap`].
1424 ///
1425 /// # Examples
1426 ///
1427 /// ```
1428 /// use scc::HashMap;
1429 ///
1430 /// let hashmap_default: HashMap<u64, u32> = HashMap::default();
1431 /// assert_eq!(hashmap_default.capacity(), 0);
1432 ///
1433 /// assert!(hashmap_default.insert_sync(1, 0).is_ok());
1434 /// assert_eq!(hashmap_default.capacity(), 64);
1435 ///
1436 /// let hashmap: HashMap<u64, u32> = HashMap::with_capacity(1000);
1437 /// assert_eq!(hashmap.capacity(), 1024);
1438 /// ```
1439 #[inline]
1440 pub fn capacity(&self) -> usize {
1441 self.num_slots(&Guard::new())
1442 }
1443
1444 /// Returns the current capacity range of the [`HashMap`].
1445 ///
1446 /// # Examples
1447 ///
1448 /// ```
1449 /// use scc::HashMap;
1450 ///
1451 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1452 ///
1453 /// assert_eq!(hashmap.capacity_range(), 0..=(1_usize << (usize::BITS - 2)));
1454 ///
1455 /// let reserved = hashmap.reserve(1000);
1456 /// assert_eq!(hashmap.capacity_range(), 1000..=(1_usize << (usize::BITS - 2)));
1457 /// ```
1458 #[inline]
1459 pub fn capacity_range(&self) -> RangeInclusive<usize> {
1460 self.minimum_capacity.load(Relaxed)..=self.maximum_capacity()
1461 }
1462
1463 /// Returns the index of the bucket that may contain the key.
1464 ///
    /// The returned index identifies the bucket associated with the key's hash. The number of
    /// buckets can be calculated by dividing the capacity by `32`.
1467 ///
1468 /// # Examples
1469 ///
1470 /// ```
1471 /// use scc::HashMap;
1472 ///
1473 /// let hashmap: HashMap<u64, u32> = HashMap::with_capacity(1024);
1474 ///
1475 /// let bucket_index = hashmap.bucket_index(&11);
1476 /// assert!(bucket_index < hashmap.capacity() / 32);
1477 /// ```
1478 #[inline]
1479 pub fn bucket_index<Q>(&self, key: &Q) -> usize
1480 where
1481 Q: Equivalent<K> + Hash + ?Sized,
1482 {
1483 self.calculate_bucket_index(key)
1484 }
1485}
1486
1487impl<K, V> HashMap<K, V, RandomState> {
1488 /// Creates an empty default [`HashMap`].
1489 ///
1490 /// # Examples
1491 ///
1492 /// ```
1493 /// use scc::HashMap;
1494 ///
1495 /// let hashmap: HashMap<u64, u32> = HashMap::new();
1496 ///
1497 /// let result = hashmap.capacity();
1498 /// assert_eq!(result, 0);
1499 /// ```
1500 #[inline]
1501 #[must_use]
1502 pub fn new() -> Self {
1503 Self::default()
1504 }
1505
1506 /// Creates an empty [`HashMap`] with the specified capacity.
1507 ///
1508 /// The actual capacity is equal to or greater than `capacity` unless it is greater than
1509 /// `1 << (usize::BITS - 1)`.
1510 ///
1511 /// # Examples
1512 ///
1513 /// ```
1514 /// use scc::HashMap;
1515 ///
1516 /// let hashmap: HashMap<u64, u32> = HashMap::with_capacity(1000);
1517 ///
1518 /// let result = hashmap.capacity();
1519 /// assert_eq!(result, 1024);
1520 /// ```
1521 #[inline]
1522 #[must_use]
1523 pub fn with_capacity(capacity: usize) -> Self {
1524 Self::with_capacity_and_hasher(capacity, RandomState::new())
1525 }
1526}
1527
1528impl<K, V, H> Clone for HashMap<K, V, H>
1529where
1530 K: Clone + Eq + Hash,
1531 V: Clone,
1532 H: BuildHasher + Clone,
1533{
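    /// Creates a copy of the [`HashMap`] by cloning every key-value pair into a new instance.
    ///
    /// A brief sketch:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// let hashmap: HashMap<u64, u32> = HashMap::default();
    /// assert!(hashmap.insert_sync(1, 0).is_ok());
    ///
    /// let cloned = hashmap.clone();
    /// assert_eq!(cloned.read_sync(&1, |_, v| *v), Some(0));
    /// ```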
1534 #[inline]
1535 fn clone(&self) -> Self {
1536 let self_clone = Self::with_capacity_and_hasher(self.capacity(), self.hasher().clone());
1537 self.iter_sync(|k, v| {
1538 let _result = self_clone.insert_sync(k.clone(), v.clone());
1539 true
1540 });
1541 self_clone
1542 }
1543}
1544
1545impl<K, V, H> Debug for HashMap<K, V, H>
1546where
1547 K: Debug + Eq + Hash,
1548 V: Debug,
1549 H: BuildHasher,
1550{
1551 /// Iterates over all the entries in the [`HashMap`] to print them.
1552 ///
1553 /// # Locking behavior
1554 ///
    /// Shared locks on buckets are acquired during iteration; therefore, any [`Entry`],
    /// [`OccupiedEntry`], or [`VacantEntry`] owned by the current thread will lead to a deadlock.
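    ///
    /// # Examples
    ///
    /// A small sketch; a single entry keeps the output deterministic, since entries are otherwise
    /// visited in an arbitrary order:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// let hashmap: HashMap<u64, u32> = HashMap::default();
    /// assert!(hashmap.insert_sync(1, 10).is_ok());
    ///
    /// assert_eq!(format!("{:?}", hashmap), "{1: 10}");
    /// ```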
1557 #[inline]
1558 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1559 let mut d = f.debug_map();
1560 self.iter_sync(|k, v| {
1561 d.entry(k, v);
1562 true
1563 });
1564 d.finish()
1565 }
1566}
1567
1568impl<K, V, H> Default for HashMap<K, V, H>
1569where
1570 H: BuildHasher + Default,
1571{
1572 /// Creates an empty default [`HashMap`].
1573 ///
1574 /// # Examples
1575 ///
1576 /// ```
1577 /// use scc::HashMap;
1578 ///
1579 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1580 ///
1581 /// let result = hashmap.capacity();
1582 /// assert_eq!(result, 0);
1583 /// ```
1584 #[inline]
1585 fn default() -> Self {
1586 Self::with_hasher(H::default())
1587 }
1588}
1589
1590impl<K, V, H> Drop for HashMap<K, V, H>
1591where
1592 H: BuildHasher,
1593{
1594 #[inline]
1595 fn drop(&mut self) {
1596 self.bucket_array
1597 .swap((None, Tag::None), Relaxed)
1598 .0
1599 .map(|a| unsafe {
1600 // The entire array does not need to wait for an epoch change as no references will
1601 // remain outside the lifetime of the `HashMap`.
1602 a.drop_in_place()
1603 });
1604 }
1605}
1606
1607impl<K, V, H> FromIterator<(K, V)> for HashMap<K, V, H>
1608where
1609 K: Eq + Hash,
1610 H: BuildHasher + Default,
1611{
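    /// Creates a [`HashMap`] from an iterator of key-value pairs.
    ///
    /// A brief sketch; duplicate keys keep the last value, since each pair is upserted as shown in
    /// the implementation below:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// let hashmap: HashMap<u64, u32> = vec![(1, 0), (2, 1), (1, 7)].into_iter().collect();
    ///
    /// assert_eq!(hashmap.len(), 2);
    /// assert_eq!(hashmap.read_sync(&1, |_, v| *v), Some(7));
    /// ```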
1612 #[inline]
1613 fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
1614 let into_iter = iter.into_iter();
1615 let hashmap = Self::with_capacity_and_hasher(
1616 Self::capacity_from_size_hint(into_iter.size_hint()),
1617 H::default(),
1618 );
1619 into_iter.for_each(|e| {
1620 hashmap.upsert_sync(e.0, e.1);
1621 });
1622 hashmap
1623 }
1624}
1625
1626impl<K, V, H> HashTable<K, V, H, (), MAP> for HashMap<K, V, H>
1627where
1628 K: Eq + Hash,
1629 H: BuildHasher,
1630{
1631 #[inline]
1632 fn hasher(&self) -> &H {
1633 &self.build_hasher
1634 }
1635
1636 #[inline]
1637 fn bucket_array_var(&self) -> &AtomicShared<BucketArray<K, V, (), MAP>> {
1638 &self.bucket_array
1639 }
1640
1641 #[inline]
1642 fn minimum_capacity_var(&self) -> &AtomicUsize {
1643 &self.minimum_capacity
1644 }
1645}
1646
1647impl<K, V, H> PartialEq for HashMap<K, V, H>
1648where
1649 K: Eq + Hash,
1650 V: PartialEq,
1651 H: BuildHasher,
1652{
1653 /// Compares two [`HashMap`] instances.
1654 ///
1655 /// # Locking behavior
1656 ///
    /// Shared locks on buckets are acquired when comparing two instances of [`HashMap`]; therefore,
    /// this may lead to a deadlock if the instances are being modified by another thread.
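    ///
    /// # Examples
    ///
    /// A short sketch comparing two instances built with the methods documented above:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// let lhs: HashMap<u64, u32> = HashMap::default();
    /// let rhs: HashMap<u64, u32> = HashMap::default();
    /// assert_eq!(lhs, rhs);
    ///
    /// assert!(lhs.insert_sync(1, 0).is_ok());
    /// assert_ne!(lhs, rhs);
    ///
    /// assert!(rhs.insert_sync(1, 0).is_ok());
    /// assert_eq!(lhs, rhs);
    /// ```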
1659 #[inline]
1660 fn eq(&self, other: &Self) -> bool {
1661 if self.iter_sync(|k, v| other.read_sync(k, |_, ov| v == ov) == Some(true)) {
1662 return other.iter_sync(|k, v| self.read_sync(k, |_, sv| v == sv) == Some(true));
1663 }
1664 false
1665 }
1666}
1667
1668impl<'h, K, V, H> Entry<'h, K, V, H>
1669where
1670 K: Eq + Hash,
1671 H: BuildHasher,
1672{
1673 /// Ensures a value is in the entry by inserting the supplied instance if empty.
1674 ///
1675 /// # Examples
1676 ///
1677 /// ```
1678 /// use scc::HashMap;
1679 ///
1680 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1681 ///
1682 /// hashmap.entry_sync(3).or_insert(7);
1683 /// assert_eq!(hashmap.read_sync(&3, |_, v| *v), Some(7));
1684 /// ```
1685 #[inline]
1686 pub fn or_insert(self, val: V) -> OccupiedEntry<'h, K, V, H> {
1687 self.or_insert_with(|| val)
1688 }
1689
1690 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1691 ///
1692 /// # Examples
1693 ///
1694 /// ```
1695 /// use scc::HashMap;
1696 ///
1697 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1698 ///
1699 /// hashmap.entry_sync(19).or_insert_with(|| 5);
1700 /// assert_eq!(hashmap.read_sync(&19, |_, v| *v), Some(5));
1701 /// ```
1702 #[inline]
1703 pub fn or_insert_with<F: FnOnce() -> V>(self, constructor: F) -> OccupiedEntry<'h, K, V, H> {
1704 self.or_insert_with_key(|_| constructor())
1705 }
1706
1707 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1708 ///
    /// A reference to the moved key is provided to the closure; therefore, cloning or copying the
    /// key is unnecessary.
1711 ///
1712 /// # Examples
1713 ///
1714 /// ```
1715 /// use scc::HashMap;
1716 ///
1717 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1718 ///
1719 /// hashmap.entry_sync(11).or_insert_with_key(|k| if *k == 11 { 7 } else { 3 });
1720 /// assert_eq!(hashmap.read_sync(&11, |_, v| *v), Some(7));
1721 /// ```
1722 #[inline]
1723 pub fn or_insert_with_key<F: FnOnce(&K) -> V>(
1724 self,
1725 constructor: F,
1726 ) -> OccupiedEntry<'h, K, V, H> {
1727 match self {
1728 Self::Occupied(o) => o,
1729 Self::Vacant(v) => {
1730 let val = constructor(v.key());
1731 v.insert_entry(val)
1732 }
1733 }
1734 }
1735
1736 /// Returns a reference to the key of this entry.
1737 ///
1738 /// # Examples
1739 ///
1740 /// ```
1741 /// use scc::HashMap;
1742 ///
1743 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1744 /// assert_eq!(hashmap.entry_sync(31).key(), &31);
1745 /// ```
1746 #[inline]
1747 pub fn key(&self) -> &K {
1748 match self {
1749 Self::Occupied(o) => o.key(),
1750 Self::Vacant(v) => v.key(),
1751 }
1752 }
1753
1754 /// Provides in-place mutable access to an occupied entry.
1755 ///
1756 /// # Examples
1757 ///
1758 /// ```
1759 /// use scc::HashMap;
1760 ///
1761 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1762 ///
1763 /// hashmap.entry_sync(37).and_modify(|v| { *v += 1 }).or_insert(47);
1764 /// assert_eq!(hashmap.read_sync(&37, |_, v| *v), Some(47));
1765 ///
1766 /// hashmap.entry_sync(37).and_modify(|v| { *v += 1 }).or_insert(3);
1767 /// assert_eq!(hashmap.read_sync(&37, |_, v| *v), Some(48));
1768 /// ```
1769 #[inline]
1770 #[must_use]
1771 pub fn and_modify<F>(self, f: F) -> Self
1772 where
1773 F: FnOnce(&mut V),
1774 {
1775 match self {
1776 Self::Occupied(mut o) => {
1777 f(o.get_mut());
1778 Self::Occupied(o)
1779 }
1780 Self::Vacant(_) => self,
1781 }
1782 }
1783
1784 /// Sets the value of the entry.
1785 ///
1786 /// # Examples
1787 ///
1788 /// ```
1789 /// use scc::HashMap;
1790 ///
1791 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1792 /// let entry = hashmap.entry_sync(11).insert_entry(17);
1793 /// assert_eq!(entry.key(), &11);
1794 /// ```
1795 #[inline]
1796 pub fn insert_entry(self, val: V) -> OccupiedEntry<'h, K, V, H> {
1797 match self {
1798 Self::Occupied(mut o) => {
1799 o.insert(val);
1800 o
1801 }
1802 Self::Vacant(v) => v.insert_entry(val),
1803 }
1804 }
1805}
1806
1807impl<'h, K, V, H> Entry<'h, K, V, H>
1808where
1809 K: Eq + Hash,
1810 V: Default,
1811 H: BuildHasher,
1812{
1813 /// Ensures a value is in the entry by inserting the default value if empty.
1814 ///
1815 /// # Examples
1816 ///
1817 /// ```
1818 /// use scc::HashMap;
1819 ///
1820 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1821 /// hashmap.entry_sync(11).or_default();
1822 /// assert_eq!(hashmap.read_sync(&11, |_, v| *v), Some(0));
1823 /// ```
1824 #[inline]
1825 pub fn or_default(self) -> OccupiedEntry<'h, K, V, H> {
1826 match self {
1827 Self::Occupied(o) => o,
1828 Self::Vacant(v) => v.insert_entry(Default::default()),
1829 }
1830 }
1831}
1832
1833impl<K, V, H> Debug for Entry<'_, K, V, H>
1834where
1835 K: Debug + Eq + Hash,
1836 V: Debug,
1837 H: BuildHasher,
1838{
1839 #[inline]
1840 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1841 match self {
1842 Self::Vacant(v) => f.debug_tuple("Entry").field(v).finish(),
1843 Self::Occupied(o) => f.debug_tuple("Entry").field(o).finish(),
1844 }
1845 }
1846}
1847
1848impl<'h, K, V, H> OccupiedEntry<'h, K, V, H>
1849where
1850 K: Eq + Hash,
1851 H: BuildHasher,
1852{
1853 /// Gets a reference to the key in the entry.
1854 ///
1855 /// # Examples
1856 ///
1857 /// ```
1858 /// use scc::HashMap;
1859 ///
1860 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1861 ///
1862 /// assert_eq!(hashmap.entry_sync(29).or_default().key(), &29);
1863 /// ```
1864 #[inline]
1865 #[must_use]
1866 pub fn key(&self) -> &K {
1867 &self.locked_bucket.entry(&self.entry_ptr).0
1868 }
1869
1870 /// Takes ownership of the key and value from the [`HashMap`].
1871 ///
1872 /// # Examples
1873 ///
1874 /// ```
1875 /// use scc::HashMap;
1876 /// use scc::hash_map::Entry;
1877 ///
1878 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1879 ///
1880 /// hashmap.entry_sync(11).or_insert(17);
1881 ///
1882 /// if let Entry::Occupied(o) = hashmap.entry_sync(11) {
1883 /// assert_eq!(o.remove_entry(), (11, 17));
1884 /// };
1885 /// ```
1886 #[inline]
1887 #[must_use]
1888 pub fn remove_entry(mut self) -> (K, V) {
1889 self.locked_bucket.remove(self.hashmap, &mut self.entry_ptr)
1890 }
1891
1892 /// Gets a reference to the value in the entry.
1893 ///
1894 /// # Examples
1895 ///
1896 /// ```
1897 /// use scc::HashMap;
1898 /// use scc::hash_map::Entry;
1899 ///
1900 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1901 ///
1902 /// hashmap.entry_sync(19).or_insert(11);
1903 ///
1904 /// if let Entry::Occupied(o) = hashmap.entry_sync(19) {
1905 /// assert_eq!(o.get(), &11);
1906 /// };
1907 /// ```
1908 #[inline]
1909 #[must_use]
1910 pub fn get(&self) -> &V {
1911 &self.locked_bucket.entry(&self.entry_ptr).1
1912 }
1913
1914 /// Gets a mutable reference to the value in the entry.
1915 ///
1916 /// # Examples
1917 ///
1918 /// ```
1919 /// use scc::HashMap;
1920 /// use scc::hash_map::Entry;
1921 ///
1922 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1923 ///
1924 /// hashmap.entry_sync(37).or_insert(11);
1925 ///
1926 /// if let Entry::Occupied(mut o) = hashmap.entry_sync(37) {
1927 /// *o.get_mut() += 18;
1928 /// assert_eq!(*o.get(), 29);
1929 /// }
1930 ///
1931 /// assert_eq!(hashmap.read_sync(&37, |_, v| *v), Some(29));
1932 /// ```
1933 #[inline]
1934 pub fn get_mut(&mut self) -> &mut V {
1935 &mut self.locked_bucket.entry_mut(&mut self.entry_ptr).1
1936 }
1937
1938 /// Sets the value of the entry, and returns the old value.
1939 ///
1940 /// # Examples
1941 ///
1942 /// ```
1943 /// use scc::HashMap;
1944 /// use scc::hash_map::Entry;
1945 ///
1946 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1947 ///
1948 /// hashmap.entry_sync(37).or_insert(11);
1949 ///
1950 /// if let Entry::Occupied(mut o) = hashmap.entry_sync(37) {
1951 /// assert_eq!(o.insert(17), 11);
1952 /// }
1953 ///
1954 /// assert_eq!(hashmap.read_sync(&37, |_, v| *v), Some(17));
1955 /// ```
1956 #[inline]
1957 pub fn insert(&mut self, val: V) -> V {
1958 replace(self.get_mut(), val)
1959 }
1960
1961 /// Takes the value out of the entry, and returns it.
1962 ///
1963 /// # Examples
1964 ///
1965 /// ```
1966 /// use scc::HashMap;
1967 /// use scc::hash_map::Entry;
1968 ///
1969 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1970 ///
1971 /// hashmap.entry_sync(11).or_insert(17);
1972 ///
1973 /// if let Entry::Occupied(o) = hashmap.entry_sync(11) {
1974 /// assert_eq!(o.remove(), 17);
1975 /// };
1976 /// ```
1977 #[inline]
1978 #[must_use]
1979 pub fn remove(self) -> V {
1980 self.remove_entry().1
1981 }
1982
1983 /// Removes the entry and gets the next closest occupied entry.
1984 ///
1985 /// [`HashMap::begin_async`] and this method together allow the [`OccupiedEntry`] to
1986 /// effectively act as a mutable iterator over entries. This method never acquires more than one
1987 /// lock, even when it searches other buckets for the next closest occupied entry.
1988 ///
1989 /// # Examples
1990 ///
1991 /// ```
1992 /// use scc::HashMap;
1993 /// use scc::hash_map::Entry;
1994 ///
1995 /// let hashmap: HashMap<u64, u32> = HashMap::default();
1996 ///
1997 /// assert!(hashmap.insert_sync(1, 0).is_ok());
1998 /// assert!(hashmap.insert_sync(2, 0).is_ok());
1999 ///
2000    /// let remove_and_next_future = hashmap.begin_sync().unwrap().remove_and_async();
2001 /// ```
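    ///
    /// A minimal sketch of draining the [`HashMap`] from an async context (the executor is
    /// omitted); it assumes [`HashMap::begin_async`] resolves to the first occupied entry,
    /// mirroring [`HashMap::begin_sync`]:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// async fn drain(hashmap: &HashMap<u64, u32>) {
    ///     let mut current = hashmap.begin_async().await;
    ///     while let Some(entry) = current {
    ///         // The removed `(key, value)` pair comes back together with the next occupied entry.
    ///         let (_removed, next) = entry.remove_and_async().await;
    ///         current = next;
    ///     }
    /// }
    /// ```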
2002 #[inline]
2003 pub async fn remove_and_async(self) -> ((K, V), Option<OccupiedEntry<'h, K, V, H>>) {
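        // Remove the entry while the current bucket lock is held, then advance to the next
        // occupied entry; at most one bucket lock is held at any point in time.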
2004 let hashmap = self.hashmap;
2005 let mut entry_ptr = self.entry_ptr.clone();
2006 let entry = self
2007 .locked_bucket
2008 .writer
2009 .remove(self.locked_bucket.data_block, &mut entry_ptr);
2010 if let Some(locked_bucket) = self.locked_bucket.next_async(hashmap, &mut entry_ptr).await {
2011 return (
2012 entry,
2013 Some(OccupiedEntry {
2014 hashmap,
2015 locked_bucket,
2016 entry_ptr,
2017 }),
2018 );
2019 }
2020 (entry, None)
2021 }
2022
2023 /// Removes the entry and gets the next closest occupied entry.
2024 ///
2025 /// [`HashMap::begin_sync`] and this method together allow the [`OccupiedEntry`] to effectively
2026 /// act as a mutable iterator over entries. This method never acquires more than one lock, even
2027 /// when it searches other buckets for the next closest occupied entry.
2028 ///
2029 /// # Examples
2030 ///
2031 /// ```
2032 /// use scc::HashMap;
2033 /// use scc::hash_map::Entry;
2034 ///
2035 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2036 ///
2037 /// assert!(hashmap.insert_sync(1, 0).is_ok());
2038 /// assert!(hashmap.insert_sync(2, 0).is_ok());
2039 ///
2040 /// let first_entry = hashmap.begin_sync().unwrap();
2041 /// let first_key = *first_entry.key();
2042 /// let (removed, second_entry) = first_entry.remove_and_sync();
2043 /// assert_eq!(removed.1, 0);
2044 /// assert_eq!(hashmap.len(), 1);
2045 ///
2046 /// let second_entry = second_entry.unwrap();
2047 /// let second_key = *second_entry.key();
2048 ///
2049 /// assert!(second_entry.remove_and_sync().1.is_none());
2050 /// assert_eq!(first_key + second_key, 3);
2051 /// ```
2052 #[inline]
2053 #[must_use]
2054 pub fn remove_and_sync(self) -> ((K, V), Option<Self>) {
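        // Synchronous counterpart of `remove_and_async`: remove under the current bucket lock,
        // then advance to the next occupied entry.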
2055 let hashmap = self.hashmap;
2056 let mut entry_ptr = self.entry_ptr.clone();
2057 let entry = self
2058 .locked_bucket
2059 .writer
2060 .remove(self.locked_bucket.data_block, &mut entry_ptr);
2061 if let Some(locked_bucket) = self.locked_bucket.next_sync(hashmap, &mut entry_ptr) {
2062 return (
2063 entry,
2064 Some(OccupiedEntry {
2065 hashmap,
2066 locked_bucket,
2067 entry_ptr,
2068 }),
2069 );
2070 }
2071 (entry, None)
2072 }
2073
2074 /// Gets the next closest occupied entry.
2075 ///
2076 /// [`HashMap::begin_async`] and this method together allow the [`OccupiedEntry`] to
2077 /// effectively act as a mutable iterator over entries. This method never acquires more than one
2078 /// lock, even when it searches other buckets for the next closest occupied entry.
2079 ///
2080 /// # Examples
2081 ///
2082 /// ```
2083 /// use scc::HashMap;
2084 /// use scc::hash_map::Entry;
2085 ///
2086 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2087 ///
2088 /// assert!(hashmap.insert_sync(1, 0).is_ok());
2089 /// assert!(hashmap.insert_sync(2, 0).is_ok());
2090 ///
2091 /// let second_entry_future = hashmap.begin_sync().unwrap().next_async();
2092 /// ```
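    ///
    /// A minimal sketch of visiting every entry from an async context (the executor is
    /// omitted); it assumes [`HashMap::begin_async`] resolves to the first occupied entry,
    /// mirroring [`HashMap::begin_sync`]:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// async fn count(hashmap: &HashMap<u64, u32>) -> usize {
    ///     let mut visited = 0;
    ///     let mut current = hashmap.begin_async().await;
    ///     while let Some(entry) = current {
    ///         visited += 1;
    ///         // At most one bucket lock is held while searching for the next entry.
    ///         current = entry.next_async().await;
    ///     }
    ///     visited
    /// }
    /// ```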
2093 #[inline]
2094 pub async fn next_async(self) -> Option<OccupiedEntry<'h, K, V, H>> {
2095 let hashmap = self.hashmap;
2096 let mut entry_ptr = self.entry_ptr.clone();
2097 if let Some(locked_bucket) = self.locked_bucket.next_async(hashmap, &mut entry_ptr).await {
2098 return Some(OccupiedEntry {
2099 hashmap,
2100 locked_bucket,
2101 entry_ptr,
2102 });
2103 }
2104 None
2105 }
2106
2107 /// Gets the next closest occupied entry.
2108 ///
2109 /// [`HashMap::begin_sync`] and this method together allow the [`OccupiedEntry`] to effectively
2110 /// act as a mutable iterator over entries. This method never acquires more than one lock, even
2111 /// when it searches other buckets for the next closest occupied entry.
2112 ///
2113 /// # Examples
2114 ///
2115 /// ```
2116 /// use scc::HashMap;
2117 /// use scc::hash_map::Entry;
2118 ///
2119 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2120 ///
2121 /// assert!(hashmap.insert_sync(1, 0).is_ok());
2122 /// assert!(hashmap.insert_sync(2, 0).is_ok());
2123 ///
2124 /// let first_entry = hashmap.begin_sync().unwrap();
2125 /// let first_key = *first_entry.key();
2126 /// let second_entry = first_entry.next_sync().unwrap();
2127 /// let second_key = *second_entry.key();
2128 ///
2129 /// assert!(second_entry.next_sync().is_none());
2130 /// assert_eq!(first_key + second_key, 3);
2131 /// ```
2132 #[inline]
2133 #[must_use]
2134 pub fn next_sync(self) -> Option<Self> {
2135 let hashmap = self.hashmap;
2136 let mut entry_ptr = self.entry_ptr.clone();
2137 if let Some(locked_bucket) = self.locked_bucket.next_sync(hashmap, &mut entry_ptr) {
2138 return Some(OccupiedEntry {
2139 hashmap,
2140 locked_bucket,
2141 entry_ptr,
2142 });
2143 }
2144 None
2145 }
2146}
2147
2148impl<K, V, H> Debug for OccupiedEntry<'_, K, V, H>
2149where
2150 K: Debug + Eq + Hash,
2151 V: Debug,
2152 H: BuildHasher,
2153{
2154 #[inline]
2155 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2156 f.debug_struct("OccupiedEntry")
2157 .field("key", self.key())
2158 .field("value", self.get())
2159 .finish_non_exhaustive()
2160 }
2161}
2162
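/// An [`OccupiedEntry`] dereferences to its value, so the value can be read and, through
/// `DerefMut`, updated in place without calling `get`/`get_mut` explicitly; a minimal sketch:
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::default();
/// hashmap.entry_sync(7).or_insert(1);
///
/// if let Some(mut entry) = hashmap.begin_sync() {
///     // `DerefMut` exposes the value for in-place mutation.
///     *entry += 1;
///     assert_eq!(*entry, 2);
/// }
/// ```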
2163impl<K, V, H> Deref for OccupiedEntry<'_, K, V, H>
2164where
2165 K: Eq + Hash,
2166 H: BuildHasher,
2167{
2168 type Target = V;
2169
2170 #[inline]
2171 fn deref(&self) -> &Self::Target {
2172 self.get()
2173 }
2174}
2175
2176impl<K, V, H> DerefMut for OccupiedEntry<'_, K, V, H>
2177where
2178 K: Eq + Hash,
2179 H: BuildHasher,
2180{
2181 #[inline]
2182 fn deref_mut(&mut self) -> &mut Self::Target {
2183 self.get_mut()
2184 }
2185}
2186
2187impl<'h, K, V, H> VacantEntry<'h, K, V, H>
2188where
2189 K: Eq + Hash,
2190 H: BuildHasher,
2191{
2192    /// Gets a reference to the key that would be used when inserting a value.
2193 ///
2194 /// # Examples
2195 ///
2196 /// ```
2197 /// use scc::HashMap;
2198 ///
2199 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2200 /// assert_eq!(hashmap.entry_sync(11).key(), &11);
2201 /// ```
2202 #[inline]
2203 pub fn key(&self) -> &K {
2204 &self.key
2205 }
2206
2207 /// Takes ownership of the key.
2208 ///
2209 /// # Examples
2210 ///
2211 /// ```
2212 /// use scc::HashMap;
2213 /// use scc::hash_map::Entry;
2214 ///
2215 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2216 ///
2217 /// if let Entry::Vacant(v) = hashmap.entry_sync(17) {
2218 /// assert_eq!(v.into_key(), 17);
2219 /// };
2220 /// ```
2221 #[inline]
2222 pub fn into_key(self) -> K {
2223 self.key
2224 }
2225
2226    /// Sets the value of the entry with the [`VacantEntry`]'s key, and returns an [`OccupiedEntry`].
2227 ///
2228 /// # Examples
2229 ///
2230 /// ```
2231 /// use scc::HashMap;
2232 /// use scc::hash_map::Entry;
2233 ///
2234 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2235 ///
2236    /// if let Entry::Vacant(v) = hashmap.entry_sync(19) {
2237    ///     v.insert_entry(29);
2238 /// }
2239 ///
2240 /// assert_eq!(hashmap.read_sync(&19, |_, v| *v), Some(29));
2241 /// ```
2242 #[inline]
2243 pub fn insert_entry(self, val: V) -> OccupiedEntry<'h, K, V, H> {
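        // The bucket lock is already held by this `VacantEntry`, so the new entry can be
        // inserted directly and returned as an `OccupiedEntry`.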
2244 let entry_ptr = self.locked_bucket.insert(self.hash, (self.key, val));
2245 OccupiedEntry {
2246 hashmap: self.hashmap,
2247 locked_bucket: self.locked_bucket,
2248 entry_ptr,
2249 }
2250 }
2251}
2252
2253impl<K, V, H> Debug for VacantEntry<'_, K, V, H>
2254where
2255 K: Debug + Eq + Hash,
2256 V: Debug,
2257 H: BuildHasher,
2258{
2259 #[inline]
2260 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2261 f.debug_tuple("VacantEntry").field(self.key()).finish()
2262 }
2263}
2264
2265impl<K, V> ConsumableEntry<'_, K, V> {
2266 /// Consumes the entry by moving out the key and value.
2267 ///
2268 /// # Examples
2269 ///
2270 /// ```
2271 /// use scc::HashMap;
2272 ///
2273 /// let hashmap: HashMap<u64, u32> = HashMap::default();
2274 ///
2275 /// assert!(hashmap.insert_sync(1, 0).is_ok());
2276 /// assert!(hashmap.insert_sync(2, 1).is_ok());
2277 /// assert!(hashmap.insert_sync(3, 2).is_ok());
2278 ///
2279 /// let mut consumed = None;
2280 ///
2281 /// hashmap.iter_mut_sync(|entry| {
2282 /// if entry.0 == 1 {
2283 /// consumed.replace(entry.consume().1);
2284 /// }
2285 /// true
2286 /// });
2287 ///
2288 /// assert!(!hashmap.contains_sync(&1));
2289 /// assert_eq!(consumed, Some(0));
2290 /// ```
2291 #[inline]
2292 #[must_use]
2293 pub fn consume(self) -> (K, V) {
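        // Record that an entry has been removed before moving it out of the bucket.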
2294 *self.remove_probe |= true;
2295 self.locked_bucket
2296 .writer
2297 .remove(self.locked_bucket.data_block, self.entry_ptr)
2298 }
2299}
2300
2301impl<K, V> Deref for ConsumableEntry<'_, K, V> {
2302 type Target = (K, V);
2303
2304 #[inline]
2305 fn deref(&self) -> &Self::Target {
2306 self.locked_bucket.entry(self.entry_ptr)
2307 }
2308}
2309
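/// A [`ConsumableEntry`] handed to [`HashMap::iter_mut_sync`] can be updated in place through
/// the mutable dereference; a minimal sketch:
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::default();
/// assert!(hashmap.insert_sync(1, 10).is_ok());
///
/// hashmap.iter_mut_sync(|mut entry| {
///     // `entry.1` is the value; `DerefMut` makes it writable in place.
///     entry.1 += 1;
///     true
/// });
///
/// assert_eq!(hashmap.read_sync(&1, |_, v| *v), Some(11));
/// ```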
2310impl<K, V> DerefMut for ConsumableEntry<'_, K, V> {
2311 #[inline]
2312 fn deref_mut(&mut self) -> &mut Self::Target {
2313 self.locked_bucket.entry_mut(self.entry_ptr)
2314 }
2315}
2316
2317impl<K, V, H> Reserve<'_, K, V, H>
2318where
2319 K: Eq + Hash,
2320 H: BuildHasher,
2321{
2322 /// Returns the number of reserved slots.
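    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the [`Reserve`] is obtained through [`HashMap::reserve`]:
    ///
    /// ```
    /// use scc::HashMap;
    ///
    /// let hashmap: HashMap<u64, u32> = HashMap::default();
    ///
    /// // `reserve` is assumed to return a `Reserve` guard on success.
    /// let reserved = hashmap.reserve(100).unwrap();
    /// assert!(reserved.additional_capacity() > 0);
    /// ```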
2323 #[inline]
2324 #[must_use]
2325 pub fn additional_capacity(&self) -> usize {
2326 self.additional
2327 }
2328}
2329
2330impl<K, V, H> AsRef<HashMap<K, V, H>> for Reserve<'_, K, V, H>
2331where
2332 K: Eq + Hash,
2333 H: BuildHasher,
2334{
2335 #[inline]
2336 fn as_ref(&self) -> &HashMap<K, V, H> {
2337 self.hashmap
2338 }
2339}
2340
2341impl<K, V, H> Debug for Reserve<'_, K, V, H>
2342where
2343 K: Eq + Hash,
2344 H: BuildHasher,
2345{
2346 #[inline]
2347 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2348 f.debug_tuple("Reserve").field(&self.additional).finish()
2349 }
2350}
2351
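/// A [`Reserve`] dereferences to the [`HashMap`] it reserved capacity in, so the map remains
/// directly usable through the guard; a minimal sketch, assuming the guard is obtained through
/// [`HashMap::reserve`]:
///
/// ```
/// use scc::HashMap;
///
/// let hashmap: HashMap<u64, u32> = HashMap::default();
///
/// // `reserve` is assumed to return a `Reserve` guard on success.
/// let reserved = hashmap.reserve(64).unwrap();
///
/// // `HashMap` methods are reachable through the guard.
/// assert!(reserved.insert_sync(1, 1).is_ok());
/// assert_eq!(reserved.len(), 1);
/// ```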
2352impl<K, V, H> Deref for Reserve<'_, K, V, H>
2353where
2354 K: Eq + Hash,
2355 H: BuildHasher,
2356{
2357 type Target = HashMap<K, V, H>;
2358
2359 #[inline]
2360 fn deref(&self) -> &Self::Target {
2361 self.hashmap
2362 }
2363}
2364
2365impl<K, V, H> Drop for Reserve<'_, K, V, H>
2366where
2367 K: Eq + Hash,
2368 H: BuildHasher,
2369{
2370 #[inline]
2371 fn drop(&mut self) {
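        // Give back the reserved capacity, then try to shrink the bucket array now that
        // the required minimum capacity may have decreased.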
2372 let result = self
2373 .hashmap
2374 .minimum_capacity
2375 .fetch_sub(self.additional, Relaxed);
2376 debug_assert!(result >= self.additional);
2377
2378 let guard = Guard::new();
2379 if let Some(current_array) = self.hashmap.bucket_array(&guard) {
2380 self.try_shrink(current_array, 0, &guard);
2381 }
2382 }
2383}