scc/hash_index.rs
1//! [`HashIndex`] is a read-optimized concurrent and asynchronous hash map.
2
3use super::ebr::{AtomicShared, Guard, Shared};
4use super::hash_table::bucket::{Bucket, EntryPtr, Locker, OPTIMISTIC};
5use super::hash_table::bucket_array::BucketArray;
6use super::hash_table::{HashTable, LockedEntry};
7use super::wait_queue::AsyncWait;
8use super::Equivalent;
9use std::collections::hash_map::RandomState;
10use std::fmt::{self, Debug};
11use std::hash::{BuildHasher, Hash};
12use std::iter::FusedIterator;
13use std::ops::{Deref, RangeInclusive};
14use std::panic::UnwindSafe;
15use std::pin::Pin;
16use std::ptr;
17use std::sync::atomic::AtomicUsize;
18use std::sync::atomic::Ordering::{Acquire, Relaxed};
19
/// Scalable concurrent hash index.
///
/// [`HashIndex`] is a concurrent and asynchronous hash map data structure optimized for parallel
/// read operations. The key characteristics of [`HashIndex`] are similar to that of
/// [`HashMap`](super::HashMap) except its read operations are lock-free.
///
/// ## The key differences between [`HashIndex`] and [`HashMap`](crate::HashMap).
///
/// * Lock-free-read: read and scan operations are never blocked and do not modify shared data.
/// * Immutability: the data in the container is immutable until it becomes unreachable.
/// * Linearizability: linearizability of read operations relies on the CPU architecture.
///
/// ## The key statistics for [`HashIndex`]
///
/// * The expected size of metadata for a single key-value pair: 2-byte.
/// * The expected number of atomic write operations required for an operation on a single key: 2.
/// * The expected number of atomic variables accessed during a single key operation: 2.
/// * The number of entries managed by a single bucket without a linked list: 32.
/// * The expected maximum linked list length when resize is triggered: log(capacity) / 8.
///
/// ## Unwind safety
///
/// [`HashIndex`] is impervious to out-of-memory errors and panics in user-specified code on one
/// condition; `H::Hasher::hash`, `K::drop` and `V::drop` must not panic.
pub struct HashIndex<K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The current bucket array; `null` until the first entry is inserted
    // (see `with_capacity_and_hasher`, which pre-allocates it for a non-zero capacity).
    array: AtomicShared<BucketArray<K, V, (), OPTIMISTIC>>,
    // Lower bound on the capacity; set by `with_capacity_and_hasher` and raised
    // by `reserve` for as long as the returned `Reserve` is alive.
    minimum_capacity: AtomicUsize,
    // Builds the hashers used to hash keys.
    build_hasher: H,
}
52
/// [`Entry`] represents a single entry in a [`HashIndex`].
///
/// Returned by [`HashIndex::entry`] and related methods; the entry is locked for
/// exclusive access while the [`Entry`] is alive.
pub enum Entry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    /// An occupied entry.
    Occupied(OccupiedEntry<'h, K, V, H>),

    /// A vacant entry.
    Vacant(VacantEntry<'h, K, V, H>),
}
64
/// [`OccupiedEntry`] is a view into an occupied entry in a [`HashIndex`].
pub struct OccupiedEntry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The [`HashIndex`] the entry belongs to.
    hashindex: &'h HashIndex<K, V, H>,
    // The locked bucket plus a pointer to the entry; holding it gives this view
    // exclusive ownership of the entry (see `HashIndex::get`).
    locked_entry: LockedEntry<'h, K, V, (), OPTIMISTIC>,
}
73
/// [`VacantEntry`] is a view into a vacant entry in a [`HashIndex`].
pub struct VacantEntry<'h, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The [`HashIndex`] the entry would belong to.
    hashindex: &'h HashIndex<K, V, H>,
    // The key that would be inserted into this slot.
    key: K,
    // The precomputed hash of `key`, kept so insertion need not re-hash.
    hash: u64,
    // The locked bucket where `key` would reside; the entry pointer is invalid.
    locked_entry: LockedEntry<'h, K, V, (), OPTIMISTIC>,
}
84
/// [`Reserve`] keeps the capacity of the associated [`HashIndex`] higher than a certain level.
///
/// The [`HashIndex`] does not shrink the capacity below the reserved capacity.
pub struct Reserve<'h, K, V, H = RandomState>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    // The [`HashIndex`] whose minimum capacity is raised.
    hashindex: &'h HashIndex<K, V, H>,
    // The amount of additional capacity being reserved; per `HashIndex::reserve`,
    // the capacity may shrink again once this `Reserve` is dropped.
    additional: usize,
}
97
/// An iterator over the entries of a [`HashIndex`].
///
/// An [`Iter`] iterates over all the entries that survive the [`Iter`].
pub struct Iter<'h, 'g, K, V, H = RandomState>
where
    H: BuildHasher,
{
    // The [`HashIndex`] being iterated.
    hashindex: &'h HashIndex<K, V, H>,
    // The bucket array currently being traversed; `None` before the first `next()`.
    current_array: Option<&'g BucketArray<K, V, (), OPTIMISTIC>>,
    // Index of the bucket currently being scanned within `current_array`.
    current_index: usize,
    // The bucket currently being scanned; `None` before the first `next()`.
    current_bucket: Option<&'g Bucket<K, V, (), OPTIMISTIC>>,
    // Cursor into the current bucket's entries.
    current_entry_ptr: EntryPtr<'g, K, V, OPTIMISTIC>,
    // The guard keeps read entries reachable for as long as it lives.
    guard: &'g Guard,
}
112
113impl<K, V, H> HashIndex<K, V, H>
114where
115 H: BuildHasher,
116{
117 /// Creates an empty [`HashIndex`] with the given [`BuildHasher`].
118 ///
119 /// # Examples
120 ///
121 /// ```
122 /// use scc::HashIndex;
123 /// use std::collections::hash_map::RandomState;
124 ///
125 /// let hashindex: HashIndex<u64, u32, RandomState> =
126 /// HashIndex::with_hasher(RandomState::new());
127 /// ```
128 #[cfg(not(feature = "loom"))]
129 #[inline]
130 pub const fn with_hasher(build_hasher: H) -> Self {
131 Self {
132 array: AtomicShared::null(),
133 minimum_capacity: AtomicUsize::new(0),
134 build_hasher,
135 }
136 }
137
138 /// Creates an empty [`HashIndex`] with the given [`BuildHasher`].
139 #[cfg(feature = "loom")]
140 #[inline]
141 pub fn with_hasher(build_hasher: H) -> Self {
142 Self {
143 array: AtomicShared::null(),
144 minimum_capacity: AtomicUsize::new(0),
145 build_hasher,
146 }
147 }
148
149 /// Creates an empty [`HashIndex`] with the specified capacity and [`BuildHasher`].
150 ///
151 /// The actual capacity is equal to or greater than the specified capacity.
152 ///
153 /// # Examples
154 ///
155 /// ```
156 /// use scc::HashIndex;
157 /// use std::collections::hash_map::RandomState;
158 ///
159 /// let hashindex: HashIndex<u64, u32, RandomState> =
160 /// HashIndex::with_capacity_and_hasher(1000, RandomState::new());
161 ///
162 /// let result = hashindex.capacity();
163 /// assert_eq!(result, 1024);
164 /// ```
165 #[inline]
166 pub fn with_capacity_and_hasher(capacity: usize, build_hasher: H) -> Self {
167 let (array, minimum_capacity) = if capacity == 0 {
168 (AtomicShared::null(), AtomicUsize::new(0))
169 } else {
170 let array = unsafe {
171 Shared::new_unchecked(BucketArray::<K, V, (), OPTIMISTIC>::new(
172 capacity,
173 AtomicShared::null(),
174 ))
175 };
176 let minimum_capacity = array.num_entries();
177 (
178 AtomicShared::from(array),
179 AtomicUsize::new(minimum_capacity),
180 )
181 };
182 Self {
183 array,
184 minimum_capacity,
185 build_hasher,
186 }
187 }
188}
189
190impl<K, V, H> HashIndex<K, V, H>
191where
192 K: 'static + Clone + Eq + Hash,
193 V: 'static + Clone,
194 H: BuildHasher,
195{
196 /// Temporarily increases the minimum capacity of the [`HashIndex`].
197 ///
198 /// A [`Reserve`] is returned if the [`HashIndex`] could increase the minimum capacity while
199 /// the increased capacity is not exclusively owned by the returned [`Reserve`], allowing
200 /// others to benefit from it. The memory for the additional space may not be immediately
201 /// allocated if the [`HashIndex`] is empty or currently being resized, however once the memory
202 /// is reserved eventually, the capacity will not shrink below the additional capacity until
203 /// the returned [`Reserve`] is dropped.
204 ///
205 /// # Errors
206 ///
207 /// Returns `None` if a too large number is given.
208 ///
209 /// # Examples
210 ///
211 /// ```
212 /// use scc::HashIndex;
213 ///
214 /// let hashindex: HashIndex<usize, usize> = HashIndex::with_capacity(1000);
215 /// assert_eq!(hashindex.capacity(), 1024);
216 ///
217 /// let reserved = hashindex.reserve(10000);
218 /// assert!(reserved.is_some());
219 /// assert_eq!(hashindex.capacity(), 16384);
220 ///
221 /// assert!(hashindex.reserve(usize::MAX).is_none());
222 /// assert_eq!(hashindex.capacity(), 16384);
223 ///
224 /// for i in 0..16 {
225 /// assert!(hashindex.insert(i, i).is_ok());
226 /// }
227 /// drop(reserved);
228 ///
229 /// assert_eq!(hashindex.capacity(), 1024);
230 /// ```
231 #[inline]
232 pub fn reserve(&self, additional_capacity: usize) -> Option<Reserve<'_, K, V, H>> {
233 let additional = self.reserve_capacity(additional_capacity);
234 if additional == 0 {
235 None
236 } else {
237 Some(Reserve {
238 hashindex: self,
239 additional,
240 })
241 }
242 }
243
    /// Gets the entry associated with the given key in the map for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<char, u32> = HashIndex::default();
    ///
    /// for ch in "a short treatise on fungi".chars() {
    ///     unsafe {
    ///         hashindex.entry(ch).and_modify(|counter| *counter += 1).or_insert(1);
    ///     }
    /// }
    ///
    /// assert_eq!(hashindex.peek_with(&'s', |_, v| *v), Some(2));
    /// assert_eq!(hashindex.peek_with(&'t', |_, v| *v), Some(3));
    /// assert!(hashindex.peek_with(&'y', |_, v| *v).is_none());
    /// ```
    #[inline]
    pub fn entry(&self, key: K) -> Entry<'_, K, V, H> {
        let guard = Guard::new();
        let hash = self.hash(&key);
        // SAFETY: with no `AsyncWait` supplied (`&mut ()`), `reserve_entry` blocks
        // synchronously instead of failing, so the result is always `Ok` — cf.
        // `entry_async`, where an `Err` triggers a retry after awaiting.
        // `prolonged_guard_ref` extends the guard lifetime to `'_` so the returned
        // `Entry` can borrow from it.
        let locked_entry = unsafe {
            self.reserve_entry(&key, hash, &mut (), self.prolonged_guard_ref(&guard))
                .ok()
                .unwrap_unchecked()
        };
        // A valid entry pointer means the key already exists in the locked bucket.
        if locked_entry.entry_ptr.is_valid() {
            Entry::Occupied(OccupiedEntry {
                hashindex: self,
                locked_entry,
            })
        } else {
            // Keep the key and its hash so a later insertion need not re-hash.
            Entry::Vacant(VacantEntry {
                hashindex: self,
                key,
                hash,
                locked_entry,
            })
        }
    }
286
    /// Gets the entry associated with the given key in the map for in-place manipulation.
    ///
    /// It is an asynchronous method returning an `impl Future` for the caller to await.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<char, u32> = HashIndex::default();
    ///
    /// let future_entry = hashindex.entry_async('b');
    /// ```
    #[inline]
    pub async fn entry_async(&self, key: K) -> Entry<'_, K, V, H> {
        let hash = self.hash(&key);
        // Retry loop: each iteration tries to reserve the entry without blocking;
        // on failure the method awaits `async_wait`, which is signalled when the
        // bucket lock may have become available.
        loop {
            let mut async_wait = AsyncWait::default();
            let mut async_wait_pinned = Pin::new(&mut async_wait);
            {
                // Scoped so the guard is dropped before awaiting.
                let guard = Guard::new();
                if let Ok(locked_entry) = self.reserve_entry(
                    &key,
                    hash,
                    &mut async_wait_pinned,
                    self.prolonged_guard_ref(&guard),
                ) {
                    // A valid entry pointer means the key already exists.
                    if locked_entry.entry_ptr.is_valid() {
                        return Entry::Occupied(OccupiedEntry {
                            hashindex: self,
                            locked_entry,
                        });
                    }
                    return Entry::Vacant(VacantEntry {
                        hashindex: self,
                        key,
                        hash,
                        locked_entry,
                    });
                }
            }
            async_wait_pinned.await;
        }
    }
331
332 /// Tries to get the entry associated with the given key in the map for in-place manipulation.
333 ///
334 /// Returns `None` if the entry could not be locked.
335 ///
336 /// # Examples
337 ///
338 /// ```
339 /// use scc::HashIndex;
340 ///
341 /// let hashindex: HashIndex<usize, usize> = HashIndex::default();
342 ///
343 /// assert!(hashindex.insert(0, 1).is_ok());
344 /// assert!(hashindex.try_entry(0).is_some());
345 /// ```
346 #[inline]
347 pub fn try_entry(&self, key: K) -> Option<Entry<'_, K, V, H>> {
348 let guard = Guard::new();
349 let hash = self.hash(&key);
350 let locked_entry = self.try_reserve_entry(&key, hash, self.prolonged_guard_ref(&guard))?;
351 if locked_entry.entry_ptr.is_valid() {
352 Some(Entry::Occupied(OccupiedEntry {
353 hashindex: self,
354 locked_entry,
355 }))
356 } else {
357 Some(Entry::Vacant(VacantEntry {
358 hashindex: self,
359 key,
360 hash,
361 locked_entry,
362 }))
363 }
364 }
365
366 /// Gets the first occupied entry for in-place manipulation.
367 ///
368 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next`] or
369 /// [`OccupiedEntry::next_async`] can act as a mutable iterator over entries.
370 ///
371 /// # Examples
372 ///
373 /// ```
374 /// use scc::HashIndex;
375 ///
376 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
377 ///
378 /// assert!(hashindex.insert(1, 0).is_ok());
379 ///
380 /// let mut first_entry = hashindex.first_entry().unwrap();
381 /// unsafe {
382 /// *first_entry.get_mut() = 2;
383 /// }
384 ///
385 /// assert!(first_entry.next().is_none());
386 /// assert_eq!(hashindex.peek_with(&1, |_, v| *v), Some(2));
387 /// ```
388 #[inline]
389 pub fn first_entry(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
390 let guard = Guard::new();
391 let prolonged_guard = self.prolonged_guard_ref(&guard);
392 if let Some(locked_entry) = self.lock_first_entry(prolonged_guard) {
393 return Some(OccupiedEntry {
394 hashindex: self,
395 locked_entry,
396 });
397 }
398 None
399 }
400
401 /// Gets the first occupied entry for in-place manipulation.
402 ///
403 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next`] or
404 /// [`OccupiedEntry::next_async`] can act as a mutable iterator over entries.
405 ///
406 /// It is an asynchronous method returning an `impl Future` for the caller to await.
407 ///
408 /// # Examples
409 ///
410 /// ```
411 /// use scc::HashIndex;
412 ///
413 /// let hashindex: HashIndex<char, u32> = HashIndex::default();
414 ///
415 /// let future_entry = hashindex.first_entry_async();
416 /// ```
417 #[inline]
418 pub async fn first_entry_async(&self) -> Option<OccupiedEntry<'_, K, V, H>> {
419 if let Some(locked_entry) = LockedEntry::first_entry_async(self).await {
420 return Some(OccupiedEntry {
421 hashindex: self,
422 locked_entry,
423 });
424 }
425 None
426 }
427
428 /// Finds any entry satisfying the supplied predicate for in-place manipulation.
429 ///
430 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next`] or
431 /// [`OccupiedEntry::next_async`] can act as a mutable iterator over entries.
432 ///
433 /// # Examples
434 ///
435 /// ```
436 /// use scc::HashIndex;
437 ///
438 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
439 ///
440 /// assert!(hashindex.insert(1, 0).is_ok());
441 /// assert!(hashindex.insert(2, 3).is_ok());
442 ///
443 /// let mut entry = hashindex.any_entry(|k, _| *k == 2).unwrap();
444 /// assert_eq!(*entry.get(), 3);
445 /// ```
446 #[inline]
447 pub fn any_entry<P: FnMut(&K, &V) -> bool>(
448 &self,
449 pred: P,
450 ) -> Option<OccupiedEntry<'_, K, V, H>> {
451 let guard = Guard::new();
452 let prolonged_guard = self.prolonged_guard_ref(&guard);
453 let locked_entry = self.find_entry(pred, prolonged_guard)?;
454 Some(OccupiedEntry {
455 hashindex: self,
456 locked_entry,
457 })
458 }
459
460 /// Finds any entry satisfying the supplied predicate for in-place manipulation.
461 ///
462 /// The returned [`OccupiedEntry`] in combination with [`OccupiedEntry::next`] or
463 /// [`OccupiedEntry::next_async`] can act as a mutable iterator over entries.
464 ///
465 /// It is an asynchronous method returning an `impl Future` for the caller to await.
466 ///
467 /// # Examples
468 ///
469 /// ```
470 /// use scc::HashIndex;
471 ///
472 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
473 ///
474 /// let future_entry = hashindex.any_entry_async(|k, _| *k == 2);
475 /// ```
476 #[inline]
477 pub async fn any_entry_async<P: FnMut(&K, &V) -> bool>(
478 &self,
479 mut pred: P,
480 ) -> Option<OccupiedEntry<'_, K, V, H>> {
481 if let Some(locked_entry) = LockedEntry::first_entry_async(self).await {
482 let mut entry = OccupiedEntry {
483 hashindex: self,
484 locked_entry,
485 };
486 loop {
487 if pred(entry.key(), entry.get()) {
488 return Some(entry);
489 }
490 entry = entry.next()?;
491 }
492 }
493 None
494 }
495
496 /// Inserts a key-value pair into the [`HashIndex`].
497 ///
498 /// # Errors
499 ///
500 /// Returns an error along with the supplied key-value pair if the key exists.
501 ///
502 /// # Examples
503 ///
504 /// ```
505 /// use scc::HashIndex;
506 ///
507 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
508 ///
509 /// assert!(hashindex.insert(1, 0).is_ok());
510 /// assert_eq!(hashindex.insert(1, 1).unwrap_err(), (1, 1));
511 /// ```
512 #[inline]
513 pub fn insert(&self, key: K, val: V) -> Result<(), (K, V)> {
514 let guard = Guard::new();
515 let hash = self.hash(&key);
516 if let Ok(Some((k, v))) = self.insert_entry(key, val, hash, &mut (), &guard) {
517 Err((k, v))
518 } else {
519 Ok(())
520 }
521 }
522
    /// Inserts a key-value pair into the [`HashIndex`].
    ///
    /// It is an asynchronous method returning an `impl Future` for the caller to await.
    ///
    /// # Errors
    ///
    /// Returns an error along with the supplied key-value pair if the key exists.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// ```
    #[inline]
    pub async fn insert_async(&self, mut key: K, mut val: V) -> Result<(), (K, V)> {
        let hash = self.hash(&key);
        // Retry loop: `insert_entry` hands the pair back through `Err` when the bucket
        // lock could not be acquired; the pair is reclaimed and the attempt is retried
        // after awaiting `async_wait`.
        loop {
            let mut async_wait = AsyncWait::default();
            let mut async_wait_pinned = Pin::new(&mut async_wait);
            match self.insert_entry(key, val, hash, &mut async_wait_pinned, &Guard::new()) {
                // The key already exists: report it with the rejected pair.
                Ok(Some(returned)) => return Err(returned),
                // Successfully inserted.
                Ok(None) => return Ok(()),
                // Could not lock the bucket: take the pair back and retry.
                Err(returned) => {
                    key = returned.0;
                    val = returned.1;
                }
            }
            async_wait_pinned.await;
        }
    }
556
557 /// Removes a key-value pair if the key exists.
558 ///
559 /// Returns `false` if the key does not exist.
560 ///
561 /// Returns `true` if the key existed and the condition was met after marking the entry
562 /// unreachable; the memory will be reclaimed later.
563 ///
564 /// # Examples
565 ///
566 /// ```
567 /// use scc::HashIndex;
568 ///
569 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
570 ///
571 /// assert!(!hashindex.remove(&1));
572 /// assert!(hashindex.insert(1, 0).is_ok());
573 /// assert!(hashindex.remove(&1));
574 /// ```
575 #[inline]
576 pub fn remove<Q>(&self, key: &Q) -> bool
577 where
578 Q: Equivalent<K> + Hash + ?Sized,
579 {
580 self.remove_if(key, |_| true)
581 }
582
583 /// Removes a key-value pair if the key exists.
584 ///
585 /// Returns `false` if the key does not exist. It is an asynchronous method returning an
586 /// `impl Future` for the caller to await.
587 ///
588 /// Returns `true` if the key existed and the condition was met after marking the entry
589 /// unreachable; the memory will be reclaimed later.
590 ///
591 /// # Examples
592 ///
593 /// ```
594 /// use scc::HashIndex;
595 ///
596 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
597 /// let future_insert = hashindex.insert_async(11, 17);
598 /// let future_remove = hashindex.remove_async(&11);
599 /// ```
600 #[inline]
601 pub async fn remove_async<Q>(&self, key: &Q) -> bool
602 where
603 Q: Equivalent<K> + Hash + ?Sized,
604 {
605 self.remove_if_async(key, |_| true).await
606 }
607
608 /// Removes a key-value pair if the key exists and the given condition is met.
609 ///
610 /// Returns `false` if the key does not exist or the condition was not met.
611 ///
612 /// Returns `true` if the key existed and the condition was met after marking the entry
613 /// unreachable; the memory will be reclaimed later.
614 ///
615 /// # Examples
616 ///
617 /// ```
618 /// use scc::HashIndex;
619 ///
620 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
621 ///
622 /// assert!(hashindex.insert(1, 0).is_ok());
623 /// assert!(!hashindex.remove_if(&1, |v| *v == 1));
624 /// assert!(hashindex.remove_if(&1, |v| *v == 0));
625 /// ```
626 #[inline]
627 pub fn remove_if<Q, F: FnOnce(&V) -> bool>(&self, key: &Q, condition: F) -> bool
628 where
629 Q: Equivalent<K> + Hash + ?Sized,
630 {
631 self.remove_entry(
632 key,
633 self.hash(key),
634 |v: &mut V| condition(v),
635 |r| r.is_some(),
636 &mut (),
637 &Guard::new(),
638 )
639 .ok()
640 .map_or(false, |r| r)
641 }
642
    /// Removes a key-value pair if the key exists and the given condition is met.
    ///
    /// Returns `false` if the key does not exist or the condition was not met. It is an
    /// asynchronous method returning an `impl Future` for the caller to await.
    ///
    /// Returns `true` if the key existed and the condition was met after marking the entry
    /// unreachable; the memory will be reclaimed later.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// let future_remove = hashindex.remove_if_async(&11, |_| true);
    /// ```
    #[inline]
    pub async fn remove_if_async<Q, F: FnOnce(&V) -> bool>(&self, key: &Q, condition: F) -> bool
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        let hash = self.hash(key);
        // Wrap the `FnOnce` condition in a closure that `remove_entry` can return via
        // `Err` when the bucket lock is unavailable, so it can be reused on retry.
        let mut condition = |v: &mut V| condition(v);
        loop {
            let mut async_wait = AsyncWait::default();
            let mut async_wait_pinned = Pin::new(&mut async_wait);
            match self.remove_entry(
                key,
                hash,
                condition,
                |r| r.is_some(),
                &mut async_wait_pinned,
                &Guard::new(),
            ) {
                Ok(r) => return r,
                // Lock not acquired: the unused condition is handed back; retry after
                // awaiting the wait queue.
                Err(c) => condition = c,
            }
            async_wait_pinned.await;
        }
    }
684
685 /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
686 ///
687 /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
688 /// use [`peek`](Self::peek) if read-only access is sufficient.
689 ///
690 /// Returns `None` if the key does not exist.
691 ///
692 /// # Examples
693 ///
694 /// ```
695 /// use scc::HashIndex;
696 ///
697 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
698 ///
699 /// assert!(hashindex.get(&1).is_none());
700 /// assert!(hashindex.insert(1, 10).is_ok());
701 /// assert_eq!(*hashindex.get(&1).unwrap().get(), 10);
702 /// assert_eq!(*hashindex.get(&1).unwrap(), 10);
703 /// ```
704 #[inline]
705 pub fn get<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
706 where
707 Q: Equivalent<K> + Hash + ?Sized,
708 {
709 let guard = Guard::new();
710 let locked_entry = self
711 .get_entry(
712 key,
713 self.hash(key),
714 &mut (),
715 self.prolonged_guard_ref(&guard),
716 )
717 .ok()
718 .flatten()?;
719 Some(OccupiedEntry {
720 hashindex: self,
721 locked_entry,
722 })
723 }
724
    /// Gets an [`OccupiedEntry`] corresponding to the key for in-place modification.
    ///
    /// [`OccupiedEntry`] exclusively owns the entry, preventing others from gaining access to it:
    /// use [`peek`](Self::peek) if read-only access is sufficient.
    ///
    /// Returns `None` if the key does not exist. It is an asynchronous method returning an
    /// `impl Future` for the caller to await.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    /// let future_insert = hashindex.insert_async(11, 17);
    /// let future_get = hashindex.get_async(&11);
    /// ```
    #[inline]
    pub async fn get_async<Q>(&self, key: &Q) -> Option<OccupiedEntry<'_, K, V, H>>
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        let hash = self.hash(key);
        // Retry loop: `Err` from `get_entry` means the bucket lock was not acquired;
        // await the wait queue and try again.
        loop {
            let mut async_wait = AsyncWait::default();
            let mut async_wait_pinned = Pin::new(&mut async_wait);
            if let Ok(result) = self.get_entry(
                key,
                hash,
                &mut async_wait_pinned,
                self.prolonged_guard_ref(&Guard::new()),
            ) {
                if let Some(locked_entry) = result {
                    return Some(OccupiedEntry {
                        hashindex: self,
                        locked_entry,
                    });
                }
                // The bucket was locked but the key was not found.
                return None;
            }
            async_wait_pinned.await;
        }
    }
768
769 /// Returns a guarded reference to the value for the specified key without acquiring locks.
770 ///
771 /// Returns `None` if the key does not exist. The returned reference can survive as long as the
772 /// associated [`Guard`] is alive.
773 ///
774 /// # Examples
775 ///
776 /// ```
777 /// use scc::ebr::Guard;
778 /// use scc::HashIndex;
779 ///
780 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
781 ///
782 /// assert!(hashindex.insert(1, 10).is_ok());
783 ///
784 /// let guard = Guard::new();
785 /// let value_ref = hashindex.peek(&1, &guard).unwrap();
786 /// assert_eq!(*value_ref, 10);
787 /// ```
788 #[inline]
789 pub fn peek<'g, Q>(&self, key: &Q, guard: &'g Guard) -> Option<&'g V>
790 where
791 Q: Equivalent<K> + Hash + ?Sized,
792 {
793 self.peek_entry(key, self.hash(key), guard).map(|(_, v)| v)
794 }
795
796 /// Peeks a key-value pair without acquiring locks.
797 ///
798 /// Returns `None` if the key does not exist.
799 ///
800 /// # Examples
801 ///
802 /// ```
803 /// use scc::HashIndex;
804 ///
805 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
806 ///
807 /// assert!(hashindex.peek_with(&1, |_, v| *v).is_none());
808 /// assert!(hashindex.insert(1, 10).is_ok());
809 /// assert_eq!(hashindex.peek_with(&1, |_, v| *v).unwrap(), 10);
810 /// ```
811 #[inline]
812 pub fn peek_with<Q, R, F: FnOnce(&K, &V) -> R>(&self, key: &Q, reader: F) -> Option<R>
813 where
814 Q: Equivalent<K> + Hash + ?Sized,
815 {
816 let guard = Guard::new();
817 self.peek_entry(key, self.hash(key), &guard)
818 .map(|(k, v)| reader(k, v))
819 }
820
821 /// Returns `true` if the [`HashIndex`] contains a value for the specified key.
822 ///
823 /// # Examples
824 ///
825 /// ```
826 /// use scc::HashIndex;
827 ///
828 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
829 ///
830 /// assert!(!hashindex.contains(&1));
831 /// assert!(hashindex.insert(1, 0).is_ok());
832 /// assert!(hashindex.contains(&1));
833 /// ```
834 #[inline]
835 pub fn contains<Q>(&self, key: &Q) -> bool
836 where
837 Q: Equivalent<K> + Hash + ?Sized,
838 {
839 self.peek_with(key, |_, _| ()).is_some()
840 }
841
842 /// Retains the entries specified by the predicate.
843 ///
844 /// Entries that have existed since the invocation of the method are guaranteed to be visited
845 /// if they are not removed, however the same entry can be visited more than once if the
846 /// [`HashIndex`] gets resized by another thread.
847 ///
848 /// # Examples
849 ///
850 /// ```
851 /// use scc::HashIndex;
852 ///
853 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
854 ///
855 /// assert!(hashindex.insert(1, 0).is_ok());
856 /// assert!(hashindex.insert(2, 1).is_ok());
857 /// assert!(hashindex.insert(3, 2).is_ok());
858 ///
859 /// hashindex.retain(|k, v| *k == 1 && *v == 0);
860 ///
861 /// assert!(hashindex.contains(&1));
862 /// assert!(!hashindex.contains(&2));
863 /// assert!(!hashindex.contains(&3));
864 /// ```
865 #[inline]
866 pub fn retain<F: FnMut(&K, &V) -> bool>(&self, mut pred: F) {
867 self.retain_entries(|k, v| pred(k, v));
868 }
869
870 /// Retains the entries specified by the predicate.
871 ///
872 /// Entries that have existed since the invocation of the method are guaranteed to be visited
873 /// if they are not removed, however the same entry can be visited more than once if the
874 /// [`HashIndex`] gets resized by another thread.
875 ///
876 /// It is an asynchronous method returning an `impl Future` for the caller to await.
877 ///
878 /// # Examples
879 ///
880 /// ```
881 /// use scc::HashIndex;
882 ///
883 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
884 ///
885 /// let future_insert = hashindex.insert_async(1, 0);
886 /// let future_retain = hashindex.retain_async(|k, v| *k == 1);
887 /// ```
888 #[inline]
889 pub async fn retain_async<F: FnMut(&K, &V) -> bool>(&self, mut pred: F) {
890 let mut removed = false;
891 let mut current_array_holder = self.array.get_shared(Acquire, &Guard::new());
892 while let Some(current_array) = current_array_holder.take() {
893 self.cleanse_old_array_async(¤t_array).await;
894 for index in 0..current_array.num_buckets() {
895 loop {
896 let mut async_wait = AsyncWait::default();
897 let mut async_wait_pinned = Pin::new(&mut async_wait);
898 {
899 let guard = Guard::new();
900 let bucket = current_array.bucket_mut(index);
901 if let Ok(locker) =
902 Locker::try_lock_or_wait(bucket, &mut async_wait_pinned, &guard)
903 {
904 if let Some(mut locker) = locker {
905 let data_block_mut = current_array.data_block_mut(index);
906 let mut entry_ptr = EntryPtr::new(&guard);
907 while entry_ptr.move_to_next(&locker, &guard) {
908 let (k, v) = entry_ptr.get(data_block_mut);
909 if !pred(k, v) {
910 locker.mark_removed(&mut entry_ptr, &guard);
911 removed = true;
912 }
913 }
914 }
915 break;
916 };
917 }
918 async_wait_pinned.await;
919 }
920 }
921
922 if let Some(new_current_array) = self.array.get_shared(Acquire, &Guard::new()) {
923 if new_current_array.as_ptr() == current_array.as_ptr() {
924 break;
925 }
926 current_array_holder.replace(new_current_array);
927 continue;
928 }
929 break;
930 }
931
932 if removed {
933 self.try_resize(0, &Guard::new());
934 }
935 }
936
937 /// Clears the [`HashIndex`] by removing all key-value pairs.
938 ///
939 /// # Examples
940 ///
941 /// ```
942 /// use scc::HashIndex;
943 ///
944 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
945 ///
946 /// assert!(hashindex.insert(1, 0).is_ok());
947 /// hashindex.clear();
948 ///
949 /// assert!(!hashindex.contains(&1));
950 /// ```
951 pub fn clear(&self) {
952 self.retain(|_, _| false);
953 }
954
955 /// Clears the [`HashIndex`] by removing all key-value pairs.
956 ///
957 /// It is an asynchronous method returning an `impl Future` for the caller to await.
958 ///
959 /// # Examples
960 ///
961 /// ```
962 /// use scc::HashIndex;
963 ///
964 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
965 ///
966 /// let future_insert = hashindex.insert_async(1, 0);
967 /// let future_retain = hashindex.clear_async();
968 /// ```
969 pub async fn clear_async(&self) {
970 self.retain_async(|_, _| false).await;
971 }
972
973 /// Returns the number of entries in the [`HashIndex`].
974 ///
975 /// It reads the entire metadata area of the bucket array to calculate the number of valid
976 /// entries, making its time complexity `O(N)`. Furthermore, it may overcount entries if an old
977 /// bucket array has yet to be dropped.
978 ///
979 /// # Examples
980 ///
981 /// ```
982 /// use scc::HashIndex;
983 ///
984 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
985 ///
986 /// assert!(hashindex.insert(1, 0).is_ok());
987 /// assert_eq!(hashindex.len(), 1);
988 /// ```
989 #[inline]
990 pub fn len(&self) -> usize {
991 self.num_entries(&Guard::new())
992 }
993
994 /// Returns `true` if the [`HashIndex`] is empty.
995 ///
996 /// # Examples
997 ///
998 /// ```
999 /// use scc::HashIndex;
1000 ///
1001 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1002 ///
1003 /// assert!(hashindex.is_empty());
1004 /// assert!(hashindex.insert(1, 0).is_ok());
1005 /// assert!(!hashindex.is_empty());
1006 /// ```
1007 #[inline]
1008 pub fn is_empty(&self) -> bool {
1009 !self.has_entry(&Guard::new())
1010 }
1011
1012 /// Returns the capacity of the [`HashIndex`].
1013 ///
1014 /// # Examples
1015 ///
1016 /// ```
1017 /// use scc::HashIndex;
1018 ///
1019 /// let hashindex_default: HashIndex<u64, u32> = HashIndex::default();
1020 /// assert_eq!(hashindex_default.capacity(), 0);
1021 ///
1022 /// assert!(hashindex_default.insert(1, 0).is_ok());
1023 /// assert_eq!(hashindex_default.capacity(), 64);
1024 ///
1025 /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1000);
1026 /// assert_eq!(hashindex.capacity(), 1024);
1027 /// ```
1028 #[inline]
1029 pub fn capacity(&self) -> usize {
1030 self.num_slots(&Guard::new())
1031 }
1032
1033 /// Returns the current capacity range of the [`HashIndex`].
1034 ///
1035 /// # Examples
1036 ///
1037 /// ```
1038 /// use scc::HashIndex;
1039 ///
1040 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1041 ///
1042 /// assert_eq!(hashindex.capacity_range(), 0..=(1_usize << (usize::BITS - 1)));
1043 ///
1044 /// let reserved = hashindex.reserve(1000);
1045 /// assert_eq!(hashindex.capacity_range(), 1000..=(1_usize << (usize::BITS - 1)));
1046 /// ```
1047 #[inline]
1048 pub fn capacity_range(&self) -> RangeInclusive<usize> {
1049 self.minimum_capacity.load(Relaxed)..=self.maximum_capacity()
1050 }
1051
    /// Returns the index of the bucket that may contain the key.
    ///
    /// The method returns the index of the bucket associated with the key. The number of
    /// buckets can be calculated by dividing the capacity by `32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1024);
    ///
    /// let bucket_index = hashindex.bucket_index(&11);
    /// assert!(bucket_index < hashindex.capacity() / 32);
    /// ```
    #[inline]
    pub fn bucket_index<Q>(&self, key: &Q) -> usize
    where
        Q: Equivalent<K> + Hash + ?Sized,
    {
        self.calculate_bucket_index(key)
    }
1074
1075 /// Returns an [`Iter`].
1076 ///
1077 /// It is guaranteed to go through all the key-value pairs pertaining in the [`HashIndex`]
1078 /// at the moment, however the same key-value pair can be visited more than once if the
1079 /// [`HashIndex`] is being resized.
1080 ///
1081 /// It requires the user to supply a reference to a [`Guard`].
1082 ///
1083 /// # Examples
1084 ///
1085 /// ```
1086 /// use scc::ebr::Guard;
1087 /// use scc::HashIndex;
1088 ///
1089 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1090 ///
1091 /// assert!(hashindex.insert(1, 0).is_ok());
1092 ///
1093 /// let guard = Guard::new();
1094 ///
1095 /// let mut iter = hashindex.iter(&guard);
1096 /// let entry_ref = iter.next().unwrap();
1097 /// assert_eq!(iter.next(), None);
1098 ///
1099 /// for iter in hashindex.iter(&guard) {
1100 /// assert_eq!(iter, (&1, &0));
1101 /// }
1102 ///
1103 /// drop(hashindex);
1104 ///
1105 /// assert_eq!(entry_ref, (&1, &0));
1106 /// ```
1107 #[inline]
1108 pub fn iter<'h, 'g>(&'h self, guard: &'g Guard) -> Iter<'h, 'g, K, V, H> {
1109 Iter {
1110 hashindex: self,
1111 current_array: None,
1112 current_index: 0,
1113 current_bucket: None,
1114 current_entry_ptr: EntryPtr::new(guard),
1115 guard,
1116 }
1117 }
1118
    /// Clears the old array asynchronously.
    ///
    /// Keeps executing incremental rehash steps until the old bucket array is gone, awaiting
    /// between steps whenever a step cannot be completed immediately.
    async fn cleanse_old_array_async(&self, current_array: &BucketArray<K, V, (), OPTIMISTIC>) {
        while current_array.has_old_array() {
            let mut async_wait = AsyncWait::default();
            let mut async_wait_pinned = Pin::new(&mut async_wait);
            // NOTE(review): `Ok(true)` appears to signal that rehashing has completed, which
            // allows the loop to exit before `has_old_array` is re-checked — confirm against
            // `incremental_rehash`'s contract.
            if self.incremental_rehash::<K, _, false>(
                current_array,
                &mut async_wait_pinned,
                &Guard::new(),
            ) == Ok(true)
            {
                break;
            }
            // The step did not finish; suspend until the wait queue wakes this task, then retry.
            async_wait_pinned.await;
        }
    }
1135}
1136
1137impl<K, V, H> Clone for HashIndex<K, V, H>
1138where
1139 K: 'static + Clone + Eq + Hash,
1140 V: 'static + Clone,
1141 H: BuildHasher + Clone,
1142{
1143 #[inline]
1144 fn clone(&self) -> Self {
1145 let self_clone = Self::with_capacity_and_hasher(self.capacity(), self.hasher().clone());
1146 for (k, v) in self.iter(&Guard::new()) {
1147 let _result = self_clone.insert(k.clone(), v.clone());
1148 }
1149 self_clone
1150 }
1151}
1152
1153impl<K, V, H> Debug for HashIndex<K, V, H>
1154where
1155 K: 'static + Clone + Debug + Eq + Hash,
1156 V: 'static + Clone + Debug,
1157 H: BuildHasher,
1158{
1159 #[inline]
1160 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1161 let guard = Guard::new();
1162 f.debug_map().entries(self.iter(&guard)).finish()
1163 }
1164}
1165
1166impl<K, V> HashIndex<K, V, RandomState>
1167where
1168 K: 'static + Clone + Eq + Hash,
1169 V: 'static + Clone,
1170{
1171 /// Creates an empty default [`HashIndex`].
1172 ///
1173 /// # Examples
1174 ///
1175 /// ```
1176 /// use scc::HashIndex;
1177 ///
1178 /// let hashindex: HashIndex<u64, u32> = HashIndex::new();
1179 ///
1180 /// let result = hashindex.capacity();
1181 /// assert_eq!(result, 0);
1182 /// ```
1183 #[inline]
1184 #[must_use]
1185 pub fn new() -> Self {
1186 Self::default()
1187 }
1188
1189 /// Creates an empty [`HashIndex`] with the specified capacity.
1190 ///
1191 /// The actual capacity is equal to or greater than the specified capacity.
1192 ///
1193 /// # Examples
1194 ///
1195 /// ```
1196 /// use scc::HashIndex;
1197 ///
1198 /// let hashindex: HashIndex<u64, u32> = HashIndex::with_capacity(1000);
1199 ///
1200 /// let result = hashindex.capacity();
1201 /// assert_eq!(result, 1024);
1202 /// ```
1203 #[inline]
1204 #[must_use]
1205 pub fn with_capacity(capacity: usize) -> Self {
1206 Self::with_capacity_and_hasher(capacity, RandomState::new())
1207 }
1208}
1209
impl<K, V, H> Default for HashIndex<K, V, H>
where
    K: 'static,
    V: 'static,
    H: BuildHasher + Default,
{
    /// Creates an empty default [`HashIndex`].
    ///
    /// The default capacity is `0`: as the example below shows, no bucket array is
    /// allocated until the first entry is inserted.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// let result = hashindex.capacity();
    /// assert_eq!(result, 0);
    /// ```
    #[inline]
    fn default() -> Self {
        Self::with_hasher(H::default())
    }
}
1235
1236impl<K, V, H> FromIterator<(K, V)> for HashIndex<K, V, H>
1237where
1238 K: 'static + Clone + Eq + Hash,
1239 V: 'static + Clone,
1240 H: BuildHasher + Default,
1241{
1242 #[inline]
1243 fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
1244 let into_iter = iter.into_iter();
1245 let hashindex = Self::with_capacity_and_hasher(
1246 Self::capacity_from_size_hint(into_iter.size_hint()),
1247 H::default(),
1248 );
1249 into_iter.for_each(|e| {
1250 let _result = hashindex.insert(e.0, e.1);
1251 });
1252 hashindex
1253 }
1254}
1255
impl<K, V, H> HashTable<K, V, H, (), OPTIMISTIC> for HashIndex<K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    // Exposes the stored hash builder to the generic hash-table machinery.
    #[inline]
    fn hasher(&self) -> &H {
        &self.build_hasher
    }
    // Entries can always be duplicated because `K: Clone` and `V: Clone`; this is
    // what lets resizing copy entries instead of moving them.
    #[inline]
    fn try_clone(entry: &(K, V)) -> Option<(K, V)> {
        Some((entry.0.clone(), entry.1.clone()))
    }
    // The current bucket array; `OPTIMISTIC` selects the lock-free-read bucket flavor.
    #[inline]
    fn bucket_array(&self) -> &AtomicShared<BucketArray<K, V, (), OPTIMISTIC>> {
        &self.array
    }
    // Lower capacity bound, raised and lowered by `reserve`/`Reserve::drop`.
    #[inline]
    fn minimum_capacity(&self) -> &AtomicUsize {
        &self.minimum_capacity
    }
    // Hard upper bound: the largest power of two representable in `usize`.
    #[inline]
    fn maximum_capacity(&self) -> usize {
        1_usize << (usize::BITS - 1)
    }
}
1283
1284impl<K, V, H> PartialEq for HashIndex<K, V, H>
1285where
1286 K: 'static + Clone + Eq + Hash,
1287 V: 'static + Clone + PartialEq,
1288 H: BuildHasher,
1289{
1290 #[inline]
1291 fn eq(&self, other: &Self) -> bool {
1292 let guard = Guard::new();
1293 if !self
1294 .iter(&guard)
1295 .any(|(k, v)| other.peek_with(k, |_, ov| v == ov) != Some(true))
1296 {
1297 return !other
1298 .iter(&guard)
1299 .any(|(k, v)| self.peek_with(k, |_, sv| v == sv) != Some(true));
1300 }
1301 false
1302 }
1303}
1304
1305impl<'h, K, V, H> Entry<'h, K, V, H>
1306where
1307 K: 'static + Clone + Eq + Hash,
1308 V: 'static + Clone,
1309 H: BuildHasher,
1310{
1311 /// Ensures a value is in the entry by inserting the supplied instance if empty.
1312 ///
1313 /// # Examples
1314 ///
1315 /// ```
1316 /// use scc::HashIndex;
1317 ///
1318 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1319 ///
1320 /// hashindex.entry(3).or_insert(7);
1321 /// assert_eq!(hashindex.peek_with(&3, |_, v| *v), Some(7));
1322 /// ```
1323 #[inline]
1324 pub fn or_insert(self, val: V) -> OccupiedEntry<'h, K, V, H> {
1325 self.or_insert_with(|| val)
1326 }
1327
1328 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1329 ///
1330 /// # Examples
1331 ///
1332 /// ```
1333 /// use scc::HashIndex;
1334 ///
1335 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1336 ///
1337 /// hashindex.entry(19).or_insert_with(|| 5);
1338 /// assert_eq!(hashindex.peek_with(&19, |_, v| *v), Some(5));
1339 /// ```
1340 #[inline]
1341 pub fn or_insert_with<F: FnOnce() -> V>(self, constructor: F) -> OccupiedEntry<'h, K, V, H> {
1342 self.or_insert_with_key(|_| constructor())
1343 }
1344
1345 /// Ensures a value is in the entry by inserting the result of the supplied closure if empty.
1346 ///
1347 /// The reference to the moved key is provided, therefore cloning or copying the key is
1348 /// unnecessary.
1349 ///
1350 /// # Examples
1351 ///
1352 /// ```
1353 /// use scc::HashIndex;
1354 ///
1355 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1356 ///
1357 /// hashindex.entry(11).or_insert_with_key(|k| if *k == 11 { 7 } else { 3 });
1358 /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), Some(7));
1359 /// ```
1360 #[inline]
1361 pub fn or_insert_with_key<F: FnOnce(&K) -> V>(
1362 self,
1363 constructor: F,
1364 ) -> OccupiedEntry<'h, K, V, H> {
1365 match self {
1366 Self::Occupied(o) => o,
1367 Self::Vacant(v) => {
1368 let val = constructor(v.key());
1369 v.insert_entry(val)
1370 }
1371 }
1372 }
1373
1374 /// Returns a reference to the key of this entry.
1375 ///
1376 /// # Examples
1377 ///
1378 /// ```
1379 /// use scc::HashIndex;
1380 ///
1381 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1382 /// assert_eq!(hashindex.entry(31).key(), &31);
1383 /// ```
1384 #[inline]
1385 pub fn key(&self) -> &K {
1386 match self {
1387 Self::Occupied(o) => o.key(),
1388 Self::Vacant(v) => v.key(),
1389 }
1390 }
1391
1392 /// Provides in-place mutable access to an occupied entry.
1393 ///
1394 /// # Safety
1395 ///
1396 /// The caller has to make sure that there are no readers of the entry, e.g., a reader keeping
1397 /// a reference to the entry via [`HashIndex::iter`], [`HashIndex::peek`], or
1398 /// [`HashIndex::peek_with`], unless an instance of `V` can be safely read when there is a
1399 /// single writer, e.g., `V = [u8; 32]`.
1400 ///
1401 /// # Examples
1402 ///
1403 /// ```
1404 /// use scc::HashIndex;
1405 ///
1406 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1407 ///
1408 /// unsafe {
1409 /// hashindex.entry(37).and_modify(|v| { *v += 1 }).or_insert(47);
1410 /// }
1411 /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(47));
1412 ///
1413 /// unsafe {
1414 /// hashindex.entry(37).and_modify(|v| { *v += 1 }).or_insert(3);
1415 /// }
1416 /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(48));
1417 /// ```
1418 #[inline]
1419 #[must_use]
1420 pub unsafe fn and_modify<F>(self, f: F) -> Self
1421 where
1422 F: FnOnce(&mut V),
1423 {
1424 match self {
1425 Self::Occupied(mut o) => {
1426 f(o.get_mut());
1427 Self::Occupied(o)
1428 }
1429 Self::Vacant(_) => self,
1430 }
1431 }
1432}
1433
1434impl<'h, K, V, H> Entry<'h, K, V, H>
1435where
1436 K: 'static + Clone + Eq + Hash,
1437 V: 'static + Clone + Default,
1438 H: BuildHasher,
1439{
1440 /// Ensures a value is in the entry by inserting the default value if empty.
1441 ///
1442 /// # Examples
1443 ///
1444 /// ```
1445 /// use scc::HashIndex;
1446 ///
1447 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1448 /// hashindex.entry(11).or_default();
1449 /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), Some(0));
1450 /// ```
1451 #[inline]
1452 pub fn or_default(self) -> OccupiedEntry<'h, K, V, H> {
1453 match self {
1454 Self::Occupied(o) => o,
1455 Self::Vacant(v) => v.insert_entry(Default::default()),
1456 }
1457 }
1458}
1459
1460impl<K, V, H> Debug for Entry<'_, K, V, H>
1461where
1462 K: 'static + Clone + Debug + Eq + Hash,
1463 V: 'static + Clone + Debug,
1464 H: BuildHasher,
1465{
1466 #[inline]
1467 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1468 match self {
1469 Self::Vacant(v) => f.debug_tuple("Entry").field(v).finish(),
1470 Self::Occupied(o) => f.debug_tuple("Entry").field(o).finish(),
1471 }
1472 }
1473}
1474
impl<'h, K, V, H> OccupiedEntry<'h, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    /// Gets a reference to the key in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert_eq!(hashindex.entry(29).or_default().key(), &29);
    /// ```
    #[inline]
    #[must_use]
    pub fn key(&self) -> &K {
        &self
            .locked_entry
            .entry_ptr
            .get(self.locked_entry.data_block_mut)
            .0
    }

    /// Marks that the entry is removed from the [`HashIndex`].
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry(11).or_insert(17);
    ///
    /// if let Entry::Occupied(o) = hashindex.entry(11) {
    ///     o.remove_entry();
    /// };
    /// assert_eq!(hashindex.peek_with(&11, |_, v| *v), None);
    /// ```
    #[inline]
    pub fn remove_entry(mut self) {
        let guard = Guard::new();
        // Logically delete the entry; the prolonged guard keeps the removed data
        // reachable for concurrent readers until it is safe to reclaim.
        self.locked_entry.locker.mark_removed(
            &mut self.locked_entry.entry_ptr,
            self.hashindex.prolonged_guard_ref(&guard),
        );
        // The bucket is now (nearly) empty or has accumulated removed entries:
        // consider shrinking or rebuilding the bucket array.
        if self.locked_entry.locker.num_entries() <= 1 || self.locked_entry.locker.need_rebuild() {
            let hashindex = self.hashindex;
            if let Some(current_array) = hashindex.bucket_array().load(Acquire, &guard).as_ref() {
                // Resizing is never initiated while an old array is still being drained.
                if !current_array.has_old_array() {
                    let index = self.locked_entry.index;
                    if current_array.initiate_sampling(index) {
                        // Drop `self` first: the bucket lock must be released before
                        // attempting to shrink or rebuild.
                        drop(self);
                        hashindex.try_shrink_or_rebuild(current_array, index, &guard);
                    }
                }
            }
        }
    }

    /// Gets a reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry(19).or_insert(11);
    ///
    /// if let Entry::Occupied(o) = hashindex.entry(19) {
    ///     assert_eq!(o.get(), &11);
    /// };
    /// ```
    #[inline]
    #[must_use]
    pub fn get(&self) -> &V {
        &self
            .locked_entry
            .entry_ptr
            .get(self.locked_entry.data_block_mut)
            .1
    }

    /// Gets a mutable reference to the value in the entry.
    ///
    /// # Safety
    ///
    /// The caller has to make sure that there are no readers of the entry, e.g., a reader keeping
    /// a reference to the entry via [`HashIndex::iter`], [`HashIndex::peek`], or
    /// [`HashIndex::peek_with`], unless an instance of `V` can be safely read when there is a
    /// single writer, e.g., `V = [u8; 32]`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry(37).or_insert(11);
    ///
    /// if let Entry::Occupied(mut o) = hashindex.entry(37) {
    ///     // Safety: `u32` can be safely read while being modified.
    ///     unsafe { *o.get_mut() += 18; }
    ///     assert_eq!(*o.get(), 29);
    /// }
    ///
    /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(29));
    /// ```
    #[inline]
    pub unsafe fn get_mut(&mut self) -> &mut V {
        &mut self
            .locked_entry
            .entry_ptr
            .get_mut(
                self.locked_entry.data_block_mut,
                &mut self.locked_entry.locker,
            )
            .1
    }

    /// Updates the entry by inserting a new entry and marking the existing entry removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// hashindex.entry(37).or_insert(11);
    ///
    /// if let Entry::Occupied(mut o) = hashindex.entry(37) {
    ///     o.update(29);
    /// }
    ///
    /// assert_eq!(hashindex.peek_with(&37, |_, v| *v), Some(29));
    /// ```
    #[inline]
    pub fn update(mut self, val: V) {
        let key = self.key().clone();
        // Reuse the existing entry's partial hash for the replacement entry.
        let partial_hash = self
            .locked_entry
            .entry_ptr
            .partial_hash(&self.locked_entry.locker);
        let guard = Guard::new();
        // NOTE(review): the replacement is inserted BEFORE the old entry is marked
        // removed — presumably so concurrent readers never observe the key missing;
        // keep this ordering.
        self.locked_entry.locker.insert_with(
            self.locked_entry.data_block_mut,
            partial_hash,
            || (key, val),
            self.hashindex.prolonged_guard_ref(&guard),
        );
        self.locked_entry.locker.mark_removed(
            &mut self.locked_entry.entry_ptr,
            self.hashindex.prolonged_guard_ref(&guard),
        );
    }

    /// Gets the next closest occupied entry.
    ///
    /// [`HashIndex::first_entry`], [`HashIndex::first_entry_async`], and this method together
    /// enables the [`OccupiedEntry`] to effectively act as a mutable iterator over entries. The
    /// method never acquires more than one lock even when it searches other buckets for the next
    /// closest occupied entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert(1, 0).is_ok());
    /// assert!(hashindex.insert(2, 0).is_ok());
    ///
    /// let first_entry = hashindex.first_entry().unwrap();
    /// let first_key = *first_entry.key();
    /// let second_entry = first_entry.next().unwrap();
    /// let second_key = *second_entry.key();
    ///
    /// assert!(second_entry.next().is_none());
    /// assert_eq!(first_key + second_key, 3);
    /// ```
    #[inline]
    #[must_use]
    pub fn next(self) -> Option<Self> {
        // Bucket traversal is delegated to `LockedEntry::next`; `self` is consumed,
        // releasing the current lock before the next one is taken.
        let hashindex = self.hashindex;
        if let Some(locked_entry) = self.locked_entry.next(hashindex) {
            return Some(OccupiedEntry {
                hashindex,
                locked_entry,
            });
        }
        None
    }

    /// Gets the next closest occupied entry.
    ///
    /// [`HashIndex::first_entry`], [`HashIndex::first_entry_async`], and this method together
    /// enables the [`OccupiedEntry`] to effectively act as a mutable iterator over entries. The
    /// method never acquires more than one lock even when it searches other buckets for the next
    /// closest occupied entry.
    ///
    /// It is an asynchronous method returning an `impl Future` for the caller to await.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::HashIndex;
    /// use scc::hash_index::Entry;
    ///
    /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
    ///
    /// assert!(hashindex.insert(1, 0).is_ok());
    /// assert!(hashindex.insert(2, 0).is_ok());
    ///
    /// let second_entry_future = hashindex.first_entry().unwrap().next_async();
    /// ```
    #[inline]
    pub async fn next_async(self) -> Option<OccupiedEntry<'h, K, V, H>> {
        // Asynchronous counterpart of `next`: awaits instead of blocking on a lock.
        let hashindex = self.hashindex;
        if let Some(locked_entry) = self.locked_entry.next_async(hashindex).await {
            return Some(OccupiedEntry {
                hashindex,
                locked_entry,
            });
        }
        None
    }
}
1716
1717impl<K, V, H> Debug for OccupiedEntry<'_, K, V, H>
1718where
1719 K: 'static + Clone + Debug + Eq + Hash,
1720 V: 'static + Clone + Debug,
1721 H: BuildHasher,
1722{
1723 #[inline]
1724 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1725 f.debug_struct("OccupiedEntry")
1726 .field("key", self.key())
1727 .field("value", self.get())
1728 .finish_non_exhaustive()
1729 }
1730}
1731
1732impl<K, V, H> Deref for OccupiedEntry<'_, K, V, H>
1733where
1734 K: 'static + Clone + Debug + Eq + Hash,
1735 V: 'static + Clone + Debug,
1736 H: BuildHasher,
1737{
1738 type Target = V;
1739
1740 #[inline]
1741 fn deref(&self) -> &Self::Target {
1742 self.get()
1743 }
1744}
1745
1746impl<'h, K, V, H> VacantEntry<'h, K, V, H>
1747where
1748 K: 'static + Clone + Eq + Hash,
1749 V: 'static + Clone,
1750 H: BuildHasher,
1751{
1752 /// Gets a reference to the key.
1753 ///
1754 /// # Examples
1755 ///
1756 /// ```
1757 /// use scc::HashIndex;
1758 ///
1759 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1760 /// assert_eq!(hashindex.entry(11).key(), &11);
1761 /// ```
1762 #[inline]
1763 pub fn key(&self) -> &K {
1764 &self.key
1765 }
1766
1767 /// Takes ownership of the key.
1768 ///
1769 /// # Examples
1770 ///
1771 /// ```
1772 /// use scc::HashIndex;
1773 /// use scc::hash_index::Entry;
1774 ///
1775 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1776 ///
1777 /// if let Entry::Vacant(v) = hashindex.entry(17) {
1778 /// assert_eq!(v.into_key(), 17);
1779 /// };
1780 /// ```
1781 #[inline]
1782 pub fn into_key(self) -> K {
1783 self.key
1784 }
1785
1786 /// Sets the value of the entry with its key, and returns an [`OccupiedEntry`].
1787 ///
1788 /// # Examples
1789 ///
1790 /// ```
1791 /// use scc::HashIndex;
1792 /// use scc::hash_index::Entry;
1793 ///
1794 /// let hashindex: HashIndex<u64, u32> = HashIndex::default();
1795 ///
1796 /// if let Entry::Vacant(o) = hashindex.entry(19) {
1797 /// o.insert_entry(29);
1798 /// }
1799 ///
1800 /// assert_eq!(hashindex.peek_with(&19, |_, v| *v), Some(29));
1801 /// ```
1802 #[inline]
1803 pub fn insert_entry(mut self, val: V) -> OccupiedEntry<'h, K, V, H> {
1804 let guard = Guard::new();
1805 let entry_ptr = self.locked_entry.locker.insert_with(
1806 self.locked_entry.data_block_mut,
1807 BucketArray::<K, V, (), OPTIMISTIC>::partial_hash(self.hash),
1808 || (self.key, val),
1809 self.hashindex.prolonged_guard_ref(&guard),
1810 );
1811 OccupiedEntry {
1812 hashindex: self.hashindex,
1813 locked_entry: LockedEntry {
1814 index: self.locked_entry.index,
1815 data_block_mut: self.locked_entry.data_block_mut,
1816 locker: self.locked_entry.locker,
1817 entry_ptr,
1818 },
1819 }
1820 }
1821}
1822
1823impl<K, V, H> Debug for VacantEntry<'_, K, V, H>
1824where
1825 K: 'static + Clone + Debug + Eq + Hash,
1826 V: 'static + Clone + Debug,
1827 H: BuildHasher,
1828{
1829 #[inline]
1830 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1831 f.debug_tuple("VacantEntry").field(self.key()).finish()
1832 }
1833}
1834
impl<K, V, H> Reserve<'_, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    /// Returns the number of reserved slots.
    ///
    /// This is the amount that was added to the [`HashIndex`]'s minimum capacity when the
    /// reservation was made, and that will be subtracted again when the `Reserve` is dropped.
    #[inline]
    #[must_use]
    pub fn additional_capacity(&self) -> usize {
        self.additional
    }
}
1848
impl<K, V, H> AsRef<HashIndex<K, V, H>> for Reserve<'_, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    // Borrows the underlying `HashIndex` the reservation was taken on.
    #[inline]
    fn as_ref(&self) -> &HashIndex<K, V, H> {
        self.hashindex
    }
}
1860
1861impl<K, V, H> Debug for Reserve<'_, K, V, H>
1862where
1863 K: 'static + Clone + Eq + Hash,
1864 V: 'static + Clone,
1865 H: BuildHasher,
1866{
1867 #[inline]
1868 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1869 f.debug_tuple("Reserve").field(&self.additional).finish()
1870 }
1871}
1872
impl<K, V, H> Deref for Reserve<'_, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    type Target = HashIndex<K, V, H>;

    // Allows the reservation guard to be used as the `HashIndex` it reserves capacity on.
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.hashindex
    }
}
1886
1887impl<K, V, H> Drop for Reserve<'_, K, V, H>
1888where
1889 K: 'static + Clone + Eq + Hash,
1890 V: 'static + Clone,
1891 H: BuildHasher,
1892{
1893 #[inline]
1894 fn drop(&mut self) {
1895 let result = self
1896 .hashindex
1897 .minimum_capacity
1898 .fetch_sub(self.additional, Relaxed);
1899 self.hashindex.try_resize(0, &Guard::new());
1900 debug_assert!(result >= self.additional);
1901 }
1902}
1903
1904impl<K, V, H> Debug for Iter<'_, '_, K, V, H>
1905where
1906 K: 'static + Clone + Eq + Hash,
1907 V: 'static + Clone,
1908 H: BuildHasher,
1909{
1910 #[inline]
1911 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1912 f.debug_struct("Iter")
1913 .field("current_index", &self.current_index)
1914 .field("current_entry_ptr", &self.current_entry_ptr)
1915 .finish()
1916 }
1917}
1918
impl<'g, K, V, H> Iterator for Iter<'_, 'g, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
    type Item = (&'g K, &'g V);

    // Lock-free scan over the bucket array. During a resize two arrays coexist; the
    // scan starts at the old array (if any) and then moves to the current one, which
    // is why an entry may be yielded more than once (see `HashIndex::iter` docs).
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let mut array = if let Some(array) = self.current_array.as_ref().copied() {
            array
        } else {
            // Start scanning.
            let current_array = self
                .hashindex
                .bucket_array()
                .load(Acquire, self.guard)
                .as_ref()?;
            let old_array_ptr = current_array.old_array(self.guard);
            // Prefer the old array so entries not yet rehashed are still visited.
            let array = if let Some(old_array) = old_array_ptr.as_ref() {
                old_array
            } else {
                current_array
            };
            self.current_array.replace(array);
            self.current_bucket.replace(array.bucket(0));
            self.current_entry_ptr = EntryPtr::new(self.guard);
            array
        };

        // Go to the next bucket.
        loop {
            if let Some(bucket) = self.current_bucket.take() {
                // Go to the next entry in the bucket.
                if self.current_entry_ptr.move_to_next(bucket, self.guard) {
                    let (k, v) = self
                        .current_entry_ptr
                        .get(array.data_block(self.current_index));
                    // Put the bucket back so the next call resumes from this position.
                    self.current_bucket.replace(bucket);
                    return Some((k, v));
                }
            }
            self.current_index += 1;
            if self.current_index == array.num_buckets() {
                // The end of the array being scanned was reached; re-read the current
                // array since a resize may have happened mid-scan.
                let current_array = self
                    .hashindex
                    .bucket_array()
                    .load(Acquire, self.guard)
                    .as_ref()?;
                if self
                    .current_array
                    .as_ref()
                    .copied()
                    .map_or(false, |a| ptr::eq(a, current_array))
                {
                    // Finished scanning the entire array.
                    break;
                }
                let old_array_ptr = current_array.old_array(self.guard);
                if self
                    .current_array
                    .as_ref()
                    .copied()
                    .map_or(false, |a| ptr::eq(a, old_array_ptr.as_ptr()))
                {
                    // Start scanning the current array.
                    array = current_array;
                    self.current_array.replace(array);
                    self.current_index = 0;
                    self.current_bucket.replace(array.bucket(0));
                    self.current_entry_ptr = EntryPtr::new(self.guard);
                    continue;
                }

                // Start from the very beginning.
                array = if let Some(old_array) = old_array_ptr.as_ref() {
                    old_array
                } else {
                    current_array
                };
                self.current_array.replace(array);
                self.current_index = 0;
                self.current_bucket.replace(array.bucket(0));
                self.current_entry_ptr = EntryPtr::new(self.guard);
                continue;
            }
            // Advance to the next bucket within the same array.
            self.current_bucket
                .replace(array.bucket(self.current_index));
            self.current_entry_ptr = EntryPtr::new(self.guard);
        }
        None
    }
}
2013
// Marker: once `next` has returned `None` (the scan has wrapped back to the same
// array), subsequent calls are expected to keep returning `None`.
impl<K, V, H> FusedIterator for Iter<'_, '_, K, V, H>
where
    K: 'static + Clone + Eq + Hash,
    V: 'static + Clone,
    H: BuildHasher,
{
}
2021
// Explicit opt-in: `Iter` is unwind-safe whenever its key, value, and hasher types are.
impl<K, V, H> UnwindSafe for Iter<'_, '_, K, V, H>
where
    K: 'static + Clone + Eq + Hash + UnwindSafe,
    V: 'static + Clone + UnwindSafe,
    H: BuildHasher + UnwindSafe,
{
}