1mod raw;
11
12use hashbrown::raw::RawTable;
13
14use crate::vec::{Drain, Vec};
15use core::cmp;
16use core::fmt;
17use core::mem::replace;
18use core::ops::RangeBounds;
19
20use crate::equivalent::Equivalent;
21use crate::util::{enumerate, simplify_range};
22use crate::{Bucket, Entries, HashValue};
23
/// Core of the map that holds the entries and their hash index,
/// independent of any hash builder.
pub(crate) struct IndexMapCore<K, V> {
    /// Raw table mapping an entry's hash to its position in `entries`.
    indices: RawTable<usize>,
    /// Entries in their insertion order, each bucket carrying its
    /// precomputed hash alongside the key and value.
    entries: Vec<Bucket<K, V>>,
}
31
32#[inline(always)]
33fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
34 move |&i| entries[i].hash.get()
35}
36
37#[inline]
38fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
39 key: &'a Q,
40 entries: &'a [Bucket<K, V>],
41) -> impl Fn(&usize) -> bool + 'a {
42 move |&i| Q::equivalent(key, &entries[i].key)
43}
44
45#[inline]
46fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
47 table.erase_entry(hash.get(), move |&i| i == index);
48}
49
50#[inline]
51fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
52 let index = table
53 .get_mut(hash.get(), move |&i| i == old)
54 .expect("index not found");
55 *index = new;
56}
57
impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let indices = self.indices.clone();
        // Size the entry vector from the cloned table's capacity so the
        // two halves stay in step, then copy the buckets over.
        let mut entries = Vec::with_capacity(indices.capacity());
        entries.clone_from(&self.entries);
        IndexMapCore { indices, entries }
    }

    fn clone_from(&mut self, other: &Self) {
        // Hash via `other`'s entries: the indices being copied refer to
        // positions in *its* entry vector.
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // Must grow — match the indices capacity, like `reserve` does.
            self.reserve_entries();
        }
        self.entries.clone_from(&other.entries);
    }
}
80
81impl<K, V> fmt::Debug for IndexMapCore<K, V>
82where
83 K: fmt::Debug,
84 V: fmt::Debug,
85{
86 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
87 f.debug_struct("IndexMapCore")
88 .field("indices", &raw::DebugIndices(&self.indices))
89 .field("entries", &self.entries)
90 .finish()
91 }
92}
93
impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    /// Consumes the map, yielding the entries in insertion order.
    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    /// Borrows the entries in insertion order.
    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    /// Mutably borrows the entries in insertion order.
    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    /// Runs `f` over the entry slice, then rebuilds the index table,
    /// since `f` may have reordered the entries.
    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}
120
impl<K, V> IndexMapCore<K, V> {
    /// Creates an empty map core with no allocation.
    #[inline]
    pub(crate) fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    /// Creates an empty map core with room for `n` entries in both halves.
    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    /// Number of entries in the map (the table and vector lengths agree).
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    /// Usable capacity: the smaller half bounds how many entries fit
    /// before either side must grow.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        cmp::min(self.indices.capacity(), self.entries.capacity())
    }

    /// Removes all entries, keeping allocated capacity.
    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    /// Shortens the map to `len` entries, dropping the tail.
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            // Drop the tail's hash indices before dropping the entries.
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    /// Removes the given range, returning a draining iterator over it.
    /// `Vec::drain` does the shifting of the entries that follow.
    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    /// Splits entries `at..` off into a new map; `self` keeps `..at`.
    ///
    /// Panics if `at` is out of bounds.
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        // Build a fresh index table for the split-off tail,
        // whose entries are now renumbered from zero.
        let mut indices = RawTable::with_capacity(entries.len());
        for (i, entry) in enumerate(&entries) {
            indices.insert_no_grow(entry.hash.get(), i);
        }
        Self { indices, entries }
    }

    /// Reserves capacity for `additional` more entries in both halves.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.reserve_entries();
    }

    /// Grows the entry vector to match the index table's capacity.
    fn reserve_entries(&mut self) {
        let additional = self.indices.capacity() - self.entries.len();
        self.entries.reserve_exact(additional);
    }

    /// Shrinks both halves down to the current length.
    pub(crate) fn shrink_to_fit(&mut self) {
        self.indices.shrink_to(0, get_hash(&self.entries));
        self.entries.shrink_to_fit();
    }

    /// Removes and returns the last entry, if any.
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            // The popped entry lived at the old last position.
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Appends a new entry *without* checking whether the key already
    /// exists, returning its index. Callers must have checked first.
    fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.entries.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        if i == self.entries.capacity() {
            // The index table just grew; grow entries to match
            // (`reserve_entries` sizes from the table's new capacity).
            self.reserve_entries();
        }
        self.entries.push(Bucket { hash, key, value });
        i
    }

    /// Returns the index of the entry matching `key`, if present.
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    /// Inserts the pair; if the key already exists its value is replaced
    /// in place (position preserved). Returns the index and the old value.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.get_index_of(hash, &key) {
            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
            None => (self.push(hash, key, value), None),
        }
    }

    /// Removes the entry matching `key`, shifting all entries that
    /// follow it down by one. Returns its former index, key, and value.
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Removes the entry at `index` (if in bounds), shifting all
    /// entries that follow it down by one.
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finishes a shift-removal: `index` must already be erased from
    /// `self.indices`. Removes the entry and decrements every stored
    /// index that pointed past it.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // `Vec::remove` shifts the following entries down by one.
        let entry = self.entries.remove(index);

        // Fix up the stored indices, picking the cheaper strategy:
        let raw_capacity = self.indices.buckets();
        let shifted_entries = &self.entries[index..];
        if shifted_entries.len() > raw_capacity / 2 {
            // Most entries shifted: one sweep over every table slot.
            // (`indices_mut` is a raw-table helper from the `raw` module.)
            for i in self.indices_mut() {
                if *i > index {
                    *i -= 1;
                }
            }
        } else {
            // Few entries shifted: find each one by hash and fix it.
            for (i, entry) in (index + 1..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }

        (entry.key, entry.value)
    }

    /// Removes the entry matching `key` by swapping in the last entry
    /// (order not preserved). Returns its former index, key, and value.
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Removes the entry at `index` (if in bounds) by swapping in the
    /// last entry (order not preserved).
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finishes a swap-removal: `index` must already be erased from
    /// `self.indices`. Swap-removes the entry and repoints the index of
    /// the entry that moved into its place.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        let entry = self.entries.swap_remove(index);

        // If an entry was moved into `index`, it came from the old last
        // position — update its stored index accordingly.
        if let Some(entry) = self.entries.get(index) {
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erases the stored indices for entries `start..end` and shifts the
    /// indices for `end..` down to `start..`. The entries themselves are
    /// still at their original positions when this runs (e.g. `drain`
    /// lets `Vec::drain` do the actual shifting afterwards).
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Heuristic choice between three strategies:
        if erased == 0 {
            // Nothing erased — nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Few indices survive: cheapest to clear and reinsert them all.
            self.indices.clear();

            // Head entries keep their positions.
            for (i, entry) in enumerate(start_entries) {
                self.indices.insert_no_grow(entry.hash.get(), i);
            }

            // Tail entries take the erased range's positions.
            for (i, entry) in (start..).zip(shifted_entries) {
                self.indices.insert_no_grow(entry.hash.get(), i);
            }
        } else if erased + shifted < half_capacity {
            // Few indices change: erase and update them individually by hash.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Otherwise sweep the whole table once.
            // (`erase_indices_sweep` is defined in the `raw` module.)
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    /// Keeps only the entries for which `keep` returns true, preserving
    /// the relative order of the kept entries.
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        // Compact kept entries in place over the deleted ones.
        let len = self.entries.len();
        let mut n_deleted = 0;
        for i in 0..len {
            let will_keep = {
                let entry = &mut self.entries[i];
                keep(&mut entry.key, &mut entry.value)
            };
            if !will_keep {
                n_deleted += 1;
            } else if n_deleted > 0 {
                // Slide the kept entry back over the gap.
                self.entries.swap(i - n_deleted, i);
            }
        }
        if n_deleted > 0 {
            self.entries.truncate(len - n_deleted);
            self.rebuild_hash_table();
        }
    }

    /// Repopulates `indices` from scratch to match `entries`.
    /// The table must already have enough capacity.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        debug_assert!(self.indices.capacity() >= self.entries.len());
        for (i, entry) in enumerate(&self.entries) {
            self.indices.insert_no_grow(entry.hash.get(), i);
        }
    }

    /// Reverses the entry order in place and remaps every stored index.
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices: an entry at index i moved to
        // len - i - 1, which we can apply to every slot directly.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}
437
/// Entry for an existing key-value pair or a vacant location to
/// insert one.
pub enum Entry<'a, K, V> {
    /// Existing slot with equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}
446
447impl<'a, K, V> Entry<'a, K, V> {
448 pub fn or_insert(self, default: V) -> &'a mut V {
450 match self {
451 Entry::Occupied(entry) => entry.into_mut(),
452 Entry::Vacant(entry) => entry.insert(default),
453 }
454 }
455
456 pub fn or_insert_with<F>(self, call: F) -> &'a mut V
458 where
459 F: FnOnce() -> V,
460 {
461 match self {
462 Entry::Occupied(entry) => entry.into_mut(),
463 Entry::Vacant(entry) => entry.insert(call()),
464 }
465 }
466
467 pub fn key(&self) -> &K {
468 match *self {
469 Entry::Occupied(ref entry) => entry.key(),
470 Entry::Vacant(ref entry) => entry.key(),
471 }
472 }
473
474 pub fn index(&self) -> usize {
476 match *self {
477 Entry::Occupied(ref entry) => entry.index(),
478 Entry::Vacant(ref entry) => entry.index(),
479 }
480 }
481
482 pub fn and_modify<F>(self, f: F) -> Self
484 where
485 F: FnOnce(&mut V),
486 {
487 match self {
488 Entry::Occupied(mut o) => {
489 f(o.get_mut());
490 Entry::Occupied(o)
491 }
492 x => x,
493 }
494 }
495
496 pub fn or_default(self) -> &'a mut V
501 where
502 V: Default,
503 {
504 match self {
505 Entry::Occupied(entry) => entry.into_mut(),
506 Entry::Vacant(entry) => entry.insert(V::default()),
507 }
508 }
509}
510
511impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
512 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
513 match *self {
514 Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
515 Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
516 }
517 }
518}
519
520pub use self::raw::OccupiedEntry;
521
522impl<K, V> OccupiedEntry<'_, K, V> {
524 pub fn insert(&mut self, value: V) -> V {
526 replace(self.get_mut(), value)
527 }
528
529 pub fn remove(self) -> V {
533 self.swap_remove()
534 }
535
536 pub fn swap_remove(self) -> V {
544 self.swap_remove_entry().1
545 }
546
547 pub fn shift_remove(self) -> V {
555 self.shift_remove_entry().1
556 }
557
558 pub fn remove_entry(self) -> (K, V) {
562 self.swap_remove_entry()
563 }
564}
565
566impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
567 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
568 f.debug_struct(stringify!(OccupiedEntry))
569 .field("key", self.key())
570 .field("value", self.get())
571 .finish()
572 }
573}
574
/// A view into a vacant location in the map, holding the precomputed
/// hash and the owned key until (and unless) insertion happens.
pub struct VacantEntry<'a, K, V> {
    // The map this entry would insert into.
    map: &'a mut IndexMapCore<K, V>,
    // Hash of `key`, computed up front by the caller.
    hash: HashValue,
    // The key, owned here until inserted or given back via `into_key`.
    key: K,
}
584
585impl<'a, K, V> VacantEntry<'a, K, V> {
586 pub fn key(&self) -> &K {
587 &self.key
588 }
589
590 pub fn into_key(self) -> K {
591 self.key
592 }
593
594 pub fn index(&self) -> usize {
596 self.map.len()
597 }
598
599 pub fn insert(self, value: V) -> &'a mut V {
600 let i = self.map.push(self.hash, self.key, value);
601 &mut self.map.entries[i].value
602 }
603}
604
605impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
606 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
607 f.debug_tuple(stringify!(VacantEntry))
608 .field(self.key())
609 .finish()
610 }
611}
612
/// Compile-time check that the core map and entry types are Send + Sync.
#[test]
fn assert_send_sync() {
    fn check<T: Send + Sync>() {}
    check::<IndexMapCore<i32, i32>>();
    check::<Entry<'_, i32, i32>>();
}