// kaspa_notify/address/tracker.rs

1use crate::address::error::{Error, Result};
2use indexmap::{map::Entry, IndexMap};
3use itertools::Itertools;
4use kaspa_addresses::{Address, Prefix};
5use kaspa_consensus_core::tx::ScriptPublicKey;
6use kaspa_core::{debug, trace};
7use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script};
8use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
9use std::{
10    collections::{hash_map, hash_set, HashMap, HashSet},
11    fmt::Display,
12};
13
/// Common interface for containers tracking a set of [`Index`] values.
///
/// Implemented both by plain sets ([`IndexSet`]) and by reference-counting
/// maps ([`CounterMap`]), allowing [`Tracker`] registration code to work with either.
pub trait Indexer {
    /// Returns true if `index` is currently tracked.
    fn contains(&self, index: Index) -> bool;

    /// Inserts an [`Index`].
    ///
    /// Returns true if the index was not present and was successfully inserted, false otherwise.
    fn insert(&mut self, index: Index) -> bool;

    /// Removes an [`Index`].
    ///
    /// Returns true if the index was present and successfully removed, false otherwise.
    fn remove(&mut self, index: Index) -> bool;

    /// Returns the number of tracked indexes.
    fn len(&self) -> usize;
    /// Returns true when no index is tracked.
    fn is_empty(&self) -> bool;
}
30
/// Positional identifier of a [`ScriptPublicKey`] inside the tracker's index map.
pub type Index = u32;
/// Number of live registrations referencing a tracked index.
pub type RefCount = u16;

/// Tracks reference count of indexes
pub type Counters = CounterMap;

/// Tracks indexes
pub type Indexes = IndexSet;
39
/// Tracks reference count of indexes
///
/// Maps each [`Index`] to the number of times it has been registered. Note that
/// entries are never deleted on removal: a count may reach 0 and stay in the map
/// (see the [`Indexer`] impl).
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct CounterMap(HashMap<Index, RefCount>);
43
44impl CounterMap {
45    pub fn new() -> Self {
46        Self(HashMap::new())
47    }
48
49    pub fn with_capacity(capacity: usize) -> Self {
50        Self(HashMap::with_capacity(capacity))
51    }
52
53    #[cfg(test)]
54    pub fn with_counters(counters: Vec<Counter>) -> Self {
55        Self(counters.into_iter().map(|x| (x.index, x.count)).collect())
56    }
57
58    pub fn iter(&self) -> hash_map::Iter<'_, Index, RefCount> {
59        self.0.iter()
60    }
61
62    pub fn len(&self) -> usize {
63        self.0.len()
64    }
65
66    pub fn is_empty(&self) -> bool {
67        self.0.is_empty()
68    }
69
70    pub fn capacity(&self) -> usize {
71        self.0.capacity()
72    }
73}
74
75impl Indexer for CounterMap {
76    fn contains(&self, index: Index) -> bool {
77        self.0.contains_key(&index)
78    }
79
80    fn insert(&mut self, index: Index) -> bool {
81        let mut result = true;
82        self.0
83            .entry(index)
84            .and_modify(|x| {
85                *x += 1;
86                result = *x == 1;
87            })
88            .or_insert(1);
89        result
90    }
91
92    fn remove(&mut self, index: Index) -> bool {
93        let mut result = false;
94        self.0.entry(index).and_modify(|x| {
95            if *x > 0 {
96                *x -= 1;
97                result = *x == 0
98            }
99        });
100        result
101    }
102
103    fn len(&self) -> usize {
104        self.len()
105    }
106
107    fn is_empty(&self) -> bool {
108        self.is_empty()
109    }
110}
111
/// Test-only counter fixture pairing an [`Index`] with a [`RefCount`].
///
/// Equality and ordering (see impls below) consider `index` only; `count` and
/// `locked` are ignored by comparisons.
#[cfg(test)]
#[derive(Debug, Clone)]
pub struct Counter {
    // Tracked index (sole key for equality/ordering)
    pub index: Index,
    // Number of live registrations
    pub count: RefCount,
    // Auxiliary test flag — not read by any code visible here; presumably used by external tests
    pub locked: bool,
}
119
#[cfg(test)]
impl Counter {
    /// Builds an unlocked counter with the given index and count.
    pub fn new(index: Index, count: RefCount) -> Self {
        Counter { index, count, locked: false }
    }

    /// A counter is active while it holds at least one registration.
    pub fn active(&self) -> bool {
        self.count != 0
    }
}
130
#[cfg(test)]
impl PartialEq for Counter {
    /// Equality considers the index only; `count` and `locked` are deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index
    }
}
#[cfg(test)]
impl Eq for Counter {}

#[cfg(test)]
impl PartialOrd for Counter {
    // Canonical forwarding to `Ord`, keeping both orderings consistent.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
#[cfg(test)]
impl Ord for Counter {
    /// Ordering considers the index only, matching the `PartialEq` impl above.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.index.cmp(&other.index)
    }
}
152
/// Set of `Index`
///
/// Newtype over a [`HashSet`]; the plain (non-counting) [`Indexer`] implementation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexSet(HashSet<Index>);
156
157impl IndexSet {
158    pub fn new(indexes: Vec<Index>) -> Self {
159        Self(indexes.into_iter().collect())
160    }
161
162    pub fn with_capacity(capacity: usize) -> Self {
163        Self(HashSet::with_capacity(capacity))
164    }
165
166    pub fn iter(&self) -> hash_set::Iter<'_, Index> {
167        self.0.iter()
168    }
169
170    pub fn len(&self) -> usize {
171        self.0.len()
172    }
173
174    pub fn is_empty(&self) -> bool {
175        self.0.is_empty()
176    }
177
178    pub fn capacity(&self) -> usize {
179        self.0.capacity()
180    }
181
182    pub fn drain(&mut self) -> hash_set::Drain<'_, Index> {
183        self.0.drain()
184    }
185}
186
187impl Indexer for IndexSet {
188    fn contains(&self, index: Index) -> bool {
189        self.0.contains(&index)
190    }
191
192    fn insert(&mut self, index: Index) -> bool {
193        self.0.insert(index)
194    }
195
196    fn remove(&mut self, index: Index) -> bool {
197        self.0.remove(&index)
198    }
199
200    fn len(&self) -> usize {
201        self.len()
202    }
203
204    fn is_empty(&self) -> bool {
205        self.is_empty()
206    }
207}
208
/// Lock-free core state of the [`Tracker`]; always accessed through its `RwLock`.
#[derive(Debug)]
struct Inner {
    /// Index-based map of [`ScriptPublicKey`] to its reference count
    ///
    /// ### Implementation note
    ///
    /// The whole purpose of the tracker is to reduce a [`ScriptPublicKey`] to an [`Index`] in all
    /// [`Indexer`] instances. Therefore, every mutable access to the struct must be careful not to
    /// use `IndexMap` APIs which alter the index order of existing entries.
    script_pub_keys: IndexMap<ScriptPublicKey, RefCount>,

    /// Maximum address count that can be registered. Note this must be `<= Index::MAX` since we cast the returned indexes to `Index`
    max_addresses: usize,

    /// The preallocation used for the address index (`script_pub_keys`), `None` when no
    /// preallocation was requested at construction
    addresses_preallocation: Option<usize>,

    /// Set of entries [`Index`] in `script_pub_keys` having their [`RefCount`] at 0 hence considered
    /// empty.
    ///
    /// An empty entry can be recycled and hold a new `script_pub_key`.
    empty_entries: HashSet<Index>,
}
232
/// Fails at compile time if `MAX_ADDRESS_UPPER_BOUND > Index::MAX`.
/// This is mandatory since we cast the returned indexes to `Index`
/// (the subtraction underflows in a const context, aborting compilation).
const _: usize = Index::MAX as usize - Inner::MAX_ADDRESS_UPPER_BOUND;
236
237impl Inner {
238    /// The upper bound of the maximum address count. Note that the upper bound must
239    /// never exceed `Index::MAX` since we cast the returned indexes to `Index`. See
240    /// compile-time assertion above
241    const MAX_ADDRESS_UPPER_BOUND: usize = Self::expand_max_addresses(10_000_000);
242
243    /// The lower bound of the maximum address count
244    const MAX_ADDRESS_LOWER_BOUND: usize = 6;
245
246    /// Expanded count for a maximum of 1M addresses
247    const DEFAULT_MAX_ADDRESSES: usize = Self::expand_max_addresses(1_000_000);
248
249    /// Computes the optimal expanded max address count fitting in the actual allocated size of
250    /// the internal storage structure
251    const fn expand_max_addresses(max_addresses: usize) -> usize {
252        if max_addresses >= Self::MAX_ADDRESS_LOWER_BOUND {
253            // The following formula matches the internal allocation of an IndexMap or a HashMap
254            // as found in fns hashbrown::raw::inner::{capacity_to_buckets, bucket_mask_to_capacity}.
255            //
256            // The last allocated entry is reserved for recycling entries, hence the plus and minus 1
257            // which differ from the hashbrown formula.
258            ((max_addresses + 1) * 8 / 7).next_power_of_two() * 7 / 8 - 1
259        } else {
260            Self::MAX_ADDRESS_LOWER_BOUND
261        }
262    }
263
264    fn new(max_addresses: Option<usize>) -> Self {
265        // Expands the maximum address count to the IndexMap actual usable allocated size minus 1.
266        // Saving one entry for the insert/swap_remove scheme during entry recycling prevents a reallocation
267        // when reaching the maximum.
268        let max_addresses = max_addresses.map(Self::expand_max_addresses);
269        let addresses_preallocation = max_addresses;
270        let capacity = max_addresses.map(|x| x + 1).unwrap_or_default();
271
272        assert!(
273            capacity <= Self::MAX_ADDRESS_UPPER_BOUND + 1,
274            "Tracker maximum address count cannot exceed {}",
275            Self::MAX_ADDRESS_UPPER_BOUND
276        );
277        let max_addresses = max_addresses.unwrap_or(Self::DEFAULT_MAX_ADDRESSES);
278        debug!("Memory configuration: UTXO changed events wil be tracked for at most {} addresses", max_addresses);
279
280        let script_pub_keys = IndexMap::with_capacity(capacity);
281        debug!("Creating an address tracker with a capacity of {}", script_pub_keys.capacity());
282
283        let empty_entries = HashSet::with_capacity(capacity);
284        Self { script_pub_keys, max_addresses, addresses_preallocation, empty_entries }
285    }
286
287    fn is_full(&self) -> bool {
288        self.script_pub_keys.len() >= self.max_addresses && self.empty_entries.is_empty()
289    }
290
291    fn get(&self, spk: &ScriptPublicKey) -> Option<(Index, RefCount)> {
292        self.script_pub_keys.get_full(spk).map(|(index, _, count)| (index as Index, *count))
293    }
294
295    fn get_index(&self, index: Index) -> Option<&ScriptPublicKey> {
296        self.script_pub_keys.get_index(index as usize).map(|(spk, _)| spk)
297    }
298
299    fn get_index_address(&self, index: Index, prefix: Prefix) -> Option<Address> {
300        self.script_pub_keys
301            .get_index(index as usize)
302            .map(|(spk, _)| extract_script_pub_key_address(spk, prefix).expect("is retro-convertible"))
303    }
304
305    fn get_or_insert(&mut self, spk: ScriptPublicKey) -> Result<Index> {
306        match self.is_full() {
307            false => match self.script_pub_keys.entry(spk) {
308                Entry::Occupied(entry) => Ok(entry.index() as Index),
309                Entry::Vacant(entry) => {
310                    let mut index = entry.index() as Index;
311                    trace!(
312                        "AddressTracker insert #{} {}",
313                        index,
314                        extract_script_pub_key_address(entry.key(), Prefix::Mainnet).unwrap()
315                    );
316                    let _ = *entry.insert(0);
317
318                    // Try to recycle an empty entry if there is some
319                    let mut recycled = false;
320                    if (index + 1) as usize == self.script_pub_keys.len() && !self.empty_entries.is_empty() {
321                        // Takes the first empty entry index
322                        let empty_index = self.empty_entries.iter().cloned().next();
323                        if let Some(empty_index) = empty_index {
324                            // Stores the newly created entry at the empty entry index while keeping it registered as an
325                            // empty entry (because it is so at this stage, the ref count being 0).
326                            self.script_pub_keys.swap_remove_index(empty_index as usize);
327                            index = empty_index;
328                            recycled = true;
329                        }
330                    }
331                    // If no recycling occurred, registers the newly created entry as empty (since ref count is 0).
332                    if !recycled {
333                        self.empty_entries.insert(index);
334                    }
335                    Ok(index)
336                }
337            },
338            true => match self.script_pub_keys.get_index_of(&spk) {
339                Some(index) => Ok(index as Index),
340                None => Err(Error::MaxCapacityReached),
341            },
342        }
343    }
344
345    /// Increases by one the [`RefCount`] of the [`ScriptPublicKey`] at `index`.
346    ///
347    /// If the entry had a reference count of 0 before the increase, its index is removed from
348    /// the empty entries set.
349    fn inc_count(&mut self, index: Index) {
350        if let Some((_, count)) = self.script_pub_keys.get_index_mut(index as usize) {
351            *count += 1;
352            trace!("AddressTracker inc count #{} to {}", index, *count);
353            if *count == 1 {
354                self.empty_entries.remove(&index);
355            }
356        }
357    }
358
359    /// Decreases by one the [`RefCount`] of the [`ScriptPublicKey`] at `index`.
360    ///
361    /// Panics if the ref count is already 0.
362    ///
363    /// When the reference count reaches zero, the index is inserted into the empty entries set.
364    fn dec_count(&mut self, index: Index) {
365        if let Some((_, count)) = self.script_pub_keys.get_index_mut(index as usize) {
366            if *count == 0 {
367                panic!("Address tracker is trying to decrease an address counter that is already at zero");
368            }
369            *count -= 1;
370            trace!("AddressTracker dec count #{} to {}", index, *count);
371            if *count == 0 {
372                self.empty_entries.insert(index);
373            }
374        }
375    }
376
377    fn len(&self) -> usize {
378        assert!(self.script_pub_keys.len() >= self.empty_entries.len(), "entries marked empty are never removed from script_pub_keys");
379        self.script_pub_keys.len() - self.empty_entries.len()
380    }
381
382    fn is_empty(&self) -> bool {
383        self.len() == 0
384    }
385}
386
/// Tracker of a set of [`Address`], indexing and counting registrations
///
/// #### Implementation design
///
/// Each [`Address`] is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey).
/// This prevents inter-network duplication and optimizes UTXOs filtering efficiency.
///
/// But consequently the address network prefix gets lost and must be globally provided when querying for addresses by indexes.
#[derive(Debug)]
pub struct Tracker {
    // All state lives behind a single RwLock; public methods take short read/write sessions on it.
    inner: RwLock<Inner>,
}
399
400impl Display for Tracker {
401    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
402        write!(f, "{} addresses", self.inner.read().script_pub_keys.len())
403    }
404}
405
impl Tracker {
    /// The upper bound of the maximum address count
    pub const MAX_ADDRESS_UPPER_BOUND: usize = Inner::MAX_ADDRESS_UPPER_BOUND;

    /// Expanded count for a maximum of 1M addresses
    pub const DEFAULT_MAX_ADDRESSES: usize = Inner::DEFAULT_MAX_ADDRESSES;

    // Number of addresses processed per lock session; between chunks the write lock is
    // bumped (see `RwLockWriteGuard::bump`) so other lock waiters can make progress.
    const ADDRESS_CHUNK_SIZE: usize = 1024;

    /// Computes the optimal expanded max address count fitting in the actual allocated size of
    /// the internal storage structure
    pub const fn expand_max_addresses(max_addresses: usize) -> usize {
        Inner::expand_max_addresses(max_addresses)
    }

    /// Creates a new `Tracker` instance. If `max_addresses` is `Some`, uses it to prealloc
    /// the internal index as well as for bounding the index size. Otherwise, performs no
    /// prealloc while bounding the index size by `Tracker::DEFAULT_MAX_ADDRESSES`
    pub fn new(max_addresses: Option<usize>) -> Self {
        Self { inner: RwLock::new(Inner::new(max_addresses)) }
    }

    /// Test helper: creates an unbounded-prealloc tracker pre-registering `addresses`,
    /// each with a reference count of 1.
    #[cfg(test)]
    pub fn with_addresses(addresses: &[Address]) -> Self {
        let tracker = Self { inner: RwLock::new(Inner::new(None)) };
        // Chunked so the write lock is re-acquired between chunks
        for chunk in addresses.chunks(Self::ADDRESS_CHUNK_SIZE) {
            let mut inner = tracker.inner.write();
            for address in chunk {
                let index = inner.get_or_insert(pay_to_address_script(address)).unwrap();
                inner.inc_count(index);
            }
        }
        tracker
    }

    /// Takes a read lock on the internal state and returns a guard exposing read-only accessors.
    pub fn data(&self) -> TrackerReadGuard<'_> {
        TrackerReadGuard { guard: self.inner.read() }
    }

    /// Returns the index and ref count of `spk` when tracked.
    pub fn get(&self, spk: &ScriptPublicKey) -> Option<(Index, RefCount)> {
        self.inner.read().get(spk)
    }

    /// Returns the index and ref count of `address` (via its script public key) when tracked.
    pub fn get_address(&self, address: &Address) -> Option<(Index, RefCount)> {
        self.get(&pay_to_address_script(address))
    }

    /// Returns the address stored at `index`, rendered with the network `prefix`, if any.
    pub fn get_address_at_index(&self, index: Index, prefix: Prefix) -> Option<Address> {
        self.inner.read().get_index_address(index, prefix)
    }

    /// True when `spk` is tracked AND its index is contained in `indexes`.
    pub fn contains<T: Indexer>(&self, indexes: &T, spk: &ScriptPublicKey) -> bool {
        self.get(spk).is_some_and(|(index, _)| indexes.contains(index))
    }

    /// Address-based convenience wrapper over [`Self::contains`].
    pub fn contains_address<T: Indexer>(&self, indexes: &T, address: &Address) -> bool {
        self.contains(indexes, &pay_to_address_script(address))
    }

    /// Returns an index set containing the indexes of all the addresses both registered in the tracker and in `indexes`.
    pub fn unregistering_indexes(&self, indexes: &Indexes, addresses: &[Address]) -> Indexes {
        Indexes::new(
            addresses
                .iter()
                .filter_map(|address| {
                    self.get(&pay_to_address_script(address)).and_then(|(index, _)| indexes.contains(index).then_some(index))
                })
                .collect(),
        )
    }

    /// Tries to register an `Address` vector into an `Indexer`. The addresses are first registered in the tracker if unknown
    /// yet and their reference count is increased when successfully inserted in the `Indexer`.
    ///
    /// On success, returns the addresses that were actually inserted in the `Indexer`.
    ///
    /// Fails if the maximum capacity gets reached, leaving the tracker unchanged.
    pub fn register<T: Indexer>(&self, indexes: &mut T, mut addresses: Vec<Address>) -> Result<Vec<Address>> {
        let mut rollback: bool = false;
        {
            let mut counter: usize = 0;
            let mut inner = self.inner.write();
            // `retain` keeps only the addresses actually inserted into `indexes`,
            // so the returned vector (or the rollback set) is exact.
            addresses.retain(|address| {
                counter += 1;
                // Periodically yield the write lock so concurrent lock waiters are not starved
                if counter % Self::ADDRESS_CHUNK_SIZE == 0 {
                    RwLockWriteGuard::bump(&mut inner);
                }
                let spk = pay_to_address_script(address);
                match inner.get_or_insert(spk) {
                    Ok(index) => {
                        if indexes.insert(index) {
                            inner.inc_count(index);
                            true
                        } else {
                            false
                        }
                    }
                    Err(Error::MaxCapacityReached) => {
                        // Rollback registration
                        rollback = true;
                        false
                    }
                }
            });
        }
        match rollback {
            false => Ok(addresses),
            true => {
                // Undo the partial registration performed before capacity was hit
                let _ = self.unregister(indexes, addresses);
                Err(Error::MaxCapacityReached)
            }
        }
    }

    /// Unregisters an `Address` vector from an `Indexer`. The addresses, when existing both in the tracker
    /// and the `Indexer`, are first removed from the `Indexer` and on success get their reference count
    /// decreased.
    ///
    /// Returns the addresses that were successfully unregistered from the `Indexer`.
    pub fn unregister<T: Indexer>(&self, indexes: &mut T, mut addresses: Vec<Address>) -> Vec<Address> {
        if indexes.is_empty() {
            vec![]
        } else {
            let mut counter: usize = 0;
            let mut inner = self.inner.write();
            addresses.retain(|address| {
                counter += 1;
                // Periodically yield the write lock so concurrent lock waiters are not starved
                if counter % Self::ADDRESS_CHUNK_SIZE == 0 {
                    RwLockWriteGuard::bump(&mut inner);
                }
                let spk = pay_to_address_script(address);
                if let Some((index, _)) = inner.get(&spk) {
                    if indexes.remove(index) {
                        inner.dec_count(index);
                        true
                    } else {
                        false
                    }
                } else {
                    false
                }
            });
            addresses
        }
    }

    /// Unregisters all indexes contained in `indexes`, draining it in the process.
    pub fn unregister_indexes(&self, indexes: &mut Indexes) {
        // Chunked so the write lock is re-acquired between chunks
        for chunk in &indexes.drain().chunks(Self::ADDRESS_CHUNK_SIZE) {
            let mut inner = self.inner.write();
            chunk.for_each(|index| inner.dec_count(index));
        }
    }

    /// Converts `indexes` into addresses rendered with the network `prefix`,
    /// silently skipping indexes with no stored entry.
    pub fn to_addresses(&self, indexes: &[Index], prefix: Prefix) -> Vec<Address> {
        let mut addresses = Vec::with_capacity(indexes.len());
        // Chunked so the read lock is re-acquired between chunks
        for chunk in indexes.chunks(Self::ADDRESS_CHUNK_SIZE) {
            let inner = self.inner.read();
            chunk.iter().for_each(|index| {
                if let Some(address) = inner.get_index_address(*index, prefix) {
                    addresses.push(address);
                }
            });
        }
        addresses
    }

    /// Number of active (ref count > 0) entries.
    pub fn len(&self) -> usize {
        self.inner.read().len()
    }

    /// True when no entry is active.
    pub fn is_empty(&self) -> bool {
        self.inner.read().is_empty()
    }

    /// Allocated capacity of the internal address index.
    pub fn capacity(&self) -> usize {
        self.inner.read().script_pub_keys.capacity()
    }

    /// The preallocation requested at construction, if any (post-expansion).
    pub fn addresses_preallocation(&self) -> Option<usize> {
        self.inner.read().addresses_preallocation
    }
}
589
590impl Default for Tracker {
591    fn default() -> Self {
592        Self::new(None)
593    }
594}
595
/// Read-locked view over a [`Tracker`]'s internal state, obtained via [`Tracker::data`].
///
/// Holds the read lock for its whole lifetime, so keep it short-lived.
pub struct TrackerReadGuard<'a> {
    guard: RwLockReadGuard<'a, Inner>,
}
599
600impl<'a> TrackerReadGuard<'a> {
601    pub fn get_index(&'a self, index: Index) -> Option<&'a ScriptPublicKey> {
602        self.guard.get_index(index)
603    }
604
605    pub fn iter_keys(&'a self, indexes: &'a Indexes) -> impl Iterator<Item = Option<&'a ScriptPublicKey>> {
606        indexes.0.iter().cloned().map(|index| self.get_index(index))
607    }
608}
609
#[cfg(test)]
mod tests {
    use super::*;
    use kaspa_math::Uint256;

    // Builds `count` deterministic mainnet addresses with payloads derived from `start..start+count`.
    fn create_addresses(start: usize, count: usize) -> Vec<Address> {
        (start..start + count)
            .map(|i| Address::new(Prefix::Mainnet, kaspa_addresses::Version::PubKey, &Uint256::from_u64(i as u64).to_le_bytes()))
            .collect()
    }

    // Exercises capacity expansion, the full-tracker error path, and empty-entry recycling.
    #[test]
    fn test_tracker_capacity_and_entry_recycling() {
        const INIT_MAX_ADDRESSES: usize = 6;
        // Mirrors Inner::expand_max_addresses for INIT_MAX_ADDRESSES
        const MAX_ADDRESSES: usize = ((INIT_MAX_ADDRESSES + 1) * 8 / 7).next_power_of_two() * 7 / 8 - 1;
        const CAPACITY: usize = MAX_ADDRESSES + 1;

        let tracker = Tracker::new(Some(MAX_ADDRESSES));
        assert_eq!(
            tracker.addresses_preallocation().unwrap(),
            MAX_ADDRESSES,
            "tracker maximum address count should be expanded to the available allocated entries, minus 1 for a transient insert/swap_remove"
        );
        assert_eq!(
            tracker.capacity(),
            CAPACITY,
            "tracker capacity should match the maximum address count plus 1 extra entry for a transient insert/swap_remove"
        );
        let aa = create_addresses(0, MAX_ADDRESSES);
        assert_eq!(aa.len(), MAX_ADDRESSES);

        // Register addresses 0..MAX_ADDRESSES
        let mut idx_a = Indexes::new(vec![]);
        let aa = tracker.register(&mut idx_a, aa).unwrap();
        let aai = aa.iter().map(|x| tracker.get_address(x).unwrap().0).collect_vec();
        assert_eq!(aa.len(), MAX_ADDRESSES, "all addresses should be registered");
        assert_eq!(idx_a.len(), MAX_ADDRESSES, "all addresses should be registered");
        for i in 0..aa.len() {
            assert!(tracker.contains_address(&idx_a, &aa[i]), "tracker should contain the registered address");
            assert!(idx_a.contains(aai[i]), "index set should contain the registered address index");
        }
        assert_eq!(tracker.capacity(), CAPACITY);

        // Try to re-register addresses 0..MAX_ADDRESSES
        let a = tracker.register(&mut idx_a, aa).unwrap();
        assert_eq!(a.len(), 0, "all addresses should already be registered");
        assert_eq!(idx_a.len(), MAX_ADDRESSES, "all addresses should still be registered");

        // Try to register an additional address while the tracker is full
        assert!(
            tracker.register(&mut idx_a, create_addresses(MAX_ADDRESSES, 1)).is_err(),
            "the tracker is full and should refuse a new address"
        );

        // Register address set 1..MAX_ADDRESSES, already fully covered by the tracker address set
        const AB_COUNT: usize = MAX_ADDRESSES - 1;
        let mut idx_b = Indexes::new(vec![]);
        let ab = tracker.register(&mut idx_b, create_addresses(1, AB_COUNT)).unwrap();
        assert_eq!(ab.len(), AB_COUNT, "all addresses should be registered");
        assert_eq!(idx_b.len(), AB_COUNT, "all addresses should be registered");

        // Empty the tracker entry containing A0
        assert_eq!(tracker.unregister(&mut idx_a, create_addresses(0, 1)).len(), 1);
        assert_eq!(idx_a.len(), MAX_ADDRESSES - 1, "entry #0 with address A0 should now be marked empty");

        // Fill the empty entry with a single new address A8
        const AC_COUNT: usize = 1;
        let ac = tracker.register(&mut idx_a, create_addresses(MAX_ADDRESSES, AC_COUNT)).unwrap();
        let aci = ac.iter().map(|x| tracker.get_address(x).unwrap().0).collect_vec();
        assert_eq!(ac.len(), AC_COUNT, "a new address should be registered");
        assert_eq!(idx_a.len(), MAX_ADDRESSES, "a new address should be registered");
        assert_eq!(ac[0], create_addresses(MAX_ADDRESSES, AC_COUNT)[0], "the new address A8 should be registered");
        assert!(tracker.contains_address(&idx_a, &ac[0]), "the new address A8 should be registered");
        // Recycling must reuse the slot emptied by A0's unregistration
        assert_eq!(aai[0], aci[0], "the newly registered address A8 should occupy the previously emptied entry");

        assert_eq!(
            tracker.capacity(),
            CAPACITY,
            "the tracker capacity should not have been affected by the transient insert/swap_remove"
        );
    }

    // IndexSet equality must be order-insensitive and content-sensitive.
    #[test]
    fn test_indexes_eq() {
        let i1 = IndexSet::new(vec![0, 1, 2, 3, 5, 7, 11]);
        let i2 = IndexSet::new(vec![5, 7, 11, 0, 1, 2, 3]);
        let i3 = IndexSet::new(vec![0, 1, 2, 4, 8, 16, 32]);
        let i4 = IndexSet::new(vec![0, 1]);
        assert_eq!(i1, i1);
        assert_eq!(i1, i2);
        assert_ne!(i1, i3);
        assert_ne!(i1, i4);
        assert_eq!(i2, i2);
        assert_ne!(i2, i3);
        assert_ne!(i2, i4);
        assert_eq!(i3, i3);
        assert_ne!(i3, i4);
        assert_eq!(i4, i4);
    }

    // Documents the IndexMap swap_remove_index behavior the recycling scheme relies on:
    // the removed slot is refilled by the *last* entry, all other indexes stay put.
    #[test]
    fn test_index_map_replace() {
        let mut m: IndexMap<u64, RefCount> = IndexMap::with_capacity(7);
        m.insert(1, 10);
        m.insert(2, 0);
        m.insert(3, 30);
        m.insert(4, 40);
        assert_eq!(m.get_index(0), Some((&1, &10)));
        assert_eq!(m.get_index(1), Some((&2, &0)));
        assert_eq!(m.get_index(2), Some((&3, &30)));
        assert_eq!(m.get_index(3), Some((&4, &40)));

        assert_eq!(m.swap_remove_index(1), Some((2, 0)));

        assert_eq!(m.get_index(0), Some((&1, &10)));
        assert_eq!(m.get_index(1), Some((&4, &40)));
        assert_eq!(m.get_index(2), Some((&3, &30)));
    }

    // Documents the IndexMap growth step that Inner::expand_max_addresses models.
    #[test]
    fn test_index_map_capacity() {
        const CAPACITY: usize = 14;
        let mut m: IndexMap<u64, RefCount> = IndexMap::with_capacity(CAPACITY);
        for i in 0..CAPACITY {
            m.insert(i as u64, 0);
            assert_eq!(m.capacity(), CAPACITY);
        }
        m.insert(CAPACITY as u64 + 1, 0);
        assert_eq!(m.capacity(), ((CAPACITY + 1) * 8 / 7).next_power_of_two() * 7 / 8);
    }
}