solana_runtime/in_mem_accounts_index.rs

use {
    crate::{
        accounts_index::{
            AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, IndexValue,
            PreAllocatedAccountMapEntry, RefCount, SlotList, SlotSlice, UpsertReclaim, ZeroLamport,
        },
        bucket_map_holder::{Age, BucketMapHolder},
        bucket_map_holder_stats::BucketMapHolderStats,
        waitable_condvar::WaitableCondvar,
    },
    rand::{thread_rng, Rng},
    solana_bucket_map::bucket_api::BucketApi,
    safecoin_measure::measure::Measure,
    solana_sdk::{clock::Slot, pubkey::Pubkey},
    std::{
        collections::{
            hash_map::{Entry, VacantEntry},
            HashMap,
        },
        fmt::Debug,
        ops::{Bound, RangeBounds, RangeInclusive},
        sync::{
            atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering},
            Arc, Mutex, RwLock, RwLockWriteGuard,
        },
    },
};
type K = Pubkey;
type CacheRangesHeld = RwLock<Vec<RangeInclusive<Pubkey>>>;

type InMemMap<T> = HashMap<Pubkey, AccountMapEntry<T>>;
33#[derive(Debug)]
34pub struct PossibleEvictions<T: IndexValue> {
35    /// vec per age in the future, up to size 'ages_to_stay_in_cache'
36    possible_evictions: Vec<FlushScanResult<T>>,
37    /// next index to use into 'possible_evictions'
38    /// if 'index' >= 'possible_evictions.len()', then there are no available entries
39    index: usize,
40}
41
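// How 'PossibleEvictions' is used by the flush path (summary of the impl below):
//   1. reset(n) clears all lists and positions 'index' so that 'n' ages of future data can be filled in
//   2. insert(relative_age, ...) files an entry under the future age at which it may be evicted
//   3. get_possible_evictions() hands back one age's worth of candidates per call, in age order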
42impl<T: IndexValue> PossibleEvictions<T> {
43    fn new(max_ages: Age) -> Self {
44        Self {
45            possible_evictions: (0..max_ages).map(|_| FlushScanResult::default()).collect(),
46            index: max_ages as usize, // initially no data
47        }
48    }
49
    /// take ownership of the possible evictions for the next age. This is required because ownership of the Arc strong counts must transfer to the caller so entries can be removed from the accounts index
51    fn get_possible_evictions(&mut self) -> Option<FlushScanResult<T>> {
52        self.possible_evictions.get_mut(self.index).map(|result| {
53            self.index += 1;
54            // remove the list from 'possible_evictions'
55            std::mem::take(result)
56        })
57    }
58
59    /// clear existing data and prepare to add 'entries' more ages of data
60    fn reset(&mut self, entries: Age) {
61        self.possible_evictions.iter_mut().for_each(|entry| {
62            entry.evictions_random.clear();
63            entry.evictions_age_possible.clear();
64        });
65        let entries = entries as usize;
66        assert!(
67            entries <= self.possible_evictions.len(),
68            "entries: {}, len: {}",
69            entries,
70            self.possible_evictions.len()
71        );
72        self.index = self.possible_evictions.len() - entries;
73    }
74
75    /// insert 'entry' at 'relative_age' in the future into 'possible_evictions'
76    fn insert(&mut self, relative_age: Age, key: Pubkey, entry: AccountMapEntry<T>, random: bool) {
77        let index = self.index + (relative_age as usize);
78        let list = &mut self.possible_evictions[index];
79        if random {
80            &mut list.evictions_random
81        } else {
82            &mut list.evictions_age_possible
83        }
84        .push((key, entry));
85    }
86}
87
88// one instance of this represents one bin of the accounts index.
89pub struct InMemAccountsIndex<T: IndexValue> {
90    last_age_flushed: AtomicU8,
91
92    // backing store
93    map_internal: RwLock<InMemMap<T>>,
94    storage: Arc<BucketMapHolder<T>>,
95    bin: usize,
96
97    bucket: Option<Arc<BucketApi<(Slot, T)>>>,
98
99    // pubkey ranges that this bin must hold in the cache while the range is present in this vec
100    pub(crate) cache_ranges_held: CacheRangesHeld,
101    // incremented each time stop_evictions is changed
102    stop_evictions_changes: AtomicU64,
    // non-zero while ranges are being manipulated. Used to keep an async flush from removing things while a range is being held.
    stop_evictions: AtomicU64,
105    // set to true while this bin is being actively flushed
106    flushing_active: AtomicBool,
107
108    /// info to streamline initial index generation
109    startup_info: Mutex<StartupInfo<T>>,
110
111    /// possible evictions for next few slots coming up
112    possible_evictions: RwLock<PossibleEvictions<T>>,
    /// when age % ages_to_stay_in_cache == 'age_to_flush_bin_mod', then calculate the next 'ages_to_stay_in_cache' 'possible_evictions'
114    /// this causes us to scan the entire in-mem hash map every 1/'ages_to_stay_in_cache' instead of each age
115    age_to_flush_bin_mod: Age,
116}
117
118impl<T: IndexValue> Debug for InMemAccountsIndex<T> {
119    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
120        Ok(())
121    }
122}
123
124pub enum InsertNewEntryResults {
125    DidNotExist,
126    ExistedNewEntryZeroLamports,
127    ExistedNewEntryNonZeroLamports,
128}
129
130#[derive(Default, Debug)]
131struct StartupInfo<T: IndexValue> {
132    /// entries to add next time we are flushing to disk
133    insert: Vec<(Slot, Pubkey, T)>,
134    /// pubkeys that were found to have duplicate index entries
135    duplicates: Vec<(Slot, Pubkey)>,
136}
137
138#[derive(Default, Debug)]
139/// result from scanning in-mem index during flush
140struct FlushScanResult<T> {
141    /// pubkeys whose age indicates they may be evicted now, pending further checks.
142    evictions_age_possible: Vec<(Pubkey, AccountMapEntry<T>)>,
143    /// pubkeys chosen to evict based on random eviction
144    evictions_random: Vec<(Pubkey, AccountMapEntry<T>)>,
145}
146
147impl<T: IndexValue> InMemAccountsIndex<T> {
148    pub fn new(storage: &Arc<BucketMapHolder<T>>, bin: usize) -> Self {
149        let ages_to_stay_in_cache = storage.ages_to_stay_in_cache;
150        Self {
151            map_internal: RwLock::default(),
152            storage: Arc::clone(storage),
153            bin,
154            bucket: storage
155                .disk
156                .as_ref()
157                .map(|disk| disk.get_bucket_from_index(bin))
158                .map(Arc::clone),
159            cache_ranges_held: CacheRangesHeld::default(),
160            stop_evictions_changes: AtomicU64::default(),
161            stop_evictions: AtomicU64::default(),
162            flushing_active: AtomicBool::default(),
163            // initialize this to max, to make it clear we have not flushed at age 0, the starting age
164            last_age_flushed: AtomicU8::new(Age::MAX),
165            startup_info: Mutex::default(),
166            possible_evictions: RwLock::new(PossibleEvictions::new(ages_to_stay_in_cache)),
167            // Spread out the scanning across all ages within the window.
168            // This causes us to scan 1/N of the bins each 'Age'
169            age_to_flush_bin_mod: thread_rng().gen_range(0, ages_to_stay_in_cache),
170        }
171    }
172
    /// returns the number of ages to scan ahead
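    /// e.g. with ages_to_stay_in_cache = 10 and age_to_flush_bin_mod = 3, ages 3, 13, 23, ...
    /// trigger a full scan (outside of startup) that pre-computes evictions for the next 10 ages;
    /// the other ages consume those pre-computed results (see 'possible_evictions')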
174    fn ages_to_scan_ahead(&self, current_age: Age) -> Age {
175        let ages_to_stay_in_cache = self.storage.ages_to_stay_in_cache;
176        if (self.age_to_flush_bin_mod == current_age % ages_to_stay_in_cache)
177            && !self.storage.get_startup()
178        {
179            // scan ahead multiple ages
180            ages_to_stay_in_cache
181        } else {
182            1 // just current age
183        }
184    }
185
186    /// true if this bucket needs to call flush for the current age
187    /// we need to scan each bucket once per value of age
188    fn get_should_age(&self, age: Age) -> bool {
189        let last_age_flushed = self.last_age_flushed();
190        last_age_flushed != age
191    }
192
193    /// called after flush scans this bucket at the current age
194    fn set_has_aged(&self, age: Age, can_advance_age: bool) {
195        self.last_age_flushed.store(age, Ordering::Release);
196        self.storage.bucket_flushed_at_current_age(can_advance_age);
197    }
198
199    fn last_age_flushed(&self) -> Age {
200        self.last_age_flushed.load(Ordering::Acquire)
201    }
202
203    /// Release entire in-mem hashmap to free all memory associated with it.
204    /// Idea is that during startup we needed a larger map than we need during runtime.
205    /// When using disk-buckets, in-mem index grows over time with dynamic use and then shrinks, in theory back to 0.
206    pub fn shrink_to_fit(&self) {
207        // shrink_to_fit could be quite expensive on large map sizes, which 'no disk buckets' could produce, so avoid shrinking in case we end up here
208        if self.storage.is_disk_index_enabled() {
209            self.map_internal.write().unwrap().shrink_to_fit();
210        }
211    }
212
213    pub fn items<R>(&self, range: &R) -> Vec<(K, AccountMapEntry<T>)>
214    where
215        R: RangeBounds<Pubkey> + std::fmt::Debug,
216    {
217        let m = Measure::start("items");
218        self.hold_range_in_memory(range, true);
219        let map = self.map_internal.read().unwrap();
220        let mut result = Vec::with_capacity(map.len());
221        map.iter().for_each(|(k, v)| {
222            if range.contains(k) {
223                result.push((*k, Arc::clone(v)));
224            }
225        });
226        drop(map);
227        self.hold_range_in_memory(range, false);
228        Self::update_stat(&self.stats().items, 1);
229        Self::update_time_stat(&self.stats().items_us, m);
230        result
231    }
232
233    // only called in debug code paths
234    pub fn keys(&self) -> Vec<Pubkey> {
235        Self::update_stat(&self.stats().keys, 1);
        // easiest implementation is to load everything from disk into cache and return the keys
237        let evictions_guard = EvictionsGuard::lock(self);
238        self.put_range_in_cache(&None::<&RangeInclusive<Pubkey>>, &evictions_guard);
239        let keys = self.map_internal.read().unwrap().keys().cloned().collect();
240        keys
241    }
242
243    fn load_from_disk(&self, pubkey: &Pubkey) -> Option<(SlotList<T>, RefCount)> {
244        self.bucket.as_ref().and_then(|disk| {
245            let m = Measure::start("load_disk_found_count");
246            let entry_disk = disk.read_value(pubkey);
247            match &entry_disk {
248                Some(_) => {
249                    Self::update_time_stat(&self.stats().load_disk_found_us, m);
250                    Self::update_stat(&self.stats().load_disk_found_count, 1);
251                }
252                None => {
253                    Self::update_time_stat(&self.stats().load_disk_missing_us, m);
254                    Self::update_stat(&self.stats().load_disk_missing_count, 1);
255                }
256            }
257            entry_disk
258        })
259    }
260
261    fn load_account_entry_from_disk(&self, pubkey: &Pubkey) -> Option<AccountMapEntry<T>> {
262        let entry_disk = self.load_from_disk(pubkey)?; // returns None if not on disk
263
264        Some(self.disk_to_cache_entry(entry_disk.0, entry_disk.1))
265    }
266
267    /// lookup 'pubkey' by only looking in memory. Does not look on disk.
268    /// callback is called whether pubkey is found or not
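    /// a hit also bumps the entry's age to a future age, so recently used entries stay in the cache longer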
269    fn get_only_in_mem<RT>(
270        &self,
271        pubkey: &K,
272        callback: impl for<'a> FnOnce(Option<&'a AccountMapEntry<T>>) -> RT,
273    ) -> RT {
274        let mut found = true;
275        let mut m = Measure::start("get");
276        let result = {
277            let map = self.map_internal.read().unwrap();
278            let result = map.get(pubkey);
279            m.stop();
280
281            callback(if let Some(entry) = result {
282                entry.set_age(self.storage.future_age_to_flush());
283                Some(entry)
284            } else {
285                drop(map);
286                found = false;
287                None
288            })
289        };
290
291        let stats = self.stats();
292        let (count, time) = if found {
293            (&stats.gets_from_mem, &stats.get_mem_us)
294        } else {
295            (&stats.gets_missing, &stats.get_missing_us)
296        };
297        Self::update_stat(time, m.as_us());
298        Self::update_stat(count, 1);
299
300        result
301    }
302
303    /// lookup 'pubkey' in index (in mem or on disk)
304    pub fn get(&self, pubkey: &K) -> Option<AccountMapEntry<T>> {
305        self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone)))
306    }
307
308    /// lookup 'pubkey' in index (in_mem or disk).
309    /// call 'callback' whether found or not
310    pub(crate) fn get_internal<RT>(
311        &self,
312        pubkey: &K,
313        // return true if item should be added to in_mem cache
314        callback: impl for<'a> FnOnce(Option<&AccountMapEntry<T>>) -> (bool, RT),
315    ) -> RT {
316        self.get_only_in_mem(pubkey, |entry| {
317            if let Some(entry) = entry {
318                entry.set_age(self.storage.future_age_to_flush());
319                callback(Some(entry)).1
320            } else {
321                // not in cache, look on disk
322                let stats = self.stats();
323                let disk_entry = self.load_account_entry_from_disk(pubkey);
324                if disk_entry.is_none() {
325                    return callback(None).1;
326                }
327                let disk_entry = disk_entry.unwrap();
328                let mut map = self.map_internal.write().unwrap();
329                let entry = map.entry(*pubkey);
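                // another thread may have inserted this pubkey between the read-lock miss above
                // and acquiring the write lock, so prefer any entry already in the map over the
                // copy we just loaded from disk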
330                match entry {
331                    Entry::Occupied(occupied) => callback(Some(occupied.get())).1,
332                    Entry::Vacant(vacant) => {
333                        let (add_to_cache, rt) = callback(Some(&disk_entry));
334
335                        if add_to_cache {
336                            stats.inc_mem_count(self.bin);
337                            vacant.insert(disk_entry);
338                        }
339                        rt
340                    }
341                }
342            }
343        })
344    }
345
346    fn remove_if_slot_list_empty_value(&self, slot_list: SlotSlice<T>) -> bool {
347        if slot_list.is_empty() {
348            self.stats().inc_delete();
349            true
350        } else {
351            false
352        }
353    }
354
355    fn delete_disk_key(&self, pubkey: &Pubkey) {
356        if let Some(disk) = self.bucket.as_ref() {
357            disk.delete_key(pubkey)
358        }
359    }
360
361    /// return false if the entry is in the index (disk or memory) and has a slot list len > 0
362    /// return true in all other cases, including if the entry is NOT in the index at all
363    fn remove_if_slot_list_empty_entry(&self, entry: Entry<K, AccountMapEntry<T>>) -> bool {
364        match entry {
365            Entry::Occupied(occupied) => {
366                let result =
367                    self.remove_if_slot_list_empty_value(&occupied.get().slot_list.read().unwrap());
368                if result {
369                    // note there is a potential race here that has existed.
370                    // if someone else holds the arc,
371                    //  then they think the item is still in the index and can make modifications.
372                    // We have to have a write lock to the map here, which means nobody else can get
373                    //  the arc, but someone may already have retrieved a clone of it.
374                    // account index in_mem flushing is one such possibility
375                    self.delete_disk_key(occupied.key());
376                    self.stats().dec_mem_count(self.bin);
377                    occupied.remove();
378                }
379                result
380            }
381            Entry::Vacant(vacant) => {
382                // not in cache, look on disk
383                let entry_disk = self.load_from_disk(vacant.key());
384                match entry_disk {
385                    Some(entry_disk) => {
386                        // on disk
387                        if self.remove_if_slot_list_empty_value(&entry_disk.0) {
388                            // not in cache, but on disk, so just delete from disk
389                            self.delete_disk_key(vacant.key());
390                            true
391                        } else {
392                            // could insert into cache here, but not required for correctness and value is unclear
393                            false
394                        }
395                    }
396                    None => true, // not in cache or on disk, but slot list is 'empty' and entry is not in index, so return true
397                }
398            }
399        }
400    }
401
402    // If the slot list for pubkey exists in the index and is empty, remove the index entry for pubkey and return true.
403    // Return false otherwise.
404    pub fn remove_if_slot_list_empty(&self, pubkey: Pubkey) -> bool {
405        let mut m = Measure::start("entry");
406        let mut map = self.map_internal.write().unwrap();
407        let entry = map.entry(pubkey);
408        m.stop();
409        let found = matches!(entry, Entry::Occupied(_));
410        let result = self.remove_if_slot_list_empty_entry(entry);
411        drop(map);
412
413        self.update_entry_stats(m, found);
414        result
415    }
416
417    pub fn slot_list_mut<RT>(
418        &self,
419        pubkey: &Pubkey,
420        user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT,
421    ) -> Option<RT> {
422        self.get_internal(pubkey, |entry| {
423            (
424                true,
425                entry.map(|entry| {
426                    let result = user(&mut entry.slot_list.write().unwrap());
427                    entry.set_dirty(true);
428                    result
429                }),
430            )
431        })
432    }
433
434    pub fn unref(&self, pubkey: &Pubkey) {
435        self.get_internal(pubkey, |entry| {
436            if let Some(entry) = entry {
437                entry.add_un_ref(false)
438            }
439            (true, ())
440        })
441    }
442
443    pub fn upsert(
444        &self,
445        pubkey: &Pubkey,
446        new_value: PreAllocatedAccountMapEntry<T>,
447        other_slot: Option<Slot>,
448        reclaims: &mut SlotList<T>,
449        reclaim: UpsertReclaim,
450    ) {
451        let mut updated_in_mem = true;
452        // try to get it just from memory first using only a read lock
453        self.get_only_in_mem(pubkey, |entry| {
454            if let Some(entry) = entry {
455                Self::lock_and_update_slot_list(
456                    entry,
457                    new_value.into(),
458                    other_slot,
459                    reclaims,
460                    reclaim,
461                );
462                // age is incremented by caller
463            } else {
464                let mut m = Measure::start("entry");
465                let mut map = self.map_internal.write().unwrap();
466                let entry = map.entry(*pubkey);
467                m.stop();
468                let found = matches!(entry, Entry::Occupied(_));
469                match entry {
470                    Entry::Occupied(mut occupied) => {
471                        let current = occupied.get_mut();
472                        Self::lock_and_update_slot_list(
473                            current,
474                            new_value.into(),
475                            other_slot,
476                            reclaims,
477                            reclaim,
478                        );
479                        current.set_age(self.storage.future_age_to_flush());
480                    }
481                    Entry::Vacant(vacant) => {
482                        // not in cache, look on disk
483                        updated_in_mem = false;
484
485                        // desired to be this for filler accounts: self.storage.get_startup();
486                        // but, this has proven to be far too slow at high account counts
487                        let directly_to_disk = false;
488                        if directly_to_disk {
489                            // We may like this to always run, but it is unclear.
490                            // If disk bucket needs to resize, then this call can stall for a long time.
491                            // Right now, we know it is safe during startup.
492                            let already_existed = self
493                                .upsert_on_disk(vacant, new_value, other_slot, reclaims, reclaim);
494                            if !already_existed {
495                                self.stats().inc_insert();
496                            }
497                        } else {
498                            // go to in-mem cache first
499                            let disk_entry = self.load_account_entry_from_disk(vacant.key());
500                            let new_value = if let Some(disk_entry) = disk_entry {
501                                // on disk, so merge new_value with what was on disk
502                                Self::lock_and_update_slot_list(
503                                    &disk_entry,
504                                    new_value.into(),
505                                    other_slot,
506                                    reclaims,
507                                    reclaim,
508                                );
509                                disk_entry
510                            } else {
511                                // not on disk, so insert new thing
512                                self.stats().inc_insert();
513                                new_value.into_account_map_entry(&self.storage)
514                            };
515                            assert!(new_value.dirty());
516                            vacant.insert(new_value);
517                            self.stats().inc_mem_count(self.bin);
518                        }
519                    }
520                }
521
522                drop(map);
523                self.update_entry_stats(m, found);
524            };
525        });
526        if updated_in_mem {
527            Self::update_stat(&self.stats().updates_in_mem, 1);
528        }
529    }
530
531    fn update_entry_stats(&self, stopped_measure: Measure, found: bool) {
532        let stats = self.stats();
533        let (count, time) = if found {
534            (&stats.entries_from_mem, &stats.entry_mem_us)
535        } else {
536            (&stats.entries_missing, &stats.entry_missing_us)
537        };
538        Self::update_stat(time, stopped_measure.as_us());
539        Self::update_stat(count, 1);
540    }
541
    /// Try to update an item in the slot list at the given `slot`. If an item for the slot
    /// already exists in the list, remove the older item, add it to `reclaims`, and insert
    /// the new item.
545    /// if 'other_slot' is some, then also remove any entries in the slot list that are at 'other_slot'
546    pub fn lock_and_update_slot_list(
547        current: &AccountMapEntryInner<T>,
548        new_value: (Slot, T),
549        other_slot: Option<Slot>,
550        reclaims: &mut SlotList<T>,
551        reclaim: UpsertReclaim,
552    ) {
553        let mut slot_list = current.slot_list.write().unwrap();
554        let (slot, new_entry) = new_value;
555        let addref = Self::update_slot_list(
556            &mut slot_list,
557            slot,
558            new_entry,
559            other_slot,
560            reclaims,
561            reclaim,
562        );
563        if addref {
564            current.add_un_ref(true);
565        }
566        current.set_dirty(true);
567    }
568
569    /// modifies slot_list
570    /// any entry at 'slot' or slot 'other_slot' is replaced with 'account_info'.
571    /// or, 'account_info' is appended to the slot list if the slot did not exist previously.
572    /// returns true if caller should addref
573    /// conditions when caller should addref:
574    ///   'account_info' does NOT represent a cached storage (the slot is being flushed from the cache)
575    /// AND
576    ///   previous slot_list entry AT 'slot' did not exist (this is the first time this account was modified in this "slot"), or was previously cached (the storage is now being flushed from the cache)
577    /// Note that even if entry DID exist at 'other_slot', the above conditions apply.
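    /// Example: when slot 5 is being flushed from the write cache, the existing slot-list entry at
    /// slot 5 is cached and the new 'account_info' refers to a real store, so the caller should
    /// addref; if an uncached entry already existed at slot 5, the ref was already counted and
    /// addref is false.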
578    fn update_slot_list(
579        slot_list: &mut SlotList<T>,
580        slot: Slot,
581        account_info: T,
582        mut other_slot: Option<Slot>,
583        reclaims: &mut SlotList<T>,
584        reclaim: UpsertReclaim,
585    ) -> bool {
586        let mut addref = !account_info.is_cached();
587
588        if other_slot == Some(slot) {
589            other_slot = None; // redundant info, so ignore
590        }
591
592        // There may be 0..=2 dirty accounts found (one at 'slot' and one at 'other_slot')
593        // that are already in the slot list.  Since the first one found will be swapped with the
594        // new account, if a second one is found, we cannot swap again. Instead, just remove it.
595        let mut found_slot = false;
596        let mut found_other_slot = false;
597        (0..slot_list.len())
598            .into_iter()
599            .rev() // rev since we delete from the list in some cases
600            .for_each(|slot_list_index| {
601                let (cur_slot, cur_account_info) = &slot_list[slot_list_index];
602                let matched_slot = *cur_slot == slot;
603                if matched_slot || Some(*cur_slot) == other_slot {
604                    // make sure neither 'slot' nor 'other_slot' are in the slot list more than once
605                    let matched_other_slot = !matched_slot;
606                    assert!(
607                        !(found_slot && matched_slot || matched_other_slot && found_other_slot),
608                        "{:?}, slot: {}, other_slot: {:?}",
609                        slot_list,
610                        slot,
611                        other_slot
612                    );
613
614                    let is_cur_account_cached = cur_account_info.is_cached();
615
616                    let reclaim_item = if !(found_slot || found_other_slot) {
617                        // first time we found an entry in 'slot' or 'other_slot', so replace it in-place.
618                        // this may be the only instance we find
619                        std::mem::replace(&mut slot_list[slot_list_index], (slot, account_info))
620                    } else {
621                        // already replaced one entry, so this one has to be removed
622                        slot_list.remove(slot_list_index)
623                    };
624                    match reclaim {
625                        UpsertReclaim::PopulateReclaims => {
626                            reclaims.push(reclaim_item);
627                        }
628                        UpsertReclaim::PreviousSlotEntryWasCached => {
629                            assert!(is_cur_account_cached);
630                        }
631                        UpsertReclaim::IgnoreReclaims => {
632                            // do nothing. nothing to assert. nothing to return in reclaims
633                        }
634                    }
635
636                    if matched_slot {
637                        found_slot = true;
638                        if !is_cur_account_cached {
639                            // current info at 'slot' is NOT cached, so we should NOT addref. This slot already has a ref count for this pubkey.
640                            addref = false;
641                        }
642                    } else {
643                        found_other_slot = true;
644                    }
645                }
646            });
647        if !found_slot && !found_other_slot {
648            // if we make it here, we did not find the slot in the list
649            slot_list.push((slot, account_info));
650        }
651        addref
652    }
653
654    // convert from raw data on disk to AccountMapEntry, set to age in future
655    fn disk_to_cache_entry(
656        &self,
657        slot_list: SlotList<T>,
658        ref_count: RefCount,
659    ) -> AccountMapEntry<T> {
660        Arc::new(AccountMapEntryInner::new(
661            slot_list,
662            ref_count,
663            AccountMapEntryMeta::new_clean(&self.storage),
664        ))
665    }
666
667    pub fn len_for_stats(&self) -> usize {
668        self.stats().count_in_bucket(self.bin)
669    }
670
671    /// Queue up these insertions for when the flush thread is dealing with this bin.
672    /// This is very fast and requires no lookups or disk access.
673    pub fn startup_insert_only(&self, slot: Slot, items: impl Iterator<Item = (Pubkey, T)>) {
674        assert!(self.storage.get_startup());
675        assert!(self.bucket.is_some());
676
677        let insert = &mut self.startup_info.lock().unwrap().insert;
678        items
679            .into_iter()
680            .for_each(|(k, v)| insert.push((slot, k, v)));
681    }
682
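    /// During initial index generation, merge 'new_entry' for 'pubkey' into this bin (in-mem map
    /// and/or disk) and report whether the pubkey already existed and whether the new entry is
    /// zero-lamport.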
683    pub fn insert_new_entry_if_missing_with_lock(
684        &self,
685        pubkey: Pubkey,
686        new_entry: PreAllocatedAccountMapEntry<T>,
687    ) -> InsertNewEntryResults {
688        let mut m = Measure::start("entry");
689        let mut map = self.map_internal.write().unwrap();
690        let entry = map.entry(pubkey);
691        m.stop();
692        let new_entry_zero_lamports = new_entry.is_zero_lamport();
693        let (found_in_mem, already_existed) = match entry {
694            Entry::Occupied(occupied) => {
695                // in cache, so merge into cache
696                let (slot, account_info) = new_entry.into();
697                InMemAccountsIndex::lock_and_update_slot_list(
698                    occupied.get(),
699                    (slot, account_info),
700                    None, // should be None because we don't expect a different slot # during index generation
701                    &mut Vec::default(),
                    UpsertReclaim::PopulateReclaims, // this should probably be IgnoreReclaims?
703                );
704                (
705                    true, /* found in mem */
706                    true, /* already existed */
707                )
708            }
709            Entry::Vacant(vacant) => {
710                // not in cache, look on disk
711                let initial_insert_directly_to_disk = false;
712                if initial_insert_directly_to_disk {
                    // This is more direct, but becomes too slow with very large account counts.
714                    // disk buckets will be improved to make them more performant. Tuning the disks may also help.
715                    // This may become a config tuning option.
716                    let already_existed = self.upsert_on_disk(
717                        vacant,
718                        new_entry,
719                        None, // not changing slots here since it doesn't exist in the index at all
720                        &mut Vec::default(),
721                        UpsertReclaim::PopulateReclaims,
722                    );
723                    (false, already_existed)
724                } else {
725                    let disk_entry = self.load_account_entry_from_disk(vacant.key());
726                    self.stats().inc_mem_count(self.bin);
727                    if let Some(disk_entry) = disk_entry {
728                        let (slot, account_info) = new_entry.into();
729                        InMemAccountsIndex::lock_and_update_slot_list(
730                            &disk_entry,
731                            (slot, account_info),
732                            // None because we are inserting the first element in the slot list for this pubkey.
733                            // There can be no 'other' slot in the list.
734                            None,
735                            &mut Vec::default(),
736                            UpsertReclaim::PopulateReclaims,
737                        );
738                        vacant.insert(disk_entry);
739                        (
740                            false, /* found in mem */
741                            true,  /* already existed */
742                        )
743                    } else {
744                        // not on disk, so insert new thing and we're done
745                        let new_entry: AccountMapEntry<T> =
746                            new_entry.into_account_map_entry(&self.storage);
747                        assert!(new_entry.dirty());
748                        vacant.insert(new_entry);
749                        (false, false)
750                    }
751                }
752            }
753        };
754        drop(map);
755        self.update_entry_stats(m, found_in_mem);
756        let stats = self.stats();
757        if !already_existed {
758            stats.inc_insert();
759        } else {
760            Self::update_stat(&stats.updates_in_mem, 1);
761        }
762        if !already_existed {
763            InsertNewEntryResults::DidNotExist
764        } else if new_entry_zero_lamports {
765            InsertNewEntryResults::ExistedNewEntryZeroLamports
766        } else {
767            InsertNewEntryResults::ExistedNewEntryNonZeroLamports
768        }
769    }
770
771    /// return true if item already existed in the index
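    /// if a disk bucket exists, merge 'new_entry' directly into the disk index; otherwise fall
    /// back to inserting the new entry into the in-mem map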
772    fn upsert_on_disk(
773        &self,
774        vacant: VacantEntry<K, AccountMapEntry<T>>,
775        new_entry: PreAllocatedAccountMapEntry<T>,
776        other_slot: Option<Slot>,
777        reclaims: &mut SlotList<T>,
778        reclaim: UpsertReclaim,
779    ) -> bool {
780        if let Some(disk) = self.bucket.as_ref() {
781            let mut existed = false;
782            let (slot, account_info) = new_entry.into();
783            disk.update(vacant.key(), |current| {
784                if let Some((slot_list, mut ref_count)) = current {
785                    // on disk, so merge and update disk
786                    let mut slot_list = slot_list.to_vec();
787                    let addref = Self::update_slot_list(
788                        &mut slot_list,
789                        slot,
790                        account_info,
791                        other_slot,
792                        reclaims,
793                        reclaim,
794                    );
795                    if addref {
796                        ref_count += 1
797                    };
798                    existed = true; // found on disk, so it did exist
799                    Some((slot_list, ref_count))
800                } else {
801                    // doesn't exist on disk yet, so insert it
802                    let ref_count = if account_info.is_cached() { 0 } else { 1 };
803                    Some((vec![(slot, account_info)], ref_count))
804                }
805            });
806            existed
807        } else {
808            // not using disk, so insert into mem
809            self.stats().inc_mem_count(self.bin);
810            let new_entry: AccountMapEntry<T> = new_entry.into_account_map_entry(&self.storage);
811            assert!(new_entry.dirty());
812            vacant.insert(new_entry);
813            false // not using disk, not in mem, so did not exist
814        }
815    }
816
817    /// Look at the currently held ranges. If 'range' is already included in what is
818    ///  being held, then add 'range' to the currently held list AND return true
819    /// If 'range' is NOT already included in what is being held, then return false
820    ///  withOUT adding 'range' to the list of what is currently held
821    fn add_hold_range_in_memory_if_already_held<R>(
822        &self,
823        range: &R,
824        evictions_guard: &EvictionsGuard,
825    ) -> bool
826    where
827        R: RangeBounds<Pubkey>,
828    {
829        let start_holding = true;
830        let only_add_if_already_held = true;
831        self.just_set_hold_range_in_memory_internal(
832            range,
833            start_holding,
834            only_add_if_already_held,
835            evictions_guard,
836        )
837    }
838
839    fn just_set_hold_range_in_memory<R>(
840        &self,
841        range: &R,
842        start_holding: bool,
843        evictions_guard: &EvictionsGuard,
844    ) where
845        R: RangeBounds<Pubkey>,
846    {
847        let only_add_if_already_held = false;
848        let _ = self.just_set_hold_range_in_memory_internal(
849            range,
850            start_holding,
851            only_add_if_already_held,
852            evictions_guard,
853        );
854    }
855
856    /// if 'start_holding', then caller wants to add 'range' to the list of ranges being held
    /// if !'start_holding', then caller wants to remove 'range' from the list
858    /// if 'only_add_if_already_held', caller intends to only add 'range' to the list if the range is already held
859    /// returns true iff start_holding=true and the range we're asked to hold was already being held
860    fn just_set_hold_range_in_memory_internal<R>(
861        &self,
862        range: &R,
863        start_holding: bool,
864        only_add_if_already_held: bool,
865        _evictions_guard: &EvictionsGuard,
866    ) -> bool
867    where
868        R: RangeBounds<Pubkey>,
869    {
870        assert!(!only_add_if_already_held || start_holding);
871        let start = match range.start_bound() {
872            Bound::Included(bound) | Bound::Excluded(bound) => *bound,
873            Bound::Unbounded => Pubkey::from([0; 32]),
874        };
875
876        let end = match range.end_bound() {
877            Bound::Included(bound) | Bound::Excluded(bound) => *bound,
878            Bound::Unbounded => Pubkey::from([0xff; 32]),
879        };
880
881        // this becomes inclusive - that is ok - we are just roughly holding a range of items.
882        // inclusive is bigger than exclusive so we may hold 1 extra item worst case
883        let inclusive_range = start..=end;
884        let mut ranges = self.cache_ranges_held.write().unwrap();
885        let mut already_held = false;
886        if start_holding {
887            if only_add_if_already_held {
888                for r in ranges.iter() {
889                    if r.contains(&start) && r.contains(&end) {
890                        already_held = true;
891                        break;
892                    }
893                }
894            }
895            if already_held || !only_add_if_already_held {
896                ranges.push(inclusive_range);
897            }
898        } else {
899            // find the matching range and delete it since we don't want to hold it anymore
900            // search backwards, assuming LIFO ordering
901            for (i, r) in ranges.iter().enumerate().rev() {
902                if let (Bound::Included(start_found), Bound::Included(end_found)) =
903                    (r.start_bound(), r.end_bound())
904                {
905                    if start_found == &start && end_found == &end {
906                        // found a match. There may be dups, that's ok, we expect another call to remove the dup.
907                        ranges.remove(i);
908                        break;
909                    }
910                }
911            }
912        }
913        already_held
914    }
915
916    /// if 'start_holding'=true, then:
917    ///  at the end of this function, cache_ranges_held will be updated to contain 'range'
918    ///  and all pubkeys in that range will be in the in-mem cache
919    /// if 'start_holding'=false, then:
920    ///  'range' will be removed from cache_ranges_held
921    ///  and all pubkeys will be eligible for being removed from in-mem cache in the bg if no other range is holding them
922    /// Any in-process flush will be aborted when it gets to evicting items from in-mem.
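    /// Illustrative usage, mirroring how 'items' uses this method ('bin' and 'range' are
    /// hypothetical names here):
    /// ```ignore
    /// bin.hold_range_in_memory(&range, true);  // load the range into the cache and pin it
    /// // ... read entries in 'range' from the in-mem map ...
    /// bin.hold_range_in_memory(&range, false); // unpin; entries become evictable again
    /// ```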
923    pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool)
924    where
925        R: RangeBounds<Pubkey> + Debug,
926    {
927        let evictions_guard = EvictionsGuard::lock(self);
928
929        if !start_holding || !self.add_hold_range_in_memory_if_already_held(range, &evictions_guard)
930        {
931            if start_holding {
932                // put everything in the cache and it will be held there
933                self.put_range_in_cache(&Some(range), &evictions_guard);
934            }
935            // do this AFTER items have been put in cache - that way anyone who finds this range can know that the items are already in the cache
936            self.just_set_hold_range_in_memory(range, start_holding, &evictions_guard);
937        }
938    }
939
940    fn put_range_in_cache<R>(&self, range: &Option<&R>, _evictions_guard: &EvictionsGuard)
941    where
942        R: RangeBounds<Pubkey>,
943    {
944        assert!(self.get_stop_evictions()); // caller should be controlling the lifetime of how long this needs to be present
945        let m = Measure::start("range");
946
947        let mut added_to_mem = 0;
948        // load from disk
949        if let Some(disk) = self.bucket.as_ref() {
950            let mut map = self.map_internal.write().unwrap();
951            let items = disk.items_in_range(range); // map's lock has to be held while we are getting items from disk
952            let future_age = self.storage.future_age_to_flush();
953            for item in items {
954                let entry = map.entry(item.pubkey);
955                match entry {
956                    Entry::Occupied(occupied) => {
957                        // item already in cache, bump age to future. This helps the current age flush to succeed.
958                        occupied.get().set_age(future_age);
959                    }
960                    Entry::Vacant(vacant) => {
961                        vacant.insert(self.disk_to_cache_entry(item.slot_list, item.ref_count));
962                        added_to_mem += 1;
963                    }
964                }
965            }
966        }
967        self.stats().add_mem_count(self.bin, added_to_mem);
968
969        Self::update_time_stat(&self.stats().get_range_us, m);
970    }
971
972    /// returns true if there are active requests to stop evictions
973    fn get_stop_evictions(&self) -> bool {
974        self.stop_evictions.load(Ordering::Acquire) > 0
975    }
976
977    /// return count of calls to 'start_stop_evictions', indicating changes could have been made to eviction strategy
978    fn get_stop_evictions_changes(&self) -> u64 {
979        self.stop_evictions_changes.load(Ordering::Acquire)
980    }
981
982    pub(crate) fn flush(&self, can_advance_age: bool) {
983        if let Some(flush_guard) = FlushGuard::lock(&self.flushing_active) {
984            self.flush_internal(&flush_guard, can_advance_age)
985        }
986    }
987
988    /// returns true if a dice roll indicates this call should result in a random eviction.
989    /// This causes non-determinism in cache contents per validator.
990    fn random_chance_of_eviction() -> bool {
991        // random eviction
992        const N: usize = 1000;
993        // 1/N chance of eviction
994        thread_rng().gen_range(0, N) == 0
995    }
996
997    /// assumes 1 entry in the slot list. Ignores overhead of the HashMap and such
998    fn approx_size_of_one_entry() -> usize {
999        std::mem::size_of::<T>()
1000            + std::mem::size_of::<Pubkey>()
1001            + std::mem::size_of::<AccountMapEntry<T>>()
1002    }
1003
1004    fn should_evict_based_on_age(
1005        current_age: Age,
1006        entry: &AccountMapEntry<T>,
1007        startup: bool,
1008    ) -> bool {
1009        startup || (current_age == entry.age())
1010    }
1011
1012    /// return true if 'entry' should be evicted from the in-mem index
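    /// an entry qualifies only if it has aged out (or we are at startup); it is then evicted if the
    /// in-mem budget is exceeded, or if its slot list has exactly one element and that element does
    /// not refer to a cached store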
1013    fn should_evict_from_mem<'a>(
1014        &self,
1015        current_age: Age,
1016        entry: &'a AccountMapEntry<T>,
1017        startup: bool,
1018        update_stats: bool,
1019        exceeds_budget: bool,
1020    ) -> (bool, Option<std::sync::RwLockReadGuard<'a, SlotList<T>>>) {
1021        // this could be tunable dynamically based on memory pressure
1022        // we could look at more ages or we could throw out more items we are choosing to keep in the cache
1023        if Self::should_evict_based_on_age(current_age, entry, startup) {
1024            if exceeds_budget {
1025                // if we are already holding too many items in-mem, then we need to be more aggressive at kicking things out
1026                (true, None)
1027            } else {
1028                // only read the slot list if we are planning to throw the item out
1029                let slot_list = entry.slot_list.read().unwrap();
1030                if slot_list.len() != 1 {
1031                    if update_stats {
1032                        Self::update_stat(&self.stats().held_in_mem_slot_list_len, 1);
1033                    }
1034                    (false, None) // keep 0 and > 1 slot lists in mem. They will be cleaned or shrunk soon.
1035                } else {
1036                    // keep items with slot lists that contained cached items
1037                    let evict = !slot_list.iter().any(|(_, info)| info.is_cached());
1038                    if !evict && update_stats {
1039                        Self::update_stat(&self.stats().held_in_mem_slot_list_cached, 1);
1040                    }
1041                    (evict, if evict { Some(slot_list) } else { None })
1042                }
1043            }
1044        } else {
1045            (false, None)
1046        }
1047    }
1048
1049    /// scan loop
1050    /// holds read lock
1051    /// identifies items which are dirty and items to evict
1052    fn flush_scan(
1053        &self,
1054        current_age: Age,
1055        startup: bool,
1056        _flush_guard: &FlushGuard,
1057    ) -> FlushScanResult<T> {
1058        let mut possible_evictions = self.possible_evictions.write().unwrap();
1059        if let Some(result) = possible_evictions.get_possible_evictions() {
1060            // we have previously calculated the possible evictions for this age
1061            return result;
1062        }
1063        // otherwise, we need to scan some number of ages into the future now
1064        let ages_to_scan = self.ages_to_scan_ahead(current_age);
1065        possible_evictions.reset(ages_to_scan);
1066
1067        let m;
1068        {
1069            let map = self.map_internal.read().unwrap();
1070            m = Measure::start("flush_scan"); // we don't care about lock time in this metric - bg threads can wait
1071            for (k, v) in map.iter() {
1072                let random = Self::random_chance_of_eviction();
1073                let age_offset = if random {
1074                    thread_rng().gen_range(0, ages_to_scan)
1075                } else if startup {
1076                    0
1077                } else {
1078                    let ages_in_future = v.age().wrapping_sub(current_age);
1079                    if ages_in_future >= ages_to_scan {
1080                        // not planning to evict this item from memory within the next few ages
1081                        continue;
1082                    }
1083                    ages_in_future
1084                };
1085
1086                possible_evictions.insert(age_offset, *k, Arc::clone(v), random);
1087            }
1088        }
1089        Self::update_time_stat(&self.stats().flush_scan_us, m);
1090
1091        possible_evictions.get_possible_evictions().unwrap()
1092    }
1093
1094    fn write_startup_info_to_disk(&self) {
1095        let insert = std::mem::take(&mut self.startup_info.lock().unwrap().insert);
1096        if insert.is_empty() {
1097            // nothing to insert for this bin
1098            return;
1099        }
1100
1101        // during startup, nothing should be in the in-mem map
1102        let map_internal = self.map_internal.read().unwrap();
1103        assert!(
1104            map_internal.is_empty(),
1105            "len: {}, first: {:?}",
1106            map_internal.len(),
1107            map_internal.iter().take(1).collect::<Vec<_>>()
1108        );
1109        drop(map_internal);
1110
1111        let mut duplicates = vec![];
1112
1113        // merge all items into the disk index now
1114        let disk = self.bucket.as_ref().unwrap();
1115        let mut count = 0;
1116        insert.into_iter().for_each(|(slot, k, v)| {
1117            let entry = (slot, v);
1118            let new_ref_count = u64::from(!v.is_cached());
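            // a cached (write-cache) entry does not add to the ref count; only entries backed by
            // an actual store count as a ref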
1119            disk.update(&k, |current| {
1120                match current {
1121                    Some((current_slot_list, mut ref_count)) => {
1122                        // merge this in, mark as conflict
1123                        let mut slot_list = Vec::with_capacity(current_slot_list.len() + 1);
1124                        slot_list.extend_from_slice(current_slot_list);
1125                        slot_list.push(entry); // will never be from the same slot that already exists in the list
1126                        ref_count += new_ref_count;
1127                        duplicates.push((slot, k));
1128                        Some((slot_list, ref_count))
1129                    }
1130                    None => {
1131                        count += 1;
1132                        // not on disk, insert it
1133                        Some((vec![entry], new_ref_count))
1134                    }
1135                }
1136            });
1137        });
1138        self.stats().inc_insert_count(count);
1139        self.startup_info
1140            .lock()
1141            .unwrap()
1142            .duplicates
1143            .append(&mut duplicates);
1144    }
1145
1146    /// pull out all duplicate pubkeys from 'startup_info'
1147    /// duplicate pubkeys have a slot list with len > 1
1148    /// These were collected for this bin when we did batch inserts in the bg flush threads.
1149    pub fn retrieve_duplicate_keys_from_startup(&self) -> Vec<(Slot, Pubkey)> {
1150        let mut write = self.startup_info.lock().unwrap();
        // in order to return accurate and complete duplicates, we must have nothing left to insert
1152        assert!(write.insert.is_empty());
1153
1154        std::mem::take(&mut write.duplicates)
1155    }
1156
1157    /// synchronize the in-mem index with the disk index
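    /// at a high level: scan the in-mem map for eviction candidates, write any dirty candidates to
    /// the disk index, evict the candidates from the in-mem cache, then mark this bin as having
    /// aged for 'current_age'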
1158    fn flush_internal(&self, flush_guard: &FlushGuard, can_advance_age: bool) {
1159        let current_age = self.storage.current_age();
1160        let iterate_for_age = self.get_should_age(current_age);
1161        let startup = self.storage.get_startup();
1162        if !iterate_for_age && !startup {
1163            // no need to age, so no need to flush this bucket
1164            // but, at startup we want to evict from buckets as fast as possible if any items exist
1165            return;
1166        }
1167
1168        // scan in-mem map for items that we may evict
1169        let FlushScanResult {
1170            mut evictions_age_possible,
1171            mut evictions_random,
1172        } = self.flush_scan(current_age, startup, flush_guard);
1173
1174        if startup {
1175            self.write_startup_info_to_disk();
1176        }
1177
1178        // write to disk outside in-mem map read lock
1179        {
1180            let mut evictions_age = Vec::with_capacity(evictions_age_possible.len());
1181            if !evictions_age_possible.is_empty() || !evictions_random.is_empty() {
1182                let disk = self.bucket.as_ref().unwrap();
1183                let mut flush_entries_updated_on_disk = 0;
1184                let exceeds_budget = self.get_exceeds_budget();
1185                let mut flush_should_evict_us = 0;
1186                // we don't care about lock time in this metric - bg threads can wait
1187                let m = Measure::start("flush_update");
1188
1189                // consider whether to write to disk for all the items we may evict, whether evicting due to age or random
1190                for (is_random, check_for_eviction_and_dirty) in [
1191                    (false, &mut evictions_age_possible),
1192                    (true, &mut evictions_random),
1193                ] {
1194                    for (k, v) in check_for_eviction_and_dirty.drain(..) {
1195                        let mut slot_list = None;
1196                        if !is_random {
1197                            let mut mse = Measure::start("flush_should_evict");
1198                            let (evict_for_age, slot_list_temp) = self.should_evict_from_mem(
1199                                current_age,
1200                                &v,
1201                                startup,
1202                                true,
1203                                exceeds_budget,
1204                            );
1205                            slot_list = slot_list_temp;
1206                            mse.stop();
1207                            flush_should_evict_us += mse.as_us();
1208                            if evict_for_age {
1209                                evictions_age.push(k);
1210                            } else {
1211                                // not evicting, so don't write, even if dirty
1212                                continue;
1213                            }
1214                        }
1215                        // if we are evicting it, then we need to update disk if we're dirty
1216                        if v.clear_dirty() {
1217                            // step 1: clear the dirty flag
1218                            // step 2: perform the update on disk based on the fields in the entry
1219                            // If a parallel operation dirties the item again - even while this flush is occurring,
1220                            //  the last thing the writer will do, after updating contents, is set_dirty(true)
1221                            //  That prevents dropping an item from cache before disk is updated to latest in mem.
1222                            // It is possible that the item in the cache is marked as dirty while these updates are happening. That is ok.
1223                            //  The dirty will be picked up and the item will be prevented from being evicted.
1224
1225                            // may have to loop if disk has to grow and we have to retry the write
1226                            loop {
1227                                let disk_resize = {
1228                                    let slot_list = slot_list
1229                                        .take()
1230                                        .unwrap_or_else(|| v.slot_list.read().unwrap());
1231                                    disk.try_write(&k, (&slot_list, v.ref_count()))
1232                                };
1233                                match disk_resize {
1234                                    Ok(_) => {
1235                                        // successfully written to disk
1236                                        flush_entries_updated_on_disk += 1;
1237                                        break;
1238                                    }
1239                                    Err(err) => {
1240                                        // disk needs to resize. This item did not get written. Resize and try again.
1241                                        let m = Measure::start("flush_grow");
1242                                        disk.grow(err);
1243                                        Self::update_time_stat(&self.stats().flush_grow_us, m);
1244                                    }
1245                                }
1246                            }
1247                        }
1248                    }
1249                }
1250                Self::update_time_stat(&self.stats().flush_update_us, m);
1251                Self::update_stat(&self.stats().flush_should_evict_us, flush_should_evict_us);
1252                Self::update_stat(
1253                    &self.stats().flush_entries_updated_on_disk,
1254                    flush_entries_updated_on_disk,
1255                );
                // strip the values; only the keys are needed for eviction
1257                let evictions_random = evictions_random
1258                    .into_iter()
1259                    .map(|(k, _v)| k)
1260                    .collect::<Vec<_>>();
1261
1262                let m = Measure::start("flush_evict");
1263                self.evict_from_cache(evictions_age, current_age, startup, false);
1264                self.evict_from_cache(evictions_random, current_age, startup, true);
1265                Self::update_time_stat(&self.stats().flush_evict_us, m);
1266            }
1267
1268            if iterate_for_age {
1269                // completed iteration of the buckets at the current age
1270                assert_eq!(current_age, self.storage.current_age());
1271                self.set_has_aged(current_age, can_advance_age);
1272            }
1273        }
1274    }
1275
1276    /// calculate the estimated size of the in-mem index
1277    /// return whether the size exceeds the specified budget
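    ///
    /// e.g. (illustrative numbers only): with `mem_budget_mb = Some(1)` and an approximate
    /// entry size of 100 bytes, 20_000 in-mem entries estimate to ~2_000_000 bytes, which
    /// exceeds the 1 * 1024 * 1024 byte budget, so this returns true.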
1278    fn get_exceeds_budget(&self) -> bool {
1279        let in_mem_count = self.stats().count_in_mem.load(Ordering::Relaxed);
1280        let limit = self.storage.mem_budget_mb;
1281        let estimate_mem = in_mem_count * Self::approx_size_of_one_entry();
1282        let exceeds_budget = limit
1283            .map(|limit| estimate_mem >= limit * 1024 * 1024)
1284            .unwrap_or_default();
1285        self.stats()
1286            .estimate_mem
1287            .store(estimate_mem as u64, Ordering::Relaxed);
1288        exceeds_budget
1289    }
1290
1291    /// for each key in 'keys', look up in map, set age to the future
1292    fn move_ages_to_future(&self, next_age: Age, current_age: Age, keys: &[Pubkey]) {
1293        let map = self.map_internal.read().unwrap();
1294        keys.iter().for_each(|key| {
1295            if let Some(entry) = map.get(key) {
1296                entry.try_exchange_age(next_age, current_age);
1297            }
1298        });
1299    }
1300
1301    // evict keys in 'evictions' from in-mem cache, likely due to age
1302    fn evict_from_cache(
1303        &self,
1304        mut evictions: Vec<Pubkey>,
1305        current_age: Age,
1306        startup: bool,
1307        randomly_evicted: bool,
1308    ) {
1309        if evictions.is_empty() {
1310            return;
1311        }
1312
1313        let stop_evictions_changes_at_start = self.get_stop_evictions_changes();
1314        let next_age_on_failure = self.storage.future_age_to_flush();
1315        if self.get_stop_evictions() {
1316            // ranges were changed
1317            self.move_ages_to_future(next_age_on_failure, current_age, &evictions);
1318            return;
1319        }
1320
1321        let mut failed = 0;
1322
1323        // skip any keys that must stay in memory because they fall within a currently held range
1324        let ranges = self.cache_ranges_held.read().unwrap().clone();
1325        if !ranges.is_empty() {
1326            let mut move_age = Vec::default();
1327            evictions.retain(|k| {
1328                if ranges.iter().any(|range| range.contains(k)) {
1329                    // this item is held in mem by range, so don't evict
1330                    move_age.push(*k);
1331                    false
1332                } else {
1333                    true
1334                }
1335            });
1336            if !move_age.is_empty() {
1337                failed += move_age.len();
1338                self.move_ages_to_future(next_age_on_failure, current_age, &move_age);
1339            }
1340        }
1341
1342        let mut evicted = 0;
1343        // chunk these so we don't hold the write lock too long
1344        for evictions in evictions.chunks(50) {
1345            let mut map = self.map_internal.write().unwrap();
1346            for k in evictions {
1347                if let Entry::Occupied(occupied) = map.entry(*k) {
1348                    let v = occupied.get();
1349                    if Arc::strong_count(v) > 1 {
1350                        // someone is holding the value arc's ref count and could modify it, so do not evict
1351                        failed += 1;
1352                        v.try_exchange_age(next_age_on_failure, current_age);
1353                        continue;
1354                    }
1355
1356                    if v.dirty()
1357                        || (!randomly_evicted
1358                            && !Self::should_evict_based_on_age(current_age, v, startup))
1359                    {
1360                        // marked dirty or bumped in age after we looked above
1361                        // these evictions will be handled in later passes (at later ages)
1362                        // but, at startup, everything is ready to age out if it isn't dirty
1363                        failed += 1;
1364                        continue;
1365                    }
1366
1367                    if stop_evictions_changes_at_start != self.get_stop_evictions_changes() {
1368                        // ranges were changed
1369                        failed += 1;
1370                        v.try_exchange_age(next_age_on_failure, current_age);
1371                        continue;
1372                    }
1373
1374                    // all conditions for eviction succeeded, so really evict item from in-mem cache
1375                    evicted += 1;
1376                    occupied.remove();
1377                }
1378            }
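            // if this chunk emptied the bin entirely, return the map's spare capacity to the allocator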
1379            if map.is_empty() {
1380                map.shrink_to_fit();
1381            }
1382        }
1383        self.stats().sub_mem_count(self.bin, evicted);
1384        Self::update_stat(&self.stats().flush_entries_evicted_from_mem, evicted as u64);
1385        Self::update_stat(&self.stats().failed_to_evict, failed as u64);
1386    }
1387
1388    pub fn stats(&self) -> &BucketMapHolderStats {
1389        &self.storage.stats
1390    }
1391
1392    fn update_stat(stat: &AtomicU64, value: u64) {
1393        if value != 0 {
1394            stat.fetch_add(value, Ordering::Relaxed);
1395        }
1396    }
1397
1398    pub fn update_time_stat(stat: &AtomicU64, mut m: Measure) {
1399        m.stop();
1400        let value = m.as_us();
1401        Self::update_stat(stat, value);
1402    }
1403}
1404
1405/// An RAII implementation of a scoped lock for the `flushing_active` atomic flag in
1406/// `InMemAccountsIndex`.  When this structure is dropped (falls out of scope), the flag will be
1407/// cleared (set to false).
1408///
1409/// After successfully locking (calling `FlushGuard::lock()`), pass a reference to the `FlushGuard`
1410/// instance to any function/code that requires that the `flushing_active` flag be set (to true).
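///
/// # Example
///
/// A minimal usage sketch, mirroring `test_flush_guard` below:
///
/// ```ignore
/// let flushing_active = AtomicBool::new(false);
/// if let Some(flush_guard) = FlushGuard::lock(&flushing_active) {
///     // flush work happens here; holding `flush_guard` proves the flag is set
/// } // dropped: `flushing_active` is cleared back to false
/// ```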
1411#[derive(Debug)]
1412struct FlushGuard<'a> {
1413    flushing: &'a AtomicBool,
1414}
1415
1416impl<'a> FlushGuard<'a> {
1417    /// Set the `flushing` atomic flag to true.  If the flag was already true, then return `None`
1418    /// (so as to not clear the flag erroneously).  Otherwise return `Some(FlushGuard)`.
1419    #[must_use = "if unused, the `flushing` flag will immediately clear"]
1420    fn lock(flushing: &'a AtomicBool) -> Option<Self> {
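        // `swap` returns the prior value: if it was already true, another caller holds the guard.
        // AcqRel pairs with the Release store in `Drop`, so work done under the previous guard is
        // visible to this caller once it observes the flag as cleared.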
1421        let already_flushing = flushing.swap(true, Ordering::AcqRel);
1422        (!already_flushing).then(|| Self { flushing })
1423    }
1424}
1425
1426impl Drop for FlushGuard<'_> {
1427    fn drop(&mut self) {
1428        self.flushing.store(false, Ordering::Release);
1429    }
1430}
1431
1432/// Disable (and later safely re-enable) eviction of entries from the in-mem accounts index by the
1433/// background flusher.  When disabled, no entries may be evicted.  When enabled, only eligible
1434/// entries may be evicted (i.e. those not in a held range).
1435///
1436/// An RAII implementation of a scoped lock for the `stop_evictions` atomic flag/counter in
1437/// `InMemAccountsIndex`.  When this structure is dropped (falls out of scope), the counter will
1438/// decrement and conditionally notify its storage.
1439///
1440/// After successfully locking (calling `EvictionsGuard::lock()`), pass a reference to the
1441/// `EvictionsGuard` instance to any function/code that requires `stop_evictions` to be
1442/// incremented/decremented correctly.
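///
/// # Example
///
/// A minimal sketch, following the pattern used in `test_hold_range_in_memory` below
/// (`bucket` is an `InMemAccountsIndex` and `range` a pubkey range already held in memory):
///
/// ```ignore
/// let evictions_guard = EvictionsGuard::lock(&bucket);
/// assert!(bucket.add_hold_range_in_memory_if_already_held(&range, &evictions_guard));
/// // evictions are re-enabled when `evictions_guard` is dropped
/// ```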
1443#[derive(Debug)]
1444struct EvictionsGuard<'a> {
1445    /// The number of active callers disabling evictions
1446    stop_evictions: &'a AtomicU64,
1447    /// The number of times that evictions have been disabled or enabled
1448    num_state_changes: &'a AtomicU64,
1449    /// Who will be notified after the evictions are re-enabled
1450    storage_notifier: &'a WaitableCondvar,
1451}
1452
1453impl<'a> EvictionsGuard<'a> {
1454    #[must_use = "if unused, this evictions lock will be immediately unlocked"]
1455    fn lock<T: IndexValue>(in_mem_accounts_index: &'a InMemAccountsIndex<T>) -> Self {
1456        Self::lock_with(
1457            &in_mem_accounts_index.stop_evictions,
1458            &in_mem_accounts_index.stop_evictions_changes,
1459            &in_mem_accounts_index.storage.wait_dirty_or_aged,
1460        )
1461    }
1462
1463    #[must_use = "if unused, this evictions lock will be immediately unlocked"]
1464    fn lock_with(
1465        stop_evictions: &'a AtomicU64,
1466        num_state_changes: &'a AtomicU64,
1467        storage_notifier: &'a WaitableCondvar,
1468    ) -> Self {
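        // bump the change counter and the active-disabler count; `evict_from_cache` samples the
        // change counter before and after its scan to detect held ranges changing mid-eviction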
1469        num_state_changes.fetch_add(1, Ordering::Release);
1470        stop_evictions.fetch_add(1, Ordering::Release);
1471
1472        Self {
1473            stop_evictions,
1474            num_state_changes,
1475            storage_notifier,
1476        }
1477    }
1478}
1479
1480impl Drop for EvictionsGuard<'_> {
1481    fn drop(&mut self) {
1482        let previous_value = self.stop_evictions.fetch_sub(1, Ordering::AcqRel);
1483        debug_assert!(previous_value > 0);
1484
1485        let should_notify = previous_value == 1;
1486        if should_notify {
1487            // stop_evictions went to 0, so this bucket could now be ready to be aged
1488            self.storage_notifier.notify_one();
1489        }
1490
1491        self.num_state_changes.fetch_add(1, Ordering::Release);
1492    }
1493}
1494
1495#[cfg(test)]
1496mod tests {
1497    use {
1498        super::*,
1499        crate::accounts_index::{AccountsIndexConfig, IndexLimitMb, BINS_FOR_TESTING},
1500        itertools::Itertools,
1501    };
1502
1503    fn new_for_test<T: IndexValue>() -> InMemAccountsIndex<T> {
1504        let holder = Arc::new(BucketMapHolder::new(
1505            BINS_FOR_TESTING,
1506            &Some(AccountsIndexConfig::default()),
1507            1,
1508        ));
1509        let bin = 0;
1510        InMemAccountsIndex::new(&holder, bin)
1511    }
1512
1513    fn new_disk_buckets_for_test<T: IndexValue>() -> InMemAccountsIndex<T> {
1514        let holder = Arc::new(BucketMapHolder::new(
1515            BINS_FOR_TESTING,
1516            &Some(AccountsIndexConfig {
1517                index_limit_mb: IndexLimitMb::Limit(1),
1518                ..AccountsIndexConfig::default()
1519            }),
1520            1,
1521        ));
1522        let bin = 0;
1523        let bucket = InMemAccountsIndex::new(&holder, bin);
1524        assert!(bucket.storage.is_disk_index_enabled());
1525        bucket
1526    }
1527
1528    #[test]
1529    fn test_should_evict_from_mem() {
1530        solana_logger::setup();
1531        let bucket = new_for_test::<u64>();
1532        let mut startup = false;
1533        let mut current_age = 0;
1534        let ref_count = 0;
1535        let one_element_slot_list = vec![(0, 0)];
1536        let one_element_slot_list_entry = Arc::new(AccountMapEntryInner::new(
1537            one_element_slot_list,
1538            ref_count,
1539            AccountMapEntryMeta::default(),
1540        ));
1541
1542        // exceeded budget
1543        assert!(
1544            bucket
1545                .should_evict_from_mem(
1546                    current_age,
1547                    &Arc::new(AccountMapEntryInner::new(
1548                        vec![],
1549                        ref_count,
1550                        AccountMapEntryMeta::default()
1551                    )),
1552                    startup,
1553                    false,
1554                    true,
1555                )
1556                .0
1557        );
1558        // empty slot list
1559        assert!(
1560            !bucket
1561                .should_evict_from_mem(
1562                    current_age,
1563                    &Arc::new(AccountMapEntryInner::new(
1564                        vec![],
1565                        ref_count,
1566                        AccountMapEntryMeta::default()
1567                    )),
1568                    startup,
1569                    false,
1570                    false,
1571                )
1572                .0
1573        );
1574        // 1 element slot list
1575        assert!(
1576            bucket
1577                .should_evict_from_mem(
1578                    current_age,
1579                    &one_element_slot_list_entry,
1580                    startup,
1581                    false,
1582                    false,
1583                )
1584                .0
1585        );
1586        // 2 element slot list
1587        assert!(
1588            !bucket
1589                .should_evict_from_mem(
1590                    current_age,
1591                    &Arc::new(AccountMapEntryInner::new(
1592                        vec![(0, 0), (1, 1)],
1593                        ref_count,
1594                        AccountMapEntryMeta::default()
1595                    )),
1596                    startup,
1597                    false,
1598                    false,
1599                )
1600                .0
1601        );
1602
1603        {
1604            let bucket = new_for_test::<f64>();
1605            // 1 element slot list with a CACHED item - f64 acts like cached
1606            assert!(
1607                !bucket
1608                    .should_evict_from_mem(
1609                        current_age,
1610                        &Arc::new(AccountMapEntryInner::new(
1611                            vec![(0, 0.0)],
1612                            ref_count,
1613                            AccountMapEntryMeta::default()
1614                        )),
1615                        startup,
1616                        false,
1617                        false,
1618                    )
1619                    .0
1620            );
1621        }
1622
1623        // 1 element slot list, age is now
1624        assert!(
1625            bucket
1626                .should_evict_from_mem(
1627                    current_age,
1628                    &one_element_slot_list_entry,
1629                    startup,
1630                    false,
1631                    false,
1632                )
1633                .0
1634        );
1635
1636        // 1 element slot list, but not current age
1637        current_age = 1;
1638        assert!(
1639            !bucket
1640                .should_evict_from_mem(
1641                    current_age,
1642                    &one_element_slot_list_entry,
1643                    startup,
1644                    false,
1645                    false,
1646                )
1647                .0
1648        );
1649
1650        // 1 element slot list, but at startup and age not current
1651        startup = true;
1652        assert!(
1653            bucket
1654                .should_evict_from_mem(
1655                    current_age,
1656                    &one_element_slot_list_entry,
1657                    startup,
1658                    false,
1659                    false,
1660                )
1661                .0
1662        );
1663    }
1664
1665    #[test]
1666    fn test_hold_range_in_memory() {
1667        let bucket = new_disk_buckets_for_test::<u64>();
1668        // 0x81 is just some other range
1669        let all = Pubkey::from([0; 32])..=Pubkey::from([0xff; 32]);
1670        let ranges = [
1671            all.clone(),
1672            Pubkey::from([0x81; 32])..=Pubkey::from([0xff; 32]),
1673        ];
1674        for range in ranges.clone() {
1675            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1676            bucket.hold_range_in_memory(&range, true);
1677            assert_eq!(
1678                bucket.cache_ranges_held.read().unwrap().to_vec(),
1679                vec![range.clone()]
1680            );
1681            {
1682                let evictions_guard = EvictionsGuard::lock(&bucket);
1683                assert!(bucket.add_hold_range_in_memory_if_already_held(&range, &evictions_guard));
1684                bucket.hold_range_in_memory(&range, false);
1685            }
1686            bucket.hold_range_in_memory(&range, false);
1687            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1688            bucket.hold_range_in_memory(&range, true);
1689            assert_eq!(
1690                bucket.cache_ranges_held.read().unwrap().to_vec(),
1691                vec![range.clone()]
1692            );
1693            bucket.hold_range_in_memory(&range, true);
1694            assert_eq!(
1695                bucket.cache_ranges_held.read().unwrap().to_vec(),
1696                vec![range.clone(), range.clone()]
1697            );
1698            bucket.hold_range_in_memory(&ranges[0], true);
1699            assert_eq!(
1700                bucket.cache_ranges_held.read().unwrap().to_vec(),
1701                vec![range.clone(), range.clone(), ranges[0].clone()]
1702            );
1703            bucket.hold_range_in_memory(&range, false);
1704            assert_eq!(
1705                bucket.cache_ranges_held.read().unwrap().to_vec(),
1706                vec![range.clone(), ranges[0].clone()]
1707            );
1708            bucket.hold_range_in_memory(&range, false);
1709            assert_eq!(
1710                bucket.cache_ranges_held.read().unwrap().to_vec(),
1711                vec![ranges[0].clone()]
1712            );
1713            bucket.hold_range_in_memory(&ranges[0].clone(), false);
1714            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1715
1716            // hold all in mem first
1717            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1718            bucket.hold_range_in_memory(&all, true);
1719
1720            let evictions_guard = EvictionsGuard::lock(&bucket);
1721            assert!(bucket.add_hold_range_in_memory_if_already_held(&range, &evictions_guard));
1722            bucket.hold_range_in_memory(&range, false);
1723            bucket.hold_range_in_memory(&all, false);
1724        }
1725    }
1726
1727    #[test]
1728    fn test_age() {
1729        solana_logger::setup();
1730        let test = new_for_test::<u64>();
1731        assert!(test.get_should_age(test.storage.current_age()));
1732        assert_eq!(test.storage.count_buckets_flushed(), 0);
1733        test.set_has_aged(0, true);
1734        assert!(!test.get_should_age(test.storage.current_age()));
1735        assert_eq!(test.storage.count_buckets_flushed(), 1);
1736        // simulate rest of buckets aging
1737        for _ in 1..BINS_FOR_TESTING {
1738            assert!(!test.storage.all_buckets_flushed_at_current_age());
1739            test.storage.bucket_flushed_at_current_age(true);
1740        }
1741        assert!(test.storage.all_buckets_flushed_at_current_age());
1742        // advance age
1743        test.storage.increment_age();
1744        assert_eq!(test.storage.current_age(), 1);
1745        assert!(!test.storage.all_buckets_flushed_at_current_age());
1746        assert!(test.get_should_age(test.storage.current_age()));
1747        assert_eq!(test.storage.count_buckets_flushed(), 0);
1748    }
1749
1750    #[test]
1751    fn test_update_slot_list_other() {
1752        solana_logger::setup();
1753        let reclaim = UpsertReclaim::PopulateReclaims;
1754        let new_slot = 0;
1755        let info = 1;
1756        let other_value = info + 1;
1757        let at_new_slot = (new_slot, info);
1758        let unique_other_slot = new_slot + 1;
1759        for other_slot in [Some(new_slot), Some(unique_other_slot), None] {
1760            let mut reclaims = Vec::default();
1761            let mut slot_list = Vec::default();
1762            // upserting into empty slot_list, so always addref
1763            assert!(
1764                InMemAccountsIndex::update_slot_list(
1765                    &mut slot_list,
1766                    new_slot,
1767                    info,
1768                    other_slot,
1769                    &mut reclaims,
1770                    reclaim
1771                ),
1772                "other_slot: {:?}",
1773                other_slot
1774            );
1775            assert_eq!(slot_list, vec![at_new_slot]);
1776            assert!(reclaims.is_empty());
1777        }
1778
1779        // replace other
1780        let mut slot_list = vec![(unique_other_slot, other_value)];
1781        let expected_reclaims = slot_list.clone();
1782        let other_slot = Some(unique_other_slot);
1783        let mut reclaims = Vec::default();
1784        assert!(
1785            // upserting into slot_list that does NOT contain an entry at 'new_slot', so always addref
1786            InMemAccountsIndex::update_slot_list(
1787                &mut slot_list,
1788                new_slot,
1789                info,
1790                other_slot,
1791                &mut reclaims,
1792                reclaim
1793            ),
1794            "other_slot: {:?}",
1795            other_slot
1796        );
1797        assert_eq!(slot_list, vec![at_new_slot]);
1798        assert_eq!(reclaims, expected_reclaims);
1799
1800        // replace other and new_slot
1801        let mut slot_list = vec![(unique_other_slot, other_value), (new_slot, other_value)];
1802        let expected_reclaims = slot_list.clone();
1803        let other_slot = Some(unique_other_slot);
1804        // upserting into slot_list that already contains an entry at 'new_slot', so do NOT addref
1805        let mut reclaims = Vec::default();
1806        assert!(
1807            !InMemAccountsIndex::update_slot_list(
1808                &mut slot_list,
1809                new_slot,
1810                info,
1811                other_slot,
1812                &mut reclaims,
1813                reclaim
1814            ),
1815            "other_slot: {:?}",
1816            other_slot
1817        );
1818        assert_eq!(slot_list, vec![at_new_slot]);
1819        assert_eq!(
1820            reclaims,
1821            expected_reclaims.into_iter().rev().collect::<Vec<_>>()
1822        );
1823
1824        // nothing will exist at this slot
1825        let missing_other_slot = unique_other_slot + 1;
1826        let ignored_slot = 10; // bigger than is used elsewhere in the test
1827        let ignored_value = info + 10;
1828
1829        let mut possible_initial_slot_list_contents;
1830        // build a list of possible contents in the slot_list prior to calling 'update_slot_list'
1831        {
1832            // up to 3 'ignored' (slot, account_info) entries: slots that are neither 'new_slot' nor 'other_slot' but could exist in the slot_list initially
1833            possible_initial_slot_list_contents = (0..3)
1834                .into_iter()
1835                .map(|i| (ignored_slot + i, ignored_value + i))
1836                .collect::<Vec<_>>();
1837            // account_info that already exists in the slot_list AT 'new_slot'
1838            possible_initial_slot_list_contents.push(at_new_slot);
1839            // account_info that already exists in the slot_list AT 'other_slot'
1840            possible_initial_slot_list_contents.push((unique_other_slot, other_value));
1841        }
1842
1843        /*
1844         * loop over all possible permutations of 'possible_initial_slot_list_contents'
1845         * some examples:
1846         * []
1847         * [other]
1848         * [other, new_slot]
1849         * [new_slot, other]
1850         * [dummy0, new_slot, dummy1, other] (and all permutations of this order)
1851         * [other, dummy1, new_slot] (and all permutations of this order)
1852         * ...
1853         * [dummy0, new_slot, dummy1, other_slot, dummy2] (and all permutations of this order)
1854         */
1855        let mut attempts = 0;
1856        // loop over each initial size of 'slot_list'
1857        for initial_slot_list_len in 0..=possible_initial_slot_list_contents.len() {
1858            // loop over every permutation of possible_initial_slot_list_contents within a list of len 'initial_slot_list_len'
1859            for content_source_indexes in
1860                (0..possible_initial_slot_list_contents.len()).permutations(initial_slot_list_len)
1861            {
1862                // loop over each possible parameter for 'other_slot'
1863                for other_slot in [
1864                    Some(new_slot),
1865                    Some(unique_other_slot),
1866                    Some(missing_other_slot),
1867                    None,
1868                ] {
1869                    attempts += 1;
1870                    // initialize slot_list prior to call to 'InMemAccountsIndex::update_slot_list'
1871                    // by inserting each possible entry at each possible position
1872                    let mut slot_list = content_source_indexes
1873                        .iter()
1874                        .map(|i| possible_initial_slot_list_contents[*i])
1875                        .collect::<Vec<_>>();
1876                    let mut expected = slot_list.clone();
1877                    let original = slot_list.clone();
1878                    let mut reclaims = Vec::default();
1879
1880                    let result = InMemAccountsIndex::update_slot_list(
1881                        &mut slot_list,
1882                        new_slot,
1883                        info,
1884                        other_slot,
1885                        &mut reclaims,
1886                        reclaim,
1887                    );
1888
1889                    // calculate expected results
1890                    let mut expected_reclaims = Vec::default();
1891                    // addref iff the slot_list did NOT previously contain an entry at 'new_slot'
1892                    let expected_result = !expected.iter().any(|(slot, _info)| slot == &new_slot);
1893                    {
1894                        // this is the logical equivalent of 'InMemAccountsIndex::update_slot_list', but slower (and ignoring addref)
1895                        expected.retain(|(slot, info)| {
1896                            let retain = slot != &new_slot && Some(*slot) != other_slot;
1897                            if !retain {
1898                                expected_reclaims.push((*slot, *info));
1899                            }
1900                            retain
1901                        });
1902                        expected.push((new_slot, info));
1903                    }
1904                    assert_eq!(
1905                        expected_result, result,
1906                        "return value different. other: {:?}, {:?}, {:?}, original: {:?}",
1907                        other_slot, expected, slot_list, original
1908                    );
1909                    // sort for easy comparison
1910                    expected_reclaims.sort_unstable();
1911                    reclaims.sort_unstable();
1912                    assert_eq!(
1913                        expected_reclaims, reclaims,
1914                        "reclaims different. other: {:?}, {:?}, {:?}, original: {:?}",
1915                        other_slot, expected, slot_list, original
1916                    );
1917                    // sort for easy comparison
1918                    slot_list.sort_unstable();
1919                    expected.sort_unstable();
1920                    assert_eq!(
1921                        slot_list, expected,
1922                        "slot_list different. other: {:?}, {:?}, {:?}, original: {:?}",
1923                        other_slot, expected, slot_list, original
1924                    );
1925                }
1926            }
1927        }
1928        assert_eq!(attempts, 1304); // complicated permutations, so make sure we ran the right #
1929    }
1930
1931    #[test]
1932    fn test_flush_guard() {
1933        let flushing_active = AtomicBool::new(false);
1934
1935        {
1936            let flush_guard = FlushGuard::lock(&flushing_active);
1937            assert!(flush_guard.is_some());
1938            assert!(flushing_active.load(Ordering::Acquire));
1939
1940            {
1941                // Trying to lock the FlushGuard again will not succeed.
1942                let flush_guard2 = FlushGuard::lock(&flushing_active);
1943                assert!(flush_guard2.is_none());
1944            }
1945
1946            // The `flushing_active` flag will remain true, even after `flush_guard2` goes out of
1947            // scope (and is dropped).  This ensures `lock()` and `drop()` work harmoniously.
1948            assert!(flushing_active.load(Ordering::Acquire));
1949        }
1950
1951        // After the FlushGuard is dropped, the flag will be cleared.
1952        assert!(!flushing_active.load(Ordering::Acquire));
1953    }
1954
1955    #[test]
1956    fn test_remove_if_slot_list_empty_entry() {
1957        let key = solana_sdk::pubkey::new_rand();
1958        let unknown_key = solana_sdk::pubkey::new_rand();
1959
1960        let test = new_for_test::<u64>();
1961
1962        let mut map = test.map_internal.write().unwrap();
1963
1964        {
1965            // item is NOT in index at all, still return true from remove_if_slot_list_empty_entry
1966            // make sure not initially in index
1967            let entry = map.entry(unknown_key);
1968            assert!(matches!(entry, Entry::Vacant(_)));
1969            let entry = map.entry(unknown_key);
1970            assert!(test.remove_if_slot_list_empty_entry(entry));
1971            // make sure still not in index
1972            let entry = map.entry(unknown_key);
1973            assert!(matches!(entry, Entry::Vacant(_)));
1974        }
1975
1976        {
1977            // add an entry with an empty slot list
1978            let val = Arc::new(AccountMapEntryInner::<u64>::default());
1979            map.insert(key, val);
1980            let entry = map.entry(key);
1981            assert!(matches!(entry, Entry::Occupied(_)));
1982            // should have removed it since it had an empty slot list
1983            assert!(test.remove_if_slot_list_empty_entry(entry));
1984            let entry = map.entry(key);
1985            assert!(matches!(entry, Entry::Vacant(_)));
1986            // return true - item is not in index at all now
1987            assert!(test.remove_if_slot_list_empty_entry(entry));
1988        }
1989
1990        {
1991            // add an entry with a NON empty slot list - it will NOT get removed
1992            let val = Arc::new(AccountMapEntryInner::<u64>::default());
1993            val.slot_list.write().unwrap().push((1, 1));
1994            map.insert(key, val);
1995            // does NOT remove it since it has a non-empty slot list
1996            let entry = map.entry(key);
1997            assert!(!test.remove_if_slot_list_empty_entry(entry));
1998            let entry = map.entry(key);
1999            assert!(matches!(entry, Entry::Occupied(_)));
2000        }
2001    }
2002}