// solana_runtime/in_mem_accounts_index.rs

use {
    crate::{
        accounts_index::{
            AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, IndexValue,
            PreAllocatedAccountMapEntry, RefCount, SlotList, SlotSlice, UpsertReclaim, ZeroLamport,
        },
        bucket_map_holder::{Age, BucketMapHolder},
        bucket_map_holder_stats::BucketMapHolderStats,
        waitable_condvar::WaitableCondvar,
    },
    rand::{thread_rng, Rng},
    solana_bucket_map::bucket_api::BucketApi,
    solana_measure::measure::Measure,
    solana_sdk::{clock::Slot, pubkey::Pubkey},
    std::{
        collections::{hash_map::Entry, HashMap},
        fmt::Debug,
        ops::{Bound, RangeBounds, RangeInclusive},
        sync::{
            atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering},
            Arc, Mutex, RwLock, RwLockWriteGuard,
        },
    },
};
type K = Pubkey;
type CacheRangesHeld = RwLock<Vec<RangeInclusive<Pubkey>>>;

type InMemMap<T> = HashMap<Pubkey, AccountMapEntry<T>>;

#[derive(Debug)]
pub struct PossibleEvictions<T: IndexValue> {
    /// vec per age in the future, up to size 'ages_to_stay_in_cache'
    possible_evictions: Vec<FlushScanResult<T>>,
    /// next index to use into 'possible_evictions'
    /// if 'index' >= 'possible_evictions.len()', then there are no available entries
    index: usize,
}

impl<T: IndexValue> PossibleEvictions<T> {
    fn new(max_ages: Age) -> Self {
        Self {
            possible_evictions: (0..max_ages).map(|_| FlushScanResult::default()).collect(),
            index: max_ages as usize, // initially no data
        }
    }

    /// remove the possible evictions. This is required because we need ownership of the Arc strong counts to transfer to caller so entries can be removed from the accounts index
    fn get_possible_evictions(&mut self) -> Option<FlushScanResult<T>> {
        self.possible_evictions.get_mut(self.index).map(|result| {
            self.index += 1;
            // remove the list from 'possible_evictions'
            std::mem::take(result)
        })
    }

    /// clear existing data and prepare to add 'entries' more ages of data
    fn reset(&mut self, entries: Age) {
        self.possible_evictions.iter_mut().for_each(|entry| {
            entry.evictions_random.clear();
            entry.evictions_age_possible.clear();
        });
        let entries = entries as usize;
        assert!(
            entries <= self.possible_evictions.len(),
            "entries: {}, len: {}",
            entries,
            self.possible_evictions.len()
        );
        self.index = self.possible_evictions.len() - entries;
    }

    /// insert 'entry' at 'relative_age' in the future into 'possible_evictions'
    fn insert(&mut self, relative_age: Age, key: Pubkey, entry: AccountMapEntry<T>, random: bool) {
        let index = self.index + (relative_age as usize);
        let list = &mut self.possible_evictions[index];
        if random {
            &mut list.evictions_random
        } else {
            &mut list.evictions_age_possible
        }
        .push((key, entry));
    }
}
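
// Illustrative sketch (not from the upstream test suite): models how 'PossibleEvictions'
// uses 'index' as a cursor into one bucket per future age. Plain Vec<u8> buckets stand in
// for FlushScanResult so the example stays self-contained.
#[cfg(test)]
mod possible_evictions_cursor_sketch {
    #[test]
    fn relative_age_cursor() {
        let max_ages = 4;
        let mut buckets: Vec<Vec<u8>> = vec![Vec::new(); max_ages];
        // like reset(2): prepare the last 2 buckets to hold 2 future ages of data
        let entries = 2;
        let mut index = buckets.len() - entries;
        // like insert(relative_age = 1, ..): queue an entry 1 age in the future
        buckets[index + 1].push(7);
        // like get_possible_evictions(): take the bucket for the current age, then advance
        assert!(std::mem::take(&mut buckets[index]).is_empty());
        index += 1;
        // one age later, the queued entry is handed back to the caller
        assert_eq!(std::mem::take(&mut buckets[index]), vec![7]);
    }
}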

// one instance of this represents one bin of the accounts index.
pub struct InMemAccountsIndex<T: IndexValue> {
    last_age_flushed: AtomicU8,

    // backing store
    map_internal: RwLock<InMemMap<T>>,
    storage: Arc<BucketMapHolder<T>>,
    bin: usize,

    bucket: Option<Arc<BucketApi<(Slot, T)>>>,

    // pubkey ranges that this bin must hold in the cache while the range is present in this vec
    pub(crate) cache_ranges_held: CacheRangesHeld,
    // incremented each time stop_evictions is changed
    stop_evictions_changes: AtomicU64,
    // non-zero while there are outstanding requests to stop evictions (e.g. while pubkey ranges are being held).
    // Used to keep an async flush from removing things while a range is being held.
    stop_evictions: AtomicU64,
    // set to true while this bin is being actively flushed
    flushing_active: AtomicBool,

    /// info to streamline initial index generation
    startup_info: Mutex<StartupInfo<T>>,

    /// possible evictions for next few slots coming up
    possible_evictions: RwLock<PossibleEvictions<T>>,
    /// when age % ages_to_stay_in_cache == 'age_to_flush_bin_mod', then calculate the next 'ages_to_stay_in_cache' 'possible_evictions'
    /// this causes us to scan the entire in-mem hash map only once every 'ages_to_stay_in_cache' ages instead of at each age
    age_to_flush_bin_mod: Age,
}

impl<T: IndexValue> Debug for InMemAccountsIndex<T> {
    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}

pub enum InsertNewEntryResults {
    DidNotExist,
    ExistedNewEntryZeroLamports,
    ExistedNewEntryNonZeroLamports,
}

#[derive(Default, Debug)]
struct StartupInfo<T: IndexValue> {
    /// entries to add next time we are flushing to disk
    insert: Vec<(Slot, Pubkey, T)>,
    /// pubkeys that were found to have duplicate index entries
    duplicates: Vec<(Slot, Pubkey)>,
}

#[derive(Default, Debug)]
/// result from scanning in-mem index during flush
struct FlushScanResult<T> {
    /// pubkeys whose age indicates they may be evicted now, pending further checks.
    evictions_age_possible: Vec<(Pubkey, AccountMapEntry<T>)>,
    /// pubkeys chosen to evict based on random eviction
    evictions_random: Vec<(Pubkey, AccountMapEntry<T>)>,
}

impl<T: IndexValue> InMemAccountsIndex<T> {
    pub fn new(storage: &Arc<BucketMapHolder<T>>, bin: usize) -> Self {
        let ages_to_stay_in_cache = storage.ages_to_stay_in_cache;
        Self {
            map_internal: RwLock::default(),
            storage: Arc::clone(storage),
            bin,
            bucket: storage
                .disk
                .as_ref()
                .map(|disk| disk.get_bucket_from_index(bin))
                .map(Arc::clone),
            cache_ranges_held: CacheRangesHeld::default(),
            stop_evictions_changes: AtomicU64::default(),
            stop_evictions: AtomicU64::default(),
            flushing_active: AtomicBool::default(),
            // initialize this to max, to make it clear we have not flushed at age 0, the starting age
            last_age_flushed: AtomicU8::new(Age::MAX),
            startup_info: Mutex::default(),
            possible_evictions: RwLock::new(PossibleEvictions::new(ages_to_stay_in_cache)),
            // Spread out the scanning across all ages within the window.
            // This causes us to scan 1/N of the bins each 'Age'
            age_to_flush_bin_mod: thread_rng().gen_range(0, ages_to_stay_in_cache),
        }
    }

    /// # ages to scan ahead
    fn ages_to_scan_ahead(&self, current_age: Age) -> Age {
        let ages_to_stay_in_cache = self.storage.ages_to_stay_in_cache;
        if (self.age_to_flush_bin_mod == current_age % ages_to_stay_in_cache)
            && !self.storage.get_startup()
        {
            // scan ahead multiple ages
            ages_to_stay_in_cache
        } else {
            1 // just current age
        }
    }
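
    // Worked example (illustrative values): with ages_to_stay_in_cache = 10 and this bin's
    // randomly chosen age_to_flush_bin_mod = 3, the bin scans 10 ages ahead whenever
    // current_age % 10 == 3 (outside startup), and only the current age otherwise. Each bin
    // draws a different mod, so the expensive whole-map scans are staggered across bins.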

    /// true if this bucket needs to call flush for the current age
    /// we need to scan each bucket once per value of age
    fn get_should_age(&self, age: Age) -> bool {
        let last_age_flushed = self.last_age_flushed();
        last_age_flushed != age
    }

    /// called after flush scans this bucket at the current age
    fn set_has_aged(&self, age: Age, can_advance_age: bool) {
        self.last_age_flushed.store(age, Ordering::Release);
        self.storage.bucket_flushed_at_current_age(can_advance_age);
    }

    fn last_age_flushed(&self) -> Age {
        self.last_age_flushed.load(Ordering::Acquire)
    }

    /// Release the entire in-mem hashmap to free all memory associated with it.
    /// The idea is that during startup we need a larger map than we do during normal runtime.
    /// When using disk buckets, the in-mem index grows over time with dynamic use and then shrinks, in theory back to 0.
    pub fn shrink_to_fit(&self) {
        // shrink_to_fit could be quite expensive on large map sizes, which 'no disk buckets' could produce, so avoid shrinking in case we end up here
        if self.storage.is_disk_index_enabled() {
            self.map_internal.write().unwrap().shrink_to_fit();
        }
    }

    pub fn items<R>(&self, range: &R) -> Vec<(K, AccountMapEntry<T>)>
    where
        R: RangeBounds<Pubkey> + std::fmt::Debug,
    {
        let m = Measure::start("items");
        self.hold_range_in_memory(range, true);
        let map = self.map_internal.read().unwrap();
        let mut result = Vec::with_capacity(map.len());
        map.iter().for_each(|(k, v)| {
            if range.contains(k) {
                result.push((*k, Arc::clone(v)));
            }
        });
        drop(map);
        self.hold_range_in_memory(range, false);
        Self::update_stat(&self.stats().items, 1);
        Self::update_time_stat(&self.stats().items_us, m);
        result
    }

    // only called in debug code paths
    pub fn keys(&self) -> Vec<Pubkey> {
        Self::update_stat(&self.stats().keys, 1);
        // easiest implementation is to load everything from disk into cache and return the keys
        let evictions_guard = EvictionsGuard::lock(self);
        self.put_range_in_cache(&None::<&RangeInclusive<Pubkey>>, &evictions_guard);
        let keys = self.map_internal.read().unwrap().keys().cloned().collect();
        keys
    }

    fn load_from_disk(&self, pubkey: &Pubkey) -> Option<(SlotList<T>, RefCount)> {
        self.bucket.as_ref().and_then(|disk| {
            let m = Measure::start("load_disk_found_count");
            let entry_disk = disk.read_value(pubkey);
            match &entry_disk {
                Some(_) => {
                    Self::update_time_stat(&self.stats().load_disk_found_us, m);
                    Self::update_stat(&self.stats().load_disk_found_count, 1);
                }
                None => {
                    Self::update_time_stat(&self.stats().load_disk_missing_us, m);
                    Self::update_stat(&self.stats().load_disk_missing_count, 1);
                }
            }
            entry_disk
        })
    }

    fn load_account_entry_from_disk(&self, pubkey: &Pubkey) -> Option<AccountMapEntry<T>> {
        let entry_disk = self.load_from_disk(pubkey)?; // returns None if not on disk

        Some(self.disk_to_cache_entry(entry_disk.0, entry_disk.1))
    }

    /// lookup 'pubkey' by only looking in memory. Does not look on disk.
    /// callback is called whether pubkey is found or not
    fn get_only_in_mem<RT>(
        &self,
        pubkey: &K,
        update_age: bool,
        callback: impl for<'a> FnOnce(Option<&'a AccountMapEntry<T>>) -> RT,
    ) -> RT {
        let mut found = true;
        let mut m = Measure::start("get");
        let result = {
            let map = self.map_internal.read().unwrap();
            let result = map.get(pubkey);
            m.stop();

            callback(if let Some(entry) = result {
                if update_age {
                    self.set_age_to_future(entry, false);
                }
                Some(entry)
            } else {
                drop(map);
                found = false;
                None
            })
        };

        let stats = self.stats();
        let (count, time) = if found {
            (&stats.gets_from_mem, &stats.get_mem_us)
        } else {
            (&stats.gets_missing, &stats.get_missing_us)
        };
        Self::update_stat(time, m.as_us());
        Self::update_stat(count, 1);

        result
    }

    /// lookup 'pubkey' in index (in mem or on disk)
    pub fn get(&self, pubkey: &K) -> Option<AccountMapEntry<T>> {
        self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone)))
    }

    /// set age of 'entry' to the future
    /// if 'is_cached', age will be set farther
    fn set_age_to_future(&self, entry: &AccountMapEntry<T>, is_cached: bool) {
        entry.set_age(self.storage.future_age_to_flush(is_cached));
    }

    /// lookup 'pubkey' in index (in_mem or disk).
    /// call 'callback' whether found or not
    pub(crate) fn get_internal<RT>(
        &self,
        pubkey: &K,
        // return true if item should be added to in_mem cache
        callback: impl for<'a> FnOnce(Option<&AccountMapEntry<T>>) -> (bool, RT),
    ) -> RT {
        self.get_only_in_mem(pubkey, true, |entry| {
            if let Some(entry) = entry {
                callback(Some(entry)).1
            } else {
                // not in cache, look on disk
                let stats = self.stats();
                let disk_entry = self.load_account_entry_from_disk(pubkey);
                if disk_entry.is_none() {
                    return callback(None).1;
                }
                let disk_entry = disk_entry.unwrap();
                let mut map = self.map_internal.write().unwrap();
                let entry = map.entry(*pubkey);
                match entry {
                    Entry::Occupied(occupied) => callback(Some(occupied.get())).1,
                    Entry::Vacant(vacant) => {
                        let (add_to_cache, rt) = callback(Some(&disk_entry));

                        if add_to_cache {
                            stats.inc_mem_count(self.bin);
                            vacant.insert(disk_entry);
                        }
                        rt
                    }
                }
            }
        })
    }

    fn remove_if_slot_list_empty_value(&self, slot_list: SlotSlice<T>) -> bool {
        if slot_list.is_empty() {
            self.stats().inc_delete();
            true
        } else {
            false
        }
    }

    fn delete_disk_key(&self, pubkey: &Pubkey) {
        if let Some(disk) = self.bucket.as_ref() {
            disk.delete_key(pubkey)
        }
    }

    /// return false if the entry is in the index (disk or memory) and has a slot list len > 0
    /// return true in all other cases, including if the entry is NOT in the index at all
    fn remove_if_slot_list_empty_entry(&self, entry: Entry<K, AccountMapEntry<T>>) -> bool {
        match entry {
            Entry::Occupied(occupied) => {
                let result =
                    self.remove_if_slot_list_empty_value(&occupied.get().slot_list.read().unwrap());
                if result {
                    // note there is a potential race here that has existed.
                    // if someone else holds the arc,
                    //  then they think the item is still in the index and can make modifications.
                    // We have to have a write lock to the map here, which means nobody else can get
                    //  the arc, but someone may already have retrieved a clone of it.
                    // account index in_mem flushing is one such possibility
                    self.delete_disk_key(occupied.key());
                    self.stats().dec_mem_count(self.bin);
                    occupied.remove();
                }
                result
            }
            Entry::Vacant(vacant) => {
                // not in cache, look on disk
                let entry_disk = self.load_from_disk(vacant.key());
                match entry_disk {
                    Some(entry_disk) => {
                        // on disk
                        if self.remove_if_slot_list_empty_value(&entry_disk.0) {
                            // not in cache, but on disk, so just delete from disk
                            self.delete_disk_key(vacant.key());
                            true
                        } else {
                            // could insert into cache here, but not required for correctness and value is unclear
                            false
                        }
                    }
                    None => true, // not in cache or on disk, but slot list is 'empty' and entry is not in index, so return true
                }
            }
        }
    }

    // If the slot list for pubkey exists in the index and is empty, remove the index entry for pubkey and return true.
    // Return false otherwise.
    pub fn remove_if_slot_list_empty(&self, pubkey: Pubkey) -> bool {
        let mut m = Measure::start("entry");
        let mut map = self.map_internal.write().unwrap();
        let entry = map.entry(pubkey);
        m.stop();
        let found = matches!(entry, Entry::Occupied(_));
        let result = self.remove_if_slot_list_empty_entry(entry);
        drop(map);

        self.update_entry_stats(m, found);
        result
    }

    pub fn slot_list_mut<RT>(
        &self,
        pubkey: &Pubkey,
        user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT,
    ) -> Option<RT> {
        self.get_internal(pubkey, |entry| {
            (
                true,
                entry.map(|entry| {
                    let result = user(&mut entry.slot_list.write().unwrap());
                    entry.set_dirty(true);
                    result
                }),
            )
        })
    }

    /// update 'entry' with 'new_value'
    fn update_slot_list_entry(
        &self,
        entry: &AccountMapEntry<T>,
        new_value: PreAllocatedAccountMapEntry<T>,
        other_slot: Option<Slot>,
        reclaims: &mut SlotList<T>,
        reclaim: UpsertReclaim,
    ) {
        let new_value: (Slot, T) = new_value.into();
        let mut upsert_cached = new_value.1.is_cached();
        if Self::lock_and_update_slot_list(entry, new_value, other_slot, reclaims, reclaim) > 1 {
            // if slot list > 1, then we are going to hold this entry in memory until it gets set back to 1
            upsert_cached = true;
        }
        self.set_age_to_future(entry, upsert_cached);
    }

    pub fn upsert(
        &self,
        pubkey: &Pubkey,
        new_value: PreAllocatedAccountMapEntry<T>,
        other_slot: Option<Slot>,
        reclaims: &mut SlotList<T>,
        reclaim: UpsertReclaim,
    ) {
        let mut updated_in_mem = true;
        // try to get it just from memory first using only a read lock
        self.get_only_in_mem(pubkey, false, |entry| {
            if let Some(entry) = entry {
                self.update_slot_list_entry(entry, new_value, other_slot, reclaims, reclaim);
            } else {
                let mut m = Measure::start("entry");
                let mut map = self.map_internal.write().unwrap();
                let entry = map.entry(*pubkey);
                m.stop();
                let found = matches!(entry, Entry::Occupied(_));
                match entry {
                    Entry::Occupied(mut occupied) => {
                        let current = occupied.get_mut();
                        self.update_slot_list_entry(
                            current, new_value, other_slot, reclaims, reclaim,
                        );
                    }
                    Entry::Vacant(vacant) => {
                        // not in cache, look on disk
                        updated_in_mem = false;

                        // go through the in-mem cache first (rather than writing directly to disk); the merged entry is inserted dirty and flushed later
                        let disk_entry = self.load_account_entry_from_disk(vacant.key());
                        let new_value = if let Some(disk_entry) = disk_entry {
                            // on disk, so merge new_value with what was on disk
                            self.update_slot_list_entry(
                                &disk_entry,
                                new_value,
                                other_slot,
                                reclaims,
                                reclaim,
                            );
                            disk_entry
                        } else {
                            // not on disk, so insert new thing
                            self.stats().inc_insert();
                            new_value.into_account_map_entry(&self.storage)
                        };
                        assert!(new_value.dirty());
                        vacant.insert(new_value);
                        self.stats().inc_mem_count(self.bin);
                    }
                };

                drop(map);
                self.update_entry_stats(m, found);
            };
        });
        if updated_in_mem {
            Self::update_stat(&self.stats().updates_in_mem, 1);
        }
    }

    fn update_entry_stats(&self, stopped_measure: Measure, found: bool) {
        let stats = self.stats();
        let (count, time) = if found {
            (&stats.entries_from_mem, &stats.entry_mem_us)
        } else {
            (&stats.entries_missing, &stats.entry_missing_us)
        };
        Self::update_stat(time, stopped_measure.as_us());
        Self::update_stat(count, 1);
    }

    /// Try to update an item in the slot list at the given `slot`. If an item for the slot
    /// already exists in the list, remove the older item, add it to `reclaims`, and insert
    /// the new item.
    /// if 'other_slot' is some, then also remove any entries in the slot list that are at 'other_slot'
    /// return the resulting len of the slot list
    pub(crate) fn lock_and_update_slot_list(
        current: &AccountMapEntryInner<T>,
        new_value: (Slot, T),
        other_slot: Option<Slot>,
        reclaims: &mut SlotList<T>,
        reclaim: UpsertReclaim,
    ) -> usize {
        let mut slot_list = current.slot_list.write().unwrap();
        let (slot, new_entry) = new_value;
        let addref = Self::update_slot_list(
            &mut slot_list,
            slot,
            new_entry,
            other_slot,
            reclaims,
            reclaim,
        );
        if addref {
            current.addref();
        }
        current.set_dirty(true);
        slot_list.len()
    }

    /// modifies slot_list
    /// any entry at 'slot' or slot 'other_slot' is replaced with 'account_info'.
    /// or, 'account_info' is appended to the slot list if the slot did not exist previously.
    /// returns true if caller should addref
    /// conditions when caller should addref:
    ///   'account_info' does NOT represent a cached storage (the slot is being flushed from the cache)
    /// AND
    ///   previous slot_list entry AT 'slot' did not exist (this is the first time this account was modified in this "slot"), or was previously cached (the storage is now being flushed from the cache)
    /// Note that even if entry DID exist at 'other_slot', the above conditions apply.
    fn update_slot_list(
        slot_list: &mut SlotList<T>,
        slot: Slot,
        account_info: T,
        mut other_slot: Option<Slot>,
        reclaims: &mut SlotList<T>,
        reclaim: UpsertReclaim,
    ) -> bool {
        let mut addref = !account_info.is_cached();

        if other_slot == Some(slot) {
            other_slot = None; // redundant info, so ignore
        }

        // There may be 0..=2 dirty accounts found (one at 'slot' and one at 'other_slot')
        // that are already in the slot list.  Since the first one found will be swapped with the
        // new account, if a second one is found, we cannot swap again. Instead, just remove it.
        let mut found_slot = false;
        let mut found_other_slot = false;
        (0..slot_list.len())
            .rev() // rev since we delete from the list in some cases
            .for_each(|slot_list_index| {
                let (cur_slot, cur_account_info) = &slot_list[slot_list_index];
                let matched_slot = *cur_slot == slot;
                if matched_slot || Some(*cur_slot) == other_slot {
                    // make sure neither 'slot' nor 'other_slot' are in the slot list more than once
                    let matched_other_slot = !matched_slot;
                    assert!(
                        !(found_slot && matched_slot || matched_other_slot && found_other_slot),
                        "{slot_list:?}, slot: {slot}, other_slot: {other_slot:?}"
                    );

                    let is_cur_account_cached = cur_account_info.is_cached();

                    let reclaim_item = if !(found_slot || found_other_slot) {
                        // first time we found an entry in 'slot' or 'other_slot', so replace it in-place.
                        // this may be the only instance we find
                        std::mem::replace(&mut slot_list[slot_list_index], (slot, account_info))
                    } else {
                        // already replaced one entry, so this one has to be removed
                        slot_list.remove(slot_list_index)
                    };
                    match reclaim {
                        UpsertReclaim::PopulateReclaims => {
                            reclaims.push(reclaim_item);
                        }
                        UpsertReclaim::PreviousSlotEntryWasCached => {
                            assert!(is_cur_account_cached);
                        }
                        UpsertReclaim::IgnoreReclaims => {
                            // do nothing. nothing to assert. nothing to return in reclaims
                        }
                    }

                    if matched_slot {
                        found_slot = true;
                    } else {
                        found_other_slot = true;
                    }
                    if !is_cur_account_cached {
                        // current info at 'slot' is NOT cached, so we should NOT addref. This slot already has a ref count for this pubkey.
                        addref = false;
                    }
                }
            });
        if !found_slot && !found_other_slot {
            // if we make it here, we did not find the slot in the list
            slot_list.push((slot, account_info));
        }
        addref
    }
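
    // Worked example (illustrative): given slot_list = [(10, A), (12, B)], calling
    // update_slot_list(slot = 12, account_info = C, other_slot = Some(10)) scans in reverse:
    //  - (12, B) matches 'slot' and is the first hit, so it is replaced in place with (12, C)
    //  - (10, A) matches 'other_slot'; one replacement already happened, so it is removed
    // Under PopulateReclaims both displaced entries are pushed to 'reclaims', leaving
    // slot_list = [(12, C)]. The returned 'addref' is true only if C is not cached AND every
    // displaced entry was cached.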

    // convert from raw data on disk to AccountMapEntry, set to age in future
    fn disk_to_cache_entry(
        &self,
        slot_list: SlotList<T>,
        ref_count: RefCount,
    ) -> AccountMapEntry<T> {
        Arc::new(AccountMapEntryInner::new(
            slot_list,
            ref_count,
            AccountMapEntryMeta::new_clean(&self.storage),
        ))
    }

    pub fn len_for_stats(&self) -> usize {
        self.stats().count_in_bucket(self.bin)
    }

    /// Queue up these insertions for when the flush thread is dealing with this bin.
    /// This is very fast and requires no lookups or disk access.
    pub fn startup_insert_only(&self, slot: Slot, items: impl Iterator<Item = (Pubkey, T)>) {
        assert!(self.storage.get_startup());
        assert!(self.bucket.is_some());

        let insert = &mut self.startup_info.lock().unwrap().insert;
        items
            .into_iter()
            .for_each(|(k, v)| insert.push((slot, k, v)));
    }

    pub fn insert_new_entry_if_missing_with_lock(
        &self,
        pubkey: Pubkey,
        new_entry: PreAllocatedAccountMapEntry<T>,
    ) -> InsertNewEntryResults {
        let mut m = Measure::start("entry");
        let mut map = self.map_internal.write().unwrap();
        let entry = map.entry(pubkey);
        m.stop();
        let new_entry_zero_lamports = new_entry.is_zero_lamport();
        let (found_in_mem, already_existed) = match entry {
            Entry::Occupied(occupied) => {
                // in cache, so merge into cache
                let (slot, account_info) = new_entry.into();
                InMemAccountsIndex::lock_and_update_slot_list(
                    occupied.get(),
                    (slot, account_info),
                    None, // should be None because we don't expect a different slot # during index generation
                    &mut Vec::default(),
                    UpsertReclaim::PopulateReclaims, // should this be UpsertReclaim::IgnoreReclaims?
                );
                (
                    true, /* found in mem */
                    true, /* already existed */
                )
            }
            Entry::Vacant(vacant) => {
                // not in cache, look on disk
                let disk_entry = self.load_account_entry_from_disk(vacant.key());
                self.stats().inc_mem_count(self.bin);
                if let Some(disk_entry) = disk_entry {
                    let (slot, account_info) = new_entry.into();
                    InMemAccountsIndex::lock_and_update_slot_list(
                        &disk_entry,
                        (slot, account_info),
                        // None because we are inserting the first element in the slot list for this pubkey.
                        // There can be no 'other' slot in the list.
                        None,
                        &mut Vec::default(),
                        UpsertReclaim::PopulateReclaims,
                    );
                    vacant.insert(disk_entry);
                    (
                        false, /* found in mem */
                        true,  /* already existed */
                    )
                } else {
                    // not on disk, so insert new thing and we're done
                    let new_entry: AccountMapEntry<T> =
                        new_entry.into_account_map_entry(&self.storage);
                    assert!(new_entry.dirty());
                    vacant.insert(new_entry);
                    (false, false)
                }
            }
        };
        drop(map);
        self.update_entry_stats(m, found_in_mem);
        let stats = self.stats();
        if !already_existed {
            stats.inc_insert();
        } else {
            Self::update_stat(&stats.updates_in_mem, 1);
        }
        if !already_existed {
            InsertNewEntryResults::DidNotExist
        } else if new_entry_zero_lamports {
            InsertNewEntryResults::ExistedNewEntryZeroLamports
        } else {
            InsertNewEntryResults::ExistedNewEntryNonZeroLamports
        }
    }

    /// Look at the currently held ranges. If 'range' is already included in what is
    ///  being held, then add 'range' to the currently held list AND return true
    /// If 'range' is NOT already included in what is being held, then return false
    ///  withOUT adding 'range' to the list of what is currently held
    fn add_hold_range_in_memory_if_already_held<R>(
        &self,
        range: &R,
        evictions_guard: &EvictionsGuard,
    ) -> bool
    where
        R: RangeBounds<Pubkey>,
    {
        let start_holding = true;
        let only_add_if_already_held = true;
        self.just_set_hold_range_in_memory_internal(
            range,
            start_holding,
            only_add_if_already_held,
            evictions_guard,
        )
    }

    fn just_set_hold_range_in_memory<R>(
        &self,
        range: &R,
        start_holding: bool,
        evictions_guard: &EvictionsGuard,
    ) where
        R: RangeBounds<Pubkey>,
    {
        let only_add_if_already_held = false;
        let _ = self.just_set_hold_range_in_memory_internal(
            range,
            start_holding,
            only_add_if_already_held,
            evictions_guard,
        );
    }

    /// if 'start_holding', then caller wants to add 'range' to the list of ranges being held
    /// if !'start_holding', then caller wants to remove 'range' from the list
    /// if 'only_add_if_already_held', caller intends to only add 'range' to the list if the range is already held
    /// returns true iff start_holding=true and the range we're asked to hold was already being held
    fn just_set_hold_range_in_memory_internal<R>(
        &self,
        range: &R,
        start_holding: bool,
        only_add_if_already_held: bool,
        _evictions_guard: &EvictionsGuard,
    ) -> bool
    where
        R: RangeBounds<Pubkey>,
    {
        assert!(!only_add_if_already_held || start_holding);
        let start = match range.start_bound() {
            Bound::Included(bound) | Bound::Excluded(bound) => *bound,
            Bound::Unbounded => Pubkey::from([0; 32]),
        };

        let end = match range.end_bound() {
            Bound::Included(bound) | Bound::Excluded(bound) => *bound,
            Bound::Unbounded => Pubkey::from([0xff; 32]),
        };

        // this becomes inclusive - that is ok - we are just roughly holding a range of items.
        // inclusive is bigger than exclusive so we may hold 1 extra item worst case
        let inclusive_range = start..=end;
        let mut ranges = self.cache_ranges_held.write().unwrap();
        let mut already_held = false;
        if start_holding {
            if only_add_if_already_held {
                for r in ranges.iter() {
                    if r.contains(&start) && r.contains(&end) {
                        already_held = true;
                        break;
                    }
                }
            }
            if already_held || !only_add_if_already_held {
                ranges.push(inclusive_range);
            }
        } else {
            // find the matching range and delete it since we don't want to hold it anymore
            // search backwards, assuming LIFO ordering
            for (i, r) in ranges.iter().enumerate().rev() {
                if let (Bound::Included(start_found), Bound::Included(end_found)) =
                    (r.start_bound(), r.end_bound())
                {
                    if start_found == &start && end_found == &end {
                        // found a match. There may be dups, that's ok, we expect another call to remove the dup.
                        ranges.remove(i);
                        break;
                    }
                }
            }
        }
        already_held
    }

    /// if 'start_holding'=true, then:
    ///  at the end of this function, cache_ranges_held will be updated to contain 'range'
    ///  and all pubkeys in that range will be in the in-mem cache
    /// if 'start_holding'=false, then:
    ///  'range' will be removed from cache_ranges_held
    ///  and all pubkeys will be eligible for being removed from in-mem cache in the bg if no other range is holding them
    /// Any in-process flush will be aborted when it gets to evicting items from in-mem.
    pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool)
    where
        R: RangeBounds<Pubkey> + Debug,
    {
        let evictions_guard = EvictionsGuard::lock(self);

        if !start_holding || !self.add_hold_range_in_memory_if_already_held(range, &evictions_guard)
        {
            if start_holding {
                // put everything in the cache and it will be held there
                self.put_range_in_cache(&Some(range), &evictions_guard);
            }
            // do this AFTER items have been put in cache - that way anyone who finds this range can know that the items are already in the cache
            self.just_set_hold_range_in_memory(range, start_holding, &evictions_guard);
        }
    }

    fn put_range_in_cache<R>(&self, range: &Option<&R>, _evictions_guard: &EvictionsGuard)
    where
        R: RangeBounds<Pubkey>,
    {
        assert!(self.get_stop_evictions()); // caller should be controlling the lifetime of how long this needs to be present
        let m = Measure::start("range");

        let mut added_to_mem = 0;
        // load from disk
        if let Some(disk) = self.bucket.as_ref() {
            let mut map = self.map_internal.write().unwrap();
            let items = disk.items_in_range(range); // map's lock has to be held while we are getting items from disk
            let future_age = self.storage.future_age_to_flush(false);
            for item in items {
                let entry = map.entry(item.pubkey);
                match entry {
                    Entry::Occupied(occupied) => {
                        // item already in cache, bump age to future. This helps the current age flush to succeed.
                        occupied.get().set_age(future_age);
                    }
                    Entry::Vacant(vacant) => {
                        vacant.insert(self.disk_to_cache_entry(item.slot_list, item.ref_count));
                        added_to_mem += 1;
                    }
                }
            }
        }
        self.stats().add_mem_count(self.bin, added_to_mem);

        Self::update_time_stat(&self.stats().get_range_us, m);
    }

    /// returns true if there are active requests to stop evictions
    fn get_stop_evictions(&self) -> bool {
        self.stop_evictions.load(Ordering::Acquire) > 0
    }

    /// return count of calls to 'start_stop_evictions', indicating changes could have been made to eviction strategy
    fn get_stop_evictions_changes(&self) -> u64 {
        self.stop_evictions_changes.load(Ordering::Acquire)
    }

    pub(crate) fn flush(&self, can_advance_age: bool) {
        if let Some(flush_guard) = FlushGuard::lock(&self.flushing_active) {
            self.flush_internal(&flush_guard, can_advance_age)
        }
    }

    /// returns true if a dice roll indicates this call should result in a random eviction.
    /// This causes non-determinism in cache contents per validator.
    fn random_chance_of_eviction() -> bool {
        // random eviction
        const N: usize = 1000;
        // 1/N chance of eviction
        thread_rng().gen_range(0, N) == 0
    }
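
    // With N = 1000, each entry examined by flush_scan has a 1/1000 chance of being queued
    // for random eviction. For example (illustrative size), a bin holding ~100k in-mem
    // entries would randomly select roughly 100 of them per full scan, independent of age.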

    /// assumes 1 entry in the slot list. Ignores overhead of the HashMap and such
    fn approx_size_of_one_entry() -> usize {
        std::mem::size_of::<T>()
            + std::mem::size_of::<Pubkey>()
            + std::mem::size_of::<AccountMapEntry<T>>()
    }

    fn should_evict_based_on_age(
        current_age: Age,
        entry: &AccountMapEntry<T>,
        startup: bool,
    ) -> bool {
        startup || (current_age == entry.age())
    }

    /// return true if 'entry' should be evicted from the in-mem index
    fn should_evict_from_mem<'a>(
        &self,
        current_age: Age,
        entry: &'a AccountMapEntry<T>,
        startup: bool,
        update_stats: bool,
        exceeds_budget: bool,
    ) -> (bool, Option<std::sync::RwLockReadGuard<'a, SlotList<T>>>) {
        // this could be tunable dynamically based on memory pressure
        // we could look at more ages or we could throw out more items we are choosing to keep in the cache
        if Self::should_evict_based_on_age(current_age, entry, startup) {
            if exceeds_budget {
                // if we are already holding too many items in-mem, then we need to be more aggressive at kicking things out
                (true, None)
            } else {
                // only read the slot list if we are planning to throw the item out
                let slot_list = entry.slot_list.read().unwrap();
                if slot_list.len() != 1 {
                    if update_stats {
                        Self::update_stat(&self.stats().held_in_mem_slot_list_len, 1);
                    }
                    (false, None) // keep 0 and > 1 slot lists in mem. They will be cleaned or shrunk soon.
                } else {
                    // keep items with slot lists that contained cached items
                    let evict = !slot_list.iter().any(|(_, info)| info.is_cached());
                    if !evict && update_stats {
                        Self::update_stat(&self.stats().held_in_mem_slot_list_cached, 1);
                    }
                    (evict, if evict { Some(slot_list) } else { None })
                }
            }
        } else {
            (false, None)
        }
    }

    /// scan loop
    /// holds read lock
    /// identifies items which are dirty and items to evict
    fn flush_scan(
        &self,
        current_age: Age,
        startup: bool,
        _flush_guard: &FlushGuard,
    ) -> FlushScanResult<T> {
        let mut possible_evictions = self.possible_evictions.write().unwrap();
        if let Some(result) = possible_evictions.get_possible_evictions() {
            // we have previously calculated the possible evictions for this age
            return result;
        }
        // otherwise, we need to scan some number of ages into the future now
        let ages_to_scan = self.ages_to_scan_ahead(current_age);
        possible_evictions.reset(ages_to_scan);

        let m;
        {
            let map = self.map_internal.read().unwrap();
            m = Measure::start("flush_scan"); // we don't care about lock time in this metric - bg threads can wait
            for (k, v) in map.iter() {
                let random = Self::random_chance_of_eviction();
                let age_offset = if random {
                    thread_rng().gen_range(0, ages_to_scan)
                } else if startup {
                    0
                } else {
                    let ages_in_future = v.age().wrapping_sub(current_age);
                    if ages_in_future >= ages_to_scan {
                        // not planning to evict this item from memory within the next few ages
                        continue;
                    }
                    ages_in_future
                };

                possible_evictions.insert(age_offset, *k, Arc::clone(v), random);
            }
        }
        Self::update_time_stat(&self.stats().flush_scan_us, m);

        possible_evictions.get_possible_evictions().unwrap()
    }
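
    // Worked example for the age arithmetic above (Age is a u8, so it wraps): with
    // current_age = 250 and an entry age of 3, 3u8.wrapping_sub(250) == 9, i.e. the entry
    // is scheduled to age out 9 ages from now. With ages_to_scan = 10 it is queued at
    // offset 9; with ages_to_scan = 1 it is skipped until a later scan.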

    fn write_startup_info_to_disk(&self) {
        let insert = std::mem::take(&mut self.startup_info.lock().unwrap().insert);
        if insert.is_empty() {
            // nothing to insert for this bin
            return;
        }

        // during startup, nothing should be in the in-mem map
        let map_internal = self.map_internal.read().unwrap();
        assert!(
            map_internal.is_empty(),
            "len: {}, first: {:?}",
            map_internal.len(),
            map_internal.iter().take(1).collect::<Vec<_>>()
        );
        drop(map_internal);

        let mut duplicates = vec![];

        // merge all items into the disk index now
        let disk = self.bucket.as_ref().unwrap();
        let mut count = 0;
        insert.into_iter().for_each(|(slot, k, v)| {
            let entry = (slot, v);
            let new_ref_count = u64::from(!v.is_cached());
            disk.update(&k, |current| {
                match current {
                    Some((current_slot_list, mut ref_count)) => {
                        // merge this in, mark as conflict
                        let mut slot_list = Vec::with_capacity(current_slot_list.len() + 1);
                        slot_list.extend_from_slice(current_slot_list);
                        slot_list.push(entry); // will never be from the same slot that already exists in the list
                        ref_count += new_ref_count;
                        duplicates.push((slot, k));
                        Some((slot_list, ref_count))
                    }
                    None => {
                        count += 1;
                        // not on disk, insert it
                        Some((vec![entry], new_ref_count))
                    }
                }
            });
        });
        self.stats().inc_insert_count(count);
        self.startup_info
            .lock()
            .unwrap()
            .duplicates
            .append(&mut duplicates);
    }

    /// pull out all duplicate pubkeys from 'startup_info'
    /// duplicate pubkeys have a slot list with len > 1
    /// These were collected for this bin when we did batch inserts in the bg flush threads.
    pub fn retrieve_duplicate_keys_from_startup(&self) -> Vec<(Slot, Pubkey)> {
        let mut write = self.startup_info.lock().unwrap();
        // in order to return accurate and complete duplicates, we must have nothing left remaining to insert
        assert!(write.insert.is_empty());

        std::mem::take(&mut write.duplicates)
    }

    /// synchronize the in-mem index with the disk index
    fn flush_internal(&self, flush_guard: &FlushGuard, can_advance_age: bool) {
        let current_age = self.storage.current_age();
        let iterate_for_age = self.get_should_age(current_age);
        let startup = self.storage.get_startup();
        if !iterate_for_age && !startup {
            // no need to age, so no need to flush this bucket
            // but, at startup we want to evict from buckets as fast as possible if any items exist
            return;
        }

        // scan in-mem map for items that we may evict
        let FlushScanResult {
            mut evictions_age_possible,
            mut evictions_random,
        } = self.flush_scan(current_age, startup, flush_guard);

        if startup {
            self.write_startup_info_to_disk();
        }

        // write to disk outside in-mem map read lock
        {
            let mut evictions_age = Vec::with_capacity(evictions_age_possible.len());
            if !evictions_age_possible.is_empty() || !evictions_random.is_empty() {
                let disk = self.bucket.as_ref().unwrap();
                let mut flush_entries_updated_on_disk = 0;
                let exceeds_budget = self.get_exceeds_budget();
                let mut flush_should_evict_us = 0;
                // we don't care about lock time in this metric - bg threads can wait
                let m = Measure::start("flush_update");

                // consider whether to write to disk for all the items we may evict, whether evicting due to age or random
                for (is_random, check_for_eviction_and_dirty) in [
                    (false, &mut evictions_age_possible),
                    (true, &mut evictions_random),
                ] {
                    for (k, v) in check_for_eviction_and_dirty.drain(..) {
                        let mut slot_list = None;
                        if !is_random {
                            let mut mse = Measure::start("flush_should_evict");
                            let (evict_for_age, slot_list_temp) = self.should_evict_from_mem(
                                current_age,
                                &v,
                                startup,
                                true,
                                exceeds_budget,
                            );
                            slot_list = slot_list_temp;
                            mse.stop();
                            flush_should_evict_us += mse.as_us();
                            if evict_for_age {
                                evictions_age.push(k);
                            } else {
                                // not evicting, so don't write, even if dirty
                                continue;
                            }
                        }
                        // if we are evicting it, then we need to update disk if we're dirty
                        if v.clear_dirty() {
                            // step 1: clear the dirty flag
                            // step 2: perform the update on disk based on the fields in the entry
                            // If a parallel operation dirties the item again - even while this flush is occurring,
                            //  the last thing the writer will do, after updating contents, is set_dirty(true)
                            //  That prevents dropping an item from cache before disk is updated to latest in mem.
                            // It is possible that the item in the cache is marked as dirty while these updates are happening. That is ok.
                            //  The dirty will be picked up and the item will be prevented from being evicted.

                            // may have to loop if disk has to grow and we have to retry the write
                            loop {
                                let disk_resize = {
                                    let slot_list = slot_list
                                        .take()
                                        .unwrap_or_else(|| v.slot_list.read().unwrap());
                                    disk.try_write(&k, (&slot_list, v.ref_count()))
                                };
                                match disk_resize {
                                    Ok(_) => {
                                        // successfully written to disk
                                        flush_entries_updated_on_disk += 1;
                                        break;
                                    }
                                    Err(err) => {
                                        // disk needs to resize. This item did not get written. Resize and try again.
                                        let m = Measure::start("flush_grow");
                                        disk.grow(err);
                                        Self::update_time_stat(&self.stats().flush_grow_us, m);
                                    }
                                }
                            }
                        }
                    }
                }
                Self::update_time_stat(&self.stats().flush_update_us, m);
                Self::update_stat(&self.stats().flush_should_evict_us, flush_should_evict_us);
                Self::update_stat(
                    &self.stats().flush_entries_updated_on_disk,
                    flush_entries_updated_on_disk,
                );
                // remove the 'v'
                let evictions_random = evictions_random
                    .into_iter()
                    .map(|(k, _v)| k)
                    .collect::<Vec<_>>();

                let m = Measure::start("flush_evict");
                self.evict_from_cache(evictions_age, current_age, startup, false);
                self.evict_from_cache(evictions_random, current_age, startup, true);
                Self::update_time_stat(&self.stats().flush_evict_us, m);
            }

            if iterate_for_age {
                // completed iteration of the buckets at the current age
                assert_eq!(current_age, self.storage.current_age());
                self.set_has_aged(current_age, can_advance_age);
            }
        }
    }

    /// calculate the estimated size of the in-mem index
    /// return whether the size exceeds the specified budget
    fn get_exceeds_budget(&self) -> bool {
        let in_mem_count = self.stats().count_in_mem.load(Ordering::Relaxed);
        let limit = self.storage.mem_budget_mb;
        let estimate_mem = in_mem_count * Self::approx_size_of_one_entry();
        let exceeds_budget = limit
            .map(|limit| estimate_mem >= limit * 1024 * 1024)
            .unwrap_or_default();
        self.stats()
            .estimate_mem
            .store(estimate_mem as u64, Ordering::Relaxed);
        exceeds_budget
    }
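
    // Worked example (illustrative sizes): with mem_budget_mb = Some(1_000) and
    // approx_size_of_one_entry() == 80 bytes, the budget is exceeded once
    // count_in_mem * 80 >= 1_000 * 1024 * 1024, i.e. at roughly 13.1 million entries.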

    /// for each key in 'keys', look up in map, set age to the future
    fn move_ages_to_future(&self, next_age: Age, current_age: Age, keys: &[Pubkey]) {
        let map = self.map_internal.read().unwrap();
        keys.iter().for_each(|key| {
            if let Some(entry) = map.get(key) {
                entry.try_exchange_age(next_age, current_age);
            }
        });
    }

    // evict keys in 'evictions' from in-mem cache, likely due to age
    fn evict_from_cache(
        &self,
        mut evictions: Vec<Pubkey>,
        current_age: Age,
        startup: bool,
        randomly_evicted: bool,
    ) {
        if evictions.is_empty() {
            return;
        }

        let stop_evictions_changes_at_start = self.get_stop_evictions_changes();
        let next_age_on_failure = self.storage.future_age_to_flush(false);
        if self.get_stop_evictions() {
            // ranges were changed
            self.move_ages_to_future(next_age_on_failure, current_age, &evictions);
            return;
        }

        let mut failed = 0;

        // skip any keys that are held in memory because of ranges being held
        let ranges = self.cache_ranges_held.read().unwrap().clone();
        if !ranges.is_empty() {
            let mut move_age = Vec::default();
            evictions.retain(|k| {
                if ranges.iter().any(|range| range.contains(k)) {
                    // this item is held in mem by range, so don't evict
                    move_age.push(*k);
                    false
                } else {
                    true
                }
            });
            if !move_age.is_empty() {
                failed += move_age.len();
                self.move_ages_to_future(next_age_on_failure, current_age, &move_age);
            }
        }

        let mut evicted = 0;
        // chunk these so we don't hold the write lock too long
        for evictions in evictions.chunks(50) {
            let mut map = self.map_internal.write().unwrap();
            for k in evictions {
                if let Entry::Occupied(occupied) = map.entry(*k) {
                    let v = occupied.get();
                    if Arc::strong_count(v) > 1 {
                        // someone is holding the value arc's ref count and could modify it, so do not evict
                        failed += 1;
                        v.try_exchange_age(next_age_on_failure, current_age);
                        continue;
                    }

                    if v.dirty()
                        || (!randomly_evicted
                            && !Self::should_evict_based_on_age(current_age, v, startup))
                    {
                        // marked dirty or bumped in age after we looked above
                        // these evictions will be handled in later passes (at later ages)
                        // but, at startup, everything is ready to age out if it isn't dirty
                        failed += 1;
                        continue;
                    }

                    if stop_evictions_changes_at_start != self.get_stop_evictions_changes() {
                        // ranges were changed
                        failed += 1;
                        v.try_exchange_age(next_age_on_failure, current_age);
                        continue;
                    }

                    // all conditions for eviction succeeded, so really evict item from in-mem cache
                    evicted += 1;
                    occupied.remove();
                }
            }
            if map.is_empty() {
                map.shrink_to_fit();
            }
        }
        self.stats().sub_mem_count(self.bin, evicted);
        Self::update_stat(&self.stats().flush_entries_evicted_from_mem, evicted as u64);
        Self::update_stat(&self.stats().failed_to_evict, failed as u64);
    }

    pub fn stats(&self) -> &BucketMapHolderStats {
        &self.storage.stats
    }

    fn update_stat(stat: &AtomicU64, value: u64) {
        if value != 0 {
            stat.fetch_add(value, Ordering::Relaxed);
        }
    }

    pub fn update_time_stat(stat: &AtomicU64, mut m: Measure) {
        m.stop();
        let value = m.as_us();
        Self::update_stat(stat, value);
    }
}
1329
1330/// An RAII implementation of a scoped lock for the `flushing_active` atomic flag in
1331/// `InMemAccountsIndex`.  When this structure is dropped (falls out of scope), the flag will be
1332/// cleared (set to false).
1333///
1334/// After successfully locking (calling `FlushGuard::lock()`), pass a reference to the `FlushGuard`
1335/// instance to any function/code that requires that the `flushing_active` flag has been set (to true).
1336#[derive(Debug)]
1337struct FlushGuard<'a> {
1338    flushing: &'a AtomicBool,
1339}
1340
1341impl<'a> FlushGuard<'a> {
1342    /// Set the `flushing` atomic flag to true.  If the flag was already true, then return `None`
1343    /// (so as to not clear the flag erroneously).  Otherwise return `Some(FlushGuard)`.
1344    #[must_use = "if unused, the `flushing` flag will immediately clear"]
1345    fn lock(flushing: &'a AtomicBool) -> Option<Self> {
1346        let already_flushing = flushing.swap(true, Ordering::AcqRel);
1347        // Lazy evaluation is deliberate: an eagerly constructed Self would be dropped when already flushing, erroneously clearing the flag
1348        #[allow(clippy::unnecessary_lazy_evaluations)]
1349        (!already_flushing).then(|| Self { flushing })
1350    }
1351}
1352
1353impl Drop for FlushGuard<'_> {
1354    fn drop(&mut self) {
1355        self.flushing.store(false, Ordering::Release);
1356    }
1357}
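// Illustrative usage sketch (editor's addition; the caller and the flush helper are
// assumed, not taken from this file): the flusher for a bin takes the guard before
// doing flush work, so a concurrent flush attempt on the same bin simply bails out.
//
//     if let Some(flush_guard) = FlushGuard::lock(&self.flushing_active) {
//         self.do_flush_work(&flush_guard); // hypothetical flush body
//     } // guard dropped here; `flushing_active` is cleared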
1358
1359/// Prevents the background flusher from evicting entries from the in-mem accounts index (and
1360/// safely re-enables eviction when dropped).  While evictions are disabled, no entries may be
1361/// evicted.  Once re-enabled, only eligible entries (i.e. those not in a held range) may be evicted.
1362///
1363/// An RAII implementation of a scoped lock for the `stop_evictions` atomic flag/counter in
1364/// `InMemAccountsIndex`.  When this structure is dropped (falls out of scope), the counter will
1365/// decrement and conditionally notify its storage.
1366///
1367/// After successfully locking (calling `EvictionsGuard::lock()`), pass a reference to the
1368/// `EvictionsGuard` instance to any function/code that requires `stop_evictions` to be
1369/// incremented/decremented correctly.
1370#[derive(Debug)]
1371struct EvictionsGuard<'a> {
1372    /// The number of active callers disabling evictions
1373    stop_evictions: &'a AtomicU64,
1374    /// The number of times that evictions have been disabled or enabled
1375    num_state_changes: &'a AtomicU64,
1376    /// Who will be notified after the evictions are re-enabled
1377    storage_notifier: &'a WaitableCondvar,
1378}
1379
1380impl<'a> EvictionsGuard<'a> {
1381    #[must_use = "if unused, this evictions lock will be immediately unlocked"]
1382    fn lock<T: IndexValue>(in_mem_accounts_index: &'a InMemAccountsIndex<T>) -> Self {
1383        Self::lock_with(
1384            &in_mem_accounts_index.stop_evictions,
1385            &in_mem_accounts_index.stop_evictions_changes,
1386            &in_mem_accounts_index.storage.wait_dirty_or_aged,
1387        )
1388    }
1389
1390    #[must_use = "if unused, this evictions lock will be immediately unlocked"]
1391    fn lock_with(
1392        stop_evictions: &'a AtomicU64,
1393        num_state_changes: &'a AtomicU64,
1394        storage_notifier: &'a WaitableCondvar,
1395    ) -> Self {
1396        num_state_changes.fetch_add(1, Ordering::Release);
1397        stop_evictions.fetch_add(1, Ordering::Release);
1398
1399        Self {
1400            stop_evictions,
1401            num_state_changes,
1402            storage_notifier,
1403        }
1404    }
1405}
1406
1407impl Drop for EvictionsGuard<'_> {
1408    fn drop(&mut self) {
1409        let previous_value = self.stop_evictions.fetch_sub(1, Ordering::AcqRel);
1410        debug_assert!(previous_value > 0);
1411
1412        let should_notify = previous_value == 1;
1413        if should_notify {
1414            // stop_evictions went to 0, so this bucket could now be ready to be aged
1415            self.storage_notifier.notify_one();
1416        }
1417
1418        self.num_state_changes.fetch_add(1, Ordering::Release);
1419    }
1420}
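// Illustrative usage sketch (editor's addition; the scan helper is hypothetical):
// holding the guard keeps the background flusher from evicting cached entries;
// dropping it decrements `stop_evictions` and notifies storage if the count reached zero.
//
//     {
//         let _evictions_guard = EvictionsGuard::lock(&in_mem_accounts_index);
//         scan_cached_entries(); // work done while evictions are disabled
//     } // guard dropped: evictions may resume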
1421
1422#[cfg(test)]
1423mod tests {
1424    use {
1425        super::*,
1426        crate::accounts_index::{AccountsIndexConfig, IndexLimitMb, BINS_FOR_TESTING},
1427        itertools::Itertools,
1428    };
1429
1430    fn new_for_test<T: IndexValue>() -> InMemAccountsIndex<T> {
1431        let holder = Arc::new(BucketMapHolder::new(
1432            BINS_FOR_TESTING,
1433            &Some(AccountsIndexConfig::default()),
1434            1,
1435        ));
1436        let bin = 0;
1437        InMemAccountsIndex::new(&holder, bin)
1438    }
1439
1440    fn new_disk_buckets_for_test<T: IndexValue>() -> InMemAccountsIndex<T> {
1441        let holder = Arc::new(BucketMapHolder::new(
1442            BINS_FOR_TESTING,
1443            &Some(AccountsIndexConfig {
1444                index_limit_mb: IndexLimitMb::Limit(1),
1445                ..AccountsIndexConfig::default()
1446            }),
1447            1,
1448        ));
1449        let bin = 0;
1450        let bucket = InMemAccountsIndex::new(&holder, bin);
1451        assert!(bucket.storage.is_disk_index_enabled());
1452        bucket
1453    }
1454
1455    #[test]
1456    fn test_should_evict_from_mem() {
1457        solana_logger::setup();
1458        let bucket = new_for_test::<u64>();
1459        let mut startup = false;
1460        let mut current_age = 0;
1461        let ref_count = 0;
1462        let one_element_slot_list = vec![(0, 0)];
1463        let one_element_slot_list_entry = Arc::new(AccountMapEntryInner::new(
1464            one_element_slot_list,
1465            ref_count,
1466            AccountMapEntryMeta::default(),
1467        ));
1468
1469        // exceeded budget
1470        assert!(
1471            bucket
1472                .should_evict_from_mem(
1473                    current_age,
1474                    &Arc::new(AccountMapEntryInner::new(
1475                        vec![],
1476                        ref_count,
1477                        AccountMapEntryMeta::default()
1478                    )),
1479                    startup,
1480                    false,
1481                    true,
1482                )
1483                .0
1484        );
1485        // empty slot list
1486        assert!(
1487            !bucket
1488                .should_evict_from_mem(
1489                    current_age,
1490                    &Arc::new(AccountMapEntryInner::new(
1491                        vec![],
1492                        ref_count,
1493                        AccountMapEntryMeta::default()
1494                    )),
1495                    startup,
1496                    false,
1497                    false,
1498                )
1499                .0
1500        );
1501        // 1 element slot list
1502        assert!(
1503            bucket
1504                .should_evict_from_mem(
1505                    current_age,
1506                    &one_element_slot_list_entry,
1507                    startup,
1508                    false,
1509                    false,
1510                )
1511                .0
1512        );
1513        // 2 element slot list
1514        assert!(
1515            !bucket
1516                .should_evict_from_mem(
1517                    current_age,
1518                    &Arc::new(AccountMapEntryInner::new(
1519                        vec![(0, 0), (1, 1)],
1520                        ref_count,
1521                        AccountMapEntryMeta::default()
1522                    )),
1523                    startup,
1524                    false,
1525                    false,
1526                )
1527                .0
1528        );
1529
1530        {
1531            let bucket = new_for_test::<f64>();
1532            // 1 element slot list with a CACHED item - f64 acts like cached
1533            assert!(
1534                !bucket
1535                    .should_evict_from_mem(
1536                        current_age,
1537                        &Arc::new(AccountMapEntryInner::new(
1538                            vec![(0, 0.0)],
1539                            ref_count,
1540                            AccountMapEntryMeta::default()
1541                        )),
1542                        startup,
1543                        false,
1544                        false,
1545                    )
1546                    .0
1547            );
1548        }
1549
1550        // 1 element slot list, age is now
1551        assert!(
1552            bucket
1553                .should_evict_from_mem(
1554                    current_age,
1555                    &one_element_slot_list_entry,
1556                    startup,
1557                    false,
1558                    false,
1559                )
1560                .0
1561        );
1562
1563        // 1 element slot list, but not current age
1564        current_age = 1;
1565        assert!(
1566            !bucket
1567                .should_evict_from_mem(
1568                    current_age,
1569                    &one_element_slot_list_entry,
1570                    startup,
1571                    false,
1572                    false,
1573                )
1574                .0
1575        );
1576
1577        // 1 element slot list, but at startup and age not current
1578        startup = true;
1579        assert!(
1580            bucket
1581                .should_evict_from_mem(
1582                    current_age,
1583                    &one_element_slot_list_entry,
1584                    startup,
1585                    false,
1586                    false,
1587                )
1588                .0
1589        );
1590    }
1591
1592    #[test]
1593    fn test_hold_range_in_memory() {
1594        let bucket = new_disk_buckets_for_test::<u64>();
1595        // the range starting at 0x81 below is just some other, narrower range
1596        let all = Pubkey::from([0; 32])..=Pubkey::from([0xff; 32]);
1597        let ranges = [
1598            all.clone(),
1599            Pubkey::from([0x81; 32])..=Pubkey::from([0xff; 32]),
1600        ];
1601        for range in ranges.clone() {
1602            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1603            bucket.hold_range_in_memory(&range, true);
1604            assert_eq!(
1605                bucket.cache_ranges_held.read().unwrap().to_vec(),
1606                vec![range.clone()]
1607            );
1608            {
1609                let evictions_guard = EvictionsGuard::lock(&bucket);
1610                assert!(bucket.add_hold_range_in_memory_if_already_held(&range, &evictions_guard));
1611                bucket.hold_range_in_memory(&range, false);
1612            }
1613            bucket.hold_range_in_memory(&range, false);
1614            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1615            bucket.hold_range_in_memory(&range, true);
1616            assert_eq!(
1617                bucket.cache_ranges_held.read().unwrap().to_vec(),
1618                vec![range.clone()]
1619            );
1620            bucket.hold_range_in_memory(&range, true);
1621            assert_eq!(
1622                bucket.cache_ranges_held.read().unwrap().to_vec(),
1623                vec![range.clone(), range.clone()]
1624            );
1625            bucket.hold_range_in_memory(&ranges[0], true);
1626            assert_eq!(
1627                bucket.cache_ranges_held.read().unwrap().to_vec(),
1628                vec![range.clone(), range.clone(), ranges[0].clone()]
1629            );
1630            bucket.hold_range_in_memory(&range, false);
1631            assert_eq!(
1632                bucket.cache_ranges_held.read().unwrap().to_vec(),
1633                vec![range.clone(), ranges[0].clone()]
1634            );
1635            bucket.hold_range_in_memory(&range, false);
1636            assert_eq!(
1637                bucket.cache_ranges_held.read().unwrap().to_vec(),
1638                vec![ranges[0].clone()]
1639            );
1640            bucket.hold_range_in_memory(&ranges[0].clone(), false);
1641            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1642
1643            // hold all in mem first
1644            assert!(bucket.cache_ranges_held.read().unwrap().is_empty());
1645            bucket.hold_range_in_memory(&all, true);
1646
1647            let evictions_guard = EvictionsGuard::lock(&bucket);
1648            assert!(bucket.add_hold_range_in_memory_if_already_held(&range, &evictions_guard));
1649            bucket.hold_range_in_memory(&range, false);
1650            bucket.hold_range_in_memory(&all, false);
1651        }
1652    }
1653
1654    #[test]
1655    fn test_age() {
1656        solana_logger::setup();
1657        let test = new_for_test::<u64>();
1658        assert!(test.get_should_age(test.storage.current_age()));
1659        assert_eq!(test.storage.count_buckets_flushed(), 0);
1660        test.set_has_aged(0, true);
1661        assert!(!test.get_should_age(test.storage.current_age()));
1662        assert_eq!(test.storage.count_buckets_flushed(), 1);
1663        // simulate rest of buckets aging
1664        for _ in 1..BINS_FOR_TESTING {
1665            assert!(!test.storage.all_buckets_flushed_at_current_age());
1666            test.storage.bucket_flushed_at_current_age(true);
1667        }
1668        assert!(test.storage.all_buckets_flushed_at_current_age());
1669        // advance age
1670        test.storage.increment_age();
1671        assert_eq!(test.storage.current_age(), 1);
1672        assert!(!test.storage.all_buckets_flushed_at_current_age());
1673        assert!(test.get_should_age(test.storage.current_age()));
1674        assert_eq!(test.storage.count_buckets_flushed(), 0);
1675    }
1676
1677    #[test]
1678    fn test_update_slot_list_other() {
1679        solana_logger::setup();
1680        let reclaim = UpsertReclaim::PopulateReclaims;
1681        let new_slot = 0;
1682        let info = 1;
1683        let other_value = info + 1;
1684        let at_new_slot = (new_slot, info);
1685        let unique_other_slot = new_slot + 1;
1686        for other_slot in [Some(new_slot), Some(unique_other_slot), None] {
1687            let mut reclaims = Vec::default();
1688            let mut slot_list = Vec::default();
1689            // upserting into empty slot_list, so always addref
1690            assert!(
1691                InMemAccountsIndex::update_slot_list(
1692                    &mut slot_list,
1693                    new_slot,
1694                    info,
1695                    other_slot,
1696                    &mut reclaims,
1697                    reclaim
1698                ),
1699                "other_slot: {other_slot:?}"
1700            );
1701            assert_eq!(slot_list, vec![at_new_slot]);
1702            assert!(reclaims.is_empty());
1703        }
1704
1705        // replace other
1706        let mut slot_list = vec![(unique_other_slot, other_value)];
1707        let expected_reclaims = slot_list.clone();
1708        let other_slot = Some(unique_other_slot);
1709        let mut reclaims = Vec::default();
1710        assert!(
1711            // upserting into a slot_list that does NOT contain an entry at 'new_slot',
1712            // but DOES contain an entry at 'other_slot', so we do NOT add-ref. The assumption is that 'other_slot' is going away
1713            // and that the previously held add-ref is now used by 'new_slot'
1714            !InMemAccountsIndex::update_slot_list(
1715                &mut slot_list,
1716                new_slot,
1717                info,
1718                other_slot,
1719                &mut reclaims,
1720                reclaim
1721            ),
1722            "other_slot: {other_slot:?}"
1723        );
1724        assert_eq!(slot_list, vec![at_new_slot]);
1725        assert_eq!(reclaims, expected_reclaims);
1726
1727        // replace other and new_slot
1728        let mut slot_list = vec![(unique_other_slot, other_value), (new_slot, other_value)];
1729        let expected_reclaims = slot_list.clone();
1730        let other_slot = Some(unique_other_slot);
1731        // upserting into a slot_list that already contains an entry at 'new_slot', so do NOT addref
1732        let mut reclaims = Vec::default();
1733        assert!(
1734            !InMemAccountsIndex::update_slot_list(
1735                &mut slot_list,
1736                new_slot,
1737                info,
1738                other_slot,
1739                &mut reclaims,
1740                reclaim
1741            ),
1742            "other_slot: {other_slot:?}"
1743        );
1744        assert_eq!(slot_list, vec![at_new_slot]);
1745        assert_eq!(
1746            reclaims,
1747            expected_reclaims.into_iter().rev().collect::<Vec<_>>()
1748        );
1749
1750        // nothing will exist at this slot
1751        let missing_other_slot = unique_other_slot + 1;
1752        let ignored_slot = 10; // bigger than is used elsewhere in the test
1753        let ignored_value = info + 10;
1754
1755        let mut possible_initial_slot_list_contents;
1756        // build a list of possible contents in the slot_list prior to calling 'update_slot_list'
1757        {
1758            // up to 3 account_infos at 'ignored' slots ('ignored' meaning slots other than 'new_slot' and 'other_slot' that could exist in the slot_list initially)
1759            possible_initial_slot_list_contents = (0..3)
1760                .map(|i| (ignored_slot + i, ignored_value + i))
1761                .collect::<Vec<_>>();
1762            // account_info that already exists in the slot_list AT 'new_slot'
1763            possible_initial_slot_list_contents.push(at_new_slot);
1764            // account_info that already exists in the slot_list AT 'other_slot'
1765            possible_initial_slot_list_contents.push((unique_other_slot, other_value));
1766        }
1767
1768        /*
1769         * loop over all possible permutations of 'possible_initial_slot_list_contents'
1770         * some examples:
1771         * []
1772         * [other]
1773         * [other, new_slot]
1774         * [new_slot, other]
1775         * [dummy0, new_slot, dummy1, other] (and all permutation of this order)
1776         * [other, dummy1, new_slot] (and all permutation of this order)
1777         * ...
1778         * [dummy0, new_slot, dummy1, other_slot, dummy2] (and all permutation of this order)
1779         */
1780        let mut attempts = 0;
1781        // loop over each initial size of 'slot_list'
1782        for initial_slot_list_len in 0..=possible_initial_slot_list_contents.len() {
1783            // loop over every permutation of possible_initial_slot_list_contents within a list of len 'initial_slot_list_len'
1784            for content_source_indexes in
1785                (0..possible_initial_slot_list_contents.len()).permutations(initial_slot_list_len)
1786            {
1787                // loop over each possible parameter for 'other_slot'
1788                for other_slot in [
1789                    Some(new_slot),
1790                    Some(unique_other_slot),
1791                    Some(missing_other_slot),
1792                    None,
1793                ] {
1794                    attempts += 1;
1795                    // initialize slot_list prior to call to 'InMemAccountsIndex::update_slot_list'
1796                    // by inserting each possible entry at each possible position
1797                    let mut slot_list = content_source_indexes
1798                        .iter()
1799                        .map(|i| possible_initial_slot_list_contents[*i])
1800                        .collect::<Vec<_>>();
1801                    let mut expected = slot_list.clone();
1802                    let original = slot_list.clone();
1803                    let mut reclaims = Vec::default();
1804
1805                    let result = InMemAccountsIndex::update_slot_list(
1806                        &mut slot_list,
1807                        new_slot,
1808                        info,
1809                        other_slot,
1810                        &mut reclaims,
1811                        reclaim,
1812                    );
1813
1814                    // calculate expected results
1815                    let mut expected_reclaims = Vec::default();
1816                    // addref iff the slot_list did NOT previously contain an entry at 'new_slot' and it also did not contain an entry at 'other_slot'
1817                    let expected_result = !expected
1818                        .iter()
1819                        .any(|(slot, _info)| slot == &new_slot || Some(*slot) == other_slot);
1820                    {
1821                        // this is the logical equivalent of 'InMemAccountsIndex::update_slot_list', but slower (and ignoring addref)
1822                        expected.retain(|(slot, info)| {
1823                            let retain = slot != &new_slot && Some(*slot) != other_slot;
1824                            if !retain {
1825                                expected_reclaims.push((*slot, *info));
1826                            }
1827                            retain
1828                        });
1829                        expected.push((new_slot, info));
1830                    }
1831                    assert_eq!(
1832                        expected_result, result,
1833                        "return value different. other: {other_slot:?}, {expected:?}, {slot_list:?}, original: {original:?}"
1834                    );
1835                    // sort for easy comparison
1836                    expected_reclaims.sort_unstable();
1837                    reclaims.sort_unstable();
1838                    assert_eq!(
1839                        expected_reclaims, reclaims,
1840                        "reclaims different. other: {other_slot:?}, {expected:?}, {slot_list:?}, original: {original:?}"
1841                    );
1842                    // sort for easy comparison
1843                    slot_list.sort_unstable();
1844                    expected.sort_unstable();
1845                    assert_eq!(
1846                        slot_list, expected,
1847                        "slot_list different. other: {other_slot:?}, {expected:?}, {slot_list:?}, original: {original:?}"
1848                    );
1849                }
1850            }
1851        }
1852        assert_eq!(attempts, 1304); // complicated permutations, so make sure we ran the right #
1853    }
1854
1855    #[test]
1856    fn test_flush_guard() {
1857        let flushing_active = AtomicBool::new(false);
1858
1859        {
1860            let flush_guard = FlushGuard::lock(&flushing_active);
1861            assert!(flush_guard.is_some());
1862            assert!(flushing_active.load(Ordering::Acquire));
1863
1864            {
1865                // Trying to lock the FlushGuard again will not succeed.
1866                let flush_guard2 = FlushGuard::lock(&flushing_active);
1867                assert!(flush_guard2.is_none());
1868            }
1869
1870            // The `flushing_active` flag will remain true, even after `flush_guard2` goes out of
1871            // scope (and is dropped).  This ensures `lock()` and `drop()` work harmoniously.
1872            assert!(flushing_active.load(Ordering::Acquire));
1873        }
1874
1875        // After the FlushGuard is dropped, the flag will be cleared.
1876        assert!(!flushing_active.load(Ordering::Acquire));
1877    }
1878
1879    #[test]
1880    fn test_remove_if_slot_list_empty_entry() {
1881        let key = solana_sdk::pubkey::new_rand();
1882        let unknown_key = solana_sdk::pubkey::new_rand();
1883
1884        let test = new_for_test::<u64>();
1885
1886        let mut map = test.map_internal.write().unwrap();
1887
1888        {
1889            // item is NOT in the index at all; remove_if_slot_list_empty_entry should still return true
1890            // make sure it is not initially in the index
1891            let entry = map.entry(unknown_key);
1892            assert!(matches!(entry, Entry::Vacant(_)));
1893            let entry = map.entry(unknown_key);
1894            assert!(test.remove_if_slot_list_empty_entry(entry));
1895            // make sure still not in index
1896            let entry = map.entry(unknown_key);
1897            assert!(matches!(entry, Entry::Vacant(_)));
1898        }
1899
1900        {
1901            // add an entry with an empty slot list
1902            let val = Arc::new(AccountMapEntryInner::<u64>::default());
1903            map.insert(key, val);
1904            let entry = map.entry(key);
1905            assert!(matches!(entry, Entry::Occupied(_)));
1906            // should have removed it since it had an empty slot list
1907            assert!(test.remove_if_slot_list_empty_entry(entry));
1908            let entry = map.entry(key);
1909            assert!(matches!(entry, Entry::Vacant(_)));
1910            // return true - item is not in index at all now
1911            assert!(test.remove_if_slot_list_empty_entry(entry));
1912        }
1913
1914        {
1915            // add an entry with a NON empty slot list - it will NOT get removed
1916            let val = Arc::new(AccountMapEntryInner::<u64>::default());
1917            val.slot_list.write().unwrap().push((1, 1));
1918            map.insert(key, val);
1919            // does NOT remove it since it has a non-empty slot list
1920            let entry = map.entry(key);
1921            assert!(!test.remove_if_slot_list_empty_entry(entry));
1922            let entry = map.entry(key);
1923            assert!(matches!(entry, Entry::Occupied(_)));
1924        }
1925    }
1926
1927    #[test]
1928    fn test_lock_and_update_slot_list() {
1929        let test = AccountMapEntryInner::<u64>::default();
1930        let info = 65;
1931        let mut reclaims = Vec::default();
1932        // first upsert, should increase
1933        let len = InMemAccountsIndex::lock_and_update_slot_list(
1934            &test,
1935            (1, info),
1936            None,
1937            &mut reclaims,
1938            UpsertReclaim::IgnoreReclaims,
1939        );
1940        assert_eq!(test.slot_list.read().unwrap().len(), len);
1941        assert_eq!(len, 1);
1942        // update to different slot, should increase
1943        let len = InMemAccountsIndex::lock_and_update_slot_list(
1944            &test,
1945            (2, info),
1946            None,
1947            &mut reclaims,
1948            UpsertReclaim::IgnoreReclaims,
1949        );
1950        assert_eq!(test.slot_list.read().unwrap().len(), len);
1951        assert_eq!(len, 2);
1952        // update to same slot, should not increase
1953        let len = InMemAccountsIndex::lock_and_update_slot_list(
1954            &test,
1955            (2, info),
1956            None,
1957            &mut reclaims,
1958            UpsertReclaim::IgnoreReclaims,
1959        );
1960        assert_eq!(test.slot_list.read().unwrap().len(), len);
1961        assert_eq!(len, 2);
1962    }
1963}