// solana_runtime/status_cache.rs

1#[cfg(feature = "shuttle-test")]
2use shuttle::sync::{Arc, Mutex};
3use {
4    ahash::{HashMap, HashMapExt as _},
5    log::*,
6    serde::Serialize,
7    solana_accounts_db::ancestors::Ancestors,
8    solana_clock::{Slot, MAX_RECENT_BLOCKHASHES},
9    solana_hash::Hash,
10    std::collections::{hash_map::Entry, HashSet},
11};
12#[cfg(not(feature = "shuttle-test"))]
13use {
14    rand::{thread_rng, Rng},
15    std::sync::{Arc, Mutex},
16};
17
// The maximum number of entries to store in the cache. This is the same as the number of recent
// blockhashes because we automatically reject txs that use older blockhashes so we don't need to
// track those explicitly.
pub const MAX_CACHE_ENTRIES: usize = MAX_RECENT_BLOCKHASHES;

// Only store 20 bytes of the tx keys processed to save some memory.
const CACHED_KEY_SIZE: usize = 20;

// Store forks in a single chunk of memory to avoid another hash lookup.
// Each element is (slot the key was observed on, tx result for that observation).
pub type ForkStatus<T> = Vec<(Slot, T)>;

// The type of the key used in the cache: a CACHED_KEY_SIZE-byte slice of the
// original tx key, taken at a per-blockhash offset.
pub(crate) type KeySlice = [u8; CACHED_KEY_SIZE];

// Maps a truncated key to all the forks it has been observed on.
type KeyMap<T> = HashMap<KeySlice, ForkStatus<T>>;

// Map of Hash and status. The usize is the key offset (index into the full key
// at which the KeySlice was taken) for that blockhash.
pub type Status<T> = Arc<Mutex<HashMap<Hash, (usize, Vec<(KeySlice, T)>)>>>;

// A Map of hash + the highest fork it's been observed on along with
// the key offset and a Map of the key slice + Fork status for that key
type KeyStatusMap<T> = HashMap<Hash, (Slot, usize, KeyMap<T>)>;

// The type used for StatusCache::slot_deltas. See the field definition for more details.
type SlotDeltaMap<T> = HashMap<Slot, Status<T>>;

// The statuses added during a slot, can be used to build on top of a status cache or to
// construct a new one. Usually derived from a status cache's `SlotDeltaMap`.
// The bool marks whether the slot is a root.
pub type SlotDelta<T> = (Slot, bool, Status<T>);
47
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Debug)]
/// Cache of recently processed transaction statuses, used to detect duplicate
/// transactions and to serve status lookups across forks.
pub struct StatusCache<T: Serialize + Clone> {
    // cache[blockhash][tx_key] => [(fork1_slot, tx_result), (fork2_slot, tx_result), ...] used to
    // check if a tx_key was seen on a fork and for rpc to retrieve the tx_result
    cache: KeyStatusMap<T>,
    // Rooted slots; always treated as valid ancestors by get_status(). Trimmed
    // to at most MAX_CACHE_ENTRIES by purge_roots().
    roots: HashSet<Slot>,
    // slot_deltas[slot][blockhash] => [(tx_key, tx_result), ...] used to serialize for snapshots
    // and to rebuild cache[blockhash][tx_key] from a snapshot
    slot_deltas: SlotDeltaMap<T>,
}
59
60impl<T: Serialize + Clone> Default for StatusCache<T> {
61    fn default() -> Self {
62        Self {
63            cache: HashMap::default(),
64            // 0 is always a root
65            roots: HashSet::from([0]),
66            slot_deltas: HashMap::default(),
67        }
68    }
69}
70
71impl<T: Serialize + Clone> StatusCache<T> {
72    /// Clear all entries for a slot.
73    ///
74    /// This is used when a slot is purged from the cache, see
75    /// ReplayStage::purge_unconfirmed_duplicate_slot().
76    ///
77    /// When this is called, it's guaranteed that there are no threads inserting new entries for
78    /// this slot. root_slot_deltas() also never accesses slots that are being cleared because roots
79    /// are never purged.
80    pub fn clear_slot_entries(&mut self, slot: Slot) {
81        let slot_deltas = self.slot_deltas.remove(&slot);
82        if let Some(slot_deltas) = slot_deltas {
83            let slot_deltas = slot_deltas.lock().unwrap();
84            for (blockhash, (_, key_list)) in slot_deltas.iter() {
85                // Any blockhash that exists in self.slot_deltas must also exist
86                // in self.cache, because in self.purge_roots(), when an entry
87                // (b, (max_slot, _, _)) is removed from self.cache, this implies
88                // all entries in self.slot_deltas < max_slot are also removed
89                if let Entry::Occupied(mut o_blockhash_entries) = self.cache.entry(*blockhash) {
90                    let (_, _, all_hash_maps) = o_blockhash_entries.get_mut();
91
92                    for (key_slice, _) in key_list {
93                        if let Entry::Occupied(mut o_key_list) = all_hash_maps.entry(*key_slice) {
94                            let key_list = o_key_list.get_mut();
95                            key_list.retain(|(updated_slot, _)| *updated_slot != slot);
96                            if key_list.is_empty() {
97                                o_key_list.remove_entry();
98                            }
99                        } else {
100                            panic!(
101                                "Map for key must exist if key exists in self.slot_deltas, slot: \
102                                 {slot}"
103                            )
104                        }
105                    }
106
107                    if all_hash_maps.is_empty() {
108                        o_blockhash_entries.remove_entry();
109                    }
110                } else {
111                    panic!("Blockhash must exist if it exists in self.slot_deltas, slot: {slot}")
112                }
113            }
114        }
115    }
116
117    /// Check if the key is in any of the forks in the ancestors set and
118    /// with a certain blockhash.
119    pub fn get_status<K: AsRef<[u8]>>(
120        &self,
121        key: K,
122        transaction_blockhash: &Hash,
123        ancestors: &Ancestors,
124    ) -> Option<(Slot, T)> {
125        let map = self.cache.get(transaction_blockhash)?;
126        let (_, index, keymap) = map;
127        let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1);
128        let index = (*index).min(max_key_index);
129        let key_slice: &[u8; CACHED_KEY_SIZE] =
130            arrayref::array_ref![key.as_ref(), index, CACHED_KEY_SIZE];
131        if let Some(stored_forks) = keymap.get(key_slice) {
132            let res = stored_forks
133                .iter()
134                .find(|(f, _)| ancestors.contains_key(f) || self.roots.contains(f))
135                .cloned();
136            if res.is_some() {
137                return res;
138            }
139        }
140        None
141    }
142
143    /// Search for a key with any blockhash.
144    ///
145    /// Prefer get_status for performance reasons, it doesn't need to search all blockhashes.
146    pub fn get_status_any_blockhash<K: AsRef<[u8]>>(
147        &self,
148        key: K,
149        ancestors: &Ancestors,
150    ) -> Option<(Slot, T)> {
151        let keys: Vec<_> = self.cache.keys().copied().collect();
152
153        for blockhash in keys.iter() {
154            trace!("get_status_any_blockhash: trying {blockhash}");
155            let status = self.get_status(&key, blockhash, ancestors);
156            if status.is_some() {
157                return status;
158            }
159        }
160        None
161    }
162
163    /// Add a known root fork.
164    ///
165    /// Roots are always valid ancestors. After MAX_CACHE_ENTRIES, roots are removed, and any old
166    /// keys are cleared.
167    pub fn add_root(&mut self, fork: Slot) {
168        self.roots.insert(fork);
169        self.purge_roots();
170    }
171
172    pub fn roots(&self) -> &HashSet<Slot> {
173        &self.roots
174    }
175
176    /// Insert a new key using the given blockhash at the given slot.
177    pub fn insert<K: AsRef<[u8]>>(
178        &mut self,
179        transaction_blockhash: &Hash,
180        key: K,
181        slot: Slot,
182        res: T,
183    ) {
184        let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1);
185
186        // Get the cache entry for this blockhash.
187        let (max_slot, key_index, hash_map) =
188            self.cache.entry(*transaction_blockhash).or_insert_with(|| {
189                // DFS tests need deterministic behavior
190                #[cfg(feature = "shuttle-test")]
191                let key_index = 0;
192                #[cfg(not(feature = "shuttle-test"))]
193                let key_index = thread_rng().gen_range(0..max_key_index + 1);
194                (slot, key_index, HashMap::new())
195            });
196
197        // Update the max slot observed to contain txs using this blockhash.
198        *max_slot = std::cmp::max(slot, *max_slot);
199
200        // Grab the key slice.
201        let key_index = (*key_index).min(max_key_index);
202        let mut key_slice = [0u8; CACHED_KEY_SIZE];
203        key_slice.clone_from_slice(&key.as_ref()[key_index..key_index + CACHED_KEY_SIZE]);
204
205        // Insert the slot and tx result into the cache entry associated with
206        // this blockhash and keyslice.
207        let forks = hash_map.entry(key_slice).or_default();
208        forks.push((slot, res.clone()));
209
210        self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res);
211    }
212
213    pub fn purge_roots(&mut self) {
214        if self.roots.len() > MAX_CACHE_ENTRIES {
215            if let Some(min) = self.roots.iter().min().cloned() {
216                self.roots.remove(&min);
217                self.cache.retain(|_, (fork, _, _)| *fork > min);
218                self.slot_deltas.retain(|slot, _| *slot > min);
219            }
220        }
221    }
222
223    #[cfg(feature = "dev-context-only-utils")]
224    pub fn clear(&mut self) {
225        for v in self.cache.values_mut() {
226            v.2 = HashMap::new();
227        }
228
229        self.slot_deltas
230            .iter_mut()
231            .for_each(|(_, status)| status.lock().unwrap().clear());
232    }
233
234    /// Get the statuses for all the root slots.
235    ///
236    /// This is never called concurrently with add_root(), and for a slot to be a root there must be
237    /// no new entries for that slot, so there are no races.
238    ///
239    /// See ReplayStage::handle_new_root() => BankForks::set_root() =>
240    /// BankForks::do_set_root_return_metrics() => root_slot_deltas()
241    pub fn root_slot_deltas(&self) -> Vec<SlotDelta<T>> {
242        self.roots()
243            .iter()
244            .map(|root| {
245                (
246                    *root,
247                    true, // <-- is_root
248                    self.slot_deltas.get(root).cloned().unwrap_or_default(),
249                )
250            })
251            .collect()
252    }
253
254    /// Populate the cache with the slot deltas from a snapshot.
255    ///
256    /// Really badly named method. See load_bank_forks() => ... =>
257    /// rebuild_bank_from_snapshot() => [load slot deltas from snapshot] => append()
258    pub fn append(&mut self, slot_deltas: &[SlotDelta<T>]) {
259        for (slot, is_root, statuses) in slot_deltas {
260            statuses
261                .lock()
262                .unwrap()
263                .iter()
264                .for_each(|(tx_hash, (key_index, statuses))| {
265                    for (key_slice, res) in statuses.iter() {
266                        self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone())
267                    }
268                });
269            if *is_root {
270                self.add_root(*slot);
271            }
272        }
273    }
274
275    fn insert_with_slice(
276        &mut self,
277        transaction_blockhash: &Hash,
278        slot: Slot,
279        key_index: usize,
280        key_slice: [u8; CACHED_KEY_SIZE],
281        res: T,
282    ) {
283        let hash_map =
284            self.cache
285                .entry(*transaction_blockhash)
286                .or_insert((slot, key_index, HashMap::new()));
287        hash_map.0 = std::cmp::max(slot, hash_map.0);
288
289        let forks = hash_map.2.entry(key_slice).or_default();
290        forks.push((slot, res.clone()));
291
292        self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res);
293    }
294
295    // Add this key slice to the list of key slices for this slot and blockhash combo.
296    fn add_to_slot_delta(
297        &mut self,
298        transaction_blockhash: &Hash,
299        slot: Slot,
300        key_index: usize,
301        key_slice: [u8; CACHED_KEY_SIZE],
302        res: T,
303    ) {
304        let mut fork_entry = self.slot_deltas.entry(slot).or_default().lock().unwrap();
305        let (_key_index, hash_entry) = fork_entry
306            .entry(*transaction_blockhash)
307            .or_insert((key_index, vec![]));
308        hash_entry.push((key_slice, res))
309    }
310}
311
#[cfg(test)]
mod tests {
    use {super::*, solana_sha256_hasher::hash, solana_signature::Signature};

    type BankStatusCache = StatusCache<()>;

    impl<T: Serialize + Clone> StatusCache<T> {
        // Rebuild a cache from serialized slot deltas, as done when loading a snapshot.
        fn from_slot_deltas(slot_deltas: &[SlotDelta<T>]) -> Self {
            let mut cache = Self::default();
            cache.append(slot_deltas);
            cache
        }
    }

    // Structural equality for tests: compares roots and, per blockhash/key
    // slice, only the most recent fork entry.
    impl<T: Serialize + Clone + PartialEq> PartialEq for StatusCache<T> {
        fn eq(&self, other: &Self) -> bool {
            self.roots == other.roots
                && self
                    .cache
                    .iter()
                    .all(|(hash, (slot, key_index, hash_map))| {
                        if let Some((other_slot, other_key_index, other_hash_map)) =
                            other.cache.get(hash)
                        {
                            if slot == other_slot && key_index == other_key_index {
                                return hash_map.iter().all(|(slice, fork_map)| {
                                    if let Some(other_fork_map) = other_hash_map.get(slice) {
                                        // all this work just to compare the highest forks in the fork map
                                        // per entry
                                        return fork_map.last() == other_fork_map.last();
                                    }
                                    false
                                });
                            }
                        }
                        false
                    })
        }
    }

    #[test]
    fn test_empty_has_no_sigs() {
        let sig = Signature::default();
        let blockhash = hash(Hash::default().as_ref());
        let status_cache = BankStatusCache::default();
        assert_eq!(
            status_cache.get_status(sig, &blockhash, &Ancestors::default()),
            None
        );
        assert_eq!(
            status_cache.get_status_any_blockhash(sig, &Ancestors::default()),
            None
        );
    }

    #[test]
    fn test_find_sig_with_ancestor_fork() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = vec![(0, 1)].into_iter().collect();
        status_cache.insert(&blockhash, sig, 0, ());
        assert_eq!(
            status_cache.get_status(sig, &blockhash, &ancestors),
            Some((0, ()))
        );
        assert_eq!(
            status_cache.get_status_any_blockhash(sig, &ancestors),
            Some((0, ()))
        );
    }

    #[test]
    fn test_find_sig_without_ancestor_fork() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = Ancestors::default();
        // Slot 1 is neither an ancestor nor a root, so lookups must miss.
        status_cache.insert(&blockhash, sig, 1, ());
        assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
        assert_eq!(status_cache.get_status_any_blockhash(sig, &ancestors), None);
    }

    #[test]
    fn test_find_sig_with_root_ancestor_fork() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = Ancestors::default();
        status_cache.insert(&blockhash, sig, 0, ());
        // Rooted slots count as valid ancestors even with empty `ancestors`.
        status_cache.add_root(0);
        assert_eq!(
            status_cache.get_status(sig, &blockhash, &ancestors),
            Some((0, ()))
        );
    }

    #[test]
    fn test_insert_picks_latest_blockhash_fork() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = vec![(0, 0)].into_iter().collect();
        status_cache.insert(&blockhash, sig, 0, ());
        status_cache.insert(&blockhash, sig, 1, ());
        // Purging roots keeps the blockhash entry because its max observed
        // slot (1) survives the purge of slot 0.
        for i in 0..(MAX_CACHE_ENTRIES + 1) {
            status_cache.add_root(i as u64);
        }
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors)
            .is_some());
    }

    #[test]
    fn test_root_expires() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = Ancestors::default();
        status_cache.insert(&blockhash, sig, 0, ());
        // Adding MAX_CACHE_ENTRIES + 1 roots purges slot 0 and its entries.
        for i in 0..(MAX_CACHE_ENTRIES + 1) {
            status_cache.add_root(i as u64);
        }
        assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
    }

    #[test]
    fn test_clear_signatures_sigs_are_gone() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = Ancestors::default();
        status_cache.insert(&blockhash, sig, 0, ());
        status_cache.add_root(0);
        status_cache.clear();
        assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
    }

    #[test]
    fn test_clear_signatures_insert_works() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let ancestors = Ancestors::default();
        status_cache.add_root(0);
        status_cache.clear();
        status_cache.insert(&blockhash, sig, 0, ());
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors)
            .is_some());
    }

    #[test]
    fn test_signatures_slice() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        status_cache.clear();
        status_cache.insert(&blockhash, sig, 0, ());
        // Recompute the key slice from the stored offset and verify the cache
        // keyed it the same way.
        let (_, index, sig_map) = status_cache.cache.get(&blockhash).unwrap();
        let sig_slice: &[u8; CACHED_KEY_SIZE] =
            arrayref::array_ref![sig.as_ref(), *index, CACHED_KEY_SIZE];
        assert!(sig_map.get(sig_slice).is_some());
    }

    #[test]
    fn test_slot_deltas() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        status_cache.clear();
        status_cache.insert(&blockhash, sig, 0, ());
        assert!(status_cache.roots().contains(&0));
        // Round-tripping through slot deltas must reproduce an equal cache,
        // and must be stable over repeated round trips.
        let slot_deltas = status_cache.root_slot_deltas();
        let cache = StatusCache::from_slot_deltas(&slot_deltas);
        assert_eq!(cache, status_cache);
        let slot_deltas = cache.root_slot_deltas();
        let cache = StatusCache::from_slot_deltas(&slot_deltas);
        assert_eq!(cache, status_cache);
    }

    #[test]
    fn test_roots_deltas() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let blockhash2 = hash(blockhash.as_ref());
        status_cache.insert(&blockhash, sig, 0, ());
        status_cache.insert(&blockhash, sig, 1, ());
        status_cache.insert(&blockhash2, sig, 1, ());
        for i in 0..(MAX_CACHE_ENTRIES + 1) {
            status_cache.add_root(i as u64);
        }
        // Only slot 1's delta survives the purge of slot 0.
        assert_eq!(status_cache.slot_deltas.len(), 1);
        assert!(status_cache.slot_deltas.contains_key(&1));
        let slot_deltas = status_cache.root_slot_deltas();
        let cache = StatusCache::from_slot_deltas(&slot_deltas);
        assert_eq!(cache, status_cache);
    }

    #[test]
    #[allow(clippy::assertions_on_constants)]
    fn test_age_sanity() {
        assert!(MAX_CACHE_ENTRIES <= MAX_RECENT_BLOCKHASHES);
    }

    #[test]
    fn test_clear_slot_signatures() {
        let sig = Signature::default();
        let mut status_cache = BankStatusCache::default();
        let blockhash = hash(Hash::default().as_ref());
        let blockhash2 = hash(blockhash.as_ref());
        status_cache.insert(&blockhash, sig, 0, ());
        status_cache.insert(&blockhash, sig, 1, ());
        status_cache.insert(&blockhash2, sig, 1, ());

        let mut ancestors0 = Ancestors::default();
        ancestors0.insert(0, 0);
        let mut ancestors1 = Ancestors::default();
        ancestors1.insert(1, 0);

        // Clear slot 0 related data
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors0)
            .is_some());
        status_cache.clear_slot_entries(0);
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors0)
            .is_none());
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors1)
            .is_some());
        assert!(status_cache
            .get_status(sig, &blockhash2, &ancestors1)
            .is_some());

        // Check that the slot delta for slot 0 is gone, but slot 1 still
        // exists
        assert!(!status_cache.slot_deltas.contains_key(&0));
        assert!(status_cache.slot_deltas.contains_key(&1));

        // Clear slot 1 related data
        status_cache.clear_slot_entries(1);
        assert!(status_cache.slot_deltas.is_empty());
        assert!(status_cache
            .get_status(sig, &blockhash, &ancestors1)
            .is_none());
        assert!(status_cache
            .get_status(sig, &blockhash2, &ancestors1)
            .is_none());
        assert!(status_cache.cache.is_empty());
    }

    // Status cache uses a random key offset for each blockhash. Ensure that shorter
    // keys can still be used if the offset is greater than the key length.
    #[test]
    fn test_different_sized_keys() {
        let mut status_cache = BankStatusCache::default();
        let ancestors = vec![(0, 0)].into_iter().collect();
        let blockhash = Hash::default();
        for _ in 0..100 {
            let blockhash = hash(blockhash.as_ref());
            let sig_key = Signature::default();
            let hash_key = Hash::new_unique();
            status_cache.insert(&blockhash, sig_key, 0, ());
            status_cache.insert(&blockhash, hash_key, 0, ());
            assert!(status_cache
                .get_status(sig_key, &blockhash, &ancestors)
                .is_some());
            assert!(status_cache
                .get_status(hash_key, &blockhash, &ancestors)
                .is_some());
        }
    }
}
587
#[cfg(all(test, feature = "shuttle-test"))]
mod shuttle_tests {
    use {super::*, shuttle::sync::RwLock};

    type BankStatusCache = RwLock<StatusCache<()>>;

    // DFS iteration bounds: None means exhaustive exploration of all
    // interleavings; random modes run the given number of schedules.
    const CLEAR_DFS_ITERATIONS: Option<usize> = None;
    const CLEAR_RANDOM_ITERATIONS: usize = 20000;
    const PURGE_DFS_ITERATIONS: Option<usize> = None;
    const PURGE_RANDOM_ITERATIONS: usize = 8000;
    const INSERT_DFS_ITERATIONS: Option<usize> = Some(20000);
    const INSERT_RANDOM_ITERATIONS: usize = 20000;

    fn do_test_shuttle_clear_slots_blockhash_overlap() {
        let status_cache = Arc::new(BankStatusCache::default());

        let blockhash1 = Hash::new_from_array([1; 32]);

        let key1 = Hash::new_from_array([3; 32]);
        let key2 = Hash::new_from_array([4; 32]);

        status_cache
            .write()
            .unwrap()
            .insert(&blockhash1, key1, 1, ());
        let th_clear = shuttle::thread::spawn({
            let status_cache = status_cache.clone();
            move || {
                status_cache.write().unwrap().clear_slot_entries(1);
            }
        });

        let th_insert = shuttle::thread::spawn({
            let status_cache = status_cache.clone();
            move || {
                // insert an entry for slot 2 under the same blockhash while
                // slot 1 is cleared concurrently; it must not be removed
                status_cache
                    .write()
                    .unwrap()
                    .insert(&blockhash1, key2, 2, ());
            }
        });

        th_clear.join().unwrap();
        th_insert.join().unwrap();

        let mut ancestors2 = Ancestors::default();
        ancestors2.insert(2, 0);

        // regardless of interleaving, the slot 2 entry must survive
        assert!(status_cache
            .read()
            .unwrap()
            .get_status(key2, &blockhash1, &ancestors2)
            .is_some());
    }
    #[test]
    fn test_shuttle_clear_slots_blockhash_overlap_random() {
        shuttle::check_random(
            do_test_shuttle_clear_slots_blockhash_overlap,
            CLEAR_RANDOM_ITERATIONS,
        );
    }

    #[test]
    fn test_shuttle_clear_slots_blockhash_overlap_dfs() {
        shuttle::check_dfs(
            do_test_shuttle_clear_slots_blockhash_overlap,
            CLEAR_DFS_ITERATIONS,
        );
    }

    // unlike clear_slot_entries(), purge_slots() can't overlap with regular blockhashes since
    // they'd have expired by the time roots are old enough to be purged. However, nonces don't
    // expire, so they can overlap.
    fn do_test_shuttle_purge_nonce_overlap() {
        let status_cache = Arc::new(BankStatusCache::default());
        // fill the cache so that the next add_root() will purge the oldest root
        for i in 0..MAX_CACHE_ENTRIES {
            status_cache.write().unwrap().add_root(i as u64);
        }

        let blockhash1 = Hash::new_from_array([1; 32]);

        let key1 = Hash::new_from_array([3; 32]);
        let key2 = Hash::new_from_array([4; 32]);

        // this slot/key is going to get purged when the th_purge thread calls add_root()
        status_cache
            .write()
            .unwrap()
            .insert(&blockhash1, key1, 0, ());

        let th_purge = shuttle::thread::spawn({
            let status_cache = status_cache.clone();
            move || {
                status_cache
                    .write()
                    .unwrap()
                    .add_root(MAX_CACHE_ENTRIES as Slot + 1);
            }
        });

        let th_insert = shuttle::thread::spawn({
            let status_cache = status_cache.clone();
            move || {
                // insert an entry for a blockhash that gets concurrently purged
                status_cache.write().unwrap().insert(
                    &blockhash1,
                    key2,
                    MAX_CACHE_ENTRIES as Slot + 2,
                    (),
                );
            }
        });
        th_purge.join().unwrap();
        th_insert.join().unwrap();

        let mut ancestors2 = Ancestors::default();
        ancestors2.insert(MAX_CACHE_ENTRIES as Slot + 2, 0);

        // key1 (slot 0) must be purged; key2 (newer slot) must survive
        assert!(status_cache
            .read()
            .unwrap()
            .get_status(key1, &blockhash1, &ancestors2)
            .is_none());
        assert!(status_cache
            .read()
            .unwrap()
            .get_status(key2, &blockhash1, &ancestors2)
            .is_some());
    }

    #[test]
    fn test_shuttle_purge_nonce_overlap_random() {
        shuttle::check_random(do_test_shuttle_purge_nonce_overlap, PURGE_RANDOM_ITERATIONS);
    }

    #[test]
    fn test_shuttle_purge_nonce_overlap_dfs() {
        shuttle::check_dfs(do_test_shuttle_purge_nonce_overlap, PURGE_DFS_ITERATIONS);
    }

    fn do_test_shuttle_concurrent_inserts() {
        let status_cache = Arc::new(BankStatusCache::default());
        let blockhash1 = Hash::new_from_array([42; 32]);
        let blockhash2 = Hash::new_from_array([43; 32]);
        const N_INSERTS: u8 = 50;

        // spread inserts across slots 1..=3 and two blockhashes
        let mut handles = Vec::with_capacity(N_INSERTS as usize);
        for i in 0..N_INSERTS {
            let status_cache = status_cache.clone();
            let slot = (i % 3) + 1;
            let bh = if i % 2 == 0 { blockhash1 } else { blockhash2 };
            handles.push(shuttle::thread::spawn(move || {
                let key = Hash::new_from_array([i; 32]);
                status_cache
                    .write()
                    .unwrap()
                    .insert(&bh, key, slot as Slot, ());
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let mut ancestors = Ancestors::default();
        ancestors.insert(1, 0);
        ancestors.insert(2, 0);
        ancestors.insert(3, 0);

        // verify all N_INSERTS inserts are visible
        for i in 0..N_INSERTS {
            let key = Hash::new_from_array([i; 32]);
            let bh = if i % 2 == 0 { blockhash1 } else { blockhash2 };
            assert!(
                status_cache
                    .read()
                    .unwrap()
                    .get_status(key, &bh, &ancestors)
                    .is_some(),
                "missing key {}",
                i
            );
        }
    }

    #[test]
    fn test_shuttle_concurrent_inserts_dfs() {
        shuttle::check_dfs(do_test_shuttle_concurrent_inserts, INSERT_DFS_ITERATIONS);
    }

    #[test]
    fn test_shuttle_concurrent_inserts_random() {
        shuttle::check_random(do_test_shuttle_concurrent_inserts, INSERT_RANDOM_ITERATIONS);
    }
}