solana_ledger/
blockstore.rs

//! The `blockstore` module provides functions for parallel verification of the
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.

use {
    crate::{
        ancestor_iterator::AncestorIterator,
        blockstore::column::{columns as cf, Column, ColumnIndexDeprecation},
        blockstore_db::{IteratorDirection, IteratorMode, LedgerColumn, Rocks, WriteBatch},
        blockstore_meta::*,
        blockstore_metrics::BlockstoreRpcApiMetrics,
        blockstore_options::{
            BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
        },
        blockstore_processor::BlockstoreProcessorError,
        leader_schedule_cache::LeaderScheduleCache,
        next_slots_iterator::NextSlotsIterator,
        shred::{
            self, max_ticks_per_n_shreds, ErasureSetId, ProcessShredsStats, ReedSolomonCache,
            Shred, ShredData, ShredId, ShredType, Shredder, DATA_SHREDS_PER_FEC_BLOCK,
        },
        slot_stats::{ShredSource, SlotsStats},
        transaction_address_lookup_table_scanner::scan_transaction,
    },
    assert_matches::debug_assert_matches,
    bincode::{deserialize, serialize},
    crossbeam_channel::{bounded, Receiver, Sender, TrySendError},
    dashmap::DashSet,
    itertools::Itertools,
    log::*,
    rand::Rng,
    rayon::iter::{IntoParallelIterator, ParallelIterator},
    rocksdb::{DBRawIterator, LiveFile},
    solana_accounts_db::hardened_unpack::unpack_genesis_archive,
    solana_entry::entry::{create_ticks, Entry},
    solana_measure::measure::Measure,
    solana_metrics::{
        datapoint_error,
        poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo},
    },
    solana_runtime::bank::Bank,
    solana_sdk::{
        account::ReadableAccount,
        address_lookup_table::state::AddressLookupTable,
        clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND},
        feature_set::FeatureSet,
        genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE},
        hash::Hash,
        pubkey::Pubkey,
        signature::{Keypair, Signature, Signer},
        timing::timestamp,
        transaction::{SanitizedVersionedTransaction, VersionedTransaction},
    },
    solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta},
    solana_transaction_status::{
        ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, Rewards,
        RewardsAndNumPartitions, TransactionStatusMeta, TransactionWithStatusMeta,
        VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries,
        VersionedTransactionWithStatusMeta,
    },
    std::{
        borrow::Cow,
        cell::RefCell,
        cmp,
        collections::{
            btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, BTreeSet,
            HashMap, HashSet, VecDeque,
        },
        convert::TryInto,
        fmt::Write,
        fs::{self, File},
        io::{Error as IoError, ErrorKind},
        ops::{Bound, Range},
        path::{Path, PathBuf},
        rc::Rc,
        sync::{
            atomic::{AtomicBool, AtomicU64, Ordering},
            Arc, Mutex, RwLock,
        },
    },
    tar,
    tempfile::{Builder, TempDir},
    thiserror::Error,
    trees::{Tree, TreeWalk},
};
pub mod blockstore_purge;
pub mod column;
pub mod error;
#[cfg(test)]
use static_assertions::const_assert_eq;
pub use {
    crate::{
        blockstore::error::{BlockstoreError, Result},
        blockstore_db::{default_num_compaction_threads, default_num_flush_threads},
        blockstore_meta::{OptimisticSlotMetaVersioned, SlotMeta},
        blockstore_metrics::BlockstoreInsertionMetrics,
    },
    blockstore_purge::PurgeType,
    rocksdb::properties as RocksProperties,
};

pub const MAX_REPLAY_WAKE_UP_SIGNALS: usize = 1;
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;

// An upper bound on maximum number of data shreds we can handle in a slot
// 32K shreds would allow ~320K peak TPS
// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec)
pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;

pub type CompletedSlotsSender = Sender<Vec<Slot>>;
pub type CompletedSlotsReceiver = Receiver<Vec<Slot>>;

// Contiguous, sorted and non-empty ranges of shred indices:
//     completed_ranges[i].start < completed_ranges[i].end
//     completed_ranges[i].end  == completed_ranges[i + 1].start
// The ranges represent data shred indices that can reconstruct a Vec<Entry>.
// In particular, the data shred at index
//     completed_ranges[i].end - 1
// has DATA_COMPLETE_SHRED flag.
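// For example, completed_ranges == [0..3, 3..7] means that data shreds 0..=2
// reconstruct one Vec<Entry> and shreds 3..=6 reconstruct the next; the shreds
// at indices 2 and 6 carry the DATA_COMPLETE_SHRED flag.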
type CompletedRanges = Vec<Range<u32>>;

#[derive(Default)]
pub struct SignatureInfosForAddress {
    pub infos: Vec<ConfirmedTransactionStatusWithSignature>,
    pub found_before: bool,
}

#[derive(Error, Debug)]
enum InsertDataShredError {
    #[error("Data shred already exists in Blockstore")]
    Exists,
    #[error("Invalid data shred")]
    InvalidShred,
    #[error(transparent)]
    BlockstoreError(#[from] BlockstoreError),
}

#[derive(Eq, PartialEq, Debug, Clone)]
pub enum PossibleDuplicateShred {
    Exists(Shred), // Blockstore has another shred in its spot
    // The index of this shred conflicts with `slot_meta.last_index`
    LastIndexConflict(
        Shred,          // original
        shred::Payload, // conflict
    ),
    // The coding shred has a conflict in the erasure_meta
    ErasureConflict(
        Shred,          // original
        shred::Payload, // conflict
    ),
    // Merkle root conflict in the same fec set
    MerkleRootConflict(
        Shred,          // original
        shred::Payload, // conflict
    ),
    // Merkle root chaining conflict with previous fec set
    ChainedMerkleRootConflict(
        Shred,          // original
        shred::Payload, // conflict
    ),
}

impl PossibleDuplicateShred {
    pub fn slot(&self) -> Slot {
        match self {
            Self::Exists(shred) => shred.slot(),
            Self::LastIndexConflict(shred, _) => shred.slot(),
            Self::ErasureConflict(shred, _) => shred.slot(),
            Self::MerkleRootConflict(shred, _) => shred.slot(),
            Self::ChainedMerkleRootConflict(shred, _) => shred.slot(),
        }
    }
}

enum WorkingEntry<T> {
    Dirty(T), // Value has been modified with respect to the blockstore column
    Clean(T), // Value matches what is currently in the blockstore column
}

impl<T> WorkingEntry<T> {
    fn should_write(&self) -> bool {
        matches!(self, Self::Dirty(_))
    }
}

impl<T> AsRef<T> for WorkingEntry<T> {
    fn as_ref(&self) -> &T {
        match self {
            Self::Dirty(value) => value,
            Self::Clean(value) => value,
        }
    }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct LastFECSetCheckResults {
    last_fec_set_merkle_root: Option<Hash>,
    is_retransmitter_signed: bool,
}

impl LastFECSetCheckResults {
    fn get_last_fec_set_merkle_root(
        &self,
        feature_set: &FeatureSet,
    ) -> std::result::Result<Option<Hash>, BlockstoreProcessorError> {
        if feature_set.is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id())
            && self.last_fec_set_merkle_root.is_none()
        {
            return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
        } else if feature_set
            .is_active(&solana_sdk::feature_set::vote_only_retransmitter_signed_fec_sets::id())
            && !self.is_retransmitter_signed
        {
            return Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet);
        }
        Ok(self.last_fec_set_merkle_root)
    }
}

pub struct InsertResults {
    completed_data_set_infos: Vec<CompletedDataSetInfo>,
    duplicate_shreds: Vec<PossibleDuplicateShred>,
}

225/// A "complete data set" is a range of [`Shred`]s that combined in sequence carry a single
/// serialized [`Vec<Entry>`].
///
/// Services such as the `WindowService` for a TVU, and `ReplayStage` for a TPU, piece together
/// these sets by inserting shreds via direct or indirect calls to
/// [`Blockstore::insert_shreds_handle_duplicate()`].
///
/// `solana_core::completed_data_sets_service::CompletedDataSetsService` is the main receiver of
/// `CompletedDataSetInfo`.
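///
/// For example, `CompletedDataSetInfo { slot: 42, indices: 0..7 }` indicates
/// that the data shreds at indices 0 through 6 (inclusive) of slot 42
/// together deserialize into a single `Vec<Entry>`.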
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CompletedDataSetInfo {
    /// [`Slot`] to which the [`Shred`]s in this set belong.
    pub slot: Slot,
    /// Data [`Shred`]s' indices in this set.
    pub indices: Range<u32>,
}

pub struct BlockstoreSignals {
    pub blockstore: Blockstore,
    pub ledger_signal_receiver: Receiver<bool>,
    pub completed_slots_receiver: CompletedSlotsReceiver,
}

// ledger window
pub struct Blockstore {
    ledger_path: PathBuf,
    db: Arc<Rocks>,
    // Column families
    address_signatures_cf: LedgerColumn<cf::AddressSignatures>,
    bank_hash_cf: LedgerColumn<cf::BankHash>,
    block_height_cf: LedgerColumn<cf::BlockHeight>,
    blocktime_cf: LedgerColumn<cf::Blocktime>,
    code_shred_cf: LedgerColumn<cf::ShredCode>,
    data_shred_cf: LedgerColumn<cf::ShredData>,
    dead_slots_cf: LedgerColumn<cf::DeadSlots>,
    duplicate_slots_cf: LedgerColumn<cf::DuplicateSlots>,
    erasure_meta_cf: LedgerColumn<cf::ErasureMeta>,
    index_cf: LedgerColumn<cf::Index>,
    merkle_root_meta_cf: LedgerColumn<cf::MerkleRootMeta>,
    meta_cf: LedgerColumn<cf::SlotMeta>,
    optimistic_slots_cf: LedgerColumn<cf::OptimisticSlots>,
    orphans_cf: LedgerColumn<cf::Orphans>,
    perf_samples_cf: LedgerColumn<cf::PerfSamples>,
    program_costs_cf: LedgerColumn<cf::ProgramCosts>,
    rewards_cf: LedgerColumn<cf::Rewards>,
    roots_cf: LedgerColumn<cf::Root>,
    transaction_memos_cf: LedgerColumn<cf::TransactionMemos>,
    transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
    transaction_status_index_cf: LedgerColumn<cf::TransactionStatusIndex>,

    highest_primary_index_slot: RwLock<Option<Slot>>,
    max_root: AtomicU64,
    insert_shreds_lock: Mutex<()>,
    new_shreds_signals: Mutex<Vec<Sender<bool>>>,
    completed_slots_senders: Mutex<Vec<CompletedSlotsSender>>,
    pub shred_timing_point_sender: Option<PohTimingSender>,
    pub lowest_cleanup_slot: RwLock<Slot>,
    pub slots_stats: SlotsStats,
    rpc_api_metrics: BlockstoreRpcApiMetrics,
}

pub struct IndexMetaWorkingSetEntry {
    index: Index,
    // true only if at least one shred for this Index was inserted since the time this
    // struct was created
    did_insert_occur: bool,
}

/// The in-memory data structure for updating entries in the column family
/// [`cf::SlotMeta`].
pub struct SlotMetaWorkingSetEntry {
    /// The dirty version of the `SlotMeta` which might not be persisted
    /// to the blockstore yet.
    new_slot_meta: Rc<RefCell<SlotMeta>>,
    /// The latest version of the `SlotMeta` that was persisted in the
    /// blockstore.  If None, it means the current slot is new to the
    /// blockstore.
    old_slot_meta: Option<SlotMeta>,
    /// True only if at least one shred for this SlotMeta was inserted since
    /// this struct was created.
    did_insert_occur: bool,
}

struct ShredInsertionTracker {
    // Map which contains data shreds that have just been inserted.
    just_inserted_shreds: HashMap<ShredId, Shred>,
    // In-memory map that maintains the dirty copy of the erasure meta.  It will
    // later be written to `cf::ErasureMeta`
    erasure_metas: BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
    // In-memory map that maintains the dirty copy of the merkle root meta. It
    // will later be written to `cf::MerkleRootMeta`
    merkle_root_metas: HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
    // In-memory map that maintains the dirty copy of the slot meta.  It will
    // later be written to `cf::SlotMeta`
    slot_meta_working_set: HashMap<u64, SlotMetaWorkingSetEntry>,
    // In-memory map that maintains the dirty copy of the index meta.  It will
    // later be written to `cf::Index`
    index_working_set: HashMap<u64, IndexMetaWorkingSetEntry>,
    duplicate_shreds: Vec<PossibleDuplicateShred>,
    // Collection of the current blockstore writes which will be committed
    // atomically.
    write_batch: WriteBatch,
    // Time spent on loading or creating the index meta entry from the db
    index_meta_time_us: u64,
    // Collection of recently completed data sets (data portion of erasure batch)
    newly_completed_data_sets: Vec<CompletedDataSetInfo>,
}

impl ShredInsertionTracker {
    fn new(shred_num: usize, write_batch: WriteBatch) -> Self {
        Self {
            just_inserted_shreds: HashMap::with_capacity(shred_num),
            erasure_metas: BTreeMap::new(),
            merkle_root_metas: HashMap::new(),
            slot_meta_working_set: HashMap::new(),
            index_working_set: HashMap::new(),
            duplicate_shreds: vec![],
            write_batch,
            index_meta_time_us: 0,
            newly_completed_data_sets: vec![],
        }
    }
}

impl SlotMetaWorkingSetEntry {
    /// Construct a new SlotMetaWorkingSetEntry with the specified `new_slot_meta`
    /// and `old_slot_meta`.  `did_insert_occur` is set to false.
    fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
        Self {
            new_slot_meta,
            old_slot_meta,
            did_insert_occur: false,
        }
    }
}

pub fn banking_trace_path(path: &Path) -> PathBuf {
    path.join("banking_trace")
}

pub fn banking_retrace_path(path: &Path) -> PathBuf {
    path.join("banking_retrace")
}

impl Blockstore {
    pub fn ledger_path(&self) -> &PathBuf {
        &self.ledger_path
    }

    pub fn banking_trace_path(&self) -> PathBuf {
        banking_trace_path(&self.ledger_path)
    }

    pub fn banking_retracer_path(&self) -> PathBuf {
        banking_retrace_path(&self.ledger_path)
    }

    /// Opens a Ledger in directory, provides "infinite" window of shreds
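    ///
    /// # Example
    ///
    /// A minimal usage sketch; the path and error handling are illustrative:
    /// ```ignore
    /// use solana_ledger::blockstore::Blockstore;
    ///
    /// let blockstore = Blockstore::open(std::path::Path::new("/path/to/ledger"))?;
    /// let slot_meta = blockstore.meta(0)?; // SlotMeta for slot 0, if present
    /// ```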
    pub fn open(ledger_path: &Path) -> Result<Blockstore> {
        Self::do_open(ledger_path, BlockstoreOptions::default())
    }

    pub fn open_with_options(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
        Self::do_open(ledger_path, options)
    }

    fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
        fs::create_dir_all(ledger_path)?;
        let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL);

        adjust_ulimit_nofile(options.enforce_ulimit_nofile)?;

        // Open the database
        let mut measure = Measure::start("blockstore open");
        info!("Opening blockstore at {:?}", blockstore_path);
        let db = Arc::new(Rocks::open(blockstore_path, options)?);

        let address_signatures_cf = db.column();
        let bank_hash_cf = db.column();
        let block_height_cf = db.column();
        let blocktime_cf = db.column();
        let code_shred_cf = db.column();
        let data_shred_cf = db.column();
        let dead_slots_cf = db.column();
        let duplicate_slots_cf = db.column();
        let erasure_meta_cf = db.column();
        let index_cf = db.column();
        let merkle_root_meta_cf = db.column();
        let meta_cf = db.column();
        let optimistic_slots_cf = db.column();
        let orphans_cf = db.column();
        let perf_samples_cf = db.column();
        let program_costs_cf = db.column();
        let rewards_cf = db.column();
        let roots_cf = db.column();
        let transaction_memos_cf = db.column();
        let transaction_status_cf = db.column();
        let transaction_status_index_cf = db.column();

        // Get max root or 0 if it doesn't exist
        let max_root = roots_cf
            .iter(IteratorMode::End)?
            .next()
            .map(|(slot, _)| slot)
            .unwrap_or(0);
        let max_root = AtomicU64::new(max_root);

        measure.stop();
        info!("Opening blockstore done; {measure}");
        let blockstore = Blockstore {
            ledger_path: ledger_path.to_path_buf(),
            db,
            address_signatures_cf,
            bank_hash_cf,
            block_height_cf,
            blocktime_cf,
            code_shred_cf,
            data_shred_cf,
            dead_slots_cf,
            duplicate_slots_cf,
            erasure_meta_cf,
            index_cf,
            merkle_root_meta_cf,
            meta_cf,
            optimistic_slots_cf,
            orphans_cf,
            perf_samples_cf,
            program_costs_cf,
            rewards_cf,
            roots_cf,
            transaction_memos_cf,
            transaction_status_cf,
            transaction_status_index_cf,
            highest_primary_index_slot: RwLock::<Option<Slot>>::default(),
            new_shreds_signals: Mutex::default(),
            completed_slots_senders: Mutex::default(),
            shred_timing_point_sender: None,
            insert_shreds_lock: Mutex::<()>::default(),
            max_root,
            lowest_cleanup_slot: RwLock::<Slot>::default(),
            slots_stats: SlotsStats::default(),
            rpc_api_metrics: BlockstoreRpcApiMetrics::default(),
        };
        blockstore.cleanup_old_entries()?;
        blockstore.update_highest_primary_index_slot()?;

        Ok(blockstore)
    }

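    /// Like [`Blockstore::open_with_options`], but also registers channels
    /// that signal when new shreds are inserted and when slots complete.
    ///
    /// A usage sketch (receiver handling is illustrative):
    /// ```ignore
    /// let BlockstoreSignals {
    ///     blockstore,
    ///     ledger_signal_receiver,
    ///     completed_slots_receiver,
    /// } = Blockstore::open_with_signal(ledger_path, BlockstoreOptions::default())?;
    /// // Blocks until at least one batch of new shreds has been inserted.
    /// let _ = ledger_signal_receiver.recv();
    /// ```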
    pub fn open_with_signal(
        ledger_path: &Path,
        options: BlockstoreOptions,
    ) -> Result<BlockstoreSignals> {
        let blockstore = Self::open_with_options(ledger_path, options)?;
        let (ledger_signal_sender, ledger_signal_receiver) = bounded(MAX_REPLAY_WAKE_UP_SIGNALS);
        let (completed_slots_sender, completed_slots_receiver) =
            bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL);

        blockstore.add_new_shred_signal(ledger_signal_sender);
        blockstore.add_completed_slots_signal(completed_slots_sender);

        Ok(BlockstoreSignals {
            blockstore,
            ledger_signal_receiver,
            completed_slots_receiver,
        })
    }

    pub fn add_tree(
        &self,
        forks: Tree<Slot>,
        is_orphan: bool,
        is_slot_complete: bool,
        num_ticks: u64,
        starting_hash: Hash,
    ) {
        let mut walk = TreeWalk::from(forks);
        let mut blockhashes = HashMap::new();
        while let Some(visit) = walk.get() {
            let slot = *visit.node().data();
            if self.meta(slot).unwrap().is_some() && self.orphan(slot).unwrap().is_none() {
                // If slot exists in blockstore and is not an orphan, then skip it
                walk.forward();
                continue;
            }
            let parent = walk.get_parent().map(|n| *n.data());
            if parent.is_some() || !is_orphan {
                let parent_hash = parent
                    // parent won't exist for first node in a tree where
                    // `is_orphan == true`
                    .and_then(|parent| blockhashes.get(&parent))
                    .unwrap_or(&starting_hash);
                let mut entries = create_ticks(
                    num_ticks * (std::cmp::max(1, slot - parent.unwrap_or(slot))),
                    0,
                    *parent_hash,
                );
                blockhashes.insert(slot, entries.last().unwrap().hash);
                if !is_slot_complete {
                    entries.pop().unwrap();
                }
                let shreds = entries_to_test_shreds(
                    &entries,
                    slot,
                    parent.unwrap_or(slot),
                    is_slot_complete,
                    0,
                    true, // merkle_variant
                );
                self.insert_shreds(shreds, None, false).unwrap();
            }
            walk.forward();
        }
    }

    /// Deletes the blockstore at the specified path.
    ///
    /// Note that if the `ledger_path` has multiple rocksdb instances, this
    /// function will destroy all of them.
    pub fn destroy(ledger_path: &Path) -> Result<()> {
        // Database::destroy() fails if the root directory doesn't exist
        fs::create_dir_all(ledger_path)?;
        Rocks::destroy(&Path::new(ledger_path).join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL))
    }

    /// Returns the SlotMeta of the specified slot.
    pub fn meta(&self, slot: Slot) -> Result<Option<SlotMeta>> {
        self.meta_cf.get(slot)
    }

    /// Returns true if the specified slot is full.
    pub fn is_full(&self, slot: Slot) -> bool {
        if let Ok(Some(meta)) = self.meta_cf.get(slot) {
            return meta.is_full();
        }
        false
    }

    fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result<Option<ErasureMeta>> {
        let (slot, fec_set_index) = erasure_set.store_key();
        self.erasure_meta_cf.get((slot, u64::from(fec_set_index)))
    }

    #[cfg(test)]
    fn put_erasure_meta(
        &self,
        erasure_set: ErasureSetId,
        erasure_meta: &ErasureMeta,
    ) -> Result<()> {
        let (slot, fec_set_index) = erasure_set.store_key();
        self.erasure_meta_cf.put_bytes(
            (slot, u64::from(fec_set_index)),
            &bincode::serialize(erasure_meta).unwrap(),
        )
    }

    /// Attempts to find the previous consecutive erasure set for `erasure_set`.
    ///
    /// Checks the map `erasure_metas` first; if the set is not present there,
    /// scans the blockstore. Returns None if the previous consecutive erasure
    /// set is not found in either.
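    ///
    /// For example, if `erasure_set` starts at fec_set_index 64 and the set
    /// starting at fec_set_index 32 spans 32 data shreds, then that earlier
    /// set's `next_fec_set_index()` is 64 and it is the previous consecutive
    /// set.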
    fn previous_erasure_set<'a>(
        &'a self,
        erasure_set: ErasureSetId,
        erasure_metas: &'a BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
    ) -> Result<Option<(ErasureSetId, Cow<'a, ErasureMeta>)>> {
        let (slot, fec_set_index) = erasure_set.store_key();

        // Check the previous entry from the in memory map to see if it is the consecutive
        // set to `erasure set`
        let candidate_erasure_entry = erasure_metas
            .range((
                Bound::Included(ErasureSetId::new(slot, 0)),
                Bound::Excluded(erasure_set),
            ))
            .next_back();
        let candidate_erasure_set_and_meta = candidate_erasure_entry
            .filter(|(_, candidate_erasure_meta)| {
                candidate_erasure_meta.as_ref().next_fec_set_index() == Some(fec_set_index)
            })
            .map(|(erasure_set, erasure_meta)| {
                (*erasure_set, Cow::Borrowed(erasure_meta.as_ref()))
            });
        if candidate_erasure_set_and_meta.is_some() {
            return Ok(candidate_erasure_set_and_meta);
        }

        // Consecutive set was not found in memory, scan blockstore for a potential candidate
        let Some(((_, candidate_fec_set_index), candidate_erasure_meta)) = self
            .erasure_meta_cf
            .iter(IteratorMode::From(
                (slot, u64::from(fec_set_index)),
                IteratorDirection::Reverse,
            ))?
            // `find` here, to skip the first element in case the erasure meta for fec_set_index is already present
            .find(|((_, candidate_fec_set_index), _)| {
                *candidate_fec_set_index != u64::from(fec_set_index)
            })
            // Do not consider sets from the previous slot
            .filter(|((candidate_slot, _), _)| *candidate_slot == slot)
        else {
            // No potential candidates
            return Ok(None);
        };
        let candidate_fec_set_index = u32::try_from(candidate_fec_set_index)
            .expect("fec_set_index from a previously inserted shred should fit in u32");
        let candidate_erasure_set = ErasureSetId::new(slot, candidate_fec_set_index);
        let candidate_erasure_meta: ErasureMeta = deserialize(candidate_erasure_meta.as_ref())?;

        // Check if this is actually the consecutive erasure set
        let Some(next_fec_set_index) = candidate_erasure_meta.next_fec_set_index() else {
            return Err(BlockstoreError::InvalidErasureConfig);
        };
        if next_fec_set_index == fec_set_index {
            return Ok(Some((
                candidate_erasure_set,
                Cow::Owned(candidate_erasure_meta),
            )));
        }
        Ok(None)
    }

    fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result<Option<MerkleRootMeta>> {
        self.merkle_root_meta_cf.get(erasure_set.store_key())
    }

    /// Check whether the specified slot is an orphan slot, i.e. a slot that
    /// does not have a parent slot.
    ///
    /// Returns `Some(true)` if the specified slot does not have a parent
    /// slot.  Any other return value means the slot is either not in the
    /// blockstore or is not an orphan slot.
    pub fn orphan(&self, slot: Slot) -> Result<Option<bool>> {
        self.orphans_cf.get(slot)
    }

    pub fn slot_meta_iterator(
        &self,
        slot: Slot,
    ) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> {
        let meta_iter = self
            .meta_cf
            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
        Ok(meta_iter.map(|(slot, slot_meta_bytes)| {
            (
                slot,
                deserialize(&slot_meta_bytes).unwrap_or_else(|e| {
                    panic!("Could not deserialize SlotMeta for slot {slot}: {e:?}")
                }),
            )
        }))
    }

    pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ {
        let root_forks = NextSlotsIterator::new(root, self);

        let orphans_iter = self.orphans_iterator(root + 1).unwrap();
        root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self)))
    }

    pub fn live_files_metadata(&self) -> Result<Vec<LiveFile>> {
        self.db.live_files_metadata()
    }

    #[cfg(feature = "dev-context-only-utils")]
    #[allow(clippy::type_complexity)]
    pub fn iterator_cf(
        &self,
        cf_name: &str,
    ) -> Result<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + '_> {
        let cf = self.db.cf_handle(cf_name);
        let iterator = self.db.iterator_cf(cf, rocksdb::IteratorMode::Start);
        Ok(iterator.map(|pair| pair.unwrap()))
    }

    #[allow(clippy::type_complexity)]
    pub fn slot_data_iterator(
        &self,
        slot: Slot,
        index: u64,
    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
        let slot_iterator = self.data_shred_cf.iter(IteratorMode::From(
            (slot, index),
            IteratorDirection::Forward,
        ))?;
        Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
    }

    #[allow(clippy::type_complexity)]
    pub fn slot_coding_iterator(
        &self,
        slot: Slot,
        index: u64,
    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
        let slot_iterator = self.code_shred_cf.iter(IteratorMode::From(
            (slot, index),
            IteratorDirection::Forward,
        ))?;
        Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
    }

    fn prepare_rooted_slot_iterator(
        &self,
        slot: Slot,
        direction: IteratorDirection,
    ) -> Result<impl Iterator<Item = Slot> + '_> {
        let slot_iterator = self.roots_cf.iter(IteratorMode::From(slot, direction))?;
        Ok(slot_iterator.map(move |(rooted_slot, _)| rooted_slot))
    }

    pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
        self.prepare_rooted_slot_iterator(slot, IteratorDirection::Forward)
    }

    pub fn reversed_rooted_slot_iterator(
        &self,
        slot: Slot,
    ) -> Result<impl Iterator<Item = Slot> + '_> {
        self.prepare_rooted_slot_iterator(slot, IteratorDirection::Reverse)
    }

    pub fn reversed_optimistic_slots_iterator(
        &self,
    ) -> Result<impl Iterator<Item = (Slot, Hash, UnixTimestamp)> + '_> {
        let iter = self.optimistic_slots_cf.iter(IteratorMode::End)?;
        Ok(iter.map(|(slot, bytes)| {
            let meta: OptimisticSlotMetaVersioned = deserialize(&bytes).unwrap();
            (slot, meta.hash(), meta.timestamp())
        }))
    }

    /// Determines if we can iterate from `starting_slot` to >= `ending_slot` by full slots
    /// `starting_slot` is excluded from the `is_full()` check
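    ///
    /// For example, if slots 1, 2, 3 and 4 chain together and slots 2 through
    /// 4 are all full, then `slot_range_connected(1, 4)` returns true even
    /// though slot 1 itself is never checked with `is_full()`.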
    pub fn slot_range_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool {
        if starting_slot == ending_slot {
            return true;
        }

        let mut next_slots: VecDeque<_> = match self.meta(starting_slot) {
            Ok(Some(starting_slot_meta)) => starting_slot_meta.next_slots.into(),
            _ => return false,
        };
        while let Some(slot) = next_slots.pop_front() {
            if let Ok(Some(slot_meta)) = self.meta(slot) {
                if slot_meta.is_full() {
                    match slot.cmp(&ending_slot) {
                        cmp::Ordering::Less => next_slots.extend(slot_meta.next_slots),
                        _ => return true,
                    }
                }
            }
        }

        false
    }

    fn get_recovery_data_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Shred>,
    ) -> impl Iterator<Item = Shred> + 'a {
        let slot = index.slot;
        erasure_meta.data_shreds_indices().filter_map(move |i| {
            let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Data);
            if let Some(shred) = prev_inserted_shreds.get(&key) {
                return Some(shred.clone());
            }
            if !index.data().contains(i) {
                return None;
            }
            match self.data_shred_cf.get_bytes((slot, i)).unwrap() {
                None => {
                    error!(
                        "Unable to read the data shred with slot {slot}, index {i} for shred \
                         recovery. The shred is marked present in the slot's data shred index, \
                         but the shred could not be found in the data shred column."
                    );
                    None
                }
                Some(data) => Shred::new_from_serialized_shred(data).ok(),
            }
        })
    }

    fn get_recovery_coding_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Shred>,
    ) -> impl Iterator<Item = Shred> + 'a {
        let slot = index.slot;
        erasure_meta.coding_shreds_indices().filter_map(move |i| {
            let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Code);
            if let Some(shred) = prev_inserted_shreds.get(&key) {
                return Some(shred.clone());
            }
            if !index.coding().contains(i) {
                return None;
            }
            match self.code_shred_cf.get_bytes((slot, i)).unwrap() {
                None => {
                    error!(
                        "Unable to read the coding shred with slot {slot}, index {i} for shred \
                         recovery. The shred is marked present in the slot's coding shred index, \
                         but the shred could not be found in the coding shred column."
                    );
                    None
                }
                Some(code) => Shred::new_from_serialized_shred(code).ok(),
            }
        })
    }

    fn recover_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Shred>,
        reed_solomon_cache: &'a ReedSolomonCache,
    ) -> std::result::Result<impl Iterator<Item = Shred> + 'a, shred::Error> {
        // Find shreds for this erasure set and try recovery
        let data = self.get_recovery_data_shreds(index, erasure_meta, prev_inserted_shreds);
        let code = self.get_recovery_coding_shreds(index, erasure_meta, prev_inserted_shreds);
        let shreds = shred::recover(data.chain(code), reed_solomon_cache)?;
        Ok(shreds.filter_map(std::result::Result::ok))
    }

    /// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for
    /// all the column families.
    ///
    /// [`BlockstoreRocksDbColumnFamilyMetrics`]: crate::blockstore_metrics::BlockstoreRocksDbColumnFamilyMetrics
    pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
        self.meta_cf.submit_rocksdb_cf_metrics();
        self.dead_slots_cf.submit_rocksdb_cf_metrics();
        self.duplicate_slots_cf.submit_rocksdb_cf_metrics();
        self.roots_cf.submit_rocksdb_cf_metrics();
        self.erasure_meta_cf.submit_rocksdb_cf_metrics();
        self.orphans_cf.submit_rocksdb_cf_metrics();
        self.index_cf.submit_rocksdb_cf_metrics();
        self.data_shred_cf.submit_rocksdb_cf_metrics();
        self.code_shred_cf.submit_rocksdb_cf_metrics();
        self.transaction_status_cf.submit_rocksdb_cf_metrics();
        self.address_signatures_cf.submit_rocksdb_cf_metrics();
        self.transaction_memos_cf.submit_rocksdb_cf_metrics();
        self.transaction_status_index_cf.submit_rocksdb_cf_metrics();
        self.rewards_cf.submit_rocksdb_cf_metrics();
        self.blocktime_cf.submit_rocksdb_cf_metrics();
        self.perf_samples_cf.submit_rocksdb_cf_metrics();
        self.block_height_cf.submit_rocksdb_cf_metrics();
        self.program_costs_cf.submit_rocksdb_cf_metrics();
        self.bank_hash_cf.submit_rocksdb_cf_metrics();
        self.optimistic_slots_cf.submit_rocksdb_cf_metrics();
        self.merkle_root_meta_cf.submit_rocksdb_cf_metrics();
    }

    /// Report the accumulated RPC API metrics
    pub(crate) fn report_rpc_api_metrics(&self) {
        self.rpc_api_metrics.report();
    }

    /// Attempts to insert shreds into blockstore and updates relevant metrics
    /// based on the results, split out by shred source (turbine vs. repair).
    fn attempt_shred_insertion(
        &self,
        shreds: impl ExactSizeIterator<Item = (Shred, /*is_repaired:*/ bool)>,
        is_trusted: bool,
        leader_schedule: Option<&LeaderScheduleCache>,
        shred_insertion_tracker: &mut ShredInsertionTracker,
        metrics: &mut BlockstoreInsertionMetrics,
    ) {
        metrics.num_shreds += shreds.len();
        let mut start = Measure::start("Shred insertion");
        for (shred, is_repaired) in shreds {
            let shred_source = if is_repaired {
                ShredSource::Repaired
            } else {
                ShredSource::Turbine
            };
            match shred.shred_type() {
                ShredType::Data => {
                    match self.check_insert_data_shred(
                        shred,
                        shred_insertion_tracker,
                        is_trusted,
                        leader_schedule,
                        shred_source,
                    ) {
                        Err(InsertDataShredError::Exists) => {
                            if is_repaired {
                                metrics.num_repaired_data_shreds_exists += 1;
                            } else {
                                metrics.num_turbine_data_shreds_exists += 1;
                            }
                        }
                        Err(InsertDataShredError::InvalidShred) => {
                            metrics.num_data_shreds_invalid += 1
                        }
                        Err(InsertDataShredError::BlockstoreError(err)) => {
                            metrics.num_data_shreds_blockstore_error += 1;
                            error!("blockstore error: {}", err);
                        }
                        Ok(()) => {
                            if is_repaired {
                                metrics.num_repair += 1;
                            }
                            metrics.num_inserted += 1;
                        }
                    };
                }
                ShredType::Code => {
                    self.check_insert_coding_shred(
                        shred,
                        shred_insertion_tracker,
                        is_trusted,
                        shred_source,
                        metrics,
                    );
                }
            };
        }
        start.stop();

        metrics.insert_shreds_elapsed_us += start.as_us();
    }

    fn try_shred_recovery<'a>(
        &'a self,
        erasure_metas: &'a BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
        index_working_set: &'a HashMap<u64, IndexMetaWorkingSetEntry>,
        prev_inserted_shreds: &'a HashMap<ShredId, Shred>,
        reed_solomon_cache: &'a ReedSolomonCache,
    ) -> impl Iterator<Item = Shred> + 'a {
        // Recovery rules:
        // 1. Only try recovery around indexes for which new data or coding shreds are received
        // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery
        // 3. Before trying recovery, check whether enough shreds have been received
        // 3a. Enough shreds: (#data + #coding shreds) > erasure.num_data
        erasure_metas
            .iter()
            .filter_map(|(erasure_set, working_erasure_meta)| {
                let erasure_meta = working_erasure_meta.as_ref();
                let slot = erasure_set.slot();
                let index_meta_entry = index_working_set.get(&slot).expect("Index");
                let index = &index_meta_entry.index;
                erasure_meta
                    .should_recover_shreds(index)
                    .then(|| {
                        self.recover_shreds(
                            index,
                            erasure_meta,
                            prev_inserted_shreds,
                            reed_solomon_cache,
                        )
                    })?
                    .ok()
            })
            .flatten()
    }

    /// Attempts shred recovery and does the following for recovered data
    /// shreds:
    /// 1. Verify signatures
    /// 2. Insert into blockstore
    /// 3. Send for retransmit.
    fn handle_shred_recovery(
        &self,
        leader_schedule: Option<&LeaderScheduleCache>,
        reed_solomon_cache: &ReedSolomonCache,
        shred_insertion_tracker: &mut ShredInsertionTracker,
        retransmit_sender: &Sender<Vec<shred::Payload>>,
        is_trusted: bool,
        metrics: &mut BlockstoreInsertionMetrics,
    ) {
        let mut start = Measure::start("Shred recovery");
        let mut recovered_shreds = Vec::new();
        let recovered_data_shreds: Vec<_> = self
            .try_shred_recovery(
                &shred_insertion_tracker.erasure_metas,
                &shred_insertion_tracker.index_working_set,
                &shred_insertion_tracker.just_inserted_shreds,
                reed_solomon_cache,
            )
            .filter_map(|shred| {
                // All shreds should be retransmitted, but because there are no
                // more missing data shreds in the erasure batch, coding shreds
                // are not stored in blockstore.
                match shred.shred_type() {
                    ShredType::Code => {
                        recovered_shreds.push(shred.into_payload());
                        None
                    }
                    ShredType::Data => {
                        recovered_shreds.push(shred.payload().clone());
                        Some(shred)
                    }
                }
            })
            .collect();
        if !recovered_shreds.is_empty() {
            let _ = retransmit_sender.send(recovered_shreds);
        }
        metrics.num_recovered += recovered_data_shreds.len();
        for shred in recovered_data_shreds {
            *match self.check_insert_data_shred(
                shred,
                shred_insertion_tracker,
                is_trusted,
                leader_schedule,
                ShredSource::Recovered,
            ) {
                Err(InsertDataShredError::Exists) => &mut metrics.num_recovered_exists,
                Err(InsertDataShredError::InvalidShred) => {
                    &mut metrics.num_recovered_failed_invalid
                }
                Err(InsertDataShredError::BlockstoreError(err)) => {
                    error!("blockstore error: {err}");
                    &mut metrics.num_recovered_blockstore_error
                }
                Ok(()) => &mut metrics.num_recovered_inserted,
            } += 1;
        }
        start.stop();
        metrics.shred_recovery_elapsed_us += start.as_us();
    }

    fn check_chained_merkle_root_consistency(
        &self,
        shred_insertion_tracker: &mut ShredInsertionTracker,
    ) {
        for (erasure_set, working_erasure_meta) in shred_insertion_tracker.erasure_metas.iter() {
            if !working_erasure_meta.should_write() {
                // Not a new erasure meta
                continue;
            }
            let (slot, _) = erasure_set.store_key();
            if self.has_duplicate_shreds_in_slot(slot) {
                continue;
            }
            // First coding shred from this erasure batch, check the forward merkle root chaining
            let erasure_meta = working_erasure_meta.as_ref();
            let shred_id = ShredId::new(
                slot,
                erasure_meta
                    .first_received_coding_shred_index()
                    .expect("First received coding index must fit in u32"),
                ShredType::Code,
            );
            let shred = shred_insertion_tracker
                .just_inserted_shreds
                .get(&shred_id)
                .expect("Erasure meta was just created, initial shred must exist");

            self.check_forward_chained_merkle_root_consistency(
                shred,
                erasure_meta,
                &shred_insertion_tracker.just_inserted_shreds,
                &shred_insertion_tracker.merkle_root_metas,
                &mut shred_insertion_tracker.duplicate_shreds,
            );
        }

        for (erasure_set, working_merkle_root_meta) in
            shred_insertion_tracker.merkle_root_metas.iter()
        {
            if !working_merkle_root_meta.should_write() {
                // Not a new merkle root meta
                continue;
            }
            let (slot, _) = erasure_set.store_key();
            if self.has_duplicate_shreds_in_slot(slot) {
                continue;
            }
            // First shred from this erasure batch, check the backwards merkle root chaining
            let merkle_root_meta = working_merkle_root_meta.as_ref();
            let shred_id = ShredId::new(
                slot,
                merkle_root_meta.first_received_shred_index(),
                merkle_root_meta.first_received_shred_type(),
            );
            let shred = shred_insertion_tracker
                .just_inserted_shreds
                .get(&shred_id)
                .expect("Merkle root meta was just created, initial shred must exist");

            self.check_backwards_chained_merkle_root_consistency(
                shred,
                &shred_insertion_tracker.just_inserted_shreds,
                &shred_insertion_tracker.erasure_metas,
                &mut shred_insertion_tracker.duplicate_shreds,
            );
        }
    }

    fn commit_updates_to_write_batch(
        &self,
        shred_insertion_tracker: &mut ShredInsertionTracker,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<(
        /* signal slot updates */ bool,
        /* slots updated */ Vec<u64>,
    )> {
        let mut start = Measure::start("Commit Working Sets");
        let (should_signal, newly_completed_slots) = self.commit_slot_meta_working_set(
            &shred_insertion_tracker.slot_meta_working_set,
            &mut shred_insertion_tracker.write_batch,
        )?;

        for (erasure_set, working_erasure_meta) in &shred_insertion_tracker.erasure_metas {
            if !working_erasure_meta.should_write() {
                // No need to rewrite the column
                continue;
            }
            let (slot, fec_set_index) = erasure_set.store_key();
            self.erasure_meta_cf.put_in_batch(
                &mut shred_insertion_tracker.write_batch,
                (slot, u64::from(fec_set_index)),
                working_erasure_meta.as_ref(),
            )?;
        }

        for (erasure_set, working_merkle_root_meta) in &shred_insertion_tracker.merkle_root_metas {
            if !working_merkle_root_meta.should_write() {
                // No need to rewrite the column
                continue;
            }
            self.merkle_root_meta_cf.put_in_batch(
                &mut shred_insertion_tracker.write_batch,
                erasure_set.store_key(),
                working_merkle_root_meta.as_ref(),
            )?;
        }

        for (&slot, index_working_set_entry) in shred_insertion_tracker.index_working_set.iter() {
            if index_working_set_entry.did_insert_occur {
                self.index_cf.put_in_batch(
                    &mut shred_insertion_tracker.write_batch,
                    slot,
                    &index_working_set_entry.index,
                )?;
            }
        }
        start.stop();
        metrics.commit_working_sets_elapsed_us += start.as_us();

        Ok((should_signal, newly_completed_slots))
    }

    /// The main helper function that performs the shred insertion logic
    /// and updates corresponding meta-data.
    ///
    /// This function updates the following column families:
    ///   - [`cf::DeadSlots`]: mark a shred as "dead" if its meta-data indicates
    ///     there is no need to replay this shred.  Specifically when both the
    ///     following conditions satisfy,
    ///     - We get a new shred N marked as the last shred in the slot S,
    ///       but N.index() is less than the current slot_meta.received
    ///       for slot S.
    ///     - The slot is not currently full
    ///       It means there's an alternate version of this slot. See
    ///       `check_insert_data_shred` for more details.
    ///   - [`cf::ShredData`]: stores data shreds (in check_insert_data_shreds).
    ///   - [`cf::ShredCode`]: stores coding shreds (in check_insert_coding_shreds).
    ///   - [`cf::SlotMeta`]: the SlotMeta of the input `shreds` and their related
    ///     shreds are updated.  Specifically:
    ///     - `handle_chaining()` updates `cf::SlotMeta` in two ways.  First, it
    ///       updates the in-memory slot_meta_working_set, which will later be
    ///       persisted in commit_slot_meta_working_set().  Second, for the newly
    ///       chained slots (updated inside handle_chaining_for_slot()), it will
    ///       directly persist their slot-meta into `cf::SlotMeta`.
    ///     - In `commit_slot_meta_working_set()`, persists everything stored
    ///       in the in-memory structure slot_meta_working_set, which is updated
    ///       by both `check_insert_data_shred()` and `handle_chaining()`.
    ///   - [`cf::Orphans`]: add or remove the ID of a slot to `cf::Orphans`
    ///     if it becomes / is no longer an orphan slot in `handle_chaining()`.
    ///   - [`cf::ErasureMeta`]: the associated ErasureMeta of the coding and data
    ///     shreds inside `shreds` will be updated and committed to
    ///     `cf::ErasureMeta`.
    ///   - [`cf::MerkleRootMeta`]: the associated MerkleRootMeta of the coding and data
    ///     shreds inside `shreds` will be updated and committed to
    ///     `cf::MerkleRootMeta`.
    ///   - [`cf::Index`]: persists the `Index` of each index_working_set_entry
    ///     for which an insert occurred during this function call, keyed by
    ///     slot id.
    ///
    /// Arguments:
    ///  - `shreds`: the shreds to be inserted, each paired with a boolean
    ///    indicating whether the shred was obtained via repair.
    ///  - `leader_schedule`: the leader schedule
    ///  - `is_trusted`: whether the shreds come from a trusted source. If this
    ///    is set to true, then the function will skip the shred duplication and
    ///    integrity checks.
    ///  - `should_recover_shreds`: an optional (ReedSolomonCache, retransmit
    ///    sender) pair; when present, erasure recovery is attempted and any
    ///    recovered shreds are sent for retransmit.
    ///  - `metrics`: the metric for reporting detailed stats
    ///
    /// On success, the function returns an Ok result with an `InsertResults`
    /// holding the `CompletedDataSetInfo`s for newly completed data sets and
    /// any detected duplicate shreds.
    fn do_insert_shreds(
        &self,
        shreds: impl ExactSizeIterator<Item = (Shred, /*is_repaired:*/ bool)>,
        leader_schedule: Option<&LeaderScheduleCache>,
        is_trusted: bool,
        // When inserting own shreds during leader slots, we shouldn't try to
        // recover shreds. If shreds are not to be recovered we don't need the
        // retransmit channel either. Otherwise, if we are inserting shreds
        // from another leader, we need to try erasure recovery and retransmit
        // recovered shreds.
        should_recover_shreds: Option<(
            &ReedSolomonCache,
            &Sender<Vec<shred::Payload>>, // retransmit_sender
        )>,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<InsertResults> {
        let mut total_start = Measure::start("Total elapsed");

        // Acquire the insertion lock
        let mut start = Measure::start("Blockstore lock");
        let _lock = self.insert_shreds_lock.lock().unwrap();
        start.stop();
        metrics.insert_lock_elapsed_us += start.as_us();

        let mut shred_insertion_tracker =
            ShredInsertionTracker::new(shreds.len(), self.get_write_batch()?);

        self.attempt_shred_insertion(
            shreds,
            is_trusted,
            leader_schedule,
            &mut shred_insertion_tracker,
            metrics,
        );
        if let Some((reed_solomon_cache, retransmit_sender)) = should_recover_shreds {
            self.handle_shred_recovery(
                leader_schedule,
                reed_solomon_cache,
                &mut shred_insertion_tracker,
                retransmit_sender,
                is_trusted,
                metrics,
            );
        }
        // Handle chaining for the members of the slot_meta_working_set that
        // were inserted into, drop the others.
        self.handle_chaining(
            &mut shred_insertion_tracker.write_batch,
            &mut shred_insertion_tracker.slot_meta_working_set,
            metrics,
        )?;

        self.check_chained_merkle_root_consistency(&mut shred_insertion_tracker);

        let (should_signal, newly_completed_slots) =
            self.commit_updates_to_write_batch(&mut shred_insertion_tracker, metrics)?;

        // Write out the accumulated batch.
        let mut start = Measure::start("Write Batch");
        self.write_batch(shred_insertion_tracker.write_batch)?;
        start.stop();
        metrics.write_batch_elapsed_us += start.as_us();

        send_signals(
            &self.new_shreds_signals.lock().unwrap(),
            &self.completed_slots_senders.lock().unwrap(),
            should_signal,
            newly_completed_slots,
        );

        // Roll up metrics
        total_start.stop();
        metrics.total_elapsed_us += total_start.as_us();
        metrics.index_meta_time_us += shred_insertion_tracker.index_meta_time_us;

        Ok(InsertResults {
            completed_data_set_infos: shred_insertion_tracker.newly_completed_data_sets,
            duplicate_shreds: shred_insertion_tracker.duplicate_shreds,
        })
    }

    // Attempts to recover and retransmit recovered shreds (also identifying
    // and handling duplicate shreds). Broadcast stage should instead call
    // Blockstore::insert_shreds when inserting own shreds during leader slots.
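    //
    // A usage sketch (setup elided; channel and cache names are illustrative):
    //
    //   let handle_duplicate = |dup: PossibleDuplicateShred| {
    //       let _ = duplicate_sender.send(dup);
    //   };
    //   blockstore.insert_shreds_handle_duplicate(
    //       shreds,               // (Shred, /*is_repaired:*/ bool) pairs
    //       Some(&leader_schedule_cache),
    //       false,                // is_trusted
    //       &retransmit_sender,
    //       &handle_duplicate,
    //       &reed_solomon_cache,
    //       &mut metrics,
    //   )?;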
    pub fn insert_shreds_handle_duplicate<F>(
        &self,
        shreds: impl IntoIterator<Item = (Shred, /*is_repaired:*/ bool), IntoIter: ExactSizeIterator>,
        leader_schedule: Option<&LeaderScheduleCache>,
        is_trusted: bool,
        retransmit_sender: &Sender<Vec<shred::Payload>>,
        handle_duplicate: &F,
        reed_solomon_cache: &ReedSolomonCache,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<Vec<CompletedDataSetInfo>>
    where
        F: Fn(PossibleDuplicateShred),
    {
        let InsertResults {
            completed_data_set_infos,
            duplicate_shreds,
        } = self.do_insert_shreds(
            shreds.into_iter(),
            leader_schedule,
            is_trusted,
            Some((reed_solomon_cache, retransmit_sender)),
            metrics,
        )?;

        for shred in duplicate_shreds {
            handle_duplicate(shred);
        }

        Ok(completed_data_set_infos)
    }

    pub fn add_new_shred_signal(&self, s: Sender<bool>) {
        self.new_shreds_signals.lock().unwrap().push(s);
    }

    pub fn add_completed_slots_signal(&self, s: CompletedSlotsSender) {
        self.completed_slots_senders.lock().unwrap().push(s);
    }

    pub fn get_new_shred_signals_len(&self) -> usize {
        self.new_shreds_signals.lock().unwrap().len()
    }

    pub fn get_new_shred_signal(&self, index: usize) -> Option<Sender<bool>> {
        self.new_shreds_signals.lock().unwrap().get(index).cloned()
    }

    pub fn drop_signal(&self) {
        self.new_shreds_signals.lock().unwrap().clear();
        self.completed_slots_senders.lock().unwrap().clear();
    }

1361    /// Clear `slot` from the Blockstore, see `Blockstore::purge_slot_cleanup_chaining`
1362    /// for more details.
1363    ///
1364    /// This function currently requires `insert_shreds_lock`, as both
1365    /// `clear_unconfirmed_slot()` and `insert_shreds_handle_duplicate()`
1366    /// try to perform read-modify-write operation on [`cf::SlotMeta`] column
1367    /// family.
1368    pub fn clear_unconfirmed_slot(&self, slot: Slot) {
1369        let _lock = self.insert_shreds_lock.lock().unwrap();
1370        // Purge the slot and insert an empty `SlotMeta` with only the `next_slots` field preserved.
1371        // Shreds inherently know their parent slot, and a parent's SlotMeta `next_slots` list
1372        // will be updated when the child is inserted (see `Blockstore::handle_chaining()`).
1373        // However, we are only purging and repairing the parent slot here. Since the child will not
1374        // be reinserted, the chaining will be lost. In order for bank forks discovery to ingest the child,
1375        // we must retain the chain by preserving `next_slots`.
1376        match self.purge_slot_cleanup_chaining(slot) {
1377            Ok(_) => {}
1378            Err(BlockstoreError::SlotUnavailable) => error!(
1379                "clear_unconfirmed_slot() called on slot {} with no SlotMeta",
1380                slot
1381            ),
1382            Err(e) => panic!("Purge database operations failed {}", e),
1383        }
1384    }
1385
1386    // Bypasses erasure recovery because it is called from broadcast stage
1387    // when inserting own shreds during leader slots.
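    /// A minimal sketch, assuming `blockstore` holds an open `Blockstore` and
    /// `shreds` were just produced by this node's own `Shredder`:
    ///
    /// ```ignore
    /// // Own shreds skip the duplicate checks, hence `is_trusted: true`.
    /// let completed_data_sets = blockstore.insert_shreds(shreds, None, /*is_trusted:*/ true)?;
    /// ```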
1388    pub fn insert_shreds(
1389        &self,
1390        shreds: impl IntoIterator<Item = Shred, IntoIter: ExactSizeIterator>,
1391        leader_schedule: Option<&LeaderScheduleCache>,
1392        is_trusted: bool,
1393    ) -> Result<Vec<CompletedDataSetInfo>> {
1394        let shreds = shreds
1395            .into_iter()
1396            .map(|shred| (shred, /*is_repaired:*/ false));
1397        let insert_results = self.do_insert_shreds(
1398            shreds,
1399            leader_schedule,
1400            is_trusted,
1401            None, // (reed_solomon_cache, retransmit_sender)
1402            &mut BlockstoreInsertionMetrics::default(),
1403        )?;
1404        Ok(insert_results.completed_data_set_infos)
1405    }
1406
1407    #[cfg(test)]
1408    fn insert_shred_return_duplicate(
1409        &self,
1410        shred: Shred,
1411        leader_schedule: &LeaderScheduleCache,
1412    ) -> Vec<PossibleDuplicateShred> {
1413        let insert_results = self
1414            .do_insert_shreds(
1415                [(shred, /*is_repaired:*/ false)].into_iter(),
1416                Some(leader_schedule),
1417                false,
1418                None, // (reed_solomon_cache, retransmit_sender)
1419                &mut BlockstoreInsertionMetrics::default(),
1420            )
1421            .unwrap();
1422        insert_results.duplicate_shreds
1423    }
1424
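    /// Checks a coding shred for duplicates, erasure config conflicts, and
    /// merkle root consistency, then (if all checks pass) stages the shred and
    /// its metadata updates into the tracker's write batch. Returns whether
    /// the shred was inserted.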
1425    #[allow(clippy::too_many_arguments)]
1426    fn check_insert_coding_shred(
1427        &self,
1428        shred: Shred,
1429        shred_insertion_tracker: &mut ShredInsertionTracker,
1430        is_trusted: bool,
1431        shred_source: ShredSource,
1432        metrics: &mut BlockstoreInsertionMetrics,
1433    ) -> bool {
1434        let slot = shred.slot();
1435        let shred_index = u64::from(shred.index());
1436
1437        let ShredInsertionTracker {
1438            just_inserted_shreds,
1439            erasure_metas,
1440            merkle_root_metas,
1441            index_working_set,
1442            index_meta_time_us,
1443            duplicate_shreds,
1444            write_batch,
1445            ..
1446        } = shred_insertion_tracker;
1447
1448        let index_meta_working_set_entry =
1449            self.get_index_meta_entry(slot, index_working_set, index_meta_time_us);
1450
1451        let index_meta = &mut index_meta_working_set_entry.index;
1452        let erasure_set = shred.erasure_set();
1453
1454        if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) {
1455            if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() {
1456                entry.insert(WorkingEntry::Clean(meta));
1457            }
1458        }
1459
1460        // The erasure set gives the index of the first coding shred in this FEC
1461        // block, so all coding shreds in a given FEC block share the same set index.
1462        if !is_trusted {
1463            if index_meta.coding().contains(shred_index) {
1464                metrics.num_coding_shreds_exists += 1;
1465                duplicate_shreds.push(PossibleDuplicateShred::Exists(shred));
1466                return false;
1467            }
1468
1469            if !Blockstore::should_insert_coding_shred(&shred, self.max_root()) {
1470                metrics.num_coding_shreds_invalid += 1;
1471                return false;
1472            }
1473
1474            if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) {
1475                // A previous shred has been inserted in this batch or in blockstore
1476                // Compare our current shred against the previous shred for potential
1477                // conflicts
1478                if !self.check_merkle_root_consistency(
1479                    just_inserted_shreds,
1480                    slot,
1481                    merkle_root_meta.as_ref(),
1482                    &shred,
1483                    duplicate_shreds,
1484                ) {
1485                    return false;
1486                }
1487            }
1488        }
1489
1490        let erasure_meta_entry = erasure_metas.entry(erasure_set).or_insert_with(|| {
1491            self.erasure_meta(erasure_set)
1492                .expect("Expect database get to succeed")
1493                .map(WorkingEntry::Clean)
1494                .unwrap_or_else(|| {
1495                    WorkingEntry::Dirty(ErasureMeta::from_coding_shred(&shred).unwrap())
1496                })
1497        });
1498        let erasure_meta = erasure_meta_entry.as_ref();
1499
1500        if !erasure_meta.check_coding_shred(&shred) {
1501            metrics.num_coding_shreds_invalid_erasure_config += 1;
1502            if !self.has_duplicate_shreds_in_slot(slot) {
1503                if let Some(conflicting_shred) = self
1504                    .find_conflicting_coding_shred(&shred, slot, erasure_meta, just_inserted_shreds)
1505                    .map(Cow::into_owned)
1506                {
1507                    if let Err(e) = self.store_duplicate_slot(
1508                        slot,
1509                        conflicting_shred.clone(),
1510                        shred.payload().clone(),
1511                    ) {
1512                        warn!(
1513                            "Unable to store conflicting erasure meta duplicate proof for {slot} \
1514                             {erasure_set:?} {e}"
1515                        );
1516                    }
1517
1518                    duplicate_shreds.push(PossibleDuplicateShred::ErasureConflict(
1519                        shred.clone(),
1520                        conflicting_shred,
1521                    ));
1522                } else {
1523                    error!(
1524                        "Unable to find the conflicting coding shred that set {erasure_meta:?}. \
1525                         This should only happen in extreme cases where blockstore cleanup has \
1526                         caught up to the root. Skipping the erasure meta duplicate shred check"
1527                    );
1528                }
1529            }
1530
1531        // TODO: This is a potential slashing condition
1532            warn!("Received multiple erasure configs for the same erasure set!!!");
1533            warn!(
1534                "Slot: {}, shred index: {}, erasure_set: {:?}, is_duplicate: {}, stored config: \
1535                 {:#?}, new shred: {:#?}",
1536                slot,
1537                shred.index(),
1538                erasure_set,
1539                self.has_duplicate_shreds_in_slot(slot),
1540                erasure_meta.config(),
1541                shred,
1542            );
1543            return false;
1544        }
1545
1546        self.slots_stats
1547            .record_shred(shred.slot(), shred.fec_set_index(), shred_source, None);
1548
1549        // insert coding shred into rocks
1550        let result = self
1551            .insert_coding_shred(index_meta, &shred, write_batch)
1552            .is_ok();
1553
1554        if result {
1555            index_meta_working_set_entry.did_insert_occur = true;
1556            metrics.num_inserted += 1;
1557
1558            merkle_root_metas
1559                .entry(erasure_set)
1560                .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred)));
1561        }
1562
1563        if let HashMapEntry::Vacant(entry) = just_inserted_shreds.entry(shred.id()) {
1564            metrics.num_coding_shreds_inserted += 1;
1565            entry.insert(shred);
1566        }
1567
1568        result
1569    }
1570
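    /// Searches for a previously received coding shred from the same erasure
    /// set whose erasure config conflicts with `shred`: first the shred that
    /// set the initial config, then (for pre-1.18.12 blockstores) a scan over
    /// the set's coding indices. Returns the conflicting payload, if any.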
1571    fn find_conflicting_coding_shred<'a>(
1572        &'a self,
1573        shred: &Shred,
1574        slot: Slot,
1575        erasure_meta: &ErasureMeta,
1576        just_received_shreds: &'a HashMap<ShredId, Shred>,
1577    ) -> Option<Cow<'a, shred::Payload>> {
1578        // Search for the shred which set the initial erasure config: either already
1579        // inserted into blockstore, or part of the current batch in just_received_shreds.
1580        let index = erasure_meta.first_received_coding_shred_index()?;
1581        let shred_id = ShredId::new(slot, index, ShredType::Code);
1582        let maybe_shred = self.get_shred_from_just_inserted_or_db(just_received_shreds, shred_id);
1583
1584        if index != 0 || maybe_shred.is_some() {
1585            return maybe_shred;
1586        }
1587
1588        // If we are using a blockstore created by a version earlier than 1.18.12,
1589        // `index` will be 0 as it was not yet populated; fall back to a scan until
1590        // we no longer support those blockstore versions.
1591        for coding_index in erasure_meta.coding_shreds_indices() {
1592            let maybe_shred = self.get_coding_shred(slot, coding_index);
1593            if let Ok(Some(shred_data)) = maybe_shred {
1594                let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
1595                if shred.erasure_mismatch(&potential_shred).unwrap() {
1596                    return Some(Cow::Owned(potential_shred.into_payload()));
1597                }
1598            } else if let Some(potential_shred) = {
1599                let key = ShredId::new(slot, u32::try_from(coding_index).unwrap(), ShredType::Code);
1600                just_received_shreds.get(&key)
1601            } {
1602                if shred.erasure_mismatch(potential_shred).unwrap() {
1603                    return Some(Cow::Borrowed(potential_shred.payload()));
1604                }
1605            }
1606        }
1607        None
1608    }
1609
1610    /// Creates an entry in the specified `write_batch` that performs shred
1611    /// insertion and the associated metadata update.  The function also
1612    /// updates its in-memory copy of the associated metadata.
1613    ///
1614    /// Currently, this function must be invoked while holding
1615    /// `insert_shreds_lock` as it performs read-modify-write operations
1616    /// on multiple column families.
1617    ///
1618    /// The resulting `write_batch` may include updates to [`cf::DeadSlots`]
1619    /// and [`cf::ShredData`].  Note that it will also update the in-memory copy
1620    /// of `erasure_metas`, `merkle_root_metas`, and `index_working_set`, which will
1621    /// later be used to update other column families such as [`cf::ErasureMeta`] and
1622    /// [`cf::Index`].
1623    ///
1624    /// Arguments:
1625    /// - `shred`: the shred to be inserted
1626    /// - `shred_insertion_tracker`: collection of shred insertion tracking
1627    ///     data.
1628    /// - `is_trusted`: if false, this function will check whether the
1629    ///     input shred is duplicate.
1630    /// - `leader_schedule`: the leader schedule, used to check whether it
1631    ///     is okay to insert the input shred.
1632    /// - `shred_source`: the source of the shred.
1634    #[allow(clippy::too_many_arguments)]
1635    fn check_insert_data_shred(
1636        &self,
1637        shred: Shred,
1638        shred_insertion_tracker: &mut ShredInsertionTracker,
1639        is_trusted: bool,
1640        leader_schedule: Option<&LeaderScheduleCache>,
1641        shred_source: ShredSource,
1642    ) -> std::result::Result<(), InsertDataShredError> {
1643        let slot = shred.slot();
1644        let shred_index = u64::from(shred.index());
1645
1646        let ShredInsertionTracker {
1647            index_working_set,
1648            slot_meta_working_set,
1649            just_inserted_shreds,
1650            merkle_root_metas,
1651            duplicate_shreds,
1652            index_meta_time_us,
1653            erasure_metas,
1654            write_batch,
1655            newly_completed_data_sets,
1656        } = shred_insertion_tracker;
1657
1658        let index_meta_working_set_entry =
1659            self.get_index_meta_entry(slot, index_working_set, index_meta_time_us);
1660        let index_meta = &mut index_meta_working_set_entry.index;
1661        let slot_meta_entry = self.get_slot_meta_entry(
1662            slot_meta_working_set,
1663            slot,
1664            shred
1665                .parent()
1666                .map_err(|_| InsertDataShredError::InvalidShred)?,
1667        );
1668
1669        let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
1670        let erasure_set = shred.erasure_set();
1671        if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) {
1672            if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() {
1673                entry.insert(WorkingEntry::Clean(meta));
1674            }
1675        }
1676
1677        if !is_trusted {
1678            if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
1679                duplicate_shreds.push(PossibleDuplicateShred::Exists(shred));
1680                return Err(InsertDataShredError::Exists);
1681            }
1682
1683            if shred.last_in_slot() && shred_index < slot_meta.received && !slot_meta.is_full() {
1684                // We got a last shred with index < slot_meta.received, which signals there
1685                // is an alternative, shorter version of the slot. Since `!slot_meta.is_full()`,
1686                // we might never receive, for the current version of the slot, all the
1687                // shreds below the current last index, never replay this slot, and make no
1688                // progress (for instance if a leader sends an additional detached "last index"
1689                // shred with a very high index, but none of the intermediate shreds). Ideally we
1690                // would just purge all shreds above the new last index, but because replay may
1691                // have already replayed entries past the newly detected "last" shred, we instead
1692                // mark the slot as dead and wait for replay to dump and repair the correct version.
1693                warn!(
1694                    "Received *last* shred index {} less than previous shred index {}, and slot \
1695                     {} is not full, marking slot dead",
1696                    shred_index, slot_meta.received, slot
1697                );
1698                self.dead_slots_cf
1699                    .put_in_batch(write_batch, slot, &true)
1700                    .unwrap();
1701            }
1702
1703            if !self.should_insert_data_shred(
1704                &shred,
1705                slot_meta,
1706                just_inserted_shreds,
1707                self.max_root(),
1708                leader_schedule,
1709                shred_source,
1710                duplicate_shreds,
1711            ) {
1712                return Err(InsertDataShredError::InvalidShred);
1713            }
1714
1715            if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) {
1716                // A previous shred has been inserted in this batch or in blockstore
1717                // Compare our current shred against the previous shred for potential
1718                // conflicts
1719                if !self.check_merkle_root_consistency(
1720                    just_inserted_shreds,
1721                    slot,
1722                    merkle_root_meta.as_ref(),
1723                    &shred,
1724                    duplicate_shreds,
1725                ) {
1726                    // This indicates there is an alternate version of this block.
1727                    // Similar to the last index case above, we might never get all the
1728                    // shreds for our current version, never replay this slot, and make no
1729                    // progress. We cannot determine if we have the version that will eventually
1730                    // be complete, so we take the conservative approach and mark the slot as dead
1731                    // so that replay can dump and repair the correct version.
1732                    self.dead_slots_cf
1733                        .put_in_batch(write_batch, slot, &true)
1734                        .unwrap();
1735                    return Err(InsertDataShredError::InvalidShred);
1736                }
1737            }
1738        }
1739
1740        let completed_data_sets = self.insert_data_shred(
1741            slot_meta,
1742            index_meta.data_mut(),
1743            &shred,
1744            write_batch,
1745            shred_source,
1746        )?;
1747        newly_completed_data_sets.extend(completed_data_sets);
1748        merkle_root_metas
1749            .entry(erasure_set)
1750            .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred)));
1751        just_inserted_shreds.insert(shred.id(), shred);
1752        index_meta_working_set_entry.did_insert_occur = true;
1753        slot_meta_entry.did_insert_occur = true;
1754        if let BTreeMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
1755            if let Some(meta) = self.erasure_meta(erasure_set).unwrap() {
1756                entry.insert(WorkingEntry::Clean(meta));
1757            }
1758        }
1759        Ok(())
1760    }
1761
1762    fn should_insert_coding_shred(shred: &Shred, max_root: Slot) -> bool {
1763        debug_assert_matches!(shred.sanitize(), Ok(()));
1764        shred.is_code() && shred.slot() > max_root
1765    }
1766
1767    fn insert_coding_shred(
1768        &self,
1769        index_meta: &mut Index,
1770        shred: &Shred,
1771        write_batch: &mut WriteBatch,
1772    ) -> Result<()> {
1773        let slot = shred.slot();
1774        let shred_index = u64::from(shred.index());
1775
1776        // Assert guaranteed by integrity checks on the shred that happen before
1777        // `insert_coding_shred` is called
1778        debug_assert_matches!(shred.sanitize(), Ok(()));
1779        assert!(shred.is_code());
1780
1781        // Commit step: commit all changes to the mutable structures at once, or none at all.
1782        // We don't want only a subset of these changes going through.
1783        self.code_shred_cf
1784            .put_bytes_in_batch(write_batch, (slot, shred_index), shred.payload())?;
1785        index_meta.coding_mut().insert(shred_index);
1786
1787        Ok(())
1788    }
1789
1790    fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool {
1791        let shred_index = u64::from(shred.index());
1792        // Check that the shred doesn't already exist in blockstore
1793        shred_index < slot_meta.consumed || data_index.contains(shred_index)
1794    }
1795
1796    /// Finds the corresponding shred at `shred_id` in the just inserted
1797    /// shreds or the backing store. Returns None if there is no shred.
1798    fn get_shred_from_just_inserted_or_db<'a>(
1799        &'a self,
1800        just_inserted_shreds: &'a HashMap<ShredId, Shred>,
1801        shred_id: ShredId,
1802    ) -> Option<Cow<'a, shred::Payload>> {
1803        let (slot, index, shred_type) = shred_id.unpack();
1804        match (just_inserted_shreds.get(&shred_id), shred_type) {
1805            (Some(shred), _) => Some(Cow::Borrowed(shred.payload())),
1806            // If it doesn't exist in the just inserted set, it must exist in
1807            // the backing store
1808            (_, ShredType::Data) => self
1809                .get_data_shred(slot, u64::from(index))
1810                .unwrap()
1811                .map(shred::Payload::from)
1812                .map(Cow::Owned),
1813            (_, ShredType::Code) => self
1814                .get_coding_shred(slot, u64::from(index))
1815                .unwrap()
1816                .map(shred::Payload::from)
1817                .map(Cow::Owned),
1818        }
1819    }
1820
1821    /// Returns true if there is no merkle root conflict between
1822    /// the existing `merkle_root_meta` and `shred`
1823    ///
1824    /// Otherwise return false and if not already present, add duplicate proof to
1825    /// `duplicate_shreds`.
1826    fn check_merkle_root_consistency(
1827        &self,
1828        just_inserted_shreds: &HashMap<ShredId, Shred>,
1829        slot: Slot,
1830        merkle_root_meta: &MerkleRootMeta,
1831        shred: &Shred,
1832        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
1833    ) -> bool {
1834        let new_merkle_root = shred.merkle_root().ok();
1835        if merkle_root_meta.merkle_root() == new_merkle_root {
1836            // No conflict, either both merkle shreds with same merkle root
1837            // or both legacy shreds with merkle_root `None`
1838            return true;
1839        }
1840
1841        warn!(
1842            "Received conflicting merkle roots for slot: {}, erasure_set: {:?} original merkle \
1843             root meta {:?} vs conflicting merkle root {:?} shred index {} type {:?}. Reporting \
1844             as duplicate",
1845            slot,
1846            shred.erasure_set(),
1847            merkle_root_meta,
1848            new_merkle_root,
1849            shred.index(),
1850            shred.shred_type(),
1851        );
1852
1853        if !self.has_duplicate_shreds_in_slot(slot) {
1854            let shred_id = ShredId::new(
1855                slot,
1856                merkle_root_meta.first_received_shred_index(),
1857                merkle_root_meta.first_received_shred_type(),
1858            );
1859            let Some(conflicting_shred) = self
1860                .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
1861                .map(Cow::into_owned)
1862            else {
1863                error!(
1864                    "Shred {shred_id:?} indiciated by merkle root meta {merkle_root_meta:?} is \
1865                     missing from blockstore. This should only happen in extreme cases where \
1866                     blockstore cleanup has caught up to the root. Skipping the merkle root \
1867                     consistency check"
1868                );
1869                return true;
1870            };
1871            if let Err(e) = self.store_duplicate_slot(
1872                slot,
1873                conflicting_shred.clone(),
1874                shred.clone().into_payload(),
1875            ) {
1876                warn!(
1877                    "Unable to store conflicting merkle root duplicate proof for {slot} \
1878                     {:?} {e}",
1879                    shred.erasure_set(),
1880                );
1881            }
1882            duplicate_shreds.push(PossibleDuplicateShred::MerkleRootConflict(
1883                shred.clone(),
1884                conflicting_shred,
1885            ));
1886        }
1887        false
1888    }
1889
1890    /// Returns true if there is no chaining conflict between
1891    /// the `shred` and `merkle_root_meta` of the next FEC set,
1892    /// or if shreds from the next set are yet to be received.
1893    ///
1894    /// Otherwise return false and add duplicate proof to
1895    /// `duplicate_shreds`.
1896    ///
1897    /// This is intended to be used right after `shred`'s `erasure_meta`
1898    /// has been created for the first time.
1899    fn check_forward_chained_merkle_root_consistency(
1900        &self,
1901        shred: &Shred,
1902        erasure_meta: &ErasureMeta,
1903        just_inserted_shreds: &HashMap<ShredId, Shred>,
1904        merkle_root_metas: &HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
1905        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
1906    ) -> bool {
1907        debug_assert!(erasure_meta.check_coding_shred(shred));
1908        let slot = shred.slot();
1909        let erasure_set = shred.erasure_set();
1910
1911        // If a shred from the next fec set has already been inserted, check the chaining
1912        let Some(next_fec_set_index) = erasure_meta.next_fec_set_index() else {
1913            error!("Invalid erasure meta, unable to compute next fec set index {erasure_meta:?}");
1914            return false;
1915        };
1916        let next_erasure_set = ErasureSetId::new(slot, next_fec_set_index);
1917        let Some(next_merkle_root_meta) = merkle_root_metas
1918            .get(&next_erasure_set)
1919            .map(WorkingEntry::as_ref)
1920            .map(Cow::Borrowed)
1921            .or_else(|| {
1922                self.merkle_root_meta(next_erasure_set)
1923                    .unwrap()
1924                    .map(Cow::Owned)
1925            })
1926        else {
1927            // No shred from the next fec set has been received
1928            return true;
1929        };
1930        let next_shred_id = ShredId::new(
1931            slot,
1932            next_merkle_root_meta.first_received_shred_index(),
1933            next_merkle_root_meta.first_received_shred_type(),
1934        );
1935        let Some(next_shred) =
1936            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
1937                .map(Cow::into_owned)
1938        else {
1939            error!(
1940                "Shred {next_shred_id:?} indicated by merkle root meta {next_merkle_root_meta:?} \
1941                 is missing from blockstore. This should only happen in extreme cases where \
1942                 blockstore cleanup has caught up to the root. Skipping the forward chained \
1943                 merkle root consistency check"
1944            );
1945            return true;
1946        };
1947        let merkle_root = shred.merkle_root().ok();
1948        let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);
1949
1950        if !self.check_chaining(merkle_root, chained_merkle_root) {
1951            warn!(
1952                "Received conflicting chained merkle roots for slot: {slot}, shred \
1953                 {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however next fec set \
1954                 shred {next_erasure_set:?} type {:?} chains to merkle root \
1955                 {chained_merkle_root:?}. Reporting as duplicate",
1956                shred.shred_type(),
1957                next_merkle_root_meta.first_received_shred_type(),
1958            );
1959
1960            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
1961                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
1962                    shred.clone(),
1963                    next_shred,
1964                ));
1965            }
1966            return false;
1967        }
1968
1969        true
1970    }
1971
1972    /// Returns true if there is no chaining conflict between
1973    /// the `shred` and `merkle_root_meta` of the previous FEC set,
1974    /// or if shreds from the previous set are yet to be received.
1975    ///
1976    /// Otherwise return false and add duplicate proof to
1977    /// `duplicate_shreds`.
1978    ///
1979    /// This is intended to be used right after `shred`'s `merkle_root_meta`
1980    /// has been created for the first time.
1981    fn check_backwards_chained_merkle_root_consistency(
1982        &self,
1983        shred: &Shred,
1984        just_inserted_shreds: &HashMap<ShredId, Shred>,
1985        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
1986        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
1987    ) -> bool {
1988        let slot = shred.slot();
1989        let erasure_set = shred.erasure_set();
1990        let fec_set_index = shred.fec_set_index();
1991
1992        if fec_set_index == 0 {
1993            // Although the first fec set chains to the last fec set of the parent block,
1994            // if this chain is incorrect we do not know which block is the duplicate until votes
1995            // are received. We instead delay this check until the block reaches duplicate
1996            // confirmation.
1997            return true;
1998        }
1999
2000        // If a shred from the previous fec set has already been inserted, check the chaining.
2001        // Since we cannot compute the previous fec set index directly, we first check the in-memory
2002        // map, and otherwise check whether the previous key in blockstore is consecutive with our set.
2003        let Some((prev_erasure_set, prev_erasure_meta)) = self
2004            .previous_erasure_set(erasure_set, erasure_metas)
2005            .expect("Expect database operations to succeed")
2006        else {
2007            // No shreds from the previous erasure batch have been received,
2008            // so nothing to check. Once the previous erasure batch is received,
2009            // we will verify this chain through the forward check above.
2010            return true;
2011        };
2012
2013        let prev_shred_id = ShredId::new(
2014            slot,
2015            prev_erasure_meta
2016                .first_received_coding_shred_index()
2017                .expect("First received coding index must fit in u32"),
2018            ShredType::Code,
2019        );
2020        let Some(prev_shred) =
2021            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id)
2022                .map(Cow::into_owned)
2023        else {
2024            warn!(
2025                "Shred {prev_shred_id:?} indicated by the erasure meta {prev_erasure_meta:?} \
2026                 is missing from blockstore. This can happen if you have recently upgraded \
2027                 from a version < v1.18.13, or if blockstore cleanup has caught up to the root. \
2028                 Skipping the backwards chained merkle root consistency check"
2029            );
2030            return true;
2031        };
2032        let merkle_root = shred::layout::get_merkle_root(&prev_shred);
2033        let chained_merkle_root = shred.chained_merkle_root().ok();
2034
2035        if !self.check_chaining(merkle_root, chained_merkle_root) {
2036            warn!(
2037                "Received conflicting chained merkle roots for slot: {slot}, shred {:?} type {:?} \
2038                 chains to merkle root {chained_merkle_root:?}, however previous fec set coding \
2039                 shred {prev_erasure_set:?} has merkle root {merkle_root:?}. Reporting as duplicate",
2040                shred.erasure_set(),
2041                shred.shred_type(),
2042            );
2043
2044            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
2045                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
2046                    shred.clone(),
2047                    prev_shred,
2048                ));
2049            }
2050            return false;
2051        }
2052
2053        true
2054    }
2055
2056    /// Checks if the chained merkle root == merkle root
2057    ///
2058    /// Returns true if no conflict, or if chained merkle roots are not enabled
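    ///
    /// A sketch of the rule, assuming hashes `a` and `b` with `a != b`:
    ///
    /// ```ignore
    /// assert!(blockstore.check_chaining(Some(a), Some(a)));  // roots match
    /// assert!(blockstore.check_chaining(Some(a), None));     // chaining not enabled yet
    /// assert!(!blockstore.check_chaining(Some(a), Some(b))); // conflict
    /// ```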
2059    fn check_chaining(&self, merkle_root: Option<Hash>, chained_merkle_root: Option<Hash>) -> bool {
2060        chained_merkle_root.is_none()  // Chained merkle roots have not been enabled yet
2061            || chained_merkle_root == merkle_root
2062    }
2063
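    /// Returns true if `shred` passes the slot-level sanity checks: its index
    /// is below the slot's `last_index`, it does not claim "last in slot" with
    /// an index below what has already been received, and its slot/parent pass
    /// `verify_shred_slots` against `max_root`. On conflicts, a duplicate
    /// proof is recorded in `duplicate_shreds`.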
2064    fn should_insert_data_shred(
2065        &self,
2066        shred: &Shred,
2067        slot_meta: &SlotMeta,
2068        just_inserted_shreds: &HashMap<ShredId, Shred>,
2069        max_root: Slot,
2070        leader_schedule: Option<&LeaderScheduleCache>,
2071        shred_source: ShredSource,
2072        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
2073    ) -> bool {
2074        let shred_index = u64::from(shred.index());
2075        let slot = shred.slot();
2076        let last_in_slot = if shred.last_in_slot() {
2077            debug!("got last in slot");
2078            true
2079        } else {
2080            false
2081        };
2082        debug_assert_matches!(shred.sanitize(), Ok(()));
2083        // Check that we do not receive a shred_index >= the last_index
2084        // for the slot
2085        let last_index = slot_meta.last_index;
2086        if last_index.map(|ix| shred_index >= ix).unwrap_or_default() {
2087            let leader_pubkey = leader_schedule
2088                .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
2089
2090            if !self.has_duplicate_shreds_in_slot(slot) {
2091                let shred_id = ShredId::new(
2092                    slot,
2093                    u32::try_from(last_index.unwrap()).unwrap(),
2094                    ShredType::Data,
2095                );
2096                let Some(ending_shred) = self
2097                    .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
2098                    .map(Cow::into_owned)
2099                else {
2100                    error!(
2101                        "Last index data shred {shred_id:?} indiciated by slot meta {slot_meta:?} \
2102                         is missing from blockstore. This should only happen in extreme cases \
2103                         where blockstore cleanup has caught up to the root. Skipping data shred \
2104                         insertion"
2105                    );
2106                    return false;
2107                };
2108
2109                if self
2110                    .store_duplicate_slot(slot, ending_shred.clone(), shred.payload().clone())
2111                    .is_err()
2112                {
2113                    warn!("store duplicate error");
2114                }
2115                duplicate_shreds.push(PossibleDuplicateShred::LastIndexConflict(
2116                    shred.clone(),
2117                    ending_shred,
2118                ));
2119            }
2120
2121            datapoint_error!(
2122                "blockstore_error",
2123                (
2124                    "error",
2125                    format!(
2126                        "Leader {leader_pubkey:?}, slot {slot}: received index {shred_index} >= \
2127                         slot.last_index {last_index:?}, shred_source: {shred_source:?}"
2128                    ),
2129                    String
2130                )
2131            );
2132            return false;
2133        }
2134        // Check that we do not receive a shred with "last_index" true, but shred_index
2135        // less than our current received
2136        if last_in_slot && shred_index < slot_meta.received {
2137            let leader_pubkey = leader_schedule
2138                .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
2139
2140            if !self.has_duplicate_shreds_in_slot(slot) {
2141                let shred_id = ShredId::new(
2142                    slot,
2143                    u32::try_from(slot_meta.received - 1).unwrap(),
2144                    ShredType::Data,
2145                );
2146                let Some(ending_shred) = self
2147                    .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
2148                    .map(Cow::into_owned)
2149                else {
2150                    error!(
2151                        "Last received data shred {shred_id:?} indiciated by slot meta \
2152                         {slot_meta:?} is missing from blockstore. This should only happen in \
2153                         extreme cases where blockstore cleanup has caught up to the root. \
2154                         Skipping data shred insertion"
2155                    );
2156                    return false;
2157                };
2158
2159                if self
2160                    .store_duplicate_slot(slot, ending_shred.clone(), shred.payload().clone())
2161                    .is_err()
2162                {
2163                    warn!("store duplicate error");
2164                }
2165                duplicate_shreds.push(PossibleDuplicateShred::LastIndexConflict(
2166                    shred.clone(),
2167                    ending_shred,
2168                ));
2169            }
2170
2171            datapoint_error!(
2172                "blockstore_error",
2173                (
2174                    "error",
2175                    format!(
2176                        "Leader {:?}, slot {}: received shred_index {} < slot.received {}, \
2177                         shred_source: {:?}",
2178                        leader_pubkey, slot, shred_index, slot_meta.received, shred_source
2179                    ),
2180                    String
2181                )
2182            );
2183            return false;
2184        }
2185
2186        // TODO Shouldn't this use shred.parent() instead and update
2187        // slot_meta.parent_slot accordingly?
2188        slot_meta
2189            .parent_slot
2190            .map(|parent_slot| verify_shred_slots(slot, parent_slot, max_root))
2191            .unwrap_or_default()
2192    }
2193
2194    /// Send a slot-full timing point to the poh_timing_report service.
2195    fn send_slot_full_timing(&self, slot: Slot) {
2196        if let Some(ref sender) = self.shred_timing_point_sender {
2197            send_poh_timing_point(
2198                sender,
2199                SlotPohTimingInfo::new_slot_full_poh_time_point(
2200                    slot,
2201                    Some(self.max_root()),
2202                    solana_sdk::timing::timestamp(),
2203                ),
2204            );
2205        }
2206    }
2207
2208    fn insert_data_shred<'a>(
2209        &self,
2210        slot_meta: &mut SlotMeta,
2211        data_index: &'a mut ShredIndex,
2212        shred: &Shred,
2213        write_batch: &mut WriteBatch,
2214        shred_source: ShredSource,
2215    ) -> Result<impl Iterator<Item = CompletedDataSetInfo> + 'a> {
2216        let slot = shred.slot();
2217        let index = u64::from(shred.index());
2218
2219        let last_in_slot = if shred.last_in_slot() {
2220            debug!("got last in slot");
2221            true
2222        } else {
2223            false
2224        };
2225
2226        let last_in_data = if shred.data_complete() {
2227            debug!("got last in data");
2228            true
2229        } else {
2230            false
2231        };
2232
2233        // Parent for slot meta should have been set by this point
2234        assert!(!slot_meta.is_orphan());
2235
2236        let new_consumed = if slot_meta.consumed == index {
2237            let mut current_index = index + 1;
2238
2239            while data_index.contains(current_index) {
2240                current_index += 1;
2241            }
2242            current_index
2243        } else {
2244            slot_meta.consumed
2245        };
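        // Worked example: if `slot_meta.consumed == index == 5` and indices 6
        // and 7 are already present in `data_index`, the loop above advances
        // `new_consumed` to 8, the first still-missing index.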
2246
2247        // Commit step: commit all changes to the mutable structures at once, or none at all.
2248        // We don't want only a subset of these changes going through.
2249        self.data_shred_cf.put_bytes_in_batch(
2250            write_batch,
2251            (slot, index),
2252            shred.bytes_to_store(),
2253        )?;
2254        data_index.insert(index);
2255        let newly_completed_data_sets = update_slot_meta(
2256            last_in_slot,
2257            last_in_data,
2258            slot_meta,
2259            index as u32,
2260            new_consumed,
2261            shred.reference_tick(),
2262            data_index,
2263        )
2264        .map(move |indices| CompletedDataSetInfo { slot, indices });
2265
2266        self.slots_stats.record_shred(
2267            shred.slot(),
2268            shred.fec_set_index(),
2269            shred_source,
2270            Some(slot_meta),
2271        );
2272
2273        // The slot is full; send a slot-full timing point to the poh_timing_report service.
2274        if slot_meta.is_full() {
2275            self.send_slot_full_timing(slot);
2276        }
2277
2278        trace!("inserted shred into slot {:?} and index {:?}", slot, index);
2279
2280        Ok(newly_completed_data_sets)
2281    }
2282
2283    pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
2284        let shred = self.data_shred_cf.get_bytes((slot, index))?;
2285        let shred = shred.map(ShredData::resize_stored_shred).transpose();
2286        shred.map_err(|err| {
2287            let err = format!("Invalid stored shred: {err}");
2288            let err = Box::new(bincode::ErrorKind::Custom(err));
2289            BlockstoreError::InvalidShredData(err)
2290        })
2291    }
2292
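    /// A usage sketch: deserialize every data shred of `slot`, starting at
    /// shred index 0.
    ///
    /// ```ignore
    /// let shreds: Vec<Shred> = blockstore.get_data_shreds_for_slot(slot, 0)?;
    /// ```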
2293    pub fn get_data_shreds_for_slot(&self, slot: Slot, start_index: u64) -> Result<Vec<Shred>> {
2294        self.slot_data_iterator(slot, start_index)
2295            .expect("blockstore couldn't fetch iterator")
2296            .map(|(_, bytes)| {
2297                Shred::new_from_serialized_shred(Vec::from(bytes)).map_err(|err| {
2298                    BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
2299                        format!("Could not reconstruct shred from shred payload: {err:?}"),
2300                    )))
2301                })
2302            })
2303            .collect()
2304    }
2305
2306    #[cfg(test)]
2307    fn get_data_shreds(
2308        &self,
2309        slot: Slot,
2310        from_index: u64,
2311        to_index: u64,
2312        buffer: &mut [u8],
2313    ) -> Result<(u64, usize)> {
2314        let _lock = self.check_lowest_cleanup_slot(slot)?;
2315        let mut buffer_offset = 0;
2316        let mut last_index = 0;
2317        if let Some(meta) = self.meta_cf.get(slot)? {
2318            if !meta.is_full() {
2319                warn!("The slot is not yet full. Will not return any shreds");
2320                return Ok((last_index, buffer_offset));
2321            }
2322            let to_index = cmp::min(to_index, meta.consumed);
2323            for index in from_index..to_index {
2324                if let Some(shred_data) = self.get_data_shred(slot, index)? {
2325                    let shred_len = shred_data.len();
2326                    if buffer.len().saturating_sub(buffer_offset) >= shred_len {
2327                        buffer[buffer_offset..buffer_offset + shred_len]
2328                            .copy_from_slice(&shred_data[..shred_len]);
2329                        buffer_offset += shred_len;
2330                        last_index = index;
2331                        // All shreds are of the same length. Check whether the
2332                        // buffer can accommodate another shred; if not, break
2333                        // right away to save one DB read.
2334                        if buffer.len().saturating_sub(buffer_offset) < shred_len {
2335                            break;
2336                        }
2337                    } else {
2338                        break;
2339                    }
2340                }
2341            }
2342        }
2343        Ok((last_index, buffer_offset))
2344    }
2345
2346    pub fn get_coding_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
2347        self.code_shred_cf.get_bytes((slot, index))
2348    }
2349
2350    pub fn get_coding_shreds_for_slot(
2351        &self,
2352        slot: Slot,
2353        start_index: u64,
2354    ) -> std::result::Result<Vec<Shred>, shred::Error> {
2355        self.slot_coding_iterator(slot, start_index)
2356            .expect("blockstore couldn't fetch iterator")
2357            .map(|(_, bytes)| Shred::new_from_serialized_shred(Vec::from(bytes)))
2358            .collect()
2359    }
2360
2361    // Only used by tests
2362    #[allow(clippy::too_many_arguments)]
2363    pub(crate) fn write_entries(
2364        &self,
2365        start_slot: Slot,
2366        num_ticks_in_start_slot: u64,
2367        start_index: u32,
2368        ticks_per_slot: u64,
2369        parent: Option<u64>,
2370        is_full_slot: bool,
2371        keypair: &Keypair,
2372        entries: Vec<Entry>,
2373        version: u16,
2374    ) -> Result<usize /*num of data shreds*/> {
2375        let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v);
2376        let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
2377        assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot);
2378        let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot;
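        // Worked example: with `start_slot = 10`, `parent = Some(9)`,
        // `ticks_per_slot = 64` and `num_ticks_in_start_slot = 16`, one slot
        // is spanned and 48 ticks remain to be written into `start_slot`.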
2379
2380        let mut current_slot = start_slot;
2381        let mut shredder = Shredder::new(current_slot, parent_slot, 0, version).unwrap();
2382        let mut all_shreds = vec![];
2383        let mut slot_entries = vec![];
2384        let reed_solomon_cache = ReedSolomonCache::default();
2385        let mut chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen()));
2386        // Find all the entries for start_slot
2387        for entry in entries.into_iter() {
2388            if remaining_ticks_in_slot == 0 {
2389                current_slot += 1;
2390                parent_slot = current_slot - 1;
2391                remaining_ticks_in_slot = ticks_per_slot;
2392                let current_entries = std::mem::take(&mut slot_entries);
2393                let start_index = {
2394                    if all_shreds.is_empty() {
2395                        start_index
2396                    } else {
2397                        0
2398                    }
2399                };
2400                let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
2401                    keypair,
2402                    &current_entries,
2403                    true, // is_last_in_slot
2404                    chained_merkle_root,
2405                    start_index, // next_shred_index
2406                    start_index, // next_code_index
2407                    true,        // merkle_variant
2408                    &reed_solomon_cache,
2409                    &mut ProcessShredsStats::default(),
2410                );
2411                all_shreds.append(&mut data_shreds);
2412                all_shreds.append(&mut coding_shreds);
2413                chained_merkle_root = Some(coding_shreds.last().unwrap().merkle_root().unwrap());
2414                shredder = Shredder::new(
2415                    current_slot,
2416                    parent_slot,
2417                    (ticks_per_slot - remaining_ticks_in_slot) as u8,
2418                    version,
2419                )
2420                .unwrap();
2421            }
2422
2423            if entry.is_tick() {
2424                remaining_ticks_in_slot -= 1;
2425            }
2426            slot_entries.push(entry);
2427        }
2428
2429        if !slot_entries.is_empty() {
2430            let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
2431                keypair,
2432                &slot_entries,
2433                is_full_slot,
2434                chained_merkle_root,
2435                0,    // next_shred_index
2436                0,    // next_code_index
2437                true, // merkle_variant
2438                &reed_solomon_cache,
2439                &mut ProcessShredsStats::default(),
2440            );
2441            all_shreds.append(&mut data_shreds);
2442            all_shreds.append(&mut coding_shreds);
2443        }
2444        let num_data = all_shreds.iter().filter(|shred| shred.is_data()).count();
2445        self.insert_shreds(all_shreds, None, false)?;
2446        Ok(num_data)
2447    }
2448
2449    pub fn get_index(&self, slot: Slot) -> Result<Option<Index>> {
2450        self.index_cf.get(slot)
2451    }
2452
2453    /// Manually update the meta for a slot.
2454    /// Can interfere with automatic meta update and potentially break chaining.
2455    /// Dangerous. Use with care.
2456    pub fn put_meta_bytes(&self, slot: Slot, bytes: &[u8]) -> Result<()> {
2457        self.meta_cf.put_bytes(slot, bytes)
2458    }
2459
2460    /// Manually update the meta for a slot.
2461    /// Can interfere with automatic meta update and potentially break chaining.
2462    /// Dangerous. Use with care.
2463    pub fn put_meta(&self, slot: Slot, meta: &SlotMeta) -> Result<()> {
2464        self.put_meta_bytes(slot, &bincode::serialize(meta)?)
2465    }
2466
2467    /// Find missing shred indices for a given `slot` within the range
2468    /// [`start_index`, `end_index`). Missing shreds will only be reported as
2469    /// missing if they should be present by the time this function is called,
2470    /// as controlled by `first_timestamp` and `defer_threshold_ticks`.
2471    ///
2472    /// Arguments:
2473    ///  - `db_iterator`: Iterator to run search over.
2474    ///  - `slot`: The slot to search for missing shreds for.
2475    ///  - `first_timestamp`: Timestamp (ms) for slot's first shred insertion.
2476    ///  - `defer_threshold_ticks`: A grace period to allow shreds that are
2477    ///    missing to be excluded from the reported missing list. This allows
2478    ///    tuning on how aggressively missing shreds should be reported and
2479    ///    acted upon.
2480    ///  - `start_index`: Begin search (inclusively) at this shred index.
2481    ///  - `end_index`: Finish search (exclusively) at this shred index.
2482    ///  - `max_missing`: Limit result to this many indices.
2483    fn find_missing_indexes<C>(
2484        db_iterator: &mut DBRawIterator,
2485        slot: Slot,
2486        first_timestamp: u64,
2487        defer_threshold_ticks: u64,
2488        start_index: u64,
2489        end_index: u64,
2490        max_missing: usize,
2491    ) -> Vec<u64>
2492    where
2493        C: Column<Index = (u64, u64)>,
2494    {
2495        if start_index >= end_index || max_missing == 0 {
2496            return vec![];
2497        }
2498
2499        let mut missing_indexes = vec![];
2500        // System time is not monotonic
2501        let ticks_since_first_insert =
2502            DEFAULT_TICKS_PER_SECOND * timestamp().saturating_sub(first_timestamp) / 1000;
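        // Worked example: with DEFAULT_TICKS_PER_SECOND = 160, a first shred
        // insertion 500 ms ago corresponds to 80 elapsed ticks.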
2503
2504        // Seek to the first shred with index >= start_index
2505        db_iterator.seek(C::key(&(slot, start_index)));
2506
2507        // The index of the first missing shred in the slot
2508        let mut prev_index = start_index;
2509        loop {
2510            if !db_iterator.valid() {
2511                let num_to_take = max_missing - missing_indexes.len();
2512                missing_indexes.extend((prev_index..end_index).take(num_to_take));
2513                break;
2514            }
2515            let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key"));
2516
2517            let current_index = {
2518                if current_slot > slot {
2519                    end_index
2520                } else {
2521                    index
2522                }
2523            };
2524
2525            let upper_index = cmp::min(current_index, end_index);
2526            // the tick that will be used to figure out the timeout for this hole
2527            let data = db_iterator.value().expect("couldn't read value");
2528            let reference_tick = u64::from(shred::layout::get_reference_tick(data).unwrap());
2529            if ticks_since_first_insert < reference_tick + defer_threshold_ticks {
2530                // The higher index holes have not timed out yet
2531                break;
2532            }
2533
2534            let num_to_take = max_missing - missing_indexes.len();
2535            missing_indexes.extend((prev_index..upper_index).take(num_to_take));
2536
2537            if missing_indexes.len() == max_missing
2538                || current_slot > slot
2539                || current_index >= end_index
2540            {
2541                break;
2542            }
2543
2544            prev_index = current_index + 1;
2545            db_iterator.next();
2546        }
2547
2548        missing_indexes
2549    }
2550
2551    /// Find missing data shreds for the given `slot`.
2552    ///
2553    /// For more details on the arguments, see [`find_missing_indexes`].
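    ///
    /// A usage sketch; the argument values are illustrative only:
    ///
    /// ```ignore
    /// // Report up to 32 data shred indices in [0, 100) that are missing from
    /// // `slot`, given that its first shred arrived at `first_insert_ms`.
    /// let missing = blockstore.find_missing_data_indexes(
    ///     slot,
    ///     first_insert_ms,
    ///     DEFAULT_TICKS_PER_SECOND, // defer_threshold_ticks: ~1 second grace
    ///     0,   // start_index
    ///     100, // end_index
    ///     32,  // max_missing
    /// );
    /// ```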
2554    pub fn find_missing_data_indexes(
2555        &self,
2556        slot: Slot,
2557        first_timestamp: u64,
2558        defer_threshold_ticks: u64,
2559        start_index: u64,
2560        end_index: u64,
2561        max_missing: usize,
2562    ) -> Vec<u64> {
2563        let Ok(mut db_iterator) = self.db.raw_iterator_cf(self.data_shred_cf.handle()) else {
2564            return vec![];
2565        };
2566
2567        Self::find_missing_indexes::<cf::ShredData>(
2568            &mut db_iterator,
2569            slot,
2570            first_timestamp,
2571            defer_threshold_ticks,
2572            start_index,
2573            end_index,
2574            max_missing,
2575        )
2576    }
2577
2578    fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
2579        let _lock = self.check_lowest_cleanup_slot(slot)?;
2580        self.blocktime_cf.get(slot)
2581    }
2582
2583    pub fn get_rooted_block_time(&self, slot: Slot) -> Result<UnixTimestamp> {
2584        self.rpc_api_metrics
2585            .num_get_rooted_block_time
2586            .fetch_add(1, Ordering::Relaxed);
2587        let _lock = self.check_lowest_cleanup_slot(slot)?;
2588
2589        if self.is_root(slot) {
2590            return self
2591                .blocktime_cf
2592                .get(slot)?
2593                .ok_or(BlockstoreError::SlotUnavailable);
2594        }
2595        Err(BlockstoreError::SlotNotRooted)
2596    }
2597
2598    pub fn set_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
2599        self.blocktime_cf.put(slot, &timestamp)
2600    }
2601
2602    pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
2603        self.rpc_api_metrics
2604            .num_get_block_height
2605            .fetch_add(1, Ordering::Relaxed);
2606        let _lock = self.check_lowest_cleanup_slot(slot)?;
2607
2608        self.block_height_cf.get(slot)
2609    }
2610
2611    pub fn set_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
2612        self.block_height_cf.put(slot, &block_height)
2613    }
2614
2615    /// The first complete block that is available in the Blockstore ledger
2616    pub fn get_first_available_block(&self) -> Result<Slot> {
2617        let mut root_iterator = self.rooted_slot_iterator(self.lowest_slot_with_genesis())?;
2618        let first_root = root_iterator.next().unwrap_or_default();
2619        // If the first root is slot 0, it is genesis. Genesis is always complete, so it is correct
2620        // to return it as first-available.
2621        if first_root == 0 {
2622            return Ok(first_root);
2623        }
2624        // Otherwise, the block at root-index 0 cannot ever be complete, because it is missing its
2625        // parent blockhash. A parent blockhash must be calculated from the entries of the previous
2626        // block. Therefore, the first available complete block is that at root-index 1.
2627        Ok(root_iterator.next().unwrap_or_default())
2628    }
2629
2630    pub fn get_rooted_block(
2631        &self,
2632        slot: Slot,
2633        require_previous_blockhash: bool,
2634    ) -> Result<VersionedConfirmedBlock> {
2635        self.rpc_api_metrics
2636            .num_get_rooted_block
2637            .fetch_add(1, Ordering::Relaxed);
2638        let _lock = self.check_lowest_cleanup_slot(slot)?;
2639
2640        if self.is_root(slot) {
2641            return self.get_complete_block(slot, require_previous_blockhash);
2642        }
2643        Err(BlockstoreError::SlotNotRooted)
2644    }
2645
2646    pub fn get_complete_block(
2647        &self,
2648        slot: Slot,
2649        require_previous_blockhash: bool,
2650    ) -> Result<VersionedConfirmedBlock> {
2651        self.do_get_complete_block_with_entries(
2652            slot,
2653            require_previous_blockhash,
2654            false,
2655            /*allow_dead_slots:*/ false,
2656        )
2657        .map(|result| result.block)
2658    }
2659
2660    pub fn get_rooted_block_with_entries(
2661        &self,
2662        slot: Slot,
2663        require_previous_blockhash: bool,
2664    ) -> Result<VersionedConfirmedBlockWithEntries> {
2665        self.rpc_api_metrics
2666            .num_get_rooted_block_with_entries
2667            .fetch_add(1, Ordering::Relaxed);
2668        let _lock = self.check_lowest_cleanup_slot(slot)?;
2669
2670        if self.is_root(slot) {
2671            return self.do_get_complete_block_with_entries(
2672                slot,
2673                require_previous_blockhash,
2674                true,
2675                /*allow_dead_slots:*/ false,
2676            );
2677        }
2678        Err(BlockstoreError::SlotNotRooted)
2679    }
2680
2681    #[cfg(feature = "dev-context-only-utils")]
2682    pub fn get_complete_block_with_entries(
2683        &self,
2684        slot: Slot,
2685        require_previous_blockhash: bool,
2686        populate_entries: bool,
2687        allow_dead_slots: bool,
2688    ) -> Result<VersionedConfirmedBlockWithEntries> {
2689        self.do_get_complete_block_with_entries(
2690            slot,
2691            require_previous_blockhash,
2692            populate_entries,
2693            allow_dead_slots,
2694        )
2695    }
2696
2697    fn do_get_complete_block_with_entries(
2698        &self,
2699        slot: Slot,
2700        require_previous_blockhash: bool,
2701        populate_entries: bool,
2702        allow_dead_slots: bool,
2703    ) -> Result<VersionedConfirmedBlockWithEntries> {
2704        let Some(slot_meta) = self.meta_cf.get(slot)? else {
2705            trace!("do_get_complete_block_with_entries() failed for {slot} (missing SlotMeta)");
2706            return Err(BlockstoreError::SlotUnavailable);
2707        };
2708        if slot_meta.is_full() {
2709            let (slot_entries, _, _) = self.get_slot_entries_with_shred_info(
2710                slot,
2711                /*shred_start_index:*/ 0,
2712                allow_dead_slots,
2713            )?;
2714            if !slot_entries.is_empty() {
2715                let blockhash = slot_entries
2716                    .last()
2717                    .map(|entry| entry.hash)
2718                    .unwrap_or_else(|| panic!("Rooted slot {slot:?} must have blockhash"));
2719                let mut starting_transaction_index = 0;
2720                let mut entries = if populate_entries {
2721                    Vec::with_capacity(slot_entries.len())
2722                } else {
2723                    Vec::new()
2724                };
2725                let slot_transaction_iterator = slot_entries
2726                    .into_iter()
2727                    .flat_map(|entry| {
2728                        if populate_entries {
2729                            entries.push(solana_transaction_status::EntrySummary {
2730                                num_hashes: entry.num_hashes,
2731                                hash: entry.hash,
2732                                num_transactions: entry.transactions.len() as u64,
2733                                starting_transaction_index,
2734                            });
2735                            starting_transaction_index += entry.transactions.len();
2736                        }
2737                        entry.transactions
2738                    })
2739                    .map(|transaction| {
2740                        if let Err(err) = transaction.sanitize() {
2741                            warn!(
2742                                "Blockstore::get_block sanitize failed: {:?}, slot: {:?}, {:?}",
2743                                err, slot, transaction,
2744                            );
2745                        }
2746                        transaction
2747                    });
2748                let parent_slot_entries = slot_meta
2749                    .parent_slot
2750                    .and_then(|parent_slot| {
2751                        self.get_slot_entries_with_shred_info(
2752                            parent_slot,
2753                            /*shred_start_index:*/ 0,
2754                            allow_dead_slots,
2755                        )
2756                        .ok()
2757                        .map(|(entries, _, _)| entries)
2758                    })
2759                    .unwrap_or_default();
2760                if parent_slot_entries.is_empty() && require_previous_blockhash {
2761                    return Err(BlockstoreError::ParentEntriesUnavailable);
2762                }
2763                let previous_blockhash = if !parent_slot_entries.is_empty() {
2764                    get_last_hash(parent_slot_entries.iter()).unwrap()
2765                } else {
2766                    Hash::default()
2767                };
2768
2769                let (rewards, num_partitions) = self
2770                    .rewards_cf
2771                    .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)?
2772                    .unwrap_or_default()
2773                    .into();
2774
2775                // The Blocktime and BlockHeight column families are updated asynchronously; they
2776                // may not be written by the time the complete slot entries are available. In this
2777                // case, these fields will be `None`.
2778                let block_time = self.blocktime_cf.get(slot)?;
2779                let block_height = self.block_height_cf.get(slot)?;
2780
2781                let block = VersionedConfirmedBlock {
2782                    previous_blockhash: previous_blockhash.to_string(),
2783                    blockhash: blockhash.to_string(),
2784                    // If the slot is full it should have parent_slot populated
2785                    // from shreds received.
2786                    parent_slot: slot_meta.parent_slot.unwrap(),
2787                    transactions: self
2788                        .map_transactions_to_statuses(slot, slot_transaction_iterator)?,
2789                    rewards,
2790                    num_partitions,
2791                    block_time,
2792                    block_height,
2793                };
2794                return Ok(VersionedConfirmedBlockWithEntries { block, entries });
2795            }
2796        }
2797        trace!("do_get_complete_block_with_entries() failed for {slot} (slot not full)");
2798        Err(BlockstoreError::SlotUnavailable)
2799    }
2800
2801    pub fn map_transactions_to_statuses(
2802        &self,
2803        slot: Slot,
2804        iterator: impl Iterator<Item = VersionedTransaction>,
2805    ) -> Result<Vec<VersionedTransactionWithStatusMeta>> {
2806        iterator
2807            .map(|transaction| {
2808                let signature = transaction.signatures[0];
2809                Ok(VersionedTransactionWithStatusMeta {
2810                    transaction,
2811                    meta: self
2812                        .read_transaction_status((signature, slot))?
2813                        .ok_or(BlockstoreError::MissingTransactionMetadata)?,
2814                })
2815            })
2816            .collect()
2817    }
2818
2819    fn cleanup_old_entries(&self) -> Result<()> {
2820        if !self.is_primary_access() {
2821            return Ok(());
2822        }
2823
2824        // Initialize the TransactionStatusIndexMeta entries if they are not already present
2825        if self.transaction_status_index_cf.get(0)?.is_none() {
2826            self.transaction_status_index_cf
2827                .put(0, &TransactionStatusIndexMeta::default())?;
2828        }
2829        if self.transaction_status_index_cf.get(1)?.is_none() {
2830            self.transaction_status_index_cf
2831                .put(1, &TransactionStatusIndexMeta::default())?;
2832        }
2833
2834        // If present, delete dummy entries inserted by old software
2835        // https://github.com/solana-labs/solana/blob/bc2b372/ledger/src/blockstore.rs#L2130-L2137
2836        let transaction_status_dummy_key = cf::TransactionStatus::as_index(2);
2837        if self
2838            .transaction_status_cf
2839            .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(transaction_status_dummy_key)?
2840            .is_some()
2841        {
2842            self.transaction_status_cf
2843                .delete(transaction_status_dummy_key)?;
2844        };
2845        let address_signatures_dummy_key = cf::AddressSignatures::as_index(2);
2846        if self
2847            .address_signatures_cf
2848            .get(address_signatures_dummy_key)?
2849            .is_some()
2850        {
2851            self.address_signatures_cf
2852                .delete(address_signatures_dummy_key)?;
2853        };
2854
2855        Ok(())
2856    }
2857
2858    fn get_highest_primary_index_slot(&self) -> Option<Slot> {
2859        *self.highest_primary_index_slot.read().unwrap()
2860    }
2861
2862    fn set_highest_primary_index_slot(&self, slot: Option<Slot>) {
2863        *self.highest_primary_index_slot.write().unwrap() = slot;
2864    }
2865
2866    fn update_highest_primary_index_slot(&self) -> Result<()> {
2867        let iterator = self.transaction_status_index_cf.iter(IteratorMode::Start)?;
2868        let mut highest_primary_index_slot = None;
2869        for (_, data) in iterator {
2870            let meta: TransactionStatusIndexMeta = deserialize(&data).unwrap();
2871            if highest_primary_index_slot.is_none()
2872                || highest_primary_index_slot.is_some_and(|slot| slot < meta.max_slot)
2873            {
2874                highest_primary_index_slot = Some(meta.max_slot);
2875            }
2876        }
2877        if highest_primary_index_slot.is_some_and(|slot| slot != 0) {
2878            self.set_highest_primary_index_slot(highest_primary_index_slot);
2879        } else {
2880            self.db.set_clean_slot_0(true);
2881        }
2882        Ok(())
2883    }
2884
2885    fn maybe_cleanup_highest_primary_index_slot(&self, oldest_slot: Slot) -> Result<()> {
2886        let mut w_highest_primary_index_slot = self.highest_primary_index_slot.write().unwrap();
2887        if let Some(highest_primary_index_slot) = *w_highest_primary_index_slot {
2888            if oldest_slot > highest_primary_index_slot {
2889                *w_highest_primary_index_slot = None;
2890                self.db.set_clean_slot_0(true);
2891            }
2892        }
2893        Ok(())
2894    }
2895
2896    fn read_deprecated_transaction_status(
2897        &self,
2898        index: (Signature, Slot),
2899    ) -> Result<Option<TransactionStatusMeta>> {
2900        let (signature, slot) = index;
2901        let result = self
2902            .transaction_status_cf
2903            .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
2904                &cf::TransactionStatus::deprecated_key((0, signature, slot)),
2905            )?;
2906        if result.is_none() {
2907            Ok(self
2908                .transaction_status_cf
2909                .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
2910                    &cf::TransactionStatus::deprecated_key((1, signature, slot)),
2911                )?
2912                .and_then(|meta| meta.try_into().ok()))
2913        } else {
2914            Ok(result.and_then(|meta| meta.try_into().ok()))
2915        }
2916    }
2917
2918    pub fn read_transaction_status(
2919        &self,
2920        index: (Signature, Slot),
2921    ) -> Result<Option<TransactionStatusMeta>> {
2922        let result = self.transaction_status_cf.get_protobuf(index)?;
2923        if result.is_none()
2924            && self
2925                .get_highest_primary_index_slot()
2926                .is_some_and(|highest_slot| highest_slot >= index.1)
2927        {
2928            self.read_deprecated_transaction_status(index)
2929        } else {
2930            Ok(result.and_then(|meta| meta.try_into().ok()))
2931        }
2932    }
2933
2934    #[inline]
2935    fn write_transaction_status_helper<'a, F>(
2936        &self,
2937        slot: Slot,
2938        signature: Signature,
2939        keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
2940        status: TransactionStatusMeta,
2941        transaction_index: usize,
2942        mut write_fn: F,
2943    ) -> Result<()>
2944    where
2945        F: FnMut(&Pubkey, Slot, u32, Signature, bool) -> Result<()>,
2946    {
2947        let status = status.into();
2948        let transaction_index = u32::try_from(transaction_index)
2949            .map_err(|_| BlockstoreError::TransactionIndexOverflow)?;
2950        self.transaction_status_cf
2951            .put_protobuf((signature, slot), &status)?;
2952
2953        for (address, writeable) in keys_with_writable {
2954            write_fn(address, slot, transaction_index, signature, writeable)?;
2955        }
2956
2957        Ok(())
2958    }
2959
2960    pub fn write_transaction_status<'a>(
2961        &self,
2962        slot: Slot,
2963        signature: Signature,
2964        keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
2965        status: TransactionStatusMeta,
2966        transaction_index: usize,
2967    ) -> Result<()> {
2968        self.write_transaction_status_helper(
2969            slot,
2970            signature,
2971            keys_with_writable,
2972            status,
2973            transaction_index,
2974            |address, slot, tx_index, signature, writeable| {
2975                self.address_signatures_cf.put(
2976                    (*address, slot, tx_index, signature),
2977                    &AddressSignatureMeta { writeable },
2978                )
2979            },
2980        )
2981    }
2982
2983    pub fn add_transaction_status_to_batch<'a>(
2984        &self,
2985        slot: Slot,
2986        signature: Signature,
2987        keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
2988        status: TransactionStatusMeta,
2989        transaction_index: usize,
2990        db_write_batch: &mut WriteBatch,
2991    ) -> Result<()> {
2992        self.write_transaction_status_helper(
2993            slot,
2994            signature,
2995            keys_with_writable,
2996            status,
2997            transaction_index,
2998            |address, slot, tx_index, signature, writeable| {
2999                self.address_signatures_cf.put_in_batch(
3000                    db_write_batch,
3001                    (*address, slot, tx_index, signature),
3002                    &AddressSignatureMeta { writeable },
3003                )
3004            },
3005        )
3006    }
3007
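    /// Round-trip sketch for transaction memos (not compiled as a doc-test);
    /// `blockstore`, `signature`, and `slot` are assumed to exist:
    ///
    /// ```ignore
    /// blockstore.write_transaction_memos(&signature, slot, "memo text".to_string())?;
    /// let memos = blockstore.read_transaction_memos(signature, slot)?;
    /// assert_eq!(memos.as_deref(), Some("memo text"));
    /// ```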
3008    pub fn read_transaction_memos(
3009        &self,
3010        signature: Signature,
3011        slot: Slot,
3012    ) -> Result<Option<String>> {
3013        let memos = self.transaction_memos_cf.get((signature, slot))?;
3014        if memos.is_none()
3015            && self
3016                .get_highest_primary_index_slot()
3017                .is_some_and(|highest_slot| highest_slot >= slot)
3018        {
3019            self.transaction_memos_cf
3020                .get_raw(cf::TransactionMemos::deprecated_key(signature))
3021        } else {
3022            Ok(memos)
3023        }
3024    }
3025
3026    pub fn write_transaction_memos(
3027        &self,
3028        signature: &Signature,
3029        slot: Slot,
3030        memos: String,
3031    ) -> Result<()> {
3032        self.transaction_memos_cf.put((*signature, slot), &memos)
3033    }
3034
3035    pub fn add_transaction_memos_to_batch(
3036        &self,
3037        signature: &Signature,
3038        slot: Slot,
3039        memos: String,
3040        db_write_batch: &mut WriteBatch,
3041    ) -> Result<()> {
3042        self.transaction_memos_cf
3043            .put_in_batch(db_write_batch, (*signature, slot), &memos)
3044    }
3045
3046    /// Acquires the read lock on `lowest_cleanup_slot` and returns the held
3047    /// lock guard.
3048    ///
3049    /// Returns `BlockstoreError::SlotCleanedUp` if the input `slot` has
3050    /// already been cleaned up.
3051    fn check_lowest_cleanup_slot(&self, slot: Slot) -> Result<std::sync::RwLockReadGuard<Slot>> {
3052        // lowest_cleanup_slot is the last slot that was not cleaned up by LedgerCleanupService
3053        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
3054        if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
3055            return Err(BlockstoreError::SlotCleanedUp);
3056        }
3057        // Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
3058        // needed slots here at any given moment
3059        Ok(lowest_cleanup_slot)
3060    }
3061
3062    /// Acquires the read lock on `lowest_cleanup_slot` and returns a tuple of
3063    /// the held lock guard and the lowest available slot.
3064    ///
3065    /// This function ensures a consistent result by using lowest_cleanup_slot
3066    /// as the lower bound for reading columns that do not employ strong read
3067    /// consistency with slot-based delete_range.
3068    fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard<Slot>, Slot) {
3069        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
3070        let lowest_available_slot = (*lowest_cleanup_slot)
3071            .checked_add(1)
3072            .expect("overflow from trusted value");
3073
3074        // Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
3075        // needed slots here at any given moment.
3076        // Blockstore callers, like rpc, can process concurrent read queries
3077        (lowest_cleanup_slot, lowest_available_slot)
3078    }
3079
3080    // Returns a transaction status, as well as a loop counter for unit testing
3081    fn get_transaction_status_with_counter(
3082        &self,
3083        signature: Signature,
3084        confirmed_unrooted_slots: &HashSet<Slot>,
3085    ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> {
3086        let mut counter = 0;
3087        let (lock, _) = self.ensure_lowest_cleanup_slot();
3088        let first_available_block = self.get_first_available_block()?;
3089
3090        let iterator =
3091            self.transaction_status_cf
3092                .iter_current_index_filtered(IteratorMode::From(
3093                    (signature, first_available_block),
3094                    IteratorDirection::Forward,
3095                ))?;
3096
3097        for ((sig, slot), _data) in iterator {
3098            counter += 1;
3099            if sig != signature {
3100                break;
3101            }
3102            if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) {
3103                continue;
3104            }
3105            let status = self
3106                .transaction_status_cf
3107                .get_protobuf((signature, slot))?
3108                .and_then(|status| status.try_into().ok())
3109                .map(|status| (slot, status));
3110            return Ok((status, counter));
3111        }
3112
3113        if self.get_highest_primary_index_slot().is_none() {
3114            return Ok((None, counter));
3115        }
3116        for transaction_status_cf_primary_index in 0..=1 {
3117            let index_iterator =
3118                self.transaction_status_cf
3119                    .iter_deprecated_index_filtered(IteratorMode::From(
3120                        (
3121                            transaction_status_cf_primary_index,
3122                            signature,
3123                            first_available_block,
3124                        ),
3125                        IteratorDirection::Forward,
3126                    ))?;
3127            for ((i, sig, slot), _data) in index_iterator {
3128                counter += 1;
3129                if i != transaction_status_cf_primary_index || sig != signature {
3130                    break;
3131                }
3132                if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) {
3133                    continue;
3134                }
3135                let status = self
3136                    .transaction_status_cf
3137                    .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
3138                        &cf::TransactionStatus::deprecated_key((i, signature, slot)),
3139                    )?
3140                    .and_then(|status| status.try_into().ok())
3141                    .map(|status| (slot, status));
3142                return Ok((status, counter));
3143            }
3144        }
3145        drop(lock);
3146
3147        Ok((None, counter))
3148    }
3149
3150    /// Returns a transaction status if the transaction was processed in a rooted slot
3151    pub fn get_rooted_transaction_status(
3152        &self,
3153        signature: Signature,
3154    ) -> Result<Option<(Slot, TransactionStatusMeta)>> {
3155        self.rpc_api_metrics
3156            .num_get_rooted_transaction_status
3157            .fetch_add(1, Ordering::Relaxed);
3158
3159        self.get_transaction_status(signature, &HashSet::default())
3160    }
3161
3162    /// Returns a transaction status from a rooted slot or one of the given `confirmed_unrooted_slots`
3163    pub fn get_transaction_status(
3164        &self,
3165        signature: Signature,
3166        confirmed_unrooted_slots: &HashSet<Slot>,
3167    ) -> Result<Option<(Slot, TransactionStatusMeta)>> {
3168        self.rpc_api_metrics
3169            .num_get_transaction_status
3170            .fetch_add(1, Ordering::Relaxed);
3171
3172        self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots)
3173            .map(|(status, _)| status)
3174    }
3175
3176    /// Returns a complete transaction if it was processed in a root
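    ///
    /// A minimal usage sketch (not compiled as a doc-test); `blockstore` and
    /// `signature` are assumed to exist:
    ///
    /// ```ignore
    /// if let Some(confirmed_tx) = blockstore.get_rooted_transaction(signature)? {
    ///     println!("processed in rooted slot {}", confirmed_tx.slot);
    /// }
    /// ```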
3177    pub fn get_rooted_transaction(
3178        &self,
3179        signature: Signature,
3180    ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
3181        self.rpc_api_metrics
3182            .num_get_rooted_transaction
3183            .fetch_add(1, Ordering::Relaxed);
3184
3185        self.get_transaction_with_status(signature, &HashSet::default())
3186    }
3187
3188    /// Returns a complete transaction if it was processed in a rooted slot or in an unrooted
3189    /// ancestor of `highest_confirmed_slot` (inclusive)
3189    pub fn get_complete_transaction(
3190        &self,
3191        signature: Signature,
3192        highest_confirmed_slot: Slot,
3193    ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
3194        self.rpc_api_metrics
3195            .num_get_complete_transaction
3196            .fetch_add(1, Ordering::Relaxed);
3197
3198        let max_root = self.max_root();
3199        let confirmed_unrooted_slots: HashSet<_> =
3200            AncestorIterator::new_inclusive(highest_confirmed_slot, self)
3201                .take_while(|&slot| slot > max_root)
3202                .collect();
3203        self.get_transaction_with_status(signature, &confirmed_unrooted_slots)
3204    }
3205
3206    fn get_transaction_with_status(
3207        &self,
3208        signature: Signature,
3209        confirmed_unrooted_slots: &HashSet<Slot>,
3210    ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
3211        if let Some((slot, meta)) =
3212            self.get_transaction_status(signature, confirmed_unrooted_slots)?
3213        {
3214            let transaction = self
3215                .find_transaction_in_slot(slot, signature)?
3216                .ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; // Should not happen
3217
3218            let block_time = self.get_block_time(slot)?;
3219            Ok(Some(ConfirmedTransactionWithStatusMeta {
3220                slot,
3221                tx_with_meta: TransactionWithStatusMeta::Complete(
3222                    VersionedTransactionWithStatusMeta { transaction, meta },
3223                ),
3224                block_time,
3225            }))
3226        } else {
3227            Ok(None)
3228        }
3229    }
3230
3231    fn find_transaction_in_slot(
3232        &self,
3233        slot: Slot,
3234        signature: Signature,
3235    ) -> Result<Option<VersionedTransaction>> {
3236        let slot_entries = self.get_slot_entries(slot, 0)?;
3237        Ok(slot_entries
3238            .iter()
3239            .cloned()
3240            .flat_map(|entry| entry.transactions)
3241            .map(|transaction| {
3242                if let Err(err) = transaction.sanitize() {
3243                    warn!(
3244                        "Blockstore::find_transaction_in_slot sanitize failed: {:?}, slot: {:?}, \
3245                         {:?}",
3246                        err, slot, transaction,
3247                    );
3248                }
3249                transaction
3250            })
3251            .find(|transaction| transaction.signatures[0] == signature))
3252    }
3253
3254    // DEPRECATED and decommissioned
3255    // This method always returns an empty Vec
3256    fn find_address_signatures(
3257        &self,
3258        _pubkey: Pubkey,
3259        _start_slot: Slot,
3260        _end_slot: Slot,
3261    ) -> Result<Vec<(Slot, Signature)>> {
3262        Ok(vec![])
3263    }
3264
3265    // Returns all signatures for an address in a particular slot, regardless of whether that slot
3266    // has been rooted. The transactions will be ordered by their occurrence in the block.
3267    fn find_address_signatures_for_slot(
3268        &self,
3269        pubkey: Pubkey,
3270        slot: Slot,
3271    ) -> Result<Vec<(Slot, Signature)>> {
3272        let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot();
3273        let mut signatures: Vec<(Slot, Signature)> = vec![];
3274        if slot < lowest_available_slot {
3275            return Ok(signatures);
3276        }
3277        let index_iterator =
3278            self.address_signatures_cf
3279                .iter_current_index_filtered(IteratorMode::From(
3280                    (
3281                        pubkey,
3282                        slot.max(lowest_available_slot),
3283                        0,
3284                        Signature::default(),
3285                    ),
3286                    IteratorDirection::Forward,
3287                ))?;
3288        for ((address, transaction_slot, _transaction_index, signature), _) in index_iterator {
3289            if transaction_slot > slot || address != pubkey {
3290                break;
3291            }
3292            signatures.push((slot, signature));
3293        }
3294        drop(lock);
3295        Ok(signatures)
3296    }
3297
3298    // DEPRECATED and decommissioned
3299    // This method always returns an empty Vec
3300    pub fn get_confirmed_signatures_for_address(
3301        &self,
3302        pubkey: Pubkey,
3303        start_slot: Slot,
3304        end_slot: Slot,
3305    ) -> Result<Vec<Signature>> {
3306        self.rpc_api_metrics
3307            .num_get_confirmed_signatures_for_address
3308            .fetch_add(1, Ordering::Relaxed);
3309
3310        self.find_address_signatures(pubkey, start_slot, end_slot)
3311            .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
3312    }
3313
3314    fn get_block_signatures_rev(&self, slot: Slot) -> Result<Vec<Signature>> {
3315        let block = self.get_complete_block(slot, false).map_err(|err| {
3316            BlockstoreError::Io(IoError::new(
3317                ErrorKind::Other,
3318                format!("Unable to get block: {err}"),
3319            ))
3320        })?;
3321
3322        Ok(block
3323            .transactions
3324            .into_iter()
3325            .rev()
3326            .filter_map(|transaction_with_meta| {
3327                transaction_with_meta
3328                    .transaction
3329                    .signatures
3330                    .into_iter()
3331                    .next()
3332            })
3333            .collect())
3334    }
3335
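    /// Returns signatures of transactions that mention `address`, newest first,
    /// walking backwards from `before` (exclusive) down to `until` (exclusive)
    /// or until `limit` entries have been collected.
    ///
    /// A minimal usage sketch (not compiled as a doc-test); `blockstore`,
    /// `address`, and `highest_slot` are assumed to exist:
    ///
    /// ```ignore
    /// let page = blockstore.get_confirmed_signatures_for_address2(
    ///     address,
    ///     highest_slot,
    ///     None, // before: start from the newest signature
    ///     None, // until: no lower bound
    ///     1000, // limit
    /// )?;
    /// for info in &page.infos {
    ///     println!("{} at slot {}", info.signature, info.slot);
    /// }
    /// ```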
3336    pub fn get_confirmed_signatures_for_address2(
3337        &self,
3338        address: Pubkey,
3339        highest_slot: Slot, // highest_super_majority_root or highest_confirmed_slot
3340        before: Option<Signature>,
3341        until: Option<Signature>,
3342        limit: usize,
3343    ) -> Result<SignatureInfosForAddress> {
3344        self.rpc_api_metrics
3345            .num_get_confirmed_signatures_for_address2
3346            .fetch_add(1, Ordering::Relaxed);
3347
3348        let max_root = self.max_root();
3349        let confirmed_unrooted_slots: HashSet<_> =
3350            AncestorIterator::new_inclusive(highest_slot, self)
3351                .take_while(|&slot| slot > max_root)
3352                .collect();
3353
3354        // Determine the `slot` to start listing signatures at, based on the ledger location of the
3355        // `before` signature if present.  Also generate a HashSet of signatures that should
3356        // be excluded from the results.
3357        let mut get_before_slot_timer = Measure::start("get_before_slot_timer");
3358        let (slot, mut before_excluded_signatures) = match before {
3359            None => (highest_slot, None),
3360            Some(before) => {
3361                let transaction_status =
3362                    self.get_transaction_status(before, &confirmed_unrooted_slots)?;
3363                match transaction_status {
3364                    None => return Ok(SignatureInfosForAddress::default()),
3365                    Some((slot, _)) => {
3366                        let mut slot_signatures = self.get_block_signatures_rev(slot)?;
3367                        if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
3368                            slot_signatures.truncate(pos + 1);
3369                        }
3370
3371                        (
3372                            slot,
3373                            Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
3374                        )
3375                    }
3376                }
3377            }
3378        };
3379        get_before_slot_timer.stop();
3380
3381        let first_available_block = self.get_first_available_block()?;
3382        // Generate a HashSet of signatures that should be excluded from the results based on
3383        // `until` signature
3384        let mut get_until_slot_timer = Measure::start("get_until_slot_timer");
3385        let (lowest_slot, until_excluded_signatures) = match until {
3386            None => (first_available_block, HashSet::new()),
3387            Some(until) => {
3388                let transaction_status =
3389                    self.get_transaction_status(until, &confirmed_unrooted_slots)?;
3390                match transaction_status {
3391                    None => (first_available_block, HashSet::new()),
3392                    Some((slot, _)) => {
3393                        let mut slot_signatures = self.get_block_signatures_rev(slot)?;
3394                        if let Some(pos) = slot_signatures.iter().position(|&x| x == until) {
3395                            slot_signatures = slot_signatures.split_off(pos);
3396                        }
3397
3398                        (slot, slot_signatures.into_iter().collect::<HashSet<_>>())
3399                    }
3400                }
3401            }
3402        };
3403        get_until_slot_timer.stop();
3404
3405        // Fetch the list of signatures that affect the given address
3406        let mut address_signatures = vec![];
3407
3408        // Get signatures in `slot`
3409        let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer");
3410        let mut signatures = self.find_address_signatures_for_slot(address, slot)?;
3411        signatures.reverse();
3412        if let Some(excluded_signatures) = before_excluded_signatures.take() {
3413            address_signatures.extend(
3414                signatures
3415                    .into_iter()
3416                    .filter(|(_, signature)| !excluded_signatures.contains(signature)),
3417            )
3418        } else {
3419            address_signatures.append(&mut signatures);
3420        }
3421        get_initial_slot_timer.stop();
3422
3423        let mut address_signatures_iter_timer = Measure::start("iter_timer");
3424        let mut iterator =
3425            self.address_signatures_cf
3426                .iter_current_index_filtered(IteratorMode::From(
3427                    // Regardless of whether a `before` signature is provided, the latest relevant
3428                    // `slot` is queried directly with the `find_address_signatures_for_slot()`
3429                    // call above. Thus, this iterator starts at the lowest entry of `address,
3430                    // slot` and iterates backwards to continue reporting the next earliest
3431                    // signatures.
3432                    (address, slot, 0, Signature::default()),
3433                    IteratorDirection::Reverse,
3434                ))?;
3435
3436        // Iterate until limit is reached
3437        while address_signatures.len() < limit {
3438            if let Some(((key_address, slot, _transaction_index, signature), _)) = iterator.next() {
3439                if slot < lowest_slot {
3440                    break;
3441                }
3442                if key_address == address {
3443                    if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
3444                        address_signatures.push((slot, signature));
3445                    }
3446                    continue;
3447                }
3448            }
3449            break;
3450        }
3451        address_signatures_iter_timer.stop();
3452
3453        let mut address_signatures: Vec<(Slot, Signature)> = address_signatures
3454            .into_iter()
3455            .filter(|(_, signature)| !until_excluded_signatures.contains(signature))
3456            .collect();
3457        address_signatures.truncate(limit);
3458
3459        // Fill in the status information for each found transaction
3460        let mut get_status_info_timer = Measure::start("get_status_info_timer");
3461        let mut infos = vec![];
3462        for (slot, signature) in address_signatures.into_iter() {
3463            let transaction_status =
3464                self.get_transaction_status(signature, &confirmed_unrooted_slots)?;
3465            let err = transaction_status.and_then(|(_slot, status)| status.status.err());
3466            let memo = self.read_transaction_memos(signature, slot)?;
3467            let block_time = self.get_block_time(slot)?;
3468            infos.push(ConfirmedTransactionStatusWithSignature {
3469                signature,
3470                slot,
3471                err,
3472                memo,
3473                block_time,
3474            });
3475        }
3476        get_status_info_timer.stop();
3477
3478        datapoint_info!(
3479            "blockstore-get-conf-sigs-for-addr-2",
3480            (
3481                "get_before_slot_us",
3482                get_before_slot_timer.as_us() as i64,
3483                i64
3484            ),
3485            (
3486                "get_initial_slot_us",
3487                get_initial_slot_timer.as_us() as i64,
3488                i64
3489            ),
3490            (
3491                "address_signatures_iter_us",
3492                address_signatures_iter_timer.as_us() as i64,
3493                i64
3494            ),
3495            (
3496                "get_status_info_us",
3497                get_status_info_timer.as_us() as i64,
3498                i64
3499            ),
3500            (
3501                "get_until_slot_us",
3502                get_until_slot_timer.as_us() as i64,
3503                i64
3504            )
3505        );
3506
3507        Ok(SignatureInfosForAddress {
3508            infos,
3509            found_before: true, // if `before` signature was not found, this method returned early
3510        })
3511    }
3512
3513    pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
3514        self.rewards_cf
3515            .get_protobuf_or_bincode::<Rewards>(index)
3516            .map(|result| result.map(|option| option.into()))
3517    }
3518
3519    pub fn write_rewards(&self, index: Slot, rewards: RewardsAndNumPartitions) -> Result<()> {
3520        let rewards = rewards.into();
3521        self.rewards_cf.put_protobuf(index, &rewards)
3522    }
3523
3524    pub fn get_recent_perf_samples(&self, num: usize) -> Result<Vec<(Slot, PerfSample)>> {
3525        // When reading `PerfSamples`, the database may contain samples with either `PerfSampleV1`
3526        // or `PerfSampleV2` encoding.  We expect `PerfSampleV1` to be a prefix of the
3527        // `PerfSampleV2` encoding (see [`perf_sample_v1_is_prefix_of_perf_sample_v2`]), so we try
3528        // them in order.
3529        let samples =
3530            self.perf_samples_cf
3531                .iter(IteratorMode::End)?
3532                .take(num)
3533                .map(|(slot, data)| {
3534                    deserialize::<PerfSampleV2>(&data)
3535                        .map(|sample| (slot, sample.into()))
3536                        .or_else(|err| {
3537                            match &*err {
3538                                bincode::ErrorKind::Io(io_err)
3539                                    if matches!(io_err.kind(), ErrorKind::UnexpectedEof) =>
3540                                {
3541                                    // Not enough bytes to deserialize as `PerfSampleV2`.
3542                                }
3543                                _ => return Err(err),
3544                            }
3545
3546                            deserialize::<PerfSampleV1>(&data).map(|sample| (slot, sample.into()))
3547                        })
3548                        .map_err(Into::into)
3549                });
3550
3551        samples.collect()
3552    }
3553
3554    pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSampleV2) -> Result<()> {
3555        // Always write as the current version.
3556        let bytes =
3557            serialize(&perf_sample).expect("`PerfSampleV2` can be serialized with `bincode`");
3558        self.perf_samples_cf.put_bytes(index, &bytes)
3559    }
3560
3561    pub fn read_program_costs(&self) -> Result<Vec<(Pubkey, u64)>> {
3562        Ok(self
3563            .program_costs_cf
3564            .iter(IteratorMode::End)?
3565            .map(|(pubkey, data)| {
3566                let program_cost: ProgramCost = deserialize(&data).unwrap();
3567                (pubkey, program_cost.cost)
3568            })
3569            .collect())
3570    }
3571
3572    pub fn write_program_cost(&self, key: &Pubkey, value: &u64) -> Result<()> {
3573        self.program_costs_cf
3574            .put(*key, &ProgramCost { cost: *value })
3575    }
3576
3577    pub fn delete_program_cost(&self, key: &Pubkey) -> Result<()> {
3578        self.program_costs_cf.delete(*key)
3579    }
3580
3581    /// Returns the entry vector for the slot starting with `shred_start_index`
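    ///
    /// A minimal usage sketch (not compiled as a doc-test); `blockstore` is
    /// assumed to be an open `Blockstore` and the slot number is illustrative:
    ///
    /// ```ignore
    /// let entries = blockstore.get_slot_entries(7, /*shred_start_index:*/ 0)?;
    /// let num_transactions: usize = entries.iter().map(|e| e.transactions.len()).sum();
    /// ```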
3582    pub fn get_slot_entries(&self, slot: Slot, shred_start_index: u64) -> Result<Vec<Entry>> {
3583        self.get_slot_entries_with_shred_info(slot, shred_start_index, false)
3584            .map(|x| x.0)
3585    }
3586
3587    /// Returns the entry vector for the slot starting with `shred_start_index`, the number of
3588    /// shreds that comprise the entry vector, and whether the slot is full (consumed all shreds).
3589    pub fn get_slot_entries_with_shred_info(
3590        &self,
3591        slot: Slot,
3592        start_index: u64,
3593        allow_dead_slots: bool,
3594    ) -> Result<(Vec<Entry>, u64, bool)> {
3595        let (completed_ranges, slot_meta) = self.get_completed_ranges(slot, start_index)?;
3596
3597        // Check if the slot is dead *after* fetching completed ranges to avoid a race
3598        // where a slot is marked dead by another thread before the completed range query finishes.
3599        // This should be sufficient because full slots will never be marked dead from another thread;
3600        // that can only happen during entry processing in the replay stage.
3601        if self.is_dead(slot) && !allow_dead_slots {
3602            return Err(BlockstoreError::DeadSlot);
3603        } else if completed_ranges.is_empty() {
3604            return Ok((vec![], 0, false));
3605        }
3606
3607        let slot_meta = slot_meta.unwrap();
3608        let num_shreds = completed_ranges
3609            .last()
3610            .map(|&Range { end, .. }| u64::from(end) - start_index)
3611            .unwrap_or(0);
3612
3613        let entries = self.get_slot_entries_in_block(slot, completed_ranges, Some(&slot_meta))?;
3614        Ok((entries, num_shreds, slot_meta.is_full()))
3615    }
3616
3617    /// Gets accounts used in transactions in the slot range [starting_slot, ending_slot].
3618    /// Additionally returns a bool indicating if the set may be incomplete.
3619    /// Used by ledger-tool to create a minimized snapshot.
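    ///
    /// A usage sketch (not compiled as a doc-test); `blockstore` and `bank` are
    /// assumed to exist, and the slot bounds are illustrative:
    ///
    /// ```ignore
    /// let (accounts, maybe_incomplete) =
    ///     blockstore.get_accounts_used_in_range(&bank, 100, 200);
    /// if maybe_incomplete {
    ///     eprintln!("the account set may be missing CPI-extended lookup table addresses");
    /// }
    /// ```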
3620    pub fn get_accounts_used_in_range(
3621        &self,
3622        bank: &Bank,
3623        starting_slot: Slot,
3624        ending_slot: Slot,
3625    ) -> (DashSet<Pubkey>, bool) {
3626        let result = DashSet::new();
3627        let lookup_tables = DashSet::new();
3628        let possible_cpi_alt_extend = AtomicBool::new(false);
3629
3630        fn add_to_set<'a>(set: &DashSet<Pubkey>, iter: impl IntoIterator<Item = &'a Pubkey>) {
3631            iter.into_iter().for_each(|key| {
3632                set.insert(*key);
3633            });
3634        }
3635
3636        (starting_slot..=ending_slot)
3637            .into_par_iter()
3638            .for_each(|slot| {
3639                if let Ok(entries) = self.get_slot_entries(slot, 0) {
3640                    entries.into_par_iter().for_each(|entry| {
3641                        entry.transactions.into_iter().for_each(|tx| {
3642                            if let Some(lookups) = tx.message.address_table_lookups() {
3643                                add_to_set(
3644                                    &lookup_tables,
3645                                    lookups.iter().map(|lookup| &lookup.account_key),
3646                                );
3647                            }
3648                            // Attempt to verify the transaction and load addresses from the current bank,
3649                            // or manually scan the transaction for addresses if verification fails.
3650                            if let Ok(tx) = bank.fully_verify_transaction(tx.clone()) {
3651                                add_to_set(&result, tx.message().account_keys().iter());
3652                            } else {
3653                                add_to_set(&result, tx.message.static_account_keys());
3654
3655                                let tx = SanitizedVersionedTransaction::try_from(tx)
3656                                    .expect("transaction failed to sanitize");
3657
3658                                let alt_scan_extensions = scan_transaction(&tx);
3659                                add_to_set(&result, &alt_scan_extensions.accounts);
3660                                if alt_scan_extensions.possibly_incomplete {
3661                                    possible_cpi_alt_extend.store(true, Ordering::Relaxed);
3662                                }
3663                            }
3664                        });
3665                    });
3666                }
3667            });
3668
3669        // For each unique lookup table add all accounts to the minimized set.
3670        lookup_tables.into_par_iter().for_each(|lookup_table_key| {
3671            bank.get_account(&lookup_table_key)
3672                .map(|lookup_table_account| {
3673                    add_to_set(&result, &[lookup_table_key]);
3674                    AddressLookupTable::deserialize(lookup_table_account.data()).map(|t| {
3675                        add_to_set(&result, &t.addresses[..]);
3676                    })
3677                });
3678        });
3679
3680        (result, possible_cpi_alt_extend.into_inner())
3681    }
3682
3683    fn get_completed_ranges(
3684        &self,
3685        slot: Slot,
3686        start_index: u64,
3687    ) -> Result<(CompletedRanges, Option<SlotMeta>)> {
3688        let Some(slot_meta) = self.meta_cf.get(slot)? else {
3689            return Ok((vec![], None));
3690        };
3691        // Find all the ranges for the completed data blocks
3692        let completed_ranges = Self::get_completed_data_ranges(
3693            start_index as u32,
3694            &slot_meta.completed_data_indexes,
3695            slot_meta.consumed as u32,
3696        );
3697
3698        Ok((completed_ranges, Some(slot_meta)))
3699    }
3700
3701    // Get the range of indexes [start_index, end_index] of every completed data block
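    //
    // For example (illustrative values): with `start_index = 0`,
    // `completed_data_indexes = {2, 5, 9}`, and `consumed = 10`, the completed
    // data blocks cover shred indexes 0..=2, 3..=5, and 6..=9, so the returned
    // half-open ranges are [0..3, 3..6, 6..10].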
3702    fn get_completed_data_ranges(
3703        start_index: u32,
3704        completed_data_indexes: &BTreeSet<u32>,
3705        consumed: u32,
3706    ) -> CompletedRanges {
3707        // `consumed` is the next missing shred index, but shred `i` existing in
3708        // `completed_data_indexes` implies it's not missing.
3709        assert!(!completed_data_indexes.contains(&consumed));
3710        completed_data_indexes
3711            .range(start_index..consumed)
3712            .scan(start_index, |start, &index| {
3713                let out = *start..index + 1;
3714                *start = index + 1;
3715                Some(out)
3716            })
3717            .collect()
3718    }
3719
3720    /// Fetch the entries corresponding to all of the shred indices in `completed_ranges`.
3721    /// This function takes advantage of the fact that `completed_ranges` are both
3722    /// contiguous and in sorted order. To clarify, suppose `completed_ranges` is:
3723    ///   completed_ranges = [..., (s_i..e_i), (s_{i+1}..e_{i+1}), ...]
3724    /// Then, for every i, the following hold:
3725    ///   s_i < e_i, e_i == s_{i+1}, and s_{i+1} < e_{i+1}
3726    fn get_slot_entries_in_block(
3727        &self,
3728        slot: Slot,
3729        completed_ranges: CompletedRanges,
3730        slot_meta: Option<&SlotMeta>,
3731    ) -> Result<Vec<Entry>> {
3732        debug_assert!(completed_ranges
3733            .iter()
3734            .tuple_windows()
3735            .all(|(a, b)| a.start < a.end && a.end == b.start && b.start < b.end));
3736        let maybe_panic = |index: u64| {
3737            if let Some(slot_meta) = slot_meta {
3738                if slot > self.lowest_cleanup_slot() {
3739                    panic!("Missing shred. slot: {slot}, index: {index}, slot meta: {slot_meta:?}");
3740                }
3741            }
3742        };
3743        let Some((&Range { start, .. }, &Range { end, .. })) =
3744            completed_ranges.first().zip(completed_ranges.last())
3745        else {
3746            return Ok(vec![]);
3747        };
3748        let indices = u64::from(start)..u64::from(end);
3749        let keys = indices.clone().map(|index| (slot, index));
3750        let keys = self.data_shred_cf.multi_get_keys(keys);
3751        let mut shreds =
3752            self.data_shred_cf
3753                .multi_get_bytes(&keys)
3754                .zip(indices)
3755                .map(|(shred, index)| {
3756                    shred?.ok_or_else(|| {
3757                        maybe_panic(index);
3758                        BlockstoreError::MissingShred(slot, index)
3759                    })
3760                });
3761        completed_ranges
3762            .into_iter()
3763            .map(|Range { start, end }| end - start)
3764            .map(|num_shreds| {
3765                shreds
3766                    .by_ref()
3767                    .take(num_shreds as usize)
3768                    .process_results(|shreds| Shredder::deshred(shreds))?
3769                    .map_err(|e| {
3770                        BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
3771                            format!("could not reconstruct entries buffer from shreds: {e:?}"),
3772                        )))
3773                    })
3774                    .and_then(|payload| {
3775                        bincode::deserialize::<Vec<Entry>>(&payload).map_err(|e| {
3776                            BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
3777                                format!("could not reconstruct entries: {e:?}"),
3778                            )))
3779                        })
3780                    })
3781            })
3782            .flatten_ok()
3783            .collect()
3784    }
3785
3786    pub fn get_entries_in_data_block(
3787        &self,
3788        slot: Slot,
3789        range: Range<u32>,
3790        slot_meta: Option<&SlotMeta>,
3791    ) -> Result<Vec<Entry>> {
3792        self.get_slot_entries_in_block(slot, vec![range], slot_meta)
3793    }
3794
3795    /// Performs checks on the last FEC set of a replayed slot, and returns the block_id.
3796    /// Returns:
3797    ///     - BlockstoreProcessorError::IncompleteFinalFecSet
3798    ///       if the last FEC set is not full
3799    ///     - BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet
3800    ///       if the last FEC set is not signed by retransmitters
3801    pub fn check_last_fec_set_and_get_block_id(
3802        &self,
3803        slot: Slot,
3804        bank_hash: Hash,
3805        feature_set: &FeatureSet,
3806    ) -> std::result::Result<Option<Hash>, BlockstoreProcessorError> {
3807        let results = self.check_last_fec_set(slot);
3808        let Ok(results) = results else {
3809            warn!(
3810                "Unable to check the last FEC set for slot {} {}, marking as dead: {results:?}",
3811                slot,
3812                bank_hash,
3813            );
3814            if feature_set.is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id()) {
3815                return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
3816            }
3817            return Ok(None);
3818        };
3819        // Update metrics
3820        if results.last_fec_set_merkle_root.is_none() {
3821            datapoint_warn!("incomplete_final_fec_set", ("slot", slot, i64),);
3822        }
3823        // Return block id / error based on feature flags
3824        results.get_last_fec_set_merkle_root(feature_set)
3825    }
3826
3827    /// Performs checks on the last FEC set for this slot.
3828    /// - `block_id` will be `Some(mr)` if the last `DATA_SHREDS_PER_FEC_BLOCK` data shreds of
3829    ///   `slot` have the same merkle root of `mr`, indicating they are a part of the same FEC set.
3830    ///   This indicates that the last FEC set is sufficiently sized.
3831    /// - `is_retransmitter_signed` will be true if the last `DATA_SHREDS_PER_FEC_BLOCK`
3832    ///   data shreds of `slot` are of the retransmitter variant. Since we already discard
3833    ///   invalid signatures on ingestion, this indicates that the last FEC set is properly
3834    ///   signed by retransmitters.
3835    ///
3836    /// Will error if:
3837    ///     - Slot meta is missing
3838    ///     - LAST_SHRED_IN_SLOT flag has not been received
3839    ///     - There are missing shreds in the last fec set
3840    ///     - The block contains legacy shreds
3841    fn check_last_fec_set(&self, slot: Slot) -> Result<LastFECSetCheckResults> {
3842        // We need to check that the last FEC set contains at least `DATA_SHREDS_PER_FEC_BLOCK` data shreds.
3843        // We compare the merkle roots of the last `DATA_SHREDS_PER_FEC_BLOCK` shreds in this block.
3844        // Since the merkle root contains the fec_set_index, if all of them match, we know that the last fec set has
3845        // at least `DATA_SHREDS_PER_FEC_BLOCK` shreds.
3846        let slot_meta = self.meta(slot)?.ok_or(BlockstoreError::SlotUnavailable)?;
3847        let last_shred_index = slot_meta
3848            .last_index
3849            .ok_or(BlockstoreError::UnknownLastIndex(slot))?;
3850
3851        const MINIMUM_INDEX: u64 = DATA_SHREDS_PER_FEC_BLOCK as u64 - 1;
3852        #[cfg(test)]
3853        static_assertions::const_assert_eq!(MINIMUM_INDEX, 31);
3854        let Some(start_index) = last_shred_index.checked_sub(MINIMUM_INDEX) else {
3855            warn!("Slot {slot} has only {} shreds, fewer than the {DATA_SHREDS_PER_FEC_BLOCK} required", last_shred_index + 1);
3856            return Ok(LastFECSetCheckResults {
3857                last_fec_set_merkle_root: None,
3858                is_retransmitter_signed: false,
3859            });
3860        };
3861        let keys = self
3862            .data_shred_cf
3863            .multi_get_keys((start_index..=last_shred_index).map(|index| (slot, index)));
3864
3865        let deduped_shred_checks: Vec<(Hash, bool)> = self
3866            .data_shred_cf
3867            .multi_get_bytes(&keys)
3868            .enumerate()
3869            .map(|(offset, shred_bytes)| {
3870                let shred_bytes = shred_bytes.ok().flatten().ok_or_else(|| {
3871                    let shred_index = start_index + u64::try_from(offset).unwrap();
3872                    warn!("Missing shred for {slot} index {shred_index}");
3873                    BlockstoreError::MissingShred(slot, shred_index)
3874                })?;
3875                let is_retransmitter_signed =
3876                    shred::layout::is_retransmitter_signed_variant(&shred_bytes).map_err(|_| {
3877                        let shred_index = start_index + u64::try_from(offset).unwrap();
3878                        warn!("Found legacy shred for {slot}, index {shred_index}");
3879                        BlockstoreError::LegacyShred(slot, shred_index)
3880                    })?;
3881                let merkle_root =
3882                    shred::layout::get_merkle_root(&shred_bytes).ok_or_else(|| {
3883                        let shred_index = start_index + u64::try_from(offset).unwrap();
3884                        warn!("Unable to read merkle root for {slot}, index {shred_index}");
3885                        BlockstoreError::MissingMerkleRoot(slot, shred_index)
3886                    })?;
3887                Ok((merkle_root, is_retransmitter_signed))
3888            })
3889            .dedup_by(|res1, res2| res1.as_ref().ok() == res2.as_ref().ok())
3890            .collect::<Result<Vec<(Hash, bool)>>>()?;
3891
3892        // After dedup, a consistent last FEC set leaves exactly one (merkle root, is_retransmitter_signed) pair
3893        let &[(block_id, is_retransmitter_signed)] = deduped_shred_checks.as_slice() else {
3894            return Ok(LastFECSetCheckResults {
3895                last_fec_set_merkle_root: None,
3896                is_retransmitter_signed: false,
3897            });
3898        };
3899        Ok(LastFECSetCheckResults {
3900            last_fec_set_merkle_root: Some(block_id),
3901            is_retransmitter_signed,
3902        })
3903    }
3904
3905    /// Returns a mapping from each element of `slots` to a list of the
3906    /// element's child slots.
3907    pub fn get_slots_since(&self, slots: &[Slot]) -> Result<HashMap<Slot, Vec<Slot>>> {
3908        let keys = self.meta_cf.multi_get_keys(slots.iter().copied());
3909        let slot_metas = self.meta_cf.multi_get(&keys);
3910
3911        let mut slots_since: HashMap<Slot, Vec<Slot>> = HashMap::with_capacity(slots.len());
3912        for meta in slot_metas.into_iter() {
3913            let meta = meta?;
3914            if let Some(meta) = meta {
3915                slots_since.insert(meta.slot, meta.next_slots);
3916            }
3917        }
3918
3919        Ok(slots_since)
3920    }
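
    // Usage sketch (illustrative, not in the original source): assuming a
    // `blockstore` in which slot 1 has children 2 and 3, querying `&[1]`
    // returns that fork point:
    //
    //     let children = blockstore.get_slots_since(&[1])?;
    //     assert_eq!(children.get(&1), Some(&vec![2, 3]));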
3921
3922    pub fn is_root(&self, slot: Slot) -> bool {
3923        matches!(self.roots_cf.get(slot), Ok(Some(true)))
3924    }
3925
3926    /// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself
3927    /// been rooted. This is either because the slot was skipped, or due to a gap in ledger data,
3928    /// as when booting from a newer snapshot.
3929    pub fn is_skipped(&self, slot: Slot) -> bool {
3930        let lowest_root = self
3931            .rooted_slot_iterator(0)
3932            .ok()
3933            .and_then(|mut iter| iter.next())
3934            .unwrap_or_default();
3935        match self.roots_cf.get(slot).ok().flatten() {
3936            Some(_) => false,
3937            None => slot < self.max_root() && slot > lowest_root,
3938        }
3939    }
3940
3941    pub fn insert_bank_hash(&self, slot: Slot, frozen_hash: Hash, is_duplicate_confirmed: bool) {
3942        if let Some(prev_value) = self.bank_hash_cf.get(slot).unwrap() {
3943            if prev_value.frozen_hash() == frozen_hash && prev_value.is_duplicate_confirmed() {
3944                // Don't overwrite is_duplicate_confirmed == true with is_duplicate_confirmed == false,
3945        // which may happen on startup when processing from the blockstore processor because the
3946                // blocks may not reflect earlier observed gossip votes from before the restart.
3947                return;
3948            }
3949        }
3950        let data = FrozenHashVersioned::Current(FrozenHashStatus {
3951            frozen_hash,
3952            is_duplicate_confirmed,
3953        });
3954        self.bank_hash_cf.put(slot, &data).unwrap()
3955    }
3956
3957    pub fn get_bank_hash(&self, slot: Slot) -> Option<Hash> {
3958        self.bank_hash_cf
3959            .get(slot)
3960            .unwrap()
3961            .map(|versioned| versioned.frozen_hash())
3962    }
3963
3964    pub fn is_duplicate_confirmed(&self, slot: Slot) -> bool {
3965        self.bank_hash_cf
3966            .get(slot)
3967            .unwrap()
3968            .map(|versioned| versioned.is_duplicate_confirmed())
3969            .unwrap_or(false)
3970    }
3971
3972    pub fn insert_optimistic_slot(
3973        &self,
3974        slot: Slot,
3975        hash: &Hash,
3976        timestamp: UnixTimestamp,
3977    ) -> Result<()> {
3978        let slot_data = OptimisticSlotMetaVersioned::new(*hash, timestamp);
3979        self.optimistic_slots_cf.put(slot, &slot_data)
3980    }
3981
3982    /// Returns information about a single optimistically confirmed slot
3983    pub fn get_optimistic_slot(&self, slot: Slot) -> Result<Option<(Hash, UnixTimestamp)>> {
3984        Ok(self
3985            .optimistic_slots_cf
3986            .get(slot)?
3987            .map(|meta| (meta.hash(), meta.timestamp())))
3988    }
3989
3990    /// Returns information about the `num` latest optimistically confirmed slots
3991    pub fn get_latest_optimistic_slots(
3992        &self,
3993        num: usize,
3994    ) -> Result<Vec<(Slot, Hash, UnixTimestamp)>> {
3995        let iter = self.reversed_optimistic_slots_iterator()?;
3996        Ok(iter.take(num).collect())
3997    }
3998
3999    pub fn set_duplicate_confirmed_slots_and_hashes(
4000        &self,
4001        duplicate_confirmed_slot_hashes: impl Iterator<Item = (Slot, Hash)>,
4002    ) -> Result<()> {
4003        let mut write_batch = self.get_write_batch()?;
4004        for (slot, frozen_hash) in duplicate_confirmed_slot_hashes {
4005            let data = FrozenHashVersioned::Current(FrozenHashStatus {
4006                frozen_hash,
4007                is_duplicate_confirmed: true,
4008            });
4009            self.bank_hash_cf
4010                .put_in_batch(&mut write_batch, slot, &data)?;
4011        }
4012
4013        self.write_batch(write_batch)?;
4014        Ok(())
4015    }
4016
4017    pub fn set_roots<'a>(&self, rooted_slots: impl Iterator<Item = &'a Slot>) -> Result<()> {
4018        let mut write_batch = self.get_write_batch()?;
4019        let mut max_new_rooted_slot = 0;
4020        for slot in rooted_slots {
4021            max_new_rooted_slot = std::cmp::max(max_new_rooted_slot, *slot);
4022            self.roots_cf.put_in_batch(&mut write_batch, *slot, &true)?;
4023        }
4024
4025        self.write_batch(write_batch)?;
4026        self.max_root
4027            .fetch_max(max_new_rooted_slot, Ordering::Relaxed);
4028        Ok(())
4029    }
4030
4031    pub fn mark_slots_as_if_rooted_normally_at_startup(
4032        &self,
4033        slots: Vec<(Slot, Option<Hash>)>,
4034        with_hash: bool,
4035    ) -> Result<()> {
4036        self.set_roots(slots.iter().map(|(slot, _hash)| slot))?;
4037        if with_hash {
4038            self.set_duplicate_confirmed_slots_and_hashes(
4039                slots
4040                    .into_iter()
4041                    .map(|(slot, maybe_hash)| (slot, maybe_hash.unwrap())),
4042            )?;
4043        }
4044        Ok(())
4045    }
4046
4047    pub fn is_dead(&self, slot: Slot) -> bool {
4048        matches!(
4049            self.dead_slots_cf
4050                .get(slot)
4051                .expect("fetch from DeadSlots column family failed"),
4052            Some(true)
4053        )
4054    }
4055
4056    pub fn set_dead_slot(&self, slot: Slot) -> Result<()> {
4057        self.dead_slots_cf.put(slot, &true)
4058    }
4059
4060    pub fn remove_dead_slot(&self, slot: Slot) -> Result<()> {
4061        self.dead_slots_cf.delete(slot)
4062    }
4063
4064    pub fn remove_slot_duplicate_proof(&self, slot: Slot) -> Result<()> {
4065        self.duplicate_slots_cf.delete(slot)
4066    }
4067
4068    pub fn get_first_duplicate_proof(&self) -> Option<(Slot, DuplicateSlotProof)> {
4069        let mut iter = self
4070            .duplicate_slots_cf
4071            .iter(IteratorMode::From(0, IteratorDirection::Forward))
4072            .unwrap();
4073        iter.next()
4074            .map(|(slot, proof_bytes)| (slot, deserialize(&proof_bytes).unwrap()))
4075    }
4076
4077    pub fn store_duplicate_slot<S, T>(&self, slot: Slot, shred1: S, shred2: T) -> Result<()>
4078    where
4079        shred::Payload: From<S> + From<T>,
4080    {
4081        let duplicate_slot_proof = DuplicateSlotProof::new(shred1, shred2);
4082        self.duplicate_slots_cf.put(slot, &duplicate_slot_proof)
4083    }
4084
4085    pub fn get_duplicate_slot(&self, slot: u64) -> Option<DuplicateSlotProof> {
4086        self.duplicate_slots_cf
4087            .get(slot)
4088            .expect("fetch from DuplicateSlots column family failed")
4089    }
4090
4091    /// Returns the shred already stored in blockstore if it has a different
4092    /// payload than the given `shred` but the same (slot, index, shred-type).
4093    /// This implies the leader generated two different shreds with the same
4094    /// slot, index and shred-type.
4095    /// The payload is modified so that it has the same retransmitter's
4096    /// signature as the `shred` argument.
4097    pub fn is_shred_duplicate(&self, shred: &Shred) -> Option<Vec<u8>> {
4098        let (slot, index, shred_type) = shred.id().unpack();
4099        let mut other = match shred_type {
4100            ShredType::Data => self.get_data_shred(slot, u64::from(index)),
4101            ShredType::Code => self.get_coding_shred(slot, u64::from(index)),
4102        }
4103        .expect("fetch from DuplicateSlots column family failed")?;
4104        if let Ok(signature) = shred.retransmitter_signature() {
4105            if let Err(err) = shred::layout::set_retransmitter_signature(&mut other, &signature) {
4106                error!("set retransmitter signature failed: {err:?}");
4107            }
4108        }
4109        (other != **shred.payload()).then_some(other)
4110    }
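
    // Usage sketch (illustrative, not in the original source): when a
    // conflicting payload comes back, a caller can persist a duplicate slot
    // proof, assuming `shred::Payload: From<Vec<u8>>` holds for the returned
    // bytes:
    //
    //     if let Some(conflicting_payload) = blockstore.is_shred_duplicate(&shred) {
    //         // Same (slot, index, shred-type) but a different payload: the
    //         // leader equivocated, so store the proof.
    //         blockstore.store_duplicate_slot(
    //             shred.slot(),
    //             conflicting_payload,
    //             shred.into_payload(),
    //         )?;
    //     }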
4111
4112    pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool {
4113        self.duplicate_slots_cf
4114            .get(slot)
4115            .expect("fetch from DuplicateSlots column family failed")
4116            .is_some()
4117    }
4118
4119    pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
4120        let orphans_iter = self
4121            .orphans_cf
4122            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4123        Ok(orphans_iter.map(|(slot, _)| slot))
4124    }
4125
4126    pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
4127        let dead_slots_iterator = self
4128            .dead_slots_cf
4129            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4130        Ok(dead_slots_iterator.map(|(slot, _)| slot))
4131    }
4132
4133    pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
4134        let duplicate_slots_iterator = self
4135            .duplicate_slots_cf
4136            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4137        Ok(duplicate_slots_iterator.map(|(slot, _)| slot))
4138    }
4139
4140    pub fn has_existing_shreds_for_slot(&self, slot: Slot) -> bool {
4141        match self.meta(slot).unwrap() {
4142            Some(meta) => meta.received > 0,
4143            None => false,
4144        }
4145    }
4146
4147    /// Returns the max root or 0 if it does not exist
4148    pub fn max_root(&self) -> Slot {
4149        self.max_root.load(Ordering::Relaxed)
4150    }
4151
4152    #[deprecated(
4153        since = "1.18.0",
4154        note = "Please use `solana_ledger::blockstore::Blockstore::max_root()` instead"
4155    )]
4156    pub fn last_root(&self) -> Slot {
4157        self.max_root()
4158    }
4159
4160    // Find the first available slot in the blockstore that has some data in it
4161    pub fn lowest_slot(&self) -> Slot {
4162        for (slot, meta) in self
4163            .slot_meta_iterator(0)
4164            .expect("unable to iterate over meta")
4165        {
4166            if slot > 0 && meta.received > 0 {
4167                return slot;
4168            }
4169        }
4170        // This means the blockstore is empty; we should never get here aside from right at boot.
4171        self.max_root()
4172    }
4173
4174    fn lowest_slot_with_genesis(&self) -> Slot {
4175        for (slot, meta) in self
4176            .slot_meta_iterator(0)
4177            .expect("unable to iterate over meta")
4178        {
4179            if meta.received > 0 {
4180                return slot;
4181            }
4182        }
4183        // This means the blockstore is empty; we should never get here aside from right at boot.
4184        self.max_root()
4185    }
4186
4187    /// Returns the highest available slot in the blockstore
4188    pub fn highest_slot(&self) -> Result<Option<Slot>> {
4189        let highest_slot = self
4190            .meta_cf
4191            .iter(IteratorMode::End)?
4192            .next()
4193            .map(|(slot, _)| slot);
4194        Ok(highest_slot)
4195    }
4196
4197    pub fn lowest_cleanup_slot(&self) -> Slot {
4198        *self.lowest_cleanup_slot.read().unwrap()
4199    }
4200
4201    pub fn storage_size(&self) -> Result<u64> {
4202        self.db.storage_size()
4203    }
4204
4205    /// Returns the total physical storage size contributed by all data shreds.
4206    ///
4207    /// Note that the reported size does not include those recently inserted
4208    /// shreds that are still in memory.
4209    pub fn total_data_shred_storage_size(&self) -> Result<i64> {
4210        self.data_shred_cf
4211            .get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
4212    }
4213
4214    /// Returns the total physical storage size contributed by all coding shreds.
4215    ///
4216    /// Note that the reported size does not include those recently inserted
4217    /// shreds that are still in memory.
4218    pub fn total_coding_shred_storage_size(&self) -> Result<i64> {
4219        self.code_shred_cf
4220            .get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
4221    }
4222
4223    /// Returns whether the blockstore has primary (read and write) access
4224    pub fn is_primary_access(&self) -> bool {
4225        self.db.is_primary_access()
4226    }
4227
4228    /// Scan for any ancestors of the supplied `start_root` that are not
4229    /// marked as roots themselves. Mark any found slots as roots since
4230    /// the ancestor of a root is also inherently a root. Returns the
4231    /// number of slots that were actually updated.
4232    ///
4233    /// Arguments:
4234    ///  - `start_root`: The root to start scan from, or the highest root in
4235    ///    the blockstore if this value is `None`. This slot must be a root.
4236    ///  - `end_slot`: The slot to stop the scan at; the scan will continue to
4237    ///    the earliest slot in the Blockstore if this value is `None`.
4238    ///  - `exit`: Exit early if this flag is set to `true`.
4239    pub fn scan_and_fix_roots(
4240        &self,
4241        start_root: Option<Slot>,
4242        end_slot: Option<Slot>,
4243        exit: &AtomicBool,
4244    ) -> Result<usize> {
4245        // Hold the lowest_cleanup_slot read lock to prevent any cleaning of
4246        // the blockstore from another thread. Doing so will prevent a
4247        // possible inconsistency across column families where a slot is:
4248        //  - Identified as needing root repair by this thread
4249        //  - Cleaned from the blockstore by another thread (LedgerCleanupService)
4250        //  - Marked as root via Self::set_roots() by this thread
4251        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
4252
4253        let start_root = if let Some(slot) = start_root {
4254            if !self.is_root(slot) {
4255                return Err(BlockstoreError::SlotNotRooted);
4256            }
4257            slot
4258        } else {
4259            self.max_root()
4260        };
4261        let end_slot = end_slot.unwrap_or(*lowest_cleanup_slot);
4262        let ancestor_iterator =
4263            AncestorIterator::new(start_root, self).take_while(|&slot| slot >= end_slot);
4264
4265        let mut find_missing_roots = Measure::start("find_missing_roots");
4266        let mut roots_to_fix = vec![];
4267        for slot in ancestor_iterator.filter(|slot| !self.is_root(*slot)) {
4268            if exit.load(Ordering::Relaxed) {
4269                return Ok(0);
4270            }
4271            roots_to_fix.push(slot);
4272        }
4273        find_missing_roots.stop();
4274        let mut fix_roots = Measure::start("fix_roots");
4275        if !roots_to_fix.is_empty() {
4276            info!("{} slots to be rooted", roots_to_fix.len());
4277            let chunk_size = 100;
4278            for (i, chunk) in roots_to_fix.chunks(chunk_size).enumerate() {
4279                if exit.load(Ordering::Relaxed) {
4280                    return Ok(i * chunk_size);
4281                }
4282                trace!("{:?}", chunk);
4283                self.set_roots(chunk.iter())?;
4284            }
4285        } else {
4286            debug!("No missing roots found in range {start_root} to {end_slot}");
4287        }
4288        fix_roots.stop();
4289        datapoint_info!(
4290            "blockstore-scan_and_fix_roots",
4291            (
4292                "find_missing_roots_us",
4293                find_missing_roots.as_us() as i64,
4294                i64
4295            ),
4296            ("num_roots_to_fix", roots_to_fix.len() as i64, i64),
4297            ("fix_roots_us", fix_roots.as_us() as i64, i64),
4298        );
4299        Ok(roots_to_fix.len())
4300    }
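
    // Usage sketch (illustrative, not in the original source): given a
    // `blockstore` handle, repair any unrooted ancestors of the current max
    // root, with no explicit bounds and no early exit requested:
    //
    //     let exit = AtomicBool::new(false);
    //     let num_fixed = blockstore.scan_and_fix_roots(None, None, &exit)?;
    //     info!("marked {num_fixed} ancestor slots as roots");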
4301
4302    /// Mark a root `slot` as connected, traverse `slot`'s children and update
4303    /// the children's connected status if appropriate.
4304    ///
4305    /// A ledger with a full path of blocks from genesis to the latest root will
4306    /// have all of the rooted blocks marked as connected such that new blocks
4307    /// could also be connected. However, starting from some root (such as from
4308    /// a snapshot) is a valid way to join a cluster. For this case, mark this
4309    /// root as connected such that the node that joined midway through can
4310    /// have their slots considered connected.
4311    pub fn set_and_chain_connected_on_root_and_next_slots(&self, root: Slot) -> Result<()> {
4312        let mut root_meta = self
4313            .meta(root)?
4314            .unwrap_or_else(|| SlotMeta::new(root, None));
4315        // If the slot was already connected, there is nothing to do as this slot's
4316        // children are also assumed to be appropriately connected
4317        if root_meta.is_connected() {
4318            return Ok(());
4319        }
4320        info!(
4321            "Marking slot {} and any full children slots as connected",
4322            root
4323        );
4324        let mut write_batch = self.get_write_batch()?;
4325
4326        // Mark both connected bits on the root slot so that the flags for this
4327        // slot match the flags of slots that become connected the typical way.
4328        root_meta.set_parent_connected();
4329        root_meta.set_connected();
4330        self.meta_cf
4331            .put_in_batch(&mut write_batch, root_meta.slot, &root_meta)?;
4332
4333        let mut next_slots = VecDeque::from(root_meta.next_slots);
4334        while !next_slots.is_empty() {
4335            let slot = next_slots.pop_front().unwrap();
4336            let mut meta = self.meta(slot)?.unwrap_or_else(|| {
4337                panic!("Slot {slot} is a child but has no SlotMeta in blockstore")
4338            });
4339
4340            if meta.set_parent_connected() {
4341                next_slots.extend(meta.next_slots.iter());
4342            }
4343            self.meta_cf
4344                .put_in_batch(&mut write_batch, meta.slot, &meta)?;
4345        }
4346
4347        self.write_batch(write_batch)?;
4348        Ok(())
4349    }
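
    // Usage sketch (illustrative, not in the original source): a node that
    // booted from a snapshot rooted at a hypothetical `snapshot_root` can
    // seed the connected flags so that descendant slots become eligible for
    // replay:
    //
    //     blockstore.set_and_chain_connected_on_root_and_next_slots(snapshot_root)?;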
4350
4351    /// For each entry in `working_set` whose `did_insert_occur` is true, this
4352    /// function handles its chaining effect by updating the SlotMeta of both
4353    /// the slot and its parent slot to reflect the slot descends from the
4354    /// parent slot.  In addition, when a slot is newly connected, it also
4355    /// checks whether any of its direct and indirect children slots are connected
4356    /// or not.
4357    ///
4358    /// This function may update column families [`cf::SlotMeta`] and
4359    /// [`cf::Orphans`].
4360    ///
4361    /// For more information about the chaining, check the previous discussion here:
4362    /// https://github.com/solana-labs/solana/pull/2253
4363    ///
4364    /// Arguments:
4366    /// - `write_batch`: the write batch which includes all the updates of
4367    ///   the current write and ensures their atomicity.
4368    /// - `working_set`: a slot-id to SlotMetaWorkingSetEntry map.  This function
4369    ///   will remove all entries for which insertion did not actually occur.
4370    fn handle_chaining(
4371        &self,
4372        write_batch: &mut WriteBatch,
4373        working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
4374        metrics: &mut BlockstoreInsertionMetrics,
4375    ) -> Result<()> {
4376        let mut start = Measure::start("Shred chaining");
4377        // Handle chaining for all the SlotMetas that were inserted into the blockstore
4378        working_set.retain(|_, entry| entry.did_insert_occur);
4379        let mut new_chained_slots = HashMap::new();
4380        let working_set_slots: Vec<_> = working_set.keys().collect();
4381        for slot in working_set_slots {
4382            self.handle_chaining_for_slot(write_batch, working_set, &mut new_chained_slots, *slot)?;
4383        }
4384
4385        // Write all the newly changed slots in new_chained_slots to the write_batch
4386        for (slot, meta) in new_chained_slots.iter() {
4387            let meta: &SlotMeta = &RefCell::borrow(meta);
4388            self.meta_cf.put_in_batch(write_batch, *slot, meta)?;
4389        }
4390        start.stop();
4391        metrics.chaining_elapsed_us += start.as_us();
4392        Ok(())
4393    }
4394
4395    /// A helper function of handle_chaining which handles the chaining based
4396    /// on the `SlotMetaWorkingSetEntry` of the specified `slot`.  Specifically,
4397    /// it handles the following two things:
4398    ///
4399    /// 1. based on the `SlotMetaWorkingSetEntry` for `slot`, check if `slot`
4400    ///    did not previously have a parent slot but does now.  If `slot` satisfies
4401    ///    this condition, update the Orphan property of both `slot` and its parent
4402    ///    slot based on their current orphan status.  Specifically:
4403    ///  - updates the orphan property of slot to no longer be an orphan because
4404    ///    it has a parent.
4405    ///  - adds the parent to the orphan column family if the parent's parent is
4406    ///    currently unknown.
4407    ///
4408    /// 2. if the `SlotMetaWorkingSetEntry` for `slot` indicates this slot
4409    ///    is newly connected to a parent slot, then this function will update
4410    ///    the is_connected property of all its direct and indirect children slots.
4411    ///
4412    /// This function may update column family [`cf::Orphans`] and indirectly
4413    /// update SlotMeta from its output parameter `new_chained_slots`.
4414    ///
4415    /// Arguments:
4417    /// `write_batch`: the write batch which includes all the updates of
4418    ///   the current write and ensures their atomicity.
4419    /// `working_set`: the working set which includes the specified `slot`
4420    /// `new_chained_slots`: an output parameter which includes all the slots
4421    ///   whose connectivity has been updated.
4422    /// `slot`: the slot which we want to handle its chaining effect.
4423    fn handle_chaining_for_slot(
4424        &self,
4425        write_batch: &mut WriteBatch,
4426        working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
4427        new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4428        slot: Slot,
4429    ) -> Result<()> {
4430        let slot_meta_entry = working_set
4431            .get(&slot)
4432            .expect("Slot must exist in the working_set hashmap");
4433
4434        let meta = &slot_meta_entry.new_slot_meta;
4435        let meta_backup = &slot_meta_entry.old_slot_meta;
4436        {
4437            let mut meta_mut = meta.borrow_mut();
4438            let was_orphan_slot =
4439                meta_backup.is_some() && meta_backup.as_ref().unwrap().is_orphan();
4440
4441            // If:
4442            // 1) This is a new slot
4443            // 2) slot != 0
4444            // then try to chain this slot to a previous slot
4445            if slot != 0 && meta_mut.parent_slot.is_some() {
4446                let prev_slot = meta_mut.parent_slot.unwrap();
4447
4448                // Check if the slot represented by meta_mut is either a new slot or an orphan.
4449                // In both cases we need to run the chaining logic b/c the parent on the slot was
4450                // previously unknown.
4451                if meta_backup.is_none() || was_orphan_slot {
4452                    let prev_slot_meta =
4453                        self.find_slot_meta_else_create(working_set, new_chained_slots, prev_slot)?;
4454
4455                    // This is a newly inserted slot/orphan so run the chaining logic to link it to a
4456                    // newly discovered parent
4457                    chain_new_slot_to_prev_slot(
4458                        &mut prev_slot_meta.borrow_mut(),
4459                        slot,
4460                        &mut meta_mut,
4461                    );
4462
4463                    // If the parent of `slot` is a newly inserted orphan, insert it into the orphans
4464                    // column family
4465                    if RefCell::borrow(&*prev_slot_meta).is_orphan() {
4466                        self.orphans_cf
4467                            .put_in_batch(write_batch, prev_slot, &true)?;
4468                    }
4469                }
4470            }
4471
4472            // At this point this slot has received a parent, so it's no longer an orphan
4473            if was_orphan_slot {
4474                self.orphans_cf.delete_in_batch(write_batch, slot)?;
4475            }
4476        }
4477
4478        // If this is a newly completed slot and the parent is connected, then the
4479        // slot is now connected. Mark the slot as connected, and then traverse the
4480        // children to update their parent_connected and connected status.
4481        let should_propagate_is_connected =
4482            is_newly_completed_slot(&RefCell::borrow(meta), meta_backup)
4483                && RefCell::borrow(meta).is_parent_connected();
4484
4485        if should_propagate_is_connected {
4486            meta.borrow_mut().set_connected();
4487            self.traverse_children_mut(
4488                meta,
4489                working_set,
4490                new_chained_slots,
4491                SlotMeta::set_parent_connected,
4492            )?;
4493        }
4494
4495        Ok(())
4496    }
4497
4498    /// Traverse all the children (direct and indirect) of `slot_meta`, and apply
4499    /// `slot_function` to each of the children (but not `slot_meta`).
4500    ///
4501    /// Arguments:
4503    /// `slot_meta`: the SlotMeta whose children will be traversed.
4504    /// `working_set`: a slot-id to SlotMetaWorkingSetEntry map which is used
4505    ///   to traverse the graph.
4506    /// `passed_visited_slots`: all the traversed slots which have passed the
4507    ///   slot_function.  This may also include the input `slot`.
4508    /// `slot_function`: a function which updates the SlotMeta of the visited
4509    ///   slots and determines whether to further traverse the children slots of
4510    ///   a given slot.
4511    fn traverse_children_mut<F>(
4512        &self,
4513        slot_meta: &Rc<RefCell<SlotMeta>>,
4514        working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
4515        passed_visited_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4516        slot_function: F,
4517    ) -> Result<()>
4518    where
4519        F: Fn(&mut SlotMeta) -> bool,
4520    {
4521        let slot_meta = slot_meta.borrow();
4522        let mut next_slots: VecDeque<u64> = slot_meta.next_slots.to_vec().into();
4523        while !next_slots.is_empty() {
4524            let slot = next_slots.pop_front().unwrap();
4525            let meta_ref =
4526                self.find_slot_meta_else_create(working_set, passed_visited_slots, slot)?;
4527            let mut meta = meta_ref.borrow_mut();
4528            if slot_function(&mut meta) {
4529                meta.next_slots
4530                    .iter()
4531                    .for_each(|slot| next_slots.push_back(*slot));
4532            }
4533        }
4534        Ok(())
4535    }
4536
4537    /// For each slot in the slot_meta_working_set which has any change, include
4538    /// corresponding updates to cf::SlotMeta via the specified `write_batch`.
4539    /// The `write_batch` will later be atomically committed to the blockstore.
4540    ///
4541    /// Arguments:
4542    /// - `slot_meta_working_set`: a map that maintains slot-id to its `SlotMeta`
4543    ///   mapping.
4544    /// - `write_batch`: the write batch which includes all the updates of
4545    ///   the current write and ensures their atomicity.
4546    ///
4547    /// On success, the function returns an Ok result with a (should_signal,
4548    /// newly_completed_slots) pair where:
4549    ///  - `should_signal`: a boolean flag indicating whether to send signal.
4550    ///  - `newly_completed_slots`: a subset of slot_meta_working_set which are
4551    ///    newly completed.
4552    fn commit_slot_meta_working_set(
4553        &self,
4554        slot_meta_working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
4555        write_batch: &mut WriteBatch,
4556    ) -> Result<(bool, Vec<u64>)> {
4557        let mut should_signal = false;
4558        let mut newly_completed_slots = vec![];
4559        let completed_slots_senders = self.completed_slots_senders.lock().unwrap();
4560
4561        // Check if any metadata was changed, if so, insert the new version of the
4562        // metadata into the write batch
4563        for (slot, slot_meta_entry) in slot_meta_working_set.iter() {
4564            // Any slot that wasn't written to should have been filtered out by now.
4565            assert!(slot_meta_entry.did_insert_occur);
4566            let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta);
4567            let meta_backup = &slot_meta_entry.old_slot_meta;
4568            if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) {
4569                newly_completed_slots.push(*slot);
4570            }
4571            // Check if the working copy of the metadata has changed
4572            if Some(meta) != meta_backup.as_ref() {
4573                should_signal = should_signal || slot_has_updates(meta, meta_backup);
4574                self.meta_cf.put_in_batch(write_batch, *slot, meta)?;
4575            }
4576        }
4577
4578        Ok((should_signal, newly_completed_slots))
4579    }
4580
4581    /// Obtain the SlotMeta from the in-memory slot_meta_working_set or load
4582    /// it from the database if it does not exist in slot_meta_working_set.
4583    ///
4584    /// In case none of the above has the specified SlotMeta, a new one will
4585    /// be created.
4586    ///
4587    /// Note that this function will also update the parent slot of the specified
4588    /// slot.
4589    ///
4590    /// Arguments:
4592    /// - `slot_meta_working_set`: an in-memory structure for storing the cached
4593    ///   SlotMeta.
4594    /// - `slot`: the slot for loading its meta.
4595    /// - `parent_slot`: the parent slot to be assigned to the specified slot meta
4596    ///
4597    /// This function returns the matched `SlotMetaWorkingSetEntry`.  If such an
4598    /// entry does not exist in the database, a new entry will be created.
4599    fn get_slot_meta_entry<'a>(
4600        &self,
4601        slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>,
4602        slot: Slot,
4603        parent_slot: Slot,
4604    ) -> &'a mut SlotMetaWorkingSetEntry {
4605        // Check if we've already inserted the slot metadata for this shred's slot
4606        slot_meta_working_set.entry(slot).or_insert_with(|| {
4607            // Store a 2-tuple of the metadata (working copy, backup copy)
4608            if let Some(mut meta) = self
4609                .meta_cf
4610                .get(slot)
4611                .expect("Expect database get to succeed")
4612            {
4613                let backup = Some(meta.clone());
4614                // If parent_slot == None, then this is one of the orphans inserted
4615                // during the chaining process; see the function find_slot_meta_in_cached_state()
4616                // for details. Slots that are orphans are missing a parent_slot, so we should
4617                // fill in the parent now that we know it.
4618                if meta.is_orphan() {
4619                    meta.parent_slot = Some(parent_slot);
4620                }
4621
4622                SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup)
4623            } else {
4624                SlotMetaWorkingSetEntry::new(
4625                    Rc::new(RefCell::new(SlotMeta::new(slot, Some(parent_slot)))),
4626                    None,
4627                )
4628            }
4629        })
4630    }
4631
4632    /// Returns the `SlotMeta` with the specified `slot_index`.  The resulting
4633    /// `SlotMeta` could be either from the cache or from the DB.  Specifically,
4634    /// the function:
4635    ///
4636    /// 1) Finds the slot metadata in the cache of dirty slot metadata we've
4637    ///    previously touched, otherwise:
4638    /// 2) Searches the database for that slot metadata. If still no luck, then:
4639    /// 3) Creates a dummy orphan slot in the database.
4640    ///
4641    /// Also see [`find_slot_meta_in_cached_state`] and [`find_slot_meta_in_db_else_create`].
4642    fn find_slot_meta_else_create<'a>(
4643        &self,
4644        working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
4645        chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4646        slot_index: u64,
4647    ) -> Result<Rc<RefCell<SlotMeta>>> {
4648        let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index);
4649        if let Some(slot) = result {
4650            Ok(slot)
4651        } else {
4652            self.find_slot_meta_in_db_else_create(slot_index, chained_slots)
4653        }
4654    }
4655
4656    /// A helper function to [`find_slot_meta_else_create`] that searches the
4657    /// `SlotMeta` based on the specified `slot` in `db` and updates `insert_map`.
4658    ///
4659    /// If the specified `db` does not contain a matched entry, then it will create
4660    /// a dummy orphan slot in the database.
4661    fn find_slot_meta_in_db_else_create(
4662        &self,
4663        slot: Slot,
4664        insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4665    ) -> Result<Rc<RefCell<SlotMeta>>> {
4666        if let Some(slot_meta) = self.meta_cf.get(slot)? {
4667            insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
4668        } else {
4669            // If this slot doesn't exist, make an orphan slot. This way we
4670            // remember which slots chained to this one when we eventually get a real shred
4671            // for this slot
4672            insert_map.insert(slot, Rc::new(RefCell::new(SlotMeta::new_orphan(slot))));
4673        }
4674        Ok(insert_map.get(&slot).unwrap().clone())
4675    }
4676
4677    fn get_index_meta_entry<'a>(
4678        &self,
4679        slot: Slot,
4680        index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>,
4681        index_meta_time_us: &mut u64,
4682    ) -> &'a mut IndexMetaWorkingSetEntry {
4683        let mut total_start = Measure::start("Total elapsed");
4684        let res = index_working_set.entry(slot).or_insert_with(|| {
4685            let newly_inserted_meta = self
4686                .index_cf
4687                .get(slot)
4688                .unwrap()
4689                .unwrap_or_else(|| Index::new(slot));
4690            IndexMetaWorkingSetEntry {
4691                index: newly_inserted_meta,
4692                did_insert_occur: false,
4693            }
4694        });
4695        total_start.stop();
4696        *index_meta_time_us += total_start.as_us();
4697        res
4698    }
4699
4700    pub fn get_write_batch(&self) -> Result<WriteBatch> {
4701        self.db.batch()
4702    }
4703
4704    pub fn write_batch(&self, write_batch: WriteBatch) -> Result<()> {
4705        self.db.write(write_batch)
4706    }
4707}
4708
4709// Updates the `completed_data_indexes` with a new shred `new_shred_index`.
4710// For each data set that becomes complete, yields the range of shred indexes
4711//     start_index..end_index
4712// for that completed data set.
4713fn update_completed_data_indexes<'a>(
4714    is_last_in_data: bool,
4715    new_shred_index: u32,
4716    received_data_shreds: &'a ShredIndex,
4717    // Shred indices which are marked data complete.
4718    completed_data_indexes: &mut BTreeSet<u32>,
4719) -> impl Iterator<Item = Range<u32>> + 'a {
4720    // Consecutive entries i, j, k in this array represent potential ranges
4721    // [i, j), [j, k) that could be completed data ranges
4722    [
4723        completed_data_indexes
4724            .range(..new_shred_index)
4725            .next_back()
4726            .map(|index| index + 1)
4727            .or(Some(0u32)),
4728        is_last_in_data.then(|| {
4729            // new_shred_index is data complete, so need to insert here into
4730            // the completed_data_indexes.
4731            completed_data_indexes.insert(new_shred_index);
4732            new_shred_index + 1
4733        }),
4734        completed_data_indexes
4735            .range(new_shred_index + 1..)
4736            .next()
4737            .map(|index| index + 1),
4738    ]
4739    .into_iter()
4740    .flatten()
4741    .tuple_windows()
4742    .filter(|&(start, end)| {
4743        let bounds = u64::from(start)..u64::from(end);
4744        received_data_shreds.range(bounds.clone()).eq(bounds)
4745    })
4746    .map(|(start, end)| start..end)
4747}
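
// Worked example (illustrative, not in the original source): with completed
// data indexes {4}, a ShredIndex `received` holding indexes 0..=9 (assumed
// here), and shred 9 arriving marked last-in-data, the call yields the newly
// completed range 5..10:
//
//     let mut completed = BTreeSet::from([4u32]);
//     let ranges: Vec<_> =
//         update_completed_data_indexes(true, 9, &received, &mut completed).collect();
//     assert_eq!(ranges, vec![5..10]);
//     assert!(completed.contains(&9));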
4748
4749fn update_slot_meta<'a>(
4750    is_last_in_slot: bool,
4751    is_last_in_data: bool,
4752    slot_meta: &mut SlotMeta,
4753    index: u32,
4754    new_consumed: u64,
4755    reference_tick: u8,
4756    received_data_shreds: &'a ShredIndex,
4757) -> impl Iterator<Item = Range<u32>> + 'a {
4758    let first_insert = slot_meta.received == 0;
4759    // Index is zero-indexed, while the "received" height starts from 1,
4760    // so received = index + 1 for the same shred.
4761    slot_meta.received = cmp::max(u64::from(index) + 1, slot_meta.received);
4762    if first_insert {
4763        // predict the timestamp of what would have been the first shred in this slot
4764        let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND;
4765        slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed;
4766    }
4767    slot_meta.consumed = new_consumed;
4768    // If the last index in the slot hasn't been set before, then
4769    // set it to this shred index
4770    if is_last_in_slot && slot_meta.last_index.is_none() {
4771        slot_meta.last_index = Some(u64::from(index));
4772    }
4773    update_completed_data_indexes(
4774        is_last_in_slot || is_last_in_data,
4775        index,
4776        received_data_shreds,
4777        &mut slot_meta.completed_data_indexes,
4778    )
4779}
4780
4781fn get_last_hash<'a>(iterator: impl Iterator<Item = &'a Entry> + 'a) -> Option<Hash> {
4782    iterator.last().map(|entry| entry.hash)
4783}
4784
4785fn send_signals(
4786    new_shreds_signals: &[Sender<bool>],
4787    completed_slots_senders: &[Sender<Vec<u64>>],
4788    should_signal: bool,
4789    newly_completed_slots: Vec<u64>,
4790) {
4791    if should_signal {
4792        for signal in new_shreds_signals {
4793            match signal.try_send(true) {
4794                Ok(_) => {}
4795                Err(TrySendError::Full(_)) => {
4796                    trace!("replay wake up signal channel is full.")
4797                }
4798                Err(TrySendError::Disconnected(_)) => {
4799                    trace!("replay wake up signal channel is disconnected.")
4800                }
4801            }
4802        }
4803    }
4804
4805    if !completed_slots_senders.is_empty() && !newly_completed_slots.is_empty() {
4806        let mut slots: Vec<_> = (0..completed_slots_senders.len() - 1)
4807            .map(|_| newly_completed_slots.clone())
4808            .collect();
4809
4810        slots.push(newly_completed_slots);
4811
4812        for (signal, slots) in completed_slots_senders.iter().zip(slots.into_iter()) {
4813            let res = signal.try_send(slots);
4814            if let Err(TrySendError::Full(_)) = res {
4815                datapoint_error!(
4816                    "blockstore_error",
4817                    (
4818                        "error",
4819                        "Unable to send newly completed slot because channel is full",
4820                        String
4821                    ),
4822                );
4823            }
4824        }
4825    }
4826}
4827
4828/// Returns the `SlotMeta` of the specified `slot` from the two cached states:
4829/// `working_set` and `chained_slots`.  If both contain the `SlotMeta`, then
4830/// the latest one from the `working_set` will be returned.
4831fn find_slot_meta_in_cached_state<'a>(
4832    working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
4833    chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
4834    slot: Slot,
4835) -> Option<Rc<RefCell<SlotMeta>>> {
4836    if let Some(entry) = working_set.get(&slot) {
4837        Some(entry.new_slot_meta.clone())
4838    } else {
4839        chained_slots.get(&slot).cloned()
4840    }
4841}
4842
4843// Chain current_slot to the previous slot defined by prev_slot_meta
4844fn chain_new_slot_to_prev_slot(
4845    prev_slot_meta: &mut SlotMeta,
4846    current_slot: Slot,
4847    current_slot_meta: &mut SlotMeta,
4848) {
4849    prev_slot_meta.next_slots.push(current_slot);
4850    if prev_slot_meta.is_connected() {
4851        current_slot_meta.set_parent_connected();
4852    }
4853}
4854
4855fn is_newly_completed_slot(slot_meta: &SlotMeta, backup_slot_meta: &Option<SlotMeta>) -> bool {
4856    slot_meta.is_full()
4857        && (backup_slot_meta.is_none()
4858            || slot_meta.consumed != backup_slot_meta.as_ref().unwrap().consumed)
4859}
4860
4861/// Returns a boolean indicating whether a slot has received additional shreds
4862/// that can be replayed since the previous update to the slot's SlotMeta.
4863fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -> bool {
4864    // First, this slot's parent must be connected in order to even consider
4865    // starting replay; otherwise, the replayed results may not be valid.
4866    slot_meta.is_parent_connected() &&
4867        // Then,
4868        // If the slot didn't exist in the db before, any consecutive shreds
4869        // at the start of the slot are ready to be replayed.
4870        ((slot_meta_backup.is_none() && slot_meta.consumed != 0) ||
4871        // Or,
4872        // If the slot has more consecutive shreds than it last did from the
4873        // last update, those shreds are new and also ready to be replayed.
4874        (slot_meta_backup.is_some() && slot_meta_backup.as_ref().unwrap().consumed != slot_meta.consumed))
4875}
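
// Worked example (illustrative, not in the original source): a slot whose
// parent is connected and whose `consumed` count advanced since the last
// persisted SlotMeta has new consecutive shreds ready for replay:
//
//     // `meta` is assumed to have is_parent_connected() == true and consumed == 7.
//     let mut backup = meta.clone();
//     backup.consumed = 3; // previously persisted value
//     assert!(slot_has_updates(&meta, &Some(backup)));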
4876
4877// Creates a new ledger with slot 0 full of ticks (and only ticks).
4878//
4879// Returns the blockhash that can be used to append further entries.
4880pub fn create_new_ledger(
4881    ledger_path: &Path,
4882    genesis_config: &GenesisConfig,
4883    max_genesis_archive_unpacked_size: u64,
4884    column_options: LedgerColumnOptions,
4885) -> Result<Hash> {
4886    Blockstore::destroy(ledger_path)?;
4887    genesis_config.write(ledger_path)?;
4888
4889    // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
4890    let blockstore_dir = BLOCKSTORE_DIRECTORY_ROCKS_LEVEL;
4891    let blockstore = Blockstore::open_with_options(
4892        ledger_path,
4893        BlockstoreOptions {
4894            enforce_ulimit_nofile: false,
4895            column_options: column_options.clone(),
4896            ..BlockstoreOptions::default()
4897        },
4898    )?;
4899    let ticks_per_slot = genesis_config.ticks_per_slot;
4900    let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
4901    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
4902    let last_hash = entries.last().unwrap().hash;
4903    let version = solana_sdk::shred_version::version_from_hash(&last_hash);
4904
4905    let shredder = Shredder::new(0, 0, 0, version).unwrap();
4906    let (shreds, _) = shredder.entries_to_shreds(
4907        &Keypair::new(),
4908        &entries,
4909        true, // is_last_in_slot
4910        // chained_merkle_root
4911        Some(Hash::new_from_array(rand::thread_rng().gen())),
4912        0,    // next_shred_index
4913        0,    // next_code_index
4914        true, // merkle_variant
4915        &ReedSolomonCache::default(),
4916        &mut ProcessShredsStats::default(),
4917    );
4918    assert!(shreds.last().unwrap().last_in_slot());
4919
4920    blockstore.insert_shreds(shreds, None, false)?;
4921    blockstore.set_roots(std::iter::once(&0))?;
4922    // Explicitly close the blockstore before we create the archived genesis file
4923    drop(blockstore);
4924
4925    let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
4926    let archive_file = File::create(&archive_path)?;
4927    let encoder = bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best());
4928    let mut archive = tar::Builder::new(encoder);
4929    archive.append_path_with_name(ledger_path.join(DEFAULT_GENESIS_FILE), DEFAULT_GENESIS_FILE)?;
4930    archive.append_dir_all(blockstore_dir, ledger_path.join(blockstore_dir))?;
4931    archive.into_inner()?;
4932
4933    // Ensure the genesis archive can be unpacked and is under
4934    // max_genesis_archive_unpacked_size, immediately after creating it above.
4935    {
4936        let temp_dir = tempfile::tempdir_in(ledger_path).unwrap();
4937        // unpack into a temp dir, while completely discarding the unpacked files
4938        let unpack_check = unpack_genesis_archive(
4939            &archive_path,
4940            temp_dir.path(),
4941            max_genesis_archive_unpacked_size,
4942        );
4943        if let Err(unpack_err) = unpack_check {
4944            // stash problematic original archived genesis related files to
4945            // examine them later and to prevent validator and ledger-tool from
4946            // naively consuming them
4947            let mut error_messages = String::new();
4948
4949            fs::rename(
4950                ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
4951                ledger_path.join(format!("{DEFAULT_GENESIS_ARCHIVE}.failed")),
4952            )
4953            .unwrap_or_else(|e| {
4954                let _ = write!(
4955                    &mut error_messages,
4956                    "/failed to stash problematic {DEFAULT_GENESIS_ARCHIVE}: {e}"
4957                );
4958            });
4959            fs::rename(
4960                ledger_path.join(DEFAULT_GENESIS_FILE),
4961                ledger_path.join(format!("{DEFAULT_GENESIS_FILE}.failed")),
4962            )
4963            .unwrap_or_else(|e| {
4964                let _ = write!(
4965                    &mut error_messages,
4966                    "/failed to stash problematic {DEFAULT_GENESIS_FILE}: {e}"
4967                );
4968            });
4969            fs::rename(
4970                ledger_path.join(blockstore_dir),
4971                ledger_path.join(format!("{blockstore_dir}.failed")),
4972            )
4973            .unwrap_or_else(|e| {
4974                let _ = write!(
4975                    &mut error_messages,
4976                    "/failed to stash problematic {blockstore_dir}: {e}"
4977                );
4978            });
4979
4980            return Err(BlockstoreError::Io(IoError::new(
4981                ErrorKind::Other,
4982                format!("Error checking to unpack genesis archive: {unpack_err}{error_messages}"),
4983            )));
4984        }
4985    }
4986
4987    Ok(last_hash)
4988}
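
// Usage sketch (illustrative, not in the original source): create a fresh
// ledger for a default genesis config at an assumed `ledger_path`;
// `MAX_GENESIS_ARCHIVE_UNPACKED_SIZE` stands in for whatever unpack limit the
// caller enforces:
//
//     let genesis_config = GenesisConfig::default();
//     let blockhash = create_new_ledger(
//         ledger_path,
//         &genesis_config,
//         MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
//         LedgerColumnOptions::default(),
//     )?;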
4989
4990#[macro_export]
4991macro_rules! tmp_ledger_name {
4992    () => {
4993        &format!("{}-{}", file!(), line!())
4994    };
4995}
4996
4997#[macro_export]
4998macro_rules! get_tmp_ledger_path {
4999    () => {
5000        $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!())
5001    };
5002}
5003
5004#[macro_export]
5005macro_rules! get_tmp_ledger_path_auto_delete {
5006    () => {
5007        $crate::blockstore::get_ledger_path_from_name_auto_delete($crate::tmp_ledger_name!())
5008    };
5009}
5010
5011pub fn get_ledger_path_from_name_auto_delete(name: &str) -> TempDir {
5012    let mut path = get_ledger_path_from_name(name);
5013    // path is a directory so .file_name() returns the last component of the path
5014    let last = path.file_name().unwrap().to_str().unwrap().to_string();
5015    path.pop();
5016    fs::create_dir_all(&path).unwrap();
5017    Builder::new()
5018        .prefix(&last)
5019        .rand_bytes(0)
5020        .tempdir_in(path)
5021        .unwrap()
5022}
5023
5024pub fn get_ledger_path_from_name(name: &str) -> PathBuf {
5025    use std::env;
5026    let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
5027    let keypair = Keypair::new();
5028
5029    let path = [
5030        out_dir,
5031        "ledger".to_string(),
5032        format!("{}-{}", name, keypair.pubkey()),
5033    ]
5034    .iter()
5035    .collect();
5036
5037    // whack any possible collision
5038    let _ignored = fs::remove_dir_all(&path);
5039
5040    path
5041}
5042
5043#[macro_export]
5044macro_rules! create_new_tmp_ledger {
5045    ($genesis_config:expr) => {
5046        $crate::blockstore::create_new_ledger_from_name(
5047            $crate::tmp_ledger_name!(),
5048            $genesis_config,
5049            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
5050            $crate::blockstore_options::LedgerColumnOptions::default(),
5051        )
5052    };
5053}
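
// Usage sketch (illustrative, not in the original source): spin up a
// throwaway ledger for a test:
//
//     let genesis_config = GenesisConfig::default();
//     let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);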
5054
5055#[macro_export]
5056macro_rules! create_new_tmp_ledger_with_size {
5057    (
5058        $genesis_config:expr,
5059        $max_genesis_archive_unpacked_size:expr $(,)?
5060    ) => {
5061        $crate::blockstore::create_new_ledger_from_name(
5062            $crate::tmp_ledger_name!(),
5063            $genesis_config,
5064            $max_genesis_archive_unpacked_size,
5065            $crate::blockstore_options::LedgerColumnOptions::default(),
5066        )
5067    };
5068}
5069
5070#[macro_export]
5071macro_rules! create_new_tmp_ledger_auto_delete {
5072    ($genesis_config:expr) => {
5073        $crate::blockstore::create_new_ledger_from_name_auto_delete(
5074            $crate::tmp_ledger_name!(),
5075            $genesis_config,
5076            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
5077            $crate::blockstore_options::LedgerColumnOptions::default(),
5078        )
5079    };
5080}
5081
5082pub(crate) fn verify_shred_slots(slot: Slot, parent: Slot, root: Slot) -> bool {
5083    if slot == 0 && parent == 0 && root == 0 {
5084        return true; // valid write to slot zero.
5085    }
5086    // Ignore shreds that chain to slots before the root,
5087    // or have invalid parent >= slot.
5088    root <= parent && parent < slot
5089}
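
// Quick check (illustrative, not in the original source): a shred must chain
// to a parent at or above the root and strictly below its own slot:
//
//     assert!(verify_shred_slots(5, 4, 3));  // root <= parent < slot
//     assert!(!verify_shred_slots(5, 5, 3)); // parent must be < slot
//     assert!(!verify_shred_slots(5, 2, 3)); // parent chains below the root
//     assert!(verify_shred_slots(0, 0, 0));  // genesis write to slot zero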
5090
5091// Same as `create_new_ledger()` but uses a temporary ledger name based on the provided `name`
5092//
5093// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
5094// ticks)
5095pub fn create_new_ledger_from_name(
5096    name: &str,
5097    genesis_config: &GenesisConfig,
5098    max_genesis_archive_unpacked_size: u64,
5099    column_options: LedgerColumnOptions,
5100) -> (PathBuf, Hash) {
5101    let (ledger_path, blockhash) = create_new_ledger_from_name_auto_delete(
5102        name,
5103        genesis_config,
5104        max_genesis_archive_unpacked_size,
5105        column_options,
5106    );
5107    (ledger_path.into_path(), blockhash)
5108}
5109
5110// Same as `create_new_ledger()` but uses a temporary ledger name based on the provided `name`
5111//
5112// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
5113// ticks)
5114pub fn create_new_ledger_from_name_auto_delete(
5115    name: &str,
5116    genesis_config: &GenesisConfig,
5117    max_genesis_archive_unpacked_size: u64,
5118    column_options: LedgerColumnOptions,
5119) -> (TempDir, Hash) {
5120    let ledger_path = get_ledger_path_from_name_auto_delete(name);
5121    let blockhash = create_new_ledger(
5122        ledger_path.path(),
5123        genesis_config,
5124        max_genesis_archive_unpacked_size,
5125        column_options,
5126    )
5127    .unwrap();
5128    (ledger_path, blockhash)
5129}
5130
5131pub fn entries_to_test_shreds(
5132    entries: &[Entry],
5133    slot: Slot,
5134    parent_slot: Slot,
5135    is_full_slot: bool,
5136    version: u16,
5137    merkle_variant: bool,
5138) -> Vec<Shred> {
5139    Shredder::new(slot, parent_slot, 0, version)
5140        .unwrap()
5141        .entries_to_shreds(
5142            &Keypair::new(),
5143            entries,
5144            is_full_slot,
5145            // chained_merkle_root
5146            Some(Hash::new_from_array(rand::thread_rng().gen())),
5147            0, // next_shred_index,
5148            0, // next_code_index
5149            merkle_variant,
5150            &ReedSolomonCache::default(),
5151            &mut ProcessShredsStats::default(),
5152        )
5153        .0
5154}
5155
5156// used for tests only
5157pub fn make_slot_entries(
5158    slot: Slot,
5159    parent_slot: Slot,
5160    num_entries: u64,
5161    merkle_variant: bool,
5162) -> (Vec<Shred>, Vec<Entry>) {
5163    let entries = create_ticks(num_entries, 1, Hash::new_unique());
5164    let shreds = entries_to_test_shreds(&entries, slot, parent_slot, true, 0, merkle_variant);
5165    (shreds, entries)
5166}

// used for tests only
pub fn make_many_slot_entries(
    start_slot: Slot,
    num_slots: u64,
    entries_per_slot: u64,
) -> (Vec<Shred>, Vec<Entry>) {
    let mut shreds = vec![];
    let mut entries = vec![];
    for slot in start_slot..start_slot + num_slots {
        let parent_slot = if slot == 0 { 0 } else { slot - 1 };

        let (slot_shreds, slot_entries) = make_slot_entries(
            slot,
            parent_slot,
            entries_per_slot,
            true, // merkle_variant
        );
        shreds.extend(slot_shreds);
        entries.extend(slot_entries);
    }

    (shreds, entries)
}

// test-only: check that all columns are either empty or start at `min_slot`
pub fn test_all_empty_or_min(blockstore: &Blockstore, min_slot: Slot) {
    let condition_met = blockstore
        .meta_cf
        .iter(IteratorMode::Start)
        .unwrap()
        .next()
        .map(|(slot, _)| slot >= min_slot)
        .unwrap_or(true)
        & blockstore
            .roots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .data_shred_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .code_shred_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .dead_slots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .duplicate_slots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .erasure_meta_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .orphans_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .index_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .transaction_status_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((_, slot), _)| slot >= min_slot || slot == 0)
            .unwrap_or(true)
        & blockstore
            .address_signatures_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((_, slot, _, _), _)| slot >= min_slot || slot == 0)
            .unwrap_or(true)
        & blockstore
            .rewards_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true);
    assert!(condition_met);
}
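
// Illustrative sketch (hypothetical usage, assuming the purge API exercised
// by the tests below): purge slots 0..=4, then use the helper above to
// assert every column is empty or starts at slot 5.
#[cfg(test)]
fn _example_test_all_empty_or_min_usage(blockstore: &Blockstore) {
    blockstore.run_purge(0, 4, PurgeType::Exact).unwrap();
    test_all_empty_or_min(blockstore, 5);
}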

// used for tests only
// Create `num_shreds_per_slot` shreds for each slot in
// [start_slot, start_slot + num_slots)
pub fn make_many_slot_shreds(
    start_slot: u64,
    num_slots: u64,
    num_shreds_per_slot: u64,
) -> (Vec<Shred>, Vec<Entry>) {
    // Use `None` as shred_size so the default (full) value is used
    let num_entries = max_ticks_per_n_shreds(num_shreds_per_slot, None);
    make_many_slot_entries(start_slot, num_slots, num_entries)
}
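
// Illustrative sketch of the sizing trick used above: requesting one more
// tick than fits in two shreds should force at least three data shreds
// (assumption: the default shred capacity implied by
// `max_ticks_per_n_shreds(_, None)`, the same assumption the tests below
// make when they add `+ 1` to guarantee a second shred).
#[cfg(test)]
fn _example_shred_sizing() {
    let num_entries = max_ticks_per_n_shreds(2, None) + 1;
    let (shreds, _entries) = make_slot_entries(0, 0, num_entries, true);
    assert!(shreds.len() >= 3);
}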

// Create shreds for slots that have a parent-child relationship defined by the input `chain`
// used for tests only
pub fn make_chaining_slot_entries(
    chain: &[u64],
    entries_per_slot: u64,
    first_parent: u64,
) -> Vec<(Vec<Shred>, Vec<Entry>)> {
    let mut slots_shreds_and_entries = vec![];
    for (i, slot) in chain.iter().enumerate() {
        let parent_slot = {
            if *slot == 0 || i == 0 {
                first_parent
            } else {
                chain[i - 1]
            }
        };

        let result = make_slot_entries(
            *slot,
            parent_slot,
            entries_per_slot,
            true, // merkle_variant
        );
        slots_shreds_and_entries.push(result);
    }

    slots_shreds_and_entries
}
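
// Illustrative sketch: with chain [1, 2, 4, 7] and first_parent 0, the loop
// above parents slot 1 to 0, slot 2 to 1, slot 4 to 2, and slot 7 to 4; one
// (shreds, entries) pair comes back per chained slot.
#[cfg(test)]
fn _example_make_chaining_slot_entries() {
    let chain = [1, 2, 4, 7];
    let per_slot = make_chaining_slot_entries(&chain, 10, 0);
    assert_eq!(per_slot.len(), chain.len());
    for ((shreds, _entries), slot) in per_slot.iter().zip(chain.iter()) {
        assert!(shreds.iter().all(|shred| shred.slot() == *slot));
    }
}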

#[cfg(not(unix))]
fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
    Ok(())
}

#[cfg(unix)]
fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
    // RocksDB likes to have many open files. The default open file descriptor
    // limit is usually not enough; AppendVecs and the disk Account Index are
    // also heavy users of mmapped files.
    // This should be kept in sync with the published validator instructions:
    // https://docs.solanalabs.com/operations/guides/validator-start#increased-memory-mapped-files-limit
    let desired_nofile = 1_000_000;

    fn get_nofile() -> libc::rlimit {
        let mut nofile = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 {
            warn!("getrlimit(RLIMIT_NOFILE) failed");
        }
        nofile
    }

    let mut nofile = get_nofile();
    let current = nofile.rlim_cur;
    if current < desired_nofile {
        nofile.rlim_cur = desired_nofile;
        if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 {
            error!(
                "Unable to increase the maximum open file descriptor limit to {} from {}",
                nofile.rlim_cur, current,
            );

            if cfg!(target_os = "macos") {
                error!(
                    "On macOS you may need to run |sudo launchctl limit maxfiles {} {}| first",
                    desired_nofile, desired_nofile,
                );
            }
            if enforce_ulimit_nofile {
                return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
            }
        }

        nofile = get_nofile();
    }
    info!("Maximum open file descriptors: {}", nofile.rlim_cur);
    Ok(())
}
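
// Illustrative sketch (unix only): the tolerant mode of the helper above.
// With `enforce_ulimit_nofile == false`, a failure to raise RLIMIT_NOFILE is
// logged but never surfaces as an error, so the call always returns Ok.
#[cfg(all(test, unix))]
fn _example_adjust_ulimit_nofile_tolerant() {
    assert!(adjust_ulimit_nofile(false).is_ok());
}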

#[cfg(test)]
pub mod tests {
    use {
        super::*,
        crate::{
            genesis_utils::{create_genesis_config, GenesisConfigInfo},
            leader_schedule::{FixedSchedule, LeaderSchedule},
            shred::{max_ticks_per_n_shreds, ShredFlags, LEGACY_SHRED_DATA_CAPACITY},
        },
        assert_matches::assert_matches,
        bincode::{serialize, Options},
        crossbeam_channel::unbounded,
        rand::{seq::SliceRandom, thread_rng},
        solana_account_decoder::parse_token::UiTokenAmount,
        solana_accounts_db::hardened_unpack::{
            open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
        },
        solana_entry::entry::{next_entry, next_entry_mut},
        solana_runtime::bank::{Bank, RewardType},
        solana_sdk::{
            clock::{DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
            feature_set::{vote_only_full_fec_sets, vote_only_retransmitter_signed_fec_sets},
            hash::{self, hash, Hash},
            instruction::CompiledInstruction,
            message::v0::LoadedAddresses,
            packet::PACKET_DATA_SIZE,
            pubkey::Pubkey,
            signature::Signature,
            transaction::{Transaction, TransactionError},
            transaction_context::TransactionReturnData,
        },
        solana_storage_proto::convert::generated,
        solana_transaction_status::{
            InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance,
        },
        std::{cmp::Ordering, thread::Builder, time::Duration},
        test_case::test_case,
    };

    // used for tests only
    pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
        let mut entries: Vec<Entry> = Vec::new();
        for x in 0..num_entries {
            let transaction = Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[solana_pubkey::new_rand()],
                Hash::default(),
                vec![solana_pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            );
            entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
            let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
            entries.append(&mut tick);
        }
        entries
    }
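
    // Illustrative sketch: each iteration above appends one transaction entry
    // followed by one tick entry, so `num_entries` iterations yield
    // `2 * num_entries` entries in total.
    #[test]
    fn example_make_slot_entries_with_transactions_shape() {
        let entries = make_slot_entries_with_transactions(3);
        assert_eq!(entries.len(), 6);
    }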

    fn make_and_insert_slot(blockstore: &Blockstore, slot: Slot, parent_slot: Slot) {
        let (shreds, _) = make_slot_entries(
            slot,
            parent_slot,
            100,  // num_entries
            true, // merkle_variant
        );
        blockstore.insert_shreds(shreds, None, true).unwrap();

        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert_eq!(slot, meta.slot);
        assert!(meta.is_full());
        assert!(meta.next_slots.is_empty());
    }

    #[test]
    fn test_create_new_ledger() {
        solana_logger::setup();
        let mint_total = 1_000_000_000_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
        let entries = blockstore.get_slot_entries(0, 0).unwrap();

        assert_eq!(ticks, entries);
        assert!(Path::new(ledger_path.path())
            .join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL)
            .exists());

        assert_eq!(
            genesis_config,
            open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap()
        );
        // Remove DEFAULT_GENESIS_FILE to force extraction of DEFAULT_GENESIS_ARCHIVE
        std::fs::remove_file(ledger_path.path().join(DEFAULT_GENESIS_FILE)).unwrap();
        assert_eq!(
            genesis_config,
            open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap()
        );
    }

    #[test]
    fn test_insert_get_bytes() {
        // Create enough entries to ensure there are at least two shreds created
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        assert!(num_entries > 1);

        let (mut shreds, _) = make_slot_entries(
            0, // slot
            0, // parent_slot
            num_entries,
            true, // merkle_variant
        );

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert the last shred and verify that we can retrieve it
        let last_shred = shreds.pop().unwrap();
        assert!(last_shred.index() > 0);
        blockstore
            .insert_shreds(vec![last_shred.clone()], None, false)
            .unwrap();

        let serialized_shred = blockstore
            .data_shred_cf
            .get_bytes((0, last_shred.index() as u64))
            .unwrap()
            .unwrap();
        let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap();

        assert_eq!(last_shred, deserialized_shred);
    }

    #[test]
    fn test_write_entries() {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let ticks_per_slot = 10;
        let num_slots = 10;
        let mut ticks = vec![];
        let mut shreds_per_slot = vec![];

        for i in 0..num_slots {
            let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
            let num_shreds = blockstore
                .write_entries(
                    i,
                    0,
                    0,
                    ticks_per_slot,
                    Some(i.saturating_sub(1)),
                    true,
                    &Arc::new(Keypair::new()),
                    new_ticks.clone(),
                    0,
                )
                .unwrap() as u64;
            shreds_per_slot.push(num_shreds);
            ticks.append(&mut new_ticks);
        }

        for i in 0..num_slots {
            let meta = blockstore.meta(i).unwrap().unwrap();
            let num_shreds = shreds_per_slot[i as usize];
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.last_index, Some(num_shreds - 1));
            if i == num_slots - 1 {
                assert!(meta.next_slots.is_empty());
            } else {
                assert_eq!(meta.next_slots, vec![i + 1]);
            }
            if i == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(i - 1));
            }

            assert_eq!(
                &ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize],
                &blockstore.get_slot_entries(i, 0).unwrap()[..]
            );
        }
    }

    #[test]
    fn test_put_get_simple() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Test meta column family
        let meta = SlotMeta::new(0, Some(1));
        blockstore.meta_cf.put(0, &meta).unwrap();
        let result = blockstore
            .meta_cf
            .get(0)
            .unwrap()
            .expect("Expected meta object to exist");

        assert_eq!(result, meta);

        // Test erasure column family
        let erasure = vec![1u8; 16];
        let erasure_key = (0, 0);
        blockstore
            .code_shred_cf
            .put_bytes(erasure_key, &erasure)
            .unwrap();

        let result = blockstore
            .code_shred_cf
            .get_bytes(erasure_key)
            .unwrap()
            .expect("Expected erasure object to exist");

        assert_eq!(result, erasure);

        // Test data column family
        let data = vec![2u8; 16];
        let data_key = (0, 0);
        blockstore.data_shred_cf.put_bytes(data_key, &data).unwrap();

        let result = blockstore
            .data_shred_cf
            .get_bytes(data_key)
            .unwrap()
            .expect("Expected data object to exist");

        assert_eq!(result, data);
    }

    #[test]
    fn test_multi_get() {
        const TEST_PUT_ENTRY_COUNT: usize = 100;
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Test meta column family
        for i in 0..TEST_PUT_ENTRY_COUNT {
            let k = u64::try_from(i).unwrap();
            let meta = SlotMeta::new(k, Some(k + 1));
            blockstore.meta_cf.put(k, &meta).unwrap();
            let result = blockstore
                .meta_cf
                .get(k)
                .unwrap()
                .expect("Expected meta object to exist");
            assert_eq!(result, meta);
        }
        let keys = blockstore
            .meta_cf
            .multi_get_keys(0..TEST_PUT_ENTRY_COUNT as Slot);
        let values = blockstore.meta_cf.multi_get(&keys);
        for (i, value) in values.enumerate().take(TEST_PUT_ENTRY_COUNT) {
            let k = u64::try_from(i).unwrap();
            assert_eq!(
                value.as_ref().unwrap().as_ref().unwrap(),
                &SlotMeta::new(k, Some(k + 1))
            );
        }
    }

    #[test]
    fn test_read_shred_bytes() {
        let slot = 0;
        let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true);
        let num_shreds = shreds.len() as u64;
        let shred_bufs: Vec<_> = shreds.iter().map(Shred::payload).cloned().collect();

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        let mut buf = [0; 4096];
        let (_, bytes) = blockstore.get_data_shreds(slot, 0, 1, &mut buf).unwrap();
        assert_eq!(buf[..bytes], shred_bufs[0][..bytes]);

        let (last_index, bytes2) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 1);
        assert!(bytes2 > bytes);
        {
            let shred_data_1 = &buf[..bytes];
            assert_eq!(shred_data_1, &shred_bufs[0][..bytes]);

            let shred_data_2 = &buf[bytes..bytes2];
            assert_eq!(shred_data_2, &shred_bufs[1][..bytes2 - bytes]);
        }

        // buf size part-way into shred[1], should just return shred[0]
        let mut buf = vec![0; bytes + 1];
        let (last_index, bytes3) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes3, bytes);

        let mut buf = vec![0; bytes2 - 1];
        let (last_index, bytes4) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes4, bytes);

        let mut buf = vec![0; bytes * 2];
        let (last_index, bytes6) = blockstore
            .get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
            .unwrap();
        assert_eq!(last_index, num_shreds - 1);

        {
            let shred_data = &buf[..bytes6];
            assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]);
        }

        // Read out of range
        let (last_index, bytes6) = blockstore
            .get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf)
            .unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes6, 0);
    }

    #[test]
    fn test_shred_cleanup_check() {
        let slot = 1;
        let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true);

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        let mut buf = [0; 4096];
        assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_ok());

        let max_purge_slot = 1;
        blockstore
            .run_purge(0, max_purge_slot, PurgeType::Exact)
            .unwrap();
        *blockstore.lowest_cleanup_slot.write().unwrap() = max_purge_slot;

        let mut buf = [0; 4096];
        assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_err());
    }

    #[test]
    fn test_insert_data_shreds_basic() {
        // Create enough entries to ensure there are at least two shreds created
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        assert!(num_entries > 1);

        let (mut shreds, entries) = make_slot_entries(
            0, // slot
            0, // parent_slot
            num_entries,
            true, // merkle_variant
        );
        let num_shreds = shreds.len() as u64;

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert the last shred; since we're missing the other shreds, no
        // consecutive shreds starting from slot 0, index 0 should exist.
        assert!(shreds.len() > 1);
        let last_shred = shreds.pop().unwrap();
        blockstore
            .insert_shreds(vec![last_shred], None, false)
            .unwrap();
        assert!(blockstore.get_slot_entries(0, 0).unwrap().is_empty());

        let meta = blockstore
            .meta(0)
            .unwrap()
            .expect("Expected new metadata object to be created");
        assert!(meta.consumed == 0 && meta.received == num_shreds);

        // Insert the other shreds, check for consecutive returned entries
        blockstore.insert_shreds(shreds, None, false).unwrap();
        let result = blockstore.get_slot_entries(0, 0).unwrap();

        assert_eq!(result, entries);

        let meta = blockstore
            .meta(0)
            .unwrap()
            .expect("Expected new metadata object to exist");
        assert_eq!(meta.consumed, num_shreds);
        assert_eq!(meta.received, num_shreds);
        assert_eq!(meta.parent_slot, Some(0));
        assert_eq!(meta.last_index, Some(num_shreds - 1));
        assert!(meta.next_slots.is_empty());
        assert!(meta.is_connected());
    }

    #[test]
    fn test_insert_data_shreds_reverse() {
        let num_shreds = 10;
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (mut shreds, entries) = make_slot_entries(
            0, // slot
            0, // parent_slot
            num_entries,
            true, // merkle_variant
        );
        let num_shreds = shreds.len() as u64;

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert shreds in reverse, check for consecutive returned shreds
        for i in (0..num_shreds).rev() {
            let shred = shreds.pop().unwrap();
            blockstore.insert_shreds(vec![shred], None, false).unwrap();
            let result = blockstore.get_slot_entries(0, 0).unwrap();

            let meta = blockstore
                .meta(0)
                .unwrap()
                .expect("Expected metadata object to exist");
            assert_eq!(meta.last_index, Some(num_shreds - 1));
            if i != 0 {
                assert_eq!(result.len(), 0);
                assert!(meta.consumed == 0 && meta.received == num_shreds);
            } else {
                assert_eq!(meta.parent_slot, Some(0));
                assert_eq!(result, entries);
                assert!(meta.consumed == num_shreds && meta.received == num_shreds);
            }
        }
    }

    #[test]
    fn test_insert_slots() {
        test_insert_data_shreds_slots(false);
        test_insert_data_shreds_slots(true);
    }

    #[test]
    fn test_index_fallback_deserialize() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let mut rng = rand::thread_rng();
        let slot = rng.gen_range(0..100);
        let bincode = bincode::DefaultOptions::new()
            .reject_trailing_bytes()
            .with_fixint_encoding();

        let data = 0..rng.gen_range(100..MAX_DATA_SHREDS_PER_SLOT as u64);
        let coding = 0..rng.gen_range(100..MAX_DATA_SHREDS_PER_SLOT as u64);
        let mut fallback = IndexFallback::new(slot);
        for (d, c) in data.clone().zip(coding.clone()) {
            fallback.data_mut().insert(d);
            fallback.coding_mut().insert(c);
        }

        blockstore
            .index_cf
            .put_bytes(slot, &bincode.serialize(&fallback).unwrap())
            .unwrap();

        let current = blockstore.index_cf.get(slot).unwrap().unwrap();
        for (d, c) in data.zip(coding) {
            assert!(current.data().contains(d));
            assert!(current.coding().contains(c));
        }
    }

    #[test]
    fn test_get_slot_entries1() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let entries = create_ticks(8, 0, Hash::default());
        let shreds = entries_to_test_shreds(
            &entries[0..4],
            1,
            0,
            false,
            0,
            true, // merkle_variant
        );
        blockstore
            .insert_shreds(shreds, None, false)
            .expect("Expected successful write of shreds");

        assert_eq!(
            blockstore.get_slot_entries(1, 0).unwrap()[2..4],
            entries[2..4],
        );
    }

    // This test seems to be unnecessary since the introduction of data shreds:
    // there are no guarantees that a particular shred index contains a complete entry
    #[test]
    #[ignore]
    pub fn test_get_slot_entries2() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write entries
        let num_slots = 5_u64;
        let mut index = 0;
        for slot in 0..num_slots {
            let entries = create_ticks(slot + 1, 0, Hash::default());
            let last_entry = entries.last().unwrap().clone();
            let mut shreds = entries_to_test_shreds(
                &entries,
                slot,
                slot.saturating_sub(1),
                false,
                0,
                true, // merkle_variant
            );
            for b in shreds.iter_mut() {
                b.set_index(index);
                b.set_slot(slot);
                index += 1;
            }
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expected successful write of shreds");
            assert_eq!(
                blockstore
                    .get_slot_entries(slot, u64::from(index - 1))
                    .unwrap(),
                vec![last_entry],
            );
        }
    }

    #[test]
    fn test_get_slot_entries3() {
        // Test inserting/fetching shreds which contain multiple entries per shred
        let ledger_path = get_tmp_ledger_path_auto_delete!();

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let num_slots = 5_u64;
        let shreds_per_slot = 5_u64;
        let entry_serialized_size =
            bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
        let entries_per_slot = (shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;

        // Write entries
        for slot in 0..num_slots {
            let entries = create_ticks(entries_per_slot, 0, Hash::default());
            let shreds = entries_to_test_shreds(
                &entries,
                slot,
                slot.saturating_sub(1),
                false,
                0,
                true, // merkle_variant
            );
            assert!(shreds.len() as u64 >= shreds_per_slot);
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expected successful write of shreds");
            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries);
        }
    }

    #[test]
    fn test_insert_data_shreds_consecutive() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        // Create enough entries to ensure there are at least two shreds created
        let min_entries = max_ticks_per_n_shreds(1, None) + 1;
        for i in 0..4 {
            let slot = i;
            let parent_slot = if i == 0 { 0 } else { i - 1 };
            // Write entries
            let num_entries = min_entries * (i + 1);
            let (shreds, original_entries) = make_slot_entries(
                slot,
                parent_slot,
                num_entries,
                true, // merkle_variant
            );

            let num_shreds = shreds.len() as u64;
            assert!(num_shreds > 1);
            let mut even_shreds = vec![];
            let mut odd_shreds = vec![];

            for (i, shred) in shreds.into_iter().enumerate() {
                if i % 2 == 0 {
                    even_shreds.push(shred);
                } else {
                    odd_shreds.push(shred);
                }
            }

            blockstore.insert_shreds(odd_shreds, None, false).unwrap();

            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);

            let meta = blockstore.meta(slot).unwrap().unwrap();
            if num_shreds % 2 == 0 {
                assert_eq!(meta.received, num_shreds);
            } else {
                assert_eq!(meta.received, num_shreds - 1);
            }
            assert_eq!(meta.consumed, 0);
            if num_shreds % 2 == 0 {
                assert_eq!(meta.last_index, Some(num_shreds - 1));
            } else {
                assert_eq!(meta.last_index, None);
            }

            blockstore.insert_shreds(even_shreds, None, false).unwrap();

            assert_eq!(
                blockstore.get_slot_entries(slot, 0).unwrap(),
                original_entries,
            );

            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.parent_slot, Some(parent_slot));
            assert_eq!(meta.last_index, Some(num_shreds - 1));
        }
    }

    #[test]
    fn test_data_set_completed_on_insert() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals { blockstore, .. } =
            Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        // Create enough entries to fill 2 shreds; only the later one is data complete
        let slot = 0;
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        let entries = create_ticks(num_entries, slot, Hash::default());
        let shreds =
            entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ true);
        let num_shreds = shreds.len();
        assert!(num_shreds > 1);
        assert!(blockstore
            .insert_shreds(shreds[1..].to_vec(), None, false)
            .unwrap()
            .is_empty());
        assert_eq!(
            blockstore
                .insert_shreds(vec![shreds[0].clone()], None, false)
                .unwrap(),
            vec![CompletedDataSetInfo {
                slot,
                indices: 0..num_shreds as u32,
            }]
        );
        // Inserting shreds again doesn't trigger notification
        assert!(blockstore
            .insert_shreds(shreds, None, false)
            .unwrap()
            .is_empty());
    }

    #[test]
    fn test_new_shreds_signal() {
        // Initialize blockstore
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            ledger_signal_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 50;
        // Create entries for slot 0
        let (mut shreds, _) = make_slot_entries(
            0, // slot
            0, // parent_slot
            entries_per_slot,
            false, // merkle_variant
        );
        let shreds_per_slot = shreds.len() as u64;

        // Insert second shred, but we're missing the first shred, so no consecutive
        // shreds starting from slot 0, index 0 should exist.
        blockstore
            .insert_shreds(vec![shreds.remove(1)], None, false)
            .unwrap();
        let timer = Duration::from_secs(1);
        assert!(recvr.recv_timeout(timer).is_err());
        // Insert first shred, now we've made a consecutive block
        blockstore
            .insert_shreds(vec![shreds.remove(0)], None, false)
            .unwrap();
        // Wait to get notified of update, should only be one update
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());
        // Insert the rest of the ticks
        blockstore.insert_shreds(shreds, None, false).unwrap();
        // Wait to get notified of update, should only be one update
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());

        // Create some other slots, and send batches of ticks for each slot such
        // that each slot is missing the shred at index == slot - 1. Thus, no
        // consecutive blocks will be formed
        let num_slots = shreds_per_slot;
        let mut shreds = vec![];
        let mut missing_shreds = vec![];
        for slot in 1..num_slots + 1 {
            let (mut slot_shreds, _) = make_slot_entries(
                slot,
                slot - 1, // parent_slot
                entries_per_slot,
                false, // merkle_variant
            );
            let missing_shred = slot_shreds.remove(slot as usize - 1);
            shreds.extend(slot_shreds);
            missing_shreds.push(missing_shred);
        }

        // Should be no updates, since no new chains from block 0 were formed
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(recvr.recv_timeout(timer).is_err());

        // Insert a shred for each slot that doesn't make a consecutive block; we
        // should get no updates
        let shreds: Vec<_> = (1..num_slots + 1)
            .flat_map(|slot| {
                let (mut shred, _) = make_slot_entries(
                    slot,
                    slot - 1, // parent_slot
                    1,        // num_entries
                    false,    // merkle_variant
                );
                shred[0].set_index(2 * num_slots as u32);
                shred
            })
            .collect();

        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(recvr.recv_timeout(timer).is_err());

        // For slots 1..num_slots/2, fill in the holes in one batch insertion,
        // so we should only get one signal
        let missing_shreds2 = missing_shreds
            .drain((num_slots / 2) as usize..)
            .collect_vec();
        blockstore
            .insert_shreds(missing_shreds, None, false)
            .unwrap();
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());

        // Fill in the holes for each of the remaining slots; we should get a
        // single update for each
        blockstore
            .insert_shreds(missing_shreds2, None, false)
            .unwrap();
    }

    #[test]
    fn test_completed_shreds_signal() {
        // Initialize blockstore
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 10;

        // Create shreds for slot 0
        let (mut shreds, _) =
            make_slot_entries(0, 0, entries_per_slot, /*merkle_variant:*/ true);

        let shred0 = shreds.remove(0);
        // Insert all but the first shred in the slot, should not be considered complete
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(recvr.try_recv().is_err());

        // Insert first shred, slot should now be considered complete
        blockstore.insert_shreds(vec![shred0], None, false).unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![0]);
    }

    #[test]
    fn test_completed_shreds_signal_orphans() {
        // Initialize blockstore
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 10;
        let slots = [2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);

        // Get the shreds for slot 10, chaining to slot 5
        let (mut orphan_child, _) = all_shreds.remove(2);

        // Get the shreds for slot 5 chaining to slot 2
        let (mut orphan_shreds, _) = all_shreds.remove(1);

        // Insert all but the first shred in the slot, should not be considered complete
        let orphan_child0 = orphan_child.remove(0);
        blockstore.insert_shreds(orphan_child, None, false).unwrap();
        assert!(recvr.try_recv().is_err());

        // Insert first shred, slot should now be considered complete
        blockstore
            .insert_shreds(vec![orphan_child0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]);

        // Insert the shreds for the orphan_slot
        let orphan_shred0 = orphan_shreds.remove(0);
        blockstore
            .insert_shreds(orphan_shreds, None, false)
            .unwrap();
        assert!(recvr.try_recv().is_err());

        // Insert first shred, slot should now be considered complete
        blockstore
            .insert_shreds(vec![orphan_shred0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]);
    }

    #[test]
    fn test_completed_shreds_signal_many() {
        // Initialize blockstore
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 10;
        let mut slots = vec![2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);
        let disconnected_slot = 4;

        let (shreds0, _) = all_shreds.remove(0);
        let (shreds1, _) = all_shreds.remove(0);
        let (shreds2, _) = all_shreds.remove(0);
        let (shreds3, _) = make_slot_entries(
            disconnected_slot,
            1, // parent_slot
            entries_per_slot,
            true, // merkle_variant
        );

        let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
            .into_iter()
            .flatten()
            .collect();

        all_shreds.shuffle(&mut thread_rng());
        blockstore.insert_shreds(all_shreds, None, false).unwrap();
        let mut result = recvr.try_recv().unwrap();
        result.sort_unstable();
        slots.push(disconnected_slot);
        slots.sort_unstable();
        assert_eq!(result, slots);
    }

    #[test]
    fn test_handle_chaining_basic() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = 5;
        let num_slots = 3;

        // Construct the shreds
        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;

        // 1) Write to the first slot
        let shreds1 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds1, None, false).unwrap();
        let meta1 = blockstore.meta(1).unwrap().unwrap();
        assert!(meta1.next_slots.is_empty());
        // Slot 1 is not connected because slot 0 hasn't been inserted yet
        assert!(!meta1.is_connected());
        assert_eq!(meta1.parent_slot, Some(0));
        assert_eq!(meta1.last_index, Some(shreds_per_slot as u64 - 1));

        // 2) Write to the second slot
        let shreds2 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds2, None, false).unwrap();
        let meta2 = blockstore.meta(2).unwrap().unwrap();
        assert!(meta2.next_slots.is_empty());
        // Slot 2 is not connected because slot 0 hasn't been inserted yet
        assert!(!meta2.is_connected());
        assert_eq!(meta2.parent_slot, Some(1));
        assert_eq!(meta2.last_index, Some(shreds_per_slot as u64 - 1));

        // Check the first slot again, it should chain to the second slot,
        // but still isn't connected.
        let meta1 = blockstore.meta(1).unwrap().unwrap();
        assert_eq!(meta1.next_slots, vec![2]);
        assert!(!meta1.is_connected());
        assert_eq!(meta1.parent_slot, Some(0));
        assert_eq!(meta1.last_index, Some(shreds_per_slot as u64 - 1));

        // 3) Write to the zeroth slot, check that every slot
        // is now part of the trunk
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for slot in 0..3 {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            // The last slot will not chain to any other slots
            if slot != 2 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            }
            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
            assert!(meta.is_connected());
        }
    }

    #[test]
    fn test_handle_chaining_missing_slots() {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_slots = 30;
        let entries_per_slot = 5;
        // Make some shreds and split based on whether the slot is odd or even.
        let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() as u64 / num_slots;
        let (even_slots, odd_slots): (Vec<_>, Vec<_>) =
            shreds.into_iter().partition(|shred| shred.slot() % 2 == 0);

        // Write the odd slot shreds
        blockstore.insert_shreds(odd_slots, None, false).unwrap();

        for slot in 0..num_slots {
            // The slots that were inserted (the odds) will ...
            // - Know who their parent is (parent encoded in the shreds)
            // - Have empty next_slots since next_slots would be evens
            // The slots that were not inserted (the evens) will ...
            // - Still have a meta since their child linked back to them
            // - Have next_slots link to child because of the above
            // - Have an unknown parent since no shreds to indicate
            let meta = blockstore.meta(slot).unwrap().unwrap();
            if slot % 2 == 0 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
                assert_eq!(meta.parent_slot, None);
            } else {
                assert!(meta.next_slots.is_empty());
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }

            // None of the slots should be connected, but since slot 0 is
            // the special case, it will have parent_connected as true.
            assert!(!meta.is_connected());
            assert!(!meta.is_parent_connected() || slot == 0);
        }

        // Write the even slot shreds that we skipped earlier
        blockstore.insert_shreds(even_slots, None, false).unwrap();

        for slot in 0..num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            // All slots except the last one should have a slot in next_slots
            if slot != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }
            // All slots should have the link back to their parent
            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            // All inserted slots were full and should be connected
            assert_eq!(meta.last_index, Some(shreds_per_slot - 1));
            assert!(meta.is_full());
            assert!(meta.is_connected());
        }
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    pub fn test_forward_chaining_is_connected() {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_slots = 15;
        // Create enough entries to ensure there are at least two shreds created
        let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
        assert!(entries_per_slot > 1);

        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;
        assert!(shreds_per_slot > 1);

        // Write the shreds such that every 3rd slot has a gap in the beginning
        let mut missing_shreds = vec![];
        for slot in 0..num_slots {
            let mut shreds_for_slot = shreds.drain(..shreds_per_slot).collect_vec();
            if slot % 3 == 0 {
                let shred0 = shreds_for_slot.remove(0);
                missing_shreds.push(shred0);
            }
            blockstore
                .insert_shreds(shreds_for_slot, None, false)
                .unwrap();
        }

        // Check metadata
        for slot in 0..num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            // The last slot will not chain to any other slots
            if slot != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }

            // Ensure that each slot has its parent correct
            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            // No slots should be connected yet, not even slot 0,
            // as slot 0 is still not full
            assert!(!meta.is_connected());

            assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
        }

        // Iteratively finish every 3rd slot, and check that all slots up to and including
        // slot_index + 3 become part of the trunk
        for slot_index in 0..num_slots {
            if slot_index % 3 == 0 {
                let shred = missing_shreds.remove(0);
                blockstore.insert_shreds(vec![shred], None, false).unwrap();

                for slot in 0..num_slots {
                    let meta = blockstore.meta(slot).unwrap().unwrap();

                    if slot != num_slots - 1 {
                        assert_eq!(meta.next_slots, vec![slot + 1]);
                    } else {
                        assert!(meta.next_slots.is_empty());
                    }

                    if slot < slot_index + 3 {
                        assert!(meta.is_full());
                        assert!(meta.is_connected());
                    } else {
                        assert!(!meta.is_connected());
                    }

                    assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
                }
            }
        }
    }

    #[test]
    fn test_scan_and_fix_roots() {
        fn blockstore_roots(blockstore: &Blockstore) -> Vec<Slot> {
            blockstore
                .rooted_slot_iterator(0)
                .unwrap()
                .collect::<Vec<_>>()
        }

        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = max_ticks_per_n_shreds(5, None);
        let start_slot: Slot = 0;
        let num_slots = 18;

        // Produce the following chains and insert shreds into Blockstore
        // 0 -> 2 -> 4 -> 6 -> 8 -> 10 -> 12 -> 14 -> 16 -> 18
        //  \
        //   -> 1 -> 3 -> 5 -> 7 ->  9 -> 11 -> 13 -> 15 -> 17
        let shreds: Vec<_> = (start_slot..=num_slots)
            .flat_map(|slot| {
                let parent_slot = if slot % 2 == 0 {
                    slot.saturating_sub(2)
                } else {
                    slot.saturating_sub(1)
                };
                let (shreds, _) = make_slot_entries(
                    slot,
                    parent_slot,
                    entries_per_slot,
                    true, // merkle_variant
                );
                shreds.into_iter()
            })
            .collect();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // Start slot must be a root
        let (start, end) = (Some(16), None);
        assert_matches!(
            blockstore.scan_and_fix_roots(start, end, &AtomicBool::new(false)),
            Err(BlockstoreError::SlotNotRooted)
        );

        // Mark several roots
        let new_roots = vec![6, 12];
        blockstore.set_roots(new_roots.iter()).unwrap();
        assert_eq!(&new_roots, &blockstore_roots(&blockstore));

        // Specify both a start root and end slot
        let (start, end) = (Some(12), Some(8));
        let roots = vec![6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Specify only an end slot
        let (start, end) = (None, Some(4));
        let roots = vec![4, 6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Specify only a start slot
        let (start, end) = (Some(12), None);
        let roots = vec![0, 2, 4, 6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Mark additional root
        let new_roots = [16];
        let roots = vec![0, 2, 4, 6, 8, 10, 12, 16];
        blockstore.set_roots(new_roots.iter()).unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Leave both start and end unspecified
        let (start, end) = (None, None);
        let roots = vec![0, 2, 4, 6, 8, 10, 12, 14, 16];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Subsequent calls should have no effect and return without error
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));
    }
6623
6624    #[test]
6625    fn test_set_and_chain_connected_on_root_and_next_slots() {
6626        solana_logger::setup();
6627        let ledger_path = get_tmp_ledger_path_auto_delete!();
6628        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6629
6630        // Create enough entries to ensure 5 shreds result
6631        let entries_per_slot = max_ticks_per_n_shreds(5, None);
6632
6633        let mut start_slot = 5;
        // Start a chain from a slot not in blockstore. This is the case when
        // a node starts with no blockstore and downloads a snapshot. In this
        // scenario, the slot will be marked connected despite its parent not
        // being connected (or existing) and despite the slot not being full.
6638        blockstore
6639            .set_and_chain_connected_on_root_and_next_slots(start_slot)
6640            .unwrap();
6641        let slot_meta5 = blockstore.meta(start_slot).unwrap().unwrap();
6642        assert!(!slot_meta5.is_full());
6643        assert!(slot_meta5.is_parent_connected());
6644        assert!(slot_meta5.is_connected());
6645
6646        let num_slots = 5;
6647        // Insert some new slots and ensure they connect to the root correctly
6648        start_slot += 1;
6649        let (shreds, _) = make_many_slot_entries(start_slot, num_slots, entries_per_slot);
6650        blockstore.insert_shreds(shreds, None, false).unwrap();
6651        for slot in start_slot..start_slot + num_slots {
6652            info!("Evaluating slot {}", slot);
6653            let meta = blockstore.meta(slot).unwrap().unwrap();
6654            assert!(meta.is_parent_connected());
6655            assert!(meta.is_connected());
6656        }
6657
        // Chaining connected on slots that are already connected should just be a no-op
6659        blockstore
6660            .set_and_chain_connected_on_root_and_next_slots(start_slot)
6661            .unwrap();
6662        for slot in start_slot..start_slot + num_slots {
6663            let meta = blockstore.meta(slot).unwrap().unwrap();
6664            assert!(meta.is_parent_connected());
6665            assert!(meta.is_connected());
6666        }
6667
        // Start another chain that is disconnected from the previous chain,
        // but insert a non-full slot and ensure that this slot (and its
        // children) are not marked as connected.
6671        start_slot += 2 * num_slots;
6672        let (shreds, _) = make_many_slot_entries(start_slot, num_slots, entries_per_slot);
6673        // Insert all shreds except for the shreds with index > 0 from non_full_slot
6674        let non_full_slot = start_slot + num_slots / 2;
6675        let (shreds, missing_shreds): (Vec<_>, Vec<_>) = shreds
6676            .into_iter()
6677            .partition(|shred| shred.slot() != non_full_slot || shred.index() == 0);
6678        blockstore.insert_shreds(shreds, None, false).unwrap();
        // The chaining method hasn't been called yet, so none of these slots are connected yet
6680        for slot in start_slot..start_slot + num_slots {
6681            let meta = blockstore.meta(slot).unwrap().unwrap();
6682            assert!(!meta.is_parent_connected());
6683            assert!(!meta.is_connected());
6684        }
6685        // Now chain from the new starting point
6686        blockstore
6687            .set_and_chain_connected_on_root_and_next_slots(start_slot)
6688            .unwrap();
6689        for slot in start_slot..start_slot + num_slots {
6690            let meta = blockstore.meta(slot).unwrap().unwrap();
6691            match slot.cmp(&non_full_slot) {
6692                Ordering::Less => {
6693                    // These are fully connected as expected
6694                    assert!(meta.is_parent_connected());
6695                    assert!(meta.is_connected());
6696                }
6697                Ordering::Equal => {
                    // Parent will be connected, but this slot is not connected itself
6699                    assert!(meta.is_parent_connected());
6700                    assert!(!meta.is_connected());
6701                }
6702                Ordering::Greater => {
                    // None of the children are connected either
6704                    assert!(!meta.is_parent_connected());
6705                    assert!(!meta.is_connected());
6706                }
6707            }
6708        }
6709
6710        // Insert the missing shreds and ensure all slots connected now
6711        blockstore
6712            .insert_shreds(missing_shreds, None, false)
6713            .unwrap();
6714        for slot in start_slot..start_slot + num_slots {
6715            let meta = blockstore.meta(slot).unwrap().unwrap();
6716            assert!(meta.is_parent_connected());
6717            assert!(meta.is_connected());
6718        }
6719    }
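
    // A compact sketch of the connectivity rule exercised above, assuming
    // (as in the tests above) that make_and_insert_slot() produces full
    // slots: a slot only becomes connected once the chain is explicitly
    // started at it or its parent is connected.
    #[test]
    fn test_connected_requires_chaining_sketch() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        make_and_insert_slot(&blockstore, 2, 1);
        // Slot 1 was never inserted, so slot 2 is not connected yet.
        assert!(!blockstore.meta(2).unwrap().unwrap().is_connected());
        // Chaining from slot 2 itself marks it connected despite the
        // missing parent, mirroring the snapshot-start case above.
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(2)
            .unwrap();
        assert!(blockstore.meta(2).unwrap().unwrap().is_connected());
    }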
6720
6721    /*
6722        #[test]
6723        pub fn test_chaining_tree() {
6724            let ledger_path = get_tmp_ledger_path_auto_delete!();
6725            let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6726
6727            let num_tree_levels = 6;
6728            assert!(num_tree_levels > 1);
6729            let branching_factor: u64 = 4;
6730            // Number of slots that will be in the tree
6731            let num_slots = (branching_factor.pow(num_tree_levels) - 1) / (branching_factor - 1);
6732            let erasure_config = ErasureConfig::default();
6733            let entries_per_slot = erasure_config.num_data() as u64;
6734            assert!(entries_per_slot > 1);
6735
6736            let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
6737
6738            // Insert tree one slot at a time in a random order
6739            let mut slots: Vec<_> = (0..num_slots).collect();
6740
6741            // Get shreds for the slot
6742            slots.shuffle(&mut thread_rng());
6743            for slot in slots {
6744                // Get shreds for the slot "slot"
6745                let slot_shreds = &mut shreds
6746                    [(slot * entries_per_slot) as usize..((slot + 1) * entries_per_slot) as usize];
6747                for shred in slot_shreds.iter_mut() {
6748                    // Get the parent slot of the slot in the tree
6749                    let slot_parent = {
6750                        if slot == 0 {
6751                            0
6752                        } else {
6753                            (slot - 1) / branching_factor
6754                        }
6755                    };
6756                    shred.set_parent(slot_parent);
6757                }
6758
6759                let shared_shreds: Vec<_> = slot_shreds
6760                    .iter()
6761                    .cloned()
6762                    .map(|shred| Arc::new(RwLock::new(shred)))
6763                    .collect();
6764                let mut coding_generator = CodingGenerator::new_from_config(&erasure_config);
6765                let coding_shreds = coding_generator.next(&shared_shreds);
6766                assert_eq!(coding_shreds.len(), erasure_config.num_coding());
6767
6768                let mut rng = thread_rng();
6769
6770                // Randomly pick whether to insert erasure or coding shreds first
6771                if rng.gen_bool(0.5) {
6772                    blockstore.write_shreds(slot_shreds).unwrap();
6773                    blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
6774                } else {
6775                    blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
6776                    blockstore.write_shreds(slot_shreds).unwrap();
6777                }
6778            }
6779
6780            // Make sure everything chains correctly
6781            let last_level =
6782                (branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1);
6783            for slot in 0..num_slots {
6784                let slot_meta = blockstore.meta(slot).unwrap().unwrap();
6785                assert_eq!(slot_meta.consumed, entries_per_slot);
6786                assert_eq!(slot_meta.received, entries_per_slot);
6787                assert!(slot_meta.is_connected());
6788                let slot_parent = {
6789                    if slot == 0 {
6790                        0
6791                    } else {
6792                        (slot - 1) / branching_factor
6793                    }
6794                };
6795                assert_eq!(slot_meta.parent_slot, Some(slot_parent));
6796
6797                let expected_children: HashSet<_> = {
6798                    if slot >= last_level {
6799                        HashSet::new()
6800                    } else {
6801                        let first_child_slot = min(num_slots - 1, slot * branching_factor + 1);
6802                        let last_child_slot = min(num_slots - 1, (slot + 1) * branching_factor);
6803                        (first_child_slot..last_child_slot + 1).collect()
6804                    }
6805                };
6806
6807                let result: HashSet<_> = slot_meta.next_slots.iter().cloned().collect();
6808                if expected_children.len() != 0 {
6809                    assert_eq!(slot_meta.next_slots.len(), branching_factor as usize);
6810                } else {
6811                    assert_eq!(slot_meta.next_slots.len(), 0);
6812                }
6813                assert_eq!(expected_children, result);
6814            }
6815
6816            // No orphan slots should exist
6817            assert!(blockstore.orphans_cf.is_empty().unwrap())
6818
6819        }
6820    */
6821    #[test]
6822    fn test_slot_range_connected_chain() {
6823        let ledger_path = get_tmp_ledger_path_auto_delete!();
6824        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6825
6826        let num_slots = 3;
6827        for slot in 1..=num_slots {
6828            make_and_insert_slot(&blockstore, slot, slot.saturating_sub(1));
6829        }
6830
6831        assert!(blockstore.slot_range_connected(1, 3));
6832        assert!(!blockstore.slot_range_connected(1, 4)); // slot 4 does not exist
6833    }
6834
6835    #[test]
6836    fn test_slot_range_connected_disconnected() {
6837        let ledger_path = get_tmp_ledger_path_auto_delete!();
6838        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6839
6840        make_and_insert_slot(&blockstore, 1, 0);
6841        make_and_insert_slot(&blockstore, 2, 1);
6842        make_and_insert_slot(&blockstore, 4, 2);
6843
6844        assert!(blockstore.slot_range_connected(1, 3)); // Slot 3 does not exist, but we can still replay this range to slot 4
6845        assert!(blockstore.slot_range_connected(1, 4));
6846    }
6847
6848    #[test]
6849    fn test_slot_range_connected_same_slot() {
6850        let ledger_path = get_tmp_ledger_path_auto_delete!();
6851        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6852
6853        assert!(blockstore.slot_range_connected(54, 54));
6854    }
6855
6856    #[test]
6857    fn test_slot_range_connected_starting_slot_not_full() {
6858        let ledger_path = get_tmp_ledger_path_auto_delete!();
6859        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6860
6861        make_and_insert_slot(&blockstore, 5, 4);
6862        make_and_insert_slot(&blockstore, 6, 5);
6863
6864        assert!(!blockstore.meta(4).unwrap().unwrap().is_full());
6865        assert!(blockstore.slot_range_connected(4, 6));
6866    }
6867
6868    #[test]
6869    fn test_get_slots_since() {
6870        let ledger_path = get_tmp_ledger_path_auto_delete!();
6871        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6872
6873        // Slot doesn't exist
6874        assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty());
6875
6876        let mut meta0 = SlotMeta::new(0, Some(0));
6877        blockstore.meta_cf.put(0, &meta0).unwrap();
6878
6879        // Slot exists, chains to nothing
6880        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
6881        assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
6882        meta0.next_slots = vec![1, 2];
6883        blockstore.meta_cf.put(0, &meta0).unwrap();
6884
6885        // Slot exists, chains to some other slots
6886        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
6887        assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
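        // Slots without metas (here, slot 1) are simply omitted from the result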
6888        assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);
6889
6890        let mut meta3 = SlotMeta::new(3, Some(1));
6891        meta3.next_slots = vec![10, 5];
6892        blockstore.meta_cf.put(3, &meta3).unwrap();
6893        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
6894            .into_iter()
6895            .collect();
6896        assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
6897    }
6898
6899    #[test]
6900    fn test_orphans() {
6901        let ledger_path = get_tmp_ledger_path_auto_delete!();
6902        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6903
6904        // Create shreds and entries
6905        let entries_per_slot = 1;
6906        let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot);
6907        let shreds_per_slot = shreds.len() / 3;
6908
        // Write slot 2, which chains to slot 1. Slot 1's shreds were never
        // inserted, so slot 1's parent is unknown and slot 1 is the orphan
6911        let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec();
6912        blockstore
6913            .insert_shreds(shreds_for_slot, None, false)
6914            .unwrap();
6915        let meta = blockstore
6916            .meta(1)
6917            .expect("Expect database get to succeed")
6918            .unwrap();
6919        assert!(meta.is_orphan());
6920        assert_eq!(
6921            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
6922            vec![1]
6923        );
6924
6925        // Write slot 1 which chains to slot 0, so now slot 0 is the
6926        // orphan, and slot 1 is no longer the orphan.
6927        let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec();
6928        blockstore
6929            .insert_shreds(shreds_for_slot, None, false)
6930            .unwrap();
6931        let meta = blockstore
6932            .meta(1)
6933            .expect("Expect database get to succeed")
6934            .unwrap();
6935        assert!(!meta.is_orphan());
6936        let meta = blockstore
6937            .meta(0)
6938            .expect("Expect database get to succeed")
6939            .unwrap();
6940        assert!(meta.is_orphan());
6941        assert_eq!(
6942            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
6943            vec![0]
6944        );
6945
        // Write some slots that chain to existing slots and to the orphan;
        // nothing should change
6948        let (shred4, _) = make_slot_entries(4, 0, 1, /*merkle_variant:*/ true);
6949        let (shred5, _) = make_slot_entries(5, 1, 1, /*merkle_variant:*/ true);
6950        blockstore.insert_shreds(shred4, None, false).unwrap();
6951        blockstore.insert_shreds(shred5, None, false).unwrap();
6952        assert_eq!(
6953            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
6954            vec![0]
6955        );
6956
6957        // Write zeroth slot, no more orphans
6958        blockstore.insert_shreds(shreds, None, false).unwrap();
6959        for i in 0..3 {
6960            let meta = blockstore
6961                .meta(i)
6962                .expect("Expect database get to succeed")
6963                .unwrap();
6964            assert!(!meta.is_orphan());
6965        }
6966        // Orphans cf is empty
6967        assert!(blockstore.orphans_cf.is_empty().unwrap());
6968    }
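
    // Minimal orphan sketch, reusing the helpers above: inserting a slot
    // whose parent has no shreds leaves that parent, not the inserted slot,
    // as the orphan. The slot numbers are arbitrary assumptions.
    #[test]
    fn test_single_orphan_sketch() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let (shreds, _) = make_slot_entries(7, 6, 1, /*merkle_variant:*/ true);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        // Slot 7 knows its parent, so the unknown slot 6 is the orphan.
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![6]
        );
    }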
6969
6970    fn test_insert_data_shreds_slots(should_bulk_write: bool) {
6971        let ledger_path = get_tmp_ledger_path_auto_delete!();
6972        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6973
6974        // Create shreds and entries
6975        let num_entries = 20_u64;
6976        let mut entries = vec![];
6977        let mut shreds = vec![];
6978        let mut num_shreds_per_slot = 0;
6979        for slot in 0..num_entries {
6980            let parent_slot = {
6981                if slot == 0 {
6982                    0
6983                } else {
6984                    slot - 1
6985                }
6986            };
6987
6988            let (mut shred, entry) =
6989                make_slot_entries(slot, parent_slot, 1, /*merkle_variant:*/ false);
6990            num_shreds_per_slot = shred.len() as u64;
6991            shred.iter_mut().for_each(|shred| shred.set_index(0));
6992            shreds.extend(shred);
6993            entries.extend(entry);
6994        }
6995
6996        let num_shreds = shreds.len();
6997        // Write shreds to the database
6998        if should_bulk_write {
6999            blockstore.insert_shreds(shreds, None, false).unwrap();
7000        } else {
7001            for _ in 0..num_shreds {
7002                let shred = shreds.remove(0);
7003                blockstore.insert_shreds(vec![shred], None, false).unwrap();
7004            }
7005        }
7006
7007        for i in 0..num_entries - 1 {
7008            assert_eq!(
7009                blockstore.get_slot_entries(i, 0).unwrap()[0],
7010                entries[i as usize]
7011            );
7012
7013            let meta = blockstore.meta(i).unwrap().unwrap();
7014            assert_eq!(meta.received, 1);
7015            assert_eq!(meta.last_index, Some(0));
7016            if i != 0 {
7017                assert_eq!(meta.parent_slot, Some(i - 1));
7018                assert_eq!(meta.consumed, 1);
7019            } else {
7020                assert_eq!(meta.parent_slot, Some(0));
7021                assert_eq!(meta.consumed, num_shreds_per_slot);
7022            }
7023        }
7024    }
7025
7026    #[test]
7027    fn test_find_missing_data_indexes() {
7028        let slot = 0;
7029        let ledger_path = get_tmp_ledger_path_auto_delete!();
7030        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7031
7032        // Write entries
7033        let gap: u64 = 10;
7034        assert!(gap > 3);
7035        // Create enough entries to ensure there are at least two shreds created
7036        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
7037        let num_entries = max_ticks_per_n_shreds(1, Some(data_buffer_size)) + 1;
7038        let entries = create_ticks(num_entries, 0, Hash::default());
7039        let mut shreds =
7040            entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ false);
7041        let num_shreds = shreds.len();
7042        assert!(num_shreds > 1);
7043        for (i, s) in shreds.iter_mut().enumerate() {
7044            s.set_index(i as u32 * gap as u32);
7045            s.set_slot(slot);
7046        }
7047        blockstore.insert_shreds(shreds, None, false).unwrap();
7048
7049        // Index of the first shred is 0
7050        // Index of the second shred is "gap"
7051        // Thus, the missing indexes should then be [1, gap - 1] for the input index
7052        // range of [0, gap)
7053        let expected: Vec<u64> = (1..gap).collect();
7054        assert_eq!(
7055            blockstore.find_missing_data_indexes(
7056                slot,
7057                0,            // first_timestamp
7058                0,            // defer_threshold_ticks
7059                0,            // start_index
7060                gap,          // end_index
7061                gap as usize, // max_missing
7062            ),
7063            expected
7064        );
7065        assert_eq!(
7066            blockstore.find_missing_data_indexes(
7067                slot,
7068                0,                  // first_timestamp
7069                0,                  // defer_threshold_ticks
7070                1,                  // start_index
7071                gap,                // end_index
7072                (gap - 1) as usize, // max_missing
7073            ),
7074            expected,
7075        );
7076        assert_eq!(
7077            blockstore.find_missing_data_indexes(
7078                slot,
7079                0,                  // first_timestamp
7080                0,                  // defer_threshold_ticks
7081                0,                  // start_index
7082                gap - 1,            // end_index
7083                (gap - 1) as usize, // max_missing
7084            ),
7085            &expected[..expected.len() - 1],
7086        );
7087        assert_eq!(
7088            blockstore.find_missing_data_indexes(
7089                slot,
7090                0,            // first_timestamp
7091                0,            // defer_threshold_ticks
7092                gap - 2,      // start_index
7093                gap,          // end_index
7094                gap as usize, // max_missing
7095            ),
7096            vec![gap - 2, gap - 1],
7097        );
7098        assert_eq!(
7099            blockstore.find_missing_data_indexes(
7100                slot,    // slot
7101                0,       // first_timestamp
7102                0,       // defer_threshold_ticks
7103                gap - 2, // start_index
7104                gap,     // end_index
7105                1,       // max_missing
7106            ),
7107            vec![gap - 2],
7108        );
7109        assert_eq!(
7110            blockstore.find_missing_data_indexes(
7111                slot, // slot
7112                0,    // first_timestamp
7113                0,    // defer_threshold_ticks
7114                0,    // start_index
7115                gap,  // end_index
7116                1,    // max_missing
7117            ),
7118            vec![1],
7119        );
7120
7121        // Test with a range that encompasses a shred with index == gap which was
7122        // already inserted.
7123        let mut expected: Vec<u64> = (1..gap).collect();
7124        expected.push(gap + 1);
7125        assert_eq!(
7126            blockstore.find_missing_data_indexes(
7127                slot,
7128                0,                  // first_timestamp
7129                0,                  // defer_threshold_ticks
7130                0,                  // start_index
7131                gap + 2,            // end_index
7132                (gap + 2) as usize, // max_missing
7133            ),
7134            expected,
7135        );
7136        assert_eq!(
7137            blockstore.find_missing_data_indexes(
7138                slot,
7139                0,                  // first_timestamp
7140                0,                  // defer_threshold_ticks
7141                0,                  // start_index
7142                gap + 2,            // end_index
7143                (gap - 1) as usize, // max_missing
7144            ),
7145            &expected[..expected.len() - 1],
7146        );
7147
7148        for i in 0..num_shreds as u64 {
7149            for j in 0..i {
7150                let expected: Vec<u64> = (j..i)
7151                    .flat_map(|k| {
7152                        let begin = k * gap + 1;
7153                        let end = (k + 1) * gap;
7154                        begin..end
7155                    })
7156                    .collect();
7157                assert_eq!(
7158                    blockstore.find_missing_data_indexes(
7159                        slot,
7160                        0,                        // first_timestamp
7161                        0,                        // defer_threshold_ticks
7162                        j * gap,                  // start_index
7163                        i * gap,                  // end_index
7164                        ((i - j) * gap) as usize, // max_missing
7165                    ),
7166                    expected,
7167                );
7168            }
7169        }
7170    }
7171
7172    #[test]
7173    fn test_find_missing_data_indexes_timeout() {
7174        let slot = 0;
7175        let ledger_path = get_tmp_ledger_path_auto_delete!();
7176        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7177
7178        // Blockstore::find_missing_data_indexes() compares timestamps, so
7179        // set a small value for defer_threshold_ticks to avoid flakiness.
7180        let defer_threshold_ticks = DEFAULT_TICKS_PER_SLOT / 16;
7181        let start_index = 0;
7182        let end_index = 50;
7183        let max_missing = 9;
7184
7185        // Write entries
7186        let gap: u64 = 10;
7187        let shreds: Vec<_> = (0..64)
7188            .map(|i| {
7189                Shred::new_from_data(
7190                    slot,
7191                    (i * gap) as u32,
7192                    0,
7193                    &[],
7194                    ShredFlags::empty(),
7195                    i as u8,
7196                    0,
7197                    (i * gap) as u32,
7198                )
7199            })
7200            .collect();
7201        blockstore.insert_shreds(shreds, None, false).unwrap();
7202
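        // With first_timestamp == now, the slot is too fresh for any missing
        // indexes to be reported; backdating by one slot makes the first
        // max_missing gaps (indexes 1..=9 here) eligible.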
7203        let empty: Vec<u64> = vec![];
7204        assert_eq!(
7205            blockstore.find_missing_data_indexes(
7206                slot,
7207                timestamp(), // first_timestamp
7208                defer_threshold_ticks,
7209                start_index,
7210                end_index,
7211                max_missing,
7212            ),
7213            empty
7214        );
7215        let expected: Vec<_> = (1..=9).collect();
7216        assert_eq!(
7217            blockstore.find_missing_data_indexes(
7218                slot,
7219                timestamp() - DEFAULT_MS_PER_SLOT, // first_timestamp
7220                defer_threshold_ticks,
7221                start_index,
7222                end_index,
7223                max_missing,
7224            ),
7225            expected
7226        );
7227    }
7228
7229    #[test]
7230    fn test_find_missing_data_indexes_sanity() {
7231        let slot = 0;
7232
7233        let ledger_path = get_tmp_ledger_path_auto_delete!();
7234        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7235
7236        // Early exit conditions
7237        let empty: Vec<u64> = vec![];
7238        assert_eq!(
7239            blockstore.find_missing_data_indexes(
7240                slot, // slot
7241                0,    // first_timestamp
7242                0,    // defer_threshold_ticks
7243                0,    // start_index
7244                0,    // end_index
7245                1,    // max_missing
7246            ),
7247            empty
7248        );
7249        assert_eq!(
7250            blockstore.find_missing_data_indexes(
7251                slot, // slot
7252                0,    // first_timestamp
7253                0,    // defer_threshold_ticks
7254                5,    // start_index
7255                5,    // end_index
7256                1,    // max_missing
7257            ),
7258            empty
7259        );
7260        assert_eq!(
7261            blockstore.find_missing_data_indexes(
7262                slot, // slot
7263                0,    // first_timestamp
7264                0,    // defer_threshold_ticks
7265                4,    // start_index
7266                3,    // end_index
7267                1,    // max_missing
7268            ),
7269            empty
7270        );
7271        assert_eq!(
7272            blockstore.find_missing_data_indexes(
7273                slot, // slot
7274                0,    // first_timestamp
7275                0,    // defer_threshold_ticks
7276                1,    // start_index
7277                2,    // end_index
7278                0,    // max_missing
7279            ),
7280            empty
7281        );
7282
7283        let entries = create_ticks(100, 0, Hash::default());
7284        let mut shreds =
7285            entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ false);
7286        assert!(shreds.len() > 2);
7287        shreds.drain(2..);
7288
7289        const ONE: u64 = 1;
7290        const OTHER: u64 = 4;
7291
7292        shreds[0].set_index(ONE as u32);
7293        shreds[1].set_index(OTHER as u32);
7294
        // Insert the two shreds at indexes ONE and OTHER
7296        blockstore.insert_shreds(shreds, None, false).unwrap();
7297
7298        const STARTS: u64 = OTHER * 2;
7299        const END: u64 = OTHER * 3;
7300        const MAX: usize = 10;
        // The two present shreds have indexes ONE and OTHER. Thus, for any
        // start index below STARTS, the missing indexes in [start, END) are
        // exactly the indexes in that range other than ONE and OTHER.
7304        for start in 0..STARTS {
7305            let result = blockstore.find_missing_data_indexes(
7306                slot,  // slot
7307                0,     // first_timestamp
7308                0,     // defer_threshold_ticks
7309                start, // start_index
7310                END,   // end_index
7311                MAX,   // max_missing
7312            );
7313            let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect();
7314            assert_eq!(result, expected);
7315        }
7316    }
7317
7318    #[test]
7319    fn test_no_missing_shred_indexes() {
7320        let slot = 0;
7321        let ledger_path = get_tmp_ledger_path_auto_delete!();
7322        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7323
7324        // Write entries
7325        let num_entries = 10;
7326        let entries = create_ticks(num_entries, 0, Hash::default());
7327        let shreds =
7328            entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ true);
7329        let num_shreds = shreds.len();
7330
7331        blockstore.insert_shreds(shreds, None, false).unwrap();
7332
7333        let empty: Vec<u64> = vec![];
7334        for i in 0..num_shreds as u64 {
7335            for j in 0..i {
7336                assert_eq!(
7337                    blockstore.find_missing_data_indexes(
7338                        slot,
7339                        0,                // first_timestamp
7340                        0,                // defer_threshold_ticks
7341                        j,                // start_index
7342                        i,                // end_index
7343                        (i - j) as usize, // max_missing
7344                    ),
7345                    empty
7346                );
7347            }
7348        }
7349    }
7350
7351    #[test]
7352    fn test_verify_shred_slots() {
7353        // verify_shred_slots(slot, parent, root)
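        // The rule being spot-checked: parent < slot and root <= parent,
        // with the genesis exception slot == parent == 0.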
7354        assert!(verify_shred_slots(0, 0, 0));
7355        assert!(verify_shred_slots(2, 1, 0));
7356        assert!(verify_shred_slots(2, 1, 1));
7357        assert!(!verify_shred_slots(2, 3, 0));
7358        assert!(!verify_shred_slots(2, 2, 0));
7359        assert!(!verify_shred_slots(2, 3, 3));
7360        assert!(!verify_shred_slots(2, 2, 2));
7361        assert!(!verify_shred_slots(2, 1, 3));
7362        assert!(!verify_shred_slots(2, 3, 4));
7363        assert!(!verify_shred_slots(2, 2, 3));
7364    }
7365
7366    #[test]
7367    fn test_should_insert_data_shred() {
7368        solana_logger::setup();
7369        let (mut shreds, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ false);
7370        let ledger_path = get_tmp_ledger_path_auto_delete!();
7371        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7372
7373        let max_root = 0;
7374
        // Insert the first 5 shreds; we don't have an "is_last" shred yet
7376        blockstore
7377            .insert_shreds(shreds[0..5].to_vec(), None, false)
7378            .unwrap();
7379
7380        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7381        let shred5 = shreds[5].clone();
7382
7383        // Ensure that an empty shred (one with no data) would get inserted. Such shreds
7384        // may be used as signals (broadcast does so to indicate a slot was interrupted)
7385        // Reuse shred5's header values to avoid a false negative result
7386        let empty_shred = Shred::new_from_data(
7387            shred5.slot(),
7388            shred5.index(),
7389            {
7390                let parent_offset = shred5.slot() - shred5.parent().unwrap();
7391                parent_offset as u16
7392            },
7393            &[], // data
7394            ShredFlags::LAST_SHRED_IN_SLOT,
7395            0, // reference_tick
7396            shred5.version(),
7397            shred5.fec_set_index(),
7398        );
7399        assert!(blockstore.should_insert_data_shred(
7400            &empty_shred,
7401            &slot_meta,
7402            &HashMap::new(),
7403            max_root,
7404            None,
7405            ShredSource::Repaired,
7406            &mut Vec::new(),
7407        ));
        // Trying to insert another "is_last" shred with index < the received
        // index should fail; skip over shred 7 and insert shred 8 so that
        // the received index advances to 9
7410        blockstore
7411            .insert_shreds(shreds[8..9].to_vec(), None, false)
7412            .unwrap();
7413        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7414        assert_eq!(slot_meta.received, 9);
7415        let shred7 = {
7416            if shreds[7].is_data() {
7417                shreds[7].set_last_in_slot();
7418                shreds[7].clone()
7419            } else {
7420                panic!("Shred in unexpected format")
7421            }
7422        };
7423        let mut duplicate_shreds = vec![];
7424        assert!(!blockstore.should_insert_data_shred(
7425            &shred7,
7426            &slot_meta,
7427            &HashMap::new(),
7428            max_root,
7429            None,
7430            ShredSource::Repaired,
7431            &mut duplicate_shreds,
7432        ));
7433        assert!(blockstore.has_duplicate_shreds_in_slot(0));
7434        assert_eq!(duplicate_shreds.len(), 1);
7435        assert_matches!(
7436            duplicate_shreds[0],
7437            PossibleDuplicateShred::LastIndexConflict(_, _)
7438        );
7439        assert_eq!(duplicate_shreds[0].slot(), 0);
7440
7441        // Insert all pending shreds
7442        let mut shred8 = shreds[8].clone();
7443        blockstore.insert_shreds(shreds, None, false).unwrap();
7444        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7445
7446        // Trying to insert a shred with index > the "is_last" shred should fail
7447        if shred8.is_data() {
7448            shred8.set_index((slot_meta.last_index.unwrap() + 1) as u32);
7449        } else {
7450            panic!("Shred in unexpected format")
7451        }
7452        duplicate_shreds.clear();
7453        blockstore.duplicate_slots_cf.delete(0).unwrap();
7454        assert!(!blockstore.has_duplicate_shreds_in_slot(0));
7455        assert!(!blockstore.should_insert_data_shred(
7456            &shred8,
7457            &slot_meta,
7458            &HashMap::new(),
7459            max_root,
7460            None,
7461            ShredSource::Repaired,
7462            &mut duplicate_shreds,
7463        ));
7464
7465        assert_eq!(duplicate_shreds.len(), 1);
7466        assert_matches!(
7467            duplicate_shreds[0],
7468            PossibleDuplicateShred::LastIndexConflict(_, _)
7469        );
7470        assert_eq!(duplicate_shreds[0].slot(), 0);
7471        assert!(blockstore.has_duplicate_shreds_in_slot(0));
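        // Both rejections in this test surface as LastIndexConflict
        // duplicates: an "is_last" shred below the received index, and a
        // shred beyond the recorded last index.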
7472    }
7473
7474    #[test]
7475    fn test_is_data_shred_present() {
7476        let (shreds, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true);
7477        let ledger_path = get_tmp_ledger_path_auto_delete!();
7478        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7479        let index_cf = &blockstore.index_cf;
7480
7481        blockstore
7482            .insert_shreds(shreds[0..5].to_vec(), None, false)
7483            .unwrap();
        // A shred with index less than `slot_meta.consumed` should be
        // reported as already present
7486        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7487        let index = index_cf.get(0).unwrap().unwrap();
7488        assert_eq!(slot_meta.consumed, 5);
7489        assert!(Blockstore::is_data_shred_present(
7490            &shreds[1],
7491            &slot_meta,
7492            index.data(),
7493        ));
7494
        // Insert a shred past `consumed`, then check that it is present
7496        blockstore
7497            .insert_shreds(shreds[6..7].to_vec(), None, false)
7498            .unwrap();
7499        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7500        let index = index_cf.get(0).unwrap().unwrap();
7501        assert!(Blockstore::is_data_shred_present(
7502            &shreds[6],
7503            &slot_meta,
7504            index.data()
        ));
7506    }
7507
7508    #[test]
7509    fn test_merkle_root_metas_coding() {
7510        let ledger_path = get_tmp_ledger_path_auto_delete!();
7511        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7512
7513        let parent_slot = 0;
7514        let slot = 1;
7515        let index = 0;
7516        let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
7517        let coding_shred = coding_shreds[index as usize].clone();
7518
7519        let mut shred_insertion_tracker =
7520            ShredInsertionTracker::new(coding_shreds.len(), blockstore.get_write_batch().unwrap());
7521        assert!(blockstore.check_insert_coding_shred(
7522            coding_shred.clone(),
7523            &mut shred_insertion_tracker,
7524            false,
7525            ShredSource::Turbine,
7526            &mut BlockstoreInsertionMetrics::default(),
7527        ));
7528        let ShredInsertionTracker {
7529            merkle_root_metas,
7530            write_batch,
7531            ..
7532        } = shred_insertion_tracker;
7533
7534        assert_eq!(merkle_root_metas.len(), 1);
7535        assert_eq!(
7536            merkle_root_metas
7537                .get(&coding_shred.erasure_set())
7538                .unwrap()
7539                .as_ref()
7540                .merkle_root(),
7541            coding_shred.merkle_root().ok(),
7542        );
7543        assert_eq!(
7544            merkle_root_metas
7545                .get(&coding_shred.erasure_set())
7546                .unwrap()
7547                .as_ref()
7548                .first_received_shred_index(),
7549            index
7550        );
7551        assert_eq!(
7552            merkle_root_metas
7553                .get(&coding_shred.erasure_set())
7554                .unwrap()
7555                .as_ref()
7556                .first_received_shred_type(),
7557            ShredType::Code,
7558        );
7559
7560        for (erasure_set, working_merkle_root_meta) in merkle_root_metas {
7561            blockstore
7562                .merkle_root_meta_cf
7563                .put(erasure_set.store_key(), working_merkle_root_meta.as_ref())
7564                .unwrap();
7565        }
7566        blockstore.write_batch(write_batch).unwrap();
7567
        // Add a shred with a different merkle root and index
7569        let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
7570        let new_coding_shred = coding_shreds[(index + 1) as usize].clone();
7571
7572        let mut shred_insertion_tracker =
7573            ShredInsertionTracker::new(coding_shreds.len(), blockstore.get_write_batch().unwrap());
7574
7575        assert!(!blockstore.check_insert_coding_shred(
7576            new_coding_shred.clone(),
7577            &mut shred_insertion_tracker,
7578            false,
7579            ShredSource::Turbine,
7580            &mut BlockstoreInsertionMetrics::default(),
7581        ));
7582        let ShredInsertionTracker {
7583            ref merkle_root_metas,
7584            ref duplicate_shreds,
7585            ..
7586        } = shred_insertion_tracker;
7587
7588        // No insert, notify duplicate
7589        assert_eq!(duplicate_shreds.len(), 1);
7590        match &duplicate_shreds[0] {
7591            PossibleDuplicateShred::MerkleRootConflict(shred, _) if shred.slot() == slot => (),
7592            _ => panic!("No merkle root conflict"),
7593        }
7594
7595        // Verify that we still have the merkle root meta from the original shred
7596        assert_eq!(merkle_root_metas.len(), 1);
7597        assert_eq!(
7598            merkle_root_metas
7599                .get(&coding_shred.erasure_set())
7600                .unwrap()
7601                .as_ref()
7602                .merkle_root(),
7603            coding_shred.merkle_root().ok()
7604        );
7605        assert_eq!(
7606            merkle_root_metas
7607                .get(&coding_shred.erasure_set())
7608                .unwrap()
7609                .as_ref()
7610                .first_received_shred_index(),
7611            index
7612        );
7613
7614        // Blockstore should also have the merkle root meta of the original shred
7615        assert_eq!(
7616            blockstore
7617                .merkle_root_meta(coding_shred.erasure_set())
7618                .unwrap()
7619                .unwrap()
7620                .merkle_root(),
7621            coding_shred.merkle_root().ok()
7622        );
7623        assert_eq!(
7624            blockstore
7625                .merkle_root_meta(coding_shred.erasure_set())
7626                .unwrap()
7627                .unwrap()
7628                .first_received_shred_index(),
7629            index
7630        );
7631
        // Add a shred from a different FEC set
7633        let new_index = index + 31;
7634        let (_, coding_shreds, _) =
7635            setup_erasure_shreds_with_index(slot, parent_slot, 10, new_index);
7636        let new_coding_shred = coding_shreds[0].clone();
7637
7638        assert!(blockstore.check_insert_coding_shred(
7639            new_coding_shred.clone(),
7640            &mut shred_insertion_tracker,
7641            false,
7642            ShredSource::Turbine,
7643            &mut BlockstoreInsertionMetrics::default(),
7644        ));
7645        let ShredInsertionTracker {
7646            ref merkle_root_metas,
7647            ..
7648        } = shred_insertion_tracker;
7649
7650        // Verify that we still have the merkle root meta for the original shred
7651        // and the new shred
7652        assert_eq!(merkle_root_metas.len(), 2);
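        // The metas are keyed by erasure set, so the new FEC set created a
        // second entry while leaving the original entry untouched.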
7653        assert_eq!(
7654            merkle_root_metas
7655                .get(&coding_shred.erasure_set())
7656                .unwrap()
7657                .as_ref()
7658                .merkle_root(),
7659            coding_shred.merkle_root().ok()
7660        );
7661        assert_eq!(
7662            merkle_root_metas
7663                .get(&coding_shred.erasure_set())
7664                .unwrap()
7665                .as_ref()
7666                .first_received_shred_index(),
7667            index
7668        );
7669        assert_eq!(
7670            merkle_root_metas
7671                .get(&new_coding_shred.erasure_set())
7672                .unwrap()
7673                .as_ref()
7674                .merkle_root(),
7675            new_coding_shred.merkle_root().ok()
7676        );
7677        assert_eq!(
7678            merkle_root_metas
7679                .get(&new_coding_shred.erasure_set())
7680                .unwrap()
7681                .as_ref()
7682                .first_received_shred_index(),
7683            new_index
7684        );
7685    }
7686
7687    #[test]
7688    fn test_merkle_root_metas_data() {
7689        let ledger_path = get_tmp_ledger_path_auto_delete!();
7690        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7691
7692        let parent_slot = 0;
7693        let slot = 1;
7694        let index = 11;
7695        let fec_set_index = 11;
7696        let (data_shreds, _, _) =
7697            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
7698        let data_shred = data_shreds[0].clone();
7699
7700        let mut shred_insertion_tracker =
7701            ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());
7702        blockstore
7703            .check_insert_data_shred(
7704                data_shred.clone(),
7705                &mut shred_insertion_tracker,
7706                false,
7707                None,
7708                ShredSource::Turbine,
7709            )
7710            .unwrap();
7711        let ShredInsertionTracker {
7712            merkle_root_metas,
7713            write_batch,
7714            ..
7715        } = shred_insertion_tracker;
7716        assert_eq!(merkle_root_metas.len(), 1);
7717        assert_eq!(
7718            merkle_root_metas
7719                .get(&data_shred.erasure_set())
7720                .unwrap()
7721                .as_ref()
7722                .merkle_root(),
7723            data_shred.merkle_root().ok()
7724        );
7725        assert_eq!(
7726            merkle_root_metas
7727                .get(&data_shred.erasure_set())
7728                .unwrap()
7729                .as_ref()
7730                .first_received_shred_index(),
7731            index
7732        );
7733        assert_eq!(
7734            merkle_root_metas
7735                .get(&data_shred.erasure_set())
7736                .unwrap()
7737                .as_ref()
7738                .first_received_shred_type(),
7739            ShredType::Data,
7740        );
7741
7742        for (erasure_set, working_merkle_root_meta) in merkle_root_metas {
7743            blockstore
7744                .merkle_root_meta_cf
7745                .put(erasure_set.store_key(), working_merkle_root_meta.as_ref())
7746                .unwrap();
7747        }
7748        blockstore.write_batch(write_batch).unwrap();
7749
        // Add a shred with a different merkle root and index
7751        let (data_shreds, _, _) =
7752            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
7753        let new_data_shred = data_shreds[1].clone();
7754
7755        let mut shred_insertion_tracker =
7756            ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());
7757
7758        assert!(blockstore
7759            .check_insert_data_shred(
7760                new_data_shred.clone(),
7761                &mut shred_insertion_tracker,
7762                false,
7763                None,
7764                ShredSource::Turbine,
7765            )
7766            .is_err());
7767        let ShredInsertionTracker {
7768            merkle_root_metas,
7769            duplicate_shreds,
7770            write_batch,
7771            ..
7772        } = shred_insertion_tracker;
7773
7774        // No insert, notify duplicate, and block is dead
7775        assert_eq!(duplicate_shreds.len(), 1);
7776        assert_matches!(
7777            duplicate_shreds[0],
7778            PossibleDuplicateShred::MerkleRootConflict(_, _)
7779        );
7780
7781        // Verify that we still have the merkle root meta from the original shred
7782        assert_eq!(merkle_root_metas.len(), 1);
7783        assert_eq!(
7784            merkle_root_metas
7785                .get(&data_shred.erasure_set())
7786                .unwrap()
7787                .as_ref()
7788                .merkle_root(),
7789            data_shred.merkle_root().ok()
7790        );
7791        assert_eq!(
7792            merkle_root_metas
7793                .get(&data_shred.erasure_set())
7794                .unwrap()
7795                .as_ref()
7796                .first_received_shred_index(),
7797            index
7798        );
7799
7800        // Block is now dead
        blockstore.write_batch(write_batch).unwrap();
7802        assert!(blockstore.is_dead(slot));
7803        blockstore.remove_dead_slot(slot).unwrap();
7804
7805        // Blockstore should also have the merkle root meta of the original shred
7806        assert_eq!(
7807            blockstore
7808                .merkle_root_meta(data_shred.erasure_set())
7809                .unwrap()
7810                .unwrap()
7811                .merkle_root(),
7812            data_shred.merkle_root().ok()
7813        );
7814        assert_eq!(
7815            blockstore
7816                .merkle_root_meta(data_shred.erasure_set())
7817                .unwrap()
7818                .unwrap()
7819                .first_received_shred_index(),
7820            index
7821        );
7822
        // Add a shred from a different FEC set
7824        let new_index = fec_set_index + 31;
7825        let new_data_shred = Shred::new_from_data(
7826            slot,
7827            new_index,
7828            1,          // parent_offset
7829            &[3, 3, 3], // data
7830            ShredFlags::empty(),
7831            0, // reference_tick,
7832            0, // version
7833            fec_set_index + 30,
7834        );
7835
7836        let mut shred_insertion_tracker =
            ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());
7838        blockstore
7839            .check_insert_data_shred(
7840                new_data_shred.clone(),
7841                &mut shred_insertion_tracker,
7842                false,
7843                None,
7844                ShredSource::Turbine,
7845            )
7846            .unwrap();
7847        let ShredInsertionTracker {
7848            merkle_root_metas,
7849            write_batch,
7850            ..
7851        } = shred_insertion_tracker;
        blockstore.write_batch(write_batch).unwrap();
7853
7854        // Verify that we still have the merkle root meta for the original shred
7855        // and the new shred
7856        assert_eq!(
7857            blockstore
7858                .merkle_root_meta(data_shred.erasure_set())
7859                .unwrap()
7860                .as_ref()
7861                .unwrap()
7862                .merkle_root(),
7863            data_shred.merkle_root().ok()
7864        );
7865        assert_eq!(
7866            blockstore
7867                .merkle_root_meta(data_shred.erasure_set())
7868                .unwrap()
7869                .as_ref()
7870                .unwrap()
7871                .first_received_shred_index(),
7872            index
7873        );
7874        assert_eq!(
7875            merkle_root_metas
7876                .get(&new_data_shred.erasure_set())
7877                .unwrap()
7878                .as_ref()
7879                .merkle_root(),
7880            new_data_shred.merkle_root().ok()
7881        );
7882        assert_eq!(
7883            merkle_root_metas
7884                .get(&new_data_shred.erasure_set())
7885                .unwrap()
7886                .as_ref()
7887                .first_received_shred_index(),
7888            new_index
7889        );
7890    }
7891
7892    #[test]
7893    fn test_check_insert_coding_shred() {
7894        let ledger_path = get_tmp_ledger_path_auto_delete!();
7895        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7896
7897        let slot = 1;
7898        let coding_shred = Shred::new_from_parity_shard(
7899            slot,
7900            11,  // index
7901            &[], // parity_shard
7902            11,  // fec_set_index
7903            11,  // num_data_shreds
7904            11,  // num_coding_shreds
7905            8,   // position
7906            0,   // version
7907        );
7908
7909        let mut shred_insertion_tracker =
7910            ShredInsertionTracker::new(1, blockstore.get_write_batch().unwrap());
7911        assert!(blockstore.check_insert_coding_shred(
7912            coding_shred.clone(),
7913            &mut shred_insertion_tracker,
7914            false,
7915            ShredSource::Turbine,
7916            &mut BlockstoreInsertionMetrics::default(),
7917        ));
7918
        // Inserting again fails on the duplicate
7920        assert!(!blockstore.check_insert_coding_shred(
7921            coding_shred.clone(),
7922            &mut shred_insertion_tracker,
7923            false,
7924            ShredSource::Turbine,
7925            &mut BlockstoreInsertionMetrics::default(),
7926        ));
7927        assert_eq!(
7928            shred_insertion_tracker.duplicate_shreds,
7929            vec![PossibleDuplicateShred::Exists(coding_shred)]
7930        );
7931    }
7932
7933    #[test]
7934    fn test_should_insert_coding_shred() {
7935        let ledger_path = get_tmp_ledger_path_auto_delete!();
7936        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7937        let max_root = 0;
7938
7939        let slot = 1;
7940        let mut coding_shred = Shred::new_from_parity_shard(
7941            slot,
7942            11,  // index
7943            &[], // parity_shard
7944            11,  // fec_set_index
7945            11,  // num_data_shreds
7946            11,  // num_coding_shreds
7947            8,   // position
7948            0,   // version
7949        );
7950
7951        // Insert a good coding shred
7952        assert!(Blockstore::should_insert_coding_shred(
7953            &coding_shred,
7954            max_root
7955        ));
7956
7957        // Insertion should succeed
7958        blockstore
7959            .insert_shreds(vec![coding_shred.clone()], None, false)
7960            .unwrap();
7961
7962        // Trying to insert the same shred again should pass since this doesn't check for
7963        // duplicate index
7964        {
7965            assert!(Blockstore::should_insert_coding_shred(
7966                &coding_shred,
7967                max_root
7968            ));
7969        }
7970
7971        // Establish a baseline that works
7972        coding_shred.set_index(coding_shred.index() + 1);
7973        assert!(Blockstore::should_insert_coding_shred(
7974            &coding_shred,
7975            max_root
7976        ));
7977
        // Trying to insert a shred into a slot <= the last root should fail
7979        {
7980            let mut coding_shred = coding_shred.clone();
7981            coding_shred.set_slot(max_root);
7982            assert!(!Blockstore::should_insert_coding_shred(
7983                &coding_shred,
7984                max_root
7985            ));
7986        }
7987    }
7988
7989    #[test]
7990    fn test_insert_multiple_is_last() {
7991        solana_logger::setup();
7992        let (shreds, _) = make_slot_entries(0, 0, 18, /*merkle_variant:*/ true);
7993        let num_shreds = shreds.len() as u64;
7994        let ledger_path = get_tmp_ledger_path_auto_delete!();
7995        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
7996
7997        blockstore.insert_shreds(shreds, None, false).unwrap();
7998        let slot_meta = blockstore.meta(0).unwrap().unwrap();
7999
8000        assert_eq!(slot_meta.consumed, num_shreds);
8001        assert_eq!(slot_meta.received, num_shreds);
8002        assert_eq!(slot_meta.last_index, Some(num_shreds - 1));
8003        assert!(slot_meta.is_full());
8004
8005        let (shreds, _) = make_slot_entries(0, 0, 600, /*merkle_variant:*/ true);
8006        assert!(shreds.len() > num_shreds as usize);
8007        blockstore.insert_shreds(shreds, None, false).unwrap();
8008        let slot_meta = blockstore.meta(0).unwrap().unwrap();
8009
8010        assert_eq!(slot_meta.consumed, num_shreds);
8011        assert_eq!(slot_meta.received, num_shreds);
8012        assert_eq!(slot_meta.last_index, Some(num_shreds - 1));
8013        assert!(slot_meta.is_full());
8014
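        // The second batch conflicted with the already-full slot, so the
        // slot is flagged as containing duplicate shreds.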
8015        assert!(blockstore.has_duplicate_shreds_in_slot(0));
8016    }
8017
8018    #[test]
8019    fn test_slot_data_iterator() {
8020        // Construct the shreds
8021        let ledger_path = get_tmp_ledger_path_auto_delete!();
8022        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8023        let shreds_per_slot = 10;
8024        let slots = vec![2, 4, 8, 12];
8025        let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot, 0);
8026        let slot_8_shreds = all_shreds[2].0.clone();
8027        for (slot_shreds, _) in all_shreds {
8028            blockstore.insert_shreds(slot_shreds, None, false).unwrap();
8029        }
8030
        // Slot doesn't exist, iterator should be empty
8032        let shred_iter = blockstore.slot_data_iterator(5, 0).unwrap();
8033        let result: Vec<_> = shred_iter.collect();
8034        assert_eq!(result, vec![]);
8035
8036        // Test that the iterator for slot 8 contains what was inserted earlier
8037        let shred_iter = blockstore.slot_data_iterator(8, 0).unwrap();
8038        let result: Vec<Shred> = shred_iter
8039            .filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok())
8040            .collect();
8041        assert_eq!(result.len(), slot_8_shreds.len());
8042        assert_eq!(result, slot_8_shreds);
8043    }
8044
8045    #[test]
8046    fn test_set_roots() {
8047        let ledger_path = get_tmp_ledger_path_auto_delete!();
8048        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8049        let chained_slots = vec![0, 2, 4, 7, 12, 15];
8050        assert_eq!(blockstore.max_root(), 0);
8051
8052        blockstore.set_roots(chained_slots.iter()).unwrap();
8053
8054        assert_eq!(blockstore.max_root(), 15);
8055
8056        for i in chained_slots {
8057            assert!(blockstore.is_root(i));
8058        }
8059    }
8060
8061    #[test]
8062    fn test_is_skipped() {
8063        let ledger_path = get_tmp_ledger_path_auto_delete!();
8064        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8065        let roots = [2, 4, 7, 12, 15];
8066        blockstore.set_roots(roots.iter()).unwrap();
8067
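        // A slot is "skipped" when it falls between the lowest and highest roots
        // but was never rooted itself.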
8068        for i in 0..20 {
8069            if i < 2 || roots.contains(&i) || i > 15 {
8070                assert!(!blockstore.is_skipped(i));
8071            } else {
8072                assert!(blockstore.is_skipped(i));
8073            }
8074        }
8075    }
8076
8077    #[test]
8078    fn test_iter_bounds() {
8079        let ledger_path = get_tmp_ledger_path_auto_delete!();
8080        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8081
8082        // Slot 5 does not exist; the iterator should succeed but yield nothing
8083        blockstore
8084            .slot_meta_iterator(5)
8085            .unwrap()
8086            .for_each(|_| panic!());
8087    }
8088
8089    #[test]
8090    fn test_get_completed_data_ranges() {
8091        let completed_data_end_indexes = [2, 4, 9, 11].iter().copied().collect();
8092
8093        // Consumed is 1, which means we are missing the shred with index 1; should return empty
8094        let start_index = 0;
8095        let consumed = 1;
8096        assert_eq!(
8097            Blockstore::get_completed_data_ranges(
8098                start_index,
8099                &completed_data_end_indexes,
8100                consumed
8101            ),
8102            vec![]
8103        );
8104
8105        let start_index = 0;
8106        let consumed = 3;
8107        assert_eq!(
8108            Blockstore::get_completed_data_ranges(
8109                start_index,
8110                &completed_data_end_indexes,
8111                consumed
8112            ),
8113            vec![0..3]
8114        );
8115
8116        // Test all possible ranges:
8117        //
8118        // `consumed == completed_data_end_indexes[j] + 1` means we have all the shreds up to
8119        // index `completed_data_end_indexes[j]`. Thus the completed data blocks are everything
8120        // in the range:
8121        // [start_index, completed_data_end_indexes[j]] ==
8122        // [completed_data_end_indexes[i], completed_data_end_indexes[j]].
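        // As a concrete instance of the identity above (i = 0, j = 1): with end
        // indexes {2, 4, 9, 11}, start_index = 2 and consumed = 5, the completed
        // ranges are the single-shred block 2..3 followed by 3..5.
        assert_eq!(
            Blockstore::get_completed_data_ranges(2, &completed_data_end_indexes, 5),
            vec![2..3, 3..5]
        );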
8123        let completed_data_end_indexes: Vec<_> = completed_data_end_indexes.into_iter().collect();
8124        for i in 0..completed_data_end_indexes.len() {
8125            for j in i..completed_data_end_indexes.len() {
8126                let start_index = completed_data_end_indexes[i];
8127                let consumed = completed_data_end_indexes[j] + 1;
8128                // When start_index == completed_data_end_indexes[i], the shred with
8129                // index == start_index forms a single-shred data block, so the start
8130                // index is also the end index for that data block.
8131                let expected = std::iter::once(start_index..start_index + 1)
8132                    .chain(
8133                        completed_data_end_indexes[i..=j]
8134                            .windows(2)
8135                            .map(|end_indexes| (end_indexes[0] + 1..end_indexes[1] + 1)),
8136                    )
8137                    .collect::<Vec<_>>();
8138
8139                let completed_data_end_indexes =
8140                    completed_data_end_indexes.iter().copied().collect();
8141                assert_eq!(
8142                    Blockstore::get_completed_data_ranges(
8143                        start_index,
8144                        &completed_data_end_indexes,
8145                        consumed
8146                    ),
8147                    expected
8148                );
8149            }
8150        }
8151    }
8152
8153    #[test]
8154    fn test_get_slot_entries_with_shred_count_corruption() {
8155        let ledger_path = get_tmp_ledger_path_auto_delete!();
8156        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8157        let num_ticks = 8;
8158        let entries = create_ticks(num_ticks, 0, Hash::default());
8159        let slot = 1;
8160        let shreds =
8161            entries_to_test_shreds(&entries, slot, 0, false, 0, /*merkle_variant:*/ true);
8162        let next_shred_index = shreds.len();
8163        blockstore
8164            .insert_shreds(shreds, None, false)
8165            .expect("Expected successful write of shreds");
8166        assert_eq!(
8167            blockstore.get_slot_entries(slot, 0).unwrap().len() as u64,
8168            num_ticks
8169        );
8170
8171        // Insert an empty shred that won't deshred into entries
8172        let shreds = vec![Shred::new_from_data(
8173            slot,
8174            next_shred_index as u32,
8175            1,
8176            &[1, 1, 1],
8177            ShredFlags::LAST_SHRED_IN_SLOT,
8178            0,
8179            0,
8180            next_shred_index as u32,
8181        )];
8182
8183        // With the corruption, nothing should be returned, even though an
8184        // earlier data block was valid
8185        blockstore
8186            .insert_shreds(shreds, None, false)
8187            .expect("Expected successful write of shreds");
8188        assert!(blockstore.get_slot_entries(slot, 0).is_err());
8189    }
8190
8191    #[test]
8192    fn test_no_insert_but_modify_slot_meta() {
8193        // This tests correctness of the SlotMeta in various cases in which a shred
8194        // gets filtered out by the insertion checks
8195        let (shreds0, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true);
8196        let ledger_path = get_tmp_ledger_path_auto_delete!();
8197        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8198
8199        // Insert the first 5 shreds; we don't have an "is_last" shred yet
8200        blockstore
8201            .insert_shreds(shreds0[0..5].to_vec(), None, false)
8202            .unwrap();
8203
8204        // Insert a duplicate shred for slot 's'; it should get ignored. But also
8205        // insert shreds that chain to 's' and confirm the update shows up in the
8206        // SlotMeta for 's'.
8207        let (mut shreds2, _) = make_slot_entries(2, 0, 200, /*merkle_variant:*/ true);
8208        let (mut shreds3, _) = make_slot_entries(3, 0, 200, /*merkle_variant:*/ true);
8209        shreds2.push(shreds0[1].clone());
8210        shreds3.insert(0, shreds0[1].clone());
8211        blockstore.insert_shreds(shreds2, None, false).unwrap();
8212        let slot_meta = blockstore.meta(0).unwrap().unwrap();
8213        assert_eq!(slot_meta.next_slots, vec![2]);
8214        blockstore.insert_shreds(shreds3, None, false).unwrap();
8215        let slot_meta = blockstore.meta(0).unwrap().unwrap();
8216        assert_eq!(slot_meta.next_slots, vec![2, 3]);
8217    }
8218
8219    #[test]
8220    fn test_trusted_insert_shreds() {
8221        let ledger_path = get_tmp_ledger_path_auto_delete!();
8222        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8223
8224        // Make shred for slot 1
8225        let (shreds1, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true);
8226        let max_root = 100;
8227
8228        blockstore.set_roots(std::iter::once(&max_root)).unwrap();
8229
8230        // Insert will be ignored because the shreds' slot is < the max root
8231        blockstore
8232            .insert_shreds(shreds1[..].to_vec(), None, false)
8233            .unwrap();
8234        assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());
8235
8236        // Insert through trusted path will succeed
8237        blockstore
8238            .insert_shreds(shreds1[..].to_vec(), None, true)
8239            .unwrap();
8240        assert!(blockstore.get_data_shred(1, 0).unwrap().is_some());
8241    }
8242
8243    #[test]
8244    fn test_get_first_available_block() {
8245        let mint_total = 1_000_000_000_000;
8246        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
8247        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
8248        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8249        assert_eq!(blockstore.get_first_available_block().unwrap(), 0);
8250        assert_eq!(blockstore.lowest_slot_with_genesis(), 0);
8251        assert_eq!(blockstore.lowest_slot(), 0);
8252        for slot in 1..4 {
8253            let entries = make_slot_entries_with_transactions(100);
8254            let shreds = entries_to_test_shreds(
8255                &entries,
8256                slot,
8257                slot - 1, // parent_slot
8258                true,     // is_full_slot
8259                0,        // version
8260                true,     // merkle_variant
8261            );
8262            blockstore.insert_shreds(shreds, None, false).unwrap();
8263            blockstore.set_roots([slot].iter()).unwrap();
8264        }
8265        assert_eq!(blockstore.get_first_available_block().unwrap(), 0);
8266        assert_eq!(blockstore.lowest_slot_with_genesis(), 0);
8267        assert_eq!(blockstore.lowest_slot(), 1);
8268
8269        blockstore.purge_slots(0, 1, PurgeType::CompactionFilter);
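        // Slot 2 is now the lowest remaining root, but its parent was purged, so
        // the first block that can be served complete is its child, slot 3.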
8270        assert_eq!(blockstore.get_first_available_block().unwrap(), 3);
8271        assert_eq!(blockstore.lowest_slot_with_genesis(), 2);
8272        assert_eq!(blockstore.lowest_slot(), 2);
8273    }
8274
8275    #[test]
8276    fn test_get_rooted_block() {
8277        let slot = 10;
8278        let entries = make_slot_entries_with_transactions(100);
8279        let blockhash = get_last_hash(entries.iter()).unwrap();
8280        let shreds = entries_to_test_shreds(
8281            &entries,
8282            slot,
8283            slot - 1, // parent_slot
8284            true,     // is_full_slot
8285            0,        // version
8286            true,     // merkle_variant
8287        );
8288        let more_shreds = entries_to_test_shreds(
8289            &entries,
8290            slot + 1,
8291            slot, // parent_slot
8292            true, // is_full_slot
8293            0,    // version
8294            true, // merkle_variant
8295        );
8296        let unrooted_shreds = entries_to_test_shreds(
8297            &entries,
8298            slot + 2,
8299            slot + 1, // parent_slot
8300            true,     // is_full_slot
8301            0,        // version
8302            true,     // merkle_variant
8303        );
8304        let ledger_path = get_tmp_ledger_path_auto_delete!();
8305        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8306        blockstore.insert_shreds(shreds, None, false).unwrap();
8307        blockstore.insert_shreds(more_shreds, None, false).unwrap();
8308        blockstore
8309            .insert_shreds(unrooted_shreds, None, false)
8310            .unwrap();
8311        blockstore
8312            .set_roots([slot - 1, slot, slot + 1].iter())
8313            .unwrap();
8314
8315        let parent_meta = SlotMeta::default();
8316        blockstore
8317            .put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
8318            .unwrap();
8319
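        // For every transaction, write a status record under each of the three
        // slots so that the block fetches below resolve metadata keyed to their
        // own slot.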
8320        let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
8321            .iter()
8322            .filter(|entry| !entry.is_tick())
8323            .cloned()
8324            .flat_map(|entry| entry.transactions)
8325            .map(|transaction| {
8326                let mut pre_balances: Vec<u64> = vec![];
8327                let mut post_balances: Vec<u64> = vec![];
8328                for i in 0..transaction.message.static_account_keys().len() {
8329                    pre_balances.push(i as u64 * 10);
8330                    post_balances.push(i as u64 * 11);
8331                }
8332                let compute_units_consumed = Some(12345);
8333                let signature = transaction.signatures[0];
8334                let status = TransactionStatusMeta {
8335                    status: Ok(()),
8336                    fee: 42,
8337                    pre_balances: pre_balances.clone(),
8338                    post_balances: post_balances.clone(),
8339                    inner_instructions: Some(vec![]),
8340                    log_messages: Some(vec![]),
8341                    pre_token_balances: Some(vec![]),
8342                    post_token_balances: Some(vec![]),
8343                    rewards: Some(vec![]),
8344                    loaded_addresses: LoadedAddresses::default(),
8345                    return_data: Some(TransactionReturnData::default()),
8346                    compute_units_consumed,
8347                }
8348                .into();
8349                blockstore
8350                    .transaction_status_cf
8351                    .put_protobuf((signature, slot), &status)
8352                    .unwrap();
8353                let status = TransactionStatusMeta {
8354                    status: Ok(()),
8355                    fee: 42,
8356                    pre_balances: pre_balances.clone(),
8357                    post_balances: post_balances.clone(),
8358                    inner_instructions: Some(vec![]),
8359                    log_messages: Some(vec![]),
8360                    pre_token_balances: Some(vec![]),
8361                    post_token_balances: Some(vec![]),
8362                    rewards: Some(vec![]),
8363                    loaded_addresses: LoadedAddresses::default(),
8364                    return_data: Some(TransactionReturnData::default()),
8365                    compute_units_consumed,
8366                }
8367                .into();
8368                blockstore
8369                    .transaction_status_cf
8370                    .put_protobuf((signature, slot + 1), &status)
8371                    .unwrap();
8372                let status = TransactionStatusMeta {
8373                    status: Ok(()),
8374                    fee: 42,
8375                    pre_balances: pre_balances.clone(),
8376                    post_balances: post_balances.clone(),
8377                    inner_instructions: Some(vec![]),
8378                    log_messages: Some(vec![]),
8379                    pre_token_balances: Some(vec![]),
8380                    post_token_balances: Some(vec![]),
8381                    rewards: Some(vec![]),
8382                    loaded_addresses: LoadedAddresses::default(),
8383                    return_data: Some(TransactionReturnData::default()),
8384                    compute_units_consumed,
8385                }
8386                .into();
8387                blockstore
8388                    .transaction_status_cf
8389                    .put_protobuf((signature, slot + 2), &status)
8390                    .unwrap();
8391                VersionedTransactionWithStatusMeta {
8392                    transaction,
8393                    meta: TransactionStatusMeta {
8394                        status: Ok(()),
8395                        fee: 42,
8396                        pre_balances,
8397                        post_balances,
8398                        inner_instructions: Some(vec![]),
8399                        log_messages: Some(vec![]),
8400                        pre_token_balances: Some(vec![]),
8401                        post_token_balances: Some(vec![]),
8402                        rewards: Some(vec![]),
8403                        loaded_addresses: LoadedAddresses::default(),
8404                        return_data: Some(TransactionReturnData::default()),
8405                        compute_units_consumed,
8406                    },
8407                }
8408            })
8409            .collect();
8410
8411        // Even if marked as root, a slot that is empty of entries should return an error
8412        assert_matches!(
8413            blockstore.get_rooted_block(slot - 1, true),
8414            Err(BlockstoreError::SlotUnavailable)
8415        );
8416
8417        // The previous_blockhash of `expected_block` is default because its parent slot is a root,
8418        // but empty of entries (e.g. snapshot root slots). This now returns an error.
8419        assert_matches!(
8420            blockstore.get_rooted_block(slot, true),
8421            Err(BlockstoreError::ParentEntriesUnavailable)
8422        );
8423
8424        // Test with require_previous_blockhash set to false
8425        let confirmed_block = blockstore.get_rooted_block(slot, false).unwrap();
8426        assert_eq!(confirmed_block.transactions.len(), 100);
8427        let expected_block = VersionedConfirmedBlock {
8428            transactions: expected_transactions.clone(),
8429            parent_slot: slot - 1,
8430            blockhash: blockhash.to_string(),
8431            previous_blockhash: Hash::default().to_string(),
8432            rewards: vec![],
8433            num_partitions: None,
8434            block_time: None,
8435            block_height: None,
8436        };
8437        assert_eq!(confirmed_block, expected_block);
8438
8439        let confirmed_block = blockstore.get_rooted_block(slot + 1, true).unwrap();
8440        assert_eq!(confirmed_block.transactions.len(), 100);
8441
8442        let mut expected_block = VersionedConfirmedBlock {
8443            transactions: expected_transactions.clone(),
8444            parent_slot: slot,
8445            blockhash: blockhash.to_string(),
8446            previous_blockhash: blockhash.to_string(),
8447            rewards: vec![],
8448            num_partitions: None,
8449            block_time: None,
8450            block_height: None,
8451        };
8452        assert_eq!(confirmed_block, expected_block);
8453
8454        let not_root = blockstore.get_rooted_block(slot + 2, true).unwrap_err();
8455        assert_matches!(not_root, BlockstoreError::SlotNotRooted);
8456
8457        let complete_block = blockstore.get_complete_block(slot + 2, true).unwrap();
8458        assert_eq!(complete_block.transactions.len(), 100);
8459
8460        let mut expected_complete_block = VersionedConfirmedBlock {
8461            transactions: expected_transactions,
8462            parent_slot: slot + 1,
8463            blockhash: blockhash.to_string(),
8464            previous_blockhash: blockhash.to_string(),
8465            rewards: vec![],
8466            num_partitions: None,
8467            block_time: None,
8468            block_height: None,
8469        };
8470        assert_eq!(complete_block, expected_complete_block);
8471
8472        // Test that block_time and block_height are returned, if available
8473        let timestamp = 1_576_183_541;
8474        blockstore.blocktime_cf.put(slot + 1, &timestamp).unwrap();
8475        expected_block.block_time = Some(timestamp);
8476        let block_height = slot - 2;
8477        blockstore
8478            .block_height_cf
8479            .put(slot + 1, &block_height)
8480            .unwrap();
8481        expected_block.block_height = Some(block_height);
8482
8483        let confirmed_block = blockstore.get_rooted_block(slot + 1, true).unwrap();
8484        assert_eq!(confirmed_block, expected_block);
8485
8486        let timestamp = 1_576_183_542;
8487        blockstore.blocktime_cf.put(slot + 2, &timestamp).unwrap();
8488        expected_complete_block.block_time = Some(timestamp);
8489        let block_height = slot - 1;
8490        blockstore
8491            .block_height_cf
8492            .put(slot + 2, &block_height)
8493            .unwrap();
8494        expected_complete_block.block_height = Some(block_height);
8495
8496        let complete_block = blockstore.get_complete_block(slot + 2, true).unwrap();
8497        assert_eq!(complete_block, expected_complete_block);
8498    }
8499
8500    #[test]
8501    fn test_persist_transaction_status() {
8502        let ledger_path = get_tmp_ledger_path_auto_delete!();
8503        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8504
8505        let transaction_status_cf = &blockstore.transaction_status_cf;
8506
8507        let pre_balances_vec = vec![1, 2, 3];
8508        let post_balances_vec = vec![3, 2, 1];
8509        let inner_instructions_vec = vec![InnerInstructions {
8510            index: 0,
8511            instructions: vec![InnerInstruction {
8512                instruction: CompiledInstruction::new(1, &(), vec![0]),
8513                stack_height: Some(2),
8514            }],
8515        }];
8516        let log_messages_vec = vec![String::from("Test message\n")];
8517        let pre_token_balances_vec = vec![];
8518        let post_token_balances_vec = vec![];
8519        let rewards_vec = vec![];
8520        let test_loaded_addresses = LoadedAddresses {
8521            writable: vec![Pubkey::new_unique()],
8522            readonly: vec![Pubkey::new_unique()],
8523        };
8524        let test_return_data = TransactionReturnData {
8525            program_id: Pubkey::new_unique(),
8526            data: vec![1, 2, 3],
8527        };
8528        let compute_units_consumed_1 = Some(3812649u64);
8529        let compute_units_consumed_2 = Some(42u64);
8530
8531        // result not found
8532        assert!(transaction_status_cf
8533            .get_protobuf((Signature::default(), 0))
8534            .unwrap()
8535            .is_none());
8536
8537        // insert value
8538        let status = TransactionStatusMeta {
8539            status: solana_sdk::transaction::Result::<()>::Err(TransactionError::AccountNotFound),
8540            fee: 5u64,
8541            pre_balances: pre_balances_vec.clone(),
8542            post_balances: post_balances_vec.clone(),
8543            inner_instructions: Some(inner_instructions_vec.clone()),
8544            log_messages: Some(log_messages_vec.clone()),
8545            pre_token_balances: Some(pre_token_balances_vec.clone()),
8546            post_token_balances: Some(post_token_balances_vec.clone()),
8547            rewards: Some(rewards_vec.clone()),
8548            loaded_addresses: test_loaded_addresses.clone(),
8549            return_data: Some(test_return_data.clone()),
8550            compute_units_consumed: compute_units_consumed_1,
8551        }
8552        .into();
8553        assert!(transaction_status_cf
8554            .put_protobuf((Signature::default(), 0), &status)
8555            .is_ok());
8556
8557        // result found
8558        let TransactionStatusMeta {
8559            status,
8560            fee,
8561            pre_balances,
8562            post_balances,
8563            inner_instructions,
8564            log_messages,
8565            pre_token_balances,
8566            post_token_balances,
8567            rewards,
8568            loaded_addresses,
8569            return_data,
8570            compute_units_consumed,
8571        } = transaction_status_cf
8572            .get_protobuf((Signature::default(), 0))
8573            .unwrap()
8574            .unwrap()
8575            .try_into()
8576            .unwrap();
8577        assert_eq!(status, Err(TransactionError::AccountNotFound));
8578        assert_eq!(fee, 5u64);
8579        assert_eq!(pre_balances, pre_balances_vec);
8580        assert_eq!(post_balances, post_balances_vec);
8581        assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
8582        assert_eq!(log_messages.unwrap(), log_messages_vec);
8583        assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
8584        assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
8585        assert_eq!(rewards.unwrap(), rewards_vec);
8586        assert_eq!(loaded_addresses, test_loaded_addresses);
8587        assert_eq!(return_data.unwrap(), test_return_data);
8588        assert_eq!(compute_units_consumed, compute_units_consumed_1);
8589
8590        // insert value
8591        let status = TransactionStatusMeta {
8592            status: solana_sdk::transaction::Result::<()>::Ok(()),
8593            fee: 9u64,
8594            pre_balances: pre_balances_vec.clone(),
8595            post_balances: post_balances_vec.clone(),
8596            inner_instructions: Some(inner_instructions_vec.clone()),
8597            log_messages: Some(log_messages_vec.clone()),
8598            pre_token_balances: Some(pre_token_balances_vec.clone()),
8599            post_token_balances: Some(post_token_balances_vec.clone()),
8600            rewards: Some(rewards_vec.clone()),
8601            loaded_addresses: test_loaded_addresses.clone(),
8602            return_data: Some(test_return_data.clone()),
8603            compute_units_consumed: compute_units_consumed_2,
8604        }
8605        .into();
8606        assert!(transaction_status_cf
8607            .put_protobuf((Signature::from([2u8; 64]), 9), &status)
8608            .is_ok());
8609
8610        // result found
8611        let TransactionStatusMeta {
8612            status,
8613            fee,
8614            pre_balances,
8615            post_balances,
8616            inner_instructions,
8617            log_messages,
8618            pre_token_balances,
8619            post_token_balances,
8620            rewards,
8621            loaded_addresses,
8622            return_data,
8623            compute_units_consumed,
8624        } = transaction_status_cf
8625            .get_protobuf((Signature::from([2u8; 64]), 9))
8626            .unwrap()
8627            .unwrap()
8628            .try_into()
8629            .unwrap();
8630
8631        // deserialize
8632        assert_eq!(status, Ok(()));
8633        assert_eq!(fee, 9u64);
8634        assert_eq!(pre_balances, pre_balances_vec);
8635        assert_eq!(post_balances, post_balances_vec);
8636        assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
8637        assert_eq!(log_messages.unwrap(), log_messages_vec);
8638        assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
8639        assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
8640        assert_eq!(rewards.unwrap(), rewards_vec);
8641        assert_eq!(loaded_addresses, test_loaded_addresses);
8642        assert_eq!(return_data.unwrap(), test_return_data);
8643        assert_eq!(compute_units_consumed, compute_units_consumed_2);
8644    }
8645
8646    #[test]
8647    fn test_read_transaction_status_with_old_data() {
8648        let ledger_path = get_tmp_ledger_path_auto_delete!();
8649        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8650        let signature = Signature::from([1; 64]);
8651
8652        let index0_slot = 2;
8653        blockstore
8654            .write_deprecated_transaction_status(
8655                0,
8656                index0_slot,
8657                signature,
8658                vec![&Pubkey::new_unique()],
8659                vec![&Pubkey::new_unique()],
8660                TransactionStatusMeta {
8661                    fee: index0_slot * 1_000,
8662                    ..TransactionStatusMeta::default()
8663                },
8664            )
8665            .unwrap();
8666
8667        let index1_slot = 1;
8668        blockstore
8669            .write_deprecated_transaction_status(
8670                1,
8671                index1_slot,
8672                signature,
8673                vec![&Pubkey::new_unique()],
8674                vec![&Pubkey::new_unique()],
8675                TransactionStatusMeta {
8676                    fee: index1_slot * 1_000,
8677                    ..TransactionStatusMeta::default()
8678                },
8679            )
8680            .unwrap();
8681
8682        let slot = 3;
8683        blockstore
8684            .write_transaction_status(
8685                slot,
8686                signature,
8687                vec![
8688                    (&Pubkey::new_unique(), true),
8689                    (&Pubkey::new_unique(), false),
8690                ]
8691                .into_iter(),
8692                TransactionStatusMeta {
8693                    fee: slot * 1_000,
8694                    ..TransactionStatusMeta::default()
8695                },
8696                0,
8697            )
8698            .unwrap();
8699
8700        let meta = blockstore
8701            .read_transaction_status((signature, slot))
8702            .unwrap()
8703            .unwrap();
8704        assert_eq!(meta.fee, slot * 1000);
8705
8706        let meta = blockstore
8707            .read_transaction_status((signature, index0_slot))
8708            .unwrap()
8709            .unwrap();
8710        assert_eq!(meta.fee, index0_slot * 1000);
8711
8712        let meta = blockstore
8713            .read_transaction_status((signature, index1_slot))
8714            .unwrap()
8715            .unwrap();
8716        assert_eq!(meta.fee, index1_slot * 1000);
8717    }
8718
8719    #[test]
8720    fn test_get_transaction_status() {
8721        let ledger_path = get_tmp_ledger_path_auto_delete!();
8722        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8723        let transaction_status_cf = &blockstore.transaction_status_cf;
8724
8725        let pre_balances_vec = vec![1, 2, 3];
8726        let post_balances_vec = vec![3, 2, 1];
8727        let status = TransactionStatusMeta {
8728            status: solana_sdk::transaction::Result::<()>::Ok(()),
8729            fee: 42u64,
8730            pre_balances: pre_balances_vec,
8731            post_balances: post_balances_vec,
8732            inner_instructions: Some(vec![]),
8733            log_messages: Some(vec![]),
8734            pre_token_balances: Some(vec![]),
8735            post_token_balances: Some(vec![]),
8736            rewards: Some(vec![]),
8737            loaded_addresses: LoadedAddresses::default(),
8738            return_data: Some(TransactionReturnData::default()),
8739            compute_units_consumed: Some(42u64),
8740        }
8741        .into();
8742
8743        let signature1 = Signature::from([1u8; 64]);
8744        let signature2 = Signature::from([2u8; 64]);
8745        let signature3 = Signature::from([3u8; 64]);
8746        let signature4 = Signature::from([4u8; 64]);
8747        let signature5 = Signature::from([5u8; 64]);
8748        let signature6 = Signature::from([6u8; 64]);
8749        let signature7 = Signature::from([7u8; 64]);
8750
8751        // Insert slots with fork
8752        //   0 (root)
8753        //  / \
8754        // 1  |
8755        //    2 (root)
8756        //    |
8757        //    3
8758        let meta0 = SlotMeta::new(0, Some(0));
8759        blockstore.meta_cf.put(0, &meta0).unwrap();
8760        let meta1 = SlotMeta::new(1, Some(0));
8761        blockstore.meta_cf.put(1, &meta1).unwrap();
8762        let meta2 = SlotMeta::new(2, Some(0));
8763        blockstore.meta_cf.put(2, &meta2).unwrap();
8764        let meta3 = SlotMeta::new(3, Some(2));
8765        blockstore.meta_cf.put(3, &meta3).unwrap();
8766
8767        blockstore.set_roots([0, 2].iter()).unwrap();
8768
8769        // Initialize statuses:
8770        //   signature2 in skipped slot and root,
8771        //   signature4 in skipped slot,
8772        //   signature5 in skipped slot and non-root,
8773        //   signature6 in skipped slot,
8774        //   signature5 extra entries
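        //
        // Note: the `counter` values asserted below count how many column entries
        // each lookup stepped through, including the final non-matching key that
        // terminates the scan.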
8775        transaction_status_cf
8776            .put_protobuf((signature2, 1), &status)
8777            .unwrap();
8778
8779        transaction_status_cf
8780            .put_protobuf((signature2, 2), &status)
8781            .unwrap();
8782
8783        transaction_status_cf
8784            .put_protobuf((signature4, 1), &status)
8785            .unwrap();
8786
8787        transaction_status_cf
8788            .put_protobuf((signature5, 1), &status)
8789            .unwrap();
8790
8791        transaction_status_cf
8792            .put_protobuf((signature5, 3), &status)
8793            .unwrap();
8794
8795        transaction_status_cf
8796            .put_protobuf((signature6, 1), &status)
8797            .unwrap();
8798
8799        transaction_status_cf
8800            .put_protobuf((signature5, 5), &status)
8801            .unwrap();
8802
8803        transaction_status_cf
8804            .put_protobuf((signature6, 3), &status)
8805            .unwrap();
8806
8807        // Signature exists, root found
8808        if let (Some((slot, _status)), counter) = blockstore
8809            .get_transaction_status_with_counter(signature2, &[].into())
8810            .unwrap()
8811        {
8812            assert_eq!(slot, 2);
8813            assert_eq!(counter, 2);
8814        }
8815
8816        // Signature exists, root found although not required
8817        if let (Some((slot, _status)), counter) = blockstore
8818            .get_transaction_status_with_counter(signature2, &[3].into())
8819            .unwrap()
8820        {
8821            assert_eq!(slot, 2);
8822            assert_eq!(counter, 2);
8823        }
8824
8825        // Signature exists in skipped slot, no root found
8826        let (status, counter) = blockstore
8827            .get_transaction_status_with_counter(signature4, &[].into())
8828            .unwrap();
8829        assert_eq!(status, None);
8830        assert_eq!(counter, 2);
8831
8832        // Signature exists in skipped slot, no non-root found
8833        let (status, counter) = blockstore
8834            .get_transaction_status_with_counter(signature4, &[3].into())
8835            .unwrap();
8836        assert_eq!(status, None);
8837        assert_eq!(counter, 2);
8838
8839        // Signature exists, no root found
8840        let (status, counter) = blockstore
8841            .get_transaction_status_with_counter(signature5, &[].into())
8842            .unwrap();
8843        assert_eq!(status, None);
8844        assert_eq!(counter, 4);
8845
8846        // Signature exists, root not required
8847        if let (Some((slot, _status)), counter) = blockstore
8848            .get_transaction_status_with_counter(signature5, &[3].into())
8849            .unwrap()
8850        {
8851            assert_eq!(slot, 3);
8852            assert_eq!(counter, 2);
8853        }
8854
8855        // Signature does not exist, smaller than existing entries
8856        let (status, counter) = blockstore
8857            .get_transaction_status_with_counter(signature1, &[].into())
8858            .unwrap();
8859        assert_eq!(status, None);
8860        assert_eq!(counter, 1);
8861
8862        let (status, counter) = blockstore
8863            .get_transaction_status_with_counter(signature1, &[3].into())
8864            .unwrap();
8865        assert_eq!(status, None);
8866        assert_eq!(counter, 1);
8867
8868        // Signature does not exist, between existing entries
8869        let (status, counter) = blockstore
8870            .get_transaction_status_with_counter(signature3, &[].into())
8871            .unwrap();
8872        assert_eq!(status, None);
8873        assert_eq!(counter, 1);
8874
8875        let (status, counter) = blockstore
8876            .get_transaction_status_with_counter(signature3, &[3].into())
8877            .unwrap();
8878        assert_eq!(status, None);
8879        assert_eq!(counter, 1);
8880
8881        // Signature does not exist, larger than existing entries
8882        let (status, counter) = blockstore
8883            .get_transaction_status_with_counter(signature7, &[].into())
8884            .unwrap();
8885        assert_eq!(status, None);
8886        assert_eq!(counter, 0);
8887
8888        let (status, counter) = blockstore
8889            .get_transaction_status_with_counter(signature7, &[3].into())
8890            .unwrap();
8891        assert_eq!(status, None);
8892        assert_eq!(counter, 0);
8893    }
8894
8895    #[test]
8896    fn test_get_transaction_status_with_old_data() {
8897        let ledger_path = get_tmp_ledger_path_auto_delete!();
8898        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8899        let transaction_status_cf = &blockstore.transaction_status_cf;
8900
8901        let pre_balances_vec = vec![1, 2, 3];
8902        let post_balances_vec = vec![3, 2, 1];
8903        let status = TransactionStatusMeta {
8904            status: solana_sdk::transaction::Result::<()>::Ok(()),
8905            fee: 42u64,
8906            pre_balances: pre_balances_vec,
8907            post_balances: post_balances_vec,
8908            inner_instructions: Some(vec![]),
8909            log_messages: Some(vec![]),
8910            pre_token_balances: Some(vec![]),
8911            post_token_balances: Some(vec![]),
8912            rewards: Some(vec![]),
8913            loaded_addresses: LoadedAddresses::default(),
8914            return_data: Some(TransactionReturnData::default()),
8915            compute_units_consumed: Some(42u64),
8916        }
8917        .into();
8918
8919        let signature1 = Signature::from([1u8; 64]);
8920        let signature2 = Signature::from([2u8; 64]);
8921        let signature3 = Signature::from([3u8; 64]);
8922        let signature4 = Signature::from([4u8; 64]);
8923        let signature5 = Signature::from([5u8; 64]);
8924        let signature6 = Signature::from([6u8; 64]);
8925
8926        // Insert slots with fork
8927        //   0 (root)
8928        //  / \
8929        // 1  |
8930        //    2 (root)
8931        //  / |
8932        // 3  |
8933        //    4 (root)
8934        //    |
8935        //    5
8936        let meta0 = SlotMeta::new(0, Some(0));
8937        blockstore.meta_cf.put(0, &meta0).unwrap();
8938        let meta1 = SlotMeta::new(1, Some(0));
8939        blockstore.meta_cf.put(1, &meta1).unwrap();
8940        let meta2 = SlotMeta::new(2, Some(0));
8941        blockstore.meta_cf.put(2, &meta2).unwrap();
8942        let meta3 = SlotMeta::new(3, Some(2));
8943        blockstore.meta_cf.put(3, &meta3).unwrap();
8944        let meta4 = SlotMeta::new(4, Some(2));
8945        blockstore.meta_cf.put(4, &meta4).unwrap();
8946        let meta5 = SlotMeta::new(5, Some(4));
8947        blockstore.meta_cf.put(5, &meta5).unwrap();
8948
8949        blockstore.set_roots([0, 2, 4].iter()).unwrap();
8950
8951        // Initialize statuses:
8952        //   signature1 in skipped slot and root (2), both index 1
8953        //   signature2 in skipped slot and root (4), both index 0
8954        //   signature3 in root
8955        //   signature4 in non-root,
8956        //   signature5 extra entries
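        //
        // With deprecated entries present, a lookup scans the current keys and
        // then each legacy primary index, which is why the counters asserted
        // below run higher than in the current-format test above.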
8957        transaction_status_cf
8958            .put_deprecated_protobuf((1, signature1, 1), &status)
8959            .unwrap();
8960
8961        transaction_status_cf
8962            .put_deprecated_protobuf((1, signature1, 2), &status)
8963            .unwrap();
8964
8965        transaction_status_cf
8966            .put_deprecated_protobuf((0, signature2, 3), &status)
8967            .unwrap();
8968
8969        transaction_status_cf
8970            .put_deprecated_protobuf((0, signature2, 4), &status)
8971            .unwrap();
8972        blockstore.set_highest_primary_index_slot(Some(4));
8973
8974        transaction_status_cf
8975            .put_protobuf((signature3, 4), &status)
8976            .unwrap();
8977
8978        transaction_status_cf
8979            .put_protobuf((signature4, 5), &status)
8980            .unwrap();
8981
8982        transaction_status_cf
8983            .put_protobuf((signature5, 5), &status)
8984            .unwrap();
8985
8986        // Signature exists, root found in index 1
8987        if let (Some((slot, _status)), counter) = blockstore
8988            .get_transaction_status_with_counter(signature1, &[].into())
8989            .unwrap()
8990        {
8991            assert_eq!(slot, 2);
8992            assert_eq!(counter, 4);
8993        }
8994
8995        // Signature exists, root found in index 0
8996        if let (Some((slot, _status)), counter) = blockstore
8997            .get_transaction_status_with_counter(signature2, &[].into())
8998            .unwrap()
8999        {
9000            assert_eq!(slot, 4);
9001            assert_eq!(counter, 3);
9002        }
9003
9004        // Signature exists
9005        if let (Some((slot, _status)), counter) = blockstore
9006            .get_transaction_status_with_counter(signature3, &[].into())
9007            .unwrap()
9008        {
9009            assert_eq!(slot, 4);
9010            assert_eq!(counter, 1);
9011        }
9012
9013        // Signature does not exist
9014        let (status, counter) = blockstore
9015            .get_transaction_status_with_counter(signature6, &[].into())
9016            .unwrap();
9017        assert_eq!(status, None);
9018        assert_eq!(counter, 1);
9019    }
9020
9021    fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_blockstore_cleanup_service: bool) {
9022        solana_logger::setup();
9023
9024        let ledger_path = get_tmp_ledger_path_auto_delete!();
9025        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9026        let transaction_status_cf = &blockstore.transaction_status_cf;
9027
9028        let pre_balances_vec = vec![1, 2, 3];
9029        let post_balances_vec = vec![3, 2, 1];
9030        let status = TransactionStatusMeta {
9031            status: solana_sdk::transaction::Result::<()>::Ok(()),
9032            fee: 42u64,
9033            pre_balances: pre_balances_vec,
9034            post_balances: post_balances_vec,
9035            inner_instructions: Some(vec![]),
9036            log_messages: Some(vec![]),
9037            pre_token_balances: Some(vec![]),
9038            post_token_balances: Some(vec![]),
9039            rewards: Some(vec![]),
9040            loaded_addresses: LoadedAddresses::default(),
9041            return_data: Some(TransactionReturnData::default()),
9042            compute_units_consumed: Some(42u64),
9043        }
9044        .into();
9045
9046        let signature1 = Signature::from([2u8; 64]);
9047        let signature2 = Signature::from([3u8; 64]);
9048
9049        // Insert rooted slots 0..=3 with no fork
9050        let meta0 = SlotMeta::new(0, Some(0));
9051        blockstore.meta_cf.put(0, &meta0).unwrap();
9052        let meta1 = SlotMeta::new(1, Some(0));
9053        blockstore.meta_cf.put(1, &meta1).unwrap();
9054        let meta2 = SlotMeta::new(2, Some(1));
9055        blockstore.meta_cf.put(2, &meta2).unwrap();
9056        let meta3 = SlotMeta::new(3, Some(2));
9057        blockstore.meta_cf.put(3, &meta3).unwrap();
9058
9059        blockstore.set_roots([0, 1, 2, 3].iter()).unwrap();
9060
9061        let lowest_cleanup_slot = 1;
9062        let lowest_available_slot = lowest_cleanup_slot + 1;
9063
9064        transaction_status_cf
9065            .put_protobuf((signature1, lowest_cleanup_slot), &status)
9066            .unwrap();
9067
9068        transaction_status_cf
9069            .put_protobuf((signature2, lowest_available_slot), &status)
9070            .unwrap();
9071
9072        let address0 = solana_pubkey::new_rand();
9073        let address1 = solana_pubkey::new_rand();
9074        blockstore
9075            .write_transaction_status(
9076                lowest_cleanup_slot,
9077                signature1,
9078                vec![(&address0, true)].into_iter(),
9079                TransactionStatusMeta::default(),
9080                0,
9081            )
9082            .unwrap();
9083        blockstore
9084            .write_transaction_status(
9085                lowest_available_slot,
9086                signature2,
9087                vec![(&address1, true)].into_iter(),
9088                TransactionStatusMeta::default(),
9089                0,
9090            )
9091            .unwrap();
9092
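        // Probe the two special columns (transaction status and address
        // signatures) for the purged slot; both should go missing together.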
9093        let check_for_missing = || {
9094            (
9095                blockstore
9096                    .get_transaction_status_with_counter(signature1, &[].into())
9097                    .unwrap()
9098                    .0
9099                    .is_none(),
9100                blockstore
9101                    .find_address_signatures_for_slot(address0, lowest_cleanup_slot)
9102                    .unwrap()
9103                    .is_empty(),
9104            )
9105        };
9106
9107        let assert_existing_always = || {
9108            let are_existing_always = (
9109                blockstore
9110                    .get_transaction_status_with_counter(signature2, &[].into())
9111                    .unwrap()
9112                    .0
9113                    .is_some(),
9114                !blockstore
9115                    .find_address_signatures_for_slot(address1, lowest_available_slot)
9116                    .unwrap()
9117                    .is_empty(),
9118            );
9119            assert_eq!(are_existing_always, (true, true));
9120        };
9121
9122        let are_missing = check_for_missing();
9123        // should never be missing before the conditional cleanup simulation...
9124        assert_eq!(are_missing, (false, false));
9125        assert_existing_always();
9126
9127        if simulate_blockstore_cleanup_service {
9128            *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
9129            blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter);
9130        }
9131
9132        let are_missing = check_for_missing();
9133        if simulate_blockstore_cleanup_service {
9134            // ... when the cleanup simulation is in effect, we should consistently
9135            // observe the entries to be missing
9136            assert_eq!(are_missing, (true, true));
9137        } else {
9138            // ... otherwise, we should observe the entries to still exist...
9139            assert_eq!(are_missing, (false, false));
9140        }
9141        assert_existing_always();
9142    }
9143
9144    #[test]
9145    fn test_lowest_cleanup_slot_and_special_cfs_with_blockstore_cleanup_service_simulation() {
9146        do_test_lowest_cleanup_slot_and_special_cfs(true);
9147    }
9148
9149    #[test]
9150    fn test_lowest_cleanup_slot_and_special_cfs_without_blockstore_cleanup_service_simulation() {
9151        do_test_lowest_cleanup_slot_and_special_cfs(false);
9152    }
9153
9154    #[test]
9155    fn test_get_rooted_transaction() {
9156        let slot = 2;
9157        let entries = make_slot_entries_with_transactions(5);
9158        let shreds = entries_to_test_shreds(
9159            &entries,
9160            slot,
9161            slot - 1, // parent_slot
9162            true,     // is_full_slot
9163            0,        // version
9164            true,     // merkle_variant
9165        );
9166        let ledger_path = get_tmp_ledger_path_auto_delete!();
9167        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9168        blockstore.insert_shreds(shreds, None, false).unwrap();
9169        blockstore.set_roots([slot - 1, slot].iter()).unwrap();
9170
9171        let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
9172            .iter()
9173            .filter(|entry| !entry.is_tick())
9174            .cloned()
9175            .flat_map(|entry| entry.transactions)
9176            .map(|transaction| {
9177                let mut pre_balances: Vec<u64> = vec![];
9178                let mut post_balances: Vec<u64> = vec![];
9179                for i in 0..transaction.message.static_account_keys().len() {
9180                    pre_balances.push(i as u64 * 10);
9181                    post_balances.push(i as u64 * 11);
9182                }
9183                let inner_instructions = Some(vec![InnerInstructions {
9184                    index: 0,
9185                    instructions: vec![InnerInstruction {
9186                        instruction: CompiledInstruction::new(1, &(), vec![0]),
9187                        stack_height: Some(2),
9188                    }],
9189                }]);
9190                let log_messages = Some(vec![String::from("Test message\n")]);
9191                let pre_token_balances = Some(vec![]);
9192                let post_token_balances = Some(vec![]);
9193                let rewards = Some(vec![]);
9194                let signature = transaction.signatures[0];
9195                let return_data = Some(TransactionReturnData {
9196                    program_id: Pubkey::new_unique(),
9197                    data: vec![1, 2, 3],
9198                });
9199                let status = TransactionStatusMeta {
9200                    status: Ok(()),
9201                    fee: 42,
9202                    pre_balances: pre_balances.clone(),
9203                    post_balances: post_balances.clone(),
9204                    inner_instructions: inner_instructions.clone(),
9205                    log_messages: log_messages.clone(),
9206                    pre_token_balances: pre_token_balances.clone(),
9207                    post_token_balances: post_token_balances.clone(),
9208                    rewards: rewards.clone(),
9209                    loaded_addresses: LoadedAddresses::default(),
9210                    return_data: return_data.clone(),
9211                    compute_units_consumed: Some(42),
9212                }
9213                .into();
9214                blockstore
9215                    .transaction_status_cf
9216                    .put_protobuf((signature, slot), &status)
9217                    .unwrap();
9218                VersionedTransactionWithStatusMeta {
9219                    transaction,
9220                    meta: TransactionStatusMeta {
9221                        status: Ok(()),
9222                        fee: 42,
9223                        pre_balances,
9224                        post_balances,
9225                        inner_instructions,
9226                        log_messages,
9227                        pre_token_balances,
9228                        post_token_balances,
9229                        rewards,
9230                        loaded_addresses: LoadedAddresses::default(),
9231                        return_data,
9232                        compute_units_consumed: Some(42),
9233                    },
9234                }
9235            })
9236            .collect();
9237
9238        for tx_with_meta in expected_transactions.clone() {
9239            let signature = tx_with_meta.transaction.signatures[0];
9240            assert_eq!(
9241                blockstore.get_rooted_transaction(signature).unwrap(),
9242                Some(ConfirmedTransactionWithStatusMeta {
9243                    slot,
9244                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta.clone()),
9245                    block_time: None
9246                })
9247            );
9248            assert_eq!(
9249                blockstore
9250                    .get_complete_transaction(signature, slot + 1)
9251                    .unwrap(),
9252                Some(ConfirmedTransactionWithStatusMeta {
9253                    slot,
9254                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
9255                    block_time: None
9256                })
9257            );
9258        }
9259
9260        blockstore
9261            .run_purge(0, slot, PurgeType::CompactionFilter)
9262            .unwrap();
9263        *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
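        // Raising lowest_cleanup_slot mirrors BlockstoreCleanupService, so the
        // special-column reads below treat the purged slots as unavailable.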
9264        for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
9265            let signature = transaction.signatures[0];
9266            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
9267            assert_eq!(
9268                blockstore
9269                    .get_complete_transaction(signature, slot + 1)
9270                    .unwrap(),
9271                None,
9272            );
9273        }
9274    }
9275
9276    #[test]
9277    fn test_get_complete_transaction() {
9278        let ledger_path = get_tmp_ledger_path_auto_delete!();
9279        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9280
9281        let slot = 2;
9282        let entries = make_slot_entries_with_transactions(5);
9283        let shreds = entries_to_test_shreds(
9284            &entries,
9285            slot,
9286            slot - 1, // parent_slot
9287            true,     // is_full_slot
9288            0,        // version
9289            true,     // merkle_variant
9290        );
9291        blockstore.insert_shreds(shreds, None, false).unwrap();
9292
9293        let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
9294            .iter()
9295            .filter(|entry| !entry.is_tick())
9296            .cloned()
9297            .flat_map(|entry| entry.transactions)
9298            .map(|transaction| {
9299                let mut pre_balances: Vec<u64> = vec![];
9300                let mut post_balances: Vec<u64> = vec![];
9301                for i in 0..transaction.message.static_account_keys().len() {
9302                    pre_balances.push(i as u64 * 10);
9303                    post_balances.push(i as u64 * 11);
9304                }
9305                let inner_instructions = Some(vec![InnerInstructions {
9306                    index: 0,
9307                    instructions: vec![InnerInstruction {
9308                        instruction: CompiledInstruction::new(1, &(), vec![0]),
9309                        stack_height: Some(2),
9310                    }],
9311                }]);
9312                let log_messages = Some(vec![String::from("Test message\n")]);
9313                let pre_token_balances = Some(vec![]);
9314                let post_token_balances = Some(vec![]);
9315                let rewards = Some(vec![]);
9316                let return_data = Some(TransactionReturnData {
9317                    program_id: Pubkey::new_unique(),
9318                    data: vec![1, 2, 3],
9319                });
9320                let signature = transaction.signatures[0];
9321                let status = TransactionStatusMeta {
9322                    status: Ok(()),
9323                    fee: 42,
9324                    pre_balances: pre_balances.clone(),
9325                    post_balances: post_balances.clone(),
9326                    inner_instructions: inner_instructions.clone(),
9327                    log_messages: log_messages.clone(),
9328                    pre_token_balances: pre_token_balances.clone(),
9329                    post_token_balances: post_token_balances.clone(),
9330                    rewards: rewards.clone(),
9331                    loaded_addresses: LoadedAddresses::default(),
9332                    return_data: return_data.clone(),
9333                    compute_units_consumed: Some(42u64),
9334                }
9335                .into();
9336                blockstore
9337                    .transaction_status_cf
9338                    .put_protobuf((signature, slot), &status)
9339                    .unwrap();
9340                VersionedTransactionWithStatusMeta {
9341                    transaction,
9342                    meta: TransactionStatusMeta {
9343                        status: Ok(()),
9344                        fee: 42,
9345                        pre_balances,
9346                        post_balances,
9347                        inner_instructions,
9348                        log_messages,
9349                        pre_token_balances,
9350                        post_token_balances,
9351                        rewards,
9352                        loaded_addresses: LoadedAddresses::default(),
9353                        return_data,
9354                        compute_units_consumed: Some(42u64),
9355                    },
9356                }
9357            })
9358            .collect();
9359
9360        for tx_with_meta in expected_transactions.clone() {
9361            let signature = tx_with_meta.transaction.signatures[0];
9362            assert_eq!(
9363                blockstore
9364                    .get_complete_transaction(signature, slot)
9365                    .unwrap(),
9366                Some(ConfirmedTransactionWithStatusMeta {
9367                    slot,
9368                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
9369                    block_time: None
9370                })
9371            );
9372            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
9373        }
9374
9375        blockstore
9376            .run_purge(0, slot, PurgeType::CompactionFilter)
9377            .unwrap();
9378        *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
9379        for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
9380            let signature = transaction.signatures[0];
9381            assert_eq!(
9382                blockstore
9383                    .get_complete_transaction(signature, slot)
9384                    .unwrap(),
9385                None,
9386            );
9387            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
9388        }
9389    }
9390
9391    #[test]
9392    fn test_empty_transaction_status() {
9393        let ledger_path = get_tmp_ledger_path_auto_delete!();
9394        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9395
9396        blockstore.set_roots(std::iter::once(&0)).unwrap();
9397        assert_eq!(
9398            blockstore
9399                .get_rooted_transaction(Signature::default())
9400                .unwrap(),
9401            None
9402        );
9403    }
9404
9405    impl Blockstore {
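        /// Test-only helper that writes transaction status and address
        /// signature entries with the deprecated primary-index key layout, so
        /// reads of ledgers written by older software can be exercised.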
9406        pub(crate) fn write_deprecated_transaction_status(
9407            &self,
9408            primary_index: u64,
9409            slot: Slot,
9410            signature: Signature,
9411            writable_keys: Vec<&Pubkey>,
9412            readonly_keys: Vec<&Pubkey>,
9413            status: TransactionStatusMeta,
9414        ) -> Result<()> {
9415            let status = status.into();
9416            self.transaction_status_cf
9417                .put_deprecated_protobuf((primary_index, signature, slot), &status)?;
9418            for address in writable_keys {
9419                self.address_signatures_cf.put_deprecated(
9420                    (primary_index, *address, slot, signature),
9421                    &AddressSignatureMeta { writeable: true },
9422                )?;
9423            }
9424            for address in readonly_keys {
9425                self.address_signatures_cf.put_deprecated(
9426                    (primary_index, *address, slot, signature),
9427                    &AddressSignatureMeta { writeable: false },
9428                )?;
9429            }
9430            let mut w_highest_primary_index_slot = self.highest_primary_index_slot.write().unwrap();
9431            if w_highest_primary_index_slot.is_none()
9432                || w_highest_primary_index_slot.is_some_and(|highest_slot| highest_slot < slot)
9433            {
9434                *w_highest_primary_index_slot = Some(slot);
9435            }
9436            Ok(())
9437        }
9438    }
9439
9440    #[test]
9441    fn test_find_address_signatures_for_slot() {
9442        let ledger_path = get_tmp_ledger_path_auto_delete!();
9443        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9444
9445        let address0 = solana_pubkey::new_rand();
9446        let address1 = solana_pubkey::new_rand();
9447
9448        let slot1 = 1;
9449        for x in 1..5 {
9450            let signature = Signature::from([x; 64]);
9451            blockstore
9452                .write_transaction_status(
9453                    slot1,
9454                    signature,
9455                    vec![(&address0, true), (&address1, false)].into_iter(),
9456                    TransactionStatusMeta::default(),
9457                    x as usize,
9458                )
9459                .unwrap();
9460        }
9461        let slot2 = 2;
9462        for x in 5..7 {
9463            let signature = Signature::from([x; 64]);
9464            blockstore
9465                .write_transaction_status(
9466                    slot2,
9467                    signature,
9468                    vec![(&address0, true), (&address1, false)].into_iter(),
9469                    TransactionStatusMeta::default(),
9470                    x as usize,
9471                )
9472                .unwrap();
9473        }
9474        for x in 7..9 {
9475            let signature = Signature::from([x; 64]);
9476            blockstore
9477                .write_transaction_status(
9478                    slot2,
9479                    signature,
9480                    vec![(&address0, true), (&address1, false)].into_iter(),
9481                    TransactionStatusMeta::default(),
9482                    x as usize,
9483                )
9484                .unwrap();
9485        }
9486        let slot3 = 3;
9487        for x in 9..13 {
9488            let signature = Signature::from([x; 64]);
9489            blockstore
9490                .write_transaction_status(
9491                    slot3,
9492                    signature,
9493                    vec![(&address0, true), (&address1, false)].into_iter(),
9494                    TransactionStatusMeta::default(),
9495                    x as usize,
9496                )
9497                .unwrap();
9498        }
9499        blockstore.set_roots(std::iter::once(&slot1)).unwrap();
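        // Only slot1 is rooted, but find_address_signatures_for_slot does not
        // require rootedness: the lookups below succeed for the unrooted
        // slots 2 and 3 as well.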
9500
9501        let slot1_signatures = blockstore
9502            .find_address_signatures_for_slot(address0, 1)
9503            .unwrap();
9504        for (i, (slot, signature)) in slot1_signatures.iter().enumerate() {
9505            assert_eq!(*slot, slot1);
9506            assert_eq!(*signature, Signature::from([i as u8 + 1; 64]));
9507        }
9508
9509        let slot2_signatures = blockstore
9510            .find_address_signatures_for_slot(address0, 2)
9511            .unwrap();
9512        for (i, (slot, signature)) in slot2_signatures.iter().enumerate() {
9513            assert_eq!(*slot, slot2);
9514            assert_eq!(*signature, Signature::from([i as u8 + 5; 64]));
9515        }
9516
9517        let slot3_signatures = blockstore
9518            .find_address_signatures_for_slot(address0, 3)
9519            .unwrap();
9520        for (i, (slot, signature)) in slot3_signatures.iter().enumerate() {
9521            assert_eq!(*slot, slot3);
9522            assert_eq!(*signature, Signature::from([i as u8 + 9; 64]));
9523        }
9524    }
9525
9526    #[test]
9527    fn test_get_confirmed_signatures_for_address2() {
9528        let ledger_path = get_tmp_ledger_path_auto_delete!();
9529        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9530
9531        let (shreds, _) = make_slot_entries(1, 0, 4, /*merkle_variant:*/ true);
9532        blockstore.insert_shreds(shreds, None, false).unwrap();
9533
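        // Helper: for each address, emit one entry containing a single
        // transaction that references the address, followed by a tick entry.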
9534        fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
9535            let mut entries: Vec<Entry> = Vec::new();
9536            for address in addresses {
9537                let transaction = Transaction::new_with_compiled_instructions(
9538                    &[&Keypair::new()],
9539                    &[*address],
9540                    Hash::default(),
9541                    vec![solana_pubkey::new_rand()],
9542                    vec![CompiledInstruction::new(1, &(), vec![0])],
9543                );
9544                entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
9545                let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
9546                entries.append(&mut tick);
9547            }
9548            entries
9549        }
9550
9551        let address0 = solana_pubkey::new_rand();
9552        let address1 = solana_pubkey::new_rand();
9553
9554        for slot in 2..=8 {
9555            let entries = make_slot_entries_with_transaction_addresses(&[
9556                address0, address1, address0, address1,
9557            ]);
9558            let shreds = entries_to_test_shreds(
9559                &entries,
9560                slot,
9561                slot - 1, // parent_slot
9562                true,     // is_full_slot
9563                0,        // version
9564                true,     // merkle_variant
9565            );
9566            blockstore.insert_shreds(shreds, None, false).unwrap();
9567
9568            let mut counter = 0;
9569            for entry in entries.into_iter() {
9570                for transaction in entry.transactions {
9571                    assert_eq!(transaction.signatures.len(), 1);
9572                    blockstore
9573                        .write_transaction_status(
9574                            slot,
9575                            transaction.signatures[0],
9576                            transaction
9577                                .message
9578                                .static_account_keys()
9579                                .iter()
9580                                .map(|key| (key, true)),
9581                            TransactionStatusMeta::default(),
9582                            counter,
9583                        )
9584                        .unwrap();
9585                    counter += 1;
9586                }
9587            }
9588        }
9589
9590        // Add 2 slots that both descend from slot 8
9591        for slot in 9..=10 {
9592            let entries = make_slot_entries_with_transaction_addresses(&[
9593                address0, address1, address0, address1,
9594            ]);
9595            let shreds =
9596                entries_to_test_shreds(&entries, slot, 8, true, 0, /*merkle_variant:*/ true);
9597            blockstore.insert_shreds(shreds, None, false).unwrap();
9598
9599            let mut counter = 0;
9600            for entry in entries.into_iter() {
9601                for transaction in entry.transactions {
9602                    assert_eq!(transaction.signatures.len(), 1);
9603                    blockstore
9604                        .write_transaction_status(
9605                            slot,
9606                            transaction.signatures[0],
9607                            transaction
9608                                .message
9609                                .static_account_keys()
9610                                .iter()
9611                                .map(|key| (key, true)),
9612                            TransactionStatusMeta::default(),
9613                            counter,
9614                        )
9615                        .unwrap();
9616                    counter += 1;
9617                }
9618            }
9619        }
9620
9621        // Leave one slot unrooted to verify that only rooted (confirmed) signatures are returned
9622        blockstore.set_roots([1, 2, 4, 5, 6, 7, 8].iter()).unwrap();
9623        let highest_super_majority_root = 8;
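        // Slot 3 is unrooted (as are 9 and 10), so only the two address0
        // signatures in each of slots 2, 4, 5, 6, 7, and 8 are visible below:
        // 12 in total.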
9624
9625        // Fetch all rooted signatures for address 0 at once...
9626        let sig_infos = blockstore
9627            .get_confirmed_signatures_for_address2(
9628                address0,
9629                highest_super_majority_root,
9630                None,
9631                None,
9632                usize::MAX,
9633            )
9634            .unwrap();
9635        assert!(sig_infos.found_before);
9636        let all0 = sig_infos.infos;
9637        assert_eq!(all0.len(), 12);
9638
9639        // Fetch all rooted signatures for address 1 at once...
9640        let all1 = blockstore
9641            .get_confirmed_signatures_for_address2(
9642                address1,
9643                highest_super_majority_root,
9644                None,
9645                None,
9646                usize::MAX,
9647            )
9648            .unwrap()
9649            .infos;
9650        assert_eq!(all1.len(), 12);
9651
9652        // Fetch all signatures for address 0 individually
9653        for i in 0..all0.len() {
9654            let sig_infos = blockstore
9655                .get_confirmed_signatures_for_address2(
9656                    address0,
9657                    highest_super_majority_root,
9658                    if i == 0 {
9659                        None
9660                    } else {
9661                        Some(all0[i - 1].signature)
9662                    },
9663                    None,
9664                    1,
9665                )
9666                .unwrap();
9667            assert!(sig_infos.found_before);
9668            let results = sig_infos.infos;
9669            assert_eq!(results.len(), 1);
9670            assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9671        }
9672        // Fetch all signatures for address 0 individually using `until`
9673        for i in 0..all0.len() {
9674            let results = blockstore
9675                .get_confirmed_signatures_for_address2(
9676                    address0,
9677                    highest_super_majority_root,
9678                    if i == 0 {
9679                        None
9680                    } else {
9681                        Some(all0[i - 1].signature)
9682                    },
9683                    if i == all0.len() - 1 {
9684                        None
9685                    } else {
9686                        Some(all0[i + 1].signature)
9687                    },
9688                    10,
9689                )
9690                .unwrap()
9691                .infos;
9692            assert_eq!(results.len(), 1);
9693            assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9694        }
9695
9696        let sig_infos = blockstore
9697            .get_confirmed_signatures_for_address2(
9698                address0,
9699                highest_super_majority_root,
9700                Some(all0[all0.len() - 1].signature),
9701                None,
9702                1,
9703            )
9704            .unwrap();
9705        assert!(sig_infos.found_before);
9706        assert!(sig_infos.infos.is_empty());
9707
9708        assert!(blockstore
9709            .get_confirmed_signatures_for_address2(
9710                address0,
9711                highest_super_majority_root,
9712                None,
9713                Some(all0[0].signature),
9714                2,
9715            )
9716            .unwrap()
9717            .infos
9718            .is_empty());
9719
9720        // Fetch all signatures for address 0, three at a time
9721        assert!(all0.len() % 3 == 0);
9722        for i in (0..all0.len()).step_by(3) {
9723            let results = blockstore
9724                .get_confirmed_signatures_for_address2(
9725                    address0,
9726                    highest_super_majority_root,
9727                    if i == 0 {
9728                        None
9729                    } else {
9730                        Some(all0[i - 1].signature)
9731                    },
9732                    None,
9733                    3,
9734                )
9735                .unwrap()
9736                .infos;
9737            assert_eq!(results.len(), 3);
9738            assert_eq!(results[0], all0[i]);
9739            assert_eq!(results[1], all0[i + 1]);
9740            assert_eq!(results[2], all0[i + 2]);
9741        }
9742
9743        // Ensure that the signatures within a slot are reverse ordered by occurrence in block
9744        for i in (0..all1.len()).step_by(2) {
9745            let results = blockstore
9746                .get_confirmed_signatures_for_address2(
9747                    address1,
9748                    highest_super_majority_root,
9749                    if i == 0 {
9750                        None
9751                    } else {
9752                        Some(all1[i - 1].signature)
9753                    },
9754                    None,
9755                    2,
9756                )
9757                .unwrap()
9758                .infos;
9759            assert_eq!(results.len(), 2);
9760            assert_eq!(results[0].slot, results[1].slot);
9761            assert_eq!(results[0], all1[i]);
9762            assert_eq!(results[1], all1[i + 1]);
9763        }
9764
9765        // A search for address 0 with `before` and/or `until` signatures from address1 should also work
9766        let sig_infos = blockstore
9767            .get_confirmed_signatures_for_address2(
9768                address0,
9769                highest_super_majority_root,
9770                Some(all1[0].signature),
9771                None,
9772                usize::MAX,
9773            )
9774            .unwrap();
9775        assert!(sig_infos.found_before);
9776        let results = sig_infos.infos;
9777        // The exact number of results returned is variable, based on the sort order of the
9778        // random signatures that are generated
9779        assert!(!results.is_empty());
9780
9781        let results2 = blockstore
9782            .get_confirmed_signatures_for_address2(
9783                address0,
9784                highest_super_majority_root,
9785                Some(all1[0].signature),
9786                Some(all1[4].signature),
9787                usize::MAX,
9788            )
9789            .unwrap()
9790            .infos;
9791        assert!(results2.len() < results.len());
9792
9793        // Duplicate all tests using confirmed signatures
9794        let highest_confirmed_slot = 10;
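        // With an upper bound of slot 10, the unrooted ancestry above root 8
        // is also searched; slot 10 chains directly to 8, adding 2 signatures
        // per address (12 + 2 = 14), while its sibling slot 9 stays invisible.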
9795
9796        // Fetch all signatures for address 0 at once...
9797        let all0 = blockstore
9798            .get_confirmed_signatures_for_address2(
9799                address0,
9800                highest_confirmed_slot,
9801                None,
9802                None,
9803                usize::MAX,
9804            )
9805            .unwrap()
9806            .infos;
9807        assert_eq!(all0.len(), 14);
9808
9809        // Fetch all signatures for address 1 at once...
9810        let all1 = blockstore
9811            .get_confirmed_signatures_for_address2(
9812                address1,
9813                highest_confirmed_slot,
9814                None,
9815                None,
9816                usize::MAX,
9817            )
9818            .unwrap()
9819            .infos;
9820        assert_eq!(all1.len(), 14);
9821
9822        // Fetch all signatures for address 0 individually
9823        for i in 0..all0.len() {
9824            let results = blockstore
9825                .get_confirmed_signatures_for_address2(
9826                    address0,
9827                    highest_confirmed_slot,
9828                    if i == 0 {
9829                        None
9830                    } else {
9831                        Some(all0[i - 1].signature)
9832                    },
9833                    None,
9834                    1,
9835                )
9836                .unwrap()
9837                .infos;
9838            assert_eq!(results.len(), 1);
9839            assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9840        }
9841        // Fetch all signatures for address 0 individually using `until`
9842        for i in 0..all0.len() {
9843            let results = blockstore
9844                .get_confirmed_signatures_for_address2(
9845                    address0,
9846                    highest_confirmed_slot,
9847                    if i == 0 {
9848                        None
9849                    } else {
9850                        Some(all0[i - 1].signature)
9851                    },
9852                    if i == all0.len() - 1 {
9853                        None
9854                    } else {
9855                        Some(all0[i + 1].signature)
9856                    },
9857                    10,
9858                )
9859                .unwrap()
9860                .infos;
9861            assert_eq!(results.len(), 1);
9862            assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9863        }
9864
9865        assert!(blockstore
9866            .get_confirmed_signatures_for_address2(
9867                address0,
9868                highest_confirmed_slot,
9869                Some(all0[all0.len() - 1].signature),
9870                None,
9871                1,
9872            )
9873            .unwrap()
9874            .infos
9875            .is_empty());
9876
9877        assert!(blockstore
9878            .get_confirmed_signatures_for_address2(
9879                address0,
9880                highest_confirmed_slot,
9881                None,
9882                Some(all0[0].signature),
9883                2,
9884            )
9885            .unwrap()
9886            .infos
9887            .is_empty());
9888
9889        // Fetch all signatures for address 0, three at a time
9890        assert!(all0.len() % 3 == 2);
9891        for i in (0..all0.len()).step_by(3) {
9892            let results = blockstore
9893                .get_confirmed_signatures_for_address2(
9894                    address0,
9895                    highest_confirmed_slot,
9896                    if i == 0 {
9897                        None
9898                    } else {
9899                        Some(all0[i - 1].signature)
9900                    },
9901                    None,
9902                    3,
9903                )
9904                .unwrap()
9905                .infos;
9906            if i < 12 {
9907                assert_eq!(results.len(), 3);
9908                assert_eq!(results[2], all0[i + 2]);
9909            } else {
9910                assert_eq!(results.len(), 2);
9911            }
9912            assert_eq!(results[0], all0[i]);
9913            assert_eq!(results[1], all0[i + 1]);
9914        }
9915
9916        // Ensure that the signatures within a slot are reverse ordered by occurrence in block
9917        for i in (0..all1.len()).step_by(2) {
9918            let results = blockstore
9919                .get_confirmed_signatures_for_address2(
9920                    address1,
9921                    highest_confirmed_slot,
9922                    if i == 0 {
9923                        None
9924                    } else {
9925                        Some(all1[i - 1].signature)
9926                    },
9927                    None,
9928                    2,
9929                )
9930                .unwrap()
9931                .infos;
9932            assert_eq!(results.len(), 2);
9933            assert_eq!(results[0].slot, results[1].slot);
9934            assert_eq!(results[0], all1[i]);
9935            assert_eq!(results[1], all1[i + 1]);
9936        }
9937
9938        // A search for address 0 with `before` and/or `until` signatures from address1 should also work
9939        let results = blockstore
9940            .get_confirmed_signatures_for_address2(
9941                address0,
9942                highest_confirmed_slot,
9943                Some(all1[0].signature),
9944                None,
9945                usize::MAX,
9946            )
9947            .unwrap()
9948            .infos;
9949        // The exact number of results returned is variable, based on the sort order of the
9950        // random signatures that are generated
9951        assert!(!results.is_empty());
9952
9953        let results2 = blockstore
9954            .get_confirmed_signatures_for_address2(
9955                address0,
9956                highest_confirmed_slot,
9957                Some(all1[0].signature),
9958                Some(all1[4].signature),
9959                usize::MAX,
9960            )
9961            .unwrap()
9962            .infos;
9963        assert!(results2.len() < results.len());
9964
9965        // Remove signature
9966        blockstore
9967            .address_signatures_cf
9968            .delete((address0, 2, 0, all0[0].signature))
9969            .unwrap();
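        // With the index entry gone, the `before` signature can no longer be
        // located, so the query reports found_before: false and returns no
        // results rather than silently skipping past the gap.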
9970        let sig_infos = blockstore
9971            .get_confirmed_signatures_for_address2(
9972                address0,
9973                highest_super_majority_root,
9974                Some(all0[0].signature),
9975                None,
9976                usize::MAX,
9977            )
9978            .unwrap();
9979        assert!(!sig_infos.found_before);
9980        assert!(sig_infos.infos.is_empty());
9981    }
9982
9983    #[test]
9984    fn test_get_last_hash() {
9985        let entries: Vec<Entry> = vec![];
9986        let empty_entries_iterator = entries.iter();
9987        assert!(get_last_hash(empty_entries_iterator).is_none());
9988
9989        let entry = next_entry(&hash::hash(&[42u8]), 1, vec![]);
9990        let entries: Vec<Entry> = std::iter::successors(Some(entry), |entry| {
9991            Some(next_entry(&entry.hash, 1, vec![]))
9992        })
9993        .take(10)
9994        .collect();
9995        let entries_iterator = entries.iter();
9996        assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash);
9997    }
9998
9999    #[test]
10000    fn test_map_transactions_to_statuses() {
10001        let ledger_path = get_tmp_ledger_path_auto_delete!();
10002        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10003
10004        let transaction_status_cf = &blockstore.transaction_status_cf;
10005
10006        let slot = 0;
10007        let mut transactions: Vec<VersionedTransaction> = vec![];
10008        for x in 0..4 {
10009            let transaction = Transaction::new_with_compiled_instructions(
10010                &[&Keypair::new()],
10011                &[solana_pubkey::new_rand()],
10012                Hash::default(),
10013                vec![solana_pubkey::new_rand()],
10014                vec![CompiledInstruction::new(1, &(), vec![0])],
10015            );
10016            let status = TransactionStatusMeta {
10017                status: solana_sdk::transaction::Result::<()>::Err(
10018                    TransactionError::AccountNotFound,
10019                ),
10020                fee: x,
10021                pre_balances: vec![],
10022                post_balances: vec![],
10023                inner_instructions: Some(vec![]),
10024                log_messages: Some(vec![]),
10025                pre_token_balances: Some(vec![]),
10026                post_token_balances: Some(vec![]),
10027                rewards: Some(vec![]),
10028                loaded_addresses: LoadedAddresses::default(),
10029                return_data: Some(TransactionReturnData::default()),
10030                compute_units_consumed: None,
10031            }
10032            .into();
10033            transaction_status_cf
10034                .put_protobuf((transaction.signatures[0], slot), &status)
10035                .unwrap();
10036            transactions.push(transaction.into());
10037        }
10038
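        // Each transaction is paired with the status stored under its
        // (signature, slot) key; the distinct fees verify that order is kept.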
10039        let map_result =
10040            blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
10041        assert!(map_result.is_ok());
10042        let map = map_result.unwrap();
10043        assert_eq!(map.len(), 4);
10044        for (x, m) in map.iter().enumerate() {
10045            assert_eq!(m.meta.fee, x as u64);
10046        }
10047
10048        // Push a transaction that will not have a matching status, as a test case
10049        transactions.push(
10050            Transaction::new_with_compiled_instructions(
10051                &[&Keypair::new()],
10052                &[solana_pubkey::new_rand()],
10053                Hash::default(),
10054                vec![solana_pubkey::new_rand()],
10055                vec![CompiledInstruction::new(1, &(), vec![0])],
10056            )
10057            .into(),
10058        );
10059
10060        let map_result =
10061            blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
10062        assert_matches!(map_result, Err(BlockstoreError::MissingTransactionMetadata));
10063    }
10064
10065    #[test]
10066    fn test_get_recent_perf_samples_v1_only() {
10067        let ledger_path = get_tmp_ledger_path_auto_delete!();
10068        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10069
10070        let num_entries: usize = 10;
10071
10072        let slot_sample = |i: u64| PerfSampleV1 {
10073            num_transactions: 1406 + i,
10074            num_slots: 34 + i / 2,
10075            sample_period_secs: (40 + i / 5) as u16,
10076        };
10077
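        // Writing raw bincode bytes of PerfSampleV1 exercises the legacy read
        // path: get_recent_perf_samples must decode V1 bytes and surface them
        // as PerfSample values.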
10078        let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
10079        for i in 0..num_entries {
10080            let slot = (i + 1) as u64 * 50;
10081            let sample = slot_sample(i as u64);
10082
10083            let bytes = serialize(&sample).unwrap();
10084            blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
10085            perf_samples.push((slot, sample.into()));
10086        }
10087
10088        for i in 0..num_entries {
10089            let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
10090            expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
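            // The i + 1 most recent samples, ordered from highest slot down.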
10091            assert_eq!(
10092                blockstore.get_recent_perf_samples(i + 1).unwrap(),
10093                expected_samples
10094            );
10095        }
10096    }
10097
10098    #[test]
10099    fn test_get_recent_perf_samples_v2_only() {
10100        let ledger_path = get_tmp_ledger_path_auto_delete!();
10101        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10102
10103        let num_entries: usize = 10;
10104
10105        let slot_sample = |i: u64| PerfSampleV2 {
10106            num_transactions: 2495 + i,
10107            num_slots: 167 + i / 2,
10108            sample_period_secs: (37 + i / 5) as u16,
10109            num_non_vote_transactions: 1672 + i,
10110        };
10111
10112        let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
10113        for i in 0..num_entries {
10114            let slot = (i + 1) as u64 * 50;
10115            let sample = slot_sample(i as u64);
10116
10117            let bytes = serialize(&sample).unwrap();
10118            blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
10119            perf_samples.push((slot, sample.into()));
10120        }
10121
10122        for i in 0..num_entries {
10123            let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
10124            expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
10125            assert_eq!(
10126                blockstore.get_recent_perf_samples(i + 1).unwrap(),
10127                expected_samples
10128            );
10129        }
10130    }
10131
10132    #[test]
10133    fn test_get_recent_perf_samples_v1_and_v2() {
10134        let ledger_path = get_tmp_ledger_path_auto_delete!();
10135        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10136
10137        let num_entries: usize = 10;
10138
10139        let slot_sample_v1 = |i: u64| PerfSampleV1 {
10140            num_transactions: 1599 + i,
10141            num_slots: 123 + i / 2,
10142            sample_period_secs: (42 + i / 5) as u16,
10143        };
10144
10145        let slot_sample_v2 = |i: u64| PerfSampleV2 {
10146            num_transactions: 5809 + i,
10147            num_slots: 81 + i / 2,
10148            sample_period_secs: (35 + i / 5) as u16,
10149            num_non_vote_transactions: 2209 + i,
10150        };
10151
10152        let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
10153        for i in 0..num_entries {
10154            let slot = (i + 1) as u64 * 50;
10155
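            // Every third slot stores a V1 sample and the rest V2, verifying
            // that both formats can coexist in the perf_samples column.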
10156            if i % 3 == 0 {
10157                let sample = slot_sample_v1(i as u64);
10158                let bytes = serialize(&sample).unwrap();
10159                blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
10160                perf_samples.push((slot, sample.into()));
10161            } else {
10162                let sample = slot_sample_v2(i as u64);
10163                let bytes = serialize(&sample).unwrap();
10164                blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
10165                perf_samples.push((slot, sample.into()));
10166            }
10167        }
10168
10169        for i in 0..num_entries {
10170            let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
10171            expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
10172            assert_eq!(
10173                blockstore.get_recent_perf_samples(i + 1).unwrap(),
10174                expected_samples
10175            );
10176        }
10177    }
10178
10179    #[test]
10180    fn test_write_perf_samples() {
10181        let ledger_path = get_tmp_ledger_path_auto_delete!();
10182        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10183
10184        let num_entries: usize = 10;
10185        let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
10186        for x in 1..num_entries + 1 {
10187            let slot = x as u64 * 50;
10188            let sample = PerfSampleV2 {
10189                num_transactions: 1000 + x as u64,
10190                num_slots: 50,
10191                sample_period_secs: 20,
10192                num_non_vote_transactions: 300 + x as u64,
10193            };
10194
10195            blockstore.write_perf_sample(slot, &sample).unwrap();
10196            perf_samples.push((slot, PerfSample::V2(sample)));
10197        }
10198
10199        for x in 0..num_entries {
10200            let mut expected_samples = perf_samples[num_entries - 1 - x..].to_vec();
10201            expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
10202            assert_eq!(
10203                blockstore.get_recent_perf_samples(x + 1).unwrap(),
10204                expected_samples
10205            );
10206        }
10207    }
10208
10209    #[test]
10210    fn test_lowest_slot() {
10211        let ledger_path = get_tmp_ledger_path_auto_delete!();
10212        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10213
10214        assert_eq!(blockstore.lowest_slot(), 0);
10215
10216        for slot in 0..10 {
10217            let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true);
10218            blockstore.insert_shreds(shreds, None, false).unwrap();
10219        }
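        // lowest_slot() never reports slot 0, so with slots 0..=9 present the
        // answer is 1; purging through slot 5 then raises the floor to 6.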
10220        assert_eq!(blockstore.lowest_slot(), 1);
10221        blockstore.run_purge(0, 5, PurgeType::Exact).unwrap();
10222        assert_eq!(blockstore.lowest_slot(), 6);
10223    }
10224
10225    #[test]
10226    fn test_highest_slot() {
10227        let ledger_path = get_tmp_ledger_path_auto_delete!();
10228        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10229
10230        assert_eq!(blockstore.highest_slot().unwrap(), None);
10231
10232        for slot in 0..10 {
10233            let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true);
10234            blockstore.insert_shreds(shreds, None, false).unwrap();
10235            assert_eq!(blockstore.highest_slot().unwrap(), Some(slot));
10236        }
10237        blockstore.run_purge(5, 10, PurgeType::Exact).unwrap();
10238        assert_eq!(blockstore.highest_slot().unwrap(), Some(4));
10239
10240        blockstore.run_purge(0, 4, PurgeType::Exact).unwrap();
10241        assert_eq!(blockstore.highest_slot().unwrap(), None);
10242    }
10243
10244    #[test]
10245    fn test_recovery() {
10246        let ledger_path = get_tmp_ledger_path_auto_delete!();
10247        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10248
10249        let slot = 1;
10250        let (data_shreds, coding_shreds, leader_schedule_cache) =
10251            setup_erasure_shreds(slot, 0, 100);
10252
10253        let (dummy_retransmit_sender, _) = crossbeam_channel::bounded(0);
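        // Insert only the coding shreds; Reed-Solomon recovery should
        // regenerate every data shred from the coding shreds alone.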
10254        blockstore
10255            .do_insert_shreds(
10256                coding_shreds
10257                    .into_iter()
10258                    .map(|shred| (shred, /*is_repaired:*/ false)),
10259                Some(&leader_schedule_cache),
10260                false, // is_trusted
10261                Some((&ReedSolomonCache::default(), &dummy_retransmit_sender)),
10262                &mut BlockstoreInsertionMetrics::default(),
10263            )
10264            .unwrap();
10265        let shred_bufs: Vec<_> = data_shreds.iter().map(Shred::payload).cloned().collect();
10266
10267        // Check all the data shreds were recovered
10268        for (s, buf) in data_shreds.iter().zip(shred_bufs) {
10269            assert_eq!(
10270                blockstore
10271                    .get_data_shred(s.slot(), s.index() as u64)
10272                    .unwrap()
10273                    .unwrap(),
10274                buf.as_ref(),
10275            );
10276        }
10277
10278        verify_index_integrity(&blockstore, slot);
10279    }
10280
10281    #[test]
10282    fn test_index_integrity() {
10283        let slot = 1;
10284        let num_entries = 100;
10285        let (data_shreds, coding_shreds, leader_schedule_cache) =
10286            setup_erasure_shreds(slot, 0, num_entries);
10287        assert!(data_shreds.len() > 3);
10288        assert!(coding_shreds.len() > 3);
10289
10290        let ledger_path = get_tmp_ledger_path_auto_delete!();
10291        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10292
10293        // Test inserting all the shreds
10294        let all_shreds: Vec<_> = data_shreds
10295            .iter()
10296            .cloned()
10297            .chain(coding_shreds.iter().cloned())
10298            .collect();
10299        blockstore
10300            .insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
10301            .unwrap();
10302        verify_index_integrity(&blockstore, slot);
10303        blockstore.purge_and_compact_slots(0, slot);
10304
10305        // Test inserting just the codes, enough for recovery
10306        blockstore
10307            .insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
10308            .unwrap();
10309        verify_index_integrity(&blockstore, slot);
10310        blockstore.purge_and_compact_slots(0, slot);
10311
10312        // Test inserting some codes, but not enough for recovery
10313        blockstore
10314            .insert_shreds(
10315                coding_shreds[..coding_shreds.len() - 1].to_vec(),
10316                Some(&leader_schedule_cache),
10317                false,
10318            )
10319            .unwrap();
10320        verify_index_integrity(&blockstore, slot);
10321        blockstore.purge_and_compact_slots(0, slot);
10322
10323        // Test inserting all but one coding shred and all but one data shred, enough for recovery
10324        let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
10325            .iter()
10326            .cloned()
10327            .chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
10328            .collect();
10329        blockstore
10330            .insert_shreds(shreds, Some(&leader_schedule_cache), false)
10331            .unwrap();
10332        verify_index_integrity(&blockstore, slot);
10333        blockstore.purge_and_compact_slots(0, slot);
10334
10335        // Test inserting only some data and some coding shreds, together still enough for recovery
10336        let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
10337            .iter()
10338            .cloned()
10339            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
10340            .collect();
10341        blockstore
10342            .insert_shreds(shreds, Some(&leader_schedule_cache), false)
10343            .unwrap();
10344        verify_index_integrity(&blockstore, slot);
10345        blockstore.purge_and_compact_slots(0, slot);
10346
10347        // Test inserting all shreds in 2 rounds, make sure nothing is lost
10348        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
10349            .iter()
10350            .cloned()
10351            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
10352            .collect();
10353        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
10354            .iter()
10355            .cloned()
10356            .chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
10357            .collect();
10358        blockstore
10359            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
10360            .unwrap();
10361        blockstore
10362            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
10363            .unwrap();
10364        verify_index_integrity(&blockstore, slot);
10365        blockstore.purge_and_compact_slots(0, slot);
10366
10367        // Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
10368        // make sure nothing is lost
10369        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
10370            .iter()
10371            .cloned()
10372            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
10373            .collect();
10374        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
10375            .iter()
10376            .cloned()
10377            .chain(
10378                coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
10379                    .iter()
10380                    .cloned(),
10381            )
10382            .collect();
10383        blockstore
10384            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
10385            .unwrap();
10386        blockstore
10387            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
10388            .unwrap();
10389        verify_index_integrity(&blockstore, slot);
10390        blockstore.purge_and_compact_slots(0, slot);
10391
10392        // Test insert shreds in 2 rounds, but not enough to trigger
10393        // recovery, make sure nothing is lost
10394        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
10395            .iter()
10396            .cloned()
10397            .chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
10398            .collect();
10399        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
10400            .iter()
10401            .cloned()
10402            .chain(
10403                coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
10404                    .iter()
10405                    .cloned(),
10406            )
10407            .collect();
10408        blockstore
10409            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
10410            .unwrap();
10411        blockstore
10412            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
10413            .unwrap();
10414        verify_index_integrity(&blockstore, slot);
10415        blockstore.purge_and_compact_slots(0, slot);
10416    }
10417
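    // Convenience wrappers: each helper below fixes one more argument of the
    // fully parameterized shred-generation routine at the end of the chain.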
10418    fn setup_erasure_shreds(
10419        slot: u64,
10420        parent_slot: u64,
10421        num_entries: u64,
10422    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
10423        setup_erasure_shreds_with_index(slot, parent_slot, num_entries, 0)
10424    }
10425
10426    fn setup_erasure_shreds_with_index(
10427        slot: u64,
10428        parent_slot: u64,
10429        num_entries: u64,
10430        fec_set_index: u32,
10431    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
10432        setup_erasure_shreds_with_index_and_chained_merkle(
10433            slot,
10434            parent_slot,
10435            num_entries,
10436            fec_set_index,
10437            Some(Hash::new_from_array(rand::thread_rng().gen())),
10438        )
10439    }
10440
10441    fn setup_erasure_shreds_with_index_and_chained_merkle(
10442        slot: u64,
10443        parent_slot: u64,
10444        num_entries: u64,
10445        fec_set_index: u32,
10446        chained_merkle_root: Option<Hash>,
10447    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
10448        setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
10449            slot,
10450            parent_slot,
10451            num_entries,
10452            fec_set_index,
10453            chained_merkle_root,
10454            true,
10455        )
10456    }
10457
10458    fn setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
10459        slot: u64,
10460        parent_slot: u64,
10461        num_entries: u64,
10462        fec_set_index: u32,
10463        chained_merkle_root: Option<Hash>,
10464        is_last_in_slot: bool,
10465    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
10466        let entries = make_slot_entries_with_transactions(num_entries);
10467        let leader_keypair = Arc::new(Keypair::new());
10468        let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
10469        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
10470            &leader_keypair,
10471            &entries,
10472            is_last_in_slot,
10473            chained_merkle_root,
10474            fec_set_index, // next_shred_index
10475            fec_set_index, // next_code_index
10476            true,          // merkle_variant
10477            &ReedSolomonCache::default(),
10478            &mut ProcessShredsStats::default(),
10479        );
10480
10481        let genesis_config = create_genesis_config(2).genesis_config;
10482        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
10483        let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
10484        let fixed_schedule = FixedSchedule {
10485            leader_schedule: Arc::new(LeaderSchedule::new_from_schedule(vec![
10486                leader_keypair.pubkey()
10487            ])),
10488        };
10489        leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule));
10490
10491        (data_shreds, coding_shreds, Arc::new(leader_schedule_cache))
10492    }
10493
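    // Cross-checks the Index column against the shred columns: every shred
    // reachable by iteration must be individually readable and present in the
    // index, and the index must not contain any extras.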
10494    fn verify_index_integrity(blockstore: &Blockstore, slot: u64) {
10495        let shred_index = blockstore.get_index(slot).unwrap().unwrap();
10496
10497        let data_iter = blockstore.slot_data_iterator(slot, 0).unwrap();
10498        let mut num_data = 0;
10499        for ((slot, index), _) in data_iter {
10500            num_data += 1;
10501            // Test that iterator and individual shred lookup yield same set
10502            assert!(blockstore.get_data_shred(slot, index).unwrap().is_some());
10503            // Test that the data index has current shred accounted for
10504            assert!(shred_index.data().contains(index));
10505        }
10506
10507        // Test the data index doesn't have anything extra
10508        let num_data_in_index = shred_index.data().num_shreds();
10509        assert_eq!(num_data_in_index, num_data);
10510
10511        let coding_iter = blockstore.slot_coding_iterator(slot, 0).unwrap();
10512        let mut num_coding = 0;
10513        for ((slot, index), _) in coding_iter {
10514            num_coding += 1;
10515            // Test that the iterator and individual shred lookup yield same set
10516            assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some());
10517            // Test that the coding index has current shred accounted for
10518            assert!(shred_index.coding().contains(index));
10519        }
10520
10521        // Test the coding index doesn't have anything extra
10522        let num_coding_in_index = shred_index.coding().num_shreds();
10523        assert_eq!(num_coding_in_index, num_coding);
10524    }
10525
10526    #[test_case(false)]
10527    #[test_case(true)]
10528    fn test_duplicate_slot(chained: bool) {
10529        let slot = 0;
10530        let entries1 = make_slot_entries_with_transactions(1);
10531        let entries2 = make_slot_entries_with_transactions(1);
10532        let leader_keypair = Arc::new(Keypair::new());
10533        let reed_solomon_cache = ReedSolomonCache::default();
10534        let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
10535        let chained_merkle_root = chained.then(|| Hash::new_from_array(rand::thread_rng().gen()));
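        // Both batches share the same (optional) chained Merkle root, so they
        // differ only in their entries, making them conflicting duplicates.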
10536        let (shreds, _) = shredder.entries_to_shreds(
10537            &leader_keypair,
10538            &entries1,
10539            true, // is_last_in_slot
10540            chained_merkle_root,
10541            0,    // next_shred_index
10542            0,    // next_code_index,
10543            true, // merkle_variant
10544            &reed_solomon_cache,
10545            &mut ProcessShredsStats::default(),
10546        );
10547        let (duplicate_shreds, _) = shredder.entries_to_shreds(
10548            &leader_keypair,
10549            &entries2,
10550            true, // is_last_in_slot
10551            chained_merkle_root,
10552            0,    // next_shred_index
10553            0,    // next_code_index
10554            true, // merkle_variant
10555            &reed_solomon_cache,
10556            &mut ProcessShredsStats::default(),
10557        );
10558        let shred = shreds[0].clone();
10559        let duplicate_shred = duplicate_shreds[0].clone();
10560        let non_duplicate_shred = shred.clone();
10561
10562        let ledger_path = get_tmp_ledger_path_auto_delete!();
10563        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10564
10565        blockstore
10566            .insert_shreds(vec![shred.clone()], None, false)
10567            .unwrap();
10568
10569        // No duplicate shreds exist yet
10570        assert!(!blockstore.has_duplicate_shreds_in_slot(slot));
10571
10572        // Check if shreds are duplicated
10573        assert_eq!(
10574            blockstore.is_shred_duplicate(&duplicate_shred).as_deref(),
10575            Some(shred.payload().as_ref())
10576        );
10577        assert!(blockstore
10578            .is_shred_duplicate(&non_duplicate_shred)
10579            .is_none());
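        // A byte-identical shred is not reported as a duplicate; only a
        // conflicting payload at the same (slot, index) position is.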
10580
10581        // Store a duplicate shred
10582        blockstore
10583            .store_duplicate_slot(
10584                slot,
10585                shred.payload().clone(),
10586                duplicate_shred.payload().clone(),
10587            )
10588            .unwrap();
10589
10590        // Slot is now marked as duplicate
10591        assert!(blockstore.has_duplicate_shreds_in_slot(slot));
10592
10593        // Check ability to fetch the duplicates
10594        let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap();
10595        assert_eq!(duplicate_proof.shred1, *shred.payload());
10596        assert_eq!(duplicate_proof.shred2, *duplicate_shred.payload());
10597    }
10598
10599    #[test]
10600    fn test_clear_unconfirmed_slot() {
10601        let ledger_path = get_tmp_ledger_path_auto_delete!();
10602        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10603
10604        let unconfirmed_slot = 9;
10605        let unconfirmed_child_slot = 10;
10606        let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];
10607
10608        // Insert shreds for slots 2, 9, and 10; slot 9 is marked dead below
10609        let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
10610            .into_iter()
10611            .flat_map(|x| x.0)
10612            .collect();
10613        blockstore.insert_shreds(shreds, None, false).unwrap();
10614        // There are 32 data shreds in slot 9.
10615        for index in 0..32 {
10616            assert_matches!(
10617                blockstore.get_data_shred(unconfirmed_slot, index as u64),
10618                Ok(Some(_))
10619            );
10620        }
10621        blockstore.set_dead_slot(unconfirmed_slot).unwrap();
10622
10623        // Purge the slot
10624        blockstore.clear_unconfirmed_slot(unconfirmed_slot);
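        // Clearing wipes the slot's shreds and dead flag, but the SlotMeta
        // survives with its next_slots link to the child slot intact.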
10625        assert!(!blockstore.is_dead(unconfirmed_slot));
10626        assert_eq!(
10627            blockstore
10628                .meta(unconfirmed_slot)
10629                .unwrap()
10630                .unwrap()
10631                .next_slots,
10632            vec![unconfirmed_child_slot]
10633        );
10634        assert!(blockstore
10635            .get_data_shred(unconfirmed_slot, 0)
10636            .unwrap()
10637            .is_none());
10638    }
10639
10640    #[test]
10641    fn test_clear_unconfirmed_slot_and_insert_again() {
10642        let ledger_path = get_tmp_ledger_path_auto_delete!();
10643        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10644
10645        let confirmed_slot = 7;
10646        let unconfirmed_slot = 8;
10647        let slots = vec![confirmed_slot, unconfirmed_slot];
10648
10649        let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
10650            .into_iter()
10651            .flat_map(|x| x.0)
10652            .collect();
10653        assert_eq!(shreds.len(), 2 * 32);
10654
10655        // Save off unconfirmed_slot for later, just one shred at shreds[32]
10656        let unconfirmed_slot_shreds = vec![shreds[32].clone()];
10657        assert_eq!(unconfirmed_slot_shreds[0].slot(), unconfirmed_slot);
10658
10659        // Insert shreds for slots 7 and 8
10660        blockstore.insert_shreds(shreds, None, false).unwrap();
10661
10662        // Purge the slot
10663        blockstore.clear_unconfirmed_slot(unconfirmed_slot);
10664        assert!(!blockstore.is_dead(unconfirmed_slot));
10665        assert!(blockstore
10666            .get_data_shred(unconfirmed_slot, 0)
10667            .unwrap()
10668            .is_none());
10669
10670        // Re-add unconfirmed_slot and confirm that confirmed_slot only has
10671        // unconfirmed_slot in next_slots once
10672        blockstore
10673            .insert_shreds(unconfirmed_slot_shreds, None, false)
10674            .unwrap();
10675        assert_eq!(
10676            blockstore.meta(confirmed_slot).unwrap().unwrap().next_slots,
10677            vec![unconfirmed_slot]
10678        );
10679    }
10680
10681    #[test]
10682    fn test_update_completed_data_indexes() {
10683        let mut completed_data_indexes = BTreeSet::default();
10684        let mut shred_index = ShredIndex::default();
10685
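        // Every shred is marked data-complete and all prior indexes are
        // present, so each insertion completes exactly the range [i, i + 1).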
10686        for i in 0..10 {
10687            shred_index.insert(i as u64);
10688            assert!(update_completed_data_indexes(
10689                true,
10690                i,
10691                &shred_index,
10692                &mut completed_data_indexes
10693            )
10694            .eq(std::iter::once(i..i + 1)));
10695            assert!(completed_data_indexes.iter().copied().eq(0..=i));
10696        }
10697    }
10698
10699    #[test]
10700    fn test_update_completed_data_indexes_out_of_order() {
10701        let mut completed_data_indexes = BTreeSet::default();
10702        let mut shred_index = ShredIndex::default();
10703
10704        shred_index.insert(4);
10705        assert!(
10706            update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes)
10707                .eq([])
10708        );
10709        assert!(completed_data_indexes.is_empty());
10710
10711        shred_index.insert(2);
10712        assert!(
10713            update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes)
10714                .eq([])
10715        );
10716        assert!(completed_data_indexes.is_empty());
10717
10718        shred_index.insert(3);
10719        assert!(
10720            update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes)
10721                .eq([])
10722        );
10723        assert!(completed_data_indexes.iter().eq([3].iter()));
10724
10725        // Inserting data complete shred 1 now confirms the range of shreds [2, 3]
10726        // is part of the same data set
10727        shred_index.insert(1);
10728        assert!(
10729            update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes)
10730                .eq(std::iter::once(2..4))
10731        );
10732        assert!(completed_data_indexes.iter().eq([1, 3].iter()));
10733
10734        // Inserting data complete shred 0 now confirms that shreds [0] and [1]
10735        // each complete a data set of their own
10736        shred_index.insert(0);
10737        assert!(
10738            update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes)
10739                .eq([0..1, 1..2])
10740        );
10741        assert!(completed_data_indexes.iter().eq([0, 1, 3].iter()));
10742    }
10743
10744    #[test]
10745    fn test_rewards_protobuf_backward_compatibility() {
10746        let ledger_path = get_tmp_ledger_path_auto_delete!();
10747        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10748
10749        let rewards: Rewards = (0..100)
10750            .map(|i| Reward {
10751                pubkey: solana_pubkey::new_rand().to_string(),
10752                lamports: 42 + i,
10753                post_balance: u64::MAX,
10754                reward_type: Some(RewardType::Fee),
10755                commission: None,
10756            })
10757            .collect();
10758        let protobuf_rewards: generated::Rewards = rewards.into();
10759
10760        let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into();
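        // Slots 0 and 1 are written as legacy bincode, slots 2 and 3 as
        // protobuf; get_protobuf_or_bincode must decode both encodings to the
        // same value.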
10761        for slot in 0..2 {
10762            let data = serialize(&deprecated_rewards).unwrap();
10763            blockstore.rewards_cf.put_bytes(slot, &data).unwrap();
10764        }
10765        for slot in 2..4 {
10766            blockstore
10767                .rewards_cf
10768                .put_protobuf(slot, &protobuf_rewards)
10769                .unwrap();
10770        }
10771        for slot in 0..4 {
10772            assert_eq!(
10773                blockstore
10774                    .rewards_cf
10775                    .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)
10776                    .unwrap()
10777                    .unwrap(),
10778                protobuf_rewards
10779            );
10780        }
10781    }
10782
10783    // This test is probably superfluous, since it is highly unlikely that bincode-format
10784    // TransactionStatus entries exist in any current ledger. They certainly exist in historical
10785    // ledger archives, but typically those require contemporaneous software for other reasons.
10786    // However, we keep the test since the APIs still exist in `blockstore_db`.
10787    #[test]
10788    fn test_transaction_status_protobuf_backward_compatibility() {
10789        let ledger_path = get_tmp_ledger_path_auto_delete!();
10790        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10791
10792        let status = TransactionStatusMeta {
10793            status: Ok(()),
10794            fee: 42,
10795            pre_balances: vec![1, 2, 3],
10796            post_balances: vec![1, 2, 3],
10797            inner_instructions: Some(vec![]),
10798            log_messages: Some(vec![]),
10799            pre_token_balances: Some(vec![TransactionTokenBalance {
10800                account_index: 0,
10801                mint: Pubkey::new_unique().to_string(),
10802                ui_token_amount: UiTokenAmount {
10803                    ui_amount: Some(1.1),
10804                    decimals: 1,
10805                    amount: "11".to_string(),
10806                    ui_amount_string: "1.1".to_string(),
10807                },
10808                owner: Pubkey::new_unique().to_string(),
10809                program_id: Pubkey::new_unique().to_string(),
10810            }]),
10811            post_token_balances: Some(vec![TransactionTokenBalance {
10812                account_index: 0,
10813                mint: Pubkey::new_unique().to_string(),
10814                ui_token_amount: UiTokenAmount {
10815                    ui_amount: None,
10816                    decimals: 1,
10817                    amount: "11".to_string(),
10818                    ui_amount_string: "1.1".to_string(),
10819                },
10820                owner: Pubkey::new_unique().to_string(),
10821                program_id: Pubkey::new_unique().to_string(),
10822            }]),
10823            rewards: Some(vec![Reward {
10824                pubkey: "My11111111111111111111111111111111111111111".to_string(),
10825                lamports: -42,
10826                post_balance: 42,
10827                reward_type: Some(RewardType::Rent),
10828                commission: None,
10829            }]),
10830            loaded_addresses: LoadedAddresses::default(),
10831            return_data: Some(TransactionReturnData {
10832                program_id: Pubkey::new_unique(),
10833                data: vec![1, 2, 3],
10834            }),
10835            compute_units_consumed: Some(23456),
10836        };
10837        let deprecated_status: StoredTransactionStatusMeta = status.clone().try_into().unwrap();
10838        let protobuf_status: generated::TransactionStatusMeta = status.into();
10839
10840        for slot in 0..2 {
10841            let data = serialize(&deprecated_status).unwrap();
10842            blockstore
10843                .transaction_status_cf
10844                .put_bytes((Signature::default(), slot), &data)
10845                .unwrap();
10846        }
10847        for slot in 2..4 {
10848            blockstore
10849                .transaction_status_cf
10850                .put_protobuf((Signature::default(), slot), &protobuf_status)
10851                .unwrap();
10852        }
10853        for slot in 0..4 {
10854            assert_eq!(
10855                blockstore
10856                    .transaction_status_cf
10857                    .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
10858                        Signature::default(),
10859                        slot
10860                    ))
10861                    .unwrap()
10862                    .unwrap(),
10863                protobuf_status
10864            );
10865        }
10866    }
10867
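    // Helper: build a single entry containing `num_txs` transfer transactions,
    // large enough that shredding it spans multiple data shreds.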
10868    fn make_large_tx_entry(num_txs: usize) -> Entry {
10869        let txs: Vec<_> = (0..num_txs)
10870            .map(|_| {
10871                let keypair0 = Keypair::new();
10872                let to = solana_pubkey::new_rand();
10873                solana_sdk::system_transaction::transfer(&keypair0, &to, 1, Hash::default())
10874            })
10875            .collect();
10876
10877        Entry::new(&Hash::default(), 1, txs)
10878    }
10879
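    // Coding shreds generated from the same data but with different
    // next_code_index values overlap with conflicting payloads; inserting one
    // shred from each batch should flag the slot as containing duplicates.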
10880    #[test]
10881    fn erasure_multiple_config() {
10882        solana_logger::setup();
10883        let slot = 1;
10884        let parent = 0;
10885        let num_txs = 20;
10886        let entry = make_large_tx_entry(num_txs);
10887        let shreds = entries_to_test_shreds(
10888            &[entry],
10889            slot,
10890            parent,
10891            true,  // is_full_slot
10892            0,     // version
10893            false, // merkle_variant
10894        );
10895        assert!(shreds.len() > 1);
10896
10897        let ledger_path = get_tmp_ledger_path_auto_delete!();
10898        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10899
10900        let reed_solomon_cache = ReedSolomonCache::default();
10901        let coding1 = Shredder::generate_coding_shreds(
10902            &shreds,
10903            0, // next_code_index
10904            &reed_solomon_cache,
10905        );
10906        let coding2 = Shredder::generate_coding_shreds(
10907            &shreds,
10908            1, // next_code_index
10909            &reed_solomon_cache,
10910        );
10911        for shred in &shreds {
10912            info!("shred {:?}", shred);
10913        }
10914        for shred in &coding1 {
10915            info!("coding1 {:?}", shred);
10916        }
10917        for shred in &coding2 {
10918            info!("coding2 {:?}", shred);
10919        }
10920        blockstore
10921            .insert_shreds(shreds[..shreds.len() - 2].to_vec(), None, false)
10922            .unwrap();
10923        blockstore
10924            .insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false)
10925            .unwrap();
10926        assert!(blockstore.has_duplicate_shreds_in_slot(slot));
10927    }
10928
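    // Re-inserting the same partial set of shreds repeatedly must leave the slot
    // meta unchanged; inserting the full (re-signed) set then completes the slot.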
10929    #[test]
10930    fn test_insert_data_shreds_same_slot_last_index() {
10931        let ledger_path = get_tmp_ledger_path_auto_delete!();
10932        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10933
10934        // Create enough entries to ensure at least two shreds are created
10935        let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1;
10936        let (mut original_shreds, original_entries) =
10937            make_slot_entries(0, 0, num_unique_entries, /*merkle_variant:*/ true);
10938        let mut duplicate_shreds = original_shreds.clone();
10939        // Mutate signature so that payloads are not the same as the originals.
10940        for shred in &mut duplicate_shreds {
10941            shred.sign(&Keypair::new());
10942        }
10943        // Discard first shred, so that the slot is not full
10944        assert!(original_shreds.len() > 1);
10945        let last_index = original_shreds.last().unwrap().index() as u64;
10946        original_shreds.remove(0);
10947
10948        // Insert the same shreds, including the last shred specifically, multiple
10949        // times
10950        for _ in 0..10 {
10951            blockstore
10952                .insert_shreds(original_shreds.clone(), None, false)
10953                .unwrap();
10954            let meta = blockstore.meta(0).unwrap().unwrap();
10955            assert!(!blockstore.is_dead(0));
10956            assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]);
10957            assert_eq!(meta.consumed, 0);
10958            assert_eq!(meta.received, last_index + 1);
10959            assert_eq!(meta.parent_slot, Some(0));
10960            assert_eq!(meta.last_index, Some(last_index));
10961            assert!(!blockstore.is_full(0));
10962        }
10963
10964        let num_shreds = duplicate_shreds.len() as u64;
10965        blockstore
10966            .insert_shreds(duplicate_shreds, None, false)
10967            .unwrap();
10968
10969        assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);
10970
10971        let meta = blockstore.meta(0).unwrap().unwrap();
10972        assert_eq!(meta.consumed, num_shreds);
10973        assert_eq!(meta.received, num_shreds);
10974        assert_eq!(meta.parent_slot, Some(0));
10975        assert_eq!(meta.last_index, Some(num_shreds - 1));
10976        assert!(blockstore.is_full(0));
10977        assert!(!blockstore.is_dead(0));
10978    }
10979
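    // Two shreds in the same slot both flagged "last in slot" form a duplicate
    // proof, so the slot should be recorded as duplicate.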
10980    #[test]
10981    fn test_duplicate_last_index() {
10982        let num_shreds = 2;
10983        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
10984        let slot = 1;
10985        let (mut shreds, _) =
10986            make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
10987
10988        // Mark both as last shred
10989        shreds[0].set_last_in_slot();
10990        shreds[1].set_last_in_slot();
10991        let ledger_path = get_tmp_ledger_path_auto_delete!();
10992        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10993
10994        blockstore.insert_shreds(shreds, None, false).unwrap();
10995
10996        assert!(blockstore.get_duplicate_slot(slot).is_some());
10997    }
10998
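    // A shred flagged "last in slot" with an index smaller than already received
    // shreds is a duplicate proof, and depending on insertion order may also mark
    // the slot dead. The cases below cover batched, duplicate, and reversed inserts.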
10999    #[test]
11000    fn test_duplicate_last_index_mark_dead() {
11001        let num_shreds = 10;
11002        let smaller_last_shred_index = 5;
11003        let larger_last_shred_index = 8;
11004
11005        let setup_test_shreds = |slot: Slot| -> Vec<Shred> {
11006            let num_entries = max_ticks_per_n_shreds(num_shreds, Some(LEGACY_SHRED_DATA_CAPACITY));
11007            let (mut shreds, _) =
11008                make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
11009            shreds[smaller_last_shred_index].set_last_in_slot();
11010            shreds[larger_last_shred_index].set_last_in_slot();
11011            shreds
11012        };
11013
11014        let get_expected_slot_meta_and_index_meta =
11015            |blockstore: &Blockstore, shreds: Vec<Shred>| -> (SlotMeta, Index) {
11016                let slot = shreds[0].slot();
11017                blockstore
11018                    .insert_shreds(shreds.clone(), None, false)
11019                    .unwrap();
11020                let meta = blockstore.meta(slot).unwrap().unwrap();
11021                assert_eq!(meta.consumed, shreds.len() as u64);
11022                let shreds_index = blockstore.get_index(slot).unwrap().unwrap();
11023                for i in 0..shreds.len() as u64 {
11024                    assert!(shreds_index.data().contains(i));
11025                }
11026
11027                // Cleanup the slot
11028                blockstore
11029                    .run_purge(slot, slot, PurgeType::Exact)
11030                    .expect("Purge database operations failed");
11031                assert!(blockstore.meta(slot).unwrap().is_none());
11032
11033                (meta, shreds_index)
11034            };
11035
11036        let ledger_path = get_tmp_ledger_path_auto_delete!();
11037        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11038
11039        let mut slot = 0;
11040        let shreds = setup_test_shreds(slot);
11041
11042        // Case 1: Insert in the same batch. Since we're inserting the shreds in order,
11043        // any shreds > smaller_last_shred_index will not be inserted. Slot is not marked
11044        // as dead because no shreds > the first "last" index shred are inserted before
11045        // the "last" index shred itself is inserted.
11046        let (expected_slot_meta, expected_index) = get_expected_slot_meta_and_index_meta(
11047            &blockstore,
11048            shreds[..=smaller_last_shred_index].to_vec(),
11049        );
11050        blockstore
11051            .insert_shreds(shreds.clone(), None, false)
11052            .unwrap();
11053        assert!(blockstore.get_duplicate_slot(slot).is_some());
11054        assert!(!blockstore.is_dead(slot));
11055        for i in 0..num_shreds {
11056            if i <= smaller_last_shred_index as u64 {
11057                assert_eq!(
11058                    blockstore.get_data_shred(slot, i).unwrap().unwrap(),
11059                    shreds[i as usize].payload().as_ref(),
11060                );
11061            } else {
11062                assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
11063            }
11064        }
11065        let mut meta = blockstore.meta(slot).unwrap().unwrap();
11066        meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
11067        assert_eq!(meta, expected_slot_meta);
11068        assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
11069
11070        // Case 2: Inserting a duplicate with an even smaller last shred index should not
11071        // mark the slot as dead since the SlotMeta is full.
11072        let even_smaller_last_shred_duplicate = {
11073            let mut payload = shreds[smaller_last_shred_index - 1].payload().clone();
11074            // Flip a byte to create a duplicate shred
11075            payload[0] = u8::MAX - payload[0];
11076            let mut shred = Shred::new_from_serialized_shred(payload).unwrap();
11077            shred.set_last_in_slot();
11078            shred
11079        };
11080        assert!(blockstore
11081            .is_shred_duplicate(&even_smaller_last_shred_duplicate)
11082            .is_some());
11083        blockstore
11084            .insert_shreds(vec![even_smaller_last_shred_duplicate], None, false)
11085            .unwrap();
11086        assert!(!blockstore.is_dead(slot));
11087        for i in 0..num_shreds {
11088            if i <= smaller_last_shred_index as u64 {
11089                assert_eq!(
11090                    blockstore.get_data_shred(slot, i).unwrap().unwrap(),
11091                    shreds[i as usize].payload().as_ref(),
11092                );
11093            } else {
11094                assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
11095            }
11096        }
11097        let mut meta = blockstore.meta(slot).unwrap().unwrap();
11098        meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
11099        assert_eq!(meta, expected_slot_meta);
11100        assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
11101
11102        // Case 3: Insert shreds in reverse so that consumed will not be updated. Now on
11103        // insert, the slot should be marked as dead
11104        slot += 1;
11105        let mut shreds = setup_test_shreds(slot);
11106        shreds.reverse();
11107        blockstore
11108            .insert_shreds(shreds.clone(), None, false)
11109            .unwrap();
11110        assert!(blockstore.is_dead(slot));
11111        // All shreds except the two "last index" shreds are inserted; those two are
11112        // rejected because they are marked as last but have indices less than the
11113        // first received index == 10. The others are inserted even after the slot is
11114        // marked dead on the attempted insert of the first last_index shred, since
11115        // dead slots can still be inserted into.
11116        for i in 0..num_shreds {
11117            let shred_to_check = &shreds[i as usize];
11118            let shred_index = shred_to_check.index() as u64;
11119            if shred_index != smaller_last_shred_index as u64
11120                && shred_index != larger_last_shred_index as u64
11121            {
11122                assert_eq!(
11123                    blockstore
11124                        .get_data_shred(slot, shred_index)
11125                        .unwrap()
11126                        .unwrap(),
11127                    shred_to_check.payload().as_ref(),
11128                );
11129            } else {
11130                assert!(blockstore
11131                    .get_data_shred(slot, shred_index)
11132                    .unwrap()
11133                    .is_none());
11134            }
11135        }
11136
11137        // Case 4: Same as Case 3, but this time insert the shreds one at a time to test that the clearing
11138        // of data shreds works even after they've been committed
11139        slot += 1;
11140        let mut shreds = setup_test_shreds(slot);
11141        shreds.reverse();
11142        for shred in shreds.clone() {
11143            blockstore.insert_shreds(vec![shred], None, false).unwrap();
11144        }
11145        assert!(blockstore.is_dead(slot));
11146        // All the shreds will be inserted since dead slots can still be inserted into.
11147        for i in 0..num_shreds {
11148            let shred_to_check = &shreds[i as usize];
11149            let shred_index = shred_to_check.index() as u64;
11150            if shred_index != smaller_last_shred_index as u64
11151                && shred_index != larger_last_shred_index as u64
11152            {
11153                assert_eq!(
11154                    blockstore
11155                        .get_data_shred(slot, shred_index)
11156                        .unwrap()
11157                        .unwrap(),
11158                    shred_to_check.payload().as_ref(),
11159                );
11160            } else {
11161                assert!(blockstore
11162                    .get_data_shred(slot, shred_index)
11163                    .unwrap()
11164                    .is_none());
11165            }
11166        }
11167    }
11168
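    // Race test: a reader calling `get_slot_entries_with_shred_info` must never
    // observe a full slot while a concurrent insert is marking that slot dead.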
11169    #[test]
11170    fn test_get_slot_entries_dead_slot_race() {
11171        let setup_test_shreds = move |slot: Slot| -> Vec<Shred> {
11172            let num_shreds = 10;
11173            let middle_shred_index = 5;
11174            let num_entries = max_ticks_per_n_shreds(num_shreds, None);
11175            let (shreds, _) =
11176                make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
11177
11178            // Reverse shreds so that last shred gets inserted first and sets meta.received
11179            let mut shreds: Vec<Shred> = shreds.into_iter().rev().collect();
11180
11181            // Push the real middle shred to the end of the shreds list
11182            shreds.push(shreds[middle_shred_index].clone());
11183
11184            // Set the middle shred as a last shred to cause the slot to be marked dead
11185            shreds[middle_shred_index].set_last_in_slot();
11186            shreds
11187        };
11188
11189        let ledger_path = get_tmp_ledger_path_auto_delete!();
11190        {
11191            let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
11192            let (slot_sender, slot_receiver) = unbounded();
11193            let (shred_sender, shred_receiver) = unbounded::<Vec<Shred>>();
11194            let (signal_sender, signal_receiver) = unbounded();
11195
11196            let t_entry_getter = {
11197                let blockstore = blockstore.clone();
11198                let signal_sender = signal_sender.clone();
11199                Builder::new()
11200                    .spawn(move || {
11201                        while let Ok(slot) = slot_receiver.recv() {
11202                            match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
11203                                Ok((_entries, _num_shreds, is_full)) => {
11204                                    if is_full {
11205                                        signal_sender
11206                                            .send(Err(IoError::new(
11207                                                ErrorKind::Other,
11208                                                "got full slot entries for dead slot",
11209                                            )))
11210                                            .unwrap();
11211                                    }
11212                                }
11213                                Err(err) => {
11214                                    assert_matches!(err, BlockstoreError::DeadSlot);
11215                                }
11216                            }
11217                            signal_sender.send(Ok(())).unwrap();
11218                        }
11219                    })
11220                    .unwrap()
11221            };
11222
11223            let t_shred_inserter = {
11224                let blockstore = blockstore.clone();
11225                Builder::new()
11226                    .spawn(move || {
11227                        while let Ok(shreds) = shred_receiver.recv() {
11228                            let slot = shreds[0].slot();
11229                            // Grab this lock to block `get_slot_entries` before it fetches completed datasets
11230                            // and then mark the slot as dead, but full, by inserting carefully crafted shreds.
11231
11232                            #[allow(clippy::readonly_write_lock)]
11233                            // Possible clippy bug, the lock is unused so clippy shouldn't care
11234                            // about read vs. write lock
11235                            let _lowest_cleanup_slot =
11236                                blockstore.lowest_cleanup_slot.write().unwrap();
11237                            blockstore.insert_shreds(shreds, None, false).unwrap();
11238                            assert!(blockstore.get_duplicate_slot(slot).is_some());
11239                            assert!(blockstore.is_dead(slot));
11240                            assert!(blockstore.meta(slot).unwrap().unwrap().is_full());
11241                            signal_sender.send(Ok(())).unwrap();
11242                        }
11243                    })
11244                    .unwrap()
11245            };
11246
11247            for slot in 0..100 {
11248                let shreds = setup_test_shreds(slot);
11249
11250                // Start a task on each thread to trigger a race condition
11251                slot_sender.send(slot).unwrap();
11252                shred_sender.send(shreds).unwrap();
11253
11254                // Check that each thread processed their task before continuing
11255                for _ in 1..=2 {
11256                    let res = signal_receiver.recv().unwrap();
11257                    assert!(res.is_ok(), "race condition: {res:?}");
11258                }
11259            }
11260
11261            drop(slot_sender);
11262            drop(shred_sender);
11263
11264            let handles = vec![t_entry_getter, t_shred_inserter];
11265            for handle in handles {
11266                assert!(handle.join().is_ok());
11267            }
11268
11269            assert_eq!(Arc::strong_count(&blockstore), 1);
11270        }
11271    }
11272
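    // Round-trip the program cost table: write entries, read them back, update
    // them, add a new record, and delete it.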
11273    #[test]
11274    fn test_read_write_cost_table() {
11275        let ledger_path = get_tmp_ledger_path_auto_delete!();
11276        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11277
11278        let num_entries: usize = 10;
11279        let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
11280        for x in 1..num_entries + 1 {
11281            cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
11282        }
11283
11284        // write to db
11285        for (key, cost) in cost_table.iter() {
11286            blockstore
11287                .write_program_cost(key, cost)
11288                .expect("write a program");
11289        }
11290
11291        // read back from db
11292        let read_back = blockstore.read_program_costs().expect("read programs");
11293        // verify
11294        assert_eq!(read_back.len(), cost_table.len());
11295        for (read_key, read_cost) in read_back {
11296            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
11297        }
11298
11299        // update value, write to db
11300        for val in cost_table.values_mut() {
11301            *val += 100;
11302        }
11303        for (key, cost) in cost_table.iter() {
11304            blockstore
11305                .write_program_cost(key, cost)
11306                .expect("write a program");
11307        }
11308        // add a new record
11309        let new_program_key = Pubkey::new_unique();
11310        let new_program_cost = 999;
11311        blockstore
11312            .write_program_cost(&new_program_key, &new_program_cost)
11313            .unwrap();
11314
11315        // confirm value updated
11316        let read_back = blockstore.read_program_costs().expect("read programs");
11317        // verify
11318        assert_eq!(read_back.len(), cost_table.len() + 1);
11319        for (key, cost) in cost_table.iter() {
11320            assert_eq!(*cost, read_back.iter().find(|(k, _v)| k == key).unwrap().1);
11321        }
11322        assert_eq!(
11323            new_program_cost,
11324            read_back
11325                .iter()
11326                .find(|(k, _v)| *k == new_program_key)
11327                .unwrap()
11328                .1
11329        );
11330
11331        // test delete
11332        blockstore
11333            .delete_program_cost(&new_program_key)
11334            .expect("delete a program");
11335        let read_back = blockstore.read_program_costs().expect("read programs");
11336        // verify
11337        assert_eq!(read_back.len(), cost_table.len());
11338        for (read_key, read_cost) in read_back {
11339            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
11340        }
11341    }
11342
11343    #[test]
11344    fn test_delete_old_records_from_cost_table() {
11345        let ledger_path = get_tmp_ledger_path_auto_delete!();
11346        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11347
11348        let num_entries: usize = 10;
11349        let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
11350        for x in 1..num_entries + 1 {
11351            cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
11352        }
11353
11354        // write to db
11355        for (key, cost) in cost_table.iter() {
11356            blockstore
11357                .write_program_cost(key, cost)
11358                .expect("write a program");
11359        }
11360
11361        // remove a record
11362        let mut removed_key = Pubkey::new_unique();
11363        for (key, cost) in cost_table.iter() {
11364            if *cost == 101_u64 {
11365                removed_key = *key;
11366                break;
11367            }
11368        }
11369        cost_table.remove(&removed_key);
11370
11371        // delete records from blockstore if they are no longer in cost_table
11372        let db_records = blockstore.read_program_costs().expect("read programs");
11373        db_records.iter().for_each(|(pubkey, _)| {
11374            if !cost_table.iter().any(|(key, _)| key == pubkey) {
11375                assert_eq!(*pubkey, removed_key);
11376                blockstore
11377                    .delete_program_cost(pubkey)
11378                    .expect("delete old program");
11379            }
11380        });
11381
11382        // read back from db
11383        let read_back = blockstore.read_program_costs().expect("read programs");
11384        // verify
11385        assert_eq!(read_back.len(), cost_table.len());
11386        for (read_key, read_cost) in read_back {
11387            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
11388        }
11389    }
11390
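    // `previous_erasure_set` should find the erasure meta of the FEC set directly
    // preceding the given one, consulting the in-memory working entries before the
    // blockstore and never crossing a slot boundary.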
11391    #[test]
11392    fn test_previous_erasure_set() {
11393        let ledger_path = get_tmp_ledger_path_auto_delete!();
11394        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11395        let mut erasure_metas = BTreeMap::new();
11396
11397        let parent_slot = 0;
11398        let prev_slot = 1;
11399        let slot = 2;
11400        let (data_shreds_0, coding_shreds_0, _) =
11401            setup_erasure_shreds_with_index(slot, parent_slot, 10, 0);
11402        let erasure_set_0 = ErasureSetId::new(slot, 0);
11403        let erasure_meta_0 =
11404            ErasureMeta::from_coding_shred(coding_shreds_0.first().unwrap()).unwrap();
11405
11406        let prev_fec_set_index = data_shreds_0.len() as u32;
11407        let (data_shreds_prev, coding_shreds_prev, _) =
11408            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
11409        let erasure_set_prev = ErasureSetId::new(slot, prev_fec_set_index);
11410        let erasure_meta_prev =
11411            ErasureMeta::from_coding_shred(coding_shreds_prev.first().unwrap()).unwrap();
11412
11413        let (_, coding_shreds_prev_slot, _) =
11414            setup_erasure_shreds_with_index(prev_slot, parent_slot, 10, prev_fec_set_index);
11415        let erasure_set_prev_slot = ErasureSetId::new(prev_slot, prev_fec_set_index);
11416        let erasure_meta_prev_slot =
11417            ErasureMeta::from_coding_shred(coding_shreds_prev_slot.first().unwrap()).unwrap();
11418
11419        let fec_set_index = data_shreds_prev.len() as u32 + prev_fec_set_index;
11420        let erasure_set = ErasureSetId::new(slot, fec_set_index);
11421
11422        // Blockstore is empty
11423        assert_eq!(
11424            blockstore
11425                .previous_erasure_set(erasure_set, &erasure_metas)
11426                .unwrap(),
11427            None
11428        );
11429
11430        // Erasure metas do not contain the previous FEC set, only the one before that
11431        erasure_metas.insert(erasure_set_0, WorkingEntry::Dirty(erasure_meta_0));
11432        assert_eq!(
11433            blockstore
11434                .previous_erasure_set(erasure_set, &erasure_metas)
11435                .unwrap(),
11436            None
11437        );
11438
11439        // Both erasure metas and blockstore contain only the FEC set before the previous one
11440        erasure_metas.insert(erasure_set_0, WorkingEntry::Clean(erasure_meta_0));
11441        blockstore
11442            .put_erasure_meta(erasure_set_0, &erasure_meta_0)
11443            .unwrap();
11444        assert_eq!(
11445            blockstore
11446                .previous_erasure_set(erasure_set, &erasure_metas)
11447                .unwrap(),
11448            None
11449        );
11450
11451        // Erasure metas contain the previous FEC set; blockstore only contains the older one
11452        erasure_metas.insert(erasure_set_prev, WorkingEntry::Dirty(erasure_meta_prev));
11453        assert_eq!(
11454            blockstore
11455                .previous_erasure_set(erasure_set, &erasure_metas)
11456                .unwrap()
11457                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
11458            Some((erasure_set_prev, erasure_meta_prev))
11459        );
11460
11461        // Erasure metas only contain the older one; blockstore has the previous FEC set
11462        erasure_metas.remove(&erasure_set_prev);
11463        blockstore
11464            .put_erasure_meta(erasure_set_prev, &erasure_meta_prev)
11465            .unwrap();
11466        assert_eq!(
11467            blockstore
11468                .previous_erasure_set(erasure_set, &erasure_metas)
11469                .unwrap()
11470                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
11471            Some((erasure_set_prev, erasure_meta_prev))
11472        );
11473
11474        // Both contain the previous fec set
11475        erasure_metas.insert(erasure_set_prev, WorkingEntry::Clean(erasure_meta_prev));
11476        assert_eq!(
11477            blockstore
11478                .previous_erasure_set(erasure_set, &erasure_metas)
11479                .unwrap()
11480                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
11481            Some((erasure_set_prev, erasure_meta_prev))
11482        );
11483
11484        // Works even if the previous fec set has index 0
11485        assert_eq!(
11486            blockstore
11487                .previous_erasure_set(erasure_set_prev, &erasure_metas)
11488                .unwrap()
11489                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
11490            Some((erasure_set_0, erasure_meta_0))
11491        );
11492        erasure_metas.remove(&erasure_set_0);
11493        assert_eq!(
11494            blockstore
11495                .previous_erasure_set(erasure_set_prev, &erasure_metas)
11496                .unwrap()
11497                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
11498            Some((erasure_set_0, erasure_meta_0))
11499        );
11500
11501        // Does not cross slot boundary
11502        let ledger_path = get_tmp_ledger_path_auto_delete!();
11503        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11504        erasure_metas.clear();
11505        erasure_metas.insert(
11506            erasure_set_prev_slot,
11507            WorkingEntry::Dirty(erasure_meta_prev_slot),
11508        );
11509        assert_eq!(
11510            erasure_meta_prev_slot.next_fec_set_index().unwrap(),
11511            fec_set_index
11512        );
11513        assert_eq!(
11514            blockstore
11515                .previous_erasure_set(erasure_set, &erasure_metas)
11516                .unwrap(),
11517            None,
11518        );
11519        erasure_metas.insert(
11520            erasure_set_prev_slot,
11521            WorkingEntry::Clean(erasure_meta_prev_slot),
11522        );
11523        blockstore
11524            .put_erasure_meta(erasure_set_prev_slot, &erasure_meta_prev_slot)
11525            .unwrap();
11526        assert_eq!(
11527            blockstore
11528                .previous_erasure_set(erasure_set, &erasure_metas)
11529                .unwrap(),
11530            None,
11531        );
11532    }
11533
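    // The chained merkle root tests below verify that a shred whose chained merkle
    // root disagrees with the merkle root of the adjacent FEC set is surfaced as
    // PossibleDuplicateShred::ChainedMerkleRootConflict.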
11534    #[test]
11535    fn test_chained_merkle_root_consistency_backwards() {
11536        // Insert a coding shred, then consistent data and coding shreds from the next FEC set
11537        let ledger_path = get_tmp_ledger_path_auto_delete!();
11538        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11539
11540        let parent_slot = 0;
11541        let slot = 1;
11542        let fec_set_index = 0;
11543        let (data_shreds, coding_shreds, leader_schedule) =
11544            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11545        let coding_shred = coding_shreds[0].clone();
11546        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11547
11548        assert!(blockstore
11549            .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
11550            .is_empty());
11551
11552        let merkle_root = coding_shred.merkle_root().unwrap();
11553
11554        // Correctly chained merkle
11555        let (data_shreds, coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
11556            slot,
11557            parent_slot,
11558            10,
11559            next_fec_set_index,
11560            Some(merkle_root),
11561        );
11562        let data_shred = data_shreds[0].clone();
11563        let coding_shred = coding_shreds[0].clone();
11564        assert!(blockstore
11565            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11566            .is_empty());
11567        assert!(blockstore
11568            .insert_shred_return_duplicate(data_shred, &leader_schedule,)
11569            .is_empty());
11570    }
11571
11572    #[test]
11573    fn test_chained_merkle_root_consistency_forwards() {
11574        // Insert a coding shred, then a consistent coding shred from the previous FEC set
11575        let ledger_path = get_tmp_ledger_path_auto_delete!();
11576        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11577
11578        let parent_slot = 0;
11579        let slot = 1;
11580        let fec_set_index = 0;
11581        let (data_shreds, coding_shreds, leader_schedule) =
11582            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11583        let coding_shred = coding_shreds[0].clone();
11584        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11585
11586        // Correctly chained merkle
11587        let merkle_root = coding_shred.merkle_root().unwrap();
11588        let (_, next_coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
11589            slot,
11590            parent_slot,
11591            10,
11592            next_fec_set_index,
11593            Some(merkle_root),
11594        );
11595        let next_coding_shred = next_coding_shreds[0].clone();
11596
11597        assert!(blockstore
11598            .insert_shred_return_duplicate(next_coding_shred, &leader_schedule,)
11599            .is_empty());
11600
11601        // Insert previous FEC set
11602        assert!(blockstore
11603            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11604            .is_empty());
11605    }
11606
11607    #[test]
11608    fn test_chained_merkle_root_across_slots_backwards() {
11609        let ledger_path = get_tmp_ledger_path_auto_delete!();
11610        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11611
11612        let parent_slot = 0;
11613        let slot = 1;
11614        let fec_set_index = 0;
11615        let (data_shreds, _, leader_schedule) =
11616            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11617        let data_shred = data_shreds[0].clone();
11618
11619        assert!(blockstore
11620            .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
11621            .is_empty());
11622
11623        // Incorrectly chained merkle for next slot
11624        let merkle_root = Hash::new_unique();
11625        assert!(merkle_root != data_shred.merkle_root().unwrap());
11626        let (next_slot_data_shreds, next_slot_coding_shreds, leader_schedule) =
11627            setup_erasure_shreds_with_index_and_chained_merkle(
11628                slot + 1,
11629                slot,
11630                10,
11631                fec_set_index,
11632                Some(merkle_root),
11633            );
11634        let next_slot_data_shred = next_slot_data_shreds[0].clone();
11635        let next_slot_coding_shred = next_slot_coding_shreds[0].clone();
11636        assert!(blockstore
11637            .insert_shred_return_duplicate(next_slot_coding_shred, &leader_schedule,)
11638            .is_empty());
11639        assert!(blockstore
11640            .insert_shred_return_duplicate(next_slot_data_shred, &leader_schedule)
11641            .is_empty());
11642    }
11643
11644    #[test]
11645    fn test_chained_merkle_root_across_slots_forwards() {
11646        let ledger_path = get_tmp_ledger_path_auto_delete!();
11647        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11648
11649        let parent_slot = 0;
11650        let slot = 1;
11651        let fec_set_index = 0;
11652        let (_, coding_shreds, _) =
11653            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11654        let coding_shred = coding_shreds[0].clone();
11655
11656        // Incorrectly chained merkle for next slot
11657        let merkle_root = Hash::new_unique();
11658        assert!(merkle_root != coding_shred.merkle_root().unwrap());
11659        let (next_slot_data_shreds, _, leader_schedule) =
11660            setup_erasure_shreds_with_index_and_chained_merkle(
11661                slot + 1,
11662                slot,
11663                10,
11664                fec_set_index,
11665                Some(merkle_root),
11666            );
11667        let next_slot_data_shred = next_slot_data_shreds[0].clone();
11668
11669        assert!(blockstore
11670            .insert_shred_return_duplicate(next_slot_data_shred.clone(), &leader_schedule,)
11671            .is_empty());
11672
11673        // Insert for previous slot
11674        assert!(blockstore
11675            .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11676            .is_empty());
11677    }
11678
11679    #[test]
11680    fn test_chained_merkle_root_inconsistency_backwards_insert_code() {
11681        // Insert a coding shred, then an inconsistent coding shred, then an inconsistent data shred from the next FEC set
11682        let ledger_path = get_tmp_ledger_path_auto_delete!();
11683        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11684
11685        let parent_slot = 0;
11686        let slot = 1;
11687        let fec_set_index = 0;
11688        let (data_shreds, coding_shreds, leader_schedule) =
11689            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11690        let coding_shred_previous = coding_shreds[0].clone();
11691        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11692
11693        assert!(blockstore
11694            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
11695            .is_empty());
11696
11697        // Incorrectly chained merkle
11698        let merkle_root = Hash::new_unique();
11699        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
11700        let (data_shreds, coding_shreds, leader_schedule) =
11701            setup_erasure_shreds_with_index_and_chained_merkle(
11702                slot,
11703                parent_slot,
11704                10,
11705                next_fec_set_index,
11706                Some(merkle_root),
11707            );
11708        let data_shred = data_shreds[0].clone();
11709        let coding_shred = coding_shreds[0].clone();
11710        let duplicate_shreds =
11711            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
11712        assert_eq!(duplicate_shreds.len(), 1);
11713        assert_eq!(
11714            duplicate_shreds[0],
11715            PossibleDuplicateShred::ChainedMerkleRootConflict(
11716                coding_shred,
11717                coding_shred_previous.into_payload()
11718            )
11719        );
11720
11721        // Should not check again, even though this shred conflicts as well
11722        assert!(blockstore
11723            .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
11724            .is_empty());
11725    }
11726
11727    #[test]
11728    fn test_chained_merkle_root_inconsistency_backwards_insert_data() {
11729        // Insert a coding shred, then an inconsistent data shred, then an inconsistent coding shred from the next FEC set
11730        let ledger_path = get_tmp_ledger_path_auto_delete!();
11731        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11732
11733        let parent_slot = 0;
11734        let slot = 1;
11735        let fec_set_index = 0;
11736        let (data_shreds, coding_shreds, leader_schedule) =
11737            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11738        let coding_shred_previous = coding_shreds[0].clone();
11739        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11740
11741        assert!(blockstore
11742            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
11743            .is_empty());
11744
11745        // Incorrectly chained merkle
11746        let merkle_root = Hash::new_unique();
11747        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
11748        let (data_shreds, coding_shreds, leader_schedule) =
11749            setup_erasure_shreds_with_index_and_chained_merkle(
11750                slot,
11751                parent_slot,
11752                10,
11753                next_fec_set_index,
11754                Some(merkle_root),
11755            );
11756        let data_shred = data_shreds[0].clone();
11757        let coding_shred = coding_shreds[0].clone();
11758
11759        let duplicate_shreds =
11760            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);
11761        assert_eq!(duplicate_shreds.len(), 1);
11762        assert_eq!(
11763            duplicate_shreds[0],
11764            PossibleDuplicateShred::ChainedMerkleRootConflict(
11765                data_shred,
11766                coding_shred_previous.into_payload(),
11767            )
11768        );
11769        // Should not check again, even though this shred conflicts as well
11770        assert!(blockstore
11771            .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
11772            .is_empty());
11773    }
11774
11775    #[test]
11776    fn test_chained_merkle_root_inconsistency_forwards() {
11777        // Insert a data shred, then an inconsistent coding shred from the previous FEC set
11778        let ledger_path = get_tmp_ledger_path_auto_delete!();
11779        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11780
11781        let parent_slot = 0;
11782        let slot = 1;
11783        let fec_set_index = 0;
11784        let (data_shreds, coding_shreds, leader_schedule) =
11785            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11786        let coding_shred = coding_shreds[0].clone();
11787        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11788
11789        // Incorrectly chained merkle
11790        let merkle_root = Hash::new_unique();
11791        assert!(merkle_root != coding_shred.merkle_root().unwrap());
11792        let (next_data_shreds, _, leader_schedule_next) =
11793            setup_erasure_shreds_with_index_and_chained_merkle(
11794                slot,
11795                parent_slot,
11796                10,
11797                next_fec_set_index,
11798                Some(merkle_root),
11799            );
11800        let next_data_shred = next_data_shreds[0].clone();
11801
11802        assert!(blockstore
11803            .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next,)
11804            .is_empty());
11805
11806        // Insert previous FEC set
11807        let duplicate_shreds =
11808            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
11809
11810        assert_eq!(duplicate_shreds.len(), 1);
11811        assert_eq!(
11812            duplicate_shreds[0],
11813            PossibleDuplicateShred::ChainedMerkleRootConflict(
11814                coding_shred,
11815                next_data_shred.into_payload(),
11816            )
11817        );
11818    }
11819
11820    #[test]
11821    fn test_chained_merkle_root_inconsistency_both() {
11822        // Insert a coding shred from fec_set - 1, and a data shred from fec_set + 10
11823        // Then insert an inconsistent data shred from fec_set, and finally an
11824        // inconsistent coding shred from fec_set
11825        let ledger_path = get_tmp_ledger_path_auto_delete!();
11826        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11827
11828        let parent_slot = 0;
11829        let slot = 1;
11830        let prev_fec_set_index = 0;
11831        let (prev_data_shreds, prev_coding_shreds, leader_schedule_prev) =
11832            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
11833        let prev_coding_shred = prev_coding_shreds[0].clone();
11834        let fec_set_index = prev_fec_set_index + prev_data_shreds.len() as u32;
11835
11836        // Incorrectly chained merkle
11837        let merkle_root = Hash::new_unique();
11838        assert!(merkle_root != prev_coding_shred.merkle_root().unwrap());
11839        let (data_shreds, coding_shreds, leader_schedule) =
11840            setup_erasure_shreds_with_index_and_chained_merkle(
11841                slot,
11842                parent_slot,
11843                10,
11844                fec_set_index,
11845                Some(merkle_root),
11846            );
11847        let data_shred = data_shreds[0].clone();
11848        let coding_shred = coding_shreds[0].clone();
11849        let next_fec_set_index = fec_set_index + prev_data_shreds.len() as u32;
11850
11851        // Incorrectly chained merkle
11852        let merkle_root = Hash::new_unique();
11853        assert!(merkle_root != data_shred.merkle_root().unwrap());
11854        let (next_data_shreds, _, leader_schedule_next) =
11855            setup_erasure_shreds_with_index_and_chained_merkle(
11856                slot,
11857                parent_slot,
11858                10,
11859                next_fec_set_index,
11860                Some(merkle_root),
11861            );
11862        let next_data_shred = next_data_shreds[0].clone();
11863
11864        assert!(blockstore
11865            .insert_shred_return_duplicate(prev_coding_shred.clone(), &leader_schedule_prev,)
11866            .is_empty());
11867
11868        assert!(blockstore
11869            .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next)
11870            .is_empty());
11871
11872        // Insert data shred
11873        let duplicate_shreds =
11874            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);
11875
11876        // Only the backwards check will be performed
11877        assert_eq!(duplicate_shreds.len(), 1);
11878        assert_eq!(
11879            duplicate_shreds[0],
11880            PossibleDuplicateShred::ChainedMerkleRootConflict(
11881                data_shred,
11882                prev_coding_shred.into_payload(),
11883            )
11884        );
11885
11886        // Insert coding shred
11887        let duplicate_shreds =
11888            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
11889
11890        // Now the forwards check will be performed
11891        assert_eq!(duplicate_shreds.len(), 1);
11892        assert_eq!(
11893            duplicate_shreds[0],
11894            PossibleDuplicateShred::ChainedMerkleRootConflict(
11895                coding_shred,
11896                next_data_shred.into_payload(),
11897            )
11898        );
11899    }
11900
11901    #[test]
11902    fn test_chained_merkle_root_upgrade_inconsistency_backwards() {
11903        // Insert a coding shred (with an old erasure meta and no merkle root meta) then inconsistent shreds from the next FEC set
11904        let ledger_path = get_tmp_ledger_path_auto_delete!();
11905        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11906
11907        let parent_slot = 0;
11908        let slot = 1;
11909        let fec_set_index = 0;
11910        let (data_shreds, coding_shreds, leader_schedule) =
11911            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11912        let coding_shred_previous = coding_shreds[1].clone();
11913        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11914
11915        assert!(blockstore
11916            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
11917            .is_empty());
11918
11919        // Clear the first received coding shred index and remove the merkle root meta to
11920        // simulate this insertion coming from an older version.
11921        let mut erasure_meta = blockstore
11922            .erasure_meta(coding_shred_previous.erasure_set())
11923            .unwrap()
11924            .unwrap();
11925        erasure_meta.clear_first_received_coding_shred_index();
11926        blockstore
11927            .put_erasure_meta(coding_shred_previous.erasure_set(), &erasure_meta)
11928            .unwrap();
11929        let mut write_batch = blockstore.get_write_batch().unwrap();
11930        blockstore
11931            .merkle_root_meta_cf
11932            .delete_range_in_batch(&mut write_batch, slot, slot)
11933            .unwrap();
11934        blockstore.write_batch(write_batch).unwrap();
11935        assert!(blockstore
11936            .merkle_root_meta(coding_shred_previous.erasure_set())
11937            .unwrap()
11938            .is_none());
11939
11940        // Add an incorrectly chained merkle root from the next set. Although incorrectly
11941        // chained, the duplicate check is skipped because the first received coding shred index is missing
11942        let merkle_root = Hash::new_unique();
11943        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
11944        let (data_shreds, coding_shreds, leader_schedule) =
11945            setup_erasure_shreds_with_index_and_chained_merkle(
11946                slot,
11947                parent_slot,
11948                10,
11949                next_fec_set_index,
11950                Some(merkle_root),
11951            );
11952        let data_shred = data_shreds[0].clone();
11953        let coding_shred = coding_shreds[0].clone();
11954        assert!(blockstore
11955            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
11956            .is_empty());
11957        assert!(blockstore
11958            .insert_shred_return_duplicate(data_shred, &leader_schedule,)
11959            .is_empty());
11960    }
11961
11962    #[test]
11963    fn test_chained_merkle_root_upgrade_inconsistency_forwards() {
11964        // Insert a data shred (without a merkle root), then an inconsistent coding shred from the previous FEC set.
11965        let ledger_path = get_tmp_ledger_path_auto_delete!();
11966        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11967
11968        let parent_slot = 0;
11969        let slot = 1;
11970        let fec_set_index = 0;
11971        let (data_shreds, coding_shreds, leader_schedule) =
11972            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11973        let coding_shred = coding_shreds[0].clone();
11974        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11975
11976        // Incorrectly chained merkle
11977        let merkle_root = Hash::new_unique();
11978        assert!(merkle_root != coding_shred.merkle_root().unwrap());
11979        let (next_data_shreds, next_coding_shreds, leader_schedule_next) =
11980            setup_erasure_shreds_with_index_and_chained_merkle(
11981                slot,
11982                parent_slot,
11983                10,
11984                next_fec_set_index,
11985                Some(merkle_root),
11986            );
11987        let next_data_shred = next_data_shreds[0].clone();
11988
11989        assert!(blockstore
11990            .insert_shred_return_duplicate(next_data_shred, &leader_schedule_next,)
11991            .is_empty());
11992
11993        // Remove the merkle root meta in order to simulate this blockstore originating from
11994        // an older version.
11995        let mut write_batch = blockstore.get_write_batch().unwrap();
11996        blockstore
11997            .merkle_root_meta_cf
11998            .delete_range_in_batch(&mut write_batch, slot, slot)
11999            .unwrap();
12000        blockstore.write_batch(write_batch).unwrap();
12001        assert!(blockstore
12002            .merkle_root_meta(next_coding_shreds[0].erasure_set())
12003            .unwrap()
12004            .is_none());
12005
12006        // Insert the previous FEC set; although incorrectly chained, the duplicate check
12007        // is skipped because the merkle root meta is missing.
12008        assert!(blockstore
12009            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
12010            .is_empty());
12011    }
12012
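    // `check_last_fec_set` should return the merkle root of the last FEC set for a
    // full slot, and error on a missing slot meta, unknown last index, or missing shreds.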
12013    #[test]
12014    fn test_check_last_fec_set() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;

        let fec_set_index = 30;
        let (data_shreds, _, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        let total_shreds = fec_set_index as u64 + data_shreds.len() as u64;

        // The FEC set should be padded out to the full block size
        assert_eq!(data_shreds.len(), DATA_SHREDS_PER_FEC_BLOCK);

        // Missing slot meta
        assert_matches!(
            blockstore.check_last_fec_set(0),
            Err(BlockstoreError::SlotUnavailable)
        );

        // Incomplete slot
        blockstore
            .insert_shreds(
                data_shreds[0..DATA_SHREDS_PER_FEC_BLOCK - 1].to_vec(),
                None,
                false,
            )
            .unwrap();
        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert!(meta.last_index.is_none());
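        // The withheld shred is the one that marks the end of the slot, so
        // the last index is unknown and the final FEC set cannot be located.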
        assert_matches!(
            blockstore.check_last_fec_set(slot),
            Err(BlockstoreError::UnknownLastIndex(_))
        );
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Missing shreds
        blockstore
            .insert_shreds(data_shreds[1..].to_vec(), None, false)
            .unwrap();
        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert_eq!(meta.last_index, Some(total_shreds - 1));
        assert_matches!(
            blockstore.check_last_fec_set(slot),
            Err(BlockstoreError::MissingShred(_, _))
        );
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Full slot
        let block_id = data_shreds[0].merkle_root().unwrap();
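        // Every shred in a merkle FEC set commits to the same merkle root,
        // so the first data shred's root serves as the expected block id.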
        blockstore.insert_shreds(data_shreds, None, false).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert_eq!(results.last_fec_set_merkle_root, Some(block_id));
        assert!(results.is_retransmitter_signed);
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Slot has fewer than DATA_SHREDS_PER_FEC_BLOCK shreds in total
        let mut fec_set_index = 0;
        let (first_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                10,
                fec_set_index,
                None,
                false,
            );
        let merkle_root = first_data_shreds[0].merkle_root().unwrap();
        fec_set_index += first_data_shreds.len() as u32;
        let (last_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                40,
                fec_set_index,
                Some(merkle_root),
                false, // No padding
            );
        let last_index = last_data_shreds.last().unwrap().index();
        let total_shreds = first_data_shreds.len() + last_data_shreds.len();
        assert!(total_shreds < DATA_SHREDS_PER_FEC_BLOCK);
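        // Too few shreds to form a full final FEC set: the check should
        // report no merkle root and no retransmitter signature.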
        blockstore
            .insert_shreds(first_data_shreds, None, false)
            .unwrap();
        blockstore
            .insert_shreds(last_data_shreds, None, false)
            .unwrap();
        // Manually update the last index
        let mut slot_meta = blockstore.meta(slot).unwrap().unwrap();
        slot_meta.last_index = Some(last_index as u64);
        blockstore.put_meta(slot, &slot_meta).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert!(results.last_fec_set_merkle_root.is_none());
        assert!(!results.is_retransmitter_signed);
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Slot has more than DATA_SHREDS_PER_FEC_BLOCK shreds in total, but
        // the last FEC set has fewer
        let mut fec_set_index = 0;
        let (first_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                100,
                fec_set_index,
                None,
                false,
            );
        let merkle_root = first_data_shreds[0].merkle_root().unwrap();
        fec_set_index += first_data_shreds.len() as u32;
        let (last_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                100,
                fec_set_index,
                Some(merkle_root),
                false, // No padding
            );
        let last_index = last_data_shreds.last().unwrap().index();
        let total_shreds = first_data_shreds.len() + last_data_shreds.len();
        assert!(last_data_shreds.len() < DATA_SHREDS_PER_FEC_BLOCK);
        assert!(total_shreds > DATA_SHREDS_PER_FEC_BLOCK);
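        // The slot as a whole is large enough, but the final FEC set itself
        // is short, so the check should again come back empty and unsigned.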
        blockstore
            .insert_shreds(first_data_shreds, None, false)
            .unwrap();
        blockstore
            .insert_shreds(last_data_shreds, None, false)
            .unwrap();
        // Manually update the last index
        let mut slot_meta = blockstore.meta(slot).unwrap().unwrap();
        slot_meta.last_index = Some(last_index as u64);
        blockstore.put_meta(slot, &slot_meta).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert!(results.last_fec_set_merkle_root.is_none());
        assert!(!results.is_retransmitter_signed);
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Slot is full, but does not contain retransmitter-signed shreds
        let fec_set_index = 0;
        let (first_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                200,
                fec_set_index,
                // Do not set a merkle root, so the shreds are not retransmitter-signed
                None,
                true,
            );
        assert!(first_data_shreds.len() > DATA_SHREDS_PER_FEC_BLOCK);
        let block_id = first_data_shreds[0].merkle_root().unwrap();
        blockstore
            .insert_shreds(first_data_shreds, None, false)
            .unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert_eq!(results.last_fec_set_merkle_root, Some(block_id));
        assert!(!results.is_retransmitter_signed);
    }

    #[test]
    fn test_last_fec_set_check_results() {
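        // Two feature gates control the verdict: vote_only_full_fec_sets
        // rejects an incomplete final FEC set, and
        // vote_only_retransmitter_signed_fec_sets rejects one whose shreds
        // lack the retransmitter signature. Every combination of check
        // result and feature set is enumerated below.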
        let enabled_feature_set = FeatureSet::all_enabled();
        let disabled_feature_set = FeatureSet::default();
        let mut full_only = FeatureSet::default();
        full_only.activate(&vote_only_full_fec_sets::id(), 0);
        let mut retransmitter_only = FeatureSet::default();
        retransmitter_only.activate(&vote_only_retransmitter_signed_fec_sets::id(), 0);

        let results = LastFECSetCheckResults {
            last_fec_set_merkle_root: None,
            is_retransmitter_signed: false,
        };
        assert_matches!(
            results.get_last_fec_set_merkle_root(&enabled_feature_set),
            Err(BlockstoreProcessorError::IncompleteFinalFecSet)
        );
        assert_matches!(
            results.get_last_fec_set_merkle_root(&full_only),
            Err(BlockstoreProcessorError::IncompleteFinalFecSet)
        );
        assert_matches!(
            results.get_last_fec_set_merkle_root(&retransmitter_only),
            Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet)
        );
        assert!(results
            .get_last_fec_set_merkle_root(&disabled_feature_set)
            .unwrap()
            .is_none());

        let block_id = Hash::new_unique();
        let results = LastFECSetCheckResults {
            last_fec_set_merkle_root: Some(block_id),
            is_retransmitter_signed: false,
        };
        assert_matches!(
            results.get_last_fec_set_merkle_root(&enabled_feature_set),
            Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet)
        );
        assert_eq!(
            results.get_last_fec_set_merkle_root(&full_only).unwrap(),
            Some(block_id)
        );
        assert_matches!(
            results.get_last_fec_set_merkle_root(&retransmitter_only),
            Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet)
        );
        assert_eq!(
            results
                .get_last_fec_set_merkle_root(&disabled_feature_set)
                .unwrap(),
            Some(block_id)
        );

        let results = LastFECSetCheckResults {
            last_fec_set_merkle_root: None,
            is_retransmitter_signed: true,
        };
        assert_matches!(
            results.get_last_fec_set_merkle_root(&enabled_feature_set),
            Err(BlockstoreProcessorError::IncompleteFinalFecSet)
        );
        assert_matches!(
            results.get_last_fec_set_merkle_root(&full_only),
            Err(BlockstoreProcessorError::IncompleteFinalFecSet)
        );
        assert!(results
            .get_last_fec_set_merkle_root(&retransmitter_only)
            .unwrap()
            .is_none());
        assert!(results
            .get_last_fec_set_merkle_root(&disabled_feature_set)
            .unwrap()
            .is_none());

        let block_id = Hash::new_unique();
        let results = LastFECSetCheckResults {
            last_fec_set_merkle_root: Some(block_id),
            is_retransmitter_signed: true,
        };
        for feature_set in [
            enabled_feature_set,
            disabled_feature_set,
            full_only,
            retransmitter_only,
        ] {
            assert_eq!(
                results.get_last_fec_set_merkle_root(&feature_set).unwrap(),
                Some(block_id)
            );
        }
    }

    #[test]
    fn test_write_transaction_memos() {
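        // Memos are keyed by (signature, slot); a write followed by a read
        // with the same key should round-trip the memo string.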
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let signature: Signature = Signature::new_unique();

        blockstore
            .write_transaction_memos(&signature, 4, "test_write_transaction_memos".to_string())
            .unwrap();

        let memo = blockstore
            .read_transaction_memos(signature, 4)
            .expect("Expected to find memo");
        assert_eq!(memo, Some("test_write_transaction_memos".to_string()));
    }

    #[test]
    fn test_add_transaction_memos_to_batch() {
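        // Same round trip as above, but both memos are staged in a single
        // WriteBatch so they are committed atomically by write_batch().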
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
        let mut memos_batch = blockstore.get_write_batch().unwrap();

        blockstore
            .add_transaction_memos_to_batch(
                &signatures[0],
                4,
                "test_write_transaction_memos1".to_string(),
                &mut memos_batch,
            )
            .unwrap();

        blockstore
            .add_transaction_memos_to_batch(
                &signatures[1],
                5,
                "test_write_transaction_memos2".to_string(),
                &mut memos_batch,
            )
            .unwrap();

        blockstore.write_batch(memos_batch).unwrap();

        let memo1 = blockstore
            .read_transaction_memos(signatures[0], 4)
            .expect("Expected to find memo");
        assert_eq!(memo1, Some("test_write_transaction_memos1".to_string()));

        let memo2 = blockstore
            .read_transaction_memos(signatures[1], 5)
            .expect("Expected to find memo");
        assert_eq!(memo2, Some("test_write_transaction_memos2".to_string()));
    }

    #[test]
    fn test_write_transaction_status() {
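        // Transaction statuses are keyed by (signature, slot); the
        // distinctive fee confirms the status round-trips intact.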
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
        let keys_with_writable: Vec<(Pubkey, bool)> =
            vec![(Pubkey::new_unique(), true), (Pubkey::new_unique(), false)];
        let slot = 5;

        blockstore
            .write_transaction_status(
                slot,
                signatures[0],
                keys_with_writable
                    .iter()
                    .map(|(pubkey, writable)| (pubkey, *writable)),
                TransactionStatusMeta {
                    fee: 4200,
                    ..TransactionStatusMeta::default()
                },
                0,
            )
            .unwrap();

        let tx_status = blockstore
            .read_transaction_status((signatures[0], slot))
            .unwrap()
            .unwrap();
        assert_eq!(tx_status.fee, 4200);
    }

    #[test]
    fn test_add_transaction_status_to_batch() {
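        // Batched variant: one status per transaction is staged in a
        // WriteBatch, alternating Ok and error statuses, then committed and
        // read back.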
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
        let keys_with_writable: Vec<Vec<(Pubkey, bool)>> = (0..2)
            .map(|_| vec![(Pubkey::new_unique(), true), (Pubkey::new_unique(), false)])
            .collect();
        let slot = 5;
        let mut status_batch = blockstore.get_write_batch().unwrap();

        for (tx_idx, signature) in signatures.iter().enumerate() {
            blockstore
                .add_transaction_status_to_batch(
                    slot,
                    *signature,
                    keys_with_writable[tx_idx].iter().map(|(k, v)| (k, *v)),
                    TransactionStatusMeta {
                        fee: 5700 + tx_idx as u64,
                        status: if tx_idx % 2 == 0 {
                            Ok(())
                        } else {
                            Err(TransactionError::InsufficientFundsForFee)
                        },
                        ..TransactionStatusMeta::default()
                    },
                    tx_idx,
                    &mut status_batch,
                )
                .unwrap();
        }

        blockstore.write_batch(status_batch).unwrap();

        let tx_status1 = blockstore
            .read_transaction_status((signatures[0], slot))
            .unwrap()
            .unwrap();
        assert_eq!(tx_status1.fee, 5700);
        assert_eq!(tx_status1.status, Ok(()));

        let tx_status2 = blockstore
            .read_transaction_status((signatures[1], slot))
            .unwrap()
            .unwrap();
        assert_eq!(tx_status2.fee, 5701);
        assert_eq!(
            tx_status2.status,
            Err(TransactionError::InsufficientFundsForFee)
        );
    }
}