1#[cfg(feature = "dev-context-only-utils")]
6use trees::{Tree, TreeWalk};
7use {
8 crate::{
9 ancestor_iterator::AncestorIterator,
10 blockstore::column::{columns as cf, Column, ColumnIndexDeprecation, TypedColumn},
11 blockstore_db::{IteratorDirection, IteratorMode, LedgerColumn, Rocks, WriteBatch},
12 blockstore_meta::*,
13 blockstore_options::{
14 BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
15 },
16 blockstore_processor::BlockstoreProcessorError,
17 leader_schedule_cache::LeaderScheduleCache,
18 next_slots_iterator::NextSlotsIterator,
19 shred::{
20 self, ErasureSetId, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, ShredId,
21 ShredType, Shredder, DATA_SHREDS_PER_FEC_BLOCK,
22 },
23 slot_stats::{ShredSource, SlotsStats},
24 transaction_address_lookup_table_scanner::scan_transaction,
25 },
26 agave_feature_set::FeatureSet,
27 agave_snapshots::unpack_genesis_archive,
28 assert_matches::debug_assert_matches,
29 bincode::{deserialize, serialize},
30 crossbeam_channel::{bounded, Receiver, Sender, TrySendError},
31 dashmap::DashSet,
32 itertools::Itertools,
33 log::*,
34 rand::Rng,
35 rayon::iter::{IntoParallelIterator, ParallelIterator},
36 rocksdb::{DBRawIterator, LiveFile},
37 solana_account::ReadableAccount,
38 solana_address_lookup_table_interface::state::AddressLookupTable,
39 solana_clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND},
40 solana_entry::entry::{create_ticks, Entry},
41 solana_genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE},
42 solana_hash::Hash,
43 solana_keypair::Keypair,
44 solana_measure::measure::Measure,
45 solana_metrics::datapoint_error,
46 solana_pubkey::Pubkey,
47 solana_runtime::bank::Bank,
48 solana_signature::Signature,
49 solana_signer::Signer,
50 solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta},
51 solana_streamer::{evicting_sender::EvictingSender, streamer::ChannelSend},
52 solana_time_utils::timestamp,
53 solana_transaction::versioned::{
54 sanitized::SanitizedVersionedTransaction, VersionedTransaction,
55 },
56 solana_transaction_status::{
57 ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, Rewards,
58 RewardsAndNumPartitions, TransactionStatusMeta, TransactionWithStatusMeta,
59 VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries,
60 VersionedTransactionWithStatusMeta,
61 },
62 std::{
63 borrow::Cow,
64 cell::RefCell,
65 cmp,
66 collections::{
67 btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap,
68 HashSet, VecDeque,
69 },
70 convert::TryInto,
71 fmt::Write,
72 fs::{self, File},
73 io::{Error as IoError, ErrorKind},
74 ops::{Bound, Range},
75 path::{Path, PathBuf},
76 rc::Rc,
77 sync::{
78 atomic::{AtomicBool, AtomicU64, Ordering},
79 Arc, Mutex, RwLock,
80 },
81 },
82 tar,
83 tempfile::{Builder, TempDir},
84 thiserror::Error,
85};
86
87pub mod blockstore_purge;
88pub mod column;
89pub mod error;
90#[cfg(test)]
91use static_assertions::const_assert_eq;
92pub use {
93 crate::{
94 blockstore::error::{BlockstoreError, Result},
95 blockstore_db::{default_num_compaction_threads, default_num_flush_threads},
96 blockstore_meta::{OptimisticSlotMetaVersioned, SlotMeta},
97 blockstore_metrics::BlockstoreInsertionMetrics,
98 },
99 blockstore_purge::PurgeType,
100 rocksdb::properties as RocksProperties,
101};
102
/// Maximum number of pending wake-up signals buffered in the replay signal channel.
pub const MAX_REPLAY_WAKE_UP_SIGNALS: usize = 1;
/// Maximum number of completed-slot batches buffered in the completed-slots channel.
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;

/// Sending half of the channel used to announce batches of completed slots.
pub type CompletedSlotsSender = Sender<Vec<Slot>>;
/// Receiving half of the channel used to announce batches of completed slots.
pub type CompletedSlotsReceiver = Receiver<Vec<Slot>>;

/// Shred-index ranges (end-exclusive) of completed data sets within a slot.
type CompletedRanges = Vec<Range<u32>>;
117
/// Result of a signatures-for-address query.
#[derive(Default)]
pub struct SignatureInfosForAddress {
    // Confirmed transaction statuses (with signatures) found for the address.
    pub infos: Vec<ConfirmedTransactionStatusWithSignature>,
    // Presumably whether the `before` anchor signature was located during the
    // scan — TODO(review): confirm against callers.
    pub found_before: bool,
}
123
/// Module-internal reasons a data-shred insertion can fail.
#[derive(Error, Debug)]
enum InsertDataShredError {
    /// The same shred is already stored.
    #[error("Data shred already exists in Blockstore")]
    Exists,
    /// The shred failed validation checks.
    #[error("Invalid data shred")]
    InvalidShred,
    /// An underlying database error occurred.
    #[error(transparent)]
    BlockstoreError(#[from] BlockstoreError),
}
133
/// Shreds that may serve as evidence of a duplicate block for a slot.
///
/// Each conflict variant carries the newly observed shred together with the
/// payload of the conflicting shred it was compared against.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum PossibleDuplicateShred {
    /// The shred is already present in the blockstore.
    Exists(Shred),
    /// Conflict with the slot's recorded last index.
    LastIndexConflict(Shred, shred::Payload),
    /// Conflict with the erasure meta of the shred's erasure set.
    ErasureConflict(Shred, shred::Payload),
    /// Merkle root conflict within the same erasure set.
    MerkleRootConflict(Shred, shred::Payload),
    /// Chained merkle root conflict with an adjacent erasure set.
    ChainedMerkleRootConflict(Shred, shred::Payload),
}
158
159impl PossibleDuplicateShred {
160 pub fn slot(&self) -> Slot {
161 match self {
162 Self::Exists(shred) => shred.slot(),
163 Self::LastIndexConflict(shred, _) => shred.slot(),
164 Self::ErasureConflict(shred, _) => shred.slot(),
165 Self::MerkleRootConflict(shred, _) => shred.slot(),
166 Self::ChainedMerkleRootConflict(shred, _) => shred.slot(),
167 }
168 }
169}
170
/// Wrapper tracking whether an in-memory entry differs from its stored copy.
enum WorkingEntry<T> {
    /// Value was modified and must be written back (see `should_write`).
    Dirty(T),
    /// Value matches storage; no write-back needed.
    Clean(T),
}
175
176impl<T> WorkingEntry<T> {
177 fn should_write(&self) -> bool {
178 matches!(self, Self::Dirty(_))
179 }
180}
181
182impl<T> AsRef<T> for WorkingEntry<T> {
183 fn as_ref(&self) -> &T {
184 match self {
185 Self::Dirty(value) => value,
186 Self::Clean(value) => value,
187 }
188 }
189}
190
/// Results of checking a slot's last FEC (erasure) set.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct LastFECSetCheckResults {
    // Merkle root of the last FEC set; None marks the set as incomplete
    // (see `get_last_fec_set_merkle_root`).
    last_fec_set_merkle_root: Option<Hash>,
    // Whether the last FEC set carries a retransmitter signature.
    is_retransmitter_signed: bool,
}
196
197impl LastFECSetCheckResults {
198 fn get_last_fec_set_merkle_root(
199 &self,
200 feature_set: &FeatureSet,
201 ) -> std::result::Result<Option<Hash>, BlockstoreProcessorError> {
202 if self.last_fec_set_merkle_root.is_none() {
203 return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
204 } else if feature_set
205 .is_active(&agave_feature_set::vote_only_retransmitter_signed_fec_sets::id())
206 && !self.is_retransmitter_signed
207 {
208 return Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet);
209 }
210 Ok(self.last_fec_set_merkle_root)
211 }
212}
213
/// Outcome of a shred-insertion pass.
pub struct InsertResults {
    // Data sets completed by the inserted shreds.
    completed_data_set_infos: Vec<CompletedDataSetInfo>,
    // Shreds flagged as possible duplicate-block evidence.
    duplicate_shreds: Vec<PossibleDuplicateShred>,
}
218
/// Identifies a contiguous run of data shreds that forms a complete data set
/// within a slot.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CompletedDataSetInfo {
    /// Slot the data set belongs to.
    pub slot: Slot,
    /// Shred-index range (end-exclusive) covered by the data set.
    pub indices: Range<u32>,
}
235
/// A `Blockstore` bundled with the receiving ends of its notification
/// channels (produced by `Blockstore::open_with_signal`).
pub struct BlockstoreSignals {
    pub blockstore: Blockstore,
    /// Signaled when new shreds are inserted.
    pub ledger_signal_receiver: Receiver<bool>,
    /// Receives batches of newly completed slots.
    pub completed_slots_receiver: CompletedSlotsReceiver,
}
241
/// Persistent ledger storage backed by RocksDB.
pub struct Blockstore {
    // Directory the ledger lives in; the RocksDB database is a subdirectory.
    ledger_path: PathBuf,
    // Shared handle to the underlying RocksDB instance.
    db: Arc<Rocks>,
    // Typed handles to the individual RocksDB column families.
    address_signatures_cf: LedgerColumn<cf::AddressSignatures>,
    bank_hash_cf: LedgerColumn<cf::BankHash>,
    block_height_cf: LedgerColumn<cf::BlockHeight>,
    blocktime_cf: LedgerColumn<cf::Blocktime>,
    code_shred_cf: LedgerColumn<cf::ShredCode>,
    data_shred_cf: LedgerColumn<cf::ShredData>,
    dead_slots_cf: LedgerColumn<cf::DeadSlots>,
    duplicate_slots_cf: LedgerColumn<cf::DuplicateSlots>,
    erasure_meta_cf: LedgerColumn<cf::ErasureMeta>,
    index_cf: LedgerColumn<cf::Index>,
    merkle_root_meta_cf: LedgerColumn<cf::MerkleRootMeta>,
    meta_cf: LedgerColumn<cf::SlotMeta>,
    optimistic_slots_cf: LedgerColumn<cf::OptimisticSlots>,
    orphans_cf: LedgerColumn<cf::Orphans>,
    perf_samples_cf: LedgerColumn<cf::PerfSamples>,
    rewards_cf: LedgerColumn<cf::Rewards>,
    roots_cf: LedgerColumn<cf::Root>,
    transaction_memos_cf: LedgerColumn<cf::TransactionMemos>,
    transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
    transaction_status_index_cf: LedgerColumn<cf::TransactionStatusIndex>,

    // Highest slot with a primary-index entry, maintained by
    // update_highest_primary_index_slot (called on open).
    highest_primary_index_slot: RwLock<Option<Slot>>,
    // Highest rooted slot; seeded from the roots column in do_open.
    max_root: AtomicU64,
    // Serializes shred-insertion passes (taken in do_insert_shreds).
    insert_shreds_lock: Mutex<()>,
    // Senders notified when new shreds arrive (registered via
    // add_new_shred_signal — e.g. by open_with_signal).
    new_shreds_signals: Mutex<Vec<Sender<bool>>>,
    // Senders notified with batches of newly completed slots.
    completed_slots_senders: Mutex<Vec<CompletedSlotsSender>>,
    pub lowest_cleanup_slot: RwLock<Slot>,
    pub slots_stats: SlotsStats,
}
276
/// Working-set entry pairing a slot's shred `Index` with a modification flag.
pub struct IndexMetaWorkingSetEntry {
    index: Index,
    // True once an insertion touched `index`; checked before write-back.
    did_insert_occur: bool,
}
283
/// Working-set entry tracking a slot's meta during an insertion pass.
pub struct SlotMetaWorkingSetEntry {
    // The slot meta as updated by the current insertion pass.
    new_slot_meta: Rc<RefCell<SlotMeta>>,
    // The previously stored slot meta, if one existed.
    old_slot_meta: Option<SlotMeta>,
    // True once an insertion modified the slot meta; starts false (see `new`).
    did_insert_occur: bool,
}
298
/// Mutable state threaded through a single shred-insertion pass.
struct ShredInsertionTracker<'a> {
    // Shreds inserted during this pass, keyed by id; consulted before hitting
    // the database (e.g. during recovery).
    just_inserted_shreds: HashMap<ShredId, Cow<'a, Shred>>,
    // Erasure metas touched this pass. A BTreeMap so neighboring erasure sets
    // can be found via range queries (see previous_erasure_set).
    erasure_metas: BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
    // Merkle root metas touched this pass.
    merkle_root_metas: HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
    // Slot metas being updated, keyed by slot.
    slot_meta_working_set: HashMap<u64, SlotMetaWorkingSetEntry>,
    // Shred indexes being updated, keyed by slot.
    index_working_set: HashMap<u64, IndexMetaWorkingSetEntry>,
    // Shreds flagged as possible duplicate-block evidence.
    duplicate_shreds: Vec<PossibleDuplicateShred>,
    // Pending database writes accumulated over the pass.
    write_batch: WriteBatch,
    // Cumulative time spent on index-meta bookkeeping, in microseconds.
    index_meta_time_us: u64,
    // Data sets completed by this pass.
    newly_completed_data_sets: Vec<CompletedDataSetInfo>,
}
323
324impl ShredInsertionTracker<'_> {
325 fn new(shred_num: usize, write_batch: WriteBatch) -> Self {
326 Self {
327 just_inserted_shreds: HashMap::with_capacity(shred_num),
328 erasure_metas: BTreeMap::new(),
329 merkle_root_metas: HashMap::new(),
330 slot_meta_working_set: HashMap::new(),
331 index_working_set: HashMap::new(),
332 duplicate_shreds: vec![],
333 write_batch,
334 index_meta_time_us: 0,
335 newly_completed_data_sets: vec![],
336 }
337 }
338}
339
340impl SlotMetaWorkingSetEntry {
341 fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
344 Self {
345 new_slot_meta,
346 old_slot_meta,
347 did_insert_occur: false,
348 }
349 }
350}
351
/// Returns the banking-trace directory nested under the given ledger path.
pub fn banking_trace_path(path: &Path) -> PathBuf {
    let mut trace_path = path.to_path_buf();
    trace_path.push("banking_trace");
    trace_path
}
355
/// Returns the banking-retrace directory nested under the given ledger path.
pub fn banking_retrace_path(path: &Path) -> PathBuf {
    let mut retrace_path = path.to_path_buf();
    retrace_path.push("banking_retrace");
    retrace_path
}
359
360impl Blockstore {
    /// Returns the ledger directory this blockstore was opened under.
    pub fn ledger_path(&self) -> &PathBuf {
        &self.ledger_path
    }
364
    /// Returns the banking-trace directory under this blockstore's ledger path.
    pub fn banking_trace_path(&self) -> PathBuf {
        banking_trace_path(&self.ledger_path)
    }
368
    /// Returns the banking-retrace directory under this blockstore's ledger path.
    pub fn banking_retracer_path(&self) -> PathBuf {
        banking_retrace_path(&self.ledger_path)
    }
372
    /// Opens a Blockstore at `ledger_path` with default options.
    pub fn open(ledger_path: &Path) -> Result<Blockstore> {
        Self::do_open(ledger_path, BlockstoreOptions::default())
    }
377
    /// Opens a Blockstore at `ledger_path` with caller-provided options.
    pub fn open_with_options(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
        Self::do_open(ledger_path, options)
    }
381
    /// Opens (creating directories as needed) the RocksDB database beneath
    /// `ledger_path` and assembles the `Blockstore` with a typed handle for
    /// every column family.
    ///
    /// # Errors
    /// Fails if the directory cannot be created, the database cannot be
    /// opened, or post-open maintenance fails.
    fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
        fs::create_dir_all(ledger_path)?;
        let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL);

        let mut measure = Measure::start("blockstore open");
        info!("Opening blockstore at {blockstore_path:?}");
        let db = Arc::new(Rocks::open(blockstore_path, options)?);

        // One typed column handle per column family.
        let address_signatures_cf = db.column();
        let bank_hash_cf = db.column();
        let block_height_cf = db.column();
        let blocktime_cf = db.column();
        let code_shred_cf = db.column();
        let data_shred_cf = db.column();
        let dead_slots_cf = db.column();
        let duplicate_slots_cf = db.column();
        let erasure_meta_cf = db.column();
        let index_cf = db.column();
        let merkle_root_meta_cf = db.column();
        let meta_cf = db.column();
        let optimistic_slots_cf = db.column();
        let orphans_cf = db.column();
        let perf_samples_cf = db.column();
        let rewards_cf = db.column();
        let roots_cf = db.column();
        let transaction_memos_cf = db.column();
        let transaction_status_cf = db.column();
        let transaction_status_index_cf = db.column();

        // Seed max_root with the highest slot in the roots column (0 if the
        // column is empty).
        let max_root = roots_cf
            .iter(IteratorMode::End)?
            .next()
            .map(|(slot, _)| slot)
            .unwrap_or(0);
        let max_root = AtomicU64::new(max_root);

        measure.stop();
        info!("Opening blockstore done; {measure}");
        let blockstore = Blockstore {
            ledger_path: ledger_path.to_path_buf(),
            db,
            address_signatures_cf,
            bank_hash_cf,
            block_height_cf,
            blocktime_cf,
            code_shred_cf,
            data_shred_cf,
            dead_slots_cf,
            duplicate_slots_cf,
            erasure_meta_cf,
            index_cf,
            merkle_root_meta_cf,
            meta_cf,
            optimistic_slots_cf,
            orphans_cf,
            perf_samples_cf,
            rewards_cf,
            roots_cf,
            transaction_memos_cf,
            transaction_status_cf,
            transaction_status_index_cf,
            highest_primary_index_slot: RwLock::<Option<Slot>>::default(),
            new_shreds_signals: Mutex::default(),
            completed_slots_senders: Mutex::default(),
            insert_shreds_lock: Mutex::<()>::default(),
            max_root,
            lowest_cleanup_slot: RwLock::<Slot>::default(),
            slots_stats: SlotsStats::default(),
        };
        // Post-open maintenance before handing the blockstore to callers.
        blockstore.cleanup_old_entries()?;
        blockstore.update_highest_primary_index_slot()?;

        Ok(blockstore)
    }
458
459 pub fn open_with_signal(
460 ledger_path: &Path,
461 options: BlockstoreOptions,
462 ) -> Result<BlockstoreSignals> {
463 let blockstore = Self::open_with_options(ledger_path, options)?;
464 let (ledger_signal_sender, ledger_signal_receiver) = bounded(MAX_REPLAY_WAKE_UP_SIGNALS);
465 let (completed_slots_sender, completed_slots_receiver) =
466 bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL);
467
468 blockstore.add_new_shred_signal(ledger_signal_sender);
469 blockstore.add_completed_slots_signal(completed_slots_sender);
470
471 Ok(BlockstoreSignals {
472 blockstore,
473 ledger_signal_receiver,
474 completed_slots_receiver,
475 })
476 }
477
    /// Test/dev helper: populates the blockstore with the tree of slots in
    /// `forks`.
    ///
    /// For each slot not already present (or still orphaned), generates tick
    /// entries chained from its parent's blockhash and inserts the resulting
    /// shreds. When `is_orphan` is true, the tree's root slot is skipped so its
    /// descendants are stored without a connected parent. When
    /// `is_slot_complete` is false, the final entry of each slot is dropped.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn add_tree(
        &self,
        forks: Tree<Slot>,
        is_orphan: bool,
        is_slot_complete: bool,
        num_ticks: u64,
        starting_hash: Hash,
    ) {
        let mut walk = TreeWalk::from(forks);
        // Maps slot -> hash of its last entry, used to chain child slots.
        let mut blockhashes = HashMap::new();
        while let Some(visit) = walk.get() {
            let slot = *visit.node().data();
            if self.meta(slot).unwrap().is_some() && self.orphan(slot).unwrap().is_none() {
                // Slot already exists and is not an orphan; nothing to insert.
                walk.forward();
                continue;
            }
            let parent = walk.get_parent().map(|n| *n.data());
            if parent.is_some() || !is_orphan {
                let parent_hash = parent
                    // No recorded blockhash for the parent (e.g. the tree
                    // root); fall back to starting_hash.
                    .and_then(|parent| blockhashes.get(&parent))
                    .unwrap_or(&starting_hash);
                // Scale tick count by the slot distance to the parent
                // (minimum one slot's worth).
                let mut entries = create_ticks(
                    num_ticks * (std::cmp::max(1, slot - parent.unwrap_or(slot))),
                    0,
                    *parent_hash,
                );
                blockhashes.insert(slot, entries.last().unwrap().hash);
                if !is_slot_complete {
                    // Drop the last entry so the slot stays incomplete.
                    entries.pop().unwrap();
                }
                let shreds = entries_to_test_shreds(
                    &entries,
                    slot,
                    parent.unwrap_or(slot),
                    is_slot_complete,
                    0,
                );
                self.insert_shreds(shreds, None, false).unwrap();
            }
            walk.forward();
        }
    }
524
525 pub fn destroy(ledger_path: &Path) -> Result<()> {
530 fs::create_dir_all(ledger_path)?;
532 Rocks::destroy(&Path::new(ledger_path).join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL))
533 }
534
    /// Returns the `SlotMeta` for `slot`, if one is stored.
    pub fn meta(&self, slot: Slot) -> Result<Option<SlotMeta>> {
        self.meta_cf.get(slot)
    }
539
540 pub fn is_full(&self, slot: Slot) -> bool {
542 if let Ok(Some(meta)) = self.meta_cf.get(slot) {
543 return meta.is_full();
544 }
545 false
546 }
547
    /// Returns the `ErasureMeta` for the given erasure set, if stored.
    fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result<Option<ErasureMeta>> {
        let (slot, fec_set_index) = erasure_set.store_key();
        // The column key widens fec_set_index to u64.
        self.erasure_meta_cf.get((slot, u64::from(fec_set_index)))
    }
552
    /// Test-only helper that writes an `ErasureMeta` directly to the column.
    #[cfg(test)]
    fn put_erasure_meta(
        &self,
        erasure_set: ErasureSetId,
        erasure_meta: &ErasureMeta,
    ) -> Result<()> {
        let (slot, fec_set_index) = erasure_set.store_key();
        self.erasure_meta_cf.put_bytes(
            (slot, u64::from(fec_set_index)),
            // Test-only: panicking on serialization failure is acceptable.
            &bincode::serialize(erasure_meta).unwrap(),
        )
    }
565
    /// Finds the erasure set in the same slot that immediately precedes
    /// `erasure_set`, i.e. the set whose `next_fec_set_index` equals this
    /// set's fec_set_index.
    ///
    /// Checks the in-memory working set first, then falls back to the erasure
    /// meta column. Returns `Ok(None)` when no such set exists.
    fn previous_erasure_set<'a>(
        &'a self,
        erasure_set: ErasureSetId,
        erasure_metas: &'a BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
    ) -> Result<Option<(ErasureSetId, Cow<'a, ErasureMeta>)>> {
        let (slot, fec_set_index) = erasure_set.store_key();

        // Range-query the working set for the greatest erasure set in this
        // slot strictly below `erasure_set`.
        let candidate_erasure_entry = erasure_metas
            .range((
                Bound::Included(ErasureSetId::new(slot, 0)),
                Bound::Excluded(erasure_set),
            ))
            .next_back();
        let candidate_erasure_set_and_meta = candidate_erasure_entry
            // Keep the candidate only if it chains directly into this set.
            .filter(|(_, candidate_erasure_meta)| {
                candidate_erasure_meta.as_ref().next_fec_set_index() == Some(fec_set_index)
            })
            .map(|(erasure_set, erasure_meta)| {
                (*erasure_set, Cow::Borrowed(erasure_meta.as_ref()))
            });
        if candidate_erasure_set_and_meta.is_some() {
            return Ok(candidate_erasure_set_and_meta);
        }

        // Fall back to the database: reverse-iterate from this key to the
        // nearest entry in the same slot with a smaller fec_set_index.
        let Some(((_, candidate_fec_set_index), candidate_erasure_meta)) = self
            .erasure_meta_cf
            .iter(IteratorMode::From(
                (slot, u64::from(fec_set_index)),
                IteratorDirection::Reverse,
            ))?
            // Skip the entry for `erasure_set` itself.
            .find(|((_, candidate_fec_set_index), _)| {
                *candidate_fec_set_index != u64::from(fec_set_index)
            })
            // Reject candidates from a different slot.
            .filter(|((candidate_slot, _), _)| *candidate_slot == slot)
        else {
            return Ok(None);
        };
        let candidate_fec_set_index = u32::try_from(candidate_fec_set_index)
            .expect("fec_set_index from a previously inserted shred should fit in u32");
        let candidate_erasure_set = ErasureSetId::new(slot, candidate_fec_set_index);
        let candidate_erasure_meta: ErasureMeta = deserialize(candidate_erasure_meta.as_ref())?;

        // The candidate must chain directly into `erasure_set`.
        let Some(next_fec_set_index) = candidate_erasure_meta.next_fec_set_index() else {
            return Err(BlockstoreError::InvalidErasureConfig);
        };
        if next_fec_set_index == fec_set_index {
            return Ok(Some((
                candidate_erasure_set,
                Cow::Owned(candidate_erasure_meta),
            )));
        }
        Ok(None)
    }
630
    /// Returns the `MerkleRootMeta` for the given erasure set, if stored.
    fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result<Option<MerkleRootMeta>> {
        self.merkle_root_meta_cf.get(erasure_set.store_key())
    }
634
    /// Returns the orphans-column entry for `slot`, if present.
    pub fn orphan(&self, slot: Slot) -> Result<Option<bool>> {
        self.orphans_cf.get(slot)
    }
644
    /// Returns an iterator of `(slot, SlotMeta)` pairs starting at `slot`.
    ///
    /// # Panics
    /// Panics if a stored `SlotMeta` fails to deserialize.
    pub fn slot_meta_iterator(
        &self,
        slot: Slot,
    ) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> {
        let meta_iter = self
            .meta_cf
            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
        Ok(meta_iter.map(|(slot, slot_meta_bytes)| {
            (
                slot,
                cf::SlotMeta::deserialize(&slot_meta_bytes).unwrap_or_else(|e| {
                    panic!("Could not deserialize SlotMeta for slot {slot}: {e:?}")
                }),
            )
        }))
    }
661
    /// Iterates slot metas reachable from `root`, followed by chains starting
    /// at any orphan slot after it.
    pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ {
        let root_forks = NextSlotsIterator::new(root, self);

        let orphans_iter = self.orphans_iterator(root + 1).unwrap();
        root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self)))
    }
668
    /// Returns metadata for the live SST files of the underlying RocksDB.
    pub fn live_files_metadata(&self) -> Result<Vec<LiveFile>> {
        self.db.live_files_metadata()
    }
672
    /// Dev/test-only raw key/value iterator over the named column family.
    ///
    /// # Panics
    /// Panics if the underlying RocksDB iterator yields an error.
    #[cfg(feature = "dev-context-only-utils")]
    #[allow(clippy::type_complexity)]
    pub fn iterator_cf(
        &self,
        cf_name: &str,
    ) -> Result<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + '_> {
        let cf = self.db.cf_handle(cf_name);
        let iterator = self.db.iterator_cf(cf, rocksdb::IteratorMode::Start);
        Ok(iterator.map(|pair| pair.unwrap()))
    }
683
    /// Iterates this slot's data shreds as raw `((slot, index), bytes)` pairs,
    /// starting at `index`.
    #[allow(clippy::type_complexity)]
    pub fn slot_data_iterator(
        &self,
        slot: Slot,
        index: u64,
    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
        let slot_iterator = self.data_shred_cf.iter(IteratorMode::From(
            (slot, index),
            IteratorDirection::Forward,
        ))?;
        // Stop as soon as the iterator crosses into the next slot.
        Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
    }
696
    /// Iterates this slot's coding shreds as raw `((slot, index), bytes)`
    /// pairs, starting at `index`.
    #[allow(clippy::type_complexity)]
    pub fn slot_coding_iterator(
        &self,
        slot: Slot,
        index: u64,
    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
        let slot_iterator = self.code_shred_cf.iter(IteratorMode::From(
            (slot, index),
            IteratorDirection::Forward,
        ))?;
        // Stop as soon as the iterator crosses into the next slot.
        Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
    }
709
    /// Iterates rooted slots starting at `slot` in the given direction.
    fn prepare_rooted_slot_iterator(
        &self,
        slot: Slot,
        direction: IteratorDirection,
    ) -> Result<impl Iterator<Item = Slot> + '_> {
        let slot_iterator = self.roots_cf.iter(IteratorMode::From(slot, direction))?;
        Ok(slot_iterator.map(move |(rooted_slot, _)| rooted_slot))
    }
718
    /// Iterates rooted slots at or after `slot`, ascending.
    pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
        self.prepare_rooted_slot_iterator(slot, IteratorDirection::Forward)
    }
722
    /// Iterates rooted slots at or before `slot`, descending.
    pub fn reversed_rooted_slot_iterator(
        &self,
        slot: Slot,
    ) -> Result<impl Iterator<Item = Slot> + '_> {
        self.prepare_rooted_slot_iterator(slot, IteratorDirection::Reverse)
    }
729
    /// Iterates the optimistic-slots column from highest to lowest slot,
    /// yielding each slot with its hash and timestamp.
    ///
    /// # Panics
    /// Panics if a stored entry fails to deserialize.
    pub fn reversed_optimistic_slots_iterator(
        &self,
    ) -> Result<impl Iterator<Item = (Slot, Hash, UnixTimestamp)> + '_> {
        let iter = self.optimistic_slots_cf.iter(IteratorMode::End)?;
        Ok(iter.map(|(slot, bytes)| {
            let meta: OptimisticSlotMetaVersioned = deserialize(&bytes).unwrap();
            (slot, meta.hash(), meta.timestamp())
        }))
    }
739
740 pub fn slot_range_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool {
743 if starting_slot == ending_slot {
744 return true;
745 }
746
747 let mut next_slots: VecDeque<_> = match self.meta(starting_slot) {
748 Ok(Some(starting_slot_meta)) => starting_slot_meta.next_slots.into(),
749 _ => return false,
750 };
751 while let Some(slot) = next_slots.pop_front() {
752 if let Ok(Some(slot_meta)) = self.meta(slot) {
753 if slot_meta.is_full() {
754 match slot.cmp(&ending_slot) {
755 cmp::Ordering::Less => next_slots.extend(slot_meta.next_slots),
756 _ => return true,
757 }
758 }
759 }
760 }
761
762 false
763 }
764
    /// Yields the data shreds of `erasure_meta`'s erasure set that are
    /// available for recovery, preferring shreds inserted during this pass
    /// over ones read back from the database.
    fn get_recovery_data_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
    ) -> impl Iterator<Item = Shred> + 'a {
        let slot = index.slot;
        erasure_meta.data_shreds_indices().filter_map(move |i| {
            let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Data);
            // Shreds from the current insertion pass take precedence.
            if let Some(shred) = prev_inserted_shreds.get(&key) {
                return Some(shred.as_ref().clone());
            }
            // The slot's index records which shreds are actually stored.
            if !index.data().contains(i) {
                return None;
            }
            match self.data_shred_cf.get_bytes((slot, i)).unwrap() {
                None => {
                    // Index and column disagree; log and treat as missing.
                    error!(
                        "Unable to read the data shred with slot {slot}, index {i} for shred \
                         recovery. The shred is marked present in the slot's data shred index, \
                         but the shred could not be found in the data shred column."
                    );
                    None
                }
                Some(data) => Shred::new_from_serialized_shred(data).ok(),
            }
        })
    }
793
    /// Yields the coding shreds of `erasure_meta`'s erasure set that are
    /// available for recovery, preferring shreds inserted during this pass
    /// over ones read back from the database.
    fn get_recovery_coding_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
    ) -> impl Iterator<Item = Shred> + 'a {
        let slot = index.slot;
        erasure_meta.coding_shreds_indices().filter_map(move |i| {
            let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Code);
            // Shreds from the current insertion pass take precedence.
            if let Some(shred) = prev_inserted_shreds.get(&key) {
                return Some(shred.as_ref().clone());
            }
            // The slot's index records which shreds are actually stored.
            if !index.coding().contains(i) {
                return None;
            }
            match self.code_shred_cf.get_bytes((slot, i)).unwrap() {
                None => {
                    // Index and column disagree; log and treat as missing.
                    error!(
                        "Unable to read the coding shred with slot {slot}, index {i} for shred \
                         recovery. The shred is marked present in the slot's coding shred index, \
                         but the shred could not be found in the coding shred column."
                    );
                    None
                }
                Some(code) => Shred::new_from_serialized_shred(code).ok(),
            }
        })
    }
822
    /// Runs erasure recovery over the available data and coding shreds of one
    /// erasure set, yielding the successfully reconstructed shreds.
    fn recover_shreds<'a>(
        &'a self,
        index: &'a Index,
        erasure_meta: &'a ErasureMeta,
        prev_inserted_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
        reed_solomon_cache: &'a ReedSolomonCache,
    ) -> std::result::Result<impl Iterator<Item = Shred> + 'a, shred::Error> {
        let data = self.get_recovery_data_shreds(index, erasure_meta, prev_inserted_shreds);
        let code = self.get_recovery_coding_shreds(index, erasure_meta, prev_inserted_shreds);
        let shreds = shred::recover(data.chain(code), reed_solomon_cache)?;
        // Shreds that fail individual reconstruction are silently dropped.
        Ok(shreds.filter_map(std::result::Result::ok))
    }
836
837 pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
842 self.meta_cf.submit_rocksdb_cf_metrics();
843 self.dead_slots_cf.submit_rocksdb_cf_metrics();
844 self.duplicate_slots_cf.submit_rocksdb_cf_metrics();
845 self.roots_cf.submit_rocksdb_cf_metrics();
846 self.erasure_meta_cf.submit_rocksdb_cf_metrics();
847 self.orphans_cf.submit_rocksdb_cf_metrics();
848 self.index_cf.submit_rocksdb_cf_metrics();
849 self.data_shred_cf.submit_rocksdb_cf_metrics();
850 self.code_shred_cf.submit_rocksdb_cf_metrics();
851 self.transaction_status_cf.submit_rocksdb_cf_metrics();
852 self.address_signatures_cf.submit_rocksdb_cf_metrics();
853 self.transaction_memos_cf.submit_rocksdb_cf_metrics();
854 self.transaction_status_index_cf.submit_rocksdb_cf_metrics();
855 self.rewards_cf.submit_rocksdb_cf_metrics();
856 self.blocktime_cf.submit_rocksdb_cf_metrics();
857 self.perf_samples_cf.submit_rocksdb_cf_metrics();
858 self.block_height_cf.submit_rocksdb_cf_metrics();
859 self.bank_hash_cf.submit_rocksdb_cf_metrics();
860 self.optimistic_slots_cf.submit_rocksdb_cf_metrics();
861 self.merkle_root_meta_cf.submit_rocksdb_cf_metrics();
862 }
863
    /// Runs each shred through the per-type check/insert helpers, recording
    /// per-outcome counts in `metrics`.
    ///
    /// The bool paired with each shred flags shreds that arrived via repair
    /// rather than turbine.
    fn attempt_shred_insertion<'a>(
        &self,
        shreds: impl IntoIterator<
            Item = (Cow<'a, Shred>, bool),
            IntoIter: ExactSizeIterator,
        >,
        is_trusted: bool,
        leader_schedule: Option<&LeaderScheduleCache>,
        shred_insertion_tracker: &mut ShredInsertionTracker<'a>,
        metrics: &mut BlockstoreInsertionMetrics,
    ) {
        let shreds = shreds.into_iter();
        metrics.num_shreds += shreds.len();
        let mut start = Measure::start("Shred insertion");
        for (shred, is_repaired) in shreds {
            let shred_source = if is_repaired {
                ShredSource::Repaired
            } else {
                ShredSource::Turbine
            };
            match shred.shred_type() {
                ShredType::Data => {
                    match self.check_insert_data_shred(
                        shred,
                        shred_insertion_tracker,
                        is_trusted,
                        leader_schedule,
                        shred_source,
                    ) {
                        Err(InsertDataShredError::Exists) => {
                            // Duplicate arrivals are counted per source.
                            if is_repaired {
                                metrics.num_repaired_data_shreds_exists += 1;
                            } else {
                                metrics.num_turbine_data_shreds_exists += 1;
                            }
                        }
                        Err(InsertDataShredError::InvalidShred) => {
                            metrics.num_data_shreds_invalid += 1
                        }
                        Err(InsertDataShredError::BlockstoreError(err)) => {
                            metrics.num_data_shreds_blockstore_error += 1;
                            error!("blockstore error: {err}");
                        }
                        Ok(()) => {
                            if is_repaired {
                                metrics.num_repair += 1;
                            }
                            metrics.num_inserted += 1;
                        }
                    };
                }
                ShredType::Code => {
                    // Coding-shred metrics are updated inside the helper.
                    self.check_insert_coding_shred(
                        shred,
                        shred_insertion_tracker,
                        is_trusted,
                        shred_source,
                        metrics,
                    );
                }
            };
        }
        start.stop();

        metrics.insert_shreds_elapsed_us += start.as_us();
    }
932
    /// Iterates the erasure sets touched by this pass and yields shreds
    /// recovered from those whose erasure meta indicates recovery should be
    /// attempted.
    fn try_shred_recovery<'a>(
        &'a self,
        erasure_metas: &'a BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
        index_working_set: &'a HashMap<u64, IndexMetaWorkingSetEntry>,
        prev_inserted_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
        reed_solomon_cache: &'a ReedSolomonCache,
    ) -> impl Iterator<Item = Shred> + 'a {
        erasure_metas
            .iter()
            .filter_map(|(erasure_set, working_erasure_meta)| {
                let erasure_meta = working_erasure_meta.as_ref();
                let slot = erasure_set.slot();
                // Every erasure set in the working set is expected to have a
                // corresponding index entry.
                let index_meta_entry = index_working_set.get(&slot).expect("Index");
                let index = &index_meta_entry.index;
                erasure_meta
                    .should_recover_shreds(index)
                    .then(|| {
                        self.recover_shreds(
                            index,
                            erasure_meta,
                            prev_inserted_shreds,
                            reed_solomon_cache,
                        )
                    })?
                    // Recovery errors for a set are swallowed; the set simply
                    // yields no shreds.
                    .ok()
            })
            .flatten()
    }
966
    /// Attempts erasure recovery for recoverable sets, forwards all recovered
    /// payloads to the retransmit channel, and re-inserts recovered data
    /// shreds through the normal insertion path.
    fn handle_shred_recovery(
        &self,
        leader_schedule: Option<&LeaderScheduleCache>,
        reed_solomon_cache: &ReedSolomonCache,
        shred_insertion_tracker: &mut ShredInsertionTracker,
        retransmit_sender: &EvictingSender<Vec<shred::Payload>>,
        is_trusted: bool,
        metrics: &mut BlockstoreInsertionMetrics,
    ) {
        let mut start = Measure::start("Shred recovery");
        let mut recovered_shreds = Vec::new();
        // Collect the recovered data shreds for insertion; payloads of both
        // types are queued for retransmission.
        let recovered_data_shreds: Vec<_> = self
            .try_shred_recovery(
                &shred_insertion_tracker.erasure_metas,
                &shred_insertion_tracker.index_working_set,
                &shred_insertion_tracker.just_inserted_shreds,
                reed_solomon_cache,
            )
            .filter_map(|shred| {
                match shred.shred_type() {
                    ShredType::Code => {
                        // Coding shreds are retransmitted but not re-inserted.
                        recovered_shreds.push(shred.into_payload());
                        None
                    }
                    ShredType::Data => {
                        recovered_shreds.push(shred.payload().clone());
                        Some(shred)
                    }
                }
            })
            .collect();
        if !recovered_shreds.is_empty() {
            // Best-effort send; failures are ignored.
            let _ = retransmit_sender.try_send(recovered_shreds);
        }
        metrics.num_recovered += recovered_data_shreds.len();
        for shred in recovered_data_shreds {
            // Select the metric counter matching the insertion outcome, then
            // bump it.
            *match self.check_insert_data_shred(
                Cow::Owned(shred),
                shred_insertion_tracker,
                is_trusted,
                leader_schedule,
                ShredSource::Recovered,
            ) {
                Err(InsertDataShredError::Exists) => &mut metrics.num_recovered_exists,
                Err(InsertDataShredError::InvalidShred) => {
                    &mut metrics.num_recovered_failed_invalid
                }
                Err(InsertDataShredError::BlockstoreError(err)) => {
                    error!("blockstore error: {err}");
                    &mut metrics.num_recovered_blockstore_error
                }
                Ok(()) => &mut metrics.num_recovered_inserted,
            } += 1;
        }
        start.stop();
        metrics.shred_recovery_elapsed_us += start.as_us();
    }
1034
    /// Checks chained merkle root consistency for every erasure meta and
    /// merkle root meta modified by this pass, recording conflicts as
    /// duplicate-shred evidence in the tracker.
    fn check_chained_merkle_root_consistency(
        &self,
        shred_insertion_tracker: &mut ShredInsertionTracker,
    ) {
        // Forward checks, driven by modified erasure metas.
        for (erasure_set, working_erasure_meta) in shred_insertion_tracker.erasure_metas.iter() {
            if !working_erasure_meta.should_write() {
                // Entry was not modified this pass; skip it.
                continue;
            }
            let (slot, _) = erasure_set.store_key();
            if self.has_duplicate_shreds_in_slot(slot) {
                // Slot already has duplicate evidence; no more proofs needed.
                continue;
            }
            let erasure_meta = working_erasure_meta.as_ref();
            let shred_id = ShredId::new(
                slot,
                erasure_meta
                    .first_received_coding_shred_index()
                    .expect("First received coding index must fit in u32"),
                ShredType::Code,
            );
            let shred = shred_insertion_tracker
                .just_inserted_shreds
                .get(&shred_id)
                .expect("Erasure meta was just created, initial shred must exist");

            self.check_forward_chained_merkle_root_consistency(
                shred,
                erasure_meta,
                &shred_insertion_tracker.just_inserted_shreds,
                &shred_insertion_tracker.merkle_root_metas,
                &mut shred_insertion_tracker.duplicate_shreds,
            );
        }

        // Backward checks, driven by modified merkle root metas.
        for (erasure_set, working_merkle_root_meta) in
            shred_insertion_tracker.merkle_root_metas.iter()
        {
            if !working_merkle_root_meta.should_write() {
                continue;
            }
            let (slot, _) = erasure_set.store_key();
            if self.has_duplicate_shreds_in_slot(slot) {
                continue;
            }
            let merkle_root_meta = working_merkle_root_meta.as_ref();
            let shred_id = ShredId::new(
                slot,
                merkle_root_meta.first_received_shred_index(),
                merkle_root_meta.first_received_shred_type(),
            );
            let shred = shred_insertion_tracker
                .just_inserted_shreds
                .get(&shred_id)
                .expect("Merkle root meta was just created, initial shred must exist");

            self.check_backwards_chained_merkle_root_consistency(
                shred,
                &shred_insertion_tracker.just_inserted_shreds,
                &shred_insertion_tracker.erasure_metas,
                &mut shred_insertion_tracker.duplicate_shreds,
            );
        }
    }
1102
    /// Stages all dirty working-set state (slot metas, erasure metas, merkle
    /// root metas, and shred indexes) into the tracker's write batch.
    ///
    /// Returns whether listeners should be signaled plus the slots newly
    /// completed by this pass.
    fn commit_updates_to_write_batch(
        &self,
        shred_insertion_tracker: &mut ShredInsertionTracker,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<(
        bool,
        Vec<u64>,
    )> {
        let mut start = Measure::start("Commit Working Sets");
        let (should_signal, newly_completed_slots) = self.commit_slot_meta_working_set(
            &shred_insertion_tracker.slot_meta_working_set,
            &mut shred_insertion_tracker.write_batch,
        )?;

        // Only dirty erasure metas need write-back.
        for (erasure_set, working_erasure_meta) in &shred_insertion_tracker.erasure_metas {
            if !working_erasure_meta.should_write() {
                continue;
            }
            let (slot, fec_set_index) = erasure_set.store_key();
            self.erasure_meta_cf.put_in_batch(
                &mut shred_insertion_tracker.write_batch,
                (slot, u64::from(fec_set_index)),
                working_erasure_meta.as_ref(),
            )?;
        }

        // Only dirty merkle root metas need write-back.
        for (erasure_set, working_merkle_root_meta) in &shred_insertion_tracker.merkle_root_metas {
            if !working_merkle_root_meta.should_write() {
                continue;
            }
            self.merkle_root_meta_cf.put_in_batch(
                &mut shred_insertion_tracker.write_batch,
                erasure_set.store_key(),
                working_merkle_root_meta.as_ref(),
            )?;
        }

        // Indexes are written back only when an insertion touched them.
        for (&slot, index_working_set_entry) in shred_insertion_tracker.index_working_set.iter() {
            if index_working_set_entry.did_insert_occur {
                self.index_cf.put_in_batch(
                    &mut shred_insertion_tracker.write_batch,
                    slot,
                    &index_working_set_entry.index,
                )?;
            }
        }
        start.stop();
        metrics.commit_working_sets_elapsed_us += start.as_us();

        Ok((should_signal, newly_completed_slots))
    }
1156
    /// Core shred-insertion routine shared by every public insertion entry
    /// point.
    ///
    /// Pipeline, all under `insert_shreds_lock`:
    /// 1. attempt insertion of each shred into a `ShredInsertionTracker`,
    /// 2. optionally run erasure recovery — only when `should_recover_shreds`
    ///    supplies a Reed-Solomon cache plus a channel for retransmitting
    ///    recovered shreds,
    /// 3. handle slot chaining,
    /// 4. check chained merkle-root consistency,
    /// 5. commit all working sets into one write batch and write it,
    /// 6. notify registered new-shred / completed-slot listeners.
    ///
    /// Each input item pairs a shred with a `bool`; callers in this file pass
    /// `false` — presumably it flags repaired shreds, TODO confirm against
    /// external callers.
    ///
    /// Returns the completed data-set infos and any duplicate-shred evidence
    /// gathered during insertion.
    fn do_insert_shreds<'a>(
        &self,
        shreds: impl IntoIterator<
            Item = (Cow<'a, Shred>, bool),
            IntoIter: ExactSizeIterator,
        >,
        leader_schedule: Option<&LeaderScheduleCache>,
        is_trusted: bool,
        should_recover_shreds: Option<(
            &ReedSolomonCache,
            &EvictingSender<Vec<shred::Payload>>,
        )>,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<InsertResults> {
        let mut total_start = Measure::start("Total elapsed");

        // Serialize all insertions; record how long we waited on the lock.
        let mut start = Measure::start("Blockstore lock");
        let _lock = self.insert_shreds_lock.lock().unwrap();
        start.stop();
        metrics.insert_lock_elapsed_us += start.as_us();

        let shreds = shreds.into_iter();
        let mut shred_insertion_tracker =
            ShredInsertionTracker::new(shreds.len(), self.get_write_batch()?);

        self.attempt_shred_insertion(
            shreds,
            is_trusted,
            leader_schedule,
            &mut shred_insertion_tracker,
            metrics,
        );
        // Recovery is opt-in: skipped entirely when the caller passes `None`.
        if let Some((reed_solomon_cache, retransmit_sender)) = should_recover_shreds {
            self.handle_shred_recovery(
                leader_schedule,
                reed_solomon_cache,
                &mut shred_insertion_tracker,
                retransmit_sender,
                is_trusted,
                metrics,
            );
        }
        self.handle_chaining(
            &mut shred_insertion_tracker.write_batch,
            &mut shred_insertion_tracker.slot_meta_working_set,
            metrics,
        )?;

        self.check_chained_merkle_root_consistency(&mut shred_insertion_tracker);

        let (should_signal, newly_completed_slots) =
            self.commit_updates_to_write_batch(&mut shred_insertion_tracker, metrics)?;

        // All staged mutations land atomically in a single batch write.
        let mut start = Measure::start("Write Batch");
        self.write_batch(shred_insertion_tracker.write_batch)?;
        start.stop();
        metrics.write_batch_elapsed_us += start.as_us();

        send_signals(
            &self.new_shreds_signals.lock().unwrap(),
            &self.completed_slots_senders.lock().unwrap(),
            should_signal,
            newly_completed_slots,
        );

        total_start.stop();
        metrics.total_elapsed_us += total_start.as_us();
        metrics.index_meta_time_us += shred_insertion_tracker.index_meta_time_us;

        Ok(InsertResults {
            completed_data_set_infos: shred_insertion_tracker.newly_completed_data_sets,
            duplicate_shreds: shred_insertion_tracker.duplicate_shreds,
        })
    }
1294
1295 pub fn insert_shreds_handle_duplicate<'a, F>(
1299 &self,
1300 shreds: impl IntoIterator<
1301 Item = (Cow<'a, Shred>, bool),
1302 IntoIter: ExactSizeIterator,
1303 >,
1304 leader_schedule: Option<&LeaderScheduleCache>,
1305 is_trusted: bool,
1306 retransmit_sender: &EvictingSender<Vec<shred::Payload>>,
1307 handle_duplicate: &F,
1308 reed_solomon_cache: &ReedSolomonCache,
1309 metrics: &mut BlockstoreInsertionMetrics,
1310 ) -> Result<Vec<CompletedDataSetInfo>>
1311 where
1312 F: Fn(PossibleDuplicateShred),
1313 {
1314 let InsertResults {
1315 completed_data_set_infos,
1316 duplicate_shreds,
1317 } = self.do_insert_shreds(
1318 shreds,
1319 leader_schedule,
1320 is_trusted,
1321 Some((reed_solomon_cache, retransmit_sender)),
1322 metrics,
1323 )?;
1324
1325 for shred in duplicate_shreds {
1326 handle_duplicate(shred);
1327 }
1328
1329 Ok(completed_data_set_infos)
1330 }
1331
1332 pub fn add_new_shred_signal(&self, s: Sender<bool>) {
1333 self.new_shreds_signals.lock().unwrap().push(s);
1334 }
1335
1336 pub fn add_completed_slots_signal(&self, s: CompletedSlotsSender) {
1337 self.completed_slots_senders.lock().unwrap().push(s);
1338 }
1339
1340 pub fn get_new_shred_signals_len(&self) -> usize {
1341 self.new_shreds_signals.lock().unwrap().len()
1342 }
1343
1344 pub fn get_new_shred_signal(&self, index: usize) -> Option<Sender<bool>> {
1345 self.new_shreds_signals.lock().unwrap().get(index).cloned()
1346 }
1347
1348 pub fn drop_signal(&self) {
1349 self.new_shreds_signals.lock().unwrap().clear();
1350 self.completed_slots_senders.lock().unwrap().clear();
1351 }
1352
1353 pub fn clear_unconfirmed_slot(&self, slot: Slot) {
1361 let _lock = self.insert_shreds_lock.lock().unwrap();
1362 match self.purge_slot_cleanup_chaining(slot) {
1369 Ok(_) => {}
1370 Err(BlockstoreError::SlotUnavailable) => {
1371 error!("clear_unconfirmed_slot() called on slot {slot} with no SlotMeta")
1372 }
1373 Err(e) => panic!("Purge database operations failed {e}"),
1374 }
1375 }
1376
1377 pub fn insert_cow_shreds<'a>(
1380 &self,
1381 shreds: impl IntoIterator<Item = Cow<'a, Shred>, IntoIter: ExactSizeIterator>,
1382 leader_schedule: Option<&LeaderScheduleCache>,
1383 is_trusted: bool,
1384 ) -> Result<Vec<CompletedDataSetInfo>> {
1385 let shreds = shreds
1386 .into_iter()
1387 .map(|shred| (shred, false));
1388 let insert_results = self.do_insert_shreds(
1389 shreds,
1390 leader_schedule,
1391 is_trusted,
1392 None, &mut BlockstoreInsertionMetrics::default(),
1394 )?;
1395 Ok(insert_results.completed_data_set_infos)
1396 }
1397
1398 pub fn insert_shreds(
1400 &self,
1401 shreds: impl IntoIterator<Item = Shred, IntoIter: ExactSizeIterator>,
1402 leader_schedule: Option<&LeaderScheduleCache>,
1403 is_trusted: bool,
1404 ) -> Result<Vec<CompletedDataSetInfo>> {
1405 let shreds = shreds.into_iter().map(Cow::Owned);
1406 self.insert_cow_shreds(shreds, leader_schedule, is_trusted)
1407 }
1408
1409 #[cfg(test)]
1410 fn insert_shred_return_duplicate(
1411 &self,
1412 shred: Shred,
1413 leader_schedule: &LeaderScheduleCache,
1414 ) -> Vec<PossibleDuplicateShred> {
1415 let insert_results = self
1416 .do_insert_shreds(
1417 [(Cow::Owned(shred), false)],
1418 Some(leader_schedule),
1419 false,
1420 None, &mut BlockstoreInsertionMetrics::default(),
1422 )
1423 .unwrap();
1424 insert_results.duplicate_shreds
1425 }
1426
    /// Validates a coding shred and, on success, stages it for insertion:
    /// updates the tracker's index / erasure-meta / merkle-root-meta working
    /// sets and appends the payload to the write batch. Any duplicate or
    /// conflicting-config evidence found is pushed onto
    /// `shred_insertion_tracker.duplicate_shreds`.
    ///
    /// Returns `true` only if the shred was actually staged.
    #[allow(clippy::too_many_arguments)]
    fn check_insert_coding_shred<'a>(
        &self,
        shred: Cow<'a, Shred>,
        shred_insertion_tracker: &mut ShredInsertionTracker<'a>,
        is_trusted: bool,
        shred_source: ShredSource,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> bool {
        let slot = shred.slot();
        let shred_index = u64::from(shred.index());

        // Destructure so the borrow checker sees disjoint field borrows.
        let ShredInsertionTracker {
            just_inserted_shreds,
            erasure_metas,
            merkle_root_metas,
            index_working_set,
            index_meta_time_us,
            duplicate_shreds,
            write_batch,
            ..
        } = shred_insertion_tracker;

        let index_meta_working_set_entry =
            self.get_index_meta_entry(slot, index_working_set, index_meta_time_us);

        let index_meta = &mut index_meta_working_set_entry.index;
        let erasure_set = shred.erasure_set();

        // First touch of this erasure set in the batch: seed the working copy
        // of the merkle root meta from the database, if one exists.
        if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) {
            if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() {
                entry.insert(WorkingEntry::Clean(meta));
            }
        }

        // Untrusted shreds must pass duplicate, sanity, and merkle-root checks.
        if !is_trusted {
            if index_meta.coding().contains(shred_index) {
                metrics.num_coding_shreds_exists += 1;
                duplicate_shreds.push(PossibleDuplicateShred::Exists(shred.into_owned()));
                return false;
            }

            if !Blockstore::should_insert_coding_shred(&shred, self.max_root()) {
                metrics.num_coding_shreds_invalid += 1;
                return false;
            }

            if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) {
                if !self.check_merkle_root_consistency(
                    just_inserted_shreds,
                    slot,
                    merkle_root_meta.as_ref(),
                    &shred,
                    duplicate_shreds,
                ) {
                    return false;
                }
            }
        }

        // Load the set's erasure config from the db, or derive a fresh (dirty)
        // one from this shred when the set has never been seen before.
        let erasure_meta_entry = erasure_metas.entry(erasure_set).or_insert_with(|| {
            self.erasure_meta(erasure_set)
                .expect("Expect database get to succeed")
                .map(WorkingEntry::Clean)
                .unwrap_or_else(|| {
                    WorkingEntry::Dirty(ErasureMeta::from_coding_shred(&shred).unwrap())
                })
        });
        let erasure_meta = erasure_meta_entry.as_ref();

        // A shred whose erasure config disagrees with the stored config is
        // evidence of leader duplicity: try to assemble and store a proof.
        if !erasure_meta.check_coding_shred(&shred) {
            metrics.num_coding_shreds_invalid_erasure_config += 1;
            if !self.has_duplicate_shreds_in_slot(slot) {
                if let Some(conflicting_shred) = self
                    .find_conflicting_coding_shred(&shred, slot, erasure_meta, just_inserted_shreds)
                    .map(Cow::into_owned)
                {
                    if let Err(e) = self.store_duplicate_slot(
                        slot,
                        conflicting_shred.clone(),
                        shred.payload().clone(),
                    ) {
                        warn!(
                            "Unable to store conflicting erasure meta duplicate proof for {slot} \
                             {erasure_set:?} {e}"
                        );
                    }

                    duplicate_shreds.push(PossibleDuplicateShred::ErasureConflict(
                        shred.as_ref().clone(),
                        conflicting_shred,
                    ));
                } else {
                    error!(
                        "Unable to find the conflicting coding shred that set {erasure_meta:?}. \
                         This should only happen in extreme cases where blockstore cleanup has \
                         caught up to the root. Skipping the erasure meta duplicate shred check"
                    );
                }
            }

            warn!("Received multiple erasure configs for the same erasure set!!!");
            warn!(
                "Slot: {}, shred index: {}, erasure_set: {:?}, is_duplicate: {}, stored config: \
                 {:#?}, new shred: {:#?}",
                slot,
                shred.index(),
                erasure_set,
                self.has_duplicate_shreds_in_slot(slot),
                erasure_meta.config(),
                shred,
            );
            return false;
        }

        self.slots_stats
            .record_shred(shred.slot(), shred.fec_set_index(), shred_source, None);

        let result = self
            .insert_coding_shred(index_meta, &shred, write_batch)
            .is_ok();

        if result {
            index_meta_working_set_entry.did_insert_occur = true;
            metrics.num_inserted += 1;

            // A successful first insert for this set establishes its merkle
            // root meta (dirty → will be persisted at commit time).
            merkle_root_metas
                .entry(erasure_set)
                .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred)));
        }

        // Remember the shred for same-batch lookups (first occurrence wins).
        if let HashMapEntry::Vacant(entry) = just_inserted_shreds.entry(shred.id()) {
            metrics.num_coding_shreds_inserted += 1;
            entry.insert(shred);
        }

        result
    }
1572
    /// Tries to locate a previously received coding shred from the same
    /// erasure set whose erasure config conflicts with `shred`.
    ///
    /// Fast path: look up the shred recorded as first-received in
    /// `erasure_meta` (in the just-inserted map or the database). When that
    /// index is nonzero, or the lookup succeeds, the result is returned
    /// directly. Only when the index is 0 AND the lookup fails do we fall
    /// back to scanning every coding index of the set — index 0 presumably
    /// doubles as an "unset" sentinel in older metas, TODO confirm.
    fn find_conflicting_coding_shred<'a>(
        &'a self,
        shred: &Shred,
        slot: Slot,
        erasure_meta: &ErasureMeta,
        just_received_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
    ) -> Option<Cow<'a, shred::Payload>> {
        let index = erasure_meta.first_received_coding_shred_index()?;
        let shred_id = ShredId::new(slot, index, ShredType::Code);
        let maybe_shred = self.get_shred_from_just_inserted_or_db(just_received_shreds, shred_id);

        if index != 0 || maybe_shred.is_some() {
            return maybe_shred;
        }

        // Fallback scan: return the first coding shred of the set whose
        // erasure config actually mismatches `shred`.
        for coding_index in erasure_meta.coding_shreds_indices() {
            let maybe_shred = self.get_coding_shred(slot, coding_index);
            if let Ok(Some(shred_data)) = maybe_shred {
                let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
                if shred.erasure_mismatch(&potential_shred).unwrap() {
                    return Some(Cow::Owned(potential_shred.into_payload()));
                }
            } else if let Some(potential_shred) = {
                let key = ShredId::new(slot, u32::try_from(coding_index).unwrap(), ShredType::Code);
                just_received_shreds.get(&key)
            } {
                if shred.erasure_mismatch(potential_shred).unwrap() {
                    return Some(Cow::Borrowed(potential_shred.payload()));
                }
            }
        }
        None
    }
1611
    /// Validates a data shred and, on success, stages it for insertion:
    /// updates the tracker's slot-meta / index / merkle-root / erasure-meta
    /// working sets, stages the payload into the write batch, and records any
    /// data sets the shred completed. Conflict evidence is pushed onto
    /// `shred_insertion_tracker.duplicate_shreds`.
    ///
    /// Returns `Err(Exists)` for duplicates and `Err(InvalidShred)` for
    /// shreds that fail validation.
    #[allow(clippy::too_many_arguments)]
    fn check_insert_data_shred<'a>(
        &self,
        shred: Cow<'a, Shred>,
        shred_insertion_tracker: &mut ShredInsertionTracker<'a>,
        is_trusted: bool,
        leader_schedule: Option<&LeaderScheduleCache>,
        shred_source: ShredSource,
    ) -> std::result::Result<(), InsertDataShredError> {
        let slot = shred.slot();
        let shred_index = u64::from(shred.index());

        // Destructure so the borrow checker sees disjoint field borrows.
        let ShredInsertionTracker {
            index_working_set,
            slot_meta_working_set,
            just_inserted_shreds,
            merkle_root_metas,
            duplicate_shreds,
            index_meta_time_us,
            erasure_metas,
            write_batch,
            newly_completed_data_sets,
        } = shred_insertion_tracker;

        let index_meta_working_set_entry =
            self.get_index_meta_entry(slot, index_working_set, index_meta_time_us);
        let index_meta = &mut index_meta_working_set_entry.index;
        let slot_meta_entry = self.get_slot_meta_entry(
            slot_meta_working_set,
            slot,
            shred
                .parent()
                .map_err(|_| InsertDataShredError::InvalidShred)?,
        );

        let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
        let erasure_set = shred.erasure_set();
        // First touch of this erasure set in the batch: seed the working copy
        // of the merkle root meta from the database, if one exists.
        if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) {
            if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() {
                entry.insert(WorkingEntry::Clean(meta));
            }
        }

        if !is_trusted {
            if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
                duplicate_shreds.push(PossibleDuplicateShred::Exists(shred.into_owned()));
                return Err(InsertDataShredError::Exists);
            }

            // A "last in slot" shred with an index below what we have already
            // received means the slot can never be consistent: mark it dead.
            if shred.last_in_slot() && shred_index < slot_meta.received && !slot_meta.is_full() {
                warn!(
                    "Received *last* shred index {} less than previous shred index {}, and slot \
                     {} is not full, marking slot dead",
                    shred_index, slot_meta.received, slot
                );
                self.dead_slots_cf
                    .put_in_batch(write_batch, slot, &true)
                    .unwrap();
            }

            if !self.should_insert_data_shred(
                &shred,
                slot_meta,
                just_inserted_shreds,
                self.max_root(),
                leader_schedule,
                shred_source,
                duplicate_shreds,
            ) {
                return Err(InsertDataShredError::InvalidShred);
            }

            if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) {
                if !self.check_merkle_root_consistency(
                    just_inserted_shreds,
                    slot,
                    merkle_root_meta.as_ref(),
                    &shred,
                    duplicate_shreds,
                ) {
                    // Merkle-root conflict within the slot: mark the slot dead.
                    self.dead_slots_cf
                        .put_in_batch(write_batch, slot, &true)
                        .unwrap();
                    return Err(InsertDataShredError::InvalidShred);
                }
            }
        }

        let completed_data_sets = self.insert_data_shred(
            slot_meta,
            index_meta.data_mut(),
            &shred,
            write_batch,
            shred_source,
        )?;
        newly_completed_data_sets.extend(completed_data_sets);
        // A successful first insert for this set establishes its merkle root
        // meta (dirty → persisted at commit time).
        merkle_root_metas
            .entry(erasure_set)
            .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred)));
        just_inserted_shreds.insert(shred.id(), shred);
        index_meta_working_set_entry.did_insert_occur = true;
        slot_meta_entry.did_insert_occur = true;
        // Cache the set's erasure meta for later consistency checks.
        if let BTreeMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
            if let Some(meta) = self.erasure_meta(erasure_set).unwrap() {
                entry.insert(WorkingEntry::Clean(meta));
            }
        }
        Ok(())
    }
1763
1764 fn should_insert_coding_shred(shred: &Shred, max_root: Slot) -> bool {
1765 debug_assert_matches!(shred.sanitize(), Ok(()));
1766 shred.is_code() && shred.slot() > max_root
1767 }
1768
1769 fn insert_coding_shred(
1770 &self,
1771 index_meta: &mut Index,
1772 shred: &Shred,
1773 write_batch: &mut WriteBatch,
1774 ) -> Result<()> {
1775 let slot = shred.slot();
1776 let shred_index = u64::from(shred.index());
1777
1778 debug_assert_matches!(shred.sanitize(), Ok(()));
1781 assert!(shred.is_code());
1782
1783 self.code_shred_cf
1786 .put_bytes_in_batch(write_batch, (slot, shred_index), shred.payload())?;
1787 index_meta.coding_mut().insert(shred_index);
1788
1789 Ok(())
1790 }
1791
1792 fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool {
1793 let shred_index = u64::from(shred.index());
1794 shred_index < slot_meta.consumed || data_index.contains(shred_index)
1796 }
1797
1798 fn get_shred_from_just_inserted_or_db<'a>(
1801 &'a self,
1802 just_inserted_shreds: &'a HashMap<ShredId, Cow<'_, Shred>>,
1803 shred_id: ShredId,
1804 ) -> Option<Cow<'a, shred::Payload>> {
1805 let (slot, index, shred_type) = shred_id.unpack();
1806 match (just_inserted_shreds.get(&shred_id), shred_type) {
1807 (Some(shred), _) => Some(Cow::Borrowed(shred.payload())),
1808 (_, ShredType::Data) => self
1811 .get_data_shred(slot, u64::from(index))
1812 .unwrap()
1813 .map(shred::Payload::from)
1814 .map(Cow::Owned),
1815 (_, ShredType::Code) => self
1816 .get_coding_shred(slot, u64::from(index))
1817 .unwrap()
1818 .map(shred::Payload::from)
1819 .map(Cow::Owned),
1820 }
1821 }
1822
1823 fn check_merkle_root_consistency(
1829 &self,
1830 just_inserted_shreds: &HashMap<ShredId, Cow<'_, Shred>>,
1831 slot: Slot,
1832 merkle_root_meta: &MerkleRootMeta,
1833 shred: &Shred,
1834 duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
1835 ) -> bool {
1836 let new_merkle_root = shred.merkle_root().ok();
1837 if merkle_root_meta.merkle_root() == new_merkle_root {
1838 return true;
1841 }
1842
1843 warn!(
1844 "Received conflicting merkle roots for slot: {}, erasure_set: {:?} original merkle \
1845 root meta {:?} vs conflicting merkle root {:?} shred index {} type {:?}. Reporting \
1846 as duplicate",
1847 slot,
1848 shred.erasure_set(),
1849 merkle_root_meta,
1850 new_merkle_root,
1851 shred.index(),
1852 shred.shred_type(),
1853 );
1854
1855 if !self.has_duplicate_shreds_in_slot(slot) {
1856 let shred_id = ShredId::new(
1857 slot,
1858 merkle_root_meta.first_received_shred_index(),
1859 merkle_root_meta.first_received_shred_type(),
1860 );
1861 let Some(conflicting_shred) = self
1862 .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
1863 .map(Cow::into_owned)
1864 else {
1865 error!(
1866 "Shred {shred_id:?} indiciated by merkle root meta {merkle_root_meta:?} is \
1867 missing from blockstore. This should only happen in extreme cases where \
1868 blockstore cleanup has caught up to the root. Skipping the merkle root \
1869 consistency check"
1870 );
1871 return true;
1872 };
1873 if let Err(e) = self.store_duplicate_slot(
1874 slot,
1875 conflicting_shred.clone(),
1876 shred.clone().into_payload(),
1877 ) {
1878 warn!(
1879 "Unable to store conflicting merkle root duplicate proof for {slot} {:?} {e}",
1880 shred.erasure_set(),
1881 );
1882 }
1883 duplicate_shreds.push(PossibleDuplicateShred::MerkleRootConflict(
1884 shred.clone(),
1885 conflicting_shred,
1886 ));
1887 }
1888 false
1889 }
1890
    /// Checks that the NEXT erasure set's shreds chain back to this coding
    /// shred's merkle root.
    ///
    /// Returns `true` when consistent or when the check cannot be performed
    /// (no next-set meta, or its first-received shred is no longer in the
    /// blockstore). Returns `false` on a conflict, pushing a
    /// `ChainedMerkleRootConflict` for the first conflict seen in the slot.
    fn check_forward_chained_merkle_root_consistency(
        &self,
        shred: &Shred,
        erasure_meta: &ErasureMeta,
        just_inserted_shreds: &HashMap<ShredId, Cow<'_, Shred>>,
        merkle_root_metas: &HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
    ) -> bool {
        debug_assert!(erasure_meta.check_coding_shred(shred));
        let slot = shred.slot();
        let erasure_set = shred.erasure_set();

        let Some(next_fec_set_index) = erasure_meta.next_fec_set_index() else {
            error!("Invalid erasure meta, unable to compute next fec set index {erasure_meta:?}");
            return false;
        };
        let next_erasure_set = ErasureSetId::new(slot, next_fec_set_index);
        // Prefer the in-batch working entry; fall back to the database. No
        // meta for the next set means there is nothing to check yet.
        let Some(next_merkle_root_meta) = merkle_root_metas
            .get(&next_erasure_set)
            .map(WorkingEntry::as_ref)
            .map(Cow::Borrowed)
            .or_else(|| {
                self.merkle_root_meta(next_erasure_set)
                    .unwrap()
                    .map(Cow::Owned)
            })
        else {
            return true;
        };
        let next_shred_id = ShredId::new(
            slot,
            next_merkle_root_meta.first_received_shred_index(),
            next_merkle_root_meta.first_received_shred_type(),
        );
        let Some(next_shred) =
            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
                .map(Cow::into_owned)
        else {
            error!(
                "Shred {next_shred_id:?} indicated by merkle root meta {next_merkle_root_meta:?} \
                 is missing from blockstore. This should only happen in extreme cases where \
                 blockstore cleanup has caught up to the root. Skipping the forward chained \
                 merkle root consistency check"
            );
            return true;
        };
        let merkle_root = shred.merkle_root().ok();
        // The next set's shred carries the root it chains FROM.
        let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);

        if !self.check_chaining(merkle_root, chained_merkle_root) {
            warn!(
                "Received conflicting chained merkle roots for slot: {slot}, shred \
                 {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however next fec set \
                 shred {next_erasure_set:?} type {:?} chains to merkle root \
                 {chained_merkle_root:?}. Reporting as duplicate",
                shred.shred_type(),
                next_merkle_root_meta.first_received_shred_type(),
            );

            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
                    shred.clone(),
                    next_shred,
                ));
            }
            return false;
        }

        true
    }
1972
    /// Checks that this shred's chained merkle root matches the merkle root
    /// of the PREVIOUS erasure set in the slot.
    ///
    /// Returns `true` when consistent or when the check cannot be performed
    /// (first fec set of the slot, no previous erasure meta, or the previous
    /// set's first coding shred is no longer in the blockstore). Returns
    /// `false` on a conflict, pushing a `ChainedMerkleRootConflict` for the
    /// first conflict seen in the slot.
    fn check_backwards_chained_merkle_root_consistency(
        &self,
        shred: &Shred,
        just_inserted_shreds: &HashMap<ShredId, Cow<'_, Shred>>,
        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
    ) -> bool {
        let slot = shred.slot();
        let erasure_set = shred.erasure_set();
        let fec_set_index = shred.fec_set_index();

        // The first fec set of a slot has nothing to chain back to.
        if fec_set_index == 0 {
            return true;
        }

        let Some((prev_erasure_set, prev_erasure_meta)) = self
            .previous_erasure_set(erasure_set, erasure_metas)
            .expect("Expect database operations to succeed")
        else {
            return true;
        };

        let prev_shred_id = ShredId::new(
            slot,
            prev_erasure_meta
                .first_received_coding_shred_index()
                .expect("First received coding index must fit in u32"),
            ShredType::Code,
        );
        let Some(prev_shred) =
            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id)
                .map(Cow::into_owned)
        else {
            warn!(
                "Shred {prev_shred_id:?} indicated by the erasure meta {prev_erasure_meta:?} is \
                 missing from blockstore. This can happen if you have recently upgraded from a \
                 version < v1.18.13, or if blockstore cleanup has caught up to the root. Skipping \
                 the backwards chained merkle root consistency check"
            );
            return true;
        };
        let merkle_root = shred::layout::get_merkle_root(&prev_shred);
        let chained_merkle_root = shred.chained_merkle_root().ok();

        if !self.check_chaining(merkle_root, chained_merkle_root) {
            warn!(
                "Received conflicting chained merkle roots for slot: {slot}, shred {:?} type {:?} \
                 chains to merkle root {chained_merkle_root:?}, however previous fec set coding \
                 shred {prev_erasure_set:?} has merkle root {merkle_root:?}. Reporting as \
                 duplicate",
                shred.erasure_set(),
                shred.shred_type(),
            );

            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
                    shred.clone(),
                    prev_shred,
                ));
            }
            return false;
        }

        true
    }
2057
2058 fn check_chaining(&self, merkle_root: Option<Hash>, chained_merkle_root: Option<Hash>) -> bool {
2062 chained_merkle_root.is_none() || chained_merkle_root == merkle_root
2064 }
2065
    /// Decides whether a data shred may be inserted, and collects duplicate
    /// evidence when it may not.
    ///
    /// Rejects (returning `false`) when:
    /// * the shred's index is >= the slot's recorded `last_index`, or
    /// * the shred claims to be last-in-slot but its index is below the
    ///   highest index already received
    /// — both are evidence of leader duplicity; a `LastIndexConflict` proof
    /// is stored/pushed for the first conflict seen in the slot — or when
    /// * the slot/parent/max-root relationship fails `verify_shred_slots`.
    fn should_insert_data_shred(
        &self,
        shred: &Shred,
        slot_meta: &SlotMeta,
        just_inserted_shreds: &HashMap<ShredId, Cow<'_, Shred>>,
        max_root: Slot,
        leader_schedule: Option<&LeaderScheduleCache>,
        shred_source: ShredSource,
        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
    ) -> bool {
        let shred_index = u64::from(shred.index());
        let slot = shred.slot();
        let last_in_slot = if shred.last_in_slot() {
            debug!("got last in slot");
            true
        } else {
            false
        };
        debug_assert_matches!(shred.sanitize(), Ok(()));
        // Case 1: index at/after the recorded last index of the slot.
        let last_index = slot_meta.last_index;
        if last_index.map(|ix| shred_index >= ix).unwrap_or_default() {
            let leader_pubkey = leader_schedule
                .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));

            if !self.has_duplicate_shreds_in_slot(slot) {
                // Pair this shred with the stored last-index shred as proof.
                let shred_id = ShredId::new(
                    slot,
                    u32::try_from(last_index.unwrap()).unwrap(),
                    ShredType::Data,
                );
                let Some(ending_shred) = self
                    .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
                    .map(Cow::into_owned)
                else {
                    error!(
                        "Last index data shred {shred_id:?} indiciated by slot meta {slot_meta:?} \
                         is missing from blockstore. This should only happen in extreme cases \
                         where blockstore cleanup has caught up to the root. Skipping data shred \
                         insertion"
                    );
                    return false;
                };

                if self
                    .store_duplicate_slot(slot, ending_shred.clone(), shred.payload().clone())
                    .is_err()
                {
                    warn!("store duplicate error");
                }
                duplicate_shreds.push(PossibleDuplicateShred::LastIndexConflict(
                    shred.clone(),
                    ending_shred,
                ));
            }

            datapoint_error!(
                "blockstore_error",
                (
                    "error",
                    format!(
                        "Leader {leader_pubkey:?}, slot {slot}: received index {shred_index} >= \
                         slot.last_index {last_index:?}, shred_source: {shred_source:?}"
                    ),
                    String
                )
            );
            return false;
        }
        // Case 2: a "last" shred whose index is below what was already
        // received — the slot cannot be consistent.
        if last_in_slot && shred_index < slot_meta.received {
            let leader_pubkey = leader_schedule
                .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));

            if !self.has_duplicate_shreds_in_slot(slot) {
                // Pair this shred with the highest shred received so far.
                let shred_id = ShredId::new(
                    slot,
                    u32::try_from(slot_meta.received - 1).unwrap(),
                    ShredType::Data,
                );
                let Some(ending_shred) = self
                    .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id)
                    .map(Cow::into_owned)
                else {
                    error!(
                        "Last received data shred {shred_id:?} indiciated by slot meta \
                         {slot_meta:?} is missing from blockstore. This should only happen in \
                         extreme cases where blockstore cleanup has caught up to the root. \
                         Skipping data shred insertion"
                    );
                    return false;
                };

                if self
                    .store_duplicate_slot(slot, ending_shred.clone(), shred.payload().clone())
                    .is_err()
                {
                    warn!("store duplicate error");
                }
                duplicate_shreds.push(PossibleDuplicateShred::LastIndexConflict(
                    shred.clone(),
                    ending_shred,
                ));
            }

            datapoint_error!(
                "blockstore_error",
                (
                    "error",
                    format!(
                        "Leader {:?}, slot {}: received shred_index {} < slot.received {}, \
                         shred_source: {:?}",
                        leader_pubkey, slot, shred_index, slot_meta.received, shred_source
                    ),
                    String
                )
            );
            return false;
        }

        // Finally, the slot must chain sensibly: parent known, and the
        // slot/parent/max-root relationship valid. Unknown parent → reject.
        slot_meta
            .parent_slot
            .map(|parent_slot| verify_shred_slots(slot, parent_slot, max_root))
            .unwrap_or_default()
    }
2195
2196 fn insert_data_shred<'a>(
2197 &self,
2198 slot_meta: &mut SlotMeta,
2199 data_index: &'a mut ShredIndex,
2200 shred: &Shred,
2201 write_batch: &mut WriteBatch,
2202 shred_source: ShredSource,
2203 ) -> Result<impl Iterator<Item = CompletedDataSetInfo> + 'a> {
2204 let slot = shred.slot();
2205 let index = u64::from(shred.index());
2206
2207 let last_in_slot = if shred.last_in_slot() {
2208 debug!("got last in slot");
2209 true
2210 } else {
2211 false
2212 };
2213
2214 let last_in_data = if shred.data_complete() {
2215 debug!("got last in data");
2216 true
2217 } else {
2218 false
2219 };
2220
2221 assert!(!slot_meta.is_orphan());
2223
2224 let new_consumed = if slot_meta.consumed == index {
2225 let mut current_index = index + 1;
2226
2227 while data_index.contains(current_index) {
2228 current_index += 1;
2229 }
2230 current_index
2231 } else {
2232 slot_meta.consumed
2233 };
2234
2235 self.data_shred_cf.put_bytes_in_batch(
2238 write_batch,
2239 (slot, index),
2240 shred.bytes_to_store(),
2241 )?;
2242 data_index.insert(index);
2243 let newly_completed_data_sets = update_slot_meta(
2244 last_in_slot,
2245 last_in_data,
2246 slot_meta,
2247 index as u32,
2248 new_consumed,
2249 shred.reference_tick(),
2250 data_index,
2251 )
2252 .map(move |indices| CompletedDataSetInfo { slot, indices });
2253
2254 self.slots_stats.record_shred(
2255 shred.slot(),
2256 shred.fec_set_index(),
2257 shred_source,
2258 Some(slot_meta),
2259 );
2260
2261 trace!("inserted shred into slot {slot:?} and index {index:?}");
2262
2263 Ok(newly_completed_data_sets)
2264 }
2265
2266 pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
2267 let shred = self.data_shred_cf.get_bytes((slot, index))?;
2268 let shred = shred.map(ShredData::resize_stored_shred).transpose();
2269 shred.map_err(|err| {
2270 let err = format!("Invalid stored shred: {err}");
2271 let err = Box::new(bincode::ErrorKind::Custom(err));
2272 BlockstoreError::InvalidShredData(err)
2273 })
2274 }
2275
2276 pub fn get_data_shreds_for_slot(&self, slot: Slot, start_index: u64) -> Result<Vec<Shred>> {
2277 self.slot_data_iterator(slot, start_index)
2278 .expect("blockstore couldn't fetch iterator")
2279 .map(|(_, bytes)| {
2280 Shred::new_from_serialized_shred(Vec::from(bytes)).map_err(|err| {
2281 BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
2282 format!("Could not reconstruct shred from shred payload: {err:?}"),
2283 )))
2284 })
2285 })
2286 .collect()
2287 }
2288
    /// Test helper: copies raw data-shred payloads of a FULL slot into
    /// `buffer`, stopping when the buffer cannot hold another shred.
    ///
    /// Returns `(last_index_copied, bytes_written)`; both are 0 when the slot
    /// has no meta or is not yet full.
    #[cfg(test)]
    fn get_data_shreds(
        &self,
        slot: Slot,
        from_index: u64,
        to_index: u64,
        buffer: &mut [u8],
    ) -> Result<(u64, usize)> {
        let _lock = self.check_lowest_cleanup_slot(slot)?;
        let mut buffer_offset = 0;
        let mut last_index = 0;
        if let Some(meta) = self.meta_cf.get(slot)? {
            if !meta.is_full() {
                warn!("The slot is not yet full. Will not return any shreds");
                return Ok((last_index, buffer_offset));
            }
            // Never read past the contiguous prefix of the slot.
            let to_index = cmp::min(to_index, meta.consumed);
            for index in from_index..to_index {
                if let Some(shred_data) = self.get_data_shred(slot, index)? {
                    let shred_len = shred_data.len();
                    if buffer.len().saturating_sub(buffer_offset) >= shred_len {
                        buffer[buffer_offset..buffer_offset + shred_len]
                            .copy_from_slice(&shred_data[..shred_len]);
                        buffer_offset += shred_len;
                        last_index = index;
                        // Early exit: uses THIS shred's length as an estimate
                        // of the next one's — assumes shreds are roughly the
                        // same size, TODO confirm.
                        if buffer.len().saturating_sub(buffer_offset) < shred_len {
                            break;
                        }
                    } else {
                        break;
                    }
                }
            }
        }
        Ok((last_index, buffer_offset))
    }
2328
2329 pub fn get_coding_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
2330 self.code_shred_cf.get_bytes((slot, index))
2331 }
2332
2333 pub fn get_coding_shreds_for_slot(
2334 &self,
2335 slot: Slot,
2336 start_index: u64,
2337 ) -> std::result::Result<Vec<Shred>, shred::Error> {
2338 self.slot_coding_iterator(slot, start_index)
2339 .expect("blockstore couldn't fetch iterator")
2340 .map(|(_, bytes)| Shred::new_from_serialized_shred(Vec::from(bytes)))
2341 .collect()
2342 }
2343
2344 #[allow(clippy::too_many_arguments)]
2346 pub(crate) fn write_entries(
2347 &self,
2348 start_slot: Slot,
2349 num_ticks_in_start_slot: u64,
2350 start_index: u32,
2351 ticks_per_slot: u64,
2352 parent: Option<u64>,
2353 is_full_slot: bool,
2354 keypair: &Keypair,
2355 entries: Vec<Entry>,
2356 version: u16,
2357 ) -> Result<usize > {
2358 let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v);
2359 let num_slots = (start_slot - parent_slot).max(1); assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot);
2361 let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot;
2362
2363 let mut current_slot = start_slot;
2364 let mut shredder = Shredder::new(current_slot, parent_slot, 0, version).unwrap();
2365 let mut all_shreds = vec![];
2366 let mut slot_entries = vec![];
2367 let reed_solomon_cache = ReedSolomonCache::default();
2368 let mut chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen());
2369 for entry in entries.into_iter() {
2371 if remaining_ticks_in_slot == 0 {
2372 current_slot += 1;
2373 parent_slot = current_slot - 1;
2374 remaining_ticks_in_slot = ticks_per_slot;
2375 let current_entries = std::mem::take(&mut slot_entries);
2376 let start_index = {
2377 if all_shreds.is_empty() {
2378 start_index
2379 } else {
2380 0
2381 }
2382 };
2383 let (mut data_shreds, mut coding_shreds) = shredder
2384 .entries_to_merkle_shreds_for_tests(
2385 keypair,
2386 ¤t_entries,
2387 true, chained_merkle_root,
2389 start_index, start_index, &reed_solomon_cache,
2392 &mut ProcessShredsStats::default(),
2393 );
2394 all_shreds.append(&mut data_shreds);
2395 all_shreds.append(&mut coding_shreds);
2396 chained_merkle_root = coding_shreds.last().unwrap().merkle_root().unwrap();
2397 shredder = Shredder::new(
2398 current_slot,
2399 parent_slot,
2400 (ticks_per_slot - remaining_ticks_in_slot) as u8,
2401 version,
2402 )
2403 .unwrap();
2404 }
2405
2406 if entry.is_tick() {
2407 remaining_ticks_in_slot -= 1;
2408 }
2409 slot_entries.push(entry);
2410 }
2411
2412 if !slot_entries.is_empty() {
2413 all_shreds.extend(shredder.make_merkle_shreds_from_entries(
2414 keypair,
2415 &slot_entries,
2416 is_full_slot,
2417 chained_merkle_root,
2418 0, 0, &reed_solomon_cache,
2421 &mut ProcessShredsStats::default(),
2422 ));
2423 }
2424 let num_data = all_shreds.iter().filter(|shred| shred.is_data()).count();
2425 self.insert_shreds(all_shreds, None, false)?;
2426 Ok(num_data)
2427 }
2428
    /// Returns the shred `Index` recorded for `slot`, if any.
    pub fn get_index(&self, slot: Slot) -> Result<Option<Index>> {
        self.index_cf.get(slot)
    }
2432
    /// Stores pre-serialized `SlotMeta` bytes for `slot`.
    pub fn put_meta_bytes(&self, slot: Slot, bytes: &[u8]) -> Result<()> {
        self.meta_cf.put_bytes(slot, bytes)
    }
2439
2440 pub fn put_meta(&self, slot: Slot, meta: &SlotMeta) -> Result<()> {
2444 self.put_meta_bytes(slot, &cf::SlotMeta::serialize(meta)?)
2445 }
2446
    /// Scans shred column `C` for `slot` and returns up to `max_missing`
    /// shred indexes in `[start_index, end_index)` that are not present.
    ///
    /// An index is only reported once enough ticks have elapsed since
    /// `first_timestamp` relative to the neighboring shred's reference tick
    /// plus `defer_threshold_ticks`; this avoids flagging shreds that may
    /// simply still be in flight.
    fn find_missing_indexes<C>(
        db_iterator: &mut DBRawIterator,
        slot: Slot,
        first_timestamp: u64,
        defer_threshold_ticks: u64,
        start_index: u64,
        end_index: u64,
        max_missing: usize,
    ) -> Vec<u64>
    where
        C: Column<Index = (u64, u64)>,
    {
        if start_index >= end_index || max_missing == 0 {
            return vec![];
        }

        let mut missing_indexes = vec![];
        // Wall-clock time since the first insert, converted to ticks.
        let ticks_since_first_insert =
            DEFAULT_TICKS_PER_SECOND * timestamp().saturating_sub(first_timestamp) / 1000;

        // Position the iterator at the first stored shred >= (slot, start_index).
        db_iterator.seek(C::key(&(slot, start_index)));

        let mut prev_index = start_index;
        loop {
            if !db_iterator.valid() {
                // Iterator exhausted: everything remaining up to end_index is
                // missing.
                let num_to_take = max_missing - missing_indexes.len();
                missing_indexes.extend((prev_index..end_index).take(num_to_take));
                break;
            }
            let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key"));

            let current_index = {
                if current_slot > slot {
                    // Ran past the target slot; treat it as reaching end_index.
                    end_index
                } else {
                    index
                }
            };

            let upper_index = cmp::min(current_index, end_index);
            // Defer reporting if this shred was inserted too recently
            // relative to its reference tick.
            let data = db_iterator.value().expect("couldn't read value");
            let reference_tick = u64::from(shred::layout::get_reference_tick(data).unwrap());
            if ticks_since_first_insert < reference_tick + defer_threshold_ticks {
                break;
            }

            // Everything in [prev_index, upper_index) is absent from the column.
            let num_to_take = max_missing - missing_indexes.len();
            missing_indexes.extend((prev_index..upper_index).take(num_to_take));

            if missing_indexes.len() == max_missing
                || current_slot > slot
                || current_index >= end_index
            {
                break;
            }

            prev_index = current_index + 1;
            db_iterator.next();
        }

        missing_indexes
    }
2530
2531 pub fn find_missing_data_indexes(
2535 &self,
2536 slot: Slot,
2537 first_timestamp: u64,
2538 defer_threshold_ticks: u64,
2539 start_index: u64,
2540 end_index: u64,
2541 max_missing: usize,
2542 ) -> Vec<u64> {
2543 let Ok(mut db_iterator) = self.db.raw_iterator_cf(self.data_shred_cf.handle()) else {
2544 return vec![];
2545 };
2546
2547 Self::find_missing_indexes::<cf::ShredData>(
2548 &mut db_iterator,
2549 slot,
2550 first_timestamp,
2551 defer_threshold_ticks,
2552 start_index,
2553 end_index,
2554 max_missing,
2555 )
2556 }
2557
    /// Returns the recorded block time for `slot`, failing with
    /// `SlotCleanedUp` if the slot has been purged.
    fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
        let _lock = self.check_lowest_cleanup_slot(slot)?;
        self.blocktime_cf.get(slot)
    }
2562
2563 pub fn get_rooted_block_time(&self, slot: Slot) -> Result<UnixTimestamp> {
2564 let _lock = self.check_lowest_cleanup_slot(slot)?;
2565
2566 if self.is_root(slot) {
2567 return self
2568 .blocktime_cf
2569 .get(slot)?
2570 .ok_or(BlockstoreError::SlotUnavailable);
2571 }
2572 Err(BlockstoreError::SlotNotRooted)
2573 }
2574
2575 pub fn set_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
2576 self.blocktime_cf.put(slot, ×tamp)
2577 }
2578
    /// Returns the recorded block height for `slot`, failing with
    /// `SlotCleanedUp` if the slot has been purged.
    pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
        let _lock = self.check_lowest_cleanup_slot(slot)?;

        self.block_height_cf.get(slot)
    }
2584
    /// Records `block_height` for `slot`.
    pub fn set_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
        self.block_height_cf.put(slot, &block_height)
    }
2588
    /// Returns the lowest slot for which a complete block can be served.
    pub fn get_first_available_block(&self) -> Result<Slot> {
        let mut root_iterator = self.rooted_slot_iterator(self.lowest_slot_with_genesis())?;
        let first_root = root_iterator.next().unwrap_or_default();
        // If the first root is slot 0, it is genesis, which is always
        // complete, so return it directly.
        if first_root == 0 {
            return Ok(first_root);
        }
        // Otherwise skip the earliest root: its block presumably cannot be
        // served completely (its parent data has been purged, so the
        // previous blockhash is unavailable) -- NOTE(review): confirm this
        // rationale against the cleanup/snapshot path.
        Ok(root_iterator.next().unwrap_or_default())
    }
2603
2604 pub fn get_rooted_block(
2605 &self,
2606 slot: Slot,
2607 require_previous_blockhash: bool,
2608 ) -> Result<VersionedConfirmedBlock> {
2609 let _lock = self.check_lowest_cleanup_slot(slot)?;
2610
2611 if self.is_root(slot) {
2612 return self.get_complete_block(slot, require_previous_blockhash);
2613 }
2614 Err(BlockstoreError::SlotNotRooted)
2615 }
2616
2617 pub fn get_complete_block(
2618 &self,
2619 slot: Slot,
2620 require_previous_blockhash: bool,
2621 ) -> Result<VersionedConfirmedBlock> {
2622 self.do_get_complete_block_with_entries(
2623 slot,
2624 require_previous_blockhash,
2625 false,
2626 false,
2627 )
2628 .map(|result| result.block)
2629 }
2630
2631 pub fn get_rooted_block_with_entries(
2632 &self,
2633 slot: Slot,
2634 require_previous_blockhash: bool,
2635 ) -> Result<VersionedConfirmedBlockWithEntries> {
2636 let _lock = self.check_lowest_cleanup_slot(slot)?;
2637
2638 if self.is_root(slot) {
2639 return self.do_get_complete_block_with_entries(
2640 slot,
2641 require_previous_blockhash,
2642 true,
2643 false,
2644 );
2645 }
2646 Err(BlockstoreError::SlotNotRooted)
2647 }
2648
    /// Dev/test helper: assembles the complete block for `slot` with
    /// caller-controlled entry population and dead-slot tolerance.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn get_complete_block_with_entries(
        &self,
        slot: Slot,
        require_previous_blockhash: bool,
        populate_entries: bool,
        allow_dead_slots: bool,
    ) -> Result<VersionedConfirmedBlockWithEntries> {
        self.do_get_complete_block_with_entries(
            slot,
            require_previous_blockhash,
            populate_entries,
            allow_dead_slots,
        )
    }
2664
    /// Assembles a `VersionedConfirmedBlock` (plus optional per-entry
    /// summaries) for `slot`.
    ///
    /// Fails with `SlotUnavailable` when the slot has no meta, is not full,
    /// or yields no entries, and with `ParentEntriesUnavailable` when
    /// `require_previous_blockhash` is set but the parent's entries cannot
    /// be read.
    fn do_get_complete_block_with_entries(
        &self,
        slot: Slot,
        require_previous_blockhash: bool,
        populate_entries: bool,
        allow_dead_slots: bool,
    ) -> Result<VersionedConfirmedBlockWithEntries> {
        let Some(slot_meta) = self.meta_cf.get(slot)? else {
            trace!("do_get_complete_block_with_entries() failed for {slot} (missing SlotMeta)");
            return Err(BlockstoreError::SlotUnavailable);
        };

        if !slot_meta.is_full() {
            trace!("do_get_complete_block_with_entries() failed for {slot} (slot not full)");
            return Err(BlockstoreError::SlotUnavailable);
        }

        let (slot_entries, _, _) = self.get_slot_entries_with_shred_info(
            slot,
            0,
            allow_dead_slots,
        )?;

        if slot_entries.is_empty() {
            trace!("do_get_complete_block_with_entries() failed for {slot} (no entries found)");
            return Err(BlockstoreError::SlotUnavailable);
        }

        // The block's hash is the hash of its final entry.
        let blockhash = slot_entries
            .last()
            .map(|entry| entry.hash)
            .unwrap_or_else(|| panic!("Rooted slot {slot:?} must have blockhash"));

        // Running index of the first transaction of each entry within the
        // block; only tracked when entry summaries are requested.
        let mut starting_transaction_index = 0;
        let mut entries = if populate_entries {
            Vec::with_capacity(slot_entries.len())
        } else {
            Vec::new()
        };

        let slot_transaction_iterator = slot_entries
            .into_iter()
            .flat_map(|entry| {
                if populate_entries {
                    entries.push(solana_transaction_status::EntrySummary {
                        num_hashes: entry.num_hashes,
                        hash: entry.hash,
                        num_transactions: entry.transactions.len() as u64,
                        starting_transaction_index,
                    });
                    starting_transaction_index += entry.transactions.len();
                }
                entry.transactions
            })
            .map(|transaction| {
                // Sanitize failures are logged but do not abort block assembly.
                if let Err(err) = transaction.sanitize() {
                    warn!(
                        "Blockstore::get_block sanitize failed: {err:?}, slot: {slot:?}, \
                         {transaction:?}",
                    );
                }
                transaction
            });

        // The previous blockhash comes from the parent slot's final entry;
        // its absence is only an error when the caller requires it.
        let parent_slot_entries = slot_meta
            .parent_slot
            .and_then(|parent_slot| {
                self.get_slot_entries_with_shred_info(
                    parent_slot,
                    0,
                    allow_dead_slots,
                )
                .ok()
                .map(|(entries, _, _)| entries)
            })
            .unwrap_or_default();
        if parent_slot_entries.is_empty() && require_previous_blockhash {
            return Err(BlockstoreError::ParentEntriesUnavailable);
        }
        let previous_blockhash = if !parent_slot_entries.is_empty() {
            get_last_hash(parent_slot_entries.iter()).unwrap()
        } else {
            Hash::default()
        };

        let (rewards, num_partitions) = self
            .rewards_cf
            .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)?
            .unwrap_or_default()
            .into();

        let block_time = self.blocktime_cf.get(slot)?;
        let block_height = self.block_height_cf.get(slot)?;

        let block = VersionedConfirmedBlock {
            previous_blockhash: previous_blockhash.to_string(),
            blockhash: blockhash.to_string(),
            // NOTE(review): assumes parent_slot is always Some for a full
            // slot -- confirm this invariant for snapshot-rooted slots.
            parent_slot: slot_meta.parent_slot.unwrap(),
            transactions: self.map_transactions_to_statuses(slot, slot_transaction_iterator)?,
            rewards,
            num_partitions,
            block_time,
            block_height,
        };

        Ok(VersionedConfirmedBlockWithEntries { block, entries })
    }
2777
2778 pub fn map_transactions_to_statuses(
2779 &self,
2780 slot: Slot,
2781 iterator: impl Iterator<Item = VersionedTransaction>,
2782 ) -> Result<Vec<VersionedTransactionWithStatusMeta>> {
2783 iterator
2784 .map(|transaction| {
2785 let signature = transaction.signatures[0];
2786 Ok(VersionedTransactionWithStatusMeta {
2787 transaction,
2788 meta: self
2789 .read_transaction_status((signature, slot))?
2790 .ok_or(BlockstoreError::MissingTransactionMetadata)?,
2791 })
2792 })
2793 .collect()
2794 }
2795
2796 fn cleanup_old_entries(&self) -> Result<()> {
2797 if !self.is_primary_access() {
2798 return Ok(());
2799 }
2800
2801 if self.transaction_status_index_cf.get(0)?.is_none() {
2803 self.transaction_status_index_cf
2804 .put(0, &TransactionStatusIndexMeta::default())?;
2805 }
2806 if self.transaction_status_index_cf.get(1)?.is_none() {
2807 self.transaction_status_index_cf
2808 .put(1, &TransactionStatusIndexMeta::default())?;
2809 }
2810
2811 let transaction_status_dummy_key = cf::TransactionStatus::as_index(2);
2814 if self
2815 .transaction_status_cf
2816 .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(transaction_status_dummy_key)?
2817 .is_some()
2818 {
2819 self.transaction_status_cf
2820 .delete(transaction_status_dummy_key)?;
2821 };
2822 let address_signatures_dummy_key = cf::AddressSignatures::as_index(2);
2823 if self
2824 .address_signatures_cf
2825 .get(address_signatures_dummy_key)?
2826 .is_some()
2827 {
2828 self.address_signatures_cf
2829 .delete(address_signatures_dummy_key)?;
2830 };
2831
2832 Ok(())
2833 }
2834
    /// Returns the cached highest slot still covered by the deprecated
    /// transaction-status primary indexes, if any.
    fn get_highest_primary_index_slot(&self) -> Option<Slot> {
        *self.highest_primary_index_slot.read().unwrap()
    }
2838
    /// Updates the cached highest deprecated-primary-index slot.
    fn set_highest_primary_index_slot(&self, slot: Option<Slot>) {
        *self.highest_primary_index_slot.write().unwrap() = slot;
    }
2842
2843 fn update_highest_primary_index_slot(&self) -> Result<()> {
2844 let iterator = self.transaction_status_index_cf.iter(IteratorMode::Start)?;
2845 let mut highest_primary_index_slot = None;
2846 for (_, data) in iterator {
2847 let meta: TransactionStatusIndexMeta = deserialize(&data).unwrap();
2848 if highest_primary_index_slot.is_none()
2849 || highest_primary_index_slot.is_some_and(|slot| slot < meta.max_slot)
2850 {
2851 highest_primary_index_slot = Some(meta.max_slot);
2852 }
2853 }
2854 if highest_primary_index_slot.is_some_and(|slot| slot != 0) {
2855 self.set_highest_primary_index_slot(highest_primary_index_slot);
2856 } else {
2857 self.db.set_clean_slot_0(true);
2858 }
2859 Ok(())
2860 }
2861
2862 fn maybe_cleanup_highest_primary_index_slot(&self, oldest_slot: Slot) -> Result<()> {
2863 let mut w_highest_primary_index_slot = self.highest_primary_index_slot.write().unwrap();
2864 if let Some(highest_primary_index_slot) = *w_highest_primary_index_slot {
2865 if oldest_slot > highest_primary_index_slot {
2866 *w_highest_primary_index_slot = None;
2867 self.db.set_clean_slot_0(true);
2868 }
2869 }
2870 Ok(())
2871 }
2872
2873 fn read_deprecated_transaction_status(
2874 &self,
2875 index: (Signature, Slot),
2876 ) -> Result<Option<TransactionStatusMeta>> {
2877 let (signature, slot) = index;
2878 let result = self
2879 .transaction_status_cf
2880 .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
2881 &cf::TransactionStatus::deprecated_key((0, signature, slot)),
2882 )?;
2883 if result.is_none() {
2884 Ok(self
2885 .transaction_status_cf
2886 .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
2887 &cf::TransactionStatus::deprecated_key((1, signature, slot)),
2888 )?
2889 .and_then(|meta| meta.try_into().ok()))
2890 } else {
2891 Ok(result.and_then(|meta| meta.try_into().ok()))
2892 }
2893 }
2894
2895 pub fn read_transaction_status(
2896 &self,
2897 index: (Signature, Slot),
2898 ) -> Result<Option<TransactionStatusMeta>> {
2899 let result = self.transaction_status_cf.get_protobuf(index)?;
2900 if result.is_none()
2901 && self
2902 .get_highest_primary_index_slot()
2903 .is_some_and(|highest_slot| highest_slot >= index.1)
2904 {
2905 self.read_deprecated_transaction_status(index)
2906 } else {
2907 Ok(result.and_then(|meta| meta.try_into().ok()))
2908 }
2909 }
2910
2911 #[inline]
2912 fn write_transaction_status_helper<'a, F>(
2913 &self,
2914 slot: Slot,
2915 signature: Signature,
2916 keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
2917 status: TransactionStatusMeta,
2918 transaction_index: usize,
2919 mut write_fn: F,
2920 ) -> Result<()>
2921 where
2922 F: FnMut(&Pubkey, Slot, u32, Signature, bool) -> Result<()>,
2923 {
2924 let status = status.into();
2925 let transaction_index = u32::try_from(transaction_index)
2926 .map_err(|_| BlockstoreError::TransactionIndexOverflow)?;
2927 self.transaction_status_cf
2928 .put_protobuf((signature, slot), &status)?;
2929
2930 for (address, writeable) in keys_with_writable {
2931 write_fn(address, slot, transaction_index, signature, writeable)?;
2932 }
2933
2934 Ok(())
2935 }
2936
    /// Persists `status` for transaction `signature` at `slot`, and writes
    /// one `AddressSignatures` row per account key recording whether the
    /// key was writable.
    pub fn write_transaction_status<'a>(
        &self,
        slot: Slot,
        signature: Signature,
        keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
        status: TransactionStatusMeta,
        transaction_index: usize,
    ) -> Result<()> {
        self.write_transaction_status_helper(
            slot,
            signature,
            keys_with_writable,
            status,
            transaction_index,
            |address, slot, tx_index, signature, writeable| {
                self.address_signatures_cf.put(
                    (*address, slot, tx_index, signature),
                    &AddressSignatureMeta { writeable },
                )
            },
        )
    }
2959
    /// Like [`Self::write_transaction_status`], but stages the
    /// `AddressSignatures` writes into `db_write_batch` instead of writing
    /// them immediately. The status record itself is still written directly
    /// (see `write_transaction_status_helper`).
    pub fn add_transaction_status_to_batch<'a>(
        &self,
        slot: Slot,
        signature: Signature,
        keys_with_writable: impl Iterator<Item = (&'a Pubkey, bool)>,
        status: TransactionStatusMeta,
        transaction_index: usize,
        db_write_batch: &mut WriteBatch,
    ) -> Result<()> {
        self.write_transaction_status_helper(
            slot,
            signature,
            keys_with_writable,
            status,
            transaction_index,
            |address, slot, tx_index, signature, writeable| {
                self.address_signatures_cf.put_in_batch(
                    db_write_batch,
                    (*address, slot, tx_index, signature),
                    &AddressSignatureMeta { writeable },
                )
            },
        )
    }
2984
2985 pub fn read_transaction_memos(
2986 &self,
2987 signature: Signature,
2988 slot: Slot,
2989 ) -> Result<Option<String>> {
2990 let memos = self.transaction_memos_cf.get((signature, slot))?;
2991 if memos.is_none()
2992 && self
2993 .get_highest_primary_index_slot()
2994 .is_some_and(|highest_slot| highest_slot >= slot)
2995 {
2996 self.transaction_memos_cf
2997 .get_raw(cf::TransactionMemos::deprecated_key(signature))
2998 } else {
2999 Ok(memos)
3000 }
3001 }
3002
    /// Stores `memos` for transaction `signature` at `slot`.
    pub fn write_transaction_memos(
        &self,
        signature: &Signature,
        slot: Slot,
        memos: String,
    ) -> Result<()> {
        self.transaction_memos_cf.put((*signature, slot), &memos)
    }
3011
    /// Stages `memos` for transaction `signature` at `slot` into
    /// `db_write_batch` rather than writing immediately.
    pub fn add_transaction_memos_to_batch(
        &self,
        signature: &Signature,
        slot: Slot,
        memos: String,
        db_write_batch: &mut WriteBatch,
    ) -> Result<()> {
        self.transaction_memos_cf
            .put_in_batch(db_write_batch, (*signature, slot), &memos)
    }
3022
    /// Verifies `slot` has not been purged and returns the read guard on
    /// `lowest_cleanup_slot`.
    ///
    /// NOTE(review): callers hold the returned guard while reading slot data
    /// so that cleanup (which presumably takes the write side of this lock)
    /// cannot purge the slot mid-access -- confirm against the purge path.
    fn check_lowest_cleanup_slot(
        &self,
        slot: Slot,
    ) -> Result<std::sync::RwLockReadGuard<'_, Slot>> {
        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
        // A value of 0 means cleanup has never run, so slot 0 itself is
        // still considered available.
        if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
            return Err(BlockstoreError::SlotCleanedUp);
        }
        Ok(lowest_cleanup_slot)
    }
3041
    /// Returns the `lowest_cleanup_slot` read guard together with the lowest
    /// slot that is still fully available (`lowest_cleanup_slot + 1`).
    fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard<'_, Slot>, Slot) {
        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
        let lowest_available_slot = (*lowest_cleanup_slot)
            .checked_add(1)
            .expect("overflow from trusted value");

        (lowest_cleanup_slot, lowest_available_slot)
    }
3059
    /// Looks up the `TransactionStatusMeta` for `signature`, returning it
    /// alongside a counter of how many index entries were scanned.
    ///
    /// Only statuses in rooted slots or in `confirmed_unrooted_slots` are
    /// returned. Falls back to the two deprecated primary indexes when the
    /// current index has no match and legacy data may still exist.
    fn get_transaction_status_with_counter(
        &self,
        signature: Signature,
        confirmed_unrooted_slots: &HashSet<Slot>,
    ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> {
        let mut counter = 0;
        // Hold the guard so cleanup cannot purge slots during the scan.
        let (lock, _) = self.ensure_lowest_cleanup_slot();
        let first_available_block = self.get_first_available_block()?;

        // Phase 1: scan the current (signature, slot)-keyed index.
        let iterator =
            self.transaction_status_cf
                .iter_current_index_filtered(IteratorMode::From(
                    (signature, first_available_block),
                    IteratorDirection::Forward,
                ))?;

        for ((sig, slot), _data) in iterator {
            counter += 1;
            if sig != signature {
                // Keys are ordered by signature first; a different signature
                // means no more entries for ours.
                break;
            }
            if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) {
                continue;
            }
            let status = self
                .transaction_status_cf
                .get_protobuf((signature, slot))?
                .and_then(|status| status.try_into().ok())
                .map(|status| (slot, status));
            return Ok((status, counter));
        }

        // Phase 2: only consult the deprecated indexes if legacy data exists.
        if self.get_highest_primary_index_slot().is_none() {
            return Ok((None, counter));
        }
        for transaction_status_cf_primary_index in 0..=1 {
            let index_iterator =
                self.transaction_status_cf
                    .iter_deprecated_index_filtered(IteratorMode::From(
                        (
                            transaction_status_cf_primary_index,
                            signature,
                            first_available_block,
                        ),
                        IteratorDirection::Forward,
                    ))?;
            for ((i, sig, slot), _data) in index_iterator {
                counter += 1;
                if i != transaction_status_cf_primary_index || sig != signature {
                    break;
                }
                if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) {
                    continue;
                }
                let status = self
                    .transaction_status_cf
                    .get_raw_protobuf_or_bincode::<StoredTransactionStatusMeta>(
                        &cf::TransactionStatus::deprecated_key((i, signature, slot)),
                    )?
                    .and_then(|status| status.try_into().ok())
                    .map(|status| (slot, status));
                return Ok((status, counter));
            }
        }
        drop(lock);

        Ok((None, counter))
    }
3129
    /// Returns the transaction status for `signature`, considering rooted
    /// slots only (no confirmed-but-unrooted slots are admitted).
    pub fn get_rooted_transaction_status(
        &self,
        signature: Signature,
    ) -> Result<Option<(Slot, TransactionStatusMeta)>> {
        self.get_transaction_status(signature, &HashSet::default())
    }
3137
    /// Returns the `(slot, status)` for `signature` from rooted slots or any
    /// slot in `confirmed_unrooted_slots`, discarding the scan counter.
    pub fn get_transaction_status(
        &self,
        signature: Signature,
        confirmed_unrooted_slots: &HashSet<Slot>,
    ) -> Result<Option<(Slot, TransactionStatusMeta)>> {
        self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots)
            .map(|(status, _)| status)
    }
3147
    /// Returns the full transaction (with status meta and block time) for
    /// `signature`, considering rooted slots only.
    pub fn get_rooted_transaction(
        &self,
        signature: Signature,
    ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
        self.get_transaction_with_status(signature, &HashSet::default())
    }
3155
3156 pub fn get_complete_transaction(
3158 &self,
3159 signature: Signature,
3160 highest_confirmed_slot: Slot,
3161 ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
3162 let max_root = self.max_root();
3163 let confirmed_unrooted_slots: HashSet<_> =
3164 AncestorIterator::new_inclusive(highest_confirmed_slot, self)
3165 .take_while(|&slot| slot > max_root)
3166 .collect();
3167 self.get_transaction_with_status(signature, &confirmed_unrooted_slots)
3168 }
3169
3170 fn get_transaction_with_status(
3171 &self,
3172 signature: Signature,
3173 confirmed_unrooted_slots: &HashSet<Slot>,
3174 ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
3175 if let Some((slot, meta)) =
3176 self.get_transaction_status(signature, confirmed_unrooted_slots)?
3177 {
3178 let transaction = self
3179 .find_transaction_in_slot(slot, signature)?
3180 .ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; let block_time = self.get_block_time(slot)?;
3183 Ok(Some(ConfirmedTransactionWithStatusMeta {
3184 slot,
3185 tx_with_meta: TransactionWithStatusMeta::Complete(
3186 VersionedTransactionWithStatusMeta { transaction, meta },
3187 ),
3188 block_time,
3189 }))
3190 } else {
3191 Ok(None)
3192 }
3193 }
3194
3195 fn find_transaction_in_slot(
3196 &self,
3197 slot: Slot,
3198 signature: Signature,
3199 ) -> Result<Option<VersionedTransaction>> {
3200 let slot_entries = self.get_slot_entries(slot, 0)?;
3201 Ok(slot_entries
3202 .iter()
3203 .cloned()
3204 .flat_map(|entry| entry.transactions)
3205 .map(|transaction| {
3206 if let Err(err) = transaction.sanitize() {
3207 warn!(
3208 "Blockstore::find_transaction_in_slot sanitize failed: {err:?}, slot: \
3209 {slot:?}, {transaction:?}",
3210 );
3211 }
3212 transaction
3213 })
3214 .find(|transaction| transaction.signatures[0] == signature))
3215 }
3216
    /// Finds `(slot, signature)` pairs for transactions referencing
    /// `_pubkey` within `[_start_slot, _end_slot]`.
    ///
    /// NOTE(review): this lookup is currently stubbed out -- it ignores its
    /// arguments and always returns an empty vector.
    fn find_address_signatures(
        &self,
        _pubkey: Pubkey,
        _start_slot: Slot,
        _end_slot: Slot,
    ) -> Result<Vec<(Slot, Signature)>> {
        Ok(vec![])
    }
3227
    /// Returns all `(slot, signature)` pairs for transactions in exactly
    /// `slot` that reference `pubkey`, in transaction-index order.
    fn find_address_signatures_for_slot(
        &self,
        pubkey: Pubkey,
        slot: Slot,
    ) -> Result<Vec<(Slot, Signature)>> {
        // Hold the guard so cleanup cannot purge this slot mid-scan.
        let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot();
        let mut signatures: Vec<(Slot, Signature)> = vec![];
        if slot < lowest_available_slot {
            // Slot already cleaned up; nothing to report.
            return Ok(signatures);
        }
        let index_iterator =
            self.address_signatures_cf
                .iter_current_index_filtered(IteratorMode::From(
                    (
                        pubkey,
                        slot.max(lowest_available_slot),
                        0,
                        Signature::default(),
                    ),
                    IteratorDirection::Forward,
                ))?;
        for ((address, transaction_slot, _transaction_index, signature), _) in index_iterator {
            // Forward iteration begins at `slot` (the early return above
            // guarantees slot >= lowest_available_slot), so the first key
            // with a larger slot or a different address ends the scan.
            if transaction_slot > slot || address != pubkey {
                break;
            }
            signatures.push((slot, signature));
        }
        drop(lock);
        Ok(signatures)
    }
3260
3261 pub fn get_confirmed_signatures_for_address(
3264 &self,
3265 pubkey: Pubkey,
3266 start_slot: Slot,
3267 end_slot: Slot,
3268 ) -> Result<Vec<Signature>> {
3269 self.find_address_signatures(pubkey, start_slot, end_slot)
3270 .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
3271 }
3272
3273 fn get_block_signatures_rev(&self, slot: Slot) -> Result<Vec<Signature>> {
3274 let block = self.get_complete_block(slot, false).map_err(|err| {
3275 BlockstoreError::Io(IoError::other(format!("Unable to get block: {err}")))
3276 })?;
3277
3278 Ok(block
3279 .transactions
3280 .into_iter()
3281 .rev()
3282 .filter_map(|transaction_with_meta| {
3283 transaction_with_meta
3284 .transaction
3285 .signatures
3286 .into_iter()
3287 .next()
3288 })
3289 .collect())
3290 }
3291
    /// Returns up to `limit` confirmed transaction signatures referencing
    /// `address`, newest first, scanning backwards from `before` (or
    /// `highest_slot`) and stopping at `until` (or the first available
    /// block). Only rooted slots and confirmed-but-unrooted ancestors of
    /// `highest_slot` are considered.
    pub fn get_confirmed_signatures_for_address2(
        &self,
        address: Pubkey,
        highest_slot: Slot, before: Option<Signature>,
        until: Option<Signature>,
        limit: usize,
    ) -> Result<SignatureInfosForAddress> {
        let max_root = self.max_root();
        let confirmed_unrooted_slots: HashSet<_> =
            AncestorIterator::new_inclusive(highest_slot, self)
                .take_while(|&slot| slot > max_root)
                .collect();

        // Resolve the slot of `before` and collect the signatures of that
        // slot's block up to and including `before`, to exclude them below.
        let mut get_before_slot_timer = Measure::start("get_before_slot_timer");
        let (slot, mut before_excluded_signatures) = match before {
            None => (highest_slot, None),
            Some(before) => {
                let transaction_status =
                    self.get_transaction_status(before, &confirmed_unrooted_slots)?;
                match transaction_status {
                    None => return Ok(SignatureInfosForAddress::default()),
                    Some((slot, _)) => {
                        let mut slot_signatures = self.get_block_signatures_rev(slot)?;
                        if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
                            slot_signatures.truncate(pos + 1);
                        }

                        (
                            slot,
                            Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
                        )
                    }
                }
            }
        };
        get_before_slot_timer.stop();

        // Resolve the slot of `until` and collect the signatures from
        // `until` onward in that slot, also for exclusion.
        let first_available_block = self.get_first_available_block()?;
        let mut get_until_slot_timer = Measure::start("get_until_slot_timer");
        let (lowest_slot, until_excluded_signatures) = match until {
            None => (first_available_block, HashSet::new()),
            Some(until) => {
                let transaction_status =
                    self.get_transaction_status(until, &confirmed_unrooted_slots)?;
                match transaction_status {
                    None => (first_available_block, HashSet::new()),
                    Some((slot, _)) => {
                        let mut slot_signatures = self.get_block_signatures_rev(slot)?;
                        if let Some(pos) = slot_signatures.iter().position(|&x| x == until) {
                            slot_signatures = slot_signatures.split_off(pos);
                        }

                        (slot, slot_signatures.into_iter().collect::<HashSet<_>>())
                    }
                }
            }
        };
        get_until_slot_timer.stop();

        let mut address_signatures = vec![];

        // The starting slot's signatures are fetched separately because the
        // reverse index iteration below starts just before that slot.
        let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer");
        let mut signatures = self.find_address_signatures_for_slot(address, slot)?;
        signatures.reverse();
        if let Some(excluded_signatures) = before_excluded_signatures.take() {
            address_signatures.extend(
                signatures
                    .into_iter()
                    .filter(|(_, signature)| !excluded_signatures.contains(signature)),
            )
        } else {
            address_signatures.append(&mut signatures);
        }
        get_initial_slot_timer.stop();

        // Walk the AddressSignatures index backwards from (address, slot),
        // collecting rooted/confirmed entries until `limit` is reached or we
        // pass below `lowest_slot`.
        let mut address_signatures_iter_timer = Measure::start("iter_timer");
        let mut iterator =
            self.address_signatures_cf
                .iter_current_index_filtered(IteratorMode::From(
                    (address, slot, 0, Signature::default()),
                    IteratorDirection::Reverse,
                ))?;

        while address_signatures.len() < limit {
            if let Some(((key_address, slot, _transaction_index, signature), _)) = iterator.next() {
                if slot < lowest_slot {
                    break;
                }
                if key_address == address {
                    if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
                        address_signatures.push((slot, signature));
                    }
                    continue;
                }
            }
            break;
        }
        address_signatures_iter_timer.stop();

        let address_signatures_iter = address_signatures
            .into_iter()
            .filter(|(_, signature)| !until_excluded_signatures.contains(signature))
            .take(limit);

        // Attach status, memo, and block time to each surviving signature.
        let mut get_status_info_timer = Measure::start("get_status_info_timer");
        let mut infos = vec![];
        for (slot, signature) in address_signatures_iter {
            let transaction_status =
                self.get_transaction_status(signature, &confirmed_unrooted_slots)?;
            let err = transaction_status.and_then(|(_slot, status)| status.status.err());
            let memo = self.read_transaction_memos(signature, slot)?;
            let block_time = self.get_block_time(slot)?;
            infos.push(ConfirmedTransactionStatusWithSignature {
                signature,
                slot,
                err,
                memo,
                block_time,
            });
        }
        get_status_info_timer.stop();

        datapoint_info!(
            "blockstore-get-conf-sigs-for-addr-2",
            (
                "get_before_slot_us",
                get_before_slot_timer.as_us() as i64,
                i64
            ),
            (
                "get_initial_slot_us",
                get_initial_slot_timer.as_us() as i64,
                i64
            ),
            (
                "address_signatures_iter_us",
                address_signatures_iter_timer.as_us() as i64,
                i64
            ),
            (
                "get_status_info_us",
                get_status_info_timer.as_us() as i64,
                i64
            ),
            (
                "get_until_slot_us",
                get_until_slot_timer.as_us() as i64,
                i64
            )
        );

        Ok(SignatureInfosForAddress {
            infos,
            // If `before` was provided and not found, we returned early above.
            found_before: true, })
    }
3463
3464 pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
3465 self.rewards_cf
3466 .get_protobuf_or_bincode::<Rewards>(index)
3467 .map(|result| result.map(|option| option.into()))
3468 }
3469
    /// Stores `rewards` (converted to the protobuf representation) for `index`.
    pub fn write_rewards(&self, index: Slot, rewards: RewardsAndNumPartitions) -> Result<()> {
        let rewards = rewards.into();
        self.rewards_cf.put_protobuf(index, &rewards)
    }
3474
    /// Returns the `num` most recent performance samples, newest first.
    ///
    /// Each stored value is first decoded as `PerfSampleV2`; if the bytes
    /// are too short for V2 the decode fails with `UnexpectedEof` and the
    /// value is retried as the older, smaller `PerfSampleV1`.
    pub fn get_recent_perf_samples(&self, num: usize) -> Result<Vec<(Slot, PerfSample)>> {
        let samples =
            self.perf_samples_cf
                .iter(IteratorMode::End)?
                .take(num)
                .map(|(slot, data)| {
                    deserialize::<PerfSampleV2>(&data)
                        .map(|sample| (slot, sample.into()))
                        .or_else(|err| {
                            match &*err {
                                bincode::ErrorKind::Io(io_err)
                                    if matches!(io_err.kind(), ErrorKind::UnexpectedEof) =>
                                {
                                    // Likely a V1 sample: fall through to the
                                    // V1 decode below.
                                }
                                _ => return Err(err),
                            }

                            deserialize::<PerfSampleV1>(&data).map(|sample| (slot, sample.into()))
                        })
                        .map_err(Into::into)
                });

        samples.collect()
    }
3504
    /// Stores `perf_sample` (bincode-serialized) for `index`.
    pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSampleV2) -> Result<()> {
        let bytes =
            serialize(&perf_sample).expect("`PerfSampleV2` can be serialized with `bincode`");
        self.perf_samples_cf.put_bytes(index, &bytes)
    }
3511
    /// Returns the entries for `slot` starting at `shred_start_index`,
    /// discarding the shred-count and slot-full information.
    pub fn get_slot_entries(&self, slot: Slot, shred_start_index: u64) -> Result<Vec<Entry>> {
        self.get_slot_entries_with_shred_info(slot, shred_start_index, false)
            .map(|x| x.0)
    }
3517
    /// Returns the entries for `slot` starting at shred `start_index`, the
    /// number of shreds those entries span, and whether the slot is full.
    ///
    /// Fails with `DeadSlot` for dead slots unless `allow_dead_slots` is set.
    pub fn get_slot_entries_with_shred_info(
        &self,
        slot: Slot,
        start_index: u64,
        allow_dead_slots: bool,
    ) -> Result<(Vec<Entry>, u64, bool)> {
        let (completed_ranges, slot_meta) = self.get_completed_ranges(slot, start_index)?;

        // Dead slots are rejected (unless allowed); otherwise an absence of
        // completed ranges just means there is nothing to return yet.
        if self.is_dead(slot) && !allow_dead_slots {
            return Err(BlockstoreError::DeadSlot);
        } else if completed_ranges.is_empty() {
            return Ok((vec![], 0, false));
        }

        // Non-empty completed_ranges implies a SlotMeta was found.
        let slot_meta = slot_meta.unwrap();
        let num_shreds = completed_ranges
            .last()
            .map(|&Range { end, .. }| u64::from(end) - start_index)
            .unwrap_or(0);

        let entries = self.get_slot_entries_in_block(slot, completed_ranges, Some(&slot_meta))?;
        Ok((entries, num_shreds, slot_meta.is_full()))
    }
3547
    /// Collects the accounts referenced by transactions in slots
    /// `starting_slot..=ending_slot` (inclusive), including addresses stored
    /// in any referenced address lookup tables.
    ///
    /// The returned bool reports whether a scanned transaction *might* have
    /// extended a lookup table via CPI, in which case the set may be
    /// incomplete.
    pub fn get_accounts_used_in_range(
        &self,
        bank: &Bank,
        starting_slot: Slot,
        ending_slot: Slot,
    ) -> (DashSet<Pubkey>, bool) {
        let result = DashSet::new();
        let lookup_tables = DashSet::new();
        let possible_cpi_alt_extend = AtomicBool::new(false);

        // Helper: copy each referenced key into the concurrent set.
        fn add_to_set<'a>(set: &DashSet<Pubkey>, iter: impl IntoIterator<Item = &'a Pubkey>) {
            iter.into_iter().for_each(|key| {
                set.insert(*key);
            });
        }

        // Slots are scanned in parallel; entries within a slot in parallel too.
        (starting_slot..=ending_slot)
            .into_par_iter()
            .for_each(|slot| {
                if let Ok(entries) = self.get_slot_entries(slot, 0) {
                    entries.into_par_iter().for_each(|entry| {
                        entry.transactions.into_iter().for_each(|tx| {
                            // Record referenced lookup tables; their stored
                            // addresses are resolved after the scan.
                            if let Some(lookups) = tx.message.address_table_lookups() {
                                add_to_set(
                                    &lookup_tables,
                                    lookups.iter().map(|lookup| &lookup.account_key),
                                );
                            }
                            if let Ok(tx) = bank.fully_verify_transaction(tx.clone()) {
                                // Fully resolved: account_keys() covers keys
                                // loaded through lookup tables as well.
                                add_to_set(&result, tx.message().account_keys().iter());
                            } else {
                                // Verification failed; fall back to the static
                                // keys plus a best-effort instruction scan.
                                add_to_set(&result, tx.message.static_account_keys());

                                let tx = SanitizedVersionedTransaction::try_from(tx)
                                    .expect("transaction failed to sanitize");

                                let alt_scan_extensions = scan_transaction(&tx);
                                add_to_set(&result, &alt_scan_extensions.accounts);
                                if alt_scan_extensions.possibly_incomplete {
                                    possible_cpi_alt_extend.store(true, Ordering::Relaxed);
                                }
                            }
                        });
                    });
                }
            });

        // Resolve every recorded lookup table: the table account itself and
        // every address it stores count as "used".
        lookup_tables.into_par_iter().for_each(|lookup_table_key| {
            bank.get_account(&lookup_table_key)
                .map(|lookup_table_account| {
                    add_to_set(&result, &[lookup_table_key]);
                    AddressLookupTable::deserialize(lookup_table_account.data()).map(|t| {
                        add_to_set(&result, &t.addresses[..]);
                    })
                });
        });

        (result, possible_cpi_alt_extend.into_inner())
    }
3613
3614 fn get_completed_ranges(
3615 &self,
3616 slot: Slot,
3617 start_index: u64,
3618 ) -> Result<(CompletedRanges, Option<SlotMeta>)> {
3619 let Some(slot_meta) = self.meta_cf.get(slot)? else {
3620 return Ok((vec![], None));
3621 };
3622 let completed_ranges = Self::get_completed_data_ranges(
3624 start_index as u32,
3625 &slot_meta.completed_data_indexes,
3626 slot_meta.consumed as u32,
3627 );
3628
3629 Ok((completed_ranges, Some(slot_meta)))
3630 }
3631
    /// Converts the set of completed-data-block end indexes into half-open
    /// shred-index ranges, one per completed data block, beginning at
    /// `start_index` and bounded above by `consumed` (the first
    /// non-consecutive shred index).
    fn get_completed_data_ranges(
        start_index: u32,
        completed_data_indexes: &CompletedDataIndexes,
        consumed: u32,
    ) -> CompletedRanges {
        // `consumed` is the first missing shred, so it can never have been
        // recorded as the final shred of a completed data block.
        assert!(!completed_data_indexes.contains(&consumed));
        // Each recorded index is the *last* shred of a block; `scan` threads
        // the running block start through, emitting start..index + 1 and
        // advancing the start past the block just emitted.
        completed_data_indexes
            .range(start_index..consumed)
            .scan(start_index, |start, index| {
                let out = *start..index + 1;
                *start = index + 1;
                Some(out)
            })
            .collect()
    }
3650
    /// Fetches the data shreds covering `completed_ranges` for `slot`,
    /// deshreds each range into its payload, and deserializes the payloads
    /// into entries, flattened in order.
    ///
    /// Ranges must be non-empty, strictly increasing, and contiguous (each
    /// range's end equals the next range's start); this allows a single
    /// batched multi-get across all ranges.
    fn get_slot_entries_in_block(
        &self,
        slot: Slot,
        completed_ranges: CompletedRanges,
        slot_meta: Option<&SlotMeta>,
    ) -> Result<Vec<Entry>> {
        debug_assert!(completed_ranges
            .iter()
            .tuple_windows()
            .all(|(a, b)| a.start < a.end && a.end == b.start && b.start < b.end));
        // A shred that should exist but is missing means either cleanup raced
        // with this read (tolerated: returns an error) or corruption (panic).
        let maybe_panic = |index: u64| {
            if let Some(slot_meta) = slot_meta {
                if slot > self.lowest_cleanup_slot() {
                    panic!("Missing shred. slot: {slot}, index: {index}, slot meta: {slot_meta:?}");
                }
            }
        };
        let Some((&Range { start, .. }, &Range { end, .. })) =
            completed_ranges.first().zip(completed_ranges.last())
        else {
            return Ok(vec![]);
        };
        let indices = u64::from(start)..u64::from(end);
        let keys = indices.clone().map(|index| (slot, index));
        let keys = self.data_shred_cf.multi_get_keys(keys);
        // Lazy stream of shred payloads (or errors), consumed below one
        // completed range at a time via `by_ref().take(..)`.
        let mut shreds =
            self.data_shred_cf
                .multi_get_bytes(&keys)
                .zip(indices)
                .map(|(shred, index)| {
                    shred?.ok_or_else(|| {
                        maybe_panic(index);
                        BlockstoreError::MissingShred(slot, index)
                    })
                });
        completed_ranges
            .into_iter()
            .map(|Range { start, end }| end - start)
            .map(|num_shreds| {
                shreds
                    .by_ref()
                    .take(num_shreds as usize)
                    .process_results(|shreds| Shredder::deshred(shreds))?
                    .map_err(|e| {
                        BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
                            format!("could not reconstruct entries buffer from shreds: {e:?}"),
                        )))
                    })
                    .and_then(|payload| {
                        wincode::deserialize::<Vec<Entry>>(&payload).map_err(|e| {
                            BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
                                format!("could not reconstruct entries: {e:?}"),
                            )))
                        })
                    })
            })
            .flatten_ok()
            .collect()
    }
3716
3717 pub fn get_entries_in_data_block(
3718 &self,
3719 slot: Slot,
3720 range: Range<u32>,
3721 slot_meta: Option<&SlotMeta>,
3722 ) -> Result<Vec<Entry>> {
3723 self.get_slot_entries_in_block(slot, vec![range], slot_meta)
3724 }
3725
    /// Validates the last FEC set of `slot` and, on success, returns its
    /// merkle root ("block id") as determined by `feature_set`.
    ///
    /// Any failure to perform the check is reported as
    /// `BlockstoreProcessorError::IncompleteFinalFecSet`, marking the block
    /// as dead.
    pub fn check_last_fec_set_and_get_block_id(
        &self,
        slot: Slot,
        bank_hash: Hash,
        feature_set: &FeatureSet,
    ) -> std::result::Result<Option<Hash>, BlockstoreProcessorError> {
        let results = self.check_last_fec_set(slot);
        let Ok(results) = results else {
            warn!(
                "Unable to check the last fec set for slot {slot} {bank_hash}, marking as dead: \
                 {results:?}",
            );
            return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
        };
        // Record the incomplete final FEC set in metrics; whether it is
        // rejected is decided by the feature-gated call below.
        if results.last_fec_set_merkle_root.is_none() {
            datapoint_warn!("incomplete_final_fec_set", ("slot", slot, i64),);
        }
        results.get_last_fec_set_merkle_root(feature_set)
    }
3753
    /// Inspects the final `DATA_SHREDS_PER_FEC_BLOCK` data shreds of `slot`
    /// and reports whether they form one consistent last FEC set.
    ///
    /// Yields `last_fec_set_merkle_root: None` when the slot has too few
    /// shreds or the trailing shreds disagree on their merkle root.
    fn check_last_fec_set(&self, slot: Slot) -> Result<LastFECSetCheckResults> {
        let slot_meta = self.meta(slot)?.ok_or(BlockstoreError::SlotUnavailable)?;
        let last_shred_index = slot_meta
            .last_index
            .ok_or(BlockstoreError::UnknownLastIndex(slot))?;

        const MINIMUM_INDEX: u64 = DATA_SHREDS_PER_FEC_BLOCK as u64 - 1;
        #[cfg(test)]
        const_assert_eq!(MINIMUM_INDEX, 31);
        // The last FEC set spans the final DATA_SHREDS_PER_FEC_BLOCK shreds;
        // a shorter slot cannot pass the check.
        let Some(start_index) = last_shred_index.checked_sub(MINIMUM_INDEX) else {
            warn!(
                "Slot {slot} has only {} shreds, fewer than the {DATA_SHREDS_PER_FEC_BLOCK} \
                 required",
                last_shred_index + 1
            );
            return Ok(LastFECSetCheckResults {
                last_fec_set_merkle_root: None,
                is_retransmitter_signed: false,
            });
        };
        let keys = self
            .data_shred_cf
            .multi_get_keys((start_index..=last_shred_index).map(|index| (slot, index)));

        // Extract (merkle root, retransmitter-signed flag) per trailing
        // shred; consecutive duplicates are collapsed, so a consistent FEC
        // set reduces to exactly one element. Errors never compare equal
        // (`as_ref().ok()` is None) and therefore survive dedup.
        let deduped_shred_checks: Vec<(Hash, bool)> = self
            .data_shred_cf
            .multi_get_bytes(&keys)
            .enumerate()
            .map(|(offset, shred_bytes)| {
                let shred_bytes = shred_bytes.ok().flatten().ok_or_else(|| {
                    let shred_index = start_index + u64::try_from(offset).unwrap();
                    warn!("Missing shred for {slot} index {shred_index}");
                    BlockstoreError::MissingShred(slot, shred_index)
                })?;
                let is_retransmitter_signed =
                    shred::layout::is_retransmitter_signed_variant(&shred_bytes).map_err(|_| {
                        let shred_index = start_index + u64::try_from(offset).unwrap();
                        warn!("Found legacy shred for {slot}, index {shred_index}");
                        BlockstoreError::LegacyShred(slot, shred_index)
                    })?;
                let merkle_root =
                    shred::layout::get_merkle_root(&shred_bytes).ok_or_else(|| {
                        let shred_index = start_index + u64::try_from(offset).unwrap();
                        warn!("Unable to read merkle root for {slot}, index {shred_index}");
                        BlockstoreError::MissingMerkleRoot(slot, shred_index)
                    })?;
                Ok((merkle_root, is_retransmitter_signed))
            })
            .dedup_by(|res1, res2| res1.as_ref().ok() == res2.as_ref().ok())
            .collect::<Result<Vec<(Hash, bool)>>>()?;

        // More than one distinct (root, signed) pair means the trailing
        // shreds straddle different FEC sets: the check fails.
        let &[(block_id, is_retransmitter_signed)] = deduped_shred_checks.as_slice() else {
            return Ok(LastFECSetCheckResults {
                last_fec_set_merkle_root: None,
                is_retransmitter_signed: false,
            });
        };
        Ok(LastFECSetCheckResults {
            last_fec_set_merkle_root: Some(block_id),
            is_retransmitter_signed,
        })
    }
3835
3836 pub fn get_slots_since(&self, slots: &[Slot]) -> Result<HashMap<Slot, Vec<Slot>>> {
3839 let keys = self.meta_cf.multi_get_keys(slots.iter().copied());
3840 let slot_metas = self.meta_cf.multi_get(&keys);
3841
3842 let mut slots_since: HashMap<Slot, Vec<Slot>> = HashMap::with_capacity(slots.len());
3843 for meta in slot_metas.into_iter() {
3844 let meta = meta?;
3845 if let Some(meta) = meta {
3846 slots_since.insert(meta.slot, meta.next_slots);
3847 }
3848 }
3849
3850 Ok(slots_since)
3851 }
3852
3853 pub fn is_root(&self, slot: Slot) -> bool {
3854 matches!(self.roots_cf.get(slot), Ok(Some(true)))
3855 }
3856
3857 pub fn is_skipped(&self, slot: Slot) -> bool {
3861 let lowest_root = self
3862 .rooted_slot_iterator(0)
3863 .ok()
3864 .and_then(|mut iter| iter.next())
3865 .unwrap_or_default();
3866 match self.roots_cf.get(slot).ok().flatten() {
3867 Some(_) => false,
3868 None => slot < self.max_root() && slot > lowest_root,
3869 }
3870 }
3871
3872 pub fn insert_bank_hash(&self, slot: Slot, frozen_hash: Hash, is_duplicate_confirmed: bool) {
3873 if let Some(prev_value) = self.bank_hash_cf.get(slot).unwrap() {
3874 if prev_value.frozen_hash() == frozen_hash && prev_value.is_duplicate_confirmed() {
3875 return;
3879 }
3880 }
3881 let data = FrozenHashVersioned::Current(FrozenHashStatus {
3882 frozen_hash,
3883 is_duplicate_confirmed,
3884 });
3885 self.bank_hash_cf.put(slot, &data).unwrap()
3886 }
3887
3888 pub fn get_bank_hash(&self, slot: Slot) -> Option<Hash> {
3889 self.bank_hash_cf
3890 .get(slot)
3891 .unwrap()
3892 .map(|versioned| versioned.frozen_hash())
3893 }
3894
3895 pub fn is_duplicate_confirmed(&self, slot: Slot) -> bool {
3896 self.bank_hash_cf
3897 .get(slot)
3898 .unwrap()
3899 .map(|versioned| versioned.is_duplicate_confirmed())
3900 .unwrap_or(false)
3901 }
3902
3903 pub fn insert_optimistic_slot(
3904 &self,
3905 slot: Slot,
3906 hash: &Hash,
3907 timestamp: UnixTimestamp,
3908 ) -> Result<()> {
3909 let slot_data = OptimisticSlotMetaVersioned::new(*hash, timestamp);
3910 self.optimistic_slots_cf.put(slot, &slot_data)
3911 }
3912
3913 pub fn get_optimistic_slot(&self, slot: Slot) -> Result<Option<(Hash, UnixTimestamp)>> {
3915 Ok(self
3916 .optimistic_slots_cf
3917 .get(slot)?
3918 .map(|meta| (meta.hash(), meta.timestamp())))
3919 }
3920
3921 pub fn get_latest_optimistic_slots(
3923 &self,
3924 num: usize,
3925 ) -> Result<Vec<(Slot, Hash, UnixTimestamp)>> {
3926 let iter = self.reversed_optimistic_slots_iterator()?;
3927 Ok(iter.take(num).collect())
3928 }
3929
3930 pub fn set_duplicate_confirmed_slots_and_hashes(
3931 &self,
3932 duplicate_confirmed_slot_hashes: impl Iterator<Item = (Slot, Hash)>,
3933 ) -> Result<()> {
3934 let mut write_batch = self.get_write_batch()?;
3935 for (slot, frozen_hash) in duplicate_confirmed_slot_hashes {
3936 let data = FrozenHashVersioned::Current(FrozenHashStatus {
3937 frozen_hash,
3938 is_duplicate_confirmed: true,
3939 });
3940 self.bank_hash_cf
3941 .put_in_batch(&mut write_batch, slot, &data)?;
3942 }
3943
3944 self.write_batch(write_batch)?;
3945 Ok(())
3946 }
3947
3948 pub fn set_roots<'a>(&self, rooted_slots: impl Iterator<Item = &'a Slot>) -> Result<()> {
3949 let mut write_batch = self.get_write_batch()?;
3950 let mut max_new_rooted_slot = 0;
3951 for slot in rooted_slots {
3952 max_new_rooted_slot = std::cmp::max(max_new_rooted_slot, *slot);
3953 self.roots_cf.put_in_batch(&mut write_batch, *slot, &true)?;
3954 }
3955
3956 self.write_batch(write_batch)?;
3957 self.max_root
3958 .fetch_max(max_new_rooted_slot, Ordering::Relaxed);
3959 Ok(())
3960 }
3961
3962 pub fn mark_slots_as_if_rooted_normally_at_startup(
3963 &self,
3964 slots: Vec<(Slot, Option<Hash>)>,
3965 with_hash: bool,
3966 ) -> Result<()> {
3967 self.set_roots(slots.iter().map(|(slot, _hash)| slot))?;
3968 if with_hash {
3969 self.set_duplicate_confirmed_slots_and_hashes(
3970 slots
3971 .into_iter()
3972 .map(|(slot, maybe_hash)| (slot, maybe_hash.unwrap())),
3973 )?;
3974 }
3975 Ok(())
3976 }
3977
3978 pub fn is_dead(&self, slot: Slot) -> bool {
3979 matches!(
3980 self.dead_slots_cf
3981 .get(slot)
3982 .expect("fetch from DeadSlots column family failed"),
3983 Some(true)
3984 )
3985 }
3986
3987 pub fn set_dead_slot(&self, slot: Slot) -> Result<()> {
3988 self.dead_slots_cf.put(slot, &true)
3989 }
3990
3991 pub fn remove_dead_slot(&self, slot: Slot) -> Result<()> {
3992 self.dead_slots_cf.delete(slot)
3993 }
3994
3995 pub fn remove_slot_duplicate_proof(&self, slot: Slot) -> Result<()> {
3996 self.duplicate_slots_cf.delete(slot)
3997 }
3998
3999 pub fn get_first_duplicate_proof(&self) -> Option<(Slot, DuplicateSlotProof)> {
4000 let mut iter = self
4001 .duplicate_slots_cf
4002 .iter(IteratorMode::From(0, IteratorDirection::Forward))
4003 .unwrap();
4004 iter.next()
4005 .map(|(slot, proof_bytes)| (slot, deserialize(&proof_bytes).unwrap()))
4006 }
4007
4008 pub fn store_duplicate_slot<S, T>(&self, slot: Slot, shred1: S, shred2: T) -> Result<()>
4009 where
4010 shred::Payload: From<S> + From<T>,
4011 {
4012 let duplicate_slot_proof = DuplicateSlotProof::new(shred1, shred2);
4013 self.duplicate_slots_cf.put(slot, &duplicate_slot_proof)
4014 }
4015
4016 pub fn get_duplicate_slot(&self, slot: u64) -> Option<DuplicateSlotProof> {
4017 self.duplicate_slots_cf
4018 .get(slot)
4019 .expect("fetch from DuplicateSlots column family failed")
4020 }
4021
4022 pub fn is_shred_duplicate(&self, shred: &Shred) -> Option<Vec<u8>> {
4029 let (slot, index, shred_type) = shred.id().unpack();
4030 let mut other = match shred_type {
4031 ShredType::Data => self.get_data_shred(slot, u64::from(index)),
4032 ShredType::Code => self.get_coding_shred(slot, u64::from(index)),
4033 }
4034 .expect("fetch from DuplicateSlots column family failed")?;
4035 if let Ok(signature) = shred.retransmitter_signature() {
4036 if let Err(err) = shred::layout::set_retransmitter_signature(&mut other, &signature) {
4037 error!("set retransmitter signature failed: {err:?}");
4038 }
4039 }
4040 (other != **shred.payload()).then_some(other)
4041 }
4042
4043 pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool {
4044 self.duplicate_slots_cf
4045 .get(slot)
4046 .expect("fetch from DuplicateSlots column family failed")
4047 .is_some()
4048 }
4049
4050 pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
4051 let orphans_iter = self
4052 .orphans_cf
4053 .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4054 Ok(orphans_iter.map(|(slot, _)| slot))
4055 }
4056
4057 pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
4058 let dead_slots_iterator = self
4059 .dead_slots_cf
4060 .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4061 Ok(dead_slots_iterator.map(|(slot, _)| slot))
4062 }
4063
4064 pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
4065 let duplicate_slots_iterator = self
4066 .duplicate_slots_cf
4067 .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
4068 Ok(duplicate_slots_iterator.map(|(slot, _)| slot))
4069 }
4070
4071 pub fn has_existing_shreds_for_slot(&self, slot: Slot) -> bool {
4072 match self.meta(slot).unwrap() {
4073 Some(meta) => meta.received > 0,
4074 None => false,
4075 }
4076 }
4077
4078 pub fn max_root(&self) -> Slot {
4080 self.max_root.load(Ordering::Relaxed)
4081 }
4082
4083 pub fn lowest_slot(&self) -> Slot {
4085 for (slot, meta) in self
4086 .slot_meta_iterator(0)
4087 .expect("unable to iterate over meta")
4088 {
4089 if slot > 0 && meta.received > 0 {
4090 return slot;
4091 }
4092 }
4093 self.max_root()
4095 }
4096
4097 fn lowest_slot_with_genesis(&self) -> Slot {
4098 for (slot, meta) in self
4099 .slot_meta_iterator(0)
4100 .expect("unable to iterate over meta")
4101 {
4102 if meta.received > 0 {
4103 return slot;
4104 }
4105 }
4106 self.max_root()
4108 }
4109
4110 pub fn highest_slot(&self) -> Result<Option<Slot>> {
4112 let highest_slot = self
4113 .meta_cf
4114 .iter(IteratorMode::End)?
4115 .next()
4116 .map(|(slot, _)| slot);
4117 Ok(highest_slot)
4118 }
4119
4120 pub fn lowest_cleanup_slot(&self) -> Slot {
4121 *self.lowest_cleanup_slot.read().unwrap()
4122 }
4123
4124 pub fn storage_size(&self) -> Result<u64> {
4125 self.db.storage_size()
4126 }
4127
4128 pub fn total_data_shred_storage_size(&self) -> Result<i64> {
4133 self.data_shred_cf
4134 .get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
4135 }
4136
4137 pub fn total_coding_shred_storage_size(&self) -> Result<i64> {
4142 self.code_shred_cf
4143 .get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
4144 }
4145
4146 pub fn is_primary_access(&self) -> bool {
4148 self.db.is_primary_access()
4149 }
4150
    /// Walks ancestors of `start_root` (default: the max root) down to
    /// `end_slot` (default: the lowest cleanup slot) and roots any ancestor
    /// not yet marked as a root.
    ///
    /// Returns the number of slots queued for fixing. Checks `exit` during
    /// both phases and returns early with a partial count when set. Errors
    /// with `SlotNotRooted` if an explicit `start_root` is not rooted.
    pub fn scan_and_fix_roots(
        &self,
        start_root: Option<Slot>,
        end_slot: Option<Slot>,
        exit: &AtomicBool,
    ) -> Result<usize> {
        // Hold the read lock for the whole scan so cleanup cannot purge
        // slots out from under the ancestor walk.
        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();

        let start_root = if let Some(slot) = start_root {
            if !self.is_root(slot) {
                return Err(BlockstoreError::SlotNotRooted);
            }
            slot
        } else {
            self.max_root()
        };
        let end_slot = end_slot.unwrap_or(*lowest_cleanup_slot);
        let ancestor_iterator =
            AncestorIterator::new(start_root, self).take_while(|&slot| slot >= end_slot);

        // Phase 1: collect un-rooted ancestors.
        let mut find_missing_roots = Measure::start("find_missing_roots");
        let mut roots_to_fix = vec![];
        for slot in ancestor_iterator.filter(|slot| !self.is_root(*slot)) {
            if exit.load(Ordering::Relaxed) {
                return Ok(0);
            }
            roots_to_fix.push(slot);
        }
        find_missing_roots.stop();
        // Phase 2: root them in bounded chunks so an exit request is honored
        // promptly.
        let mut fix_roots = Measure::start("fix_roots");
        if !roots_to_fix.is_empty() {
            info!("{} slots to be rooted", roots_to_fix.len());
            let chunk_size = 100;
            for (i, chunk) in roots_to_fix.chunks(chunk_size).enumerate() {
                if exit.load(Ordering::Relaxed) {
                    return Ok(i * chunk_size);
                }
                trace!("{chunk:?}");
                self.set_roots(chunk.iter())?;
            }
        } else {
            debug!("No missing roots found in range {start_root} to {end_slot}");
        }
        fix_roots.stop();
        datapoint_info!(
            "blockstore-scan_and_fix_roots",
            (
                "find_missing_roots_us",
                find_missing_roots.as_us() as i64,
                i64
            ),
            ("num_roots_to_fix", roots_to_fix.len() as i64, i64),
            ("fix_roots_us", fix_roots.as_us() as i64, i64),
        );
        Ok(roots_to_fix.len())
    }
4224
    /// Marks `root` as connected and propagates parent-connected status to
    /// its descendants, breadth first, in one batched write.
    ///
    /// No-op when `root` is already connected (connected status propagates
    /// forward, so its subtree was handled previously).
    pub fn set_and_chain_connected_on_root_and_next_slots(&self, root: Slot) -> Result<()> {
        let mut root_meta = self
            .meta(root)?
            .unwrap_or_else(|| SlotMeta::new(root, None));
        if root_meta.is_connected() {
            return Ok(());
        }
        info!("Marking slot {root} and any full children slots as connected");
        let mut write_batch = self.get_write_batch()?;

        // The root is forced connected unconditionally, regardless of
        // whether it is full in this blockstore.
        root_meta.set_parent_connected();
        root_meta.set_connected();
        self.meta_cf
            .put_in_batch(&mut write_batch, root_meta.slot, &root_meta)?;

        // BFS: a child's children are enqueued only when
        // set_parent_connected reports a change worth propagating.
        let mut next_slots = VecDeque::from(root_meta.next_slots);
        while !next_slots.is_empty() {
            let slot = next_slots.pop_front().unwrap();
            let mut meta = self.meta(slot)?.unwrap_or_else(|| {
                panic!("Slot {slot} is a child but has no SlotMeta in blockstore")
            });

            if meta.set_parent_connected() {
                next_slots.extend(meta.next_slots.iter());
            }
            self.meta_cf
                .put_in_batch(&mut write_batch, meta.slot, &meta)?;
        }

        self.write_batch(write_batch)?;
        Ok(())
    }
4270
    /// Performs chaining (parent/child links, orphan bookkeeping, connected
    /// propagation) for every working-set slot that received an insert,
    /// staging all resulting SlotMeta writes into `write_batch`.
    fn handle_chaining(
        &self,
        write_batch: &mut WriteBatch,
        working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
        metrics: &mut BlockstoreInsertionMetrics,
    ) -> Result<()> {
        let mut start = Measure::start("Shred chaining");
        // Only slots that actually received shreds need chaining work.
        working_set.retain(|_, entry| entry.did_insert_occur);
        // Metas touched during chaining that are not themselves in the
        // working set (e.g. parents of inserted slots) accumulate here.
        let mut new_chained_slots = HashMap::new();
        let working_set_slots: Vec<_> = working_set.keys().collect();
        for slot in working_set_slots {
            self.handle_chaining_for_slot(write_batch, working_set, &mut new_chained_slots, *slot)?;
        }

        // Persist the extra metas; working-set metas are written separately.
        for (slot, meta) in new_chained_slots.iter() {
            let meta: &SlotMeta = &RefCell::borrow(meta);
            self.meta_cf.put_in_batch(write_batch, *slot, meta)?;
        }
        start.stop();
        metrics.chaining_elapsed_us += start.as_us();
        Ok(())
    }
4314
    /// Chains `slot` into the slot forest: links it into its parent's
    /// next_slots (creating the parent meta if needed), updates orphan
    /// bookkeeping, and, when the slot just became full with a connected
    /// parent chain, marks it connected and propagates that to descendants.
    fn handle_chaining_for_slot(
        &self,
        write_batch: &mut WriteBatch,
        working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
        new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
        slot: Slot,
    ) -> Result<()> {
        let slot_meta_entry = working_set
            .get(&slot)
            .expect("Slot must exist in the working_set hashmap");

        let meta = &slot_meta_entry.new_slot_meta;
        let meta_backup = &slot_meta_entry.old_slot_meta;
        // Scope the mutable borrow of `meta` so the connected-propagation
        // code below can re-borrow it.
        {
            let mut meta_mut = meta.borrow_mut();
            let was_orphan_slot =
                meta_backup.is_some() && meta_backup.as_ref().unwrap().is_orphan();

            // Slot 0 has no parent; others may still not know theirs yet.
            if slot != 0 && meta_mut.parent_slot.is_some() {
                let prev_slot = meta_mut.parent_slot.unwrap();

                // Link into the parent only on first sight of this slot, or
                // when it just learned its parent (was an orphan before).
                if meta_backup.is_none() || was_orphan_slot {
                    let prev_slot_meta =
                        self.find_slot_meta_else_create(working_set, new_chained_slots, prev_slot)?;

                    chain_new_slot_to_prev_slot(
                        &mut prev_slot_meta.borrow_mut(),
                        slot,
                        &mut meta_mut,
                    );

                    // A parent that itself lacks a parent is an orphan and
                    // must be recorded as such.
                    if RefCell::borrow(&*prev_slot_meta).is_orphan() {
                        self.orphans_cf
                            .put_in_batch(write_batch, prev_slot, &true)?;
                    }
                }
            }

            // This slot now has a parent, so it is no longer an orphan.
            if was_orphan_slot {
                self.orphans_cf.delete_in_batch(write_batch, slot)?;
            }
        }

        // A slot that just became full while its parent chain is connected
        // becomes connected itself, and that status flows to its children.
        let should_propagate_is_connected =
            is_newly_completed_slot(&RefCell::borrow(meta), meta_backup)
                && RefCell::borrow(meta).is_parent_connected();

        if should_propagate_is_connected {
            meta.borrow_mut().set_connected();
            self.traverse_children_mut(
                meta,
                working_set,
                new_chained_slots,
                SlotMeta::set_parent_connected,
            )?;
        }

        Ok(())
    }
4417
4418 fn traverse_children_mut<F>(
4432 &self,
4433 slot_meta: &Rc<RefCell<SlotMeta>>,
4434 working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
4435 passed_visisted_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4436 slot_function: F,
4437 ) -> Result<()>
4438 where
4439 F: Fn(&mut SlotMeta) -> bool,
4440 {
4441 let slot_meta = slot_meta.borrow();
4442 let mut next_slots: VecDeque<u64> = slot_meta.next_slots.to_vec().into();
4443 while !next_slots.is_empty() {
4444 let slot = next_slots.pop_front().unwrap();
4445 let meta_ref =
4446 self.find_slot_meta_else_create(working_set, passed_visisted_slots, slot)?;
4447 let mut meta = meta_ref.borrow_mut();
4448 if slot_function(&mut meta) {
4449 meta.next_slots
4450 .iter()
4451 .for_each(|slot| next_slots.push_back(*slot));
4452 }
4453 }
4454 Ok(())
4455 }
4456
    /// Stages every changed SlotMeta from the working set into `write_batch`.
    ///
    /// Returns whether listeners should be signaled (per `slot_has_updates`)
    /// and the list of slots that just became full.
    fn commit_slot_meta_working_set(
        &self,
        slot_meta_working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
        write_batch: &mut WriteBatch,
    ) -> Result<(bool, Vec<u64>)> {
        let mut should_signal = false;
        let mut newly_completed_slots = vec![];
        let completed_slots_senders = self.completed_slots_senders.lock().unwrap();

        for (slot, slot_meta_entry) in slot_meta_working_set.iter() {
            // Entries without an insert are filtered out before commit.
            assert!(slot_meta_entry.did_insert_occur);
            let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta);
            let meta_backup = &slot_meta_entry.old_slot_meta;
            // Track newly completed slots only when someone is listening.
            if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) {
                newly_completed_slots.push(*slot);
            }
            // Write only metas that actually changed.
            if Some(meta) != meta_backup.as_ref() {
                should_signal = should_signal || slot_has_updates(meta, meta_backup);
                self.meta_cf.put_in_batch(write_batch, *slot, meta)?;
            }
        }

        Ok((should_signal, newly_completed_slots))
    }
4500
4501 fn get_slot_meta_entry<'a>(
4520 &self,
4521 slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>,
4522 slot: Slot,
4523 parent_slot: Slot,
4524 ) -> &'a mut SlotMetaWorkingSetEntry {
4525 slot_meta_working_set.entry(slot).or_insert_with(|| {
4527 if let Some(mut meta) = self
4529 .meta_cf
4530 .get(slot)
4531 .expect("Expect database get to succeed")
4532 {
4533 let backup = Some(meta.clone());
4534 if meta.is_orphan() {
4539 meta.parent_slot = Some(parent_slot);
4540 }
4541
4542 SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup)
4543 } else {
4544 SlotMetaWorkingSetEntry::new(
4545 Rc::new(RefCell::new(SlotMeta::new(slot, Some(parent_slot)))),
4546 None,
4547 )
4548 }
4549 })
4550 }
4551
4552 fn find_slot_meta_else_create<'a>(
4563 &self,
4564 working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
4565 chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4566 slot_index: u64,
4567 ) -> Result<Rc<RefCell<SlotMeta>>> {
4568 let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index);
4569 if let Some(slot) = result {
4570 Ok(slot)
4571 } else {
4572 self.find_slot_meta_in_db_else_create(slot_index, chained_slots)
4573 }
4574 }
4575
4576 fn find_slot_meta_in_db_else_create(
4582 &self,
4583 slot: Slot,
4584 insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
4585 ) -> Result<Rc<RefCell<SlotMeta>>> {
4586 if let Some(slot_meta) = self.meta_cf.get(slot)? {
4587 insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
4588 } else {
4589 insert_map.insert(slot, Rc::new(RefCell::new(SlotMeta::new_orphan(slot))));
4593 }
4594 Ok(insert_map.get(&slot).unwrap().clone())
4595 }
4596
4597 fn get_index_meta_entry<'a>(
4598 &self,
4599 slot: Slot,
4600 index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>,
4601 index_meta_time_us: &mut u64,
4602 ) -> &'a mut IndexMetaWorkingSetEntry {
4603 let mut total_start = Measure::start("Total elapsed");
4604 let res = index_working_set.entry(slot).or_insert_with(|| {
4605 let newly_inserted_meta = self
4606 .index_cf
4607 .get(slot)
4608 .unwrap()
4609 .unwrap_or_else(|| Index::new(slot));
4610 IndexMetaWorkingSetEntry {
4611 index: newly_inserted_meta,
4612 did_insert_occur: false,
4613 }
4614 });
4615 total_start.stop();
4616 *index_meta_time_us += total_start.as_us();
4617 res
4618 }
4619
4620 pub fn get_write_batch(&self) -> Result<WriteBatch> {
4621 self.db.batch()
4622 }
4623
4624 pub fn write_batch(&self, write_batch: WriteBatch) -> Result<()> {
4625 self.db.write(write_batch)
4626 }
4627}
4628
/// Records `new_shred_index` as the final shred of a data block (when
/// `is_last_in_data`) and returns the data-block ranges adjacent to it that
/// are now fully received.
fn update_completed_data_indexes<'a>(
    is_last_in_data: bool,
    new_shred_index: u32,
    received_data_shreds: &'a ShredIndex,
    completed_data_indexes: &mut CompletedDataIndexes,
) -> impl Iterator<Item = Range<u32>> + 'a {
    if is_last_in_data {
        completed_data_indexes.insert(new_shred_index);
    }
    // Candidate block boundaries around the new shred, in order:
    // - start of the block containing it (end of the previous completed
    //   block + 1, or 0 when there is none),
    // - the boundary just past the new shred, when it ends a block,
    // - the boundary just past the next recorded completed index.
    // Adjacent boundary pairs (tuple_windows) are candidate blocks.
    [
        completed_data_indexes
            .range(..new_shred_index)
            .next_back()
            .map(|index| index + 1)
            .or(Some(0u32)),
        is_last_in_data.then(|| new_shred_index + 1),
        completed_data_indexes
            .range(new_shred_index + 1..)
            .next()
            .map(|index| index + 1),
    ]
    .into_iter()
    .flatten()
    .tuple_windows()
    // Keep only blocks for which every shred has actually arrived.
    .filter(|&(start, end)| {
        let bounds = u64::from(start)..u64::from(end);
        received_data_shreds.range(bounds.clone()).eq(bounds)
    })
    .map(|(start, end)| start..end)
}
4668
/// Applies the receipt of one data shred at `index` to `slot_meta` and
/// returns the data-block ranges it completed (see
/// `update_completed_data_indexes`).
fn update_slot_meta<'a>(
    is_last_in_slot: bool,
    is_last_in_data: bool,
    slot_meta: &mut SlotMeta,
    index: u32,
    new_consumed: u64,
    reference_tick: u8,
    received_data_shreds: &'a ShredIndex,
) -> impl Iterator<Item = Range<u32>> + 'a {
    let first_insert = slot_meta.received == 0;
    // `received` is a high-water mark (max index + 1), not a count.
    slot_meta.received = cmp::max(u64::from(index) + 1, slot_meta.received);
    if first_insert {
        // Estimate the slot's start time by backing the shred's reference
        // tick out of the current timestamp.
        let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND;
        slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed;
    }
    slot_meta.consumed = new_consumed;
    // Only the first last-in-slot shred pins the slot's final index.
    if is_last_in_slot && slot_meta.last_index.is_none() {
        slot_meta.last_index = Some(u64::from(index));
    }
    update_completed_data_indexes(
        is_last_in_slot || is_last_in_data,
        index,
        received_data_shreds,
        &mut slot_meta.completed_data_indexes,
    )
}
4700
4701fn get_last_hash<'a>(iterator: impl Iterator<Item = &'a Entry> + 'a) -> Option<Hash> {
4702 iterator.last().map(|entry| entry.hash)
4703}
4704
/// Notifies listeners after a shred insertion: wakes each boolean signal
/// channel (best effort; full or disconnected channels are skipped) and
/// broadcasts the newly completed slot list to every completed-slot sender.
fn send_signals(
    new_shreds_signals: &[Sender<bool>],
    completed_slots_senders: &[Sender<Vec<u64>>],
    should_signal: bool,
    newly_completed_slots: Vec<u64>,
) {
    if should_signal {
        for signal in new_shreds_signals {
            match signal.try_send(true) {
                Ok(_) => {}
                // A full channel means a wakeup is already pending; dropping
                // this one is harmless.
                Err(TrySendError::Full(_)) => {
                    trace!("replay wake up signal channel is full.")
                }
                Err(TrySendError::Disconnected(_)) => {
                    trace!("replay wake up signal channel is disconnected.")
                }
            }
        }
    }

    if !completed_slots_senders.is_empty() && !newly_completed_slots.is_empty() {
        // Clone the slot list for all senders but the last, which receives
        // the original vector by value (saves one clone).
        let mut slots: Vec<_> = (0..completed_slots_senders.len() - 1)
            .map(|_| newly_completed_slots.clone())
            .collect();

        slots.push(newly_completed_slots);

        for (signal, slots) in completed_slots_senders.iter().zip(slots.into_iter()) {
            let res = signal.try_send(slots);
            // Dropping completed-slot notifications is an error worth
            // reporting, unlike the wakeup signals above.
            if let Err(TrySendError::Full(_)) = res {
                datapoint_error!(
                    "blockstore_error",
                    (
                        "error",
                        "Unable to send newly completed slot because channel is full",
                        String
                    ),
                );
            }
        }
    }
}
4747
4748fn find_slot_meta_in_cached_state<'a>(
4752 working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
4753 chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
4754 slot: Slot,
4755) -> Option<Rc<RefCell<SlotMeta>>> {
4756 if let Some(entry) = working_set.get(&slot) {
4757 Some(entry.new_slot_meta.clone())
4758 } else {
4759 chained_slots.get(&slot).cloned()
4760 }
4761}
4762
4763fn chain_new_slot_to_prev_slot(
4765 prev_slot_meta: &mut SlotMeta,
4766 current_slot: Slot,
4767 current_slot_meta: &mut SlotMeta,
4768) {
4769 prev_slot_meta.next_slots.push(current_slot);
4770 if prev_slot_meta.is_connected() {
4771 current_slot_meta.set_parent_connected();
4772 }
4773}
4774
4775fn is_newly_completed_slot(slot_meta: &SlotMeta, backup_slot_meta: &Option<SlotMeta>) -> bool {
4776 slot_meta.is_full()
4777 && (backup_slot_meta.is_none()
4778 || slot_meta.consumed != backup_slot_meta.as_ref().unwrap().consumed)
4779}
4780
4781fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -> bool {
4784 slot_meta.is_parent_connected() &&
4787 ((slot_meta_backup.is_none() && slot_meta.consumed != 0) ||
4791 (slot_meta_backup.is_some() && slot_meta_backup.as_ref().unwrap().consumed != slot_meta.consumed))
4795}
4796
/// Creates a brand-new ledger at `ledger_path`:
///   1. destroys any existing blockstore and writes `genesis_config` to disk,
///   2. seeds slot 0 with one slot's worth of ticks and roots it,
///   3. packages the genesis file plus the rocksdb directory into a
///      bzip2-compressed tar archive inside the ledger directory,
///   4. verifies the archive unpacks within
///      `max_genesis_archive_unpacked_size`, stashing the artifacts as
///      `*.failed` if it does not.
///
/// Returns the hash of the last tick in slot 0.
pub fn create_new_ledger(
    ledger_path: &Path,
    genesis_config: &GenesisConfig,
    max_genesis_archive_unpacked_size: u64,
    column_options: LedgerColumnOptions,
) -> Result<Hash> {
    // Start from a clean slate: remove any prior blockstore at this path.
    Blockstore::destroy(ledger_path)?;
    genesis_config.write(ledger_path)?;

    let blockstore_dir = BLOCKSTORE_DIRECTORY_ROCKS_LEVEL;
    let blockstore = Blockstore::open_with_options(
        ledger_path,
        BlockstoreOptions {
            column_options: column_options.clone(),
            ..BlockstoreOptions::default()
        },
    )?;
    // Slot 0 is filled with ticks chained from the genesis hash.
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
    let last_hash = entries.last().unwrap().hash;
    // The shred version is derived from the final tick hash of slot 0.
    let version = solana_shred_version::version_from_hash(&last_hash);

    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    let (shreds, _) = shredder.entries_to_merkle_shreds_for_tests(
        &Keypair::new(),
        &entries,
        true, Hash::new_from_array(rand::thread_rng().gen()),
        0, 0, &ReedSolomonCache::default(),
        &mut ProcessShredsStats::default(),
    );
    // A full slot's final shred must carry the last-in-slot flag.
    assert!(shreds.last().unwrap().last_in_slot());

    blockstore.insert_shreds(shreds, None, false)?;
    blockstore.set_roots(std::iter::once(&0))?;
    // Close the blockstore before archiving so the rocksdb files are quiescent.
    drop(blockstore);

    // Package genesis file + blockstore directory into a bzip2 tar archive.
    let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
    let archive_file = File::create(&archive_path)?;
    let encoder = bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best());
    let mut archive = tar::Builder::new(encoder);
    archive.append_path_with_name(ledger_path.join(DEFAULT_GENESIS_FILE), DEFAULT_GENESIS_FILE)?;
    archive.append_dir_all(blockstore_dir, ledger_path.join(blockstore_dir))?;
    archive.into_inner()?;

    {
        // Sanity check: unpack the archive we just wrote into a scratch dir,
        // enforcing the configured maximum unpacked size.
        let temp_dir = tempfile::tempdir_in(ledger_path).unwrap();
        let unpack_check = unpack_genesis_archive(
            &archive_path,
            temp_dir.path(),
            max_genesis_archive_unpacked_size,
        );
        if let Err(unpack_err) = unpack_check {
            // The ledger is unusable; rename the offending artifacts to
            // "*.failed" for post-mortem instead of deleting them. Any rename
            // failures are appended to the returned error text.
            let mut error_messages = String::new();

            fs::rename(
                ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
                ledger_path.join(format!("{DEFAULT_GENESIS_ARCHIVE}.failed")),
            )
            .unwrap_or_else(|e| {
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {DEFAULT_GENESIS_ARCHIVE}: {e}"
                );
            });
            fs::rename(
                ledger_path.join(DEFAULT_GENESIS_FILE),
                ledger_path.join(format!("{DEFAULT_GENESIS_FILE}.failed")),
            )
            .unwrap_or_else(|e| {
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {DEFAULT_GENESIS_FILE}: {e}"
                );
            });
            fs::rename(
                ledger_path.join(blockstore_dir),
                ledger_path.join(format!("{blockstore_dir}.failed")),
            )
            .unwrap_or_else(|e| {
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {blockstore_dir}: {e}"
                );
            });

            return Err(BlockstoreError::Io(IoError::other(format!(
                "Error checking to unpack genesis archive: {unpack_err}{error_messages}"
            ))));
        }
    }

    Ok(last_hash)
}
4906
/// Expands to a name unique to the invocation site: `"<file>-<line>"`.
/// Used to namespace temporary test ledgers.
#[macro_export]
macro_rules! tmp_ledger_name {
    () => {
        &format!("{}-{}", file!(), line!())
    };
}
4913
/// Expands to a site-unique temporary ledger path (`PathBuf`). The caller is
/// responsible for cleanup; prefer [`get_tmp_ledger_path_auto_delete!`].
#[macro_export]
macro_rules! get_tmp_ledger_path {
    () => {
        $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!())
    };
}
4920
/// Expands to a site-unique temporary ledger directory wrapped in a `TempDir`
/// handle, so the directory is deleted automatically when dropped.
#[macro_export]
macro_rules! get_tmp_ledger_path_auto_delete {
    () => {
        $crate::blockstore::get_ledger_path_from_name_auto_delete($crate::tmp_ledger_name!())
    };
}
4927
4928pub fn get_ledger_path_from_name_auto_delete(name: &str) -> TempDir {
4929 let mut path = get_ledger_path_from_name(name);
4930 let last = path.file_name().unwrap().to_str().unwrap().to_string();
4932 path.pop();
4933 fs::create_dir_all(&path).unwrap();
4934 Builder::new()
4935 .prefix(&last)
4936 .rand_bytes(0)
4937 .tempdir_in(path)
4938 .unwrap()
4939}
4940
4941pub fn get_ledger_path_from_name(name: &str) -> PathBuf {
4942 use std::env;
4943 let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
4944 let keypair = Keypair::new();
4945
4946 let path = [
4947 out_dir,
4948 "ledger".to_string(),
4949 format!("{}-{}", name, keypair.pubkey()),
4950 ]
4951 .iter()
4952 .collect();
4953
4954 let _ignored = fs::remove_dir_all(&path);
4956
4957 path
4958}
4959
/// Creates a new, site-unique temporary ledger for `$genesis_config` using
/// default column options and the default maximum genesis-archive size.
/// Expands to a `(PathBuf, Hash)`; the directory is NOT auto-deleted.
#[macro_export]
macro_rules! create_new_tmp_ledger {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
            $crate::blockstore_options::LedgerColumnOptions::default(),
        )
    };
}
4971
/// Like [`create_new_tmp_ledger!`] but with a caller-supplied maximum
/// genesis-archive unpacked size instead of the default.
#[macro_export]
macro_rules! create_new_tmp_ledger_with_size {
    (
        $genesis_config:expr,
        $max_genesis_archive_unpacked_size:expr $(,)?
    ) => {
        $crate::blockstore::create_new_ledger_from_name(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $max_genesis_archive_unpacked_size,
            $crate::blockstore_options::LedgerColumnOptions::default(),
        )
    };
}
4986
/// Like [`create_new_tmp_ledger!`] but expands to `(TempDir, Hash)` so the
/// ledger directory is deleted automatically when the handle is dropped.
#[macro_export]
macro_rules! create_new_tmp_ledger_auto_delete {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name_auto_delete(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
            $crate::blockstore_options::LedgerColumnOptions::default(),
        )
    };
}
4998
/// Sanity-checks shred slot lineage: the parent must sit strictly below the
/// slot and at-or-above the root. Slot 0 (genesis) is a special case that is
/// its own parent.
pub(crate) fn verify_shred_slots(slot: Slot, parent: Slot, root: Slot) -> bool {
    match (slot, parent, root) {
        // Genesis is self-referential: slot 0 with parent 0 at root 0.
        (0, 0, 0) => true,
        _ => parent < slot && root <= parent,
    }
}
5007
5008pub fn create_new_ledger_from_name(
5013 name: &str,
5014 genesis_config: &GenesisConfig,
5015 max_genesis_archive_unpacked_size: u64,
5016 column_options: LedgerColumnOptions,
5017) -> (PathBuf, Hash) {
5018 let (ledger_path, blockhash) = create_new_ledger_from_name_auto_delete(
5019 name,
5020 genesis_config,
5021 max_genesis_archive_unpacked_size,
5022 column_options,
5023 );
5024 (ledger_path.keep(), blockhash)
5025}
5026
5027pub fn create_new_ledger_from_name_auto_delete(
5032 name: &str,
5033 genesis_config: &GenesisConfig,
5034 max_genesis_archive_unpacked_size: u64,
5035 column_options: LedgerColumnOptions,
5036) -> (TempDir, Hash) {
5037 let ledger_path = get_ledger_path_from_name_auto_delete(name);
5038 let blockhash = create_new_ledger(
5039 ledger_path.path(),
5040 genesis_config,
5041 max_genesis_archive_unpacked_size,
5042 column_options,
5043 )
5044 .unwrap();
5045 (ledger_path, blockhash)
5046}
5047
/// Shreds `entries` for (`slot`, `parent_slot`) with a throwaway keypair and
/// returns only the data shreds; coding shreds are discarded.
#[cfg(feature = "dev-context-only-utils")]
pub fn entries_to_test_shreds(
    entries: &[Entry],
    slot: Slot,
    parent_slot: Slot,
    is_full_slot: bool,
    version: u16,
) -> Vec<Shred> {
    let shredder = Shredder::new(slot, parent_slot, 0, version).unwrap();
    let random_hash = Hash::new_from_array(rand::thread_rng().gen());
    shredder
        .make_merkle_shreds_from_entries(
            &Keypair::new(),
            entries,
            is_full_slot,
            random_hash,
            0,
            0,
            &ReedSolomonCache::default(),
            &mut ProcessShredsStats::default(),
        )
        .filter(Shred::is_data)
        .collect()
}
5071
/// Generates `num_entries` tick entries for `slot` (chained to `parent_slot`)
/// and shreds them as a full slot; returns both the shreds and the entries.
#[cfg(feature = "dev-context-only-utils")]
pub fn make_slot_entries(
    slot: Slot,
    parent_slot: Slot,
    num_entries: u64,
) -> (Vec<Shred>, Vec<Entry>) {
    let entries = create_ticks(num_entries, 1, Hash::new_unique());
    (
        entries_to_test_shreds(&entries, slot, parent_slot, true, 0),
        entries,
    )
}
5082
/// Generates `num_slots` consecutive slots starting at `start_slot`, each
/// chained to the previous slot (slot 0 chains to itself), with
/// `entries_per_slot` entries apiece. Returns all shreds and all entries,
/// concatenated in slot order.
#[cfg(feature = "dev-context-only-utils")]
pub fn make_many_slot_entries(
    start_slot: Slot,
    num_slots: u64,
    entries_per_slot: u64,
) -> (Vec<Shred>, Vec<Entry>) {
    let mut all_shreds = Vec::new();
    let mut all_entries = Vec::new();
    for slot in start_slot..start_slot + num_slots {
        // saturating_sub keeps slot 0 parented to itself.
        let parent_slot = slot.saturating_sub(1);
        let (shreds, entries) = make_slot_entries(slot, parent_slot, entries_per_slot);
        all_shreds.extend(shreds);
        all_entries.extend(entries);
    }
    (all_shreds, all_entries)
}
5101
/// Test helper: asserts that every column family in `blockstore` is either
/// empty or has no key below `min_slot`. The transaction-status and
/// address-signature columns additionally tolerate slot-0 entries.
///
/// Note the bitwise `&` (not `&&`): every sub-check is evaluated eagerly.
#[cfg(feature = "dev-context-only-utils")]
pub fn test_all_empty_or_min(blockstore: &Blockstore, min_slot: Slot) {
    let condition_met = blockstore
        .meta_cf
        .iter(IteratorMode::Start)
        .unwrap()
        .next()
        .map(|(slot, _)| slot >= min_slot)
        .unwrap_or(true)
        & blockstore
            .roots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        // Shred columns key on (slot, index); only the slot matters here.
        & blockstore
            .data_shred_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .code_shred_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .dead_slots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .duplicate_slots_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .erasure_meta_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .orphans_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .index_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        // Slot-0 entries are acceptable in the transaction-status and
        // address-signature columns.
        & blockstore
            .transaction_status_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((_, slot), _)| slot >= min_slot || slot == 0)
            .unwrap_or(true)
        & blockstore
            .address_signatures_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((_, slot, _, _), _)| slot >= min_slot || slot == 0)
            .unwrap_or(true)
        & blockstore
            .rewards_cf
            .iter(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true);
    assert!(condition_met);
}
5190
/// Generates entries/shreds for each slot in `chain`, where every slot is
/// parented to the previous element of `chain`. The first slot (and any slot
/// numbered 0) is parented to `first_parent` instead.
#[cfg(feature = "dev-context-only-utils")]
pub fn make_chaining_slot_entries(
    chain: &[u64],
    entries_per_slot: u64,
    first_parent: u64,
) -> Vec<(Vec<Shred>, Vec<Entry>)> {
    chain
        .iter()
        .enumerate()
        .map(|(i, &slot)| {
            let parent_slot = if i == 0 || slot == 0 {
                first_parent
            } else {
                chain[i - 1]
            };
            make_slot_entries(slot, parent_slot, entries_per_slot)
        })
        .collect()
}
5215
5216#[cfg(test)]
5217pub mod tests {
5218 use {
5219 super::*,
5220 crate::{
5221 genesis_utils::{create_genesis_config, GenesisConfigInfo},
5222 leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule},
5223 shred::{max_ticks_per_n_shreds, MAX_DATA_SHREDS_PER_SLOT},
5224 },
5225 assert_matches::assert_matches,
5226 bincode::{serialize, Options},
5227 crossbeam_channel::unbounded,
5228 rand::{seq::SliceRandom, thread_rng},
5229 solana_account_decoder::parse_token::UiTokenAmount,
5230 solana_clock::{DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
5231 solana_entry::entry::{next_entry, next_entry_mut},
5232 solana_genesis_utils::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
5233 solana_hash::Hash,
5234 solana_message::{compiled_instruction::CompiledInstruction, v0::LoadedAddresses},
5235 solana_packet::PACKET_DATA_SIZE,
5236 solana_pubkey::Pubkey,
5237 solana_runtime::bank::{Bank, RewardType},
5238 solana_sha256_hasher::hash,
5239 solana_shred_version::version_from_hash,
5240 solana_signature::Signature,
5241 solana_storage_proto::convert::generated,
5242 solana_transaction::Transaction,
5243 solana_transaction_context::TransactionReturnData,
5244 solana_transaction_error::TransactionError,
5245 solana_transaction_status::{
5246 InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance,
5247 },
5248 std::{cmp::Ordering, time::Duration},
5249 };
5250
5251 pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
5253 let mut entries: Vec<Entry> = Vec::new();
5254 for x in 0..num_entries {
5255 let transaction = Transaction::new_with_compiled_instructions(
5256 &[&Keypair::new()],
5257 &[solana_pubkey::new_rand()],
5258 Hash::default(),
5259 vec![solana_pubkey::new_rand()],
5260 vec![CompiledInstruction::new(1, &(), vec![0])],
5261 );
5262 entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
5263 let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
5264 entries.append(&mut tick);
5265 }
5266 entries
5267 }
5268
5269 fn make_and_insert_slot(blockstore: &Blockstore, slot: Slot, parent_slot: Slot) {
5270 let (shreds, _) = make_slot_entries(
5271 slot,
5272 parent_slot,
5273 100, );
5275 blockstore.insert_shreds(shreds, None, true).unwrap();
5276
5277 let meta = blockstore.meta(slot).unwrap().unwrap();
5278 assert_eq!(slot, meta.slot);
5279 assert!(meta.is_full());
5280 assert!(meta.next_slots.is_empty());
5281 }
5282
    #[test]
    fn test_create_new_ledger() {
        agave_logger::setup();
        let mint_total = 1_000_000_000_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        // Slot 0 of a fresh ledger should contain exactly the genesis ticks.
        let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
        let entries = blockstore.get_slot_entries(0, 0).unwrap();

        assert_eq!(ticks, entries);
        // The rocksdb directory must exist on disk.
        assert!(Path::new(ledger_path.path())
            .join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL)
            .exists());

        assert_eq!(
            genesis_config,
            open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap()
        );
        // Even with the raw genesis file removed, the config must still load
        // (presumably from the packaged genesis archive — see create_new_ledger).
        std::fs::remove_file(ledger_path.path().join(DEFAULT_GENESIS_FILE)).unwrap();
        assert_eq!(
            genesis_config,
            open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap()
        );
    }
5310
    #[test]
    fn test_insert_get_bytes() {
        // Enough entries to require more than one data shred.
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        assert!(num_entries > 1);

        let (mut shreds, _) = make_slot_entries(
            0, 0, num_entries,
        );

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert only the final (non-zero-index) shred of slot 0.
        let last_shred = shreds.pop().unwrap();
        assert!(last_shred.index() > 0);
        blockstore
            .insert_shreds(vec![last_shred.clone()], None, false)
            .unwrap();

        // Reading the raw bytes back and deserializing must round-trip.
        let serialized_shred = blockstore
            .data_shred_cf
            .get_bytes((0, last_shred.index() as u64))
            .unwrap()
            .unwrap();
        let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap();

        assert_eq!(last_shred, deserialized_shred);
    }
5342
    #[test]
    fn test_write_entries() {
        agave_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let ticks_per_slot = 10;
        let num_slots = 10;
        let mut ticks = vec![];
        let mut shreds_per_slot = vec![];

        // Write a chain of slots 0..num_slots, each a full slot of ticks
        // parented to the previous slot (slot 0 parents to itself).
        for i in 0..num_slots {
            let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
            let num_shreds = blockstore
                .write_entries(
                    i,
                    0,
                    0,
                    ticks_per_slot,
                    Some(i.saturating_sub(1)),
                    true,
                    &Arc::new(Keypair::new()),
                    new_ticks.clone(),
                    0,
                )
                .unwrap() as u64;
            shreds_per_slot.push(num_shreds);
            ticks.append(&mut new_ticks);
        }

        // Every slot should be fully consumed and correctly chained.
        for i in 0..num_slots {
            let meta = blockstore.meta(i).unwrap().unwrap();
            let num_shreds = shreds_per_slot[i as usize];
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.last_index, Some(num_shreds - 1));
            // Only the final slot has no child.
            if i == num_slots - 1 {
                assert!(meta.next_slots.is_empty());
            } else {
                assert_eq!(meta.next_slots, vec![i + 1]);
            }
            // Slot 0 is recorded as its own parent.
            if i == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(i - 1));
            }

            // Entries read back must match the ticks written for that slot.
            assert_eq!(
                &ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize],
                &blockstore.get_slot_entries(i, 0).unwrap()[..]
            );
        }

    }
5439
5440 #[test]
5441 fn test_put_get_simple() {
5442 let ledger_path = get_tmp_ledger_path_auto_delete!();
5443 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
5444
5445 let meta = SlotMeta::new(0, Some(1));
5447 blockstore.meta_cf.put(0, &meta).unwrap();
5448 let result = blockstore
5449 .meta_cf
5450 .get(0)
5451 .unwrap()
5452 .expect("Expected meta object to exist");
5453
5454 assert_eq!(result, meta);
5455
5456 let erasure = vec![1u8; 16];
5458 let erasure_key = (0, 0);
5459 blockstore
5460 .code_shred_cf
5461 .put_bytes(erasure_key, &erasure)
5462 .unwrap();
5463
5464 let result = blockstore
5465 .code_shred_cf
5466 .get_bytes(erasure_key)
5467 .unwrap()
5468 .expect("Expected erasure object to exist");
5469
5470 assert_eq!(result, erasure);
5471
5472 let data = vec![2u8; 16];
5474 let data_key = (0, 0);
5475 blockstore.data_shred_cf.put_bytes(data_key, &data).unwrap();
5476
5477 let result = blockstore
5478 .data_shred_cf
5479 .get_bytes(data_key)
5480 .unwrap()
5481 .expect("Expected data object to exist");
5482
5483 assert_eq!(result, data);
5484 }
5485
    #[test]
    fn test_multi_get() {
        const TEST_PUT_ENTRY_COUNT: usize = 100;
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write TEST_PUT_ENTRY_COUNT metas and verify each single-key get.
        for i in 0..TEST_PUT_ENTRY_COUNT {
            let k = u64::try_from(i).unwrap();
            let meta = SlotMeta::new(k, Some(k + 1));
            blockstore.meta_cf.put(k, &meta).unwrap();
            let result = blockstore
                .meta_cf
                .get(k)
                .unwrap()
                .expect("Expected meta object to exist");
            assert_eq!(result, meta);
        }
        // A batched multi_get over the same key range must return the same
        // values as the individual gets above.
        let keys = blockstore
            .meta_cf
            .multi_get_keys(0..TEST_PUT_ENTRY_COUNT as Slot);
        let values = blockstore.meta_cf.multi_get(&keys);
        for (i, value) in values.enumerate().take(TEST_PUT_ENTRY_COUNT) {
            let k = u64::try_from(i).unwrap();
            assert_eq!(
                value.as_ref().unwrap().as_ref().unwrap(),
                &SlotMeta::new(k, Some(k + 1))
            );
        }
    }
5516
    #[test]
    fn test_read_shred_bytes() {
        let slot = 0;
        let (shreds, _) = make_slot_entries(slot, 0, 100);
        let num_shreds = shreds.len() as u64;
        let shred_bufs: Vec<_> = shreds.iter().map(Shred::payload).cloned().collect();

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // A large buffer receives the first shred's bytes verbatim.
        let mut buf = [0; 4096];
        let (_, bytes) = blockstore.get_data_shreds(slot, 0, 1, &mut buf).unwrap();
        assert_eq!(buf[..bytes], shred_bufs[0][..bytes]);

        // Requesting two shreds packs both back-to-back into the buffer.
        let (last_index, bytes2) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 1);
        assert!(bytes2 > bytes);
        {
            let shred_data_1 = &buf[..bytes];
            assert_eq!(shred_data_1, &shred_bufs[0][..bytes]);

            let shred_data_2 = &buf[bytes..bytes2];
            assert_eq!(shred_data_2, &shred_bufs[1][..bytes2 - bytes]);
        }

        // A buffer with room for only one shred yields just that shred...
        let mut buf = vec![0; bytes + 1];
        let (last_index, bytes3) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes3, bytes);

        // ...as does a buffer one byte short of two shreds.
        let mut buf = vec![0; bytes2 - 1];
        let (last_index, bytes4) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes4, bytes);

        // Reading starting from the final shred returns that shred alone.
        let mut buf = vec![0; bytes * 2];
        let (last_index, bytes6) = blockstore
            .get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
            .unwrap();
        assert_eq!(last_index, num_shreds - 1);

        {
            let shred_data = &buf[..bytes6];
            assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]);
        }

        // A range entirely past the end of the slot yields nothing.
        let (last_index, bytes6) = blockstore
            .get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf)
            .unwrap();
        assert_eq!(last_index, 0);
        assert_eq!(bytes6, 0);
    }
5572
5573 #[test]
5574 fn test_shred_cleanup_check() {
5575 let slot = 1;
5576 let (shreds, _) = make_slot_entries(slot, 0, 100);
5577
5578 let ledger_path = get_tmp_ledger_path_auto_delete!();
5579 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
5580 blockstore.insert_shreds(shreds, None, false).unwrap();
5581
5582 let mut buf = [0; 4096];
5583 assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_ok());
5584
5585 let max_purge_slot = 1;
5586 blockstore
5587 .run_purge(0, max_purge_slot, PurgeType::Exact)
5588 .unwrap();
5589 *blockstore.lowest_cleanup_slot.write().unwrap() = max_purge_slot;
5590
5591 let mut buf = [0; 4096];
5592 assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_err());
5593 }
5594
    #[test]
    fn test_insert_data_shreds_basic() {
        // Enough entries to require more than one data shred.
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        assert!(num_entries > 1);

        let (mut shreds, entries) = make_slot_entries(
            0, 0, num_entries,
        );
        let num_shreds = shreds.len() as u64;

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert only the last shred: entries cannot be reconstructed yet...
        assert!(shreds.len() > 1);
        let last_shred = shreds.pop().unwrap();
        blockstore
            .insert_shreds(vec![last_shred], None, false)
            .unwrap();
        assert!(blockstore.get_slot_entries(0, 0).unwrap().is_empty());

        // ...but `received` already reflects the highest index seen.
        let meta = blockstore
            .meta(0)
            .unwrap()
            .expect("Expected new metadata object to be created");
        assert!(meta.consumed == 0 && meta.received == num_shreds);

        // Insert the rest: the slot is now complete, consumed, and connected.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        let result = blockstore.get_slot_entries(0, 0).unwrap();

        assert_eq!(result, entries);

        let meta = blockstore
            .meta(0)
            .unwrap()
            .expect("Expected new metadata object to exist");
        assert_eq!(meta.consumed, num_shreds);
        assert_eq!(meta.received, num_shreds);
        assert_eq!(meta.parent_slot, Some(0));
        assert_eq!(meta.last_index, Some(num_shreds - 1));
        assert!(meta.next_slots.is_empty());
        assert!(meta.is_connected());
    }
5643
    #[test]
    fn test_insert_data_shreds_reverse() {
        let num_shreds = 10;
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (mut shreds, entries) = make_slot_entries(
            0, 0, num_entries,
        );
        let num_shreds = shreds.len() as u64;

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert shreds one at a time from highest index to lowest; entries
        // become readable only once shred 0 (the final insert) lands.
        for i in (0..num_shreds).rev() {
            let shred = shreds.pop().unwrap();
            blockstore.insert_shreds(vec![shred], None, false).unwrap();
            let result = blockstore.get_slot_entries(0, 0).unwrap();

            let meta = blockstore
                .meta(0)
                .unwrap()
                .expect("Expected metadata object to exist");
            // The last-in-slot shred went in first, so last_index is always known.
            assert_eq!(meta.last_index, Some(num_shreds - 1));
            if i != 0 {
                // Still a gap at the front: nothing is consumable yet.
                assert_eq!(result.len(), 0);
                assert!(meta.consumed == 0 && meta.received == num_shreds);
            } else {
                // Gap closed: the full slot is consumable.
                assert_eq!(meta.parent_slot, Some(0));
                assert_eq!(result, entries);
                assert!(meta.consumed == num_shreds && meta.received == num_shreds);
            }
        }
    }
5679
    #[test]
    fn test_insert_slots() {
        // Run the shared slot-insertion scenario with both flag values; the
        // flag's meaning is defined by test_insert_data_shreds_slots
        // (declared elsewhere in this module).
        test_insert_data_shreds_slots(false);
        test_insert_data_shreds_slots(true);
    }
5685
    #[test]
    fn test_index_fallback_deserialize() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let mut rng = rand::thread_rng();
        let slot = rng.gen_range(0..100);
        // Strict fixed-int bincode config — presumably matching the on-disk
        // encoding of the fallback Index layout; confirm against IndexFallback.
        let bincode = bincode::DefaultOptions::new()
            .reject_trailing_bytes()
            .with_fixint_encoding();

        // Populate a fallback-format index with random data/coding ranges.
        let data = 0..rng.gen_range(100..MAX_DATA_SHREDS_PER_SLOT as u64);
        let coding = 0..rng.gen_range(100..MAX_DATA_SHREDS_PER_SLOT as u64);
        let mut fallback = IndexFallback::new(slot);
        for (d, c) in data.clone().zip(coding.clone()) {
            fallback.data_mut().insert(d);
            fallback.coding_mut().insert(c);
        }

        // Write the raw fallback-serialized bytes directly...
        blockstore
            .index_cf
            .put_bytes(slot, &bincode.serialize(&fallback).unwrap())
            .unwrap();

        // ...and confirm the typed read path still deserializes them.
        let current = blockstore.index_cf.get(slot).unwrap().unwrap();
        for (d, c) in data.zip(coding) {
            assert!(current.data().contains(d));
            assert!(current.coding().contains(c));
        }
    }
5715
5716 #[test]
5717 fn test_get_slot_entries1() {
5718 let ledger_path = get_tmp_ledger_path_auto_delete!();
5719 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
5720 let entries = create_ticks(8, 0, Hash::default());
5721 let shreds = entries_to_test_shreds(&entries[0..4], 1, 0, false, 0);
5722 blockstore
5723 .insert_shreds(shreds, None, false)
5724 .expect("Expected successful write of shreds");
5725
5726 assert_eq!(
5727 blockstore.get_slot_entries(1, 0).unwrap()[2..4],
5728 entries[2..4],
5729 );
5730 }
5731
    #[test]
    fn test_get_slot_entries3() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let num_slots = 5_u64;
        let shreds_per_slot = 5_u64;
        // Size each slot's entry count so it spans at least shreds_per_slot
        // shreds (estimated from the serialized size of a single tick).
        let entry_serialized_size =
            wincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
        let entries_per_slot = (shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;

        // Write several multi-shred (incomplete) slots and verify that the
        // entries read back intact from each.
        for slot in 0..num_slots {
            let entries = create_ticks(entries_per_slot, 0, Hash::default());
            let shreds = entries_to_test_shreds(&entries, slot, slot.saturating_sub(1), false, 0);
            assert!(shreds.len() as u64 >= shreds_per_slot);
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expected successful write of shreds");
            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries);
        }
    }
5755
    #[test]
    fn test_insert_data_shreds_consecutive() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        // Enough entries per slot to always span multiple shreds.
        let min_entries = max_ticks_per_n_shreds(1, None) + 1;
        for i in 0..4 {
            let slot = i;
            let parent_slot = if i == 0 { 0 } else { i - 1 };
            // Grow the slot each iteration to vary the shred count.
            let num_entries = min_entries * (i + 1);
            let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries);

            let num_shreds = shreds.len() as u64;
            assert!(num_shreds > 1);
            let mut even_shreds = vec![];
            let mut odd_shreds = vec![];

            // Split the slot's shreds by index parity.
            for (i, shred) in shreds.into_iter().enumerate() {
                if i % 2 == 0 {
                    even_shreds.push(shred);
                } else {
                    odd_shreds.push(shred);
                }
            }

            // Odd-index shreds only: nothing consecutive from index 0 yet.
            blockstore.insert_shreds(odd_shreds, None, false).unwrap();

            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);

            let meta = blockstore.meta(slot).unwrap().unwrap();
            if num_shreds % 2 == 0 {
                // Even total => the last shred has an odd index and was inserted.
                assert_eq!(meta.received, num_shreds);
            } else {
                trace!("got here");
                assert_eq!(meta.received, num_shreds - 1);
            }
            assert_eq!(meta.consumed, 0);
            if num_shreds % 2 == 0 {
                // last_index is only known once the final shred has arrived.
                assert_eq!(meta.last_index, Some(num_shreds - 1));
            } else {
                assert_eq!(meta.last_index, None);
            }

            // Adding the even-index shreds closes every gap: fully consumable.
            blockstore.insert_shreds(even_shreds, None, false).unwrap();

            assert_eq!(
                blockstore.get_slot_entries(slot, 0).unwrap(),
                original_entries,
            );

            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.parent_slot, Some(parent_slot));
            assert_eq!(meta.last_index, Some(num_shreds - 1));
        }
    }
5814
    #[test]
    fn test_data_set_completed_on_insert() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals { blockstore, .. } =
            Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        // Create enough entries to span more than one data shred.
        let slot = 0;
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        let entries = create_ticks(num_entries, slot, Hash::default());
        let shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
        let num_shreds = shreds.len();
        assert!(num_shreds > 1);
        // Inserting all but the first shred completes no data set...
        assert!(blockstore
            .insert_shreds(shreds[1..].to_vec(), None, false)
            .unwrap()
            .is_empty());
        // ...inserting the missing first shred completes the whole range...
        assert_eq!(
            blockstore
                .insert_shreds(vec![shreds[0].clone()], None, false)
                .unwrap(),
            vec![CompletedDataSetInfo {
                slot,
                indices: 0..num_shreds as u32,
            }]
        );
        // ...and re-inserting duplicates reports nothing new.
        assert!(blockstore
            .insert_shreds(shreds, None, false)
            .unwrap()
            .is_empty());
    }
5847
    #[test]
    fn test_new_shreds_signal() {
        // Open a blockstore with the ledger-signal receiver attached.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            ledger_signal_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 50;
        let (mut shreds, _) = make_slot_entries(
            0, 0, entries_per_slot,
        );
        let shreds_per_slot = shreds.len() as u64;

        // Inserting a non-consecutive shred (index 1 without index 0) must
        // not fire the signal.
        blockstore
            .insert_shreds(vec![shreds.remove(1)], None, false)
            .unwrap();
        let timer = Duration::from_secs(1);
        assert!(recvr.recv_timeout(timer).is_err());
        // Inserting shred 0 makes progress consumable: one signal fires.
        blockstore
            .insert_shreds(vec![shreds.remove(0)], None, false)
            .unwrap();
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());
        // Inserting the remainder of the slot fires exactly one more signal.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());

        // Build slots 1..=num_slots, each with one shred held back so every
        // slot has a gap.
        let num_slots = shreds_per_slot;
        let mut shreds = vec![];
        let mut missing_shreds = vec![];
        for slot in 1..num_slots + 1 {
            let (mut slot_shreds, _) = make_slot_entries(
                slot,
                slot - 1, entries_per_slot,
            );
            let missing_shred = slot_shreds.remove(slot as usize - 1);
            shreds.extend(slot_shreds);
            missing_shreds.push(missing_shred);
        }

        // With every slot still gapped, no signal is expected.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(recvr.recv_timeout(timer).is_err());

        // Fill the gaps for the first half of the slots: one signal.
        let missing_shreds2 = missing_shreds
            .drain((num_slots / 2) as usize..)
            .collect_vec();
        blockstore
            .insert_shreds(missing_shreds, None, false)
            .unwrap();
        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());

        // Fill the remaining gaps: one more signal, then quiet.
        blockstore
            .insert_shreds(missing_shreds2, None, false)
            .unwrap();

        assert!(recvr.recv_timeout(timer).is_ok());
        assert!(recvr.try_recv().is_err());
    }
5928
5929 #[test]
5930 fn test_completed_shreds_signal() {
5931 let ledger_path = get_tmp_ledger_path_auto_delete!();
5933 let BlockstoreSignals {
5934 blockstore,
5935 completed_slots_receiver: recvr,
5936 ..
5937 } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
5938
5939 let entries_per_slot = 10;
5940
5941 let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
5943
5944 let shred0 = shreds.remove(0);
5945 blockstore.insert_shreds(shreds, None, false).unwrap();
5947 assert!(recvr.try_recv().is_err());
5948
5949 blockstore.insert_shreds(vec![shred0], None, false).unwrap();
5951 assert_eq!(recvr.try_recv().unwrap(), vec![0]);
5952 }
5953
    #[test]
    fn test_completed_shreds_signal_orphans() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 10;
        let slots = [2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);

        // Slot 10: child of slot 5, which will be absent (an orphan).
        let (mut orphan_child, _) = all_shreds.remove(2);
        // Slot 5: the missing parent itself.
        let (mut orphan_shreds, _) = all_shreds.remove(1);

        // Insert slot 10 minus its first shred: incomplete, so no signal.
        let orphan_child0 = orphan_child.remove(0);
        blockstore.insert_shreds(orphan_child, None, false).unwrap();
        assert!(recvr.try_recv().is_err());

        // Completing slot 10 signals even though its parent is missing.
        blockstore
            .insert_shreds(vec![orphan_child0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]);

        // Repeat the same sequence for the orphaned parent, slot 5.
        let orphan_shred0 = orphan_shreds.remove(0);
        blockstore
            .insert_shreds(orphan_shreds, None, false)
            .unwrap();
        assert!(recvr.try_recv().is_err());

        blockstore
            .insert_shreds(vec![orphan_shred0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]);
    }
5998
    // Inserting shreds for several slots (including one disconnected from the
    // others) in a single shuffled batch should yield one signal containing
    // every completed slot, regardless of insertion order.
    #[test]
    fn test_completed_shreds_signal_many() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let BlockstoreSignals {
            blockstore,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

        let entries_per_slot = 10;
        let mut slots = vec![2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);
        // A slot whose parent (1) is never inserted.
        let disconnected_slot = 4;

        let (shreds0, _) = all_shreds.remove(0);
        let (shreds1, _) = all_shreds.remove(0);
        let (shreds2, _) = all_shreds.remove(0);
        let (shreds3, _) = make_slot_entries(
            disconnected_slot,
            1, entries_per_slot,
        );

        let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
            .into_iter()
            .flatten()
            .collect();

        // Shuffle so completion detection cannot depend on arrival order.
        all_shreds.shuffle(&mut thread_rng());
        blockstore.insert_shreds(all_shreds, None, false).unwrap();
        let mut result = recvr.try_recv().unwrap();
        result.sort_unstable();
        slots.push(disconnected_slot);
        slots.sort_unstable();
        assert_eq!(result, slots);
    }
6036
    // Verifies SlotMeta parent/child chaining as slots 0..=2 arrive out of
    // order: next_slots, parent_slot, and connected status must update as
    // each slot is inserted.
    #[test]
    fn test_handle_chaining_basic() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = 5;
        let num_slots = 3;

        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;

        // 1) Insert only slot 1: it records its parent (0) but cannot be
        //    connected because slot 0 is absent.
        let shreds1 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds1, None, false).unwrap();
        let meta1 = blockstore.meta(1).unwrap().unwrap();
        assert!(meta1.next_slots.is_empty());
        assert!(!meta1.is_connected());
        assert_eq!(meta1.parent_slot, Some(0));
        assert_eq!(meta1.last_index, Some(shreds_per_slot as u64 - 1));

        // 2) Insert slot 2 (the next shreds_per_slot chunk of the remainder).
        let shreds2 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds2, None, false).unwrap();
        let meta2 = blockstore.meta(2).unwrap().unwrap();
        assert!(meta2.next_slots.is_empty());
        assert!(!meta2.is_connected());
        assert_eq!(meta2.parent_slot, Some(1));
        assert_eq!(meta2.last_index, Some(shreds_per_slot as u64 - 1));

        // Slot 1 now lists slot 2 as a child but is still unconnected.
        let meta1 = blockstore.meta(1).unwrap().unwrap();
        assert_eq!(meta1.next_slots, vec![2]);
        assert!(!meta1.is_connected());
        assert_eq!(meta1.parent_slot, Some(0));
        assert_eq!(meta1.last_index, Some(shreds_per_slot as u64 - 1));

        // 3) Insert slot 0 (remaining shreds): the whole chain connects.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for slot in 0..3 {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            if slot != 2 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            }
            // Slot 0 reports itself as its own parent.
            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
            assert!(meta.is_connected());
        }
    }
6099
    // Chaining across gaps: insert only the odd slots first (leaving their
    // even parents missing), then backfill the even slots and verify the
    // whole 0..30 chain becomes full and connected.
    #[test]
    fn test_handle_chaining_missing_slots() {
        agave_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_slots = 30;
        let entries_per_slot = 5;
        let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() as u64 / num_slots;
        let (even_slots, odd_slots): (Vec<_>, Vec<_>) =
            shreds.into_iter().partition(|shred| shred.slot() % 2 == 0);

        blockstore.insert_shreds(odd_slots, None, false).unwrap();

        for slot in 0..num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            if slot % 2 == 0 {
                // Even slots exist only as placeholder metas created by their
                // odd children: child link set, parent unknown.
                assert_eq!(meta.next_slots, vec![slot + 1]);
                assert_eq!(meta.parent_slot, None);
            } else {
                assert!(meta.next_slots.is_empty());
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }

            // Nothing connects while every other slot is missing.
            assert!(!meta.is_connected());
            assert!(!meta.is_parent_connected() || slot == 0);
        }

        blockstore.insert_shreds(even_slots, None, false).unwrap();

        // With the even slots backfilled, every slot is full, chained to its
        // neighbors, and connected.
        for slot in 0..num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            if slot != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }
            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            assert_eq!(meta.last_index, Some(shreds_per_slot - 1));
            assert!(meta.is_full());
            assert!(meta.is_connected());
        }
    }
6163
    // Connected status must propagate forward through the chain: with the
    // first shred of every third slot withheld, no slot is connected; as each
    // withheld shred is restored (in slot order), every slot before the next
    // remaining gap becomes full and connected.
    #[test]
    #[allow(clippy::cognitive_complexity)]
    pub fn test_forward_chaining_is_connected() {
        agave_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_slots = 15;
        // Enough entries to guarantee more than one shred per slot, so a
        // single shred can be withheld while the rest are inserted.
        let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
        assert!(entries_per_slot > 1);

        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;
        assert!(shreds_per_slot > 1);

        // Insert each slot, withholding the first shred of every 3rd slot.
        let mut missing_shreds = vec![];
        for slot in 0..num_slots {
            let mut shreds_for_slot = shreds.drain(..shreds_per_slot).collect_vec();
            if slot % 3 == 0 {
                let shred0 = shreds_for_slot.remove(0);
                missing_shreds.push(shred0);
            }
            blockstore
                .insert_shreds(shreds_for_slot, None, false)
                .unwrap();
        }

        // Chaining info is in place, but nothing is connected yet because
        // slot 0 (a gap slot) is still incomplete.
        for slot in 0..num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            if slot != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![slot + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }

            if slot == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(slot - 1));
            }
            assert!(!meta.is_connected());

            assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
        }

        // Restore the withheld shreds one gap at a time; after filling the
        // gap at `slot_index`, every slot before the NEXT gap
        // (slot_index + 3) must be full and connected.
        for slot_index in 0..num_slots {
            if slot_index % 3 == 0 {
                let shred = missing_shreds.remove(0);
                blockstore.insert_shreds(vec![shred], None, false).unwrap();

                for slot in 0..num_slots {
                    let meta = blockstore.meta(slot).unwrap().unwrap();

                    if slot != num_slots - 1 {
                        assert_eq!(meta.next_slots, vec![slot + 1]);
                    } else {
                        assert!(meta.next_slots.is_empty());
                    }

                    if slot < slot_index + 3 {
                        assert!(meta.is_full());
                        assert!(meta.is_connected());
                    } else {
                        assert!(!meta.is_connected());
                    }

                    assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
                }
            }
        }
    }
6244
    // scan_and_fix_roots fills in missing roots between known roots by
    // walking ancestry within the given (start, end) bounds. Verifies the
    // error on an unrooted range and several incremental repair passes.
    #[test]
    fn test_scan_and_fix_roots() {
        // Helper: all currently-rooted slots, in ascending order.
        fn blockstore_roots(blockstore: &Blockstore) -> Vec<Slot> {
            blockstore
                .rooted_slot_iterator(0)
                .unwrap()
                .collect::<Vec<_>>()
        }

        agave_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = max_ticks_per_n_shreds(5, None);
        let start_slot: Slot = 0;
        let num_slots = 18;

        // Build a tree where even slots chain to slot - 2 and odd slots to
        // slot - 1, so the even slots 0-2-4-... form the rooted trunk.
        let shreds: Vec<_> = (start_slot..=num_slots)
            .flat_map(|slot| {
                let parent_slot = if slot % 2 == 0 {
                    slot.saturating_sub(2)
                } else {
                    slot.saturating_sub(1)
                };
                let (shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot);
                shreds.into_iter()
            })
            .collect();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // No roots exist yet: scanning from an unrooted slot must fail.
        let (start, end) = (Some(16), None);
        assert_matches!(
            blockstore.scan_and_fix_roots(start, end, &AtomicBool::new(false)),
            Err(BlockstoreError::SlotNotRooted)
        );

        let new_roots = vec![6, 12];
        blockstore.set_roots(new_roots.iter()).unwrap();
        assert_eq!(&new_roots, &blockstore_roots(&blockstore));

        // Repair between 12 and 8: ancestors 10 and 8 become roots.
        let (start, end) = (Some(12), Some(8));
        let roots = vec![6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Open start bound: scan down to slot 4.
        let (start, end) = (None, Some(4));
        let roots = vec![4, 6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Open end bound: scan all the way down to genesis.
        let (start, end) = (Some(12), None);
        let roots = vec![0, 2, 4, 6, 8, 10, 12];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Rooting 16 leaves a gap at 14 ...
        let new_roots = [16];
        let roots = vec![0, 2, 4, 6, 8, 10, 12, 16];
        blockstore.set_roots(new_roots.iter()).unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // ... which a fully unbounded scan repairs.
        let (start, end) = (None, None);
        let roots = vec![0, 2, 4, 6, 8, 10, 12, 14, 16];
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));

        // Running again with nothing left to fix is a no-op.
        blockstore
            .scan_and_fix_roots(start, end, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(&roots, &blockstore_roots(&blockstore));
    }
6335
    // set_and_chain_connected_on_root_and_next_slots marks a (possibly
    // shred-less) root slot connected and propagates connectivity through
    // full descendant slots, stopping at the first non-full slot.
    #[test]
    fn test_set_and_chain_connected_on_root_and_next_slots() {
        agave_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = max_ticks_per_n_shreds(5, None);

        // Mark a slot with no shreds at all: a meta is created and flagged
        // connected even though the slot is not full.
        let mut start_slot = 5;
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .unwrap();
        let slot_meta5 = blockstore.meta(start_slot).unwrap().unwrap();
        assert!(!slot_meta5.is_full());
        assert!(slot_meta5.is_parent_connected());
        assert!(slot_meta5.is_connected());

        // Descendants inserted after the root is connected inherit
        // connectivity as they arrive.
        let num_slots = 5;
        start_slot += 1;
        let (shreds, _) = make_many_slot_entries(start_slot, num_slots, entries_per_slot);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for slot in start_slot..start_slot + num_slots {
            info!("Evaluating slot {slot}");
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert!(meta.is_parent_connected());
            assert!(meta.is_connected());
        }

        // Re-running on an already-connected chain is idempotent.
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .unwrap();
        for slot in start_slot..start_slot + num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert!(meta.is_parent_connected());
            assert!(meta.is_connected());
        }

        // New, disjoint chain with one slot deliberately left incomplete
        // (only its index-0 shred is inserted).
        start_slot += 2 * num_slots;
        let (shreds, _) = make_many_slot_entries(start_slot, num_slots, entries_per_slot);
        let non_full_slot = start_slot + num_slots / 2;
        let (shreds, missing_shreds): (Vec<_>, Vec<_>) = shreds
            .into_iter()
            .partition(|shred| shred.slot() != non_full_slot || shred.index() == 0);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for slot in start_slot..start_slot + num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert!(!meta.is_parent_connected());
            assert!(!meta.is_connected());
        }
        // Chaining stops at the non-full slot: it becomes parent-connected
        // but not connected, and nothing past it is touched.
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .unwrap();
        for slot in start_slot..start_slot + num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            match slot.cmp(&non_full_slot) {
                Ordering::Less => {
                    assert!(meta.is_parent_connected());
                    assert!(meta.is_connected());
                }
                Ordering::Equal => {
                    assert!(meta.is_parent_connected());
                    assert!(!meta.is_connected());
                }
                Ordering::Greater => {
                    assert!(!meta.is_parent_connected());
                    assert!(!meta.is_connected());
                }
            }
        }

        // Completing the non-full slot lets connectivity flow to the rest.
        blockstore
            .insert_shreds(missing_shreds, None, false)
            .unwrap();
        for slot in start_slot..start_slot + num_slots {
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert!(meta.is_parent_connected());
            assert!(meta.is_connected());
        }
    }
6432
6433 #[test]
6434 fn test_slot_range_connected_chain() {
6435 let ledger_path = get_tmp_ledger_path_auto_delete!();
6436 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6437
6438 let num_slots = 3;
6439 for slot in 1..=num_slots {
6440 make_and_insert_slot(&blockstore, slot, slot.saturating_sub(1));
6441 }
6442
6443 assert!(blockstore.slot_range_connected(1, 3));
6444 assert!(!blockstore.slot_range_connected(1, 4)); }
6446
6447 #[test]
6448 fn test_slot_range_connected_disconnected() {
6449 let ledger_path = get_tmp_ledger_path_auto_delete!();
6450 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6451
6452 make_and_insert_slot(&blockstore, 1, 0);
6453 make_and_insert_slot(&blockstore, 2, 1);
6454 make_and_insert_slot(&blockstore, 4, 2);
6455
6456 assert!(blockstore.slot_range_connected(1, 3)); assert!(blockstore.slot_range_connected(1, 4));
6458 }
6459
6460 #[test]
6461 fn test_slot_range_connected_same_slot() {
6462 let ledger_path = get_tmp_ledger_path_auto_delete!();
6463 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6464
6465 assert!(blockstore.slot_range_connected(54, 54));
6466 }
6467
6468 #[test]
6469 fn test_slot_range_connected_starting_slot_not_full() {
6470 let ledger_path = get_tmp_ledger_path_auto_delete!();
6471 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6472
6473 make_and_insert_slot(&blockstore, 5, 4);
6474 make_and_insert_slot(&blockstore, 6, 5);
6475
6476 assert!(!blockstore.meta(4).unwrap().unwrap().is_full());
6477 assert!(blockstore.slot_range_connected(4, 6));
6478 }
6479
6480 #[test]
6481 fn test_get_slots_since() {
6482 let ledger_path = get_tmp_ledger_path_auto_delete!();
6483 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
6484
6485 assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty());
6487
6488 let mut meta0 = SlotMeta::new(0, Some(0));
6489 blockstore.meta_cf.put(0, &meta0).unwrap();
6490
6491 let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
6493 assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
6494 meta0.next_slots = vec![1, 2];
6495 blockstore.meta_cf.put(0, &meta0).unwrap();
6496
6497 let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
6499 assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
6500 assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);
6501
6502 let mut meta3 = SlotMeta::new(3, Some(1));
6503 meta3.next_slots = vec![10, 5];
6504 blockstore.meta_cf.put(3, &meta3).unwrap();
6505 let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
6506 .into_iter()
6507 .collect();
6508 assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
6509 }
6510
    // Orphan bookkeeping: the orphans column tracks slots whose parent is
    // missing, and entries are removed as the parents arrive.
    #[test]
    fn test_orphans() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let entries_per_slot = 1;
        let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot);
        let shreds_per_slot = shreds.len() / 3;

        // Insert slot 2 alone: its parent (slot 1) is missing, so slot 1 is
        // recorded as an orphan via a placeholder meta.
        let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec();
        blockstore
            .insert_shreds(shreds_for_slot, None, false)
            .unwrap();
        let meta = blockstore
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(meta.is_orphan());
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![1]
        );

        // Insert slot 1: it stops being an orphan, but its own parent
        // (slot 0) becomes one.
        let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec();
        blockstore
            .insert_shreds(shreds_for_slot, None, false)
            .unwrap();
        let meta = blockstore
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(!meta.is_orphan());
        let meta = blockstore
            .meta(0)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(meta.is_orphan());
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![0]
        );

        // Slots chaining to already-present slots (0 and 1) add no orphans.
        let (shred4, _) = make_slot_entries(4, 0, 1);
        let (shred5, _) = make_slot_entries(5, 1, 1);
        blockstore.insert_shreds(shred4, None, false).unwrap();
        blockstore.insert_shreds(shred5, None, false).unwrap();
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![0]
        );

        // Inserting the remaining (slot 0) shreds clears the orphans column.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for i in 0..3 {
            let meta = blockstore
                .meta(i)
                .expect("Expect database get to succeed")
                .unwrap();
            assert!(!meta.is_orphan());
        }
        assert!(blockstore.orphans_cf.is_empty().unwrap());
    }
6581
    // Shared driver: inserts 20 slots' worth of shreds either as one batch or
    // one shred per call, then verifies stored entries and SlotMeta fields.
    // (The loop below checks slots 0..num_slots-1; presumably the last slot
    // is excluded deliberately — TODO(review) confirm.)
    fn test_insert_data_shreds_slots(should_bulk_write: bool) {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_slots = 20_u64;
        let mut entries = vec![];
        let mut shreds = vec![];
        for slot in 0..num_slots {
            let parent_slot = slot.saturating_sub(1);
            let (slot_shreds, entry) = make_slot_entries(slot, parent_slot, 1);
            shreds.extend(slot_shreds);
            entries.extend(entry);
        }

        let num_shreds = shreds.len();
        if should_bulk_write {
            blockstore.insert_shreds(shreds, None, false).unwrap();
        } else {
            // Exercise the incremental path: one shred per insert call.
            for _ in 0..num_shreds {
                let shred = shreds.remove(0);
                blockstore.insert_shreds(vec![shred], None, false).unwrap();
            }
        }

        for i in 0..num_slots - 1 {
            assert_eq!(
                blockstore.get_slot_entries(i, 0).unwrap()[0],
                entries[i as usize]
            );

            // Each slot was shredded into exactly one FEC block of data
            // shreds, so received/consumed/last_index are all fixed.
            let meta = blockstore.meta(i).unwrap().unwrap();
            assert_eq!(meta.received, DATA_SHREDS_PER_FEC_BLOCK as u64);
            assert_eq!(meta.last_index, Some(DATA_SHREDS_PER_FEC_BLOCK as u64 - 1));
            assert_eq!(meta.parent_slot, Some(i.saturating_sub(1)));
            assert_eq!(meta.consumed, DATA_SHREDS_PER_FEC_BLOCK as u64);
        }
    }
6621
    // find_missing_data_indexes over a slot holding only shred indexes 0 and
    // `gap`: checks window clamping, the max_missing limit, and multi-gap
    // sweeps. Positional args after `slot` are (first_timestamp,
    // defer_threshold_ticks, start_index, end_index, max_missing) — the
    // names are visible in test_find_missing_data_indexes_timeout below.
    #[test]
    fn test_find_missing_data_indexes() {
        let slot = 0;
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Keep only every `gap`-th shred, truncated to indexes 0 and `gap`.
        let gap: u64 = 10;
        assert!(gap > 3);
        let entries = create_ticks(1, 0, Hash::default());
        let mut shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
        shreds.retain(|s| (s.index() % gap as u32) == 0);
        let num_shreds = 2;
        shreds.truncate(num_shreds);
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // Missing indexes in [0, gap) are exactly 1..gap.
        let expected: Vec<u64> = (1..gap).collect();
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, 0, gap, gap as usize, ),
            expected
        );
        // Starting the window at 1 gives the same set.
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, 1, gap, (gap - 1) as usize, ),
            expected,
        );
        // Ending one early drops the last missing index.
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, 0, gap - 1, (gap - 1) as usize, ),
            &expected[..expected.len() - 1],
        );
        // Narrow window at the tail of the gap.
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, gap - 2, gap, gap as usize, ),
            vec![gap - 2, gap - 1],
        );
        // max_missing truncates the result.
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, gap - 2, gap, 1, ),
            vec![gap - 2],
        );
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, 0, gap, 1, ),
            vec![1],
        );

        // Querying past the second present shred (index `gap`) also reports
        // index gap + 1.
        let mut expected: Vec<u64> = (1..gap).collect();
        expected.push(gap + 1);
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, 0, gap + 2, (gap + 2) as usize, ),
            expected,
        );
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                0, 0, 0, gap + 2, (gap - 1) as usize, ),
            &expected[..expected.len() - 1],
        );

        // Sweep every aligned sub-range between the present shreds.
        for i in 0..num_shreds as u64 {
            for j in 0..i {
                let expected: Vec<u64> = (j..i)
                    .flat_map(|k| {
                        let begin = k * gap + 1;
                        let end = (k + 1) * gap;
                        begin..end
                    })
                    .collect();
                assert_eq!(
                    blockstore.find_missing_data_indexes(
                        slot,
                        0, 0, j * gap, i * gap, ((i - j) * gap) as usize, ),
                    expected,
                );
            }
        }
    }
6761
    // Missing-index reporting is deferred for freshly received slots: with a
    // first_timestamp of "now" nothing is reported; with a first_timestamp
    // one slot's wall time in the past, the gaps (capped by max_missing)
    // are reported.
    #[test]
    fn test_find_missing_data_indexes_timeout() {
        let slot = 1;
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let defer_threshold_ticks = DEFAULT_TICKS_PER_SLOT / 16;
        let start_index = 0;
        let end_index = 50;
        let max_missing = 9;

        let gap: u64 = 10;

        // Build 64 single shreds at indexes 0, gap, 2*gap, ... — each from
        // its own shredder so the starting index can be set directly.
        let keypair = Keypair::new();
        let reed_solomon_cache = ReedSolomonCache::default();
        let mut stats = ProcessShredsStats::default();
        let shreds: Vec<_> = (0u64..64)
            .map(|i| {
                let shredder = Shredder::new(slot, slot - 1, i as u8, 42).unwrap();

                let mut shreds = shredder
                    .make_shreds_from_data_slice(
                        &keypair,
                        &[],
                        false,
                        Hash::default(), (i * gap) as u32,
                        (i * gap) as u32,
                        &reed_solomon_cache,
                        &mut stats,
                    )
                    .unwrap();
                shreds.next().unwrap()
            })
            .collect();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // first_timestamp == now: reporting deferred, nothing returned.
        let empty: Vec<u64> = vec![];
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                timestamp(), defer_threshold_ticks,
                start_index,
                end_index,
                max_missing,
            ),
            empty
        );
        // first_timestamp one slot in the past: gaps are reported, capped
        // at max_missing (9).
        let expected: Vec<_> = (1..=9).collect();
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot,
                timestamp() - DEFAULT_MS_PER_SLOT, defer_threshold_ticks,
                start_index,
                end_index,
                max_missing,
            ),
            expected
        );
    }
6827
    // Edge cases for find_missing_data_indexes: degenerate windows return
    // nothing, and with shreds present only at indexes ONE and OTHER, every
    // other index in a window is reported missing.
    #[test]
    fn test_find_missing_data_indexes_sanity() {
        let slot = 0;

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Degenerate windows on an empty slot: empty range, start == end,
        // start > end, and max_missing == 0 all yield nothing.
        let empty: Vec<u64> = vec![];
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, 0, 0, 1, ),
            empty
        );
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, 5, 5, 1, ),
            empty
        );
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, 4, 3, 1, ),
            empty
        );
        assert_eq!(
            blockstore.find_missing_data_indexes(
                slot, 0, 0, 1, 2, 0, ),
            empty
        );

        let entries = create_ticks(100, 0, Hash::default());
        let mut shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);

        // Keep only the shreds at indexes ONE and OTHER.
        const ONE: u64 = 1;
        const OTHER: u64 = 4;
        assert!(shreds.len() > OTHER as usize);

        // Remove the higher index first so index ONE stays valid.
        let shreds = vec![shreds.remove(OTHER as usize), shreds.remove(ONE as usize)];

        blockstore.insert_shreds(shreds, None, false).unwrap();

        // For every window [start, END): everything except ONE and OTHER is
        // missing (10 fits all misses, so max_missing never truncates).
        const STARTS: u64 = OTHER * 2;
        const END: u64 = OTHER * 3;
        const MAX: usize = 10;
        for start in 0..STARTS {
            let result = blockstore.find_missing_data_indexes(
                slot, 0, 0, start, END, MAX, );
            let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect();
            assert_eq!(result, expected);
        }
    }
6913
    // A fully populated slot reports no missing indexes for any sub-range.
    #[test]
    fn test_no_missing_shred_indexes() {
        let slot = 0;
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let num_entries = 10;
        let entries = create_ticks(num_entries, 0, Hash::default());
        let shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
        let num_shreds = shreds.len();

        blockstore.insert_shreds(shreds, None, false).unwrap();

        // Every [j, i) window within the slot must come back empty.
        let empty: Vec<u64> = vec![];
        for i in 0..num_shreds as u64 {
            for j in 0..i {
                assert_eq!(
                    blockstore.find_missing_data_indexes(
                        slot,
                        0, 0, j, i, (i - j) as usize, ),
                    empty
                );
            }
        }
    }
6945
6946 #[test]
6947 fn test_verify_shred_slots() {
6948 assert!(verify_shred_slots(0, 0, 0));
6950 assert!(verify_shred_slots(2, 1, 0));
6951 assert!(verify_shred_slots(2, 1, 1));
6952 assert!(!verify_shred_slots(2, 3, 0));
6953 assert!(!verify_shred_slots(2, 2, 0));
6954 assert!(!verify_shred_slots(2, 3, 3));
6955 assert!(!verify_shred_slots(2, 2, 2));
6956 assert!(!verify_shred_slots(2, 1, 3));
6957 assert!(!verify_shred_slots(2, 3, 4));
6958 assert!(!verify_shred_slots(2, 2, 3));
6959 }
6960
    // should_insert_data_shred: a shred carrying the last-in-slot flag must
    // be rejected (and reported as a LastIndexConflict duplicate) when
    // higher-indexed shreds already exist, and shreds whose indexes lie past
    // a recorded last index must likewise be rejected.
    #[test]
    fn test_should_insert_data_shred() {
        agave_logger::setup();
        let entries = create_ticks(2000, 1, Hash::new_unique());
        let shredder = Shredder::new(0, 0, 1, 0).unwrap();
        let keypair = Keypair::new();
        let rsc = ReedSolomonCache::default();
        let shreds = shredder
            .entries_to_merkle_shreds_for_tests(
                &keypair,
                &entries,
                true,
                Hash::default(), 0,
                0,
                &rsc,
                &mut ProcessShredsStats::default(),
            )
            .0;
        assert!(
            shreds.len() > DATA_SHREDS_PER_FEC_BLOCK,
            "we want multiple fec sets",
        );
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let max_root = 0;

        // Seed the slot with its first five shreds.
        blockstore
            .insert_shreds(shreds[0..5].to_vec(), None, false)
            .unwrap();

        let slot_meta = blockstore.meta(0).unwrap().unwrap();

        // Shred an empty payload starting at index 6; its final shred
        // carries the last-in-slot flag (asserted below).
        let terminator = shredder
            .entries_to_merkle_shreds_for_tests(
                &keypair,
                &[],
                true,
                Hash::default(), 6, 6, &rsc,
                &mut ProcessShredsStats::default(),
            )
            .0;

        // With only indexes 0..5 present, the terminator is insertable.
        let terminator_shred = terminator.last().unwrap().clone();
        assert!(terminator_shred.last_in_slot());
        assert!(blockstore.should_insert_data_shred(
            &terminator_shred,
            &slot_meta,
            &HashMap::new(),
            max_root,
            None,
            ShredSource::Repaired,
            &mut Vec::new(),
        ));
        // Insert a shred indexed beyond the terminator; now the terminator's
        // last-in-slot claim conflicts with the slot's `received` count.
        let term_last_idx = terminator.last().unwrap().index() as usize;
        blockstore
            .insert_shreds(
                shreds[term_last_idx + 2..term_last_idx + 3].iter().cloned(),
                None,
                false,
            )
            .unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(slot_meta.received, term_last_idx as u64 + 3);
        let mut duplicate_shreds = vec![];
        assert!(
            !blockstore.should_insert_data_shred(
                &terminator_shred,
                &slot_meta,
                &HashMap::new(),
                max_root,
                None,
                ShredSource::Repaired,
                &mut duplicate_shreds,
            ),
            "Should not insert shred with 'last' flag set and index less than already existing \
             shreds"
        );
        // The rejection marks the slot duplicate and records the conflict.
        assert!(blockstore.has_duplicate_shreds_in_slot(0));
        assert_eq!(duplicate_shreds.len(), 1);
        assert_matches!(
            duplicate_shreds[0],
            PossibleDuplicateShred::LastIndexConflict(_, _)
        );
        assert_eq!(duplicate_shreds[0].slot(), 0);
        // Complete the slot, then build shreds whose indexes start at the
        // slot's final index.
        let last_idx = shreds.last().unwrap().index();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();

        let past_tail_shreds = shredder
            .entries_to_merkle_shreds_for_tests(
                &Keypair::new(),
                &entries,
                true,
                Hash::default(), last_idx, last_idx, &rsc,
                &mut ProcessShredsStats::default(),
            )
            .0;

        // Reset duplicate tracking so the next rejection is observed fresh.
        duplicate_shreds.clear();
        blockstore.duplicate_slots_cf.delete(0).unwrap();
        assert!(!blockstore.has_duplicate_shreds_in_slot(0));
        assert!(
            !blockstore.should_insert_data_shred(
                &past_tail_shreds[5], &slot_meta,
                &HashMap::new(),
                max_root,
                None,
                ShredSource::Repaired,
                &mut duplicate_shreds,
            ),
            "Shreds past end of block should fail to insert"
        );

        // The past-the-end shred re-flags the slot as duplicate.
        assert_eq!(duplicate_shreds.len(), 1);
        assert_matches!(
            duplicate_shreds[0],
            PossibleDuplicateShred::LastIndexConflict(_, _)
        );
        assert_eq!(duplicate_shreds[0].slot(), 0);
        assert!(blockstore.has_duplicate_shreds_in_slot(0));
    }
7097
    // is_data_shred_present consults the slot's data index: shreds that were
    // inserted — whether consecutive or past a gap — are reported present.
    #[test]
    fn test_is_data_shred_present() {
        let (shreds, _) = make_slot_entries(0, 0, 200);
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let index_cf = &blockstore.index_cf;

        // Insert the first five shreds; `consumed` advances to 5 and shred 1
        // is reported present.
        blockstore
            .insert_shreds(shreds[0..5].to_vec(), None, false)
            .unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        let index = index_cf.get(0).unwrap().unwrap();
        assert_eq!(slot_meta.consumed, 5);
        assert!(Blockstore::is_data_shred_present(
            &shreds[1],
            &slot_meta,
            index.data(),
        ));

        // Insert shred 6, leaving a hole at index 5; shred 6 is still
        // present via the index even though it lies beyond `consumed`.
        blockstore
            .insert_shreds(shreds[6..7].to_vec(), None, false)
            .unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        let index = index_cf.get(0).unwrap().unwrap();
        assert!(Blockstore::is_data_shred_present(
            &shreds[6],
            &slot_meta,
            index.data()
        ),);
    }
7131
    // Merkle-root metadata bookkeeping for coding shreds: the first shred of
    // an erasure set records its merkle root / first index / shred type; a
    // shred from a different batch over the same erasure set is rejected as a
    // merkle-root conflict; a shred from a new erasure set gets its own entry
    // without disturbing the first.
    #[test]
    fn test_merkle_root_metas_coding() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let index = 0;
        let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
        let coding_shred = coding_shreds[index as usize].clone();

        // First insertion creates a MerkleRootMeta keyed by the erasure set.
        let mut shred_insertion_tracker =
            ShredInsertionTracker::new(coding_shreds.len(), blockstore.get_write_batch().unwrap());
        assert!(blockstore.check_insert_coding_shred(
            Cow::Borrowed(&coding_shred),
            &mut shred_insertion_tracker,
            false,
            ShredSource::Turbine,
            &mut BlockstoreInsertionMetrics::default(),
        ));
        let ShredInsertionTracker {
            merkle_root_metas,
            write_batch,
            ..
        } = shred_insertion_tracker;

        // The meta records this shred's root, index, and type.
        assert_eq!(merkle_root_metas.len(), 1);
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .merkle_root(),
            coding_shred.merkle_root().ok(),
        );
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .first_received_shred_index(),
            index
        );
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .first_received_shred_type(),
            ShredType::Code,
        );

        // Persist the metas and flush the pending write batch.
        for (erasure_set, working_merkle_root_meta) in merkle_root_metas {
            blockstore
                .merkle_root_meta_cf
                .put(erasure_set.store_key(), working_merkle_root_meta.as_ref())
                .unwrap();
        }
        blockstore.write_batch(write_batch).unwrap();

        // A shred from a freshly generated batch over the same erasure set
        // carries a different merkle root and must be rejected.
        let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
        let new_coding_shred = coding_shreds[(index + 1) as usize].clone();

        let mut shred_insertion_tracker =
            ShredInsertionTracker::new(coding_shreds.len(), blockstore.get_write_batch().unwrap());

        assert!(!blockstore.check_insert_coding_shred(
            Cow::Owned(new_coding_shred),
            &mut shred_insertion_tracker,
            false,
            ShredSource::Turbine,
            &mut BlockstoreInsertionMetrics::default(),
        ));
        let ShredInsertionTracker {
            ref merkle_root_metas,
            ref duplicate_shreds,
            ..
        } = shred_insertion_tracker;

        // The rejection surfaces as a MerkleRootConflict for this slot.
        assert_eq!(duplicate_shreds.len(), 1);
        match &duplicate_shreds[0] {
            PossibleDuplicateShred::MerkleRootConflict(shred, _) if shred.slot() == slot => (),
            _ => panic!("No merkle root conflict"),
        }

        // The tracker's meta still describes the original shred ...
        assert_eq!(merkle_root_metas.len(), 1);
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .merkle_root(),
            coding_shred.merkle_root().ok()
        );
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .first_received_shred_index(),
            index
        );

        // ... as does the persisted column entry.
        assert_eq!(
            blockstore
                .merkle_root_meta(coding_shred.erasure_set())
                .unwrap()
                .unwrap()
                .merkle_root(),
            coding_shred.merkle_root().ok()
        );
        assert_eq!(
            blockstore
                .merkle_root_meta(coding_shred.erasure_set())
                .unwrap()
                .unwrap()
                .first_received_shred_index(),
            index
        );

        // A shred built with starting index `index + 31` belongs to a
        // different erasure set: it is accepted and tracked under its own
        // key, leaving the original meta intact.
        let new_index = index + 31;
        let (_, coding_shreds, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, new_index);
        let new_coding_shred = coding_shreds[0].clone();

        assert!(blockstore.check_insert_coding_shred(
            Cow::Borrowed(&new_coding_shred),
            &mut shred_insertion_tracker,
            false,
            ShredSource::Turbine,
            &mut BlockstoreInsertionMetrics::default(),
        ));
        let ShredInsertionTracker {
            ref merkle_root_metas,
            ..
        } = shred_insertion_tracker;

        // Two entries now: one per erasure set, each with its own root/index.
        assert_eq!(merkle_root_metas.len(), 2);
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .merkle_root(),
            coding_shred.merkle_root().ok()
        );
        assert_eq!(
            merkle_root_metas
                .get(&coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .first_received_shred_index(),
            index
        );
        assert_eq!(
            merkle_root_metas
                .get(&new_coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .merkle_root(),
            new_coding_shred.merkle_root().ok()
        );
        assert_eq!(
            merkle_root_metas
                .get(&new_coding_shred.erasure_set())
                .unwrap()
                .as_ref()
                .first_received_shred_index(),
            new_index
        );
    }
7310
#[test]
fn test_merkle_root_metas_data() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let parent_slot = 0;
    let slot = 1;
    let index = 11;
    let fec_set_index = 11;
    let (data_shreds, _, _) =
        setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
    let data_shred = data_shreds[0].clone();

    // Insert the first data shred of the erasure set; this should create the
    // merkle root meta entry for the set in the tracker.
    let mut shred_insertion_tracker =
        ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());
    blockstore
        .check_insert_data_shred(
            Cow::Borrowed(&data_shred),
            &mut shred_insertion_tracker,
            false,
            None,
            ShredSource::Turbine,
        )
        .unwrap();
    let ShredInsertionTracker {
        merkle_root_metas,
        write_batch,
        ..
    } = shred_insertion_tracker;
    // The tracker's merkle root meta should reflect the first received shred
    // of the erasure set: its merkle root, index, and type.
    assert_eq!(merkle_root_metas.len(), 1);
    assert_eq!(
        merkle_root_metas
            .get(&data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .merkle_root(),
        data_shred.merkle_root().ok()
    );
    assert_eq!(
        merkle_root_metas
            .get(&data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .first_received_shred_index(),
        index
    );
    assert_eq!(
        merkle_root_metas
            .get(&data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .first_received_shred_type(),
        ShredType::Data,
    );

    // Persist the tracked merkle root metas and the accumulated write batch.
    for (erasure_set, working_merkle_root_meta) in merkle_root_metas {
        blockstore
            .merkle_root_meta_cf
            .put(erasure_set.store_key(), working_merkle_root_meta.as_ref())
            .unwrap();
    }
    blockstore.write_batch(write_batch).unwrap();

    // Regenerate the erasure set (new random keypair => different merkle root)
    // and try to insert another shred from the same (slot, fec_set_index).
    let (data_shreds, _, _) =
        setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
    let new_data_shred = data_shreds[1].clone();

    let mut shred_insertion_tracker =
        ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());

    // The merkle root no longer matches the stored meta, so insertion fails
    // and the conflict is reported as a possible duplicate.
    assert!(blockstore
        .check_insert_data_shred(
            Cow::Owned(new_data_shred),
            &mut shred_insertion_tracker,
            false,
            None,
            ShredSource::Turbine,
        )
        .is_err());
    let ShredInsertionTracker {
        merkle_root_metas,
        duplicate_shreds,
        write_batch,
        ..
    } = shred_insertion_tracker;

    assert_eq!(duplicate_shreds.len(), 1);
    assert_matches!(
        duplicate_shreds[0],
        PossibleDuplicateShred::MerkleRootConflict(_, _)
    );

    // The original meta must be untouched by the rejected insertion.
    assert_eq!(merkle_root_metas.len(), 1);
    assert_eq!(
        merkle_root_metas
            .get(&data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .merkle_root(),
        data_shred.merkle_root().ok()
    );
    assert_eq!(
        merkle_root_metas
            .get(&data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .first_received_shred_index(),
        index
    );

    // NOTE: use the Blockstore wrapper (as the rest of this file does at the
    // other call sites) rather than reaching into `blockstore.db` directly.
    blockstore.write_batch(write_batch).unwrap();
    // The merkle root conflict marks the slot dead; clear it so further
    // insertions into this slot are accepted.
    assert!(blockstore.is_dead(slot));
    blockstore.remove_dead_slot(slot).unwrap();

    // The persisted meta still matches the first inserted shred.
    assert_eq!(
        blockstore
            .merkle_root_meta(data_shred.erasure_set())
            .unwrap()
            .unwrap()
            .merkle_root(),
        data_shred.merkle_root().ok()
    );
    assert_eq!(
        blockstore
            .merkle_root_meta(data_shred.erasure_set())
            .unwrap()
            .unwrap()
            .first_received_shred_index(),
        index
    );

    // Produce a shred for a *different* fec set in the same slot; it should
    // get its own, independent merkle root meta entry.
    let shredder = Shredder::new(slot, slot.saturating_sub(1), 0, 0).unwrap();
    let keypair = Keypair::new();
    let reed_solomon_cache = ReedSolomonCache::default();
    let new_index = fec_set_index + 31;
    let new_data_shred = shredder
        .make_shreds_from_data_slice(
            &keypair,
            &[3, 3, 3],
            false,
            Hash::default(),
            new_index,
            new_index,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        )
        .unwrap()
        .next()
        .unwrap();

    let mut shred_insertion_tracker =
        ShredInsertionTracker::new(data_shreds.len(), blockstore.get_write_batch().unwrap());
    blockstore
        .check_insert_data_shred(
            Cow::Borrowed(&new_data_shred),
            &mut shred_insertion_tracker,
            false,
            None,
            ShredSource::Turbine,
        )
        .unwrap();
    let ShredInsertionTracker {
        merkle_root_metas,
        write_batch,
        ..
    } = shred_insertion_tracker;
    blockstore.write_batch(write_batch).unwrap();

    // Old erasure set: persisted meta unchanged.
    assert_eq!(
        blockstore
            .merkle_root_meta(data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .unwrap()
            .merkle_root(),
        data_shred.merkle_root().ok()
    );
    assert_eq!(
        blockstore
            .merkle_root_meta(data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .unwrap()
            .first_received_shred_index(),
        index
    );
    // New erasure set: tracker holds a fresh meta keyed by the new shred.
    assert_eq!(
        merkle_root_metas
            .get(&new_data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .merkle_root(),
        new_data_shred.merkle_root().ok()
    );
    assert_eq!(
        merkle_root_metas
            .get(&new_data_shred.erasure_set())
            .unwrap()
            .as_ref()
            .first_received_shred_index(),
        new_index
    );
}
7522
#[test]
fn test_check_insert_coding_shred() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let slot = 1;
    let (_data_shreds, code_shreds, _) =
        setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
            slot,
            0,
            10,
            0,
            Hash::default(),
            true,
        );
    let coding_shred = code_shreds[0].clone();

    let mut tracker = ShredInsertionTracker::new(1, blockstore.get_write_batch().unwrap());

    // First attempt: the shred is new, so insertion is accepted.
    let inserted = blockstore.check_insert_coding_shred(
        Cow::Borrowed(&coding_shred),
        &mut tracker,
        false,
        ShredSource::Turbine,
        &mut BlockstoreInsertionMetrics::default(),
    );
    assert!(inserted);

    // Second attempt with the very same shred: rejected, and the shred is
    // recorded as a possible duplicate of an already-present one.
    let inserted_again = blockstore.check_insert_coding_shred(
        Cow::Borrowed(&coding_shred),
        &mut tracker,
        false,
        ShredSource::Turbine,
        &mut BlockstoreInsertionMetrics::default(),
    );
    assert!(!inserted_again);
    assert_eq!(
        tracker.duplicate_shreds,
        vec![PossibleDuplicateShred::Exists(coding_shred)]
    );
}
7563
#[test]
fn test_should_insert_coding_shred() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let max_root = 0;

    let slot = 1;
    let (_data_shreds, code_shreds, _) =
        setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
            slot,
            0,
            10,
            0,
            Hash::default(),
            true,
        );
    let shred = code_shreds[0].clone();

    // A well-formed coding shred above the root passes the check.
    assert!(
        Blockstore::should_insert_coding_shred(&shred, max_root),
        "Insertion of a good coding shred should be allowed"
    );

    blockstore
        .insert_shreds(vec![shred.clone()], None, false)
        .expect("Insertion should succeed");

    // should_insert_coding_shred is a stateless sanity check; it does not
    // consult the index, so a re-check of the same shred still passes.
    assert!(
        Blockstore::should_insert_coding_shred(&shred, max_root),
        "Inserting the same shred again should be allowed since this doesn't check for duplicate index"
    );

    assert!(
        Blockstore::should_insert_coding_shred(&code_shreds[1], max_root),
        "Inserting next shred should be allowed"
    );

    // Passing the shred's own slot as the root makes slot <= max_root.
    assert!(
        !Blockstore::should_insert_coding_shred(&shred, shred.slot()),
        "Trying to insert shred into slot <= last root should not be allowed"
    );
}
7607
#[test]
fn test_insert_multiple_is_last() {
    agave_logger::setup();
    let (shreds, _) = make_slot_entries(0, 0, 18);
    let total = shreds.len() as u64;
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Slot 0 must look fully received, with the last index fixed by the
    // first (smaller) batch.
    let assert_slot_full = |meta: &SlotMeta| {
        assert_eq!(meta.consumed, total);
        assert_eq!(meta.received, total);
        assert_eq!(meta.last_index, Some(total - 1));
        assert!(meta.is_full());
    };
    assert_slot_full(&blockstore.meta(0).unwrap().unwrap());

    // Re-shred the slot with far more entries; the larger batch must not be
    // able to extend an already-full slot.
    let (shreds, _) = make_slot_entries(0, 0, 600);
    assert!(shreds.len() > total as usize);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert_slot_full(&blockstore.meta(0).unwrap().unwrap());

    // The conflicting batch is flagged as duplicate evidence for the slot.
    assert!(blockstore.has_duplicate_shreds_in_slot(0));
}
7636
#[test]
fn test_slot_data_iterator() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let shreds_per_slot = 10;
    let slots = vec![2, 4, 8, 12];
    let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot, 0);
    let slot_8_shreds = all_shreds[2].0.clone();
    for (slot_shreds, _) in all_shreds {
        blockstore.insert_shreds(slot_shreds, None, false).unwrap();
    }

    // Slot 5 was never written, so its data iterator is empty.
    assert_eq!(blockstore.slot_data_iterator(5, 0).unwrap().count(), 0);

    // Slot 8 round-trips: the stored payloads deserialize back to exactly
    // the shreds that were inserted.
    let round_tripped: Vec<Shred> = blockstore
        .slot_data_iterator(8, 0)
        .unwrap()
        .filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok())
        .collect();
    assert_eq!(round_tripped.len(), slot_8_shreds.len());
    assert_eq!(round_tripped, slot_8_shreds);
}
7663
#[test]
fn test_set_roots() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let chained_slots = [0, 2, 4, 7, 12, 15];

    // Fresh blockstore: max root starts at 0.
    assert_eq!(blockstore.max_root(), 0);

    blockstore.set_roots(chained_slots.iter()).unwrap();

    // The largest slot in the chain becomes the new max root, and every
    // slot in the chain is individually recognized as a root.
    assert_eq!(blockstore.max_root(), 15);
    assert!(chained_slots.into_iter().all(|slot| blockstore.is_root(slot)));
}
7679
#[test]
fn test_is_skipped() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let roots = [2, 4, 7, 12, 15];
    blockstore.set_roots(roots.iter()).unwrap();

    // A slot is "skipped" iff it lies strictly inside the rooted range
    // [2, 15] and is not itself a root.
    for slot in 0..20 {
        let expect_skipped = (2..=15).contains(&slot) && !roots.contains(&slot);
        assert_eq!(blockstore.is_skipped(slot), expect_skipped);
    }
}
7695
#[test]
fn test_iter_bounds() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Nothing has been written, so iterating slot metas from slot 5 must
    // yield no entries at all.
    assert_eq!(blockstore.slot_meta_iterator(5).unwrap().count(), 0);
}
7707
#[test]
fn test_get_completed_data_ranges() {
    // Indexes at which a completed-data chunk ends. Collect target type is
    // inferred from `get_completed_data_ranges`' parameter.
    let completed_data_end_indexes = [2, 4, 9, 11].iter().copied().collect();

    // consumed == 1: no end index below `consumed`, so no complete range yet.
    let start_index = 0;
    let consumed = 1;
    assert_eq!(
        Blockstore::get_completed_data_ranges(
            start_index,
            &completed_data_end_indexes,
            consumed
        ),
        vec![]
    );

    // consumed == 3: end index 2 is covered, giving the single range 0..3.
    let start_index = 0;
    let consumed = 3;
    assert_eq!(
        Blockstore::get_completed_data_ranges(
            start_index,
            &completed_data_end_indexes,
            consumed
        ),
        vec![0..3]
    );

    // Exhaustively check every (start at end-index i, consume through
    // end-index j) pair against ranges derived directly from the indexes.
    let completed_data_end_indexes: Vec<_> = completed_data_end_indexes.into_iter().collect();
    for i in 0..completed_data_end_indexes.len() {
        for j in i..completed_data_end_indexes.len() {
            let start_index = completed_data_end_indexes[i];
            let consumed = completed_data_end_indexes[j] + 1;
            // Expected: the one-element range at the starting end-index,
            // followed by a range between each adjacent pair of end indexes.
            let expected = std::iter::once(start_index..start_index + 1)
                .chain(
                    completed_data_end_indexes[i..=j]
                        .windows(2)
                        .map(|end_indexes| end_indexes[0] + 1..end_indexes[1] + 1),
                )
                .collect::<Vec<_>>();

            // Shadow the Vec with a fresh collection of the set type the
            // API expects.
            let completed_data_end_indexes =
                completed_data_end_indexes.iter().copied().collect();
            assert_eq!(
                Blockstore::get_completed_data_ranges(
                    start_index,
                    &completed_data_end_indexes,
                    consumed
                ),
                expected
            );
        }
    }
}
7771
#[test]
fn test_get_slot_entries_with_shred_count_corruption() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let num_ticks = 8;
    let slot = 1;

    // Fill the slot with well-formed tick shreds and confirm the entries
    // read back.
    let entries = create_ticks(num_ticks, 0, Hash::default());
    let tick_shreds = entries_to_test_shreds(&entries, slot, 0, false, 0);
    let next_shred_index = tick_shreds.len();
    blockstore
        .insert_shreds(tick_shreds, None, false)
        .expect("Expected successful write of shreds");
    assert_eq!(
        blockstore.get_slot_entries(slot, 0).unwrap().len() as u64,
        num_ticks
    );

    // Append a truncated FEC set continuing the slot at `next_shred_index`.
    let shredder = Shredder::new(slot, slot.saturating_sub(1), 0, 0).unwrap();
    let keypair = Keypair::new();
    let reed_solomon_cache = ReedSolomonCache::default();
    let corrupting_shreds: Vec<Shred> = shredder
        .make_shreds_from_data_slice(
            &keypair,
            &[1, 1, 1],
            true,
            Hash::default(),
            next_shred_index as u32,
            next_shred_index as u32,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        )
        .unwrap()
        .take(DATA_SHREDS_PER_FEC_BLOCK)
        .collect();

    // The slot's shred accounting is now inconsistent, so reading the
    // entries must fail rather than return corrupt data.
    blockstore
        .insert_shreds(corrupting_shreds, None, false)
        .expect("Expected successful write of shreds");
    assert!(blockstore.get_slot_entries(slot, 0).is_err());
}
7817
#[test]
fn test_no_insert_but_modify_slot_meta() {
    // Shreds for slot 0; only a prefix is inserted up front.
    let (shreds0, _) = make_slot_entries(0, 0, 200);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    blockstore
        .insert_shreds(shreds0[0..5].to_vec(), None, false)
        .unwrap();

    // Build batches for slots 2 and 3, each smuggling in a duplicate
    // slot-0 shred (appended in one batch, prepended in the other). The
    // duplicate must not be re-inserted, yet slot 0's meta must still
    // learn about its new children.
    let (mut shreds2, _) = make_slot_entries(2, 0, 200);
    let (mut shreds3, _) = make_slot_entries(3, 0, 200);
    shreds2.push(shreds0[1].clone());
    shreds3.insert(0, shreds0[1].clone());

    blockstore.insert_shreds(shreds2, None, false).unwrap();
    assert_eq!(blockstore.meta(0).unwrap().unwrap().next_slots, vec![2]);

    blockstore.insert_shreds(shreds3, None, false).unwrap();
    assert_eq!(blockstore.meta(0).unwrap().unwrap().next_slots, vec![2, 3]);
}
7845
#[test]
fn test_trusted_insert_shreds() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let (shreds1, _) = make_slot_entries(1, 0, 1);
    let max_root = 100;
    blockstore.set_roots(std::iter::once(&max_root)).unwrap();

    // Untrusted insertion into a slot at or below the max root is dropped.
    blockstore.insert_shreds(shreds1.clone(), None, false).unwrap();
    assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());

    // Trusted insertion (is_trusted = true) bypasses that sanity check.
    blockstore.insert_shreds(shreds1, None, true).unwrap();
    assert!(blockstore.get_data_shred(1, 0).unwrap().is_some());
}
7869
#[test]
fn test_get_first_available_block() {
    let mint_total = 1_000_000_000_000;
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
    let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Genesis-only ledger: everything starts at slot 0.
    assert_eq!(blockstore.get_first_available_block().unwrap(), 0);
    assert_eq!(blockstore.lowest_slot_with_genesis(), 0);
    assert_eq!(blockstore.lowest_slot(), 0);

    // Root slots 1..=3 with transaction-bearing entries.
    for slot in 1..4 {
        let entries = make_slot_entries_with_transactions(100);
        let shreds = entries_to_test_shreds(&entries, slot, slot - 1, true, 0);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots(std::iter::once(&slot)).unwrap();
    }
    assert_eq!(blockstore.get_first_available_block().unwrap(), 0);
    assert_eq!(blockstore.lowest_slot_with_genesis(), 0);
    assert_eq!(blockstore.lowest_slot(), 1);

    // Purging slots 0..=1 moves the lower bounds up. The first *complete*
    // available block is 3, since slot 2's parent is now gone.
    blockstore.purge_slots(0, 1, PurgeType::CompactionFilter);
    assert_eq!(blockstore.get_first_available_block().unwrap(), 3);
    assert_eq!(blockstore.lowest_slot_with_genesis(), 2);
    assert_eq!(blockstore.lowest_slot(), 2);
}
7900
#[test]
fn test_get_rooted_block() {
    let slot = 10;
    let entries = make_slot_entries_with_transactions(100);
    let blockhash = get_last_hash(entries.iter()).unwrap();
    // The same entries are shredded into three consecutive slots:
    // `slot` and `slot + 1` will be rooted, `slot + 2` stays unrooted.
    let shreds = entries_to_test_shreds(
        &entries,
        slot,
        slot - 1, // parent_slot
        true,     // is_full_slot
        0,        // version
    );
    let more_shreds = entries_to_test_shreds(
        &entries,
        slot + 1,
        slot, // parent_slot
        true, // is_full_slot
        0,    // version
    );
    let unrooted_shreds = entries_to_test_shreds(
        &entries,
        slot + 2,
        slot + 1, // parent_slot
        true,     // is_full_slot
        0,        // version
    );
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    blockstore.insert_shreds(more_shreds, None, false).unwrap();
    blockstore
        .insert_shreds(unrooted_shreds, None, false)
        .unwrap();
    blockstore
        .set_roots([slot - 1, slot, slot + 1].iter())
        .unwrap();

    // Give the parent slot a (default, entry-less) meta so `slot` has a
    // parent record but no parent entries.
    let parent_meta = SlotMeta::default();
    blockstore.put_meta(slot - 1, &parent_meta).unwrap();

    // For every non-tick transaction: write a synthetic status under each of
    // the three slots, and remember the transaction + expected meta.
    let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
        .iter()
        .filter(|entry| !entry.is_tick())
        .cloned()
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            // Synthetic balances keyed off the account position.
            let mut pre_balances: Vec<u64> = vec![];
            let mut post_balances: Vec<u64> = vec![];
            for i in 0..transaction.message.static_account_keys().len() {
                pre_balances.push(i as u64 * 10);
                post_balances.push(i as u64 * 11);
            }
            let compute_units_consumed = Some(12345);
            let cost_units = Some(6789);
            let signature = transaction.signatures[0];
            // Status for (signature, slot).
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
                loaded_addresses: LoadedAddresses::default(),
                return_data: Some(TransactionReturnData::default()),
                compute_units_consumed,
                cost_units,
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((signature, slot), &status)
                .unwrap();
            // Identical status for (signature, slot + 1).
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
                loaded_addresses: LoadedAddresses::default(),
                return_data: Some(TransactionReturnData::default()),
                compute_units_consumed,
                cost_units,
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((signature, slot + 1), &status)
                .unwrap();
            // Identical status for (signature, slot + 2).
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
                loaded_addresses: LoadedAddresses::default(),
                return_data: Some(TransactionReturnData::default()),
                compute_units_consumed,
                cost_units,
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((signature, slot + 2), &status)
                .unwrap();
            VersionedTransactionWithStatusMeta {
                transaction,
                meta: TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances,
                    post_balances,
                    inner_instructions: Some(vec![]),
                    log_messages: Some(vec![]),
                    pre_token_balances: Some(vec![]),
                    post_token_balances: Some(vec![]),
                    rewards: Some(vec![]),
                    loaded_addresses: LoadedAddresses::default(),
                    return_data: Some(TransactionReturnData::default()),
                    compute_units_consumed,
                    cost_units,
                },
            }
        })
        .collect();

    // slot - 1 has a meta but no entries: unavailable.
    assert_matches!(
        blockstore.get_rooted_block(slot - 1, true),
        Err(BlockstoreError::SlotUnavailable)
    );

    // With require_previous_blockhash = true, `slot` fails because its
    // parent has no entries to derive the previous blockhash from.
    assert_matches!(
        blockstore.get_rooted_block(slot, true),
        Err(BlockstoreError::ParentEntriesUnavailable)
    );

    // Without that requirement, `slot` resolves with a default previous
    // blockhash.
    let confirmed_block = blockstore.get_rooted_block(slot, false).unwrap();
    assert_eq!(confirmed_block.transactions.len(), 100);
    let expected_block = VersionedConfirmedBlock {
        transactions: expected_transactions.clone(),
        parent_slot: slot - 1,
        blockhash: blockhash.to_string(),
        previous_blockhash: Hash::default().to_string(),
        rewards: vec![],
        num_partitions: None,
        block_time: None,
        block_height: None,
    };
    assert_eq!(confirmed_block, expected_block);

    // slot + 1 works even with the previous-blockhash requirement, since
    // `slot` has entries.
    let confirmed_block = blockstore.get_rooted_block(slot + 1, true).unwrap();
    assert_eq!(confirmed_block.transactions.len(), 100);

    let mut expected_block = VersionedConfirmedBlock {
        transactions: expected_transactions.clone(),
        parent_slot: slot,
        blockhash: blockhash.to_string(),
        previous_blockhash: blockhash.to_string(),
        rewards: vec![],
        num_partitions: None,
        block_time: None,
        block_height: None,
    };
    assert_eq!(confirmed_block, expected_block);

    // slot + 2 was never rooted.
    let not_root = blockstore.get_rooted_block(slot + 2, true).unwrap_err();
    assert_matches!(not_root, BlockstoreError::SlotNotRooted);

    // ...but it is still readable via the complete-block (unrooted) path.
    let complete_block = blockstore.get_complete_block(slot + 2, true).unwrap();
    assert_eq!(complete_block.transactions.len(), 100);

    let mut expected_complete_block = VersionedConfirmedBlock {
        transactions: expected_transactions,
        parent_slot: slot + 1,
        blockhash: blockhash.to_string(),
        previous_blockhash: blockhash.to_string(),
        rewards: vec![],
        num_partitions: None,
        block_time: None,
        block_height: None,
    };
    assert_eq!(complete_block, expected_complete_block);

    // Block time and height written to their column families surface in the
    // rooted block.
    let timestamp = 1_576_183_541;
    blockstore.blocktime_cf.put(slot + 1, &timestamp).unwrap();
    expected_block.block_time = Some(timestamp);
    let block_height = slot - 2;
    blockstore
        .block_height_cf
        .put(slot + 1, &block_height)
        .unwrap();
    expected_block.block_height = Some(block_height);

    let confirmed_block = blockstore.get_rooted_block(slot + 1, true).unwrap();
    assert_eq!(confirmed_block, expected_block);

    // Same for the unrooted complete block.
    let timestamp = 1_576_183_542;
    blockstore.blocktime_cf.put(slot + 2, &timestamp).unwrap();
    expected_complete_block.block_time = Some(timestamp);
    let block_height = slot - 1;
    blockstore
        .block_height_cf
        .put(slot + 2, &block_height)
        .unwrap();
    expected_complete_block.block_height = Some(block_height);

    let complete_block = blockstore.get_complete_block(slot + 2, true).unwrap();
    assert_eq!(complete_block, expected_complete_block);
}
8125
#[test]
fn test_persist_transaction_status() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let transaction_status_cf = &blockstore.transaction_status_cf;

    // Fixture values shared by both statuses written below.
    let pre_balances_vec = vec![1, 2, 3];
    let post_balances_vec = vec![3, 2, 1];
    let inner_instructions_vec = vec![InnerInstructions {
        index: 0,
        instructions: vec![InnerInstruction {
            instruction: CompiledInstruction::new(1, &(), vec![0]),
            stack_height: Some(2),
        }],
    }];
    let log_messages_vec = vec![String::from("Test message\n")];
    let pre_token_balances_vec = vec![];
    let post_token_balances_vec = vec![];
    let rewards_vec = vec![];
    let test_loaded_addresses = LoadedAddresses {
        writable: vec![Pubkey::new_unique()],
        readonly: vec![Pubkey::new_unique()],
    };
    let test_return_data = TransactionReturnData {
        program_id: Pubkey::new_unique(),
        data: vec![1, 2, 3],
    };
    let compute_units_consumed_1 = Some(3812649u64);
    let cost_units_1 = Some(1234);
    let compute_units_consumed_2 = Some(42u64);
    let cost_units_2 = Some(5678);

    // A key that was never written reads back as None (not an error).
    assert!(transaction_status_cf
        .get_protobuf((Signature::default(), 0))
        .unwrap()
        .is_none());

    // Write a failed (AccountNotFound) status at (default signature, slot 0).
    let status = TransactionStatusMeta {
        status: solana_transaction_error::TransactionResult::<()>::Err(
            TransactionError::AccountNotFound,
        ),
        fee: 5u64,
        pre_balances: pre_balances_vec.clone(),
        post_balances: post_balances_vec.clone(),
        inner_instructions: Some(inner_instructions_vec.clone()),
        log_messages: Some(log_messages_vec.clone()),
        pre_token_balances: Some(pre_token_balances_vec.clone()),
        post_token_balances: Some(post_token_balances_vec.clone()),
        rewards: Some(rewards_vec.clone()),
        loaded_addresses: test_loaded_addresses.clone(),
        return_data: Some(test_return_data.clone()),
        compute_units_consumed: compute_units_consumed_1,
        cost_units: cost_units_1,
    }
    .into();
    assert!(transaction_status_cf
        .put_protobuf((Signature::default(), 0), &status)
        .is_ok());

    // Read it back and verify every field round-trips through the protobuf
    // encoding unchanged.
    let TransactionStatusMeta {
        status,
        fee,
        pre_balances,
        post_balances,
        inner_instructions,
        log_messages,
        pre_token_balances,
        post_token_balances,
        rewards,
        loaded_addresses,
        return_data,
        compute_units_consumed,
        cost_units,
    } = transaction_status_cf
        .get_protobuf((Signature::default(), 0))
        .unwrap()
        .unwrap()
        .try_into()
        .unwrap();
    assert_eq!(status, Err(TransactionError::AccountNotFound));
    assert_eq!(fee, 5u64);
    assert_eq!(pre_balances, pre_balances_vec);
    assert_eq!(post_balances, post_balances_vec);
    assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
    assert_eq!(log_messages.unwrap(), log_messages_vec);
    assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
    assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
    assert_eq!(rewards.unwrap(), rewards_vec);
    assert_eq!(loaded_addresses, test_loaded_addresses);
    assert_eq!(return_data.unwrap(), test_return_data);
    assert_eq!(compute_units_consumed, compute_units_consumed_1);
    assert_eq!(cost_units, cost_units_1);

    // Write a successful status at a different (signature, slot) key.
    let status = TransactionStatusMeta {
        status: solana_transaction_error::TransactionResult::<()>::Ok(()),
        fee: 9u64,
        pre_balances: pre_balances_vec.clone(),
        post_balances: post_balances_vec.clone(),
        inner_instructions: Some(inner_instructions_vec.clone()),
        log_messages: Some(log_messages_vec.clone()),
        pre_token_balances: Some(pre_token_balances_vec.clone()),
        post_token_balances: Some(post_token_balances_vec.clone()),
        rewards: Some(rewards_vec.clone()),
        loaded_addresses: test_loaded_addresses.clone(),
        return_data: Some(test_return_data.clone()),
        compute_units_consumed: compute_units_consumed_2,
        cost_units: cost_units_2,
    }
    .into();
    assert!(transaction_status_cf
        .put_protobuf((Signature::from([2u8; 64]), 9), &status)
        .is_ok());

    // Read the second status back and verify the round-trip as well.
    let TransactionStatusMeta {
        status,
        fee,
        pre_balances,
        post_balances,
        inner_instructions,
        log_messages,
        pre_token_balances,
        post_token_balances,
        rewards,
        loaded_addresses,
        return_data,
        compute_units_consumed,
        cost_units,
    } = transaction_status_cf
        .get_protobuf((Signature::from([2u8; 64]), 9))
        .unwrap()
        .unwrap()
        .try_into()
        .unwrap();

    assert_eq!(status, Ok(()));
    assert_eq!(fee, 9u64);
    assert_eq!(pre_balances, pre_balances_vec);
    assert_eq!(post_balances, post_balances_vec);
    assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
    assert_eq!(log_messages.unwrap(), log_messages_vec);
    assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
    assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
    assert_eq!(rewards.unwrap(), rewards_vec);
    assert_eq!(loaded_addresses, test_loaded_addresses);
    assert_eq!(return_data.unwrap(), test_return_data);
    assert_eq!(compute_units_consumed, compute_units_consumed_2);
    assert_eq!(cost_units, cost_units_2);
}
8281
#[test]
fn test_read_transaction_status_with_old_data() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    // Same signature written under two deprecated primary indexes and the
    // current column layout; the fee encodes `slot * 1_000` so each read
    // can be traced back to the entry it hit.
    let signature = Signature::from([1; 64]);

    // Deprecated write under primary index 0.
    let index0_slot = 2;
    blockstore
        .write_deprecated_transaction_status(
            0,
            index0_slot,
            signature,
            vec![&Pubkey::new_unique()],
            vec![&Pubkey::new_unique()],
            TransactionStatusMeta {
                fee: index0_slot * 1_000,
                ..TransactionStatusMeta::default()
            },
        )
        .unwrap();

    // Deprecated write under primary index 1.
    let index1_slot = 1;
    blockstore
        .write_deprecated_transaction_status(
            1,
            index1_slot,
            signature,
            vec![&Pubkey::new_unique()],
            vec![&Pubkey::new_unique()],
            TransactionStatusMeta {
                fee: index1_slot * 1_000,
                ..TransactionStatusMeta::default()
            },
        )
        .unwrap();

    // Write in the current (non-deprecated) format.
    let slot = 3;
    blockstore
        .write_transaction_status(
            slot,
            signature,
            vec![
                (&Pubkey::new_unique(), true),
                (&Pubkey::new_unique(), false),
            ]
            .into_iter(),
            TransactionStatusMeta {
                fee: slot * 1_000,
                ..TransactionStatusMeta::default()
            },
            0,
        )
        .unwrap();

    // All three entries must be readable by (signature, slot) regardless of
    // which storage generation they were written with.
    let meta = blockstore
        .read_transaction_status((signature, slot))
        .unwrap()
        .unwrap();
    assert_eq!(meta.fee, slot * 1000);

    let meta = blockstore
        .read_transaction_status((signature, index0_slot))
        .unwrap()
        .unwrap();
    assert_eq!(meta.fee, index0_slot * 1000);

    let meta = blockstore
        .read_transaction_status((signature, index1_slot))
        .unwrap()
        .unwrap();
    assert_eq!(meta.fee, index1_slot * 1000);
}
8354
8355 #[test]
8356 fn test_get_transaction_status() {
8357 let ledger_path = get_tmp_ledger_path_auto_delete!();
8358 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8359 let transaction_status_cf = &blockstore.transaction_status_cf;
8360
8361 let pre_balances_vec = vec![1, 2, 3];
8362 let post_balances_vec = vec![3, 2, 1];
8363 let status = TransactionStatusMeta {
8364 status: solana_transaction_error::TransactionResult::<()>::Ok(()),
8365 fee: 42u64,
8366 pre_balances: pre_balances_vec,
8367 post_balances: post_balances_vec,
8368 inner_instructions: Some(vec![]),
8369 log_messages: Some(vec![]),
8370 pre_token_balances: Some(vec![]),
8371 post_token_balances: Some(vec![]),
8372 rewards: Some(vec![]),
8373 loaded_addresses: LoadedAddresses::default(),
8374 return_data: Some(TransactionReturnData::default()),
8375 compute_units_consumed: Some(42u64),
8376 cost_units: Some(1234),
8377 }
8378 .into();
8379
8380 let signature1 = Signature::from([1u8; 64]);
8381 let signature2 = Signature::from([2u8; 64]);
8382 let signature3 = Signature::from([3u8; 64]);
8383 let signature4 = Signature::from([4u8; 64]);
8384 let signature5 = Signature::from([5u8; 64]);
8385 let signature6 = Signature::from([6u8; 64]);
8386 let signature7 = Signature::from([7u8; 64]);
8387
8388 let meta0 = SlotMeta::new(0, Some(0));
8396 blockstore.meta_cf.put(0, &meta0).unwrap();
8397 let meta1 = SlotMeta::new(1, Some(0));
8398 blockstore.meta_cf.put(1, &meta1).unwrap();
8399 let meta2 = SlotMeta::new(2, Some(0));
8400 blockstore.meta_cf.put(2, &meta2).unwrap();
8401 let meta3 = SlotMeta::new(3, Some(2));
8402 blockstore.meta_cf.put(3, &meta3).unwrap();
8403
8404 blockstore.set_roots([0, 2].iter()).unwrap();
8405
8406 transaction_status_cf
8413 .put_protobuf((signature2, 1), &status)
8414 .unwrap();
8415
8416 transaction_status_cf
8417 .put_protobuf((signature2, 2), &status)
8418 .unwrap();
8419
8420 transaction_status_cf
8421 .put_protobuf((signature4, 1), &status)
8422 .unwrap();
8423
8424 transaction_status_cf
8425 .put_protobuf((signature5, 1), &status)
8426 .unwrap();
8427
8428 transaction_status_cf
8429 .put_protobuf((signature5, 3), &status)
8430 .unwrap();
8431
8432 transaction_status_cf
8433 .put_protobuf((signature6, 1), &status)
8434 .unwrap();
8435
8436 transaction_status_cf
8437 .put_protobuf((signature5, 5), &status)
8438 .unwrap();
8439
8440 transaction_status_cf
8441 .put_protobuf((signature6, 3), &status)
8442 .unwrap();
8443
8444 if let (Some((slot, _status)), counter) = blockstore
8446 .get_transaction_status_with_counter(signature2, &[].into())
8447 .unwrap()
8448 {
8449 assert_eq!(slot, 2);
8450 assert_eq!(counter, 2);
8451 }
8452
8453 if let (Some((slot, _status)), counter) = blockstore
8455 .get_transaction_status_with_counter(signature2, &[3].into())
8456 .unwrap()
8457 {
8458 assert_eq!(slot, 2);
8459 assert_eq!(counter, 2);
8460 }
8461
8462 let (status, counter) = blockstore
8464 .get_transaction_status_with_counter(signature4, &[].into())
8465 .unwrap();
8466 assert_eq!(status, None);
8467 assert_eq!(counter, 2);
8468
8469 let (status, counter) = blockstore
8471 .get_transaction_status_with_counter(signature4, &[3].into())
8472 .unwrap();
8473 assert_eq!(status, None);
8474 assert_eq!(counter, 2);
8475
8476 let (status, counter) = blockstore
8478 .get_transaction_status_with_counter(signature5, &[].into())
8479 .unwrap();
8480 assert_eq!(status, None);
8481 assert_eq!(counter, 4);
8482
8483 if let (Some((slot, _status)), counter) = blockstore
8485 .get_transaction_status_with_counter(signature5, &[3].into())
8486 .unwrap()
8487 {
8488 assert_eq!(slot, 3);
8489 assert_eq!(counter, 2);
8490 }
8491
8492 let (status, counter) = blockstore
8494 .get_transaction_status_with_counter(signature1, &[].into())
8495 .unwrap();
8496 assert_eq!(status, None);
8497 assert_eq!(counter, 1);
8498
8499 let (status, counter) = blockstore
8500 .get_transaction_status_with_counter(signature1, &[3].into())
8501 .unwrap();
8502 assert_eq!(status, None);
8503 assert_eq!(counter, 1);
8504
8505 let (status, counter) = blockstore
8507 .get_transaction_status_with_counter(signature3, &[].into())
8508 .unwrap();
8509 assert_eq!(status, None);
8510 assert_eq!(counter, 1);
8511
8512 let (status, counter) = blockstore
8513 .get_transaction_status_with_counter(signature3, &[3].into())
8514 .unwrap();
8515 assert_eq!(status, None);
8516 assert_eq!(counter, 1);
8517
8518 let (status, counter) = blockstore
8520 .get_transaction_status_with_counter(signature7, &[].into())
8521 .unwrap();
8522 assert_eq!(status, None);
8523 assert_eq!(counter, 0);
8524
8525 let (status, counter) = blockstore
8526 .get_transaction_status_with_counter(signature7, &[3].into())
8527 .unwrap();
8528 assert_eq!(status, None);
8529 assert_eq!(counter, 0);
8530 }
8531
8532 #[test]
8533 fn test_get_transaction_status_with_old_data() {
8534 let ledger_path = get_tmp_ledger_path_auto_delete!();
8535 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
8536 let transaction_status_cf = &blockstore.transaction_status_cf;
8537
8538 let pre_balances_vec = vec![1, 2, 3];
8539 let post_balances_vec = vec![3, 2, 1];
8540 let status = TransactionStatusMeta {
8541 status: solana_transaction_error::TransactionResult::<()>::Ok(()),
8542 fee: 42u64,
8543 pre_balances: pre_balances_vec,
8544 post_balances: post_balances_vec,
8545 inner_instructions: Some(vec![]),
8546 log_messages: Some(vec![]),
8547 pre_token_balances: Some(vec![]),
8548 post_token_balances: Some(vec![]),
8549 rewards: Some(vec![]),
8550 loaded_addresses: LoadedAddresses::default(),
8551 return_data: Some(TransactionReturnData::default()),
8552 compute_units_consumed: Some(42u64),
8553 cost_units: Some(1234),
8554 }
8555 .into();
8556
8557 let signature1 = Signature::from([1u8; 64]);
8558 let signature2 = Signature::from([2u8; 64]);
8559 let signature3 = Signature::from([3u8; 64]);
8560 let signature4 = Signature::from([4u8; 64]);
8561 let signature5 = Signature::from([5u8; 64]);
8562 let signature6 = Signature::from([6u8; 64]);
8563
8564 let meta0 = SlotMeta::new(0, Some(0));
8575 blockstore.meta_cf.put(0, &meta0).unwrap();
8576 let meta1 = SlotMeta::new(1, Some(0));
8577 blockstore.meta_cf.put(1, &meta1).unwrap();
8578 let meta2 = SlotMeta::new(2, Some(0));
8579 blockstore.meta_cf.put(2, &meta2).unwrap();
8580 let meta3 = SlotMeta::new(3, Some(2));
8581 blockstore.meta_cf.put(3, &meta3).unwrap();
8582 let meta4 = SlotMeta::new(4, Some(2));
8583 blockstore.meta_cf.put(4, &meta4).unwrap();
8584 let meta5 = SlotMeta::new(5, Some(4));
8585 blockstore.meta_cf.put(5, &meta5).unwrap();
8586
8587 blockstore.set_roots([0, 2, 4].iter()).unwrap();
8588
8589 transaction_status_cf
8596 .put_deprecated_protobuf((1, signature1, 1), &status)
8597 .unwrap();
8598
8599 transaction_status_cf
8600 .put_deprecated_protobuf((1, signature1, 2), &status)
8601 .unwrap();
8602
8603 transaction_status_cf
8604 .put_deprecated_protobuf((0, signature2, 3), &status)
8605 .unwrap();
8606
8607 transaction_status_cf
8608 .put_deprecated_protobuf((0, signature2, 4), &status)
8609 .unwrap();
8610 blockstore.set_highest_primary_index_slot(Some(4));
8611
8612 transaction_status_cf
8613 .put_protobuf((signature3, 4), &status)
8614 .unwrap();
8615
8616 transaction_status_cf
8617 .put_protobuf((signature4, 5), &status)
8618 .unwrap();
8619
8620 transaction_status_cf
8621 .put_protobuf((signature5, 5), &status)
8622 .unwrap();
8623
8624 if let (Some((slot, _status)), counter) = blockstore
8626 .get_transaction_status_with_counter(signature1, &[].into())
8627 .unwrap()
8628 {
8629 assert_eq!(slot, 2);
8630 assert_eq!(counter, 4);
8631 }
8632
8633 if let (Some((slot, _status)), counter) = blockstore
8635 .get_transaction_status_with_counter(signature2, &[].into())
8636 .unwrap()
8637 {
8638 assert_eq!(slot, 4);
8639 assert_eq!(counter, 3);
8640 }
8641
8642 if let (Some((slot, _status)), counter) = blockstore
8644 .get_transaction_status_with_counter(signature3, &[].into())
8645 .unwrap()
8646 {
8647 assert_eq!(slot, 4);
8648 assert_eq!(counter, 1);
8649 }
8650
8651 let (status, counter) = blockstore
8653 .get_transaction_status_with_counter(signature6, &[].into())
8654 .unwrap();
8655 assert_eq!(status, None);
8656 assert_eq!(counter, 1);
8657 }
8658
    // Shared scenario for the two tests below: entries in the "special"
    // column families (transaction statuses and address signatures) at or
    // below `lowest_cleanup_slot` must become unreadable once cleanup has
    // run, while entries above that slot stay readable either way.
    //
    // `simulate_blockstore_cleanup_service` toggles whether the purge that
    // the cleanup service would perform actually happens.
    fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_blockstore_cleanup_service: bool) {
        agave_logger::setup();

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let transaction_status_cf = &blockstore.transaction_status_cf;

        // Arbitrary fully-populated status reused for both signatures.
        let pre_balances_vec = vec![1, 2, 3];
        let post_balances_vec = vec![3, 2, 1];
        let status = TransactionStatusMeta {
            status: solana_transaction_error::TransactionResult::<()>::Ok(()),
            fee: 42u64,
            pre_balances: pre_balances_vec,
            post_balances: post_balances_vec,
            inner_instructions: Some(vec![]),
            log_messages: Some(vec![]),
            pre_token_balances: Some(vec![]),
            post_token_balances: Some(vec![]),
            rewards: Some(vec![]),
            loaded_addresses: LoadedAddresses::default(),
            return_data: Some(TransactionReturnData::default()),
            compute_units_consumed: Some(42u64),
            cost_units: Some(1234),
        }
        .into();

        let signature1 = Signature::from([2u8; 64]);
        let signature2 = Signature::from([3u8; 64]);

        // Linear chain 0 <- 1 <- 2 <- 3, all rooted.
        let meta0 = SlotMeta::new(0, Some(0));
        blockstore.meta_cf.put(0, &meta0).unwrap();
        let meta1 = SlotMeta::new(1, Some(0));
        blockstore.meta_cf.put(1, &meta1).unwrap();
        let meta2 = SlotMeta::new(2, Some(1));
        blockstore.meta_cf.put(2, &meta2).unwrap();
        let meta3 = SlotMeta::new(3, Some(2));
        blockstore.meta_cf.put(3, &meta3).unwrap();

        blockstore.set_roots([0, 1, 2, 3].iter()).unwrap();

        // signature1 lives at the cleanup boundary; signature2 just above it.
        let lowest_cleanup_slot = 1;
        let lowest_available_slot = lowest_cleanup_slot + 1;

        transaction_status_cf
            .put_protobuf((signature1, lowest_cleanup_slot), &status)
            .unwrap();

        transaction_status_cf
            .put_protobuf((signature2, lowest_available_slot), &status)
            .unwrap();

        // Also populate the address-signatures column family for each slot.
        let address0 = solana_pubkey::new_rand();
        let address1 = solana_pubkey::new_rand();
        blockstore
            .write_transaction_status(
                lowest_cleanup_slot,
                signature1,
                vec![(&address0, true)].into_iter(),
                TransactionStatusMeta::default(),
                0,
            )
            .unwrap();
        blockstore
            .write_transaction_status(
                lowest_available_slot,
                signature2,
                vec![(&address1, true)].into_iter(),
                TransactionStatusMeta::default(),
                0,
            )
            .unwrap();

        // Returns whether the boundary-slot entries are now missing from the
        // status and address-signatures column families, respectively.
        let check_for_missing = || {
            (
                blockstore
                    .get_transaction_status_with_counter(signature1, &[].into())
                    .unwrap()
                    .0
                    .is_none(),
                blockstore
                    .find_address_signatures_for_slot(address0, lowest_cleanup_slot)
                    .unwrap()
                    .is_empty(),
            )
        };

        // Entries above the cleanup slot must remain readable no matter what.
        let assert_existing_always = || {
            let are_existing_always = (
                blockstore
                    .get_transaction_status_with_counter(signature2, &[].into())
                    .unwrap()
                    .0
                    .is_some(),
                !blockstore
                    .find_address_signatures_for_slot(address1, lowest_available_slot)
                    .unwrap()
                    .is_empty(),
            );
            assert_eq!(are_existing_always, (true, true));
        };

        // Before any cleanup, everything is readable.
        let are_missing = check_for_missing();
        assert_eq!(are_missing, (false, false));
        assert_existing_always();

        if simulate_blockstore_cleanup_service {
            // Mimic the cleanup service: advance lowest_cleanup_slot, then
            // purge everything at or below it.
            *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
            blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter);
        }

        // Boundary-slot entries disappear only if cleanup actually ran.
        let are_missing = check_for_missing();
        if simulate_blockstore_cleanup_service {
            assert_eq!(are_missing, (true, true));
        } else {
            assert_eq!(are_missing, (false, false));
        }
        assert_existing_always();
    }
8782
    #[test]
    fn test_lowest_cleanup_slot_and_special_cfs_with_blockstore_cleanup_service_simulation() {
        // Run the shared scenario with the cleanup-service purge simulated.
        do_test_lowest_cleanup_slot_and_special_cfs(true);
    }
8787
    #[test]
    fn test_lowest_cleanup_slot_and_special_cfs_without_blockstore_cleanup_service_simulation() {
        // Run the shared scenario without performing any purge.
        do_test_lowest_cleanup_slot_and_special_cfs(false);
    }
8792
    #[test]
    fn test_get_rooted_transaction() {
        // Writes transactions (with statuses) into rooted slot 2, verifies
        // both get_rooted_transaction() and get_complete_transaction() return
        // them, then purges through the slot and verifies both return None.
        let slot = 2;
        let entries = make_slot_entries_with_transactions(5);
        let shreds = entries_to_test_shreds(
            &entries,
            slot,
            slot - 1, true, 0, );
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots([slot - 1, slot].iter()).unwrap();

        // For each non-tick transaction: write a synthesized status into the
        // column family and keep the expected transaction/meta pairing.
        let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
            .iter()
            .filter(|entry| !entry.is_tick())
            .cloned()
            .flat_map(|entry| entry.transactions)
            .map(|transaction| {
                // Synthetic balances: one pre/post value per static key.
                let mut pre_balances: Vec<u64> = vec![];
                let mut post_balances: Vec<u64> = vec![];
                for i in 0..transaction.message.static_account_keys().len() {
                    pre_balances.push(i as u64 * 10);
                    post_balances.push(i as u64 * 11);
                }
                let inner_instructions = Some(vec![InnerInstructions {
                    index: 0,
                    instructions: vec![InnerInstruction {
                        instruction: CompiledInstruction::new(1, &(), vec![0]),
                        stack_height: Some(2),
                    }],
                }]);
                let log_messages = Some(vec![String::from("Test message\n")]);
                let pre_token_balances = Some(vec![]);
                let post_token_balances = Some(vec![]);
                let rewards = Some(vec![]);
                let signature = transaction.signatures[0];
                let return_data = Some(TransactionReturnData {
                    program_id: Pubkey::new_unique(),
                    data: vec![1, 2, 3],
                });
                // Stored (protobuf) form of the status.
                let status = TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances: pre_balances.clone(),
                    post_balances: post_balances.clone(),
                    inner_instructions: inner_instructions.clone(),
                    log_messages: log_messages.clone(),
                    pre_token_balances: pre_token_balances.clone(),
                    post_token_balances: post_token_balances.clone(),
                    rewards: rewards.clone(),
                    loaded_addresses: LoadedAddresses::default(),
                    return_data: return_data.clone(),
                    compute_units_consumed: Some(42),
                    cost_units: Some(1234),
                }
                .into();
                blockstore
                    .transaction_status_cf
                    .put_protobuf((signature, slot), &status)
                    .unwrap();
                // Expected round-tripped form of the same status.
                VersionedTransactionWithStatusMeta {
                    transaction,
                    meta: TransactionStatusMeta {
                        status: Ok(()),
                        fee: 42,
                        pre_balances,
                        post_balances,
                        inner_instructions,
                        log_messages,
                        pre_token_balances,
                        post_token_balances,
                        rewards,
                        loaded_addresses: LoadedAddresses::default(),
                        return_data,
                        compute_units_consumed: Some(42),
                        cost_units: Some(1234),
                    },
                }
            })
            .collect();

        // While the slot is rooted, both lookup paths succeed.
        for tx_with_meta in expected_transactions.clone() {
            let signature = tx_with_meta.transaction.signatures[0];
            assert_eq!(
                blockstore.get_rooted_transaction(signature).unwrap(),
                Some(ConfirmedTransactionWithStatusMeta {
                    slot,
                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta.clone()),
                    block_time: None
                })
            );
            assert_eq!(
                blockstore
                    .get_complete_transaction(signature, slot + 1)
                    .unwrap(),
                Some(ConfirmedTransactionWithStatusMeta {
                    slot,
                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
                    block_time: None
                })
            );
        }

        // After purging through `slot`, both lookup paths return None.
        blockstore
            .run_purge(0, slot, PurgeType::CompactionFilter)
            .unwrap();
        *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
        for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
            let signature = transaction.signatures[0];
            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
            assert_eq!(
                blockstore
                    .get_complete_transaction(signature, slot + 1)
                    .unwrap(),
                None,
            );
        }
    }
8915
    #[test]
    fn test_get_complete_transaction() {
        // Like test_get_rooted_transaction, but the slot is never rooted:
        // get_complete_transaction() must still return the transactions while
        // get_rooted_transaction() returns None throughout, and both return
        // None after the slot is purged.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let slot = 2;
        let entries = make_slot_entries_with_transactions(5);
        let shreds = entries_to_test_shreds(
            &entries,
            slot,
            slot - 1, true, 0, );
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // For each non-tick transaction: write a synthesized status into the
        // column family and keep the expected transaction/meta pairing.
        let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
            .iter()
            .filter(|entry| !entry.is_tick())
            .cloned()
            .flat_map(|entry| entry.transactions)
            .map(|transaction| {
                // Synthetic balances: one pre/post value per static key.
                let mut pre_balances: Vec<u64> = vec![];
                let mut post_balances: Vec<u64> = vec![];
                for i in 0..transaction.message.static_account_keys().len() {
                    pre_balances.push(i as u64 * 10);
                    post_balances.push(i as u64 * 11);
                }
                let inner_instructions = Some(vec![InnerInstructions {
                    index: 0,
                    instructions: vec![InnerInstruction {
                        instruction: CompiledInstruction::new(1, &(), vec![0]),
                        stack_height: Some(2),
                    }],
                }]);
                let log_messages = Some(vec![String::from("Test message\n")]);
                let pre_token_balances = Some(vec![]);
                let post_token_balances = Some(vec![]);
                let rewards = Some(vec![]);
                let return_data = Some(TransactionReturnData {
                    program_id: Pubkey::new_unique(),
                    data: vec![1, 2, 3],
                });
                let signature = transaction.signatures[0];
                // Stored (protobuf) form of the status.
                let status = TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances: pre_balances.clone(),
                    post_balances: post_balances.clone(),
                    inner_instructions: inner_instructions.clone(),
                    log_messages: log_messages.clone(),
                    pre_token_balances: pre_token_balances.clone(),
                    post_token_balances: post_token_balances.clone(),
                    rewards: rewards.clone(),
                    loaded_addresses: LoadedAddresses::default(),
                    return_data: return_data.clone(),
                    compute_units_consumed: Some(42u64),
                    cost_units: Some(1234),
                }
                .into();
                blockstore
                    .transaction_status_cf
                    .put_protobuf((signature, slot), &status)
                    .unwrap();
                // Expected round-tripped form of the same status.
                VersionedTransactionWithStatusMeta {
                    transaction,
                    meta: TransactionStatusMeta {
                        status: Ok(()),
                        fee: 42,
                        pre_balances,
                        post_balances,
                        inner_instructions,
                        log_messages,
                        pre_token_balances,
                        post_token_balances,
                        rewards,
                        loaded_addresses: LoadedAddresses::default(),
                        return_data,
                        compute_units_consumed: Some(42u64),
                        cost_units: Some(1234),
                    },
                }
            })
            .collect();

        // Complete (unrooted) lookups succeed; rooted lookups find nothing.
        for tx_with_meta in expected_transactions.clone() {
            let signature = tx_with_meta.transaction.signatures[0];
            assert_eq!(
                blockstore
                    .get_complete_transaction(signature, slot)
                    .unwrap(),
                Some(ConfirmedTransactionWithStatusMeta {
                    slot,
                    tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
                    block_time: None
                })
            );
            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
        }

        // After purging through `slot`, both lookup paths return None.
        blockstore
            .run_purge(0, slot, PurgeType::CompactionFilter)
            .unwrap();
        *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
        for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
            let signature = transaction.signatures[0];
            assert_eq!(
                blockstore
                    .get_complete_transaction(signature, slot)
                    .unwrap(),
                None,
            );
            assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,);
        }
    }
9031
9032 #[test]
9033 fn test_empty_transaction_status() {
9034 let ledger_path = get_tmp_ledger_path_auto_delete!();
9035 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9036
9037 blockstore.set_roots(std::iter::once(&0)).unwrap();
9038 assert_eq!(
9039 blockstore
9040 .get_rooted_transaction(Signature::default())
9041 .unwrap(),
9042 None
9043 );
9044 }
9045
9046 impl Blockstore {
9047 pub(crate) fn write_deprecated_transaction_status(
9048 &self,
9049 primary_index: u64,
9050 slot: Slot,
9051 signature: Signature,
9052 writable_keys: Vec<&Pubkey>,
9053 readonly_keys: Vec<&Pubkey>,
9054 status: TransactionStatusMeta,
9055 ) -> Result<()> {
9056 let status = status.into();
9057 self.transaction_status_cf
9058 .put_deprecated_protobuf((primary_index, signature, slot), &status)?;
9059 for address in writable_keys {
9060 self.address_signatures_cf.put_deprecated(
9061 (primary_index, *address, slot, signature),
9062 &AddressSignatureMeta { writeable: true },
9063 )?;
9064 }
9065 for address in readonly_keys {
9066 self.address_signatures_cf.put_deprecated(
9067 (primary_index, *address, slot, signature),
9068 &AddressSignatureMeta { writeable: false },
9069 )?;
9070 }
9071 let mut w_highest_primary_index_slot = self.highest_primary_index_slot.write().unwrap();
9072 if w_highest_primary_index_slot.is_none()
9073 || w_highest_primary_index_slot.is_some_and(|highest_slot| highest_slot < slot)
9074 {
9075 *w_highest_primary_index_slot = Some(slot);
9076 }
9077 Ok(())
9078 }
9079 }
9080
9081 #[test]
9082 fn test_find_address_signatures_for_slot() {
9083 let ledger_path = get_tmp_ledger_path_auto_delete!();
9084 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9085
9086 let address0 = solana_pubkey::new_rand();
9087 let address1 = solana_pubkey::new_rand();
9088
9089 let slot1 = 1;
9090 for x in 1..5 {
9091 let signature = Signature::from([x; 64]);
9092 blockstore
9093 .write_transaction_status(
9094 slot1,
9095 signature,
9096 vec![(&address0, true), (&address1, false)].into_iter(),
9097 TransactionStatusMeta::default(),
9098 x as usize,
9099 )
9100 .unwrap();
9101 }
9102 let slot2 = 2;
9103 for x in 5..7 {
9104 let signature = Signature::from([x; 64]);
9105 blockstore
9106 .write_transaction_status(
9107 slot2,
9108 signature,
9109 vec![(&address0, true), (&address1, false)].into_iter(),
9110 TransactionStatusMeta::default(),
9111 x as usize,
9112 )
9113 .unwrap();
9114 }
9115 for x in 7..9 {
9116 let signature = Signature::from([x; 64]);
9117 blockstore
9118 .write_transaction_status(
9119 slot2,
9120 signature,
9121 vec![(&address0, true), (&address1, false)].into_iter(),
9122 TransactionStatusMeta::default(),
9123 x as usize,
9124 )
9125 .unwrap();
9126 }
9127 let slot3 = 3;
9128 for x in 9..13 {
9129 let signature = Signature::from([x; 64]);
9130 blockstore
9131 .write_transaction_status(
9132 slot3,
9133 signature,
9134 vec![(&address0, true), (&address1, false)].into_iter(),
9135 TransactionStatusMeta::default(),
9136 x as usize,
9137 )
9138 .unwrap();
9139 }
9140 blockstore.set_roots(std::iter::once(&slot1)).unwrap();
9141
9142 let slot1_signatures = blockstore
9143 .find_address_signatures_for_slot(address0, 1)
9144 .unwrap();
9145 for (i, (slot, signature)) in slot1_signatures.iter().enumerate() {
9146 assert_eq!(*slot, slot1);
9147 assert_eq!(*signature, Signature::from([i as u8 + 1; 64]));
9148 }
9149
9150 let slot2_signatures = blockstore
9151 .find_address_signatures_for_slot(address0, 2)
9152 .unwrap();
9153 for (i, (slot, signature)) in slot2_signatures.iter().enumerate() {
9154 assert_eq!(*slot, slot2);
9155 assert_eq!(*signature, Signature::from([i as u8 + 5; 64]));
9156 }
9157
9158 let slot3_signatures = blockstore
9159 .find_address_signatures_for_slot(address0, 3)
9160 .unwrap();
9161 for (i, (slot, signature)) in slot3_signatures.iter().enumerate() {
9162 assert_eq!(*slot, slot3);
9163 assert_eq!(*signature, Signature::from([i as u8 + 9; 64]));
9164 }
9165 }
9166
9167 #[test]
9168 fn test_get_confirmed_signatures_for_address2() {
9169 let ledger_path = get_tmp_ledger_path_auto_delete!();
9170 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9171
9172 let (shreds, _) = make_slot_entries(1, 0, 4);
9173 blockstore.insert_shreds(shreds, None, false).unwrap();
9174
9175 fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
9176 let mut entries: Vec<Entry> = Vec::new();
9177 for address in addresses {
9178 let transaction = Transaction::new_with_compiled_instructions(
9179 &[&Keypair::new()],
9180 &[*address],
9181 Hash::default(),
9182 vec![solana_pubkey::new_rand()],
9183 vec![CompiledInstruction::new(1, &(), vec![0])],
9184 );
9185 entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
9186 let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
9187 entries.append(&mut tick);
9188 }
9189 entries
9190 }
9191
9192 let address0 = solana_pubkey::new_rand();
9193 let address1 = solana_pubkey::new_rand();
9194
9195 for slot in 2..=8 {
9196 let entries = make_slot_entries_with_transaction_addresses(&[
9197 address0, address1, address0, address1,
9198 ]);
9199 let shreds = entries_to_test_shreds(
9200 &entries,
9201 slot,
9202 slot - 1, true, 0, );
9206 blockstore.insert_shreds(shreds, None, false).unwrap();
9207
9208 let mut counter = 0;
9209 for entry in entries.into_iter() {
9210 for transaction in entry.transactions {
9211 assert_eq!(transaction.signatures.len(), 1);
9212 blockstore
9213 .write_transaction_status(
9214 slot,
9215 transaction.signatures[0],
9216 transaction
9217 .message
9218 .static_account_keys()
9219 .iter()
9220 .map(|key| (key, true)),
9221 TransactionStatusMeta::default(),
9222 counter,
9223 )
9224 .unwrap();
9225 counter += 1;
9226 }
9227 }
9228 }
9229
9230 for slot in 9..=10 {
9232 let entries = make_slot_entries_with_transaction_addresses(&[
9233 address0, address1, address0, address1,
9234 ]);
9235 let shreds = entries_to_test_shreds(&entries, slot, 8, true, 0);
9236 blockstore.insert_shreds(shreds, None, false).unwrap();
9237
9238 let mut counter = 0;
9239 for entry in entries.into_iter() {
9240 for transaction in entry.transactions {
9241 assert_eq!(transaction.signatures.len(), 1);
9242 blockstore
9243 .write_transaction_status(
9244 slot,
9245 transaction.signatures[0],
9246 transaction
9247 .message
9248 .static_account_keys()
9249 .iter()
9250 .map(|key| (key, true)),
9251 TransactionStatusMeta::default(),
9252 counter,
9253 )
9254 .unwrap();
9255 counter += 1;
9256 }
9257 }
9258 }
9259
9260 blockstore.set_roots([1, 2, 4, 5, 6, 7, 8].iter()).unwrap();
9262 let highest_super_majority_root = 8;
9263
9264 let sig_infos = blockstore
9266 .get_confirmed_signatures_for_address2(
9267 address0,
9268 highest_super_majority_root,
9269 None,
9270 None,
9271 usize::MAX,
9272 )
9273 .unwrap();
9274 assert!(sig_infos.found_before);
9275 let all0 = sig_infos.infos;
9276 assert_eq!(all0.len(), 12);
9277
9278 let all1 = blockstore
9280 .get_confirmed_signatures_for_address2(
9281 address1,
9282 highest_super_majority_root,
9283 None,
9284 None,
9285 usize::MAX,
9286 )
9287 .unwrap()
9288 .infos;
9289 assert_eq!(all1.len(), 12);
9290
9291 for i in 0..all0.len() {
9293 let sig_infos = blockstore
9294 .get_confirmed_signatures_for_address2(
9295 address0,
9296 highest_super_majority_root,
9297 if i == 0 {
9298 None
9299 } else {
9300 Some(all0[i - 1].signature)
9301 },
9302 None,
9303 1,
9304 )
9305 .unwrap();
9306 assert!(sig_infos.found_before);
9307 let results = sig_infos.infos;
9308 assert_eq!(results.len(), 1);
9309 assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9310 }
9311 for i in 0..all0.len() {
9313 let results = blockstore
9314 .get_confirmed_signatures_for_address2(
9315 address0,
9316 highest_super_majority_root,
9317 if i == 0 {
9318 None
9319 } else {
9320 Some(all0[i - 1].signature)
9321 },
9322 if i == all0.len() - 1 || i == all0.len() {
9323 None
9324 } else {
9325 Some(all0[i + 1].signature)
9326 },
9327 10,
9328 )
9329 .unwrap()
9330 .infos;
9331 assert_eq!(results.len(), 1);
9332 assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9333 }
9334
9335 let sig_infos = blockstore
9336 .get_confirmed_signatures_for_address2(
9337 address0,
9338 highest_super_majority_root,
9339 Some(all0[all0.len() - 1].signature),
9340 None,
9341 1,
9342 )
9343 .unwrap();
9344 assert!(sig_infos.found_before);
9345 assert!(sig_infos.infos.is_empty());
9346
9347 assert!(blockstore
9348 .get_confirmed_signatures_for_address2(
9349 address0,
9350 highest_super_majority_root,
9351 None,
9352 Some(all0[0].signature),
9353 2,
9354 )
9355 .unwrap()
9356 .infos
9357 .is_empty());
9358
9359 assert!(all0.len() % 3 == 0);
9361 for i in (0..all0.len()).step_by(3) {
9362 let results = blockstore
9363 .get_confirmed_signatures_for_address2(
9364 address0,
9365 highest_super_majority_root,
9366 if i == 0 {
9367 None
9368 } else {
9369 Some(all0[i - 1].signature)
9370 },
9371 None,
9372 3,
9373 )
9374 .unwrap()
9375 .infos;
9376 assert_eq!(results.len(), 3);
9377 assert_eq!(results[0], all0[i]);
9378 assert_eq!(results[1], all0[i + 1]);
9379 assert_eq!(results[2], all0[i + 2]);
9380 }
9381
9382 for i in (0..all1.len()).step_by(2) {
9384 let results = blockstore
9385 .get_confirmed_signatures_for_address2(
9386 address1,
9387 highest_super_majority_root,
9388 if i == 0 {
9389 None
9390 } else {
9391 Some(all1[i - 1].signature)
9392 },
9393 None,
9394 2,
9395 )
9396 .unwrap()
9397 .infos;
9398 assert_eq!(results.len(), 2);
9399 assert_eq!(results[0].slot, results[1].slot);
9400 assert_eq!(results[0], all1[i]);
9401 assert_eq!(results[1], all1[i + 1]);
9402 }
9403
9404 let sig_infos = blockstore
9406 .get_confirmed_signatures_for_address2(
9407 address0,
9408 highest_super_majority_root,
9409 Some(all1[0].signature),
9410 None,
9411 usize::MAX,
9412 )
9413 .unwrap();
9414 assert!(sig_infos.found_before);
9415 let results = sig_infos.infos;
9416 assert!(!results.is_empty());
9419
9420 let results2 = blockstore
9421 .get_confirmed_signatures_for_address2(
9422 address0,
9423 highest_super_majority_root,
9424 Some(all1[0].signature),
9425 Some(all1[4].signature),
9426 usize::MAX,
9427 )
9428 .unwrap()
9429 .infos;
9430 assert!(results2.len() < results.len());
9431
9432 let highest_confirmed_slot = 10;
9434
9435 let all0 = blockstore
9437 .get_confirmed_signatures_for_address2(
9438 address0,
9439 highest_confirmed_slot,
9440 None,
9441 None,
9442 usize::MAX,
9443 )
9444 .unwrap()
9445 .infos;
9446 assert_eq!(all0.len(), 14);
9447
9448 let all1 = blockstore
9450 .get_confirmed_signatures_for_address2(
9451 address1,
9452 highest_confirmed_slot,
9453 None,
9454 None,
9455 usize::MAX,
9456 )
9457 .unwrap()
9458 .infos;
9459 assert_eq!(all1.len(), 14);
9460
9461 for i in 0..all0.len() {
9463 let results = blockstore
9464 .get_confirmed_signatures_for_address2(
9465 address0,
9466 highest_confirmed_slot,
9467 if i == 0 {
9468 None
9469 } else {
9470 Some(all0[i - 1].signature)
9471 },
9472 None,
9473 1,
9474 )
9475 .unwrap()
9476 .infos;
9477 assert_eq!(results.len(), 1);
9478 assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9479 }
9480 for i in 0..all0.len() {
9482 let results = blockstore
9483 .get_confirmed_signatures_for_address2(
9484 address0,
9485 highest_confirmed_slot,
9486 if i == 0 {
9487 None
9488 } else {
9489 Some(all0[i - 1].signature)
9490 },
9491 if i == all0.len() - 1 || i == all0.len() {
9492 None
9493 } else {
9494 Some(all0[i + 1].signature)
9495 },
9496 10,
9497 )
9498 .unwrap()
9499 .infos;
9500 assert_eq!(results.len(), 1);
9501 assert_eq!(results[0], all0[i], "Unexpected result for {i}");
9502 }
9503
9504 assert!(blockstore
9505 .get_confirmed_signatures_for_address2(
9506 address0,
9507 highest_confirmed_slot,
9508 Some(all0[all0.len() - 1].signature),
9509 None,
9510 1,
9511 )
9512 .unwrap()
9513 .infos
9514 .is_empty());
9515
9516 assert!(blockstore
9517 .get_confirmed_signatures_for_address2(
9518 address0,
9519 highest_confirmed_slot,
9520 None,
9521 Some(all0[0].signature),
9522 2,
9523 )
9524 .unwrap()
9525 .infos
9526 .is_empty());
9527
9528 assert!(all0.len() % 3 == 2);
9530 for i in (0..all0.len()).step_by(3) {
9531 let results = blockstore
9532 .get_confirmed_signatures_for_address2(
9533 address0,
9534 highest_confirmed_slot,
9535 if i == 0 {
9536 None
9537 } else {
9538 Some(all0[i - 1].signature)
9539 },
9540 None,
9541 3,
9542 )
9543 .unwrap()
9544 .infos;
9545 if i < 12 {
9546 assert_eq!(results.len(), 3);
9547 assert_eq!(results[2], all0[i + 2]);
9548 } else {
9549 assert_eq!(results.len(), 2);
9550 }
9551 assert_eq!(results[0], all0[i]);
9552 assert_eq!(results[1], all0[i + 1]);
9553 }
9554
9555 for i in (0..all1.len()).step_by(2) {
9557 let results = blockstore
9558 .get_confirmed_signatures_for_address2(
9559 address1,
9560 highest_confirmed_slot,
9561 if i == 0 {
9562 None
9563 } else {
9564 Some(all1[i - 1].signature)
9565 },
9566 None,
9567 2,
9568 )
9569 .unwrap()
9570 .infos;
9571 assert_eq!(results.len(), 2);
9572 assert_eq!(results[0].slot, results[1].slot);
9573 assert_eq!(results[0], all1[i]);
9574 assert_eq!(results[1], all1[i + 1]);
9575 }
9576
9577 let results = blockstore
9579 .get_confirmed_signatures_for_address2(
9580 address0,
9581 highest_confirmed_slot,
9582 Some(all1[0].signature),
9583 None,
9584 usize::MAX,
9585 )
9586 .unwrap()
9587 .infos;
9588 assert!(!results.is_empty());
9591
9592 let results2 = blockstore
9593 .get_confirmed_signatures_for_address2(
9594 address0,
9595 highest_confirmed_slot,
9596 Some(all1[0].signature),
9597 Some(all1[4].signature),
9598 usize::MAX,
9599 )
9600 .unwrap()
9601 .infos;
9602 assert!(results2.len() < results.len());
9603
9604 blockstore
9606 .address_signatures_cf
9607 .delete((address0, 2, 0, all0[0].signature))
9608 .unwrap();
9609 let sig_infos = blockstore
9610 .get_confirmed_signatures_for_address2(
9611 address0,
9612 highest_super_majority_root,
9613 Some(all0[0].signature),
9614 None,
9615 usize::MAX,
9616 )
9617 .unwrap();
9618 assert!(!sig_infos.found_before);
9619 assert!(sig_infos.infos.is_empty());
9620 }
9621
9622 #[test]
9623 fn test_get_last_hash() {
9624 let entries: Vec<Entry> = vec![];
9625 let empty_entries_iterator = entries.iter();
9626 assert!(get_last_hash(empty_entries_iterator).is_none());
9627
9628 let entry = next_entry(&solana_sha256_hasher::hash(&[42u8]), 1, vec![]);
9629 let entries: Vec<Entry> = std::iter::successors(Some(entry), |entry| {
9630 Some(next_entry(&entry.hash, 1, vec![]))
9631 })
9632 .take(10)
9633 .collect();
9634 let entries_iterator = entries.iter();
9635 assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash);
9636 }
9637
    #[test]
    fn test_map_transactions_to_statuses() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let transaction_status_cf = &blockstore.transaction_status_cf;

        let slot = 0;
        let mut transactions: Vec<VersionedTransaction> = vec![];
        // Store four transactions, each with a status record whose fee equals
        // its position so the mapping can be verified per transaction below.
        for x in 0..4 {
            let transaction = Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[solana_pubkey::new_rand()],
                Hash::default(),
                vec![solana_pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            );
            let status = TransactionStatusMeta {
                status: solana_transaction_error::TransactionResult::<()>::Err(
                    TransactionError::AccountNotFound,
                ),
                fee: x, // unique per transaction; checked after the mapping
                pre_balances: vec![],
                post_balances: vec![],
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
                loaded_addresses: LoadedAddresses::default(),
                return_data: Some(TransactionReturnData::default()),
                compute_units_consumed: None,
                cost_units: None,
            }
            .into();
            transaction_status_cf
                .put_protobuf((transaction.signatures[0], slot), &status)
                .unwrap();
            transactions.push(transaction.into());
        }

        // Every transaction has a stored status, so the mapping succeeds and
        // pairs each transaction with its own metadata (identified by fee).
        let map_result =
            blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
        assert!(map_result.is_ok());
        let map = map_result.unwrap();
        assert_eq!(map.len(), 4);
        for (x, m) in map.iter().enumerate() {
            assert_eq!(m.meta.fee, x as u64);
        }

        // Add a transaction with no stored status: the mapping must now fail
        // with MissingTransactionMetadata.
        transactions.push(
            Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[solana_pubkey::new_rand()],
                Hash::default(),
                vec![solana_pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            )
            .into(),
        );

        let map_result =
            blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
        assert_matches!(map_result, Err(BlockstoreError::MissingTransactionMetadata));
    }
9704
9705 #[test]
9706 fn test_get_recent_perf_samples_v1_only() {
9707 let ledger_path = get_tmp_ledger_path_auto_delete!();
9708 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9709
9710 let num_entries: usize = 10;
9711
9712 let slot_sample = |i: u64| PerfSampleV1 {
9713 num_transactions: 1406 + i,
9714 num_slots: 34 + i / 2,
9715 sample_period_secs: (40 + i / 5) as u16,
9716 };
9717
9718 let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
9719 for i in 0..num_entries {
9720 let slot = (i + 1) as u64 * 50;
9721 let sample = slot_sample(i as u64);
9722
9723 let bytes = serialize(&sample).unwrap();
9724 blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
9725 perf_samples.push((slot, sample.into()));
9726 }
9727
9728 for i in 0..num_entries {
9729 let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
9730 expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
9731 assert_eq!(
9732 blockstore.get_recent_perf_samples(i + 1).unwrap(),
9733 expected_samples
9734 );
9735 }
9736 }
9737
9738 #[test]
9739 fn test_get_recent_perf_samples_v2_only() {
9740 let ledger_path = get_tmp_ledger_path_auto_delete!();
9741 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9742
9743 let num_entries: usize = 10;
9744
9745 let slot_sample = |i: u64| PerfSampleV2 {
9746 num_transactions: 2495 + i,
9747 num_slots: 167 + i / 2,
9748 sample_period_secs: (37 + i / 5) as u16,
9749 num_non_vote_transactions: 1672 + i,
9750 };
9751
9752 let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
9753 for i in 0..num_entries {
9754 let slot = (i + 1) as u64 * 50;
9755 let sample = slot_sample(i as u64);
9756
9757 let bytes = serialize(&sample).unwrap();
9758 blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
9759 perf_samples.push((slot, sample.into()));
9760 }
9761
9762 for i in 0..num_entries {
9763 let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
9764 expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
9765 assert_eq!(
9766 blockstore.get_recent_perf_samples(i + 1).unwrap(),
9767 expected_samples
9768 );
9769 }
9770 }
9771
9772 #[test]
9773 fn test_get_recent_perf_samples_v1_and_v2() {
9774 let ledger_path = get_tmp_ledger_path_auto_delete!();
9775 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9776
9777 let num_entries: usize = 10;
9778
9779 let slot_sample_v1 = |i: u64| PerfSampleV1 {
9780 num_transactions: 1599 + i,
9781 num_slots: 123 + i / 2,
9782 sample_period_secs: (42 + i / 5) as u16,
9783 };
9784
9785 let slot_sample_v2 = |i: u64| PerfSampleV2 {
9786 num_transactions: 5809 + i,
9787 num_slots: 81 + i / 2,
9788 sample_period_secs: (35 + i / 5) as u16,
9789 num_non_vote_transactions: 2209 + i,
9790 };
9791
9792 let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
9793 for i in 0..num_entries {
9794 let slot = (i + 1) as u64 * 50;
9795
9796 if i % 3 == 0 {
9797 let sample = slot_sample_v1(i as u64);
9798 let bytes = serialize(&sample).unwrap();
9799 blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
9800 perf_samples.push((slot, sample.into()));
9801 } else {
9802 let sample = slot_sample_v2(i as u64);
9803 let bytes = serialize(&sample).unwrap();
9804 blockstore.perf_samples_cf.put_bytes(slot, &bytes).unwrap();
9805 perf_samples.push((slot, sample.into()));
9806 }
9807 }
9808
9809 for i in 0..num_entries {
9810 let mut expected_samples = perf_samples[num_entries - 1 - i..].to_vec();
9811 expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
9812 assert_eq!(
9813 blockstore.get_recent_perf_samples(i + 1).unwrap(),
9814 expected_samples
9815 );
9816 }
9817 }
9818
9819 #[test]
9820 fn test_write_perf_samples() {
9821 let ledger_path = get_tmp_ledger_path_auto_delete!();
9822 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9823
9824 let num_entries: usize = 10;
9825 let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
9826 for x in 1..num_entries + 1 {
9827 let slot = x as u64 * 50;
9828 let sample = PerfSampleV2 {
9829 num_transactions: 1000 + x as u64,
9830 num_slots: 50,
9831 sample_period_secs: 20,
9832 num_non_vote_transactions: 300 + x as u64,
9833 };
9834
9835 blockstore.write_perf_sample(slot, &sample).unwrap();
9836 perf_samples.push((slot, PerfSample::V2(sample)));
9837 }
9838
9839 for x in 0..num_entries {
9840 let mut expected_samples = perf_samples[num_entries - 1 - x..].to_vec();
9841 expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
9842 assert_eq!(
9843 blockstore.get_recent_perf_samples(x + 1).unwrap(),
9844 expected_samples
9845 );
9846 }
9847 }
9848
9849 #[test]
9850 fn test_lowest_slot() {
9851 let ledger_path = get_tmp_ledger_path_auto_delete!();
9852 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9853
9854 assert_eq!(blockstore.lowest_slot(), 0);
9855
9856 for slot in 0..10 {
9857 let (shreds, _) = make_slot_entries(slot, 0, 1);
9858 blockstore.insert_shreds(shreds, None, false).unwrap();
9859 }
9860 assert_eq!(blockstore.lowest_slot(), 1);
9861 blockstore.run_purge(0, 5, PurgeType::Exact).unwrap();
9862 assert_eq!(blockstore.lowest_slot(), 6);
9863 }
9864
9865 #[test]
9866 fn test_highest_slot() {
9867 let ledger_path = get_tmp_ledger_path_auto_delete!();
9868 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
9869
9870 assert_eq!(blockstore.highest_slot().unwrap(), None);
9871
9872 for slot in 0..10 {
9873 let (shreds, _) = make_slot_entries(slot, 0, 1);
9874 blockstore.insert_shreds(shreds, None, false).unwrap();
9875 assert_eq!(blockstore.highest_slot().unwrap(), Some(slot));
9876 }
9877 blockstore.run_purge(5, 10, PurgeType::Exact).unwrap();
9878 assert_eq!(blockstore.highest_slot().unwrap(), Some(4));
9879
9880 blockstore.run_purge(0, 4, PurgeType::Exact).unwrap();
9881 assert_eq!(blockstore.highest_slot().unwrap(), None);
9882 }
9883
    #[test]
    fn test_recovery() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let slot = 1;
        let (data_shreds, coding_shreds, leader_schedule_cache) =
            setup_erasure_shreds(slot, 0, 100);

        // Insert only the coding shreds; with a Reed-Solomon cache supplied,
        // insertion should recover the missing data shreds from them.
        let (dummy_retransmit_sender, _) = EvictingSender::new_bounded(0);
        let coding_shreds = coding_shreds
            .into_iter()
            .map(|shred| (Cow::Owned(shred), false));
        blockstore
            .do_insert_shreds(
                coding_shreds,
                Some(&leader_schedule_cache),
                false,
                Some((&ReedSolomonCache::default(), &dummy_retransmit_sender)),
                &mut BlockstoreInsertionMetrics::default(),
            )
            .unwrap();
        let shred_bufs: Vec<_> = data_shreds.iter().map(Shred::payload).cloned().collect();

        // Every original data shred must now be readable from the blockstore
        // with a byte-identical payload, even though none were inserted.
        for (s, buf) in data_shreds.iter().zip(shred_bufs) {
            assert_eq!(
                blockstore
                    .get_data_shred(s.slot(), s.index() as u64)
                    .unwrap()
                    .unwrap(),
                buf.as_ref(),
            );
        }

        // The slot's index must agree with the stored shreds.
        verify_index_integrity(&blockstore, slot);
    }
9921
    #[test]
    fn test_index_integrity() {
        // Exercise index bookkeeping across a variety of partial / split
        // insertion patterns; after each one the Index column must agree
        // exactly with the stored data and coding shreds.
        let slot = 1;
        let num_entries = 100;
        let (data_shreds, coding_shreds, leader_schedule_cache) =
            setup_erasure_shreds(slot, 0, num_entries);
        assert!(data_shreds.len() > 3);
        assert!(coding_shreds.len() > 3);

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Scenario 1: insert all data and coding shreds together.
        let all_shreds: Vec<_> = data_shreds
            .iter()
            .cloned()
            .chain(coding_shreds.iter().cloned())
            .collect();
        blockstore
            .insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 2: insert only the coding shreds.
        blockstore
            .insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 3: all coding shreds except the last.
        blockstore
            .insert_shreds(
                coding_shreds[..coding_shreds.len() - 1].to_vec(),
                Some(&leader_schedule_cache),
                false,
            )
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 4: all but the last data shred and last coding shred.
        let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
            .collect();
        blockstore
            .insert_shreds(shreds, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 5: roughly the first half of both shred kinds.
        let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
            .collect();
        blockstore
            .insert_shreds(shreds, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 6: the two halves inserted as separate batches.
        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
            .collect();
        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
            .iter()
            .cloned()
            .chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
            .collect();
        blockstore
            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
            .unwrap();
        blockstore
            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 7: first half, then a single adjacent data/coding shred.
        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
            .collect();
        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
            .iter()
            .cloned()
            .chain(
                coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
                    .iter()
                    .cloned(),
            )
            .collect();
        blockstore
            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
            .unwrap();
        blockstore
            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);

        // Scenario 8: same pattern as scenario 7, shifted back by one shred.
        let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
            .iter()
            .cloned()
            .chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
            .collect();
        let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
            .iter()
            .cloned()
            .chain(
                coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
                    .iter()
                    .cloned(),
            )
            .collect();
        blockstore
            .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
            .unwrap();
        blockstore
            .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
            .unwrap();
        verify_index_integrity(&blockstore, slot);
        blockstore.purge_and_compact_slots(0, slot);
    }
10058
    /// Convenience wrapper around `setup_erasure_shreds_with_index` that
    /// starts the erasure batch at FEC set index 0.
    fn setup_erasure_shreds(
        slot: u64,
        parent_slot: u64,
        num_entries: u64,
    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
        setup_erasure_shreds_with_index(slot, parent_slot, num_entries, 0)
    }
10066
    /// Wrapper that chains the shred batch off a random merkle root.
    fn setup_erasure_shreds_with_index(
        slot: u64,
        parent_slot: u64,
        num_entries: u64,
        fec_set_index: u32,
    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
        setup_erasure_shreds_with_index_and_chained_merkle(
            slot,
            parent_slot,
            num_entries,
            fec_set_index,
            // Arbitrary chained merkle root for callers that don't care.
            Hash::new_from_array(rand::thread_rng().gen()),
        )
    }
10081
    /// Wrapper that marks the shredded batch as the last in its slot.
    fn setup_erasure_shreds_with_index_and_chained_merkle(
        slot: u64,
        parent_slot: u64,
        num_entries: u64,
        fec_set_index: u32,
        chained_merkle_root: Hash,
    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
        setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
            slot,
            parent_slot,
            num_entries,
            fec_set_index,
            chained_merkle_root,
            true, // is_last_in_slot
        )
    }
10098
    /// Shreds `num_entries` entries into merkle data/coding shreds for `slot`,
    /// signed by a fresh leader keypair, and returns the shreds together with
    /// a `LeaderScheduleCache` pinned to that keypair's pubkey so leader
    /// verification of the shreds succeeds on insertion.
    fn setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
        slot: u64,
        parent_slot: u64,
        num_entries: u64,
        fec_set_index: u32,
        chained_merkle_root: Hash,
        is_last_in_slot: bool,
    ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
        let entries = make_slot_entries_with_transactions(num_entries);
        let leader_keypair = Arc::new(Keypair::new());
        let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
        let (data_shreds, coding_shreds) = shredder.entries_to_merkle_shreds_for_tests(
            &leader_keypair,
            &entries,
            is_last_in_slot,
            chained_merkle_root,
            // fec_set_index is passed twice — presumably the starting data and
            // coding shred indices; TODO confirm against the shredder API.
            fec_set_index,
            fec_set_index,
            &ReedSolomonCache::default(),
            &mut ProcessShredsStats::default(),
        );

        // Fix the leader schedule to the shredding keypair so the shreds'
        // signatures verify against the expected leader.
        let genesis_config = create_genesis_config(2).genesis_config;
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
        let fixed_schedule = FixedSchedule {
            leader_schedule: Arc::new(Box::new(IdentityKeyedLeaderSchedule::new_from_schedule(
                vec![leader_keypair.pubkey()],
            ))),
        };
        leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule));

        (data_shreds, coding_shreds, Arc::new(leader_schedule_cache))
    }
10133
10134 fn verify_index_integrity(blockstore: &Blockstore, slot: u64) {
10135 let shred_index = blockstore.get_index(slot).unwrap().unwrap();
10136
10137 let data_iter = blockstore.slot_data_iterator(slot, 0).unwrap();
10138 let mut num_data = 0;
10139 for ((slot, index), _) in data_iter {
10140 num_data += 1;
10141 assert!(blockstore.get_data_shred(slot, index).unwrap().is_some());
10143 assert!(shred_index.data().contains(index));
10145 }
10146
10147 let num_data_in_index = shred_index.data().num_shreds();
10149 assert_eq!(num_data_in_index, num_data);
10150
10151 let coding_iter = blockstore.slot_coding_iterator(slot, 0).unwrap();
10152 let mut num_coding = 0;
10153 for ((slot, index), _) in coding_iter {
10154 num_coding += 1;
10155 assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some());
10157 assert!(shred_index.coding().contains(index));
10159 }
10160
10161 let num_coding_in_index = shred_index.coding().num_shreds();
10163 assert_eq!(num_coding_in_index, num_coding);
10164 }
10165
    #[test]
    fn test_duplicate_slot() {
        // Shred two different entry batches for the same slot: shreds at the
        // same (slot, index) with different payloads constitute duplicates.
        let slot = 0;
        let entries1 = make_slot_entries_with_transactions(1);
        let entries2 = make_slot_entries_with_transactions(1);
        let leader_keypair = Arc::new(Keypair::new());
        let reed_solomon_cache = ReedSolomonCache::default();
        let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
        let merkle_root = Hash::new_from_array(rand::thread_rng().gen());
        let (shreds, _) = shredder.entries_to_merkle_shreds_for_tests(
            &leader_keypair,
            &entries1,
            true, // is_last_in_slot
            merkle_root,
            0, // starting data/code shred indices — TODO confirm param names
            0,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );
        let (duplicate_shreds, _) = shredder.entries_to_merkle_shreds_for_tests(
            &leader_keypair,
            &entries2,
            true,
            merkle_root,
            0,
            0,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );
        let shred = shreds[0].clone();
        let duplicate_shred = duplicate_shreds[0].clone();
        let non_duplicate_shred = shred.clone();

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        blockstore
            .insert_shreds(vec![shred.clone()], None, false)
            .unwrap();

        // No duplicate proof has been stored yet.
        assert!(!blockstore.has_duplicate_shreds_in_slot(slot));

        // A conflicting shred at the same position is reported as a duplicate,
        // returning the payload of the shred already stored; an identical
        // shred is not.
        assert_eq!(
            blockstore.is_shred_duplicate(&duplicate_shred).as_deref(),
            Some(shred.payload().as_ref())
        );
        assert!(blockstore
            .is_shred_duplicate(&non_duplicate_shred)
            .is_none());

        // Store an explicit duplicate proof and read it back intact.
        blockstore
            .store_duplicate_slot(
                slot,
                shred.payload().clone(),
                duplicate_shred.payload().clone(),
            )
            .unwrap();

        assert!(blockstore.has_duplicate_shreds_in_slot(slot));

        let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap();
        assert_eq!(duplicate_proof.shred1, *shred.payload());
        assert_eq!(duplicate_proof.shred2, *duplicate_shred.payload());
    }
10235
10236 #[test]
10237 fn test_clear_unconfirmed_slot() {
10238 let ledger_path = get_tmp_ledger_path_auto_delete!();
10239 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10240
10241 let unconfirmed_slot = 9;
10242 let unconfirmed_child_slot = 10;
10243 let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];
10244
10245 let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
10247 .into_iter()
10248 .flat_map(|x| x.0)
10249 .collect();
10250 blockstore.insert_shreds(shreds, None, false).unwrap();
10251 for index in 0..32 {
10253 assert_matches!(
10254 blockstore.get_data_shred(unconfirmed_slot, index as u64),
10255 Ok(Some(_))
10256 );
10257 }
10258 blockstore.set_dead_slot(unconfirmed_slot).unwrap();
10259
10260 blockstore.clear_unconfirmed_slot(unconfirmed_slot);
10262 assert!(!blockstore.is_dead(unconfirmed_slot));
10263 assert_eq!(
10264 blockstore
10265 .meta(unconfirmed_slot)
10266 .unwrap()
10267 .unwrap()
10268 .next_slots,
10269 vec![unconfirmed_child_slot]
10270 );
10271 assert!(blockstore
10272 .get_data_shred(unconfirmed_slot, 0)
10273 .unwrap()
10274 .is_none());
10275 }
10276
    #[test]
    fn test_clear_unconfirmed_slot_and_insert_again() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let confirmed_slot = 7;
        let unconfirmed_slot = 8;
        let slots = vec![confirmed_slot, unconfirmed_slot];

        // Each slot produces 32 data shreds here (asserted below).
        let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
            .into_iter()
            .flat_map(|x| x.0)
            .collect();
        assert_eq!(shreds.len(), 2 * 32);

        // Keep the first shred of the unconfirmed slot aside so it can be
        // re-inserted after the slot is cleared.
        let unconfirmed_slot_shreds = vec![shreds[32].clone()];
        assert_eq!(unconfirmed_slot_shreds[0].slot(), unconfirmed_slot);

        blockstore.insert_shreds(shreds, None, false).unwrap();

        // Clearing drops the slot's shreds and leaves it non-dead.
        blockstore.clear_unconfirmed_slot(unconfirmed_slot);
        assert!(!blockstore.is_dead(unconfirmed_slot));
        assert!(blockstore
            .get_data_shred(unconfirmed_slot, 0)
            .unwrap()
            .is_none());

        // Re-inserting a shred for the cleared slot must restore the parent's
        // next_slots link to it.
        blockstore
            .insert_shreds(unconfirmed_slot_shreds, None, false)
            .unwrap();
        assert_eq!(
            blockstore.meta(confirmed_slot).unwrap().unwrap().next_slots,
            vec![unconfirmed_slot]
        );
    }
10317
10318 #[test]
10319 fn test_update_completed_data_indexes() {
10320 let mut completed_data_indexes = CompletedDataIndexes::default();
10321 let mut shred_index = ShredIndex::default();
10322
10323 for i in 0..10 {
10324 shred_index.insert(i as u64);
10325 assert!(update_completed_data_indexes(
10326 true,
10327 i,
10328 &shred_index,
10329 &mut completed_data_indexes
10330 )
10331 .eq(std::iter::once(i..i + 1)));
10332 assert!(completed_data_indexes.clone().into_iter().eq(0..=i));
10333 }
10334 }
10335
    #[test]
    fn test_update_completed_data_indexes_out_of_order() {
        let mut completed_data_indexes = CompletedDataIndexes::default();
        let mut shred_index = ShredIndex::default();

        // Shred 4 arrives first and does not end a data set: nothing completes.
        shred_index.insert(4);
        assert!(
            update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes)
                .eq([])
        );
        assert!(completed_data_indexes.is_empty());

        // Shred 2, also not a data-set boundary: still nothing completes.
        shred_index.insert(2);
        assert!(
            update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes)
                .eq([])
        );
        assert!(completed_data_indexes.is_empty());

        // Shred 3 ends a data set, but the preceding shreds are not all
        // present yet, so no completed range is reported; index 3 is recorded
        // as a boundary.
        shred_index.insert(3);
        assert!(
            update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes)
                .eq([])
        );
        assert!(completed_data_indexes.clone().into_iter().eq([3]));

        // Shred 1 ends a data set; shreds 2 and 3 are now contiguous behind
        // boundary 3, so the range [2, 4) completes. Boundaries: {1, 3}.
        shred_index.insert(1);
        assert!(
            update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes)
                .eq(std::iter::once(2..4))
        );
        assert!(completed_data_indexes.clone().into_iter().eq([1, 3]));

        // Shred 0 fills the last gap, completing ranges [0, 1) and [1, 2).
        // Boundaries: {0, 1, 3}.
        shred_index.insert(0);
        assert!(
            update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes)
                .eq([0..1, 1..2])
        );
        assert!(completed_data_indexes.clone().into_iter().eq([0, 1, 3]));
    }
10380
10381 #[test]
10382 fn test_rewards_protobuf_backward_compatibility() {
10383 let ledger_path = get_tmp_ledger_path_auto_delete!();
10384 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10385
10386 let rewards: Rewards = (0..100)
10387 .map(|i| Reward {
10388 pubkey: solana_pubkey::new_rand().to_string(),
10389 lamports: 42 + i,
10390 post_balance: u64::MAX,
10391 reward_type: Some(RewardType::Fee),
10392 commission: None,
10393 })
10394 .collect();
10395 let protobuf_rewards: generated::Rewards = rewards.into();
10396
10397 let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into();
10398 for slot in 0..2 {
10399 let data = serialize(&deprecated_rewards).unwrap();
10400 blockstore.rewards_cf.put_bytes(slot, &data).unwrap();
10401 }
10402 for slot in 2..4 {
10403 blockstore
10404 .rewards_cf
10405 .put_protobuf(slot, &protobuf_rewards)
10406 .unwrap();
10407 }
10408 for slot in 0..4 {
10409 assert_eq!(
10410 blockstore
10411 .rewards_cf
10412 .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)
10413 .unwrap()
10414 .unwrap(),
10415 protobuf_rewards
10416 );
10417 }
10418 }
10419
    #[test]
    fn test_transaction_status_protobuf_backward_compatibility() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // A fully-populated status record exercising every optional field.
        let status = TransactionStatusMeta {
            status: Ok(()),
            fee: 42,
            pre_balances: vec![1, 2, 3],
            post_balances: vec![1, 2, 3],
            inner_instructions: Some(vec![]),
            log_messages: Some(vec![]),
            pre_token_balances: Some(vec![TransactionTokenBalance {
                account_index: 0,
                mint: Pubkey::new_unique().to_string(),
                ui_token_amount: UiTokenAmount {
                    ui_amount: Some(1.1),
                    decimals: 1,
                    amount: "11".to_string(),
                    ui_amount_string: "1.1".to_string(),
                },
                owner: Pubkey::new_unique().to_string(),
                program_id: Pubkey::new_unique().to_string(),
            }]),
            post_token_balances: Some(vec![TransactionTokenBalance {
                account_index: 0,
                mint: Pubkey::new_unique().to_string(),
                ui_token_amount: UiTokenAmount {
                    ui_amount: None,
                    decimals: 1,
                    amount: "11".to_string(),
                    ui_amount_string: "1.1".to_string(),
                },
                owner: Pubkey::new_unique().to_string(),
                program_id: Pubkey::new_unique().to_string(),
            }]),
            rewards: Some(vec![Reward {
                pubkey: "My11111111111111111111111111111111111111111".to_string(),
                lamports: -42,
                post_balance: 42,
                reward_type: Some(RewardType::Rent),
                commission: None,
            }]),
            loaded_addresses: LoadedAddresses::default(),
            return_data: Some(TransactionReturnData {
                program_id: Pubkey::new_unique(),
                data: vec![1, 2, 3],
            }),
            compute_units_consumed: Some(23456),
            cost_units: Some(5678),
        };
        // Derive both storage representations of the same status.
        let deprecated_status: StoredTransactionStatusMeta = status.clone().try_into().unwrap();
        let protobuf_status: generated::TransactionStatusMeta = status.into();

        // Slots 0-1 hold the legacy bincode encoding; slots 2-3 the current
        // protobuf encoding.
        for slot in 0..2 {
            let data = serialize(&deprecated_status).unwrap();
            blockstore
                .transaction_status_cf
                .put_bytes((Signature::default(), slot), &data)
                .unwrap();
        }
        for slot in 2..4 {
            blockstore
                .transaction_status_cf
                .put_protobuf((Signature::default(), slot), &protobuf_status)
                .unwrap();
        }
        // Reads must transparently decode both encodings to the same value.
        for slot in 0..4 {
            assert_eq!(
                blockstore
                    .transaction_status_cf
                    .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                        Signature::default(),
                        slot
                    ))
                    .unwrap()
                    .unwrap(),
                protobuf_status
            );
        }
    }
10505
10506 fn make_large_tx_entry(num_txs: usize) -> Entry {
10507 let txs: Vec<_> = (0..num_txs)
10508 .map(|_| {
10509 let keypair0 = Keypair::new();
10510 let to = solana_pubkey::new_rand();
10511 solana_system_transaction::transfer(&keypair0, &to, 1, Hash::default())
10512 })
10513 .collect();
10514
10515 Entry::new(&Hash::default(), 1, txs)
10516 }
10517
    #[test]
    fn erasure_multiple_config() {
        // Shred two different entry batches for the same slot, then insert
        // coding shreds drawn from both batches; the blockstore must detect
        // the conflict and record duplicate shreds for the slot.
        agave_logger::setup();
        let slot = 1;
        let num_txs = 20;
        let entries = [make_large_tx_entry(num_txs)];
        let entries2 = [make_large_tx_entry(num_txs)];

        let version = version_from_hash(&entries[0].hash);
        let shredder = Shredder::new(slot, 0, 0, version).unwrap();
        let reed_solomon_cache = ReedSolomonCache::default();
        let merkle_root = Hash::new_from_array(rand::thread_rng().gen());
        let kp = Keypair::new();
        let (data1, coding1) = shredder.entries_to_merkle_shreds_for_tests(
            &kp,
            &entries,
            true, // is_last_in_slot
            merkle_root,
            0,
            0,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );
        // Second batch from different entries; note the final index argument
        // differs (1 vs 0) from the first batch — presumably the starting
        // coding shred index; TODO confirm against the shredder API.
        let (_data2, coding2) = shredder.entries_to_merkle_shreds_for_tests(
            &kp,
            &entries2,
            true,
            merkle_root,
            0,
            1,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        for shred in &data1 {
            info!("shred {:?}", shred.id());
        }
        for shred in &coding1 {
            info!("coding1 {:?}", shred.id());
        }
        for shred in &coding2 {
            info!("coding2 {:?}", shred.id());
        }
        // Insert most of the first batch's data shreds, then one coding shred
        // from each batch: the mixed coding shreds conflict.
        blockstore
            .insert_shreds(data1[..data1.len() - 2].to_vec(), None, false)
            .unwrap();
        blockstore
            .insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false)
            .unwrap();
        assert!(blockstore.has_duplicate_shreds_in_slot(slot));
    }
10583
    #[test]
    fn test_insert_data_shreds_same_slot_last_index() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Create enough ticks to span more than one data shred, then make a
        // second copy of the shreds re-signed with a different keypair.
        let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1;
        let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries);
        let mut duplicate_shreds = original_shreds.clone();
        for shred in &mut duplicate_shreds {
            shred.sign(&Keypair::new());
        }
        assert!(original_shreds.len() > 1);
        let last_index = original_shreds.last().unwrap().index() as u64;
        // Withhold the first shred so the original batch can never complete
        // the slot on its own.
        original_shreds.remove(0);

        // Repeatedly inserting the incomplete batch must never complete the
        // slot: nothing is consumed, no entries are readable, and the slot is
        // neither full nor dead, no matter how often it is retried.
        for _ in 0..10 {
            blockstore
                .insert_shreds(original_shreds.clone(), None, false)
                .unwrap();
            let meta = blockstore.meta(0).unwrap().unwrap();
            assert!(!blockstore.is_dead(0));
            assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]);
            assert_eq!(meta.consumed, 0);
            assert_eq!(meta.received, last_index + 1);
            assert_eq!(meta.parent_slot, Some(0));
            assert_eq!(meta.last_index, Some(last_index));
            assert!(!blockstore.is_full(0));
        }

        // Inserting the complete (re-signed) duplicate batch fills the gap
        // and completes the slot.
        let num_shreds = duplicate_shreds.len() as u64;
        blockstore
            .insert_shreds(duplicate_shreds, None, false)
            .unwrap();

        assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);

        let meta = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(meta.consumed, num_shreds);
        assert_eq!(meta.received, num_shreds);
        assert_eq!(meta.parent_slot, Some(0));
        assert_eq!(meta.last_index, Some(num_shreds - 1));
        assert!(blockstore.is_full(0));
        assert!(!blockstore.is_dead(0));
    }
10633
10634 #[allow(clippy::type_complexity)]
10638 fn setup_duplicate_last_in_slot(
10639 slot: Slot,
10640 ) -> ((Vec<Shred>, Vec<Shred>), (Vec<Shred>, Vec<Shred>)) {
10641 let entries = make_slot_entries_with_transactions(1);
10642 let leader_keypair = Arc::new(Keypair::new());
10643 let reed_solomon_cache = ReedSolomonCache::default();
10644 let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
10645 let (shreds1, code1): (Vec<Shred>, Vec<Shred>) = shredder
10646 .make_merkle_shreds_from_entries(
10647 &leader_keypair,
10648 &entries,
10649 true, Hash::new_unique(), 0, 0, &reed_solomon_cache,
10654 &mut ProcessShredsStats::default(),
10655 )
10656 .partition(Shred::is_data);
10657 let last_data1 = shreds1.last().unwrap();
10658 let last_code1 = code1.last().unwrap();
10659
10660 let (shreds2, code2) = shredder
10661 .make_merkle_shreds_from_entries(
10662 &leader_keypair,
10663 &entries,
10664 true, last_data1.chained_merkle_root().unwrap(),
10666 last_data1.index() + 1, last_code1.index() + 1, &reed_solomon_cache,
10669 &mut ProcessShredsStats::default(),
10670 )
10671 .partition(Shred::is_data);
10672 ((shreds1, code1), (shreds2, code2))
10673 }
10674
10675 #[test]
10676 fn test_duplicate_last_index() {
10677 let slot = 1;
10678 let ((shreds1, _code1), (shreds2, _code2)) = setup_duplicate_last_in_slot(slot);
10679
10680 let last_data1 = shreds1.last().unwrap();
10681 let last_data2 = shreds2.last().unwrap();
10682 let ledger_path = get_tmp_ledger_path_auto_delete!();
10683 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
10684
10685 blockstore
10686 .insert_shreds(vec![last_data1.clone(), last_data2.clone()], None, false)
10687 .unwrap();
10688
10689 assert!(blockstore.get_duplicate_slot(slot).is_some());
10690 }
10691
    #[test]
    fn test_duplicate_last_index_mark_dead() {
        // Number of leading shreds whose stored/dropped status is checked
        // in the loops below.
        let num_shreds = 10;
        // NOTE(review): despite the names, 31 > 8 here — presumably these
        // are the indices of the two conflicting last-in-slot shreds from
        // setup_duplicate_last_in_slot's batches; confirm against the batch
        // sizes that helper produces.
        let smaller_last_shred_index = 31;
        let larger_last_shred_index = 8;

        // Concatenate the two conflicting batches (each carries its own
        // last-in-slot shred) into one insertion set.
        let setup_test_shreds = |slot: Slot| -> Vec<Shred> {
            let ((mut shreds1, _code1), (mut shreds2, _code2)) = setup_duplicate_last_in_slot(slot);
            shreds1.append(&mut shreds2);
            shreds1
        };

        // Insert only the non-conflicting prefix to capture the SlotMeta and
        // Index a clean insertion produces, then purge so the slot is empty
        // again for the real test.
        let get_expected_slot_meta_and_index_meta =
            |blockstore: &Blockstore, shreds: Vec<Shred>| -> (SlotMeta, Index) {
                let slot = shreds[0].slot();
                blockstore
                    .insert_shreds(shreds.clone(), None, false)
                    .unwrap();
                let meta = blockstore.meta(slot).unwrap().unwrap();
                assert_eq!(meta.consumed, shreds.len() as u64);
                let shreds_index = blockstore.get_index(slot).unwrap().unwrap();
                for i in 0..shreds.len() as u64 {
                    assert!(shreds_index.data().contains(i));
                }

                // Remove the slot; the captured metas serve purely as the
                // expected values for later comparisons.
                blockstore
                    .run_purge(slot, slot, PurgeType::Exact)
                    .expect("Purge database operations failed");
                assert!(blockstore.meta(slot).unwrap().is_none());

                (meta, shreds_index)
            };

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let mut slot = 0;
        let shreds = setup_test_shreds(slot);

        // Case 1: the batch ending at `smaller_last_shred_index` lands
        // first. The conflicting tail is dropped and the slot is flagged
        // duplicate but NOT dead; stored state matches a clean insert of
        // just the accepted prefix.
        let (expected_slot_meta, expected_index) = get_expected_slot_meta_and_index_meta(
            &blockstore,
            shreds[..=smaller_last_shred_index].to_vec(),
        );
        blockstore
            .insert_shreds(shreds.clone(), None, false)
            .unwrap();
        assert!(blockstore.get_duplicate_slot(slot).is_some());
        assert!(!blockstore.is_dead(slot));
        // Shreds past the accepted last index must not have been stored.
        for i in 0..num_shreds {
            if i <= smaller_last_shred_index as u64 {
                assert_eq!(
                    blockstore.get_data_shred(slot, i).unwrap().unwrap(),
                    shreds[i as usize].payload().as_ref(),
                );
            } else {
                assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
            }
        }
        let mut meta = blockstore.meta(slot).unwrap().unwrap();
        // Insertion timestamps differ between runs; normalize before the
        // whole-struct comparison.
        meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
        assert_eq!(meta, expected_slot_meta);
        assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);

        // Case 2: reversed insertion order, so the conflicting last-index
        // evidence arrives first — the slot is marked dead.
        slot += 1;
        let mut shreds = setup_test_shreds(slot);
        shreds.reverse();
        blockstore
            .insert_shreds(shreds.clone(), None, false)
            .unwrap();
        assert!(blockstore.is_dead(slot));
        // Both conflicting "last" shreds are rejected; everything else is
        // stored.
        for i in 0..num_shreds {
            let shred_to_check = &shreds[i as usize];
            let shred_index = shred_to_check.index() as u64;
            if shred_index != smaller_last_shred_index as u64
                && shred_index != larger_last_shred_index as u64
            {
                assert_eq!(
                    blockstore
                        .get_data_shred(slot, shred_index)
                        .unwrap()
                        .unwrap(),
                    shred_to_check.payload().as_ref(),
                );
            } else {
                assert!(blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .is_none());
            }
        }

        // Case 3: same reversed order, but one insert_shreds call per
        // shred — the outcome must match the batched insertion above.
        slot += 1;
        let mut shreds = setup_test_shreds(slot);
        shreds.reverse();
        for shred in shreds.clone() {
            blockstore.insert_shreds(vec![shred], None, false).unwrap();
        }
        assert!(blockstore.is_dead(slot));
        for i in 0..num_shreds {
            let shred_to_check = &shreds[i as usize];
            let shred_index = shred_to_check.index() as u64;
            if shred_index != smaller_last_shred_index as u64
                && shred_index != larger_last_shred_index as u64
            {
                assert_eq!(
                    blockstore
                        .get_data_shred(slot, shred_index)
                        .unwrap()
                        .unwrap(),
                    shred_to_check.payload().as_ref(),
                );
            } else {
                assert!(blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .is_none());
            }
        }
    }
10826
    #[test]
    fn test_get_slot_entries_dead_slot_race() {
        // Race test: a reader calling get_slot_entries_with_shred_info while
        // a writer inserts conflicting last-in-slot shreds (which marks the
        // slot dead) must never observe the slot as "full".
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        {
            let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
            let (slot_sender, slot_receiver) = unbounded();
            let (shred_sender, shred_receiver) = unbounded::<Vec<Shred>>();
            let (signal_sender, signal_receiver) = unbounded();

            std::thread::scope(|scope| {
                // Reader thread: for each slot, either a DeadSlot error or a
                // non-full read is acceptable; a full read is the race bug.
                scope.spawn(|| {
                    while let Ok(slot) = slot_receiver.recv() {
                        match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
                            Ok((_entries, _num_shreds, is_full)) => {
                                if is_full {
                                    signal_sender
                                        .send(Err(IoError::other(
                                            "got full slot entries for dead slot",
                                        )))
                                        .unwrap();
                                }
                            }
                            Err(err) => {
                                assert_matches!(err, BlockstoreError::DeadSlot);
                            }
                        }
                        signal_sender.send(Ok(())).unwrap();
                    }
                });

                // Writer thread: inserts the conflicting shreds, which marks
                // the slot both duplicate and dead.
                scope.spawn(|| {
                    while let Ok(shreds) = shred_receiver.recv() {
                        let slot = shreds[0].slot();
                        // Hold lowest_cleanup_slot's write lock across the
                        // insert — presumably to shape the interleaving with
                        // the reader's locking; confirm against
                        // get_slot_entries_with_shred_info's lock usage.
                        #[allow(clippy::readonly_write_lock)]
                        let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap();
                        blockstore.insert_shreds(shreds, None, false).unwrap();
                        assert!(blockstore.get_duplicate_slot(slot).is_some());
                        assert!(blockstore.is_dead(slot));
                        signal_sender.send(Ok(())).unwrap();
                    }
                });

                for slot in 0..100 {
                    let ((mut shreds1, _), (mut shreds2, _)) = setup_duplicate_last_in_slot(slot);
                    // Send batch 2 followed by batch 1 in a single insert,
                    // so the conflicting last-in-slot shreds collide.
                    shreds2.append(&mut shreds1);
                    slot_sender.send(slot).unwrap();
                    shred_sender.send(shreds2).unwrap();

                    // One ack from each worker thread per slot.
                    for _ in 1..=2 {
                        let res = signal_receiver.recv().unwrap();
                        assert!(res.is_ok(), "race condition: {res:?}");
                    }
                }

                // Close the channels so both worker loops terminate before
                // the scope joins them.
                drop(slot_sender);
                drop(shred_sender);
            });
        }
    }
10895
    #[test]
    fn test_previous_erasure_set() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        // In-memory working set of erasure metas, as accumulated during an
        // insert batch; previous_erasure_set consults both this map and the
        // on-disk column.
        let mut erasure_metas = BTreeMap::new();

        let parent_slot = 0;
        let prev_slot = 1;
        let slot = 2;
        // First erasure set of `slot` (fec_set_index 0).
        let (data_shreds_0, coding_shreds_0, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, 0);
        let erasure_set_0 = ErasureSetId::new(slot, 0);
        let erasure_meta_0 =
            ErasureMeta::from_coding_shred(coding_shreds_0.first().unwrap()).unwrap();

        // Second erasure set of `slot`, starting right after set 0's data
        // shreds; this is the expected "previous" set in the queries below.
        let prev_fec_set_index = data_shreds_0.len() as u32;
        let (data_shreds_prev, coding_shreds_prev, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
        let erasure_set_prev = ErasureSetId::new(slot, prev_fec_set_index);
        let erasure_meta_prev =
            ErasureMeta::from_coding_shred(coding_shreds_prev.first().unwrap()).unwrap();

        // Decoy: same fec_set_index but in a *different* slot; it must never
        // be returned as the predecessor of a set in `slot`.
        let (_, coding_shreds_prev_slot, _) =
            setup_erasure_shreds_with_index(prev_slot, parent_slot, 10, prev_fec_set_index);
        let erasure_set_prev_slot = ErasureSetId::new(prev_slot, prev_fec_set_index);
        let erasure_meta_prev_slot =
            ErasureMeta::from_coding_shred(coding_shreds_prev_slot.first().unwrap()).unwrap();

        // The erasure set whose predecessor is queried throughout.
        let fec_set_index = data_shreds_prev.len() as u32 + prev_fec_set_index;
        let erasure_set = ErasureSetId::new(slot, fec_set_index);

        // Empty working set and empty blockstore: no predecessor.
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap(),
            None
        );

        // A non-adjacent earlier set (set 0) does not qualify as the
        // predecessor — neither as a dirty working-set entry...
        erasure_metas.insert(erasure_set_0, WorkingEntry::Dirty(erasure_meta_0));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap(),
            None
        );

        // ...nor as a clean entry that is also persisted on disk.
        erasure_metas.insert(erasure_set_0, WorkingEntry::Clean(erasure_meta_0));
        blockstore
            .put_erasure_meta(erasure_set_0, &erasure_meta_0)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap(),
            None
        );

        // The immediately-preceding set IS found: via a dirty working-set
        // entry...
        erasure_metas.insert(erasure_set_prev, WorkingEntry::Dirty(erasure_meta_prev));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap()
                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
            Some((erasure_set_prev, erasure_meta_prev))
        );

        // ...via the on-disk column when absent from the working set...
        erasure_metas.remove(&erasure_set_prev);
        blockstore
            .put_erasure_meta(erasure_set_prev, &erasure_meta_prev)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap()
                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
            Some((erasure_set_prev, erasure_meta_prev))
        );

        // ...and via a clean working-set entry.
        erasure_metas.insert(erasure_set_prev, WorkingEntry::Clean(erasure_meta_prev));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap()
                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
            Some((erasure_set_prev, erasure_meta_prev))
        );

        // Walking further back: set 0 is the predecessor of the prev set,
        // found in the working set here...
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set_prev, &erasure_metas)
                .unwrap()
                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
            Some((erasure_set_0, erasure_meta_0))
        );
        erasure_metas.remove(&erasure_set_0);
        // ...and from the on-disk column after removal (it was persisted
        // above).
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set_prev, &erasure_metas)
                .unwrap()
                .map(|(erasure_set, erasure_meta)| (erasure_set, erasure_meta.into_owned())),
            Some((erasure_set_0, erasure_meta_0))
        );

        // Fresh blockstore: a candidate whose next_fec_set_index matches but
        // which lives in a DIFFERENT slot must be rejected — as a dirty
        // working-set entry...
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        erasure_metas.clear();
        erasure_metas.insert(
            erasure_set_prev_slot,
            WorkingEntry::Dirty(erasure_meta_prev_slot),
        );
        assert_eq!(
            erasure_meta_prev_slot.next_fec_set_index().unwrap(),
            fec_set_index
        );
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap(),
            None,
        );
        // ...and as a clean, persisted entry.
        erasure_metas.insert(
            erasure_set_prev_slot,
            WorkingEntry::Clean(erasure_meta_prev_slot),
        );
        blockstore
            .put_erasure_meta(erasure_set_prev_slot, &erasure_meta_prev_slot)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &erasure_metas)
                .unwrap(),
            None,
        );
    }
11038
11039 #[test]
11040 fn test_chained_merkle_root_consistency_backwards() {
11041 let ledger_path = get_tmp_ledger_path_auto_delete!();
11043 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11044
11045 let parent_slot = 0;
11046 let slot = 1;
11047 let fec_set_index = 0;
11048 let (data_shreds, coding_shreds, leader_schedule) =
11049 setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11050 let coding_shred = coding_shreds[0].clone();
11051 let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11052
11053 assert!(blockstore
11054 .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
11055 .is_empty());
11056
11057 let merkle_root = coding_shred.merkle_root().unwrap();
11058
11059 let (data_shreds, coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
11061 slot,
11062 parent_slot,
11063 10,
11064 next_fec_set_index,
11065 merkle_root,
11066 );
11067 let data_shred = data_shreds[0].clone();
11068 let coding_shred = coding_shreds[0].clone();
11069 assert!(blockstore
11070 .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11071 .is_empty());
11072 assert!(blockstore
11073 .insert_shred_return_duplicate(data_shred, &leader_schedule,)
11074 .is_empty());
11075 }
11076
11077 #[test]
11078 fn test_chained_merkle_root_consistency_forwards() {
11079 let ledger_path = get_tmp_ledger_path_auto_delete!();
11081 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11082
11083 let parent_slot = 0;
11084 let slot = 1;
11085 let fec_set_index = 0;
11086 let (data_shreds, coding_shreds, leader_schedule) =
11087 setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11088 let coding_shred = coding_shreds[0].clone();
11089 let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11090
11091 let merkle_root = coding_shred.merkle_root().unwrap();
11093 let (_, next_coding_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle(
11094 slot,
11095 parent_slot,
11096 10,
11097 next_fec_set_index,
11098 merkle_root,
11099 );
11100 let next_coding_shred = next_coding_shreds[0].clone();
11101
11102 assert!(blockstore
11103 .insert_shred_return_duplicate(next_coding_shred, &leader_schedule,)
11104 .is_empty());
11105
11106 assert!(blockstore
11108 .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11109 .is_empty());
11110 }
11111
11112 #[test]
11113 fn test_chained_merkle_root_across_slots_backwards() {
11114 let ledger_path = get_tmp_ledger_path_auto_delete!();
11115 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11116
11117 let parent_slot = 0;
11118 let slot = 1;
11119 let fec_set_index = 0;
11120 let (data_shreds, _, leader_schedule) =
11121 setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11122 let data_shred = data_shreds[0].clone();
11123
11124 assert!(blockstore
11125 .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
11126 .is_empty());
11127
11128 let merkle_root = Hash::new_unique();
11130 assert!(merkle_root != data_shred.merkle_root().unwrap());
11131 let (next_slot_data_shreds, next_slot_coding_shreds, leader_schedule) =
11132 setup_erasure_shreds_with_index_and_chained_merkle(
11133 slot + 1,
11134 slot,
11135 10,
11136 fec_set_index,
11137 merkle_root,
11138 );
11139 let next_slot_data_shred = next_slot_data_shreds[0].clone();
11140 let next_slot_coding_shred = next_slot_coding_shreds[0].clone();
11141 assert!(blockstore
11142 .insert_shred_return_duplicate(next_slot_coding_shred, &leader_schedule,)
11143 .is_empty());
11144 assert!(blockstore
11145 .insert_shred_return_duplicate(next_slot_data_shred, &leader_schedule)
11146 .is_empty());
11147 }
11148
11149 #[test]
11150 fn test_chained_merkle_root_across_slots_forwards() {
11151 let ledger_path = get_tmp_ledger_path_auto_delete!();
11152 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11153
11154 let parent_slot = 0;
11155 let slot = 1;
11156 let fec_set_index = 0;
11157 let (_, coding_shreds, _) =
11158 setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11159 let coding_shred = coding_shreds[0].clone();
11160
11161 let merkle_root = Hash::new_unique();
11163 assert!(merkle_root != coding_shred.merkle_root().unwrap());
11164 let (next_slot_data_shreds, _, leader_schedule) =
11165 setup_erasure_shreds_with_index_and_chained_merkle(
11166 slot + 1,
11167 slot,
11168 10,
11169 fec_set_index,
11170 merkle_root,
11171 );
11172 let next_slot_data_shred = next_slot_data_shreds[0].clone();
11173
11174 assert!(blockstore
11175 .insert_shred_return_duplicate(next_slot_data_shred.clone(), &leader_schedule,)
11176 .is_empty());
11177
11178 assert!(blockstore
11180 .insert_shred_return_duplicate(coding_shred, &leader_schedule,)
11181 .is_empty());
11182 }
11183
    #[test]
    fn test_chained_merkle_root_inconsistency_backwards_insert_code() {
        // The earlier set's coding shred is stored first; the next set
        // chains off a DIFFERENT merkle root, so inserting the next set's
        // coding shred must surface a ChainedMerkleRootConflict proof.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let fec_set_index = 0;
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        let coding_shred_previous = coding_shreds[0].clone();
        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;

        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
            .is_empty());

        // Chain the next set off a root that does NOT match the stored
        // coding shred's merkle root.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                next_fec_set_index,
                merkle_root,
            );
        let data_shred = data_shreds[0].clone();
        let coding_shred = coding_shreds[0].clone();
        // The conflicting shred is reported together with the previously
        // stored shred's payload as evidence.
        let duplicate_shreds =
            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
        assert_eq!(duplicate_shreds.len(), 1);
        assert_eq!(
            duplicate_shreds[0],
            PossibleDuplicateShred::ChainedMerkleRootConflict(
                coding_shred,
                coding_shred_previous.into_payload()
            )
        );

        // The sibling data shred from the same set produces no further
        // proof — presumably the conflict is only reported once per
        // erasure set; confirm against the insert path.
        assert!(blockstore
            .insert_shred_return_duplicate(data_shred.clone(), &leader_schedule,)
            .is_empty());
    }
11231
    #[test]
    fn test_chained_merkle_root_inconsistency_backwards_insert_data() {
        // Same scenario as the insert_code variant, but the next set's DATA
        // shred is the one that triggers the conflict proof.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let fec_set_index = 0;
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        let coding_shred_previous = coding_shreds[0].clone();
        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;

        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
            .is_empty());

        // Chain the next set off a root that does NOT match the stored
        // coding shred's merkle root.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                next_fec_set_index,
                merkle_root,
            );
        let data_shred = data_shreds[0].clone();
        let coding_shred = coding_shreds[0].clone();

        // The conflicting data shred is reported with the stored coding
        // shred's payload as evidence.
        let duplicate_shreds =
            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);
        assert_eq!(duplicate_shreds.len(), 1);
        assert_eq!(
            duplicate_shreds[0],
            PossibleDuplicateShred::ChainedMerkleRootConflict(
                data_shred,
                coding_shred_previous.into_payload(),
            )
        );
        // The sibling coding shred produces no further proof — presumably
        // the conflict is only reported once per erasure set; confirm.
        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule,)
            .is_empty());
    }
11279
11280 #[test]
11281 fn test_chained_merkle_root_inconsistency_forwards() {
11282 let ledger_path = get_tmp_ledger_path_auto_delete!();
11284 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
11285
11286 let parent_slot = 0;
11287 let slot = 1;
11288 let fec_set_index = 0;
11289 let (data_shreds, coding_shreds, leader_schedule) =
11290 setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
11291 let coding_shred = coding_shreds[0].clone();
11292 let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
11293
11294 let merkle_root = Hash::new_unique();
11296 assert!(merkle_root != coding_shred.merkle_root().unwrap());
11297 let (next_data_shreds, _, leader_schedule_next) =
11298 setup_erasure_shreds_with_index_and_chained_merkle(
11299 slot,
11300 parent_slot,
11301 10,
11302 next_fec_set_index,
11303 merkle_root,
11304 );
11305 let next_data_shred = next_data_shreds[0].clone();
11306
11307 assert!(blockstore
11308 .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next,)
11309 .is_empty());
11310
11311 let duplicate_shreds =
11313 blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);
11314
11315 assert_eq!(duplicate_shreds.len(), 1);
11316 assert_eq!(
11317 duplicate_shreds[0],
11318 PossibleDuplicateShred::ChainedMerkleRootConflict(
11319 coding_shred,
11320 next_data_shred.into_payload(),
11321 )
11322 );
11323 }
11324
    #[test]
    fn test_chained_merkle_root_inconsistency_both() {
        // The middle FEC set both chains off a wrong root AND is chained
        // from with a wrong root: its data shred conflicts backwards
        // (against the previous set's coding shred) and its coding shred
        // conflicts forwards (against the next set's data shred).
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let prev_fec_set_index = 0;
        let (prev_data_shreds, prev_coding_shreds, leader_schedule_prev) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
        let prev_coding_shred = prev_coding_shreds[0].clone();
        let fec_set_index = prev_fec_set_index + prev_data_shreds.len() as u32;

        // Middle set: chained off a root that does NOT match the previous
        // set.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != prev_coding_shred.merkle_root().unwrap());
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                fec_set_index,
                merkle_root,
            );
        let data_shred = data_shreds[0].clone();
        let coding_shred = coding_shreds[0].clone();
        // NOTE(review): advances by prev_data_shreds.len() rather than
        // data_shreds.len(); both sets are built from 10 entries so the
        // lengths presumably match — confirm.
        let next_fec_set_index = fec_set_index + prev_data_shreds.len() as u32;

        // Next set: chained off a root that does NOT match the middle set.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != data_shred.merkle_root().unwrap());
        let (next_data_shreds, _, leader_schedule_next) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                next_fec_set_index,
                merkle_root,
            );
        let next_data_shred = next_data_shreds[0].clone();

        // Insert the outer sets first; no conflicts are possible yet.
        assert!(blockstore
            .insert_shred_return_duplicate(prev_coding_shred.clone(), &leader_schedule_prev,)
            .is_empty());

        assert!(blockstore
            .insert_shred_return_duplicate(next_data_shred.clone(), &leader_schedule_next)
            .is_empty());

        // The middle set's data shred conflicts backwards...
        let duplicate_shreds =
            blockstore.insert_shred_return_duplicate(data_shred.clone(), &leader_schedule);

        assert_eq!(duplicate_shreds.len(), 1);
        assert_eq!(
            duplicate_shreds[0],
            PossibleDuplicateShred::ChainedMerkleRootConflict(
                data_shred,
                prev_coding_shred.into_payload(),
            )
        );

        // ...and its coding shred conflicts forwards.
        let duplicate_shreds =
            blockstore.insert_shred_return_duplicate(coding_shred.clone(), &leader_schedule);

        assert_eq!(duplicate_shreds.len(), 1);
        assert_eq!(
            duplicate_shreds[0],
            PossibleDuplicateShred::ChainedMerkleRootConflict(
                coding_shred,
                next_data_shred.into_payload(),
            )
        );
    }
11405
    #[test]
    fn test_chained_merkle_root_upgrade_inconsistency_backwards() {
        // Simulates a blockstore populated before merkle root metas were
        // tracked: with the previous set's merkle root meta erased, a later
        // set chained off a mismatching root has nothing on record to
        // conflict with, so no duplicate proofs are produced.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let fec_set_index = 0;
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        // NOTE(review): index [1] rather than [0] — presumably deliberate
        // given that first_received_coding_shred_index is cleared below;
        // confirm intent.
        let coding_shred_previous = coding_shreds[1].clone();
        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;

        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
            .is_empty());

        // Erase the traces a modern insert leaves behind: first clear the
        // erasure meta's record of the first received coding shred...
        let mut erasure_meta = blockstore
            .erasure_meta(coding_shred_previous.erasure_set())
            .unwrap()
            .unwrap();
        erasure_meta.clear_first_received_coding_shred_index();
        blockstore
            .put_erasure_meta(coding_shred_previous.erasure_set(), &erasure_meta)
            .unwrap();
        // ...then delete the slot's merkle root metas entirely.
        let mut write_batch = blockstore.get_write_batch().unwrap();
        blockstore
            .merkle_root_meta_cf
            .delete_range_in_batch(&mut write_batch, slot, slot)
            .unwrap();
        blockstore.write_batch(write_batch).unwrap();
        assert!(blockstore
            .merkle_root_meta(coding_shred_previous.erasure_set())
            .unwrap()
            .is_none());

        // Chain the next set off a root that does NOT match; without a
        // stored merkle root meta there is nothing to conflict with.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                next_fec_set_index,
                merkle_root,
            );
        let data_shred = data_shreds[0].clone();
        let coding_shred = coding_shreds[0].clone();
        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
            .is_empty());
        assert!(blockstore
            .insert_shred_return_duplicate(data_shred, &leader_schedule,)
            .is_empty());
    }
11466
    #[test]
    fn test_chained_merkle_root_upgrade_inconsistency_forwards() {
        // Forward direction of the upgrade scenario: the next set (chained
        // off a mismatching root) is inserted, then its merkle root meta is
        // deleted to mimic a pre-upgrade blockstore. Inserting the earlier
        // coding shred afterwards finds no meta to conflict with.
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        let fec_set_index = 0;
        let (data_shreds, coding_shreds, leader_schedule) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        let coding_shred = coding_shreds[0].clone();
        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;

        // Chain the next set off a root that does NOT match the first set.
        let merkle_root = Hash::new_unique();
        assert!(merkle_root != coding_shred.merkle_root().unwrap());
        let (next_data_shreds, next_coding_shreds, leader_schedule_next) =
            setup_erasure_shreds_with_index_and_chained_merkle(
                slot,
                parent_slot,
                10,
                next_fec_set_index,
                merkle_root,
            );
        let next_data_shred = next_data_shreds[0].clone();

        assert!(blockstore
            .insert_shred_return_duplicate(next_data_shred, &leader_schedule_next,)
            .is_empty());

        // Wipe the slot's merkle root metas, as if written pre-upgrade.
        let mut write_batch = blockstore.get_write_batch().unwrap();
        blockstore
            .merkle_root_meta_cf
            .delete_range_in_batch(&mut write_batch, slot, slot)
            .unwrap();
        blockstore.write_batch(write_batch).unwrap();
        assert!(blockstore
            .merkle_root_meta(next_coding_shreds[0].erasure_set())
            .unwrap()
            .is_none());

        // No merkle root meta on record => no conflict to report.
        assert!(blockstore
            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
            .is_empty());
    }
11517
    #[test]
    fn test_check_last_fec_set() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;

        // Single FEC set whose data shreds start at index 30.
        let fec_set_index = 30;
        let (data_shreds, _, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
        let total_shreds = fec_set_index as u64 + data_shreds.len() as u64;

        // 10 entries pad out to exactly one full FEC block of data shreds.
        assert_eq!(data_shreds.len(), DATA_SHREDS_PER_FEC_BLOCK);

        // Slot 0 has no metadata at all.
        assert_matches!(
            blockstore.check_last_fec_set(0),
            Err(BlockstoreError::SlotUnavailable)
        );

        // Without the final shred the slot's last_index is unknown.
        blockstore
            .insert_shreds(
                data_shreds[0..DATA_SHREDS_PER_FEC_BLOCK - 1].to_vec(),
                None,
                false,
            )
            .unwrap();
        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert!(meta.last_index.is_none());
        assert_matches!(
            blockstore.check_last_fec_set(slot),
            Err(BlockstoreError::UnknownLastIndex(_))
        );
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // With the first shred missing, last_index is known but the last
        // FEC set cannot be fully read back.
        blockstore
            .insert_shreds(data_shreds[1..].to_vec(), None, false)
            .unwrap();
        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert_eq!(meta.last_index, Some(total_shreds - 1));
        assert_matches!(
            blockstore.check_last_fec_set(slot),
            Err(BlockstoreError::MissingShred(_, _))
        );
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Complete slot: the last FEC set's merkle root is returned and the
        // retransmitter-signed flag is set.
        let block_id = data_shreds[0].merkle_root().unwrap();
        blockstore.insert_shreds(data_shreds, None, false).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert_eq!(results.last_fec_set_merkle_root, Some(block_id));
        assert!(results.is_retransmitter_signed);
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Two chained FEC sets built with the final flag `false` —
        // presumably "retransmitter signed"; the check must then report the
        // slot's last set as unsigned while still returning its root.
        let mut fec_set_index = 0;
        let (first_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                10,
                fec_set_index,
                Hash::default(),
                false,
            );
        let merkle_root = first_data_shreds[0].merkle_root().unwrap();
        fec_set_index += first_data_shreds.len() as u32;
        let (last_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                40,
                fec_set_index,
                merkle_root,
                false,
            );
        let last_index = last_data_shreds.last().unwrap().index();
        let total_shreds = first_data_shreds.len() + last_data_shreds.len();
        assert_eq!(total_shreds, 2 * DATA_SHREDS_PER_FEC_BLOCK);
        let merkle_root = last_data_shreds[0].merkle_root().unwrap();
        blockstore
            .insert_shreds(first_data_shreds, None, false)
            .unwrap();
        blockstore
            .insert_shreds(last_data_shreds, None, false)
            .unwrap();
        // The batches are not flagged last-in-slot, so mark the slot
        // complete by patching the meta directly.
        let mut slot_meta = blockstore.meta(slot).unwrap().unwrap();
        slot_meta.last_index = Some(last_index as u64);
        blockstore.put_meta(slot, &slot_meta).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert_eq!(results.last_fec_set_merkle_root, Some(merkle_root));
        assert!(!results.is_retransmitter_signed);
        blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap();

        // Same scenario with larger batches (100 entries each); the last
        // FEC set is exactly one full block.
        let mut fec_set_index = 0;
        let (first_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                100,
                fec_set_index,
                Hash::default(),
                false,
            );
        let merkle_root = first_data_shreds[0].merkle_root().unwrap();
        fec_set_index += first_data_shreds.len() as u32;
        let (last_data_shreds, _, _) =
            setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot(
                slot,
                parent_slot,
                100,
                fec_set_index,
                merkle_root,
                false,
            );
        let last_index = last_data_shreds.last().unwrap().index();
        let total_shreds = first_data_shreds.len() + last_data_shreds.len();
        assert_eq!(last_data_shreds.len(), DATA_SHREDS_PER_FEC_BLOCK);
        assert_eq!(total_shreds, 2 * DATA_SHREDS_PER_FEC_BLOCK);
        let merkle_root = last_data_shreds[0].merkle_root().unwrap();
        blockstore
            .insert_shreds(first_data_shreds, None, false)
            .unwrap();
        blockstore
            .insert_shreds(last_data_shreds, None, false)
            .unwrap();
        // Mark the slot complete by hand, as above.
        let mut slot_meta = blockstore.meta(slot).unwrap().unwrap();
        slot_meta.last_index = Some(last_index as u64);
        blockstore.put_meta(slot, &slot_meta).unwrap();
        let results = blockstore.check_last_fec_set(slot).unwrap();
        assert_eq!(results.last_fec_set_merkle_root, Some(merkle_root));
        assert!(!results.is_retransmitter_signed);
    }
11658
11659 #[test]
11660 fn test_last_fec_set_check_results() {
11661 let enabled_feature_set = FeatureSet::all_enabled();
11662 let full_only = FeatureSet::default();
11663
11664 let results = LastFECSetCheckResults {
11665 last_fec_set_merkle_root: None,
11666 is_retransmitter_signed: false,
11667 };
11668 assert_matches!(
11669 results.get_last_fec_set_merkle_root(&enabled_feature_set),
11670 Err(BlockstoreProcessorError::IncompleteFinalFecSet)
11671 );
11672 assert_matches!(
11673 results.get_last_fec_set_merkle_root(&full_only),
11674 Err(BlockstoreProcessorError::IncompleteFinalFecSet)
11675 );
11676
11677 let block_id = Hash::new_unique();
11678 let results = LastFECSetCheckResults {
11679 last_fec_set_merkle_root: Some(block_id),
11680 is_retransmitter_signed: false,
11681 };
11682 assert_matches!(
11683 results.get_last_fec_set_merkle_root(&enabled_feature_set),
11684 Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet)
11685 );
11686 assert_eq!(
11687 results.get_last_fec_set_merkle_root(&full_only).unwrap(),
11688 Some(block_id)
11689 );
11690
11691 let results = LastFECSetCheckResults {
11692 last_fec_set_merkle_root: None,
11693 is_retransmitter_signed: true,
11694 };
11695 assert_matches!(
11696 results.get_last_fec_set_merkle_root(&enabled_feature_set),
11697 Err(BlockstoreProcessorError::IncompleteFinalFecSet)
11698 );
11699 assert_matches!(
11700 results.get_last_fec_set_merkle_root(&full_only),
11701 Err(BlockstoreProcessorError::IncompleteFinalFecSet)
11702 );
11703
11704 let block_id = Hash::new_unique();
11705 let results = LastFECSetCheckResults {
11706 last_fec_set_merkle_root: Some(block_id),
11707 is_retransmitter_signed: true,
11708 };
11709 for feature_set in [enabled_feature_set, full_only] {
11710 assert_eq!(
11711 results.get_last_fec_set_merkle_root(&feature_set).unwrap(),
11712 Some(block_id)
11713 );
11714 }
11715 }
11716
11717 #[test]
11718 fn test_write_transaction_memos() {
11719 let ledger_path = get_tmp_ledger_path_auto_delete!();
11720 let blockstore = Blockstore::open(ledger_path.path())
11721 .expect("Expected to be able to open database ledger");
11722 let signature: Signature = Signature::new_unique();
11723
11724 blockstore
11725 .write_transaction_memos(&signature, 4, "test_write_transaction_memos".to_string())
11726 .unwrap();
11727
11728 let memo = blockstore
11729 .read_transaction_memos(signature, 4)
11730 .expect("Expected to find memo");
11731 assert_eq!(memo, Some("test_write_transaction_memos".to_string()));
11732 }
11733
11734 #[test]
11735 fn test_add_transaction_memos_to_batch() {
11736 let ledger_path = get_tmp_ledger_path_auto_delete!();
11737 let blockstore = Blockstore::open(ledger_path.path())
11738 .expect("Expected to be able to open database ledger");
11739 let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
11740 let mut memos_batch = blockstore.get_write_batch().unwrap();
11741
11742 blockstore
11743 .add_transaction_memos_to_batch(
11744 &signatures[0],
11745 4,
11746 "test_write_transaction_memos1".to_string(),
11747 &mut memos_batch,
11748 )
11749 .unwrap();
11750
11751 blockstore
11752 .add_transaction_memos_to_batch(
11753 &signatures[1],
11754 5,
11755 "test_write_transaction_memos2".to_string(),
11756 &mut memos_batch,
11757 )
11758 .unwrap();
11759
11760 blockstore.write_batch(memos_batch).unwrap();
11761
11762 let memo1 = blockstore
11763 .read_transaction_memos(signatures[0], 4)
11764 .expect("Expected to find memo");
11765 assert_eq!(memo1, Some("test_write_transaction_memos1".to_string()));
11766
11767 let memo2 = blockstore
11768 .read_transaction_memos(signatures[1], 5)
11769 .expect("Expected to find memo");
11770 assert_eq!(memo2, Some("test_write_transaction_memos2".to_string()));
11771 }
11772
11773 #[test]
11774 fn test_write_transaction_status() {
11775 let ledger_path = get_tmp_ledger_path_auto_delete!();
11776 let blockstore = Blockstore::open(ledger_path.path())
11777 .expect("Expected to be able to open database ledger");
11778 let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
11779 let keys_with_writable: Vec<(Pubkey, bool)> =
11780 vec![(Pubkey::new_unique(), true), (Pubkey::new_unique(), false)];
11781 let slot = 5;
11782
11783 blockstore
11784 .write_transaction_status(
11785 slot,
11786 signatures[0],
11787 keys_with_writable
11788 .iter()
11789 .map(|&(ref pubkey, writable)| (pubkey, writable)),
11790 TransactionStatusMeta {
11791 fee: 4200,
11792 ..TransactionStatusMeta::default()
11793 },
11794 0,
11795 )
11796 .unwrap();
11797
11798 let tx_status = blockstore
11799 .read_transaction_status((signatures[0], slot))
11800 .unwrap()
11801 .unwrap();
11802 assert_eq!(tx_status.fee, 4200);
11803 }
11804
11805 #[test]
11806 fn test_add_transaction_status_to_batch() {
11807 let ledger_path = get_tmp_ledger_path_auto_delete!();
11808 let blockstore = Blockstore::open(ledger_path.path())
11809 .expect("Expected to be able to open database ledger");
11810 let signatures: Vec<Signature> = (0..2).map(|_| Signature::new_unique()).collect();
11811 let keys_with_writable: Vec<Vec<(Pubkey, bool)>> = (0..2)
11812 .map(|_| vec![(Pubkey::new_unique(), true), (Pubkey::new_unique(), false)])
11813 .collect();
11814 let slot = 5;
11815 let mut status_batch = blockstore.get_write_batch().unwrap();
11816
11817 for (tx_idx, signature) in signatures.iter().enumerate() {
11818 blockstore
11819 .add_transaction_status_to_batch(
11820 slot,
11821 *signature,
11822 keys_with_writable[tx_idx].iter().map(|(k, v)| (k, *v)),
11823 TransactionStatusMeta {
11824 fee: 5700 + tx_idx as u64,
11825 status: if tx_idx % 2 == 0 {
11826 Ok(())
11827 } else {
11828 Err(TransactionError::InsufficientFundsForFee)
11829 },
11830 ..TransactionStatusMeta::default()
11831 },
11832 tx_idx,
11833 &mut status_batch,
11834 )
11835 .unwrap();
11836 }
11837
11838 blockstore.write_batch(status_batch).unwrap();
11839
11840 let tx_status1 = blockstore
11841 .read_transaction_status((signatures[0], slot))
11842 .unwrap()
11843 .unwrap();
11844 assert_eq!(tx_status1.fee, 5700);
11845 assert_eq!(tx_status1.status, Ok(()));
11846
11847 let tx_status2 = blockstore
11848 .read_transaction_status((signatures[1], slot))
11849 .unwrap()
11850 .unwrap();
11851 assert_eq!(tx_status2.fee, 5701);
11852 assert_eq!(
11853 tx_status2.status,
11854 Err(TransactionError::InsufficientFundsForFee)
11855 );
11856 }
11857}