solana_ledger/blockstore_processor.rs

use {
    crate::{
        block_error::BlockError,
        blockstore::{Blockstore, BlockstoreError},
        blockstore_meta::SlotMeta,
        entry_notifier_service::{EntryNotification, EntryNotifierSender},
        leader_schedule_cache::LeaderScheduleCache,
        transaction_balances::compile_collected_balances,
        use_snapshot_archives_at_startup::UseSnapshotArchivesAtStartup,
    },
    chrono_humanize::{Accuracy, HumanTime, Tense},
    crossbeam_channel::Sender,
    itertools::Itertools,
    log::*,
    rayon::{prelude::*, ThreadPool},
    scopeguard::defer,
    solana_accounts_db::{
        accounts_db::AccountsDbConfig, accounts_update_notifier_interface::AccountsUpdateNotifier,
    },
    solana_clock::{Slot, MAX_PROCESSING_AGE},
    solana_cost_model::{cost_model::CostModel, transaction_cost::TransactionCost},
    solana_entry::entry::{
        self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
    },
    solana_genesis_config::GenesisConfig,
    solana_hash::Hash,
    solana_keypair::Keypair,
    solana_measure::{measure::Measure, measure_us},
    solana_metrics::datapoint_error,
    solana_pubkey::Pubkey,
    solana_runtime::{
        bank::{Bank, PreCommitResult, TransactionBalancesSet},
        bank_forks::{BankForks, SetRootError},
        bank_utils,
        commitment::VOTE_THRESHOLD_SIZE,
        dependency_tracker::DependencyTracker,
        installed_scheduler_pool::BankWithScheduler,
        prioritization_fee_cache::PrioritizationFeeCache,
        runtime_config::RuntimeConfig,
        snapshot_config::SnapshotConfig,
        snapshot_controller::SnapshotController,
        transaction_batch::{OwnedOrBorrowed, TransactionBatch},
        vote_sender_types::ReplayVoteSender,
    },
    solana_runtime_transaction::{
        runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta,
    },
    solana_signature::Signature,
    solana_svm::{
        transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions},
        transaction_processing_result::ProcessedTransaction,
        transaction_processor::ExecutionRecordingConfig,
    },
    solana_svm_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings},
    solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction},
    solana_transaction::{
        sanitized::SanitizedTransaction, versioned::VersionedTransaction,
        TransactionVerificationMode,
    },
    solana_transaction_error::{TransactionError, TransactionResult as Result},
    solana_transaction_status::token_balances::TransactionTokenBalancesSet,
    solana_vote::vote_account::VoteAccountsHashMap,
    std::{
        borrow::Cow,
        collections::{HashMap, HashSet},
        num::Saturating,
        ops::Index,
        path::PathBuf,
        result,
        sync::{atomic::AtomicBool, Arc, Mutex, RwLock},
        time::{Duration, Instant},
        vec::Drain,
    },
    thiserror::Error,
    ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen},
};
#[cfg(feature = "dev-context-only-utils")]
use {qualifier_attr::qualifiers, solana_runtime::bank::HashOverrides};

pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMMessage> {
    pub batch: TransactionBatch<'a, 'b, Tx>,
    pub transaction_indexes: Vec<usize>,
}

// `TransactionBatchWithIndexes` but without the `Drop` that prevents
// us from nicely unwinding these with manual unlocking.
pub struct LockedTransactionsWithIndexes<Tx: SVMMessage> {
    lock_results: Vec<Result<()>>,
    transactions: Vec<RuntimeTransaction<Tx>>,
    starting_index: usize,
}

struct ReplayEntry {
    entry: EntryType<RuntimeTransaction<SanitizedTransaction>>,
    starting_index: usize,
}

fn first_err(results: &[Result<()>]) -> Result<()> {
    for r in results {
        if r.is_err() {
            return r.clone();
        }
    }
    Ok(())
}
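
// Illustrative sketch only (not part of the upstream module): `first_err`
// returns a clone of the first `Err` in slice order, so the earliest failure
// wins regardless of any later errors.
//
//     let results: Vec<Result<()>> = vec![
//         Ok(()),
//         Err(TransactionError::AccountInUse),
//         Err(TransactionError::AlreadyProcessed),
//     ];
//     assert_eq!(first_err(&results), Err(TransactionError::AccountInUse));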

// Includes transaction signature for unit-testing
fn do_get_first_error<T, Tx: SVMTransaction>(
    batch: &TransactionBatch<Tx>,
    results: &[Result<T>],
) -> Option<(Result<()>, Signature)> {
    let mut first_err = None;
    for (result, transaction) in results.iter().zip(batch.sanitized_transactions()) {
        if let Err(err) = result {
            if first_err.is_none() {
                first_err = Some((Err(err.clone()), *transaction.signature()));
            }
            warn!("Unexpected validator error: {err:?}, transaction: {transaction:?}");
            datapoint_error!(
                "validator_process_entry_error",
                (
                    "error",
                    format!("error: {err:?}, transaction: {transaction:?}"),
                    String
                )
            );
        }
    }
    first_err
}

fn get_first_error<T, Tx: SVMTransaction>(
    batch: &TransactionBatch<Tx>,
    commit_results: &[Result<T>],
) -> Result<()> {
    do_get_first_error(batch, commit_results)
        .map(|(error, _signature)| error)
        .unwrap_or(Ok(()))
}

fn create_thread_pool(num_threads: usize) -> ThreadPool {
    rayon::ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .thread_name(|i| format!("solReplayTx{i:02}"))
        .build()
        .expect("new rayon threadpool")
}

pub fn execute_batch<'a>(
    batch: &'a TransactionBatchWithIndexes<impl TransactionWithMeta>,
    bank: &'a Arc<Bank>,
    transaction_status_sender: Option<&'a TransactionStatusSender>,
    replay_vote_sender: Option<&'a ReplayVoteSender>,
    timings: &'a mut ExecuteTimings,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &'a PrioritizationFeeCache,
    extra_pre_commit_callback: Option<
        impl FnOnce(&Result<ProcessedTransaction>) -> Result<Option<usize>>,
    >,
) -> Result<()> {
    let TransactionBatchWithIndexes {
        batch,
        transaction_indexes,
    } = batch;

    // extra_pre_commit_callback allows for reuse of this function between the
    // unified scheduler block production path and block verification path(s)
    //   Some(_) => unified scheduler block production path
    //   None    => block verification path(s)
    let block_verification = extra_pre_commit_callback.is_none();
    let record_transaction_meta = transaction_status_sender.is_some();
    let mut transaction_indexes = Cow::from(transaction_indexes);

    let pre_commit_callback = |_timings: &mut _, processing_results: &_| -> PreCommitResult {
        match extra_pre_commit_callback {
            None => {
                // We're entering into one of the block-verification methods.
                get_first_error(batch, processing_results)?;
                Ok(None)
            }
            Some(extra_pre_commit_callback) => {
                // We're entering into the block-production unified scheduler special case...
                // `processing_results` should always contain exactly one result in that case.
                let [result] = processing_results else {
                    panic!("unexpected result count: {}", processing_results.len());
                };
                // transaction_indexes is intended to be populated later, so a barely-initialized
                // vec should be provided.
                assert!(transaction_indexes.is_empty());

                // From now on, we need to freeze-lock the tpu bank, in order to prevent it from
                // freezing in the middle of this code path. Otherwise, the assertion at the start
                // of commit_transactions() would trigger a panic, because that's a fatal runtime
                // invariant violation.
                let freeze_lock = bank.freeze_lock();

                // `result` won't be examined at all here. Rather, `extra_pre_commit_callback` is
                // responsible for all result handling, including the very basic precondition of
                // successful execution of transactions as well.
                let committed_index = extra_pre_commit_callback(result)?;

                // The callback succeeded. Optionally, update transaction_indexes as well.
                // Refer to TaskHandler::handle()'s transaction_indexes initialization for further
                // background.
                if let Some(index) = committed_index {
                    let transaction_indexes = transaction_indexes.to_mut();
                    // Adjust the empty new vec with the exact needed capacity. Otherwise, excess
                    // capacity would be reserved on `.push()`.
                    transaction_indexes.reserve_exact(1);
                    transaction_indexes.push(index);
                }
                // At this point, poh recording should have succeeded, so it's guaranteed that the
                // bank hasn't been frozen yet and we're still holding the lock. So, it's okay to
                // pass down freeze_lock without any introspection here, to be unconditionally
                // dropped after commit_transactions(). This reasoning is the same as in
                // solana_core::banking_stage::Consumer::execute_and_commit_transactions_locked()
                Ok(Some(freeze_lock))
            }
        }
    };

    let (commit_results, balance_collector) = batch
        .bank()
        .load_execute_and_commit_transactions_with_pre_commit_callback(
            batch,
            MAX_PROCESSING_AGE,
            ExecutionRecordingConfig::new_single_setting(transaction_status_sender.is_some()),
            timings,
            log_messages_bytes_limit,
            pre_commit_callback,
        )?;

    let mut check_block_costs_elapsed = Measure::start("check_block_costs");
    let tx_costs = if block_verification {
        // Block verification (including unified scheduler) case;
        // collect and check transaction costs
        let tx_costs = get_transaction_costs(bank, &commit_results, batch.sanitized_transactions());
        check_block_cost_limits(bank, &tx_costs).map(|_| tx_costs)
    } else if record_transaction_meta {
        // Unified scheduler block production case;
        // the scheduler will track costs elsewhere, but costs are recalculated
        // here so they can be recorded with other transaction metadata
        Ok(get_transaction_costs(
            bank,
            &commit_results,
            batch.sanitized_transactions(),
        ))
    } else {
        // Unified scheduler block production without metadata recording
        Ok(vec![])
    };
    check_block_costs_elapsed.stop();
    timings.saturating_add_in_place(
        ExecuteTimingType::CheckBlockLimitsUs,
        check_block_costs_elapsed.as_us(),
    );
    let tx_costs = tx_costs?;

    bank_utils::find_and_send_votes(
        batch.sanitized_transactions(),
        &commit_results,
        replay_vote_sender,
    );

    let committed_transactions = commit_results
        .iter()
        .zip(batch.sanitized_transactions())
        .filter_map(|(commit_result, tx)| commit_result.was_committed().then_some(tx));
    prioritization_fee_cache.update(bank, committed_transactions);

    if let Some(transaction_status_sender) = transaction_status_sender {
        let transactions: Vec<SanitizedTransaction> = batch
            .sanitized_transactions()
            .iter()
            .map(|tx| tx.as_sanitized_transaction().into_owned())
            .collect();

        // There are two cases where balance_collector could be None:
        // * Balance recording is disabled. If that were the case, there would
        //   be no TransactionStatusSender, and we would not be in this branch.
        // * The batch was aborted in its entirety in SVM. In that case, nothing
        //   would have been committed.
        // Therefore this should always be true.
        debug_assert!(balance_collector.is_some());

        let (balances, token_balances) =
            compile_collected_balances(balance_collector.unwrap_or_default());

        // The length of the costs vector needs to be consistent with all other
        // vectors that are sent over (such as `transactions`). So, replace the
        // None elements with Some(0)
        let tx_costs = tx_costs
            .into_iter()
            .map(|tx_cost_option| tx_cost_option.map(|tx_cost| tx_cost.sum()).or(Some(0)))
            .collect();

        transaction_status_sender.send_transaction_status_batch(
            bank.slot(),
            transactions,
            commit_results,
            balances,
            token_balances,
            tx_costs,
            transaction_indexes.into_owned(),
        );
    }

    Ok(())
}
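
// Illustrative sketch only: the two invocation modes of execute_batch() above.
// The block verification path (see execute_batches_internal() below) passes no
// extra pre-commit callback, so the `None` arm of `pre_commit_callback` runs:
//
//     execute_batch(
//         transaction_batch,
//         bank,
//         transaction_status_sender,
//         replay_vote_sender,
//         &mut timings,
//         log_messages_bytes_limit,
//         prioritization_fee_cache,
//         None::<fn(&_) -> _>, // None => block verification path(s)
//     )?;
//
// The unified-scheduler block production path instead passes a single-transaction
// batch plus a `Some(callback)` that handles all result checking (e.g. recording
// into PoH) before commit and reports the committed transaction index, if any.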

// Get actual transaction execution costs from transaction commit results
fn get_transaction_costs<'a, Tx: TransactionWithMeta>(
    bank: &Bank,
    commit_results: &[TransactionCommitResult],
    sanitized_transactions: &'a [Tx],
) -> Vec<Option<TransactionCost<'a, Tx>>> {
    assert_eq!(sanitized_transactions.len(), commit_results.len());

    commit_results
        .iter()
        .zip(sanitized_transactions)
        .map(|(commit_result, tx)| {
            if let Ok(committed_tx) = commit_result {
                Some(CostModel::calculate_cost_for_executed_transaction(
                    tx,
                    committed_tx.executed_units,
                    committed_tx.loaded_account_stats.loaded_accounts_data_size,
                    &bank.feature_set,
                ))
            } else {
                None
            }
        })
        .collect()
}

fn check_block_cost_limits<Tx: TransactionWithMeta>(
    bank: &Bank,
    tx_costs: &[Option<TransactionCost<'_, Tx>>],
) -> Result<()> {
    let mut cost_tracker = bank.write_cost_tracker().unwrap();
    for tx_cost in tx_costs.iter().flatten() {
        cost_tracker
            .try_add(tx_cost)
            .map_err(TransactionError::from)?;
    }

    Ok(())
}
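
// Illustrative note: `try_add` enforces the block-level cost limits, and its
// error is converted into a `TransactionError` by the
// `map_err(TransactionError::from)` above. During block verification,
// execute_batch() propagates that error, so a block whose accumulated cost
// crosses a limit is rejected by replay. Sketch of the caller-side flow:
//
//     match check_block_cost_limits(&bank, &tx_costs) {
//         Ok(()) => { /* all costs fit within the block limits */ }
//         Err(err) => return Err(err), // e.g. a would-exceed-block-limit variant
//     }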

#[derive(Default)]
pub struct ExecuteBatchesInternalMetrics {
    execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>,
    total_batches_len: u64,
    execute_batches_us: u64,
}

impl ExecuteBatchesInternalMetrics {
    pub fn new_with_timings_from_all_threads(execute_timings: ExecuteTimings) -> Self {
        const DUMMY_THREAD_INDEX: usize = 999;
        let mut new = Self::default();
        new.execution_timings_per_thread.insert(
            DUMMY_THREAD_INDEX,
            ThreadExecuteTimings {
                execute_timings,
                ..ThreadExecuteTimings::default()
            },
        );
        new
    }
}

fn execute_batches_internal(
    bank: &Arc<Bank>,
    replay_tx_thread_pool: &ThreadPool,
    batches: &[TransactionBatchWithIndexes<RuntimeTransaction<SanitizedTransaction>>],
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<ExecuteBatchesInternalMetrics> {
    assert!(!batches.is_empty());
    let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
        Mutex::new(HashMap::new());

    let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed");
    let results: Vec<Result<()>> = replay_tx_thread_pool.install(|| {
        batches
            .into_par_iter()
            .map(|transaction_batch| {
                let transaction_count =
                    transaction_batch.batch.sanitized_transactions().len() as u64;
                let mut timings = ExecuteTimings::default();
                let (result, execute_batches_us) = measure_us!(execute_batch(
                    transaction_batch,
                    bank,
                    transaction_status_sender,
                    replay_vote_sender,
                    &mut timings,
                    log_messages_bytes_limit,
                    prioritization_fee_cache,
                    None::<fn(&_) -> _>,
                ));

                let thread_index = replay_tx_thread_pool.current_thread_index().unwrap();
                execution_timings_per_thread
                    .lock()
                    .unwrap()
                    .entry(thread_index)
                    .and_modify(|thread_execution_time| {
                        let ThreadExecuteTimings {
                            total_thread_us,
                            total_transactions_executed,
                            execute_timings: total_thread_execute_timings,
                        } = thread_execution_time;
                        *total_thread_us += execute_batches_us;
                        *total_transactions_executed += transaction_count;
                        total_thread_execute_timings
                            .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1);
                        total_thread_execute_timings.accumulate(&timings);
                    })
                    .or_insert(ThreadExecuteTimings {
                        total_thread_us: Saturating(execute_batches_us),
                        total_transactions_executed: Saturating(transaction_count),
                        execute_timings: timings,
                    });
                result
            })
            .collect()
    });
    execute_batches_elapsed.stop();

    first_err(&results)?;

    Ok(ExecuteBatchesInternalMetrics {
        execution_timings_per_thread: execution_timings_per_thread.into_inner().unwrap(),
        total_batches_len: batches.len() as u64,
        execute_batches_us: execute_batches_elapsed.as_us(),
    })
}

// This fn diverts the code path into two variants. Both must provide exactly the same set of
// validations. For this reason, this fn is deliberately inserted into the code path to be called
// inside process_entries(), so that Bank::prepare_sanitized_batch() has been called on all of the
// batches already, while minimizing code duplication (and thus divergent-behavior risk) at the
// cost of the acceptable overhead of meaningless buffering of batches for the scheduler variant.
//
// Also note that the scheduler variant can't implement the batch-level sanitization naively, due
// to the nature of individual tx processing. That's another reason for this particular placement
// of the divergence point in the code path (i.e. not one layer up with its own
// prepare_sanitized_batch() invocation).
fn process_batches(
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    locked_entries: impl ExactSizeIterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    batch_execution_timing: &mut BatchExecutionTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    if bank.has_installed_scheduler() {
        debug!(
            "process_batches()/schedule_batches_for_execution({} batches)",
            locked_entries.len()
        );
        // Scheduling usually succeeds (immediately returns `Ok(())`) here without being blocked on
        // the actual transaction executions.
        //
        // As an exception, this code path could propagate the transaction execution _errors of
        // previously-scheduled transactions_ to notify the replay stage. Then, the replay stage
        // will bail out of further processing of the malformed (possibly malicious) block
        // immediately, so as not to waste any system resources. Note that this propagation
        // provides early hints. Even if errors aren't propagated in this way, they are guaranteed
        // to be propagated eventually via the blocking fn called
        // BankWithScheduler::wait_for_completed_scheduler().
        //
        // To reiterate, the returned error is completely unrelated to the `locked_entries`
        // argument at hand. While awkward, the _async_ unified scheduler reuses this existing
        // error propagation code path to the replay stage for compatibility and ease of
        // integration, exploiting the fact that the replay stage doesn't care _which transaction
        // the returned error originates from_.
        //
        // In the future, a more proper error propagation mechanism will be introduced once we
        // fully transition to the unified scheduler for block verification. That one would be
        // push-based, from the unified scheduler to the replay stage, to eliminate the current
        // overhead: 1 read lock per batch in
        // `BankWithScheduler::schedule_transaction_executions()`.
        schedule_batches_for_execution(bank, locked_entries)
    } else {
        debug!(
            "process_batches()/execute_batches({} batches)",
            locked_entries.len()
        );
        execute_batches(
            bank,
            replay_tx_thread_pool,
            locked_entries,
            transaction_status_sender,
            replay_vote_sender,
            batch_execution_timing,
            log_messages_bytes_limit,
            prioritization_fee_cache,
        )
    }
}

fn schedule_batches_for_execution(
    bank: &BankWithScheduler,
    locked_entries: impl Iterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
) -> Result<()> {
    // Track the first error encountered in the loop below, if any.
    // This error (or Ok(())) will be propagated to the replay stage.
    let mut first_err = Ok(());

    for LockedTransactionsWithIndexes {
        lock_results,
        transactions,
        starting_index,
    } in locked_entries
    {
        // Unlock before sending to the scheduler.
        bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));
        // Give ownership to the scheduler. Capture the first error, but continue the loop
        // so the remaining entries are unlocked.
        // Scheduling is skipped if we have already detected an error in this loop.
        let indexes = starting_index..starting_index + transactions.len();
        first_err = first_err.and_then(|()| {
            bank.schedule_transaction_executions(transactions.into_iter().zip_eq(indexes))
        });
    }
    first_err
}

fn execute_batches(
    bank: &Arc<Bank>,
    replay_tx_thread_pool: &ThreadPool,
    locked_entries: impl ExactSizeIterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut BatchExecutionTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    if locked_entries.len() == 0 {
        return Ok(());
    }

    let tx_batches: Vec<_> = locked_entries
        .into_iter()
        .map(
            |LockedTransactionsWithIndexes {
                 lock_results,
                 transactions,
                 starting_index,
             }| {
                let ending_index = starting_index + transactions.len();
                TransactionBatchWithIndexes {
                    batch: TransactionBatch::new(
                        lock_results,
                        bank,
                        OwnedOrBorrowed::Owned(transactions),
                    ),
                    transaction_indexes: (starting_index..ending_index).collect(),
                }
            },
        )
        .collect();

    let execute_batches_internal_metrics = execute_batches_internal(
        bank,
        replay_tx_thread_pool,
        &tx_batches,
        transaction_status_sender,
        replay_vote_sender,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )?;

    // Pass false because this code-path is never touched by unified scheduler.
    timing.accumulate(execute_batches_internal_metrics, false);
    Ok(())
}

/// Process an ordered list of entries in parallel
/// 1. In order, lock the accounts for each entry while the locks succeed, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
///
/// This method is for use in tests against a single Bank, and assumes `Bank::transaction_count()`
/// represents the number of transactions executed in this Bank
pub fn process_entries_for_tests(
    bank: &BankWithScheduler,
    entries: Vec<Entry>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
    let replay_tx_thread_pool = create_thread_pool(1);
    let verify_transaction = {
        let bank = bank.clone_with_scheduler();
        move |versioned_tx: VersionedTransaction| -> Result<RuntimeTransaction<SanitizedTransaction>> {
            bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
        }
    };

    let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap();
    let mut batch_timing = BatchExecutionTiming::default();
    let replay_entries: Vec<_> = entry::verify_transactions(
        entries,
        &replay_tx_thread_pool,
        Arc::new(verify_transaction),
    )?
    .into_iter()
    .map(|entry| {
        let starting_index = entry_starting_index;
        if let EntryType::Transactions(ref transactions) = entry {
            entry_starting_index = entry_starting_index.saturating_add(transactions.len());
        }
        ReplayEntry {
            entry,
            starting_index,
        }
    })
    .collect();

    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
    let result = process_entries(
        bank,
        &replay_tx_thread_pool,
        replay_entries,
        transaction_status_sender,
        replay_vote_sender,
        &mut batch_timing,
        None,
        &ignored_prioritization_fee_cache,
    );

    debug!("process_entries: {batch_timing:?}");
    result
}

fn process_entries(
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    entries: Vec<ReplayEntry>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    batch_timing: &mut BatchExecutionTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut batches = vec![];
    let mut tick_hashes = vec![];

    for ReplayEntry {
        entry,
        starting_index,
    } in entries
    {
        match entry {
            EntryType::Tick(hash) => {
                // If it's a tick, save it for later
                tick_hashes.push(hash);
                if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
                    // If it's a tick that will cause a new blockhash to be created,
                    // execute the group and register the tick
                    process_batches(
                        bank,
                        replay_tx_thread_pool,
                        batches.drain(..),
                        transaction_status_sender,
                        replay_vote_sender,
                        batch_timing,
                        log_messages_bytes_limit,
                        prioritization_fee_cache,
                    )?;
                    for hash in tick_hashes.drain(..) {
                        bank.register_tick(&hash);
                    }
                }
            }
            EntryType::Transactions(transactions) => {
                queue_batches_with_lock_retry(
                    bank,
                    starting_index,
                    transactions,
                    &mut batches,
                    |batches| {
                        process_batches(
                            bank,
                            replay_tx_thread_pool,
                            batches,
                            transaction_status_sender,
                            replay_vote_sender,
                            batch_timing,
                            log_messages_bytes_limit,
                            prioritization_fee_cache,
                        )
                    },
                )?;
            }
        }
    }
    process_batches(
        bank,
        replay_tx_thread_pool,
        batches.into_iter(),
        transaction_status_sender,
        replay_vote_sender,
        batch_timing,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )?;
    for hash in tick_hashes {
        bank.register_tick(&hash);
    }
    Ok(())
}
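
// Illustrative walk-through (not part of the upstream module) of the loop
// above. Given the entry sequence
//
//     [Transactions(A), Transactions(B), Tick(t1), Transactions(C), Tick(t2)]
//
// where Tick(t2) completes the block: A and B are queued as locked batches
// (flushed early only if their account locks conflict), t1 is buffered in
// `tick_hashes` because it isn't a block boundary, C joins the queue, and
// Tick(t2) triggers process_batches() for [A, B, C] followed by registering
// t1 and t2 via bank.register_tick().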

/// If an entry can be locked without failure, the transactions are pushed
/// as a batch to `batches`. If the lock fails, the transactions are unlocked
/// and the queued batches are processed.
/// The locking is then retried, and if it fails again the block is marked
/// as dead.
/// If the lock retry succeeds, the batch is pushed into `batches`.
fn queue_batches_with_lock_retry(
    bank: &Bank,
    starting_index: usize,
    transactions: Vec<RuntimeTransaction<SanitizedTransaction>>,
    batches: &mut Vec<LockedTransactionsWithIndexes<SanitizedTransaction>>,
    mut process_batches: impl FnMut(
        Drain<LockedTransactionsWithIndexes<SanitizedTransaction>>,
    ) -> Result<()>,
) -> Result<()> {
    // try to lock the accounts
    let lock_results = bank.try_lock_accounts(&transactions);
    let first_lock_err = first_err(&lock_results);
    if first_lock_err.is_ok() {
        batches.push(LockedTransactionsWithIndexes {
            lock_results,
            transactions,
            starting_index,
        });
        return Ok(());
    }

    // We need to unlock the transactions that were successfully locked before
    // retrying.
    bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));

    // We failed to lock, there are 2 possible reasons:
    // 1. A batch already in `batches` holds the lock.
    // 2. The batch is "self-conflicting" (i.e. the batch has account lock conflicts with itself)

    // Use the callback to process batches, and clear them.
    // Clearing the batches will `Drop` the batches, which will unlock the accounts.
    process_batches(batches.drain(..))?;

    // Retry the lock
    let lock_results = bank.try_lock_accounts(&transactions);
    match first_err(&lock_results) {
        Ok(()) => {
            batches.push(LockedTransactionsWithIndexes {
                lock_results,
                transactions,
                starting_index,
            });
            Ok(())
        }
        Err(err) => {
            // We may still have succeeded in locking some accounts; unlock them.
            bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));

            // An entry has account lock conflicts with *itself*, which should not happen
            // if generated by a properly functioning leader
            datapoint_error!(
                "validator_process_entry_error",
                (
                    "error",
                    format!(
                        "Lock accounts error, entry conflicts with itself, txs: {transactions:?}"
                    ),
                    String
                )
            );
            Err(err)
        }
    }
}
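
// Illustrative scenarios (not part of the upstream module) for the retry
// logic above, assuming entries E1 and E2 that both write account X:
//
//     // Case 1: E1 was queued first and holds the lock on X, so locking E2
//     // fails; the queued batches are processed and dropped (releasing X),
//     // and the retry for E2 then succeeds.
//
//     // Case 2: E2 contains two transactions that both write X, so E2 is
//     // self-conflicting; the retry fails as well, the error is returned,
//     // and replay rejects the block. A correctly functioning leader never
//     // produces such an entry.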

#[derive(Error, Debug)]
pub enum BlockstoreProcessorError {
    #[error("failed to load entries, error: {0}")]
    FailedToLoadEntries(#[from] BlockstoreError),

    #[error("failed to load meta")]
    FailedToLoadMeta,

    #[error("failed to replay bank 0, did you forget to provide a snapshot")]
    FailedToReplayBank0,

    #[error("invalid block error: {0}")]
    InvalidBlock(#[from] BlockError),

    #[error("invalid transaction error: {0}")]
    InvalidTransaction(#[from] TransactionError),

    #[error("no valid forks found")]
    NoValidForksFound,

    #[error("invalid hard fork slot {0}")]
    InvalidHardFork(Slot),

    #[error("root bank with mismatched capitalization at {0}")]
    RootBankWithMismatchedCapitalization(Slot),

    #[error("set root error {0}")]
    SetRootError(#[from] SetRootError),

    #[error("incomplete final fec set")]
    IncompleteFinalFecSet,

    #[error("invalid retransmitter signature final fec set")]
    InvalidRetransmitterSignatureFinalFecSet,
}

/// Callback for accessing bank state after each slot is confirmed while
/// processing the blockstore
pub type ProcessSlotCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

#[derive(Default, Clone)]
pub struct ProcessOptions {
    /// Run PoH, transaction signature and other transaction verifications on the entries.
    pub run_verification: bool,
    pub full_leader_cache: bool,
    pub halt_at_slot: Option<Slot>,
    pub slot_callback: Option<ProcessSlotCallback>,
    pub new_hard_forks: Option<Vec<Slot>>,
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    pub limit_load_slot_count_from_snapshot: Option<usize>,
    pub allow_dead_slots: bool,
    pub accounts_db_skip_shrink: bool,
    pub accounts_db_force_initial_clean: bool,
    pub accounts_db_config: Option<AccountsDbConfig>,
    pub verify_index: bool,
    pub runtime_config: RuntimeConfig,
    /// true if, after processing the contents of the blockstore at startup, we should run an
    /// accounts hash calc. This is useful for debugging.
    pub run_final_accounts_hash_calc: bool,
    pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup,
    #[cfg(feature = "dev-context-only-utils")]
    pub hash_overrides: Option<HashOverrides>,
    pub abort_on_invalid_block: bool,
    pub no_block_cost_limits: bool,
}
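
// Illustrative sketch only: since ProcessOptions derives Default, callers
// typically override just the fields they need, e.g.:
//
//     let opts = ProcessOptions {
//         run_verification: true,
//         halt_at_slot: Some(5),
//         ..ProcessOptions::default()
//     };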

pub fn test_process_blockstore(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
    exit: Arc<AtomicBool>,
) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
    let (bank_forks, leader_schedule_cache, ..) = crate::bank_forks_utils::load_bank_forks(
        genesis_config,
        blockstore,
        Vec::new(),
        &SnapshotConfig::new_disabled(),
        opts,
        None,
        None,
        None,
        exit.clone(),
    )
    .unwrap();

    process_blockstore_from_root(
        blockstore,
        &bank_forks,
        &leader_schedule_cache,
        opts,
        None,
        None,
        None, // snapshots are disabled
    )
    .unwrap();

    (bank_forks, leader_schedule_cache)
}

pub(crate) fn process_blockstore_for_bank_0(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    account_paths: Vec<PathBuf>,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> result::Result<Arc<RwLock<BankForks>>, BlockstoreProcessorError> {
    // Setup bank for slot 0
    let bank0 = Bank::new_with_paths(
        genesis_config,
        Arc::new(opts.runtime_config.clone()),
        account_paths,
        opts.debug_keys.clone(),
        None,
        false,
        opts.accounts_db_config.clone(),
        accounts_update_notifier,
        None,
        exit,
        None,
        None,
    );
    let bank0_slot = bank0.slot();
    let bank_forks = BankForks::new_rw_arc(bank0);

    info!("Processing ledger for slot 0...");
    let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
    process_bank_0(
        &bank_forks
            .read()
            .unwrap()
            .get_with_scheduler(bank0_slot)
            .unwrap(),
        blockstore,
        &replay_tx_thread_pool,
        opts,
        transaction_status_sender,
        &VerifyRecyclers::default(),
        entry_notification_sender,
    )?;

    Ok(bank_forks)
}

/// Process blockstore from a known root bank
#[allow(clippy::too_many_arguments)]
pub fn process_blockstore_from_root(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    snapshot_controller: Option<&SnapshotController>,
) -> result::Result<(), BlockstoreProcessorError> {
    let (start_slot, start_slot_hash) = {
        // Starting slot must be a root, and thus has no parents
        assert_eq!(bank_forks.read().unwrap().banks().len(), 1);
        let bank = bank_forks.read().unwrap().root_bank();
        #[cfg(feature = "dev-context-only-utils")]
        if let Some(hash_overrides) = &opts.hash_overrides {
            info!("Will override following slots' hashes: {hash_overrides:#?}");
            bank.set_hash_overrides(hash_overrides.clone());
        }
        if opts.no_block_cost_limits {
            warn!("setting block cost limits to MAX");
            bank.write_cost_tracker()
                .unwrap()
                .set_limits(u64::MAX, u64::MAX, u64::MAX);
        }
        assert!(bank.parent().is_none());
        (bank.slot(), bank.hash())
    };

    info!("Processing ledger from slot {start_slot}...");
    let now = Instant::now();

    // Ensure start_slot is rooted for correct replay; also ensure start_slot and
    // qualifying children are marked as connected
    if blockstore.is_primary_access() {
        blockstore
            .mark_slots_as_if_rooted_normally_at_startup(
                vec![(start_slot, Some(start_slot_hash))],
                true,
            )
            .expect("Couldn't mark start_slot as root in startup");
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .expect("Couldn't mark start_slot as connected during startup")
    } else {
        info!(
            "Start slot {start_slot} isn't a root, and won't be updated due to secondary \
             blockstore access"
        );
    }

    if let Ok(Some(highest_slot)) = blockstore.highest_slot() {
        info!("ledger holds data through slot {highest_slot}");
    }

    let mut timing = ExecuteTimings::default();
    let (num_slots_processed, num_new_roots_found) = if let Some(start_slot_meta) = blockstore
        .meta(start_slot)
        .unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}"))
    {
        let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
        load_frozen_forks(
            bank_forks,
            &start_slot_meta,
            blockstore,
            &replay_tx_thread_pool,
            leader_schedule_cache,
            opts,
            transaction_status_sender,
            entry_notification_sender,
            &mut timing,
            snapshot_controller,
        )?
    } else {
        // If there's no meta in the blockstore for the input `start_slot`,
        // then we started from a snapshot and are unable to process anything.
        //
        // If the ledger has any data at all, the snapshot was likely taken at
        // a slot that is not within the range of ledger min/max slot(s).
        warn!("Starting slot {start_slot} is not in Blockstore, unable to process");
        (0, 0)
    };

    let processing_time = now.elapsed();
    let num_frozen_banks = bank_forks.read().unwrap().frozen_banks().count();
    datapoint_info!(
        "process_blockstore_from_root",
        ("total_time_us", processing_time.as_micros(), i64),
        ("frozen_banks", num_frozen_banks, i64),
        ("slot", bank_forks.read().unwrap().root(), i64),
        ("num_slots_processed", num_slots_processed, i64),
        ("num_new_roots_found", num_new_roots_found, i64),
        ("forks", bank_forks.read().unwrap().banks().len(), i64),
    );

    info!("ledger processing timing: {timing:?}");
    {
        let bank_forks = bank_forks.read().unwrap();
        let mut bank_slots = bank_forks.banks().keys().copied().collect::<Vec<_>>();
        bank_slots.sort_unstable();

        info!(
            "ledger processed in {}. root slot is {}, {} bank{}: {}",
            HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
                .to_text_en(Accuracy::Precise, Tense::Present),
            bank_forks.root(),
            bank_slots.len(),
            if bank_slots.len() > 1 { "s" } else { "" },
            bank_slots.iter().map(|slot| slot.to_string()).join(", "),
        );
        assert!(bank_forks.active_bank_slots().is_empty());
    }

    Ok(())
}

/// Verify that a segment of entries has the correct number of ticks and hashes
fn verify_ticks(
    bank: &Bank,
    mut entries: &[Entry],
    slot_full: bool,
    tick_hash_count: &mut u64,
) -> std::result::Result<(), BlockError> {
    let next_bank_tick_height = bank.tick_height() + entries.tick_count();
    let max_bank_tick_height = bank.max_tick_height();

    if next_bank_tick_height > max_bank_tick_height {
        warn!("Too many entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooManyTicks);
    }

    if next_bank_tick_height < max_bank_tick_height && slot_full {
        info!("Too few entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooFewTicks);
    }

    if next_bank_tick_height == max_bank_tick_height {
        let has_trailing_entry = entries.last().map(|e| !e.is_tick()).unwrap_or_default();
        if has_trailing_entry {
            warn!("Slot: {} did not end with a tick entry", bank.slot());
            return Err(BlockError::TrailingEntry);
        }

        if !slot_full {
            warn!("Slot: {} was not marked full", bank.slot());
            return Err(BlockError::InvalidLastTick);
        }
    }

    if let Some(first_alpenglow_slot) = bank
        .feature_set
        .activated_slot(&agave_feature_set::alpenglow::id())
    {
        if bank.parent_slot() >= first_alpenglow_slot {
            // If both the parent and the bank slot are in an epoch post alpenglow activation,
            // no tick verification is needed
            return Ok(());
        }

        // If the bank is in the alpenglow epoch, but the parent is from an epoch
        // where the feature flag is not active, we must verify ticks that correspond
        // to the epoch in which PoH is active. This verification is critical, as otherwise
        // a leader could jump the gun and publish a block in the alpenglow epoch without
        // waiting the appropriate time as determined by PoH in the prior epoch.
        if bank.slot() >= first_alpenglow_slot && next_bank_tick_height == max_bank_tick_height {
            if entries.is_empty() {
                // This shouldn't happen, but good to double check
                error!("Processing empty entries in verify_ticks()");
                return Ok(());
            }
            // The last entry must be a tick, as verified by the `has_trailing_entry`
            // check above. Because in Alpenglow the last tick does not have any
            // hashing guarantees, we pass everything but that last tick to the
            // entry verification.
            entries = &entries[..entries.len() - 1];
        }
    }

    let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
    if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
        warn!(
            "Tick with invalid number of hashes found in slot: {}",
            bank.slot()
        );
        return Err(BlockError::InvalidTickHashCount);
    }

    Ok(())
}
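
// Illustrative example (not part of the upstream module) of the tick-height
// checks in verify_ticks() above. Suppose max_tick_height = 64 and
// bank.tick_height() = 62:
//
//     // entries containing 2 ticks => next_bank_tick_height == 64: the slot
//     //   must be marked full and must end with a tick entry
//     // entries containing 3 ticks => 65 > 64: BlockError::TooManyTicks
//     // entries containing 1 tick with slot_full == true
//     //                            => 63 < 64: BlockError::TooFewTicks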

#[allow(clippy::too_many_arguments)]
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn confirm_full_slot(
    blockstore: &Blockstore,
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    let mut confirmation_timing = ConfirmationTiming::default();
    let skip_verification = !opts.run_verification;
    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);

    confirm_slot(
        blockstore,
        bank,
        replay_tx_thread_pool,
        &mut confirmation_timing,
        progress,
        skip_verification,
        transaction_status_sender,
        entry_notification_sender,
        replay_vote_sender,
        recyclers,
        opts.allow_dead_slots,
        opts.runtime_config.log_messages_bytes_limit,
        &ignored_prioritization_fee_cache,
    )?;

    timing.accumulate(&confirmation_timing.batch_execute.totals);

    if !bank.is_complete() {
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::Incomplete,
        ))
    } else {
        Ok(())
    }
}

/// Measures different parts of the slot confirmation processing pipeline.
#[derive(Debug)]
pub struct ConfirmationTiming {
    /// Moment when the `ConfirmationTiming` instance was created.  Used to track the total wall
    /// clock time from the moment the first shred for the slot is received to the moment the
    /// slot is complete.
    pub started: Instant,

    /// Wall clock time used by the slot confirmation code, including PoH/signature verification,
    /// and replay.  As replay can run in parallel with the verification, this value cannot be
    /// recovered from `replay_elapsed` and/or `{poh,transaction}_verify_elapsed`.  This
    /// includes failed cases, when `confirm_slot_entries` exits with an error.  In microseconds.
    /// When unified scheduler is enabled, replay excludes the transaction execution, only
    /// accounting for task creation and submission to the scheduler.
    pub confirmation_elapsed: u64,

    /// Wall clock time used by the entry replay code.  Does not include the PoH or the transaction
    /// signature/precompiles verification, but can overlap with the PoH and signature verification.
    /// In microseconds.
    /// When unified scheduler is enabled, replay excludes the transaction execution, only
    /// accounting for task creation and submission to the scheduler.
    pub replay_elapsed: u64,

    /// Wall clock times, used for the PoH verification of entries.  In microseconds.
    pub poh_verify_elapsed: u64,

    /// Wall clock time, used for the signature verification as well as precompiles verification.
    /// In microseconds.
    pub transaction_verify_elapsed: u64,

    /// Wall clock time spent loading data sets (and entries) from the blockstore.  This does not
    /// include the case when the blockstore load failed.  In microseconds.
    pub fetch_elapsed: u64,

    /// Same as `fetch_elapsed` above, but for the case when the blockstore load fails.  In
    /// microseconds.
    pub fetch_fail_elapsed: u64,

    /// `batch_execute()` measurements.
    pub batch_execute: BatchExecutionTiming,
}

impl Default for ConfirmationTiming {
    fn default() -> Self {
        Self {
            started: Instant::now(),
            confirmation_elapsed: 0,
            replay_elapsed: 0,
            poh_verify_elapsed: 0,
            transaction_verify_elapsed: 0,
            fetch_elapsed: 0,
            fetch_fail_elapsed: 0,
            batch_execute: BatchExecutionTiming::default(),
        }
    }
}

/// Measures times related to transaction execution in a slot.
#[derive(Debug, Default)]
pub struct BatchExecutionTiming {
    /// Time used by transaction execution.  Accumulated across multiple threads that are running
    /// `execute_batch()`.
    pub totals: ExecuteTimings,

    /// Wall clock time used by the transaction execution part of the pipeline.
    /// [`ConfirmationTiming::replay_elapsed`] includes this time.  In microseconds.
    wall_clock_us: Saturating<u64>,

    /// Time used to execute transactions, via `execute_batch()`, in the thread that consumed the
    /// most time (in terms of total_thread_us) among rayon threads. Note that the slowest thread
    /// is determined each time a given group of batches is newly processed. So, this is a coarse
    /// approximation of wall-time single-threaded linearized metrics, discarding all metrics other
    /// than the arbitrary set of batches mixed with various transactions, which replayed slowest
    /// as a whole for each rayon processing session.
    ///
    /// When unified scheduler is enabled, this field isn't maintained, because execution isn't
    /// batched at all.
    slowest_thread: ThreadExecuteTimings,
}

impl BatchExecutionTiming {
    pub fn accumulate(
        &mut self,
        new_batch: ExecuteBatchesInternalMetrics,
        is_unified_scheduler_enabled: bool,
    ) {
        let Self {
            totals,
            wall_clock_us,
            slowest_thread,
        } = self;

        // These metric fields aren't applicable for the unified scheduler
        if !is_unified_scheduler_enabled {
            *wall_clock_us += new_batch.execute_batches_us;

            totals.saturating_add_in_place(TotalBatchesLen, new_batch.total_batches_len);
            totals.saturating_add_in_place(NumExecuteBatches, 1);
        }

        for thread_times in new_batch.execution_timings_per_thread.values() {
            totals.accumulate(&thread_times.execute_timings);
        }

        // This whole metric (replay-slot-end-to-end-stats) isn't applicable for the unified
        // scheduler.
        if !is_unified_scheduler_enabled {
            let slowest = new_batch
                .execution_timings_per_thread
                .values()
                .max_by_key(|thread_times| thread_times.total_thread_us);

            if let Some(slowest) = slowest {
                slowest_thread.accumulate(slowest);
                slowest_thread
                    .execute_timings
                    .saturating_add_in_place(NumExecuteBatches, 1);
            };
        }
    }
}

#[derive(Debug, Default)]
pub struct ThreadExecuteTimings {
    pub total_thread_us: Saturating<u64>,
    pub total_transactions_executed: Saturating<u64>,
    pub execute_timings: ExecuteTimings,
}

impl ThreadExecuteTimings {
    pub fn report_stats(&self, slot: Slot) {
        lazy! {
            datapoint_info!(
                "replay-slot-end-to-end-stats",
                ("slot", slot as i64, i64),
                ("total_thread_us", self.total_thread_us.0 as i64, i64),
                ("total_transactions_executed", self.total_transactions_executed.0 as i64, i64),
                // Everything inside the `eager!` block will be eagerly expanded before
                // evaluation of the rest of the surrounding macro.
                // Pass false because this code-path is never touched by unified scheduler.
                eager!{report_execute_timings!(self.execute_timings, false)}
            );
        };
    }

    pub fn accumulate(&mut self, other: &ThreadExecuteTimings) {
        self.execute_timings.accumulate(&other.execute_timings);
        self.total_thread_us += other.total_thread_us;
        self.total_transactions_executed += other.total_transactions_executed;
    }
}
1326
1327#[derive(Default)]
1328pub struct ReplaySlotStats(ConfirmationTiming);
1329impl std::ops::Deref for ReplaySlotStats {
1330    type Target = ConfirmationTiming;
1331    fn deref(&self) -> &Self::Target {
1332        &self.0
1333    }
1334}
1335impl std::ops::DerefMut for ReplaySlotStats {
1336    fn deref_mut(&mut self) -> &mut Self::Target {
1337        &mut self.0
1338    }
1339}
1340
1341impl ReplaySlotStats {
1342    pub fn report_stats(
1343        &self,
1344        slot: Slot,
1345        num_txs: usize,
1346        num_entries: usize,
1347        num_shreds: u64,
1348        bank_complete_time_us: u64,
1349        is_unified_scheduler_enabled: bool,
1350    ) {
1351        let confirmation_elapsed = if is_unified_scheduler_enabled {
1352            "confirmation_without_replay_us"
1353        } else {
1354            "confirmation_time_us"
1355        };
1356        let replay_elapsed = if is_unified_scheduler_enabled {
1357            "task_submission_us"
1358        } else {
1359            "replay_time"
1360        };
1361        let execute_batches_us = if is_unified_scheduler_enabled {
1362            None
1363        } else {
1364            Some(self.batch_execute.wall_clock_us.0 as i64)
1365        };
1366
1367        lazy! {
1368            datapoint_info!(
1369                "replay-slot-stats",
1370                ("slot", slot as i64, i64),
1371                ("fetch_entries_time", self.fetch_elapsed as i64, i64),
1372                (
1373                    "fetch_entries_fail_time",
1374                    self.fetch_fail_elapsed as i64,
1375                    i64
1376                ),
1377                (
1378                    "entry_poh_verification_time",
1379                    self.poh_verify_elapsed as i64,
1380                    i64
1381                ),
1382                (
1383                    "entry_transaction_verification_time",
1384                    self.transaction_verify_elapsed as i64,
1385                    i64
1386                ),
1387                (confirmation_elapsed, self.confirmation_elapsed as i64, i64),
1388                (replay_elapsed, self.replay_elapsed as i64, i64),
1389                ("execute_batches_us", execute_batches_us, Option<i64>),
1390                (
1391                    "replay_total_elapsed",
1392                    self.started.elapsed().as_micros() as i64,
1393                    i64
1394                ),
1395                ("bank_complete_time_us", bank_complete_time_us, i64),
1396                ("total_transactions", num_txs as i64, i64),
1397                ("total_entries", num_entries as i64, i64),
1398                ("total_shreds", num_shreds as i64, i64),
1399                // Everything inside the `eager!` block will be eagerly expanded before
1400                // evaluation of the rest of the surrounding macro.
1401                eager!{report_execute_timings!(self.batch_execute.totals, is_unified_scheduler_enabled)}
1402            );
1403        };
1404
1405        // Skip reporting replay-slot-end-to-end-stats entirely if the unified scheduler is
1406        // enabled, because the metric is only meaningful for rayon-based worker threads.
1407        //
1408        // See the slowest_thread doc comment for details.
1409        if !is_unified_scheduler_enabled {
1410            self.batch_execute.slowest_thread.report_stats(slot);
1411        }
1412
1413        // per_program_timings datapoints are only reported at the trace level, so the
1414        // preparation required to generate them should only occur when that level is enabled.
1415        if log::log_enabled!(log::Level::Trace) {
1416            let mut per_pubkey_timings: Vec<_> = self
1417                .batch_execute
1418                .totals
1419                .details
1420                .per_program_timings
1421                .iter()
1422                .collect();
1423            per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us));
1424            let (total_us, total_units, total_count, total_errored_units, total_errored_count) =
1425                per_pubkey_timings.iter().fold(
1426                    (0, 0, 0, 0, 0),
1427                    |(sum_us, sum_units, sum_count, sum_errored_units, sum_errored_count), a| {
1428                        (
1429                            sum_us + a.1.accumulated_us.0,
1430                            sum_units + a.1.accumulated_units.0,
1431                            sum_count + a.1.count.0,
1432                            sum_errored_units + a.1.total_errored_units.0,
1433                            sum_errored_count + a.1.errored_txs_compute_consumed.len(),
1434                        )
1435                    },
1436                );
1437
1438            for (pubkey, time) in per_pubkey_timings.iter().take(5) {
1439                datapoint_trace!(
1440                    "per_program_timings",
1441                    ("slot", slot as i64, i64),
1442                    ("pubkey", pubkey.to_string(), String),
1443                    ("execute_us", time.accumulated_us.0, i64),
1444                    ("accumulated_units", time.accumulated_units.0, i64),
1445                    ("errored_units", time.total_errored_units.0, i64),
1446                    ("count", time.count.0, i64),
1447                    (
1448                        "errored_count",
1449                        time.errored_txs_compute_consumed.len(),
1450                        i64
1451                    ),
1452                );
1453            }
1454            datapoint_info!(
1455                "per_program_timings",
1456                ("slot", slot as i64, i64),
1457                ("pubkey", "all", String),
1458                ("execute_us", total_us, i64),
1459                ("accumulated_units", total_units, i64),
1460                ("count", total_count, i64),
1461                ("errored_units", total_errored_units, i64),
1462                ("errored_count", total_errored_count, i64)
1463            );
1464        }
1465    }
1466}
1467
1468#[derive(Default)]
1469pub struct ConfirmationProgress {
1470    pub last_entry: Hash,
1471    pub tick_hash_count: u64,
1472    pub num_shreds: u64,
1473    pub num_entries: usize,
1474    pub num_txs: usize,
1475}
1476
1477impl ConfirmationProgress {
1478    pub fn new(last_entry: Hash) -> Self {
1479        Self {
1480            last_entry,
1481            ..Self::default()
1482        }
1483    }
1484}
1485
1486#[allow(clippy::too_many_arguments)]
1487pub fn confirm_slot(
1488    blockstore: &Blockstore,
1489    bank: &BankWithScheduler,
1490    replay_tx_thread_pool: &ThreadPool,
1491    timing: &mut ConfirmationTiming,
1492    progress: &mut ConfirmationProgress,
1493    skip_verification: bool,
1494    transaction_status_sender: Option<&TransactionStatusSender>,
1495    entry_notification_sender: Option<&EntryNotifierSender>,
1496    replay_vote_sender: Option<&ReplayVoteSender>,
1497    recyclers: &VerifyRecyclers,
1498    allow_dead_slots: bool,
1499    log_messages_bytes_limit: Option<usize>,
1500    prioritization_fee_cache: &PrioritizationFeeCache,
1501) -> result::Result<(), BlockstoreProcessorError> {
1502    let slot = bank.slot();
1503
1504    let slot_entries_load_result = {
1505        let mut load_elapsed = Measure::start("load_elapsed");
1506        let load_result = blockstore
1507            .get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
1508            .map_err(BlockstoreProcessorError::FailedToLoadEntries);
1509        load_elapsed.stop();
1510        if load_result.is_err() {
1511            timing.fetch_fail_elapsed += load_elapsed.as_us();
1512        } else {
1513            timing.fetch_elapsed += load_elapsed.as_us();
1514        }
1515        load_result
1516    }?;
1517
1518    confirm_slot_entries(
1519        bank,
1520        replay_tx_thread_pool,
1521        slot_entries_load_result,
1522        timing,
1523        progress,
1524        skip_verification,
1525        transaction_status_sender,
1526        entry_notification_sender,
1527        replay_vote_sender,
1528        recyclers,
1529        log_messages_bytes_limit,
1530        prioritization_fee_cache,
1531    )
1532}
1533
1534#[allow(clippy::too_many_arguments)]
1535fn confirm_slot_entries(
1536    bank: &BankWithScheduler,
1537    replay_tx_thread_pool: &ThreadPool,
1538    slot_entries_load_result: (Vec<Entry>, u64, bool),
1539    timing: &mut ConfirmationTiming,
1540    progress: &mut ConfirmationProgress,
1541    skip_verification: bool,
1542    transaction_status_sender: Option<&TransactionStatusSender>,
1543    entry_notification_sender: Option<&EntryNotifierSender>,
1544    replay_vote_sender: Option<&ReplayVoteSender>,
1545    recyclers: &VerifyRecyclers,
1546    log_messages_bytes_limit: Option<usize>,
1547    prioritization_fee_cache: &PrioritizationFeeCache,
1548) -> result::Result<(), BlockstoreProcessorError> {
1549    let ConfirmationTiming {
1550        confirmation_elapsed,
1551        replay_elapsed,
1552        poh_verify_elapsed,
1553        transaction_verify_elapsed,
1554        batch_execute: batch_execute_timing,
1555        ..
1556    } = timing;
1557
1558    let confirmation_elapsed_timer = Measure::start("confirmation_elapsed");
1559    defer! {
1560        *confirmation_elapsed += confirmation_elapsed_timer.end_as_us();
1561    };
1562
1563    let slot = bank.slot();
1564    let (entries, num_shreds, slot_full) = slot_entries_load_result;
1565    let num_entries = entries.len();
1566    let mut entry_tx_starting_indexes = Vec::with_capacity(num_entries);
1567    let mut entry_tx_starting_index = progress.num_txs;
1568    let num_txs = entries
1569        .iter()
1570        .enumerate()
1571        .map(|(i, entry)| {
1572            if let Some(entry_notification_sender) = entry_notification_sender {
1573                let entry_index = progress.num_entries.saturating_add(i);
1574                if let Err(err) = entry_notification_sender.send(EntryNotification {
1575                    slot,
1576                    index: entry_index,
1577                    entry: entry.into(),
1578                    starting_transaction_index: entry_tx_starting_index,
1579                }) {
1580                    warn!(
1581                        "Slot {slot}, entry {entry_index} entry_notification_sender send failed: \
1582                         {err:?}"
1583                    );
1584                }
1585            }
1586            let num_txs = entry.transactions.len();
1587            let next_tx_starting_index = entry_tx_starting_index.saturating_add(num_txs);
1588            entry_tx_starting_indexes.push(entry_tx_starting_index);
1589            entry_tx_starting_index = next_tx_starting_index;
1590            num_txs
1591        })
1592        .sum::<usize>();
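    // For example (illustrative counts): entries with 3, 0, and 2 transactions yield
    // entry_tx_starting_indexes of [progress.num_txs, progress.num_txs + 3,
    // progress.num_txs + 3] and num_txs = 5.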
1593    trace!(
1594        "Fetched entries for slot {slot}, num_entries: {num_entries}, num_shreds: {num_shreds}, \
1595         num_txs: {num_txs}, slot_full: {slot_full}",
1596    );
1597
1598    if !skip_verification {
1599        let tick_hash_count = &mut progress.tick_hash_count;
1600        verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| {
1601            warn!(
1602                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: \
1603                 {}, shred_index: {}, slot_full: {}",
1604                err,
1605                slot,
1606                num_entries,
1607                bank.tick_height(),
1608                progress.last_entry,
1609                bank.last_blockhash(),
1610                num_shreds,
1611                slot_full,
1612            );
1613            err
1614        })?;
1615    }
1616
1617    let last_entry_hash = entries.last().map(|e| e.hash);
1618    let verifier = if !skip_verification {
1619        datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
1620        let entry_state = entries.start_verify(
1621            &progress.last_entry,
1622            replay_tx_thread_pool,
1623            recyclers.clone(),
1624        );
1625        if entry_state.status() == EntryVerificationStatus::Failure {
1626            warn!("Ledger proof of history failed at slot: {slot}");
1627            return Err(BlockError::InvalidEntryHash.into());
1628        }
1629        Some(entry_state)
1630    } else {
1631        None
1632    };
1633
1634    let verify_transaction = {
1635        let bank = bank.clone_with_scheduler();
1636        move |versioned_tx: VersionedTransaction,
1637              verification_mode: TransactionVerificationMode|
1638              -> Result<RuntimeTransaction<SanitizedTransaction>> {
1639            bank.verify_transaction(versioned_tx, verification_mode)
1640        }
1641    };
1642
1643    let transaction_verification_start = Instant::now();
1644    let transaction_verification_result = entry::start_verify_transactions(
1645        entries,
1646        skip_verification,
1647        replay_tx_thread_pool,
1648        recyclers.clone(),
1649        Arc::new(verify_transaction),
1650    );
1651    let transaction_cpu_duration_us = transaction_verification_start.elapsed().as_micros() as u64;
1652
1653    let mut transaction_verification_result = match transaction_verification_result {
1654        Ok(transaction_verification_result) => transaction_verification_result,
1655        Err(err) => {
1656            warn!(
1657                "Ledger transaction signature verification failed at slot: {}",
1658                bank.slot()
1659            );
1660            return Err(err.into());
1661        }
1662    };
1663
1664    let entries = transaction_verification_result
1665        .entries()
1666        .expect("Transaction verification generates entries");
1667
1668    let mut replay_timer = Measure::start("replay_elapsed");
1669    let replay_entries: Vec<_> = entries
1670        .into_iter()
1671        .zip(entry_tx_starting_indexes)
1672        .map(|(entry, tx_starting_index)| ReplayEntry {
1673            entry,
1674            starting_index: tx_starting_index,
1675        })
1676        .collect();
1677    let process_result = process_entries(
1678        bank,
1679        replay_tx_thread_pool,
1680        replay_entries,
1681        transaction_status_sender,
1682        replay_vote_sender,
1683        batch_execute_timing,
1684        log_messages_bytes_limit,
1685        prioritization_fee_cache,
1686    )
1687    .map_err(BlockstoreProcessorError::from);
1688    replay_timer.stop();
1689    *replay_elapsed += replay_timer.as_us();
1690
1691    {
1692        // If signature verification ran on the GPU, wait for that computation to finish and
1693        // collect its result. If it ran on the CPU, this just returns the already-computed
1694        // result produced in start_verify_transactions. Either way, check the result of the
1695        // signature verification.
1696        let valid = transaction_verification_result.finish_verify();
1697
1698        // The GPU Entry verification (if any) is kicked off right when the CPU-side Entry
1699        // verification finishes, so these times should be disjoint.
1700        *transaction_verify_elapsed +=
1701            transaction_cpu_duration_us + transaction_verification_result.gpu_verify_duration();
1702
1703        if !valid {
1704            warn!(
1705                "Ledger transaction signature verification failed at slot: {}",
1706                bank.slot()
1707            );
1708            return Err(TransactionError::SignatureFailure.into());
1709        }
1710    }
1711
1712    if let Some(mut verifier) = verifier {
1713        let verified = verifier.finish_verify(replay_tx_thread_pool);
1714        *poh_verify_elapsed += verifier.poh_duration_us();
1715        if !verified {
1716            warn!("Ledger proof of history failed at slot: {}", bank.slot());
1717            return Err(BlockError::InvalidEntryHash.into());
1718        }
1719    }
1720
1721    process_result?;
1722
1723    progress.num_shreds += num_shreds;
1724    progress.num_entries += num_entries;
1725    progress.num_txs += num_txs;
1726    if let Some(last_entry_hash) = last_entry_hash {
1727        progress.last_entry = last_entry_hash;
1728    }
1729
1730    Ok(())
1731}
1732
1733// Special handling required for processing the entries in slot 0
1734#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
1735fn process_bank_0(
1736    bank0: &BankWithScheduler,
1737    blockstore: &Blockstore,
1738    replay_tx_thread_pool: &ThreadPool,
1739    opts: &ProcessOptions,
1740    transaction_status_sender: Option<&TransactionStatusSender>,
1741    recyclers: &VerifyRecyclers,
1742    entry_notification_sender: Option<&EntryNotifierSender>,
1743) -> result::Result<(), BlockstoreProcessorError> {
1744    assert_eq!(bank0.slot(), 0);
1745    let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
1746    confirm_full_slot(
1747        blockstore,
1748        bank0,
1749        replay_tx_thread_pool,
1750        opts,
1751        recyclers,
1752        &mut progress,
1753        None,
1754        entry_notification_sender,
1755        None,
1756        &mut ExecuteTimings::default(),
1757    )
1758    .map_err(|_| BlockstoreProcessorError::FailedToReplayBank0)?;
1759    if let Some((result, _timings)) = bank0.wait_for_completed_scheduler() {
1760        result.unwrap();
1761    }
1762    bank0.freeze();
1763    if blockstore.is_primary_access() {
1764        blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
1765    }
1766
1767    if let Some(transaction_status_sender) = transaction_status_sender {
1768        transaction_status_sender.send_transaction_status_freeze_message(bank0);
1769    }
1770
1771    Ok(())
1772}
1773
1774 // Given a bank, add its children to the pending slots queue if those child slots are
1775 // complete.
1776fn process_next_slots(
1777    bank: &Arc<Bank>,
1778    meta: &SlotMeta,
1779    blockstore: &Blockstore,
1780    leader_schedule_cache: &LeaderScheduleCache,
1781    pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>,
1782    opts: &ProcessOptions,
1783) -> result::Result<(), BlockstoreProcessorError> {
1784    if meta.next_slots.is_empty() {
1785        return Ok(());
1786    }
1787
1788    // This is a fork point if there are multiple children, create a new child bank for each fork
1789    for next_slot in &meta.next_slots {
1790        if opts
1791            .halt_at_slot
1792            .is_some_and(|halt_at_slot| *next_slot > halt_at_slot)
1793        {
1794            continue;
1795        }
1796        if !opts.allow_dead_slots && blockstore.is_dead(*next_slot) {
1797            continue;
1798        }
1799
1800        let next_meta = blockstore
1801            .meta(*next_slot)
1802            .map_err(|err| {
1803                warn!("Failed to load meta for slot {next_slot}: {err:?}");
1804                BlockstoreProcessorError::FailedToLoadMeta
1805            })?
1806            .unwrap();
1807
1808        // Only process full slots in blockstore_processor; replay_stage
1809        // handles any partials.
1810        if next_meta.is_full() {
1811            let next_bank = Bank::new_from_parent(
1812                bank.clone(),
1813                &leader_schedule_cache
1814                    .slot_leader_at(*next_slot, Some(bank))
1815                    .unwrap(),
1816                *next_slot,
1817            );
1818            set_alpenglow_ticks(&next_bank);
1819            trace!(
1820                "New bank for slot {}, parent slot is {}",
1821                next_slot,
1822                bank.slot(),
1823            );
1824            pending_slots.push((next_meta, next_bank, bank.last_blockhash()));
1825        }
1826    }
1827
1828    // Reverse sort by slot, so the next slot to be processed can be popped
1829    pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot()));
1830    Ok(())
1831}
1832
1833/// Set alpenglow bank tick height.
1834///
1835 /// For alpenglow banks this tick height is `max_tick_height` - 1. For a bank on the epoch
1836 /// boundary of feature activation, we need ticks_per_slot ticks for each slot between the
1837 /// parent and the epoch boundary, plus one extra tick for the alpenglow bank itself.
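///
/// For example (illustrative numbers): with ticks_per_slot = 64, a parent at slot 9, and
/// feature activation at slot 12, a bank at slot 13 needs (12 - 9 - 1) * 64 + 1 = 129
/// alpenglow ticks, so its tick height is set to max_tick_height - 129.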
1838pub fn set_alpenglow_ticks(bank: &Bank) {
1839    let Some(first_alpenglow_slot) = bank
1840        .feature_set
1841        .activated_slot(&agave_feature_set::alpenglow::id())
1842    else {
1843        return;
1844    };
1845
1846    let Some(alpenglow_ticks) = calculate_alpenglow_ticks(
1847        bank.slot(),
1848        first_alpenglow_slot,
1849        bank.parent_slot(),
1850        bank.ticks_per_slot(),
1851    ) else {
1852        return;
1853    };
1854
1855    info!(
1856        "Alpenglow: Setting tick height for slot {} to {}",
1857        bank.slot(),
1858        bank.max_tick_height() - alpenglow_ticks
1859    );
1860    bank.set_tick_height(bank.max_tick_height() - alpenglow_ticks);
1861}
1862
1863/// Calculates how many ticks are needed for a block at `slot` with parent `parent_slot`
1864///
1865/// If both `parent_slot` and `slot` are greater than or equal to `first_alpenglow_slot`, then
1866/// only 1 tick is needed. This tick has no hashing guarantees, it is simply used as a signal
1867/// for the end of the block.
1868///
1869/// If both `parent_slot` and `slot` are less than `first_alpenglow_slot`, we need the
1870/// appropriate amount of PoH ticks, indicated by a None return value.
1871///
1872/// If `parent_slot` is less than `first_alpenglow_slot` and `slot` is greater than or equal
1873/// to `first_alpenglow_slot` (A block that "straddles" the activation epoch boundary) then:
1874///
1875/// 1. All slots between `parent_slot` and `first_alpenglow_slot` need to have `ticks_per_slot` ticks
1876/// 2. One extra tick for the actual alpenglow slot
1877/// 3. There are no ticks for any skipped alpenglow slots
1878fn calculate_alpenglow_ticks(
1879    slot: Slot,
1880    first_alpenglow_slot: Slot,
1881    parent_slot: Slot,
1882    ticks_per_slot: u64,
1883) -> Option<u64> {
1884    // Slots before alpenglow shouldn't have alpenglow ticks
1885    if slot < first_alpenglow_slot {
1886        return None;
1887    }
1888
1889    let alpenglow_ticks = if parent_slot < first_alpenglow_slot && slot >= first_alpenglow_slot {
1890        (first_alpenglow_slot - parent_slot - 1) * ticks_per_slot + 1
1891    } else {
1892        1
1893    };
1894
1895    Some(alpenglow_ticks)
1896}
1897
1898/// Starting with the root slot corresponding to `start_slot_meta`, iteratively
1899/// find and process children slots from the blockstore.
1900///
1901/// Returns a tuple (a, b) where a is the number of slots processed and b is
1902/// the number of newly found cluster roots.
1903#[allow(clippy::too_many_arguments)]
1904fn load_frozen_forks(
1905    bank_forks: &RwLock<BankForks>,
1906    start_slot_meta: &SlotMeta,
1907    blockstore: &Blockstore,
1908    replay_tx_thread_pool: &ThreadPool,
1909    leader_schedule_cache: &LeaderScheduleCache,
1910    opts: &ProcessOptions,
1911    transaction_status_sender: Option<&TransactionStatusSender>,
1912    entry_notification_sender: Option<&EntryNotifierSender>,
1913    timing: &mut ExecuteTimings,
1914    snapshot_controller: Option<&SnapshotController>,
1915) -> result::Result<(u64, usize), BlockstoreProcessorError> {
1916    let blockstore_max_root = blockstore.max_root();
1917    let mut root = bank_forks.read().unwrap().root();
1918    let max_root = std::cmp::max(root, blockstore_max_root);
1919    info!(
1920        "load_frozen_forks() latest root from blockstore: {blockstore_max_root}, max_root: \
1921         {max_root}",
1922    );
1923
1924    // The total number of slots processed
1925    let mut total_slots_processed = 0;
1926    // The total number of newly identified root slots
1927    let mut total_rooted_slots = 0;
1928
1929    let mut pending_slots = vec![];
1930    process_next_slots(
1931        &bank_forks
1932            .read()
1933            .unwrap()
1934            .get(start_slot_meta.slot)
1935            .unwrap(),
1936        start_slot_meta,
1937        blockstore,
1938        leader_schedule_cache,
1939        &mut pending_slots,
1940        opts,
1941    )?;
1942
1943    if Some(bank_forks.read().unwrap().root()) != opts.halt_at_slot {
1944        let recyclers = VerifyRecyclers::default();
1945        let mut all_banks = HashMap::new();
1946
1947        const STATUS_REPORT_INTERVAL: Duration = Duration::from_secs(2);
1948        let mut last_status_report = Instant::now();
1949        let mut slots_processed = 0;
1950        let mut txs = 0;
1951        let mut set_root_us = 0;
1952        let mut root_retain_us = 0;
1953        let mut process_single_slot_us = 0;
1954        let mut voting_us = 0;
1955
1956        while !pending_slots.is_empty() {
1957            timing.details.per_program_timings.clear();
1958            let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
1959            let slot = bank.slot();
1960            if last_status_report.elapsed() > STATUS_REPORT_INTERVAL {
1961                let secs = last_status_report.elapsed().as_secs() as f32;
1962                let slots_per_sec = slots_processed as f32 / secs;
1963                let txs_per_sec = txs as f32 / secs;
1964                info!(
1965                    "processing ledger: slot={slot}, root_slot={root}, slots={slots_processed}, \
1966                     slots/s={slots_per_sec}, txs/s={txs_per_sec}"
1967                );
1968                debug!(
1969                    "processing ledger timing: set_root_us={set_root_us}, \
1970                     root_retain_us={root_retain_us}, \
1971                     process_single_slot_us={process_single_slot_us}, voting_us={voting_us}"
1972                );
1973
1974                last_status_report = Instant::now();
1975                slots_processed = 0;
1976                txs = 0;
1977                set_root_us = 0;
1978                root_retain_us = 0;
1979                process_single_slot_us = 0;
1980                voting_us = 0;
1981            }
1982
1983            let mut progress = ConfirmationProgress::new(last_entry_hash);
1984            let mut m = Measure::start("process_single_slot");
1985            let bank = bank_forks.write().unwrap().insert_from_ledger(bank);
1986            if let Err(error) = process_single_slot(
1987                blockstore,
1988                &bank,
1989                replay_tx_thread_pool,
1990                opts,
1991                &recyclers,
1992                &mut progress,
1993                transaction_status_sender,
1994                entry_notification_sender,
1995                None,
1996                timing,
1997            ) {
1998                assert!(bank_forks.write().unwrap().remove(bank.slot()).is_some());
1999                if opts.abort_on_invalid_block {
2000                    Err(error)?
2001                }
2002                continue;
2003            }
2004            txs += progress.num_txs;
2005
2006            // Block must be frozen by this point; otherwise,
2007            // process_single_slot() would have errored above.
2008            assert!(bank.is_frozen());
2009            all_banks.insert(bank.slot(), bank.clone_with_scheduler());
2010            m.stop();
2011            process_single_slot_us += m.as_us();
2012
2013            let mut m = Measure::start("voting");
2014            // If we've reached the last known root in blockstore, start looking
2015            // for newer cluster confirmed roots
2016            let new_root_bank = {
2017                if bank_forks.read().unwrap().root() >= max_root {
2018                    supermajority_root_from_vote_accounts(
2019                        bank.total_epoch_stake(),
2020                        &bank.vote_accounts(),
2021                    ).and_then(|supermajority_root| {
2022                        if supermajority_root > root {
2023                            // If there's a cluster confirmed root greater than our last
2024                            // replayed root, then because the cluster confirmed root should
2025                            // be descended from our last root, it must exist in `all_banks`
2026                            let cluster_root_bank = all_banks.get(&supermajority_root).unwrap();
2027
2028                            // cluster root must be a descendant of our root, otherwise something
2029                            // is drastically wrong
2030                            assert!(cluster_root_bank.ancestors.contains_key(&root));
2031                            info!(
2032                                "blockstore processor found new cluster confirmed root: {}, observed in bank: {}",
2033                                cluster_root_bank.slot(), bank.slot()
2034                            );
2035
2036                            // Ensure cluster-confirmed root and parents are set as root in blockstore
2037                            let mut rooted_slots = vec![];
2038                            let mut new_root_bank = cluster_root_bank.clone_without_scheduler();
2039                            loop {
2040                                if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay!
2041                                assert!(new_root_bank.slot() > root);
2042
2043                                rooted_slots.push((new_root_bank.slot(), Some(new_root_bank.hash())));
2044                                // As noted, the cluster confirmed root should be descended from
2045                                // our last root; therefore parent should be set
2046                                new_root_bank = new_root_bank.parent().unwrap();
2047                            }
2048                            total_rooted_slots += rooted_slots.len();
2049                            if blockstore.is_primary_access() {
2050                                blockstore
2051                                    .mark_slots_as_if_rooted_normally_at_startup(rooted_slots, true)
2052                                    .expect("Blockstore::mark_slots_as_if_rooted_normally_at_startup() should succeed");
2053                            }
2054                            Some(cluster_root_bank)
2055                        } else {
2056                            None
2057                        }
2058                    })
2059                } else if blockstore.is_root(slot) {
2060                    Some(&bank)
2061                } else {
2062                    None
2063                }
2064            };
2065            m.stop();
2066            voting_us += m.as_us();
2067
2068            if let Some(new_root_bank) = new_root_bank {
2069                let mut m = Measure::start("set_root");
2070                root = new_root_bank.slot();
2071
2072                leader_schedule_cache.set_root(new_root_bank);
2073                new_root_bank.prune_program_cache(root, new_root_bank.epoch());
2074                let _ = bank_forks
2075                    .write()
2076                    .unwrap()
2077                    .set_root(root, snapshot_controller, None)?;
2078                m.stop();
2079                set_root_us += m.as_us();
2080
2081                // Filter out all non-descendants of the new root
2082                let mut m = Measure::start("filter pending slots");
2083                pending_slots
2084                    .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(&root));
2085                all_banks.retain(|_, bank| bank.ancestors.contains_key(&root));
2086                m.stop();
2087                root_retain_us += m.as_us();
2088            }
2089
2090            slots_processed += 1;
2091            total_slots_processed += 1;
2092
2093            trace!(
2094                "Bank for {}slot {} is complete",
2095                if root == slot { "root " } else { "" },
2096                slot,
2097            );
2098
2099            let done_processing = opts
2100                .halt_at_slot
2101                .map(|halt_at_slot| slot >= halt_at_slot)
2102                .unwrap_or(false);
2103            if done_processing {
2104                if opts.run_final_accounts_hash_calc {
2105                    bank.run_final_hash_calc();
2106                }
2107                break;
2108            }
2109
2110            process_next_slots(
2111                &bank,
2112                &meta,
2113                blockstore,
2114                leader_schedule_cache,
2115                &mut pending_slots,
2116                opts,
2117            )?;
2118        }
2119    } else if opts.run_final_accounts_hash_calc {
2120        bank_forks.read().unwrap().root_bank().run_final_hash_calc();
2121    }
2122
2123    Ok((total_slots_processed, total_rooted_slots))
2124}
2125
2126// `roots` is sorted largest to smallest by root slot
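// For example (illustrative stakes): with roots [(10, 40), (8, 30), (5, 20)] and
// total_epoch_stake = 100, the cumulative stake first exceeds VOTE_THRESHOLD_SIZE (2/3)
// once slot 8 is included (40 + 30 = 70), so Some(8) is returned.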
2127fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> {
2128    if roots.is_empty() {
2129        return None;
2130    }
2131
2132    // Find latest root
2133    let mut total = 0;
2134    let mut prev_root = roots[0].0;
2135    for (root, stake) in roots.iter() {
2136        assert!(*root <= prev_root);
2137        total += stake;
2138        if total as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE {
2139            return Some(*root);
2140        }
2141        prev_root = *root;
2142    }
2143
2144    None
2145}
2146
2147fn supermajority_root_from_vote_accounts(
2148    total_epoch_stake: u64,
2149    vote_accounts: &VoteAccountsHashMap,
2150) -> Option<Slot> {
2151    let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
2152        .values()
2153        .filter_map(|(stake, account)| {
2154            if *stake == 0 {
2155                return None;
2156            }
2157
2158            Some((account.vote_state_view().root_slot()?, *stake))
2159        })
2160        .collect();
2161
2162    // Sort from greatest to smallest slot
2163    roots_stakes.sort_unstable_by(|a, b| a.0.cmp(&b.0).reverse());
2164
2165    // Find latest root
2166    supermajority_root(&roots_stakes, total_epoch_stake)
2167}
2168
2169 // Processes and replays the contents of a single slot; returns an error
2170 // if the slot fails to replay.
2171#[allow(clippy::too_many_arguments)]
2172pub fn process_single_slot(
2173    blockstore: &Blockstore,
2174    bank: &BankWithScheduler,
2175    replay_tx_thread_pool: &ThreadPool,
2176    opts: &ProcessOptions,
2177    recyclers: &VerifyRecyclers,
2178    progress: &mut ConfirmationProgress,
2179    transaction_status_sender: Option<&TransactionStatusSender>,
2180    entry_notification_sender: Option<&EntryNotifierSender>,
2181    replay_vote_sender: Option<&ReplayVoteSender>,
2182    timing: &mut ExecuteTimings,
2183) -> result::Result<(), BlockstoreProcessorError> {
2184    let slot = bank.slot();
2185    // Mark corrupt slots as dead so validators don't replay them and
2186    // see AlreadyProcessed errors later in ReplayStage.
2187    confirm_full_slot(
2188        blockstore,
2189        bank,
2190        replay_tx_thread_pool,
2191        opts,
2192        recyclers,
2193        progress,
2194        transaction_status_sender,
2195        entry_notification_sender,
2196        replay_vote_sender,
2197        timing,
2198    )
2199    .and_then(|()| {
2200        if let Some((result, completed_timings)) = bank.wait_for_completed_scheduler() {
2201            timing.accumulate(&completed_timings);
2202            result?
2203        }
2204        Ok(())
2205    })
2206    .map_err(|err| {
2207        warn!("slot {slot} failed to verify: {err}");
2208        if blockstore.is_primary_access() {
2209            blockstore
2210                .set_dead_slot(slot)
2211                .expect("Failed to mark slot as dead in blockstore");
2212        } else {
2213            info!(
2214                "Failed slot {slot} won't be marked dead due to being secondary blockstore access"
2215            );
2216        }
2217        err
2218    })?;
2219
2220    if let Some((result, _timings)) = bank.wait_for_completed_scheduler() {
2221        result?
2222    }
2223
2224    let block_id = blockstore
2225        .check_last_fec_set_and_get_block_id(slot, bank.hash(), &bank.feature_set)
2226        .inspect_err(|err| {
2227            warn!("slot {slot} failed last fec set checks: {err}");
2228            if blockstore.is_primary_access() {
2229                blockstore
2230                    .set_dead_slot(slot)
2231                    .expect("Failed to mark slot as dead in blockstore");
2232            } else {
2233                info!(
2234                    "Failed last fec set checks slot {slot} won't be marked dead due to being \
2235                     secondary blockstore access"
2236                );
2237            }
2238        })?;
2239    bank.set_block_id(block_id);
2240    bank.freeze(); // all banks handled by this routine are created from complete slots
2241
2242    if let Some(slot_callback) = &opts.slot_callback {
2243        slot_callback(bank);
2244    }
2245
2246    if blockstore.is_primary_access() {
2247        blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
2248    }
2249
2250    if let Some(transaction_status_sender) = transaction_status_sender {
2251        transaction_status_sender.send_transaction_status_freeze_message(bank);
2252    }
2253
2254    Ok(())
2255}
2256
2257type WorkSequence = u64;
2258
2259#[allow(clippy::large_enum_variant)]
2260#[derive(Debug)]
2261pub enum TransactionStatusMessage {
2262    Batch((TransactionStatusBatch, Option<WorkSequence>)),
2263    Freeze(Arc<Bank>),
2264}
2265
2266#[derive(Debug)]
2267pub struct TransactionStatusBatch {
2268    pub slot: Slot,
2269    pub transactions: Vec<SanitizedTransaction>,
2270    pub commit_results: Vec<TransactionCommitResult>,
2271    pub balances: TransactionBalancesSet,
2272    pub token_balances: TransactionTokenBalancesSet,
2273    pub costs: Vec<Option<u64>>,
2274    pub transaction_indexes: Vec<usize>,
2275}
2276
2277#[derive(Clone, Debug)]
2278pub struct TransactionStatusSender {
2279    pub sender: Sender<TransactionStatusMessage>,
2280    pub dependency_tracker: Option<Arc<DependencyTracker>>,
2281}
2282
2283impl TransactionStatusSender {
2284    pub fn send_transaction_status_batch(
2285        &self,
2286        slot: Slot,
2287        transactions: Vec<SanitizedTransaction>,
2288        commit_results: Vec<TransactionCommitResult>,
2289        balances: TransactionBalancesSet,
2290        token_balances: TransactionTokenBalancesSet,
2291        costs: Vec<Option<u64>>,
2292        transaction_indexes: Vec<usize>,
2293    ) {
2294        let work_sequence = self
2295            .dependency_tracker
2296            .as_ref()
2297            .map(|dependency_tracker| dependency_tracker.declare_work());
2298
2299        if let Err(e) = self.sender.send(TransactionStatusMessage::Batch((
2300            TransactionStatusBatch {
2301                slot,
2302                transactions,
2303                commit_results,
2304                balances,
2305                token_balances,
2306                costs,
2307                transaction_indexes,
2308            },
2309            work_sequence,
2310        ))) {
2311            trace!("Slot {slot} transaction_status send batch failed: {e:?}");
2312        }
2313    }
2314
2315    pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) {
2316        if let Err(e) = self
2317            .sender
2318            .send(TransactionStatusMessage::Freeze(bank.clone()))
2319        {
2320            let slot = bank.slot();
2321            warn!("Slot {slot} transaction_status send freeze message failed: {e:?}");
2322        }
2323    }
2324}
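
// A minimal usage sketch (the wiring is illustrative, not part of this module): create a
// crossbeam channel, hand the sender side to replay, and drain the receiver elsewhere:
//
//     let (sender, receiver) = crossbeam_channel::unbounded();
//     let status_sender = TransactionStatusSender { sender, dependency_tracker: None };
//     // Pass Some(&status_sender) into confirm_slot(); another thread receives the
//     // TransactionStatusMessage::Batch / Freeze messages from `receiver`.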
2325
2326 // Used for tests only.
2327pub fn fill_blockstore_slot_with_ticks(
2328    blockstore: &Blockstore,
2329    ticks_per_slot: u64,
2330    slot: u64,
2331    parent_slot: u64,
2332    last_entry_hash: Hash,
2333) -> Hash {
2334    // Only slot 0 can be equal to the parent_slot
2335    assert!(slot.saturating_sub(1) >= parent_slot);
2336    let num_slots = (slot - parent_slot).max(1);
2337    let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
2338    let last_entry_hash = entries.last().unwrap().hash;
2339
2340    blockstore
2341        .write_entries(
2342            slot,
2343            0,
2344            0,
2345            ticks_per_slot,
2346            Some(parent_slot),
2347            true,
2348            &Arc::new(Keypair::new()),
2349            entries,
2350            0,
2351        )
2352        .unwrap();
2353
2354    last_entry_hash
2355}
2356
2357#[cfg(test)]
2358pub mod tests {
2359    use {
2360        super::*,
2361        crate::{
2362            blockstore_options::{AccessType, BlockstoreOptions},
2363            genesis_utils::{
2364                create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
2365            },
2366        },
2367        assert_matches::assert_matches,
2368        rand::{thread_rng, Rng},
2369        solana_account::{AccountSharedData, WritableAccount},
2370        solana_cost_model::transaction_cost::TransactionCost,
2371        solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
2372        solana_epoch_schedule::EpochSchedule,
2373        solana_hash::Hash,
2374        solana_instruction::{error::InstructionError, Instruction},
2375        solana_keypair::Keypair,
2376        solana_native_token::LAMPORTS_PER_SOL,
2377        solana_program_runtime::declare_process_instruction,
2378        solana_pubkey::Pubkey,
2379        solana_runtime::{
2380            bank::bank_hash_details::SlotDetails,
2381            genesis_utils::{
2382                self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
2383            },
2384            installed_scheduler_pool::{
2385                MockInstalledScheduler, MockUninstalledScheduler, SchedulerAborted,
2386                SchedulingContext,
2387            },
2388        },
2389        solana_signer::Signer,
2390        solana_svm::transaction_processor::ExecutionRecordingConfig,
2391        solana_system_interface::error::SystemError,
2392        solana_system_transaction as system_transaction,
2393        solana_transaction::Transaction,
2394        solana_transaction_error::TransactionError,
2395        solana_vote::{vote_account::VoteAccount, vote_transaction},
2396        solana_vote_program::{
2397            self,
2398            vote_state::{TowerSync, VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY},
2399        },
2400        std::{collections::BTreeSet, slice, sync::RwLock},
2401        test_case::{test_case, test_matrix},
2402        trees::tr,
2403    };
2404
2405    // Convenience wrapper to optionally process blockstore with Secondary access.
2406    //
2407    // Setting up the ledger for a test requires Primary access as items will need to be inserted.
2408    // However, once a Secondary access has been opened, it won't automatically see updates made by
2409    // the Primary access. So, open (and close) the Secondary access within this function to ensure
2410    // that "stale" Secondary accesses don't propagate.
2411    fn test_process_blockstore_with_custom_options(
2412        genesis_config: &GenesisConfig,
2413        blockstore: &Blockstore,
2414        opts: &ProcessOptions,
2415        access_type: AccessType,
2416    ) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
2417        match access_type {
2418            AccessType::Primary | AccessType::PrimaryForMaintenance => {
2419                // Attempting to open a second Primary access would fail, so
2420                // just pass the original session if it is a Primary variant
2421                test_process_blockstore(genesis_config, blockstore, opts, Arc::default())
2422            }
2423            AccessType::Secondary => {
2424                let secondary_blockstore = Blockstore::open_with_options(
2425                    blockstore.ledger_path(),
2426                    BlockstoreOptions {
2427                        access_type,
2428                        ..BlockstoreOptions::default()
2429                    },
2430                )
2431                .expect("Unable to open access to blockstore");
2432                test_process_blockstore(genesis_config, &secondary_blockstore, opts, Arc::default())
2433            }
2434        }
2435    }
2436
2437    fn process_entries_for_tests_without_scheduler(
2438        bank: &Arc<Bank>,
2439        entries: Vec<Entry>,
2440    ) -> Result<()> {
2441        process_entries_for_tests(
2442            &BankWithScheduler::new_without_scheduler(bank.clone()),
2443            entries,
2444            None,
2445            None,
2446        )
2447    }
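
    // A minimal sketch (not part of the original suite) exercising the three documented
    // cases of calculate_alpenglow_ticks; the slot numbers are illustrative.
    #[test]
    fn test_calculate_alpenglow_ticks_cases() {
        // Slots before activation keep their normal PoH ticks (None).
        assert_eq!(calculate_alpenglow_ticks(5, 10, 4, 64), None);
        // Fully post-activation blocks need exactly one signalling tick.
        assert_eq!(calculate_alpenglow_ticks(13, 10, 12, 64), Some(1));
        // A block straddling the activation boundary needs ticks_per_slot ticks for
        // each slot between the parent and the boundary, plus one extra tick:
        // (10 - 8 - 1) * 64 + 1 = 65.
        assert_eq!(calculate_alpenglow_ticks(11, 10, 8, 64), Some(65));
    }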
2448
2449    #[test]
2450    fn test_process_blockstore_with_missing_hashes() {
2451        do_test_process_blockstore_with_missing_hashes(AccessType::Primary);
2452    }
2453
2454    #[test]
2455    fn test_process_blockstore_with_missing_hashes_secondary_access() {
2456        do_test_process_blockstore_with_missing_hashes(AccessType::Secondary);
2457    }
2458
2459    // Intentionally make slot 1 faulty and ensure that processing sees it as dead
2460    fn do_test_process_blockstore_with_missing_hashes(blockstore_access_type: AccessType) {
2461        solana_logger::setup();
2462
2463        let hashes_per_tick = 2;
2464        let GenesisConfigInfo {
2465            mut genesis_config, ..
2466        } = create_genesis_config(10_000);
2467        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
2468        let ticks_per_slot = genesis_config.ticks_per_slot;
2469
2470        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2471        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2472
2473        let parent_slot = 0;
2474        let slot = 1;
2475        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
2476        assert_matches!(
2477            blockstore.write_entries(
2478                slot,
2479                0,
2480                0,
2481                ticks_per_slot,
2482                Some(parent_slot),
2483                true,
2484                &Arc::new(Keypair::new()),
2485                entries,
2486                0,
2487            ),
2488            Ok(_)
2489        );
2490
2491        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
2492            &genesis_config,
2493            &blockstore,
2494            &ProcessOptions {
2495                run_verification: true,
2496                ..ProcessOptions::default()
2497            },
2498            blockstore_access_type.clone(),
2499        );
2500        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
2501
2502        let dead_slots: Vec<Slot> = blockstore.dead_slots_iterator(0).unwrap().collect();
2503        match blockstore_access_type {
2504            // Secondary access is immutable so even though a dead slot
2505            // will be identified, it won't actually be marked dead.
2506            AccessType::Secondary => {
2507                assert_eq!(dead_slots.len(), 0);
2508            }
2509            AccessType::Primary | AccessType::PrimaryForMaintenance => {
2510                assert_eq!(&dead_slots, &[1]);
2511            }
2512        }
2513    }
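
    // A minimal sketch (not part of the original suite) of supermajority_root's threshold
    // behavior; the stakes are illustrative. See the worked comment above supermajority_root.
    #[test]
    fn test_supermajority_root_threshold_sketch() {
        // Cumulative stake first exceeds VOTE_THRESHOLD_SIZE (2/3) once slot 8 is
        // included: 40 + 30 = 70 out of 100.
        let roots = vec![(10, 40), (8, 30), (5, 20)];
        assert_eq!(supermajority_root(&roots, 100), Some(8));
        // Too little stake never crosses the threshold.
        assert_eq!(supermajority_root(&[(10, 10)], 100), None);
    }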
2514
2515    #[test]
2516    fn test_process_blockstore_with_invalid_slot_tick_count() {
2517        solana_logger::setup();
2518
2519        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2520        let ticks_per_slot = genesis_config.ticks_per_slot;
2521
2522        // Create a new ledger with slot 0 full of ticks
2523        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2524        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2525
2526        // Write slot 1 with one tick missing
2527        let parent_slot = 0;
2528        let slot = 1;
2529        let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
2530        assert_matches!(
2531            blockstore.write_entries(
2532                slot,
2533                0,
2534                0,
2535                ticks_per_slot,
2536                Some(parent_slot),
2537                true,
2538                &Arc::new(Keypair::new()),
2539                entries,
2540                0,
2541            ),
2542            Ok(_)
2543        );
2544
2545        // Should return slot 0, the last slot on the fork that is valid
2546        let (bank_forks, ..) = test_process_blockstore(
2547            &genesis_config,
2548            &blockstore,
2549            &ProcessOptions {
2550                run_verification: true,
2551                ..ProcessOptions::default()
2552            },
2553            Arc::default(),
2554        );
2555        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
2556
2557        // Write slot 2 fully
2558        let _last_slot2_entry_hash =
2559            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
2560
2561        let (bank_forks, ..) = test_process_blockstore(
2562            &genesis_config,
2563            &blockstore,
2564            &ProcessOptions {
2565                run_verification: true,
2566                ..ProcessOptions::default()
2567            },
2568            Arc::default(),
2569        );
2570
2571        // One valid fork, one bad fork. process_blockstore() should only return the valid fork
2572        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 2]);
2573        assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 2);
2574        assert_eq!(bank_forks.read().unwrap().root(), 0);
2575    }
2576
2577    #[test]
2578    fn test_process_blockstore_with_slot_with_trailing_entry() {
2579        solana_logger::setup();
2580
2581        let GenesisConfigInfo {
2582            mint_keypair,
2583            genesis_config,
2584            ..
2585        } = create_genesis_config(10_000);
2586        let ticks_per_slot = genesis_config.ticks_per_slot;
2587
2588        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2589        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2590
2591        let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
2592        let trailing_entry = {
2593            let keypair = Keypair::new();
2594            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
2595            next_entry(&blockhash, 1, vec![tx])
2596        };
2597        entries.push(trailing_entry);
2598
2599        // Tricks blockstore into writing the trailing entry by lying that there is one more tick
2600        // per slot.
2601        let parent_slot = 0;
2602        let slot = 1;
2603        assert_matches!(
2604            blockstore.write_entries(
2605                slot,
2606                0,
2607                0,
2608                ticks_per_slot + 1,
2609                Some(parent_slot),
2610                true,
2611                &Arc::new(Keypair::new()),
2612                entries,
2613                0,
2614            ),
2615            Ok(_)
2616        );
2617
2618        let opts = ProcessOptions {
2619            run_verification: true,
2620            ..ProcessOptions::default()
2621        };
2622        let (bank_forks, ..) =
2623            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2624        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
2625    }
2626
2627    #[test]
2628    fn test_process_blockstore_with_incomplete_slot() {
2629        solana_logger::setup();
2630
2631        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2632        let ticks_per_slot = genesis_config.ticks_per_slot;
2633
2634        /*
2635          Build a blockstore in the ledger with the following fork structure:
2636
2637               slot 0 (all ticks)
2638                 |
2639               slot 1 (all ticks but one)
2640                 |
2641               slot 2 (all ticks)
2642
2643           where slot 1 is incomplete (missing 1 tick at the end)
2644        */
2645
2646        // Create a new ledger with slot 0 full of ticks
2647        let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2648        debug!("ledger_path: {ledger_path:?}");
2649
2650        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2651
2652        // Write slot 1
2653        // slot 1 points at slot 0, missing one tick at the end
2654        {
2655            let parent_slot = 0;
2656            let slot = 1;
2657            let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
2658            blockhash = entries.last().unwrap().hash;
2659
2660            // throw away last one
2661            entries.pop();
2662
2663            assert_matches!(
2664                blockstore.write_entries(
2665                    slot,
2666                    0,
2667                    0,
2668                    ticks_per_slot,
2669                    Some(parent_slot),
2670                    false,
2671                    &Arc::new(Keypair::new()),
2672                    entries,
2673                    0,
2674                ),
2675                Ok(_)
2676            );
2677        }
2678
2679        // slot 2, points at slot 1
2680        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);
2681
2682        let opts = ProcessOptions {
2683            run_verification: true,
2684            ..ProcessOptions::default()
2685        };
2686        let (bank_forks, ..) =
2687            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2688
2689        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]); // slot 1 isn't "full", we stop at slot zero
2690
2691        /* Add a complete slot such that the store looks like:
2692
2693                                 slot 0 (all ticks)
2694                               /                  \
2695               slot 1 (all ticks but one)        slot 3 (all ticks)
2696                      |
2697               slot 2 (all ticks)
2698        */
2699        let opts = ProcessOptions {
2700            run_verification: true,
2701            ..ProcessOptions::default()
2702        };
2703        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
2704        // Slots 1 and 2 should not show up in the ending bank_forks_info
2705        let (bank_forks, ..) =
2706            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2707
2708        // slot 1 isn't "full", so processing stops at slot 0 on that fork
2709        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 3]);
2710    }
2711
2712    #[test]
2713    fn test_process_blockstore_with_two_forks_and_squash() {
2714        solana_logger::setup();
2715
2716        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2717        let ticks_per_slot = genesis_config.ticks_per_slot;
2718
2719        // Create a new ledger with slot 0 full of ticks
2720        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2721        debug!("ledger_path: {ledger_path:?}");
2722        let mut last_entry_hash = blockhash;
2723
2724        /*
2725            Build a blockstore in the ledger with the following fork structure:
2726
2727                 slot 0
2728                   |
2729                 slot 1
2730                 /   \
2731            slot 2   |
2732               /     |
2733            slot 3   |
2734                     |
2735                   slot 4 <-- set_root(true)
2736
2737        */
2738        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2739
2740        // Fork 1, ending at slot 3
2741        let last_slot1_entry_hash =
2742            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
2743        last_entry_hash = fill_blockstore_slot_with_ticks(
2744            &blockstore,
2745            ticks_per_slot,
2746            2,
2747            1,
2748            last_slot1_entry_hash,
2749        );
2750        let last_fork1_entry_hash =
2751            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
2752
2753        // Fork 2, ending at slot 4
2754        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
2755            &blockstore,
2756            ticks_per_slot,
2757            4,
2758            1,
2759            last_slot1_entry_hash,
2760        );
2761
2762        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
2763        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");
2764
2765        blockstore.set_roots([0, 1, 4].iter()).unwrap();
2766
2767        let opts = ProcessOptions {
2768            run_verification: true,
2769            ..ProcessOptions::default()
2770        };
2771        let (bank_forks, ..) =
2772            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2773        let bank_forks = bank_forks.read().unwrap();
2774
2775        // One fork; the other is ignored because it is not a descendant of the root
2776        assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);
2777
2778        assert!(&bank_forks[4]
2779            .parents()
2780            .iter()
2781            .map(|bank| bank.slot())
2782            .next()
2783            .is_none());
2784
2785        // Ensure bank_forks holds the right banks
2786        verify_fork_infos(&bank_forks);
2787
2788        assert_eq!(bank_forks.root(), 4);
2789    }
2790
2791    #[test]
2792    fn test_process_blockstore_with_two_forks() {
2793        solana_logger::setup();
2794
2795        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2796        let ticks_per_slot = genesis_config.ticks_per_slot;
2797
2798        // Create a new ledger with slot 0 full of ticks
2799        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2800        debug!("ledger_path: {ledger_path:?}");
2801        let mut last_entry_hash = blockhash;
2802
2803        /*
2804            Build a blockstore in the ledger with the following fork structure:
2805
2806                 slot 0
2807                   |
2808                 slot 1  <-- set_root(true)
2809                 /   \
2810            slot 2   |
2811               /     |
2812            slot 3   |
2813                     |
2814                   slot 4
2815
2816        */
2817        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2818
2819        // Fork 1, ending at slot 3
2820        let last_slot1_entry_hash =
2821            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
2822        last_entry_hash = fill_blockstore_slot_with_ticks(
2823            &blockstore,
2824            ticks_per_slot,
2825            2,
2826            1,
2827            last_slot1_entry_hash,
2828        );
2829        let last_fork1_entry_hash =
2830            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
2831
2832        // Fork 2, ending at slot 4
2833        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
2834            &blockstore,
2835            ticks_per_slot,
2836            4,
2837            1,
2838            last_slot1_entry_hash,
2839        );
2840
2841        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
2842        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");
2843
2844        blockstore.set_roots([0, 1].iter()).unwrap();
2845
2846        let opts = ProcessOptions {
2847            run_verification: true,
2848            ..ProcessOptions::default()
2849        };
2850        let (bank_forks, ..) =
2851            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2852        let bank_forks = bank_forks.read().unwrap();
2853
2854        assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
2855        assert_eq!(bank_forks.working_bank().slot(), 4);
2856        assert_eq!(bank_forks.root(), 1);
2857
2858        assert_eq!(
2859            &bank_forks[3]
2860                .parents()
2861                .iter()
2862                .map(|bank| bank.slot())
2863                .collect::<Vec<_>>(),
2864            &[2, 1]
2865        );
2866        assert_eq!(
2867            &bank_forks[4]
2868                .parents()
2869                .iter()
2870                .map(|bank| bank.slot())
2871                .collect::<Vec<_>>(),
2872            &[1]
2873        );
2874
2875        assert_eq!(bank_forks.root(), 1);
2876
2877        // Ensure bank_forks holds the right banks
2878        verify_fork_infos(&bank_forks);
2879    }
2880
2881    #[test]
2882    fn test_process_blockstore_with_dead_slot() {
2883        solana_logger::setup();
2884
2885        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2886        let ticks_per_slot = genesis_config.ticks_per_slot;
2887        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2888        debug!("ledger_path: {ledger_path:?}");
2889
2890        /*
2891                   slot 0
2892                     |
2893                   slot 1
2894                  /     \
2895                 /       \
2896           slot 2 (dead)  \
2897                           \
2898                        slot 3
2899        */
2900        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2901        let slot1_blockhash =
2902            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
2903        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
2904        blockstore.set_dead_slot(2).unwrap();
2905        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
2906
2907        let (bank_forks, ..) = test_process_blockstore(
2908            &genesis_config,
2909            &blockstore,
2910            &ProcessOptions::default(),
2911            Arc::default(),
2912        );
2913        let bank_forks = bank_forks.read().unwrap();
2914
2915        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]);
2916        assert_eq!(bank_forks.working_bank().slot(), 3);
2917        assert_eq!(
2918            &bank_forks[3]
2919                .parents()
2920                .iter()
2921                .map(|bank| bank.slot())
2922                .collect::<Vec<_>>(),
2923            &[1, 0]
2924        );
2925        verify_fork_infos(&bank_forks);
2926    }
2927
2928    #[test]
2929    fn test_process_blockstore_with_dead_child() {
2930        solana_logger::setup();
2931
2932        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2933        let ticks_per_slot = genesis_config.ticks_per_slot;
2934        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2935        debug!("ledger_path: {ledger_path:?}");
2936
2937        /*
2938                   slot 0
2939                     |
2940                   slot 1
2941                  /     \
2942                 /       \
2943              slot 2      \
2944               /           \
2945           slot 4 (dead)   slot 3
2946        */
2947        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2948        let slot1_blockhash =
2949            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
2950        let slot2_blockhash =
2951            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
2952        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
2953        blockstore.set_dead_slot(4).unwrap();
2954        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
2955
2956        let (bank_forks, ..) = test_process_blockstore(
2957            &genesis_config,
2958            &blockstore,
2959            &ProcessOptions::default(),
2960            Arc::default(),
2961        );
2962        let bank_forks = bank_forks.read().unwrap();
2963
2964        // Should see the parent of the dead child
2965        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]);
2966        assert_eq!(bank_forks.working_bank().slot(), 3);
2967
2968        assert_eq!(
2969            &bank_forks[3]
2970                .parents()
2971                .iter()
2972                .map(|bank| bank.slot())
2973                .collect::<Vec<_>>(),
2974            &[1, 0]
2975        );
2976        assert_eq!(
2977            &bank_forks[2]
2978                .parents()
2979                .iter()
2980                .map(|bank| bank.slot())
2981                .collect::<Vec<_>>(),
2982            &[1, 0]
2983        );
2984        assert_eq!(bank_forks.working_bank().slot(), 3);
2985        verify_fork_infos(&bank_forks);
2986    }
2987
2988    #[test]
2989    fn test_root_with_all_dead_children() {
2990        solana_logger::setup();
2991
2992        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2993        let ticks_per_slot = genesis_config.ticks_per_slot;
2994        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2995        debug!("ledger_path: {ledger_path:?}");
2996
2997        /*
2998                   slot 0
2999                 /        \
3000                /          \
3001           slot 1 (dead)  slot 2 (dead)
3002        */
3003        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3004        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
3005        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
3006        blockstore.set_dead_slot(1).unwrap();
3007        blockstore.set_dead_slot(2).unwrap();
3008        let (bank_forks, ..) = test_process_blockstore(
3009            &genesis_config,
3010            &blockstore,
3011            &ProcessOptions::default(),
3012            Arc::default(),
3013        );
3014        let bank_forks = bank_forks.read().unwrap();
3015
3016        // Should see only the parent of the dead children
3017        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
3018        verify_fork_infos(&bank_forks);
3019    }
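
    // A sketch of the dead-slot rule behind the last three tests: replay
    // walks the children of each processed slot and simply skips any slot
    // marked dead, so dead slots never produce frozen banks. This is a plain
    // data model (matching test_root_with_all_dead_children above), not the
    // real replay loop.
    #[test]
    fn illustrate_dead_slot_skipping() {
        use std::collections::{HashMap, HashSet};
        let children: HashMap<u64, Vec<u64>> = [(0, vec![1, 2])].into_iter().collect();
        let dead: HashSet<u64> = [1, 2].into_iter().collect();
        let mut frozen = vec![0u64];
        let mut pending = vec![0u64];
        while let Some(slot) = pending.pop() {
            for &child in children.get(&slot).map(|v| v.as_slice()).unwrap_or(&[]) {
                if !dead.contains(&child) {
                    frozen.push(child);
                    pending.push(child);
                }
            }
        }
        // Matches frozen_bank_slots == vec![0] in the test above.
        assert_eq!(frozen, vec![0]);
    }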
3020
3021    #[test]
3022    fn test_process_blockstore_epoch_boundary_root() {
3023        solana_logger::setup();
3024
3025        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
3026        let ticks_per_slot = genesis_config.ticks_per_slot;
3027
3028        // Create a new ledger with slot 0 full of ticks
3029        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
3030        let mut last_entry_hash = blockhash;
3031
3032        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3033
3034        // Let `last_slot` be the last slot in the first two epochs (i.e., the last slot of epoch 1)
3035        let epoch_schedule = get_epoch_schedule(&genesis_config);
3036        let last_slot = epoch_schedule.get_last_slot_in_epoch(1);
3037
3038        // Create a single chain of slots with all indexes in the range [0, last_slot + 1]
3039        for i in 1..=last_slot + 1 {
3040            last_entry_hash = fill_blockstore_slot_with_ticks(
3041                &blockstore,
3042                ticks_per_slot,
3043                i,
3044                i - 1,
3045                last_entry_hash,
3046            );
3047        }
3048
3049        // Set roots on every slot up through the last slot of the confirmed epoch
3050        let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
3051        blockstore.set_roots(rooted_slots.iter()).unwrap();
3052
3053        // Also set a root on the first slot of the next epoch
3054        blockstore
3055            .set_roots(std::iter::once(&(last_slot + 1)))
3056            .unwrap();
3057
3058        // Check that the ledger can be restarted and that building the leader schedule does not fail at the epoch boundary
3059        let opts = ProcessOptions {
3060            run_verification: true,
3061            ..ProcessOptions::default()
3062        };
3063        let (bank_forks, ..) =
3064            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3065        let bank_forks = bank_forks.read().unwrap();
3066
3067        // There is one fork, head is last_slot + 1
3068        assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);
3069
3070        // The latest root should have purged all its parents
3071        assert!(&bank_forks[last_slot + 1]
3072            .parents()
3073            .iter()
3074            .map(|bank| bank.slot())
3075            .next()
3076            .is_none());
3077    }
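
    // A sketch of the epoch arithmetic behind `get_last_slot_in_epoch`,
    // assuming a fixed-size schedule with no warmup (the real EpochSchedule
    // also supports warmup epochs, which this deliberately ignores).
    #[test]
    fn illustrate_last_slot_in_epoch_arithmetic() {
        fn last_slot_in_epoch(epoch: u64, slots_per_epoch: u64) -> u64 {
            (epoch + 1) * slots_per_epoch - 1
        }
        let slots_per_epoch = 32;
        // Epoch 0 covers slots [0, 31] and epoch 1 covers [32, 63], so the
        // "last slot in the first two epochs" used above would be 63.
        assert_eq!(last_slot_in_epoch(0, slots_per_epoch), 31);
        assert_eq!(last_slot_in_epoch(1, slots_per_epoch), 63);
    }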
3078
3079    #[test]
3080    fn test_first_err() {
3081        assert_eq!(first_err(&[Ok(())]), Ok(()));
3082        assert_eq!(
3083            first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
3084            Err(TransactionError::AlreadyProcessed)
3085        );
3086        assert_eq!(
3087            first_err(&[
3088                Ok(()),
3089                Err(TransactionError::AlreadyProcessed),
3090                Err(TransactionError::AccountInUse)
3091            ]),
3092            Err(TransactionError::AlreadyProcessed)
3093        );
3094        assert_eq!(
3095            first_err(&[
3096                Ok(()),
3097                Err(TransactionError::AccountInUse),
3098                Err(TransactionError::AlreadyProcessed)
3099            ]),
3100            Err(TransactionError::AccountInUse)
3101        );
3102        assert_eq!(
3103            first_err(&[
3104                Err(TransactionError::AccountInUse),
3105                Ok(()),
3106                Err(TransactionError::AlreadyProcessed)
3107            ]),
3108            Err(TransactionError::AccountInUse)
3109        );
3110    }
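
    // A minimal sketch of the `first_err` contract exercised above, assuming
    // only that it scans results in slice order and returns the first Err it
    // finds (this illustrative copy is written against std's Result type).
    #[test]
    fn illustrate_first_err_contract() {
        fn first_err_sketch<E: Clone>(
            results: &[std::result::Result<(), E>],
        ) -> std::result::Result<(), E> {
            for result in results {
                if let Err(err) = result {
                    return Err(err.clone());
                }
            }
            Ok(())
        }
        let results: Vec<std::result::Result<(), &str>> = vec![Ok(()), Err("a"), Err("b")];
        // Slice order decides which error wins, matching the assertions above.
        assert_eq!(first_err_sketch(&results), Err("a"));
    }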
3111
3112    #[test]
3113    fn test_process_empty_entry_is_registered() {
3114        solana_logger::setup();
3115
3116        let GenesisConfigInfo {
3117            genesis_config,
3118            mint_keypair,
3119            ..
3120        } = create_genesis_config(2);
3121        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3122        let keypair = Keypair::new();
3123        let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash());
3124        let tx = system_transaction::transfer(
3125            &mint_keypair,
3126            &keypair.pubkey(),
3127            1,
3128            slot_entries.last().unwrap().hash,
3129        );
3130
3131        // First, ensure the TX is rejected because of the unregistered last ID
3132        assert_eq!(
3133            bank.process_transaction(&tx),
3134            Err(TransactionError::BlockhashNotFound)
3135        );
3136
3137        // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
3138        process_entries_for_tests_without_scheduler(&bank, slot_entries).unwrap();
3139        assert_eq!(bank.process_transaction(&tx), Ok(()));
3140    }
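
    // An illustrative sketch of the recency rule behind the two assertions
    // above: a transaction's blockhash must be present in a bounded queue of
    // recently registered hashes. `MAX_AGE` and the u64 hashes below are
    // hypothetical stand-ins, not the real BlockhashQueue API.
    #[test]
    fn illustrate_blockhash_recency_rule() {
        use std::collections::VecDeque;
        const MAX_AGE: usize = 3;
        fn register(queue: &mut VecDeque<u64>, hash: u64) {
            queue.push_front(hash);
            queue.truncate(MAX_AGE);
        }
        let mut queue = VecDeque::new();
        // Before the entries are processed their hash is unknown (the
        // BlockhashNotFound case above)...
        assert!(!queue.contains(&1));
        // ...processing the tick entries registers it, so the transfer passes.
        register(&mut queue, 1);
        assert!(queue.contains(&1));
        // A hash ages out again once enough newer hashes are registered.
        for hash in 2..=5 {
            register(&mut queue, hash);
        }
        assert!(!queue.contains(&1));
    }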
3141
3142    #[test]
3143    fn test_process_ledger_simple() {
3144        solana_logger::setup();
3145        let leader_pubkey = solana_pubkey::new_rand();
3146        let mint = 100;
3147        let hashes_per_tick = 10;
3148        let GenesisConfigInfo {
3149            mut genesis_config,
3150            mint_keypair,
3151            ..
3152        } = create_genesis_config_with_leader(mint, &leader_pubkey, 50);
3153        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
3154        let (ledger_path, mut last_entry_hash) =
3155            create_new_tmp_ledger_auto_delete!(&genesis_config);
3156        debug!("ledger_path: {ledger_path:?}");
3157
3158        let deducted_from_mint = 3;
3159        let mut entries = vec![];
3160        let blockhash = genesis_config.hash();
3161        for _ in 0..deducted_from_mint {
3162            // Transfer one token from the mint to a random account
3163            let keypair = Keypair::new();
3164            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
3165            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
3166            entries.push(entry);
3167
3168            // Add a second transaction that will produce an
3169            // InstructionError<0, ResultWithNegativeLamports> error when processed
3170            let keypair2 = Keypair::new();
3171            let tx =
3172                system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
3173            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
3174            entries.push(entry);
3175        }
3176
3177        let remaining_hashes = hashes_per_tick - entries.len() as u64;
3178        let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
3179        entries.push(tick_entry);
3180
3181        // Fill up the rest of slot 1 with ticks
3182        entries.extend(create_ticks(
3183            genesis_config.ticks_per_slot - 1,
3184            genesis_config.poh_config.hashes_per_tick.unwrap(),
3185            last_entry_hash,
3186        ));
3187        let last_blockhash = entries.last().unwrap().hash;
3188
3189        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3190        blockstore
3191            .write_entries(
3192                1,
3193                0,
3194                0,
3195                genesis_config.ticks_per_slot,
3196                None,
3197                true,
3198                &Arc::new(Keypair::new()),
3199                entries,
3200                0,
3201            )
3202            .unwrap();
3203        let opts = ProcessOptions {
3204            run_verification: true,
3205            ..ProcessOptions::default()
3206        };
3207        let (bank_forks, ..) =
3208            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3209        let bank_forks = bank_forks.read().unwrap();
3210
3211        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
3212        assert_eq!(bank_forks.root(), 0);
3213        assert_eq!(bank_forks.working_bank().slot(), 1);
3214
3215        let bank = bank_forks[1].clone();
3216        assert_eq!(
3217            bank.get_balance(&mint_keypair.pubkey()),
3218            mint - deducted_from_mint
3219        );
3220        assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot);
3221        assert_eq!(bank.last_blockhash(), last_blockhash);
3222    }
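
    // A small sketch of the PoH accounting used above: each transaction entry
    // was created with num_hashes = 1, so the closing tick entry must carry
    // `hashes_per_tick - entries.len()` hashes for the slot's first tick to
    // land exactly on the hash boundary. Plain arithmetic only.
    #[test]
    fn illustrate_poh_hash_accounting() {
        let hashes_per_tick: u64 = 10;
        let single_hash_entries: u64 = 6; // 2 transactions per loop iteration, 3 iterations
        let remaining_hashes = hashes_per_tick - single_hash_entries;
        // The hashes of all entries in the tick's span must sum to hashes_per_tick.
        assert_eq!(single_hash_entries + remaining_hashes, hashes_per_tick);
    }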
3223
3224    #[test]
3225    fn test_process_ledger_with_one_tick_per_slot() {
3226        let GenesisConfigInfo {
3227            mut genesis_config, ..
3228        } = create_genesis_config(123);
3229        genesis_config.ticks_per_slot = 1;
3230        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
3231
3232        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3233        let opts = ProcessOptions {
3234            run_verification: true,
3235            ..ProcessOptions::default()
3236        };
3237        let (bank_forks, ..) =
3238            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3239        let bank_forks = bank_forks.read().unwrap();
3240
3241        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
3242        let bank = bank_forks[0].clone();
3243        assert_eq!(bank.tick_height(), 1);
3244    }
3245
3246    #[test]
3247    fn test_process_ledger_options_full_leader_cache() {
3248        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
3249        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
3250
3251        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3252        let opts = ProcessOptions {
3253            full_leader_cache: true,
3254            ..ProcessOptions::default()
3255        };
3256        let (_bank_forks, leader_schedule) =
3257            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3258        assert_eq!(leader_schedule.max_schedules(), usize::MAX);
3259    }
3260
3261    #[test]
3262    fn test_process_entries_tick() {
3263        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
3264        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
3265
3266        // ensure bank can process a tick
3267        assert_eq!(bank.tick_height(), 0);
3268        let tick = next_entry(&genesis_config.hash(), 1, vec![]);
3269        assert_eq!(
3270            process_entries_for_tests_without_scheduler(&bank, vec![tick]),
3271            Ok(())
3272        );
3273        assert_eq!(bank.tick_height(), 1);
3274    }
3275
3276    #[test]
3277    fn test_process_entries_2_entries_collision() {
3278        let GenesisConfigInfo {
3279            genesis_config,
3280            mint_keypair,
3281            ..
3282        } = create_genesis_config(1000);
3283        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3284        let keypair1 = Keypair::new();
3285        let keypair2 = Keypair::new();
3286
3287        let blockhash = bank.last_blockhash();
3288
3289        // ensure bank can process 2 entries that have a common account and no tick is registered
3290        let tx = system_transaction::transfer(
3291            &mint_keypair,
3292            &keypair1.pubkey(),
3293            2,
3294            bank.last_blockhash(),
3295        );
3296        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
3297        let tx = system_transaction::transfer(
3298            &mint_keypair,
3299            &keypair2.pubkey(),
3300            2,
3301            bank.last_blockhash(),
3302        );
3303        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
3304        assert_eq!(
3305            process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]),
3306            Ok(())
3307        );
3308        assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
3309        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
3310        assert_eq!(bank.last_blockhash(), blockhash);
3311    }
3312
3313    #[test]
3314    fn test_process_entries_2_txes_collision() {
3315        let GenesisConfigInfo {
3316            genesis_config,
3317            mint_keypair,
3318            ..
3319        } = create_genesis_config(1000);
3320        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3321        let keypair1 = Keypair::new();
3322        let keypair2 = Keypair::new();
3323        let keypair3 = Keypair::new();
3324
3325        // fund: put 4 in each of 1 and 2
3326        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
3327        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
3328
3329        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
3330        let entry_1_to_mint = next_entry(
3331            &bank.last_blockhash(),
3332            1,
3333            vec![system_transaction::transfer(
3334                &keypair1,
3335                &mint_keypair.pubkey(),
3336                1,
3337                bank.last_blockhash(),
3338            )],
3339        );
3340
3341        let entry_2_to_3_mint_to_1 = next_entry(
3342            &entry_1_to_mint.hash,
3343            1,
3344            vec![
3345                system_transaction::transfer(
3346                    &keypair2,
3347                    &keypair3.pubkey(),
3348                    2,
3349                    bank.last_blockhash(),
3350                ), // should be fine
3351                system_transaction::transfer(
3352                    &keypair1,
3353                    &mint_keypair.pubkey(),
3354                    2,
3355                    bank.last_blockhash(),
3356                ), // will collide
3357            ],
3358        );
3359
3360        assert_eq!(
3361            process_entries_for_tests_without_scheduler(
3362                &bank,
3363                vec![entry_1_to_mint, entry_2_to_3_mint_to_1],
3364            ),
3365            Ok(())
3366        );
3367
3368        assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
3369        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
3370        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
3371    }
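
    // An illustrative sketch of the write-lock rule that forces the colliding
    // transaction above into a later batch: a transaction joins the current
    // batch only if none of its writable accounts are already locked.
    // Accounts are modeled as plain &str names rather than real pubkeys.
    #[test]
    fn illustrate_write_lock_batching() {
        use std::collections::HashSet;
        // writable accounts per transaction, mirroring the two entries above
        let txs: Vec<Vec<&str>> = vec![
            vec!["keypair1", "mint"],     // entry 1: locks keypair1 and mint
            vec!["keypair2", "keypair3"], // no overlap, joins the same batch
            vec!["keypair1", "mint"],     // collides, must wait for a later batch
        ];
        let mut locked: HashSet<&str> = HashSet::new();
        let mut joins_current_batch = Vec::new();
        for tx in &txs {
            let conflict = tx.iter().any(|account| locked.contains(account));
            joins_current_batch.push(!conflict);
            if !conflict {
                for account in tx {
                    locked.insert(*account);
                }
            }
        }
        assert_eq!(joins_current_batch, vec![true, true, false]);
    }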
3372
3373    #[test]
3374    fn test_process_entries_2_txes_collision_and_error() {
3375        let GenesisConfigInfo {
3376            genesis_config,
3377            mint_keypair,
3378            ..
3379        } = create_genesis_config(1000);
3380        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3381        let keypair1 = Keypair::new();
3382        let keypair2 = Keypair::new();
3383        let keypair3 = Keypair::new();
3384        let keypair4 = Keypair::new();
3385
3386        // fund: put 4 in each of 1 and 2
3387        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
3388        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
3389        assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));
3390
3391        let good_tx = system_transaction::transfer(
3392            &keypair1,
3393            &mint_keypair.pubkey(),
3394            1,
3395            bank.last_blockhash(),
3396        );
3397
3398        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
3399        let entry_1_to_mint = next_entry(
3400            &bank.last_blockhash(),
3401            1,
3402            vec![
3403                good_tx.clone(),
3404                system_transaction::transfer(
3405                    &keypair4,
3406                    &keypair4.pubkey(),
3407                    1,
3408                    Hash::default(), // Should cause a transaction failure with BlockhashNotFound
3409                ),
3410            ],
3411        );
3412
3413        let entry_2_to_3_mint_to_1 = next_entry(
3414            &entry_1_to_mint.hash,
3415            1,
3416            vec![
3417                system_transaction::transfer(
3418                    &keypair2,
3419                    &keypair3.pubkey(),
3420                    2,
3421                    bank.last_blockhash(),
3422                ), // should be fine
3423                system_transaction::transfer(
3424                    &keypair1,
3425                    &mint_keypair.pubkey(),
3426                    2,
3427                    bank.last_blockhash(),
3428                ), // will collide
3429            ],
3430        );
3431
3432        assert_matches!(
3433            process_entries_for_tests_without_scheduler(
3434                &bank,
3435                vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()],
3436            ),
3437            Err(TransactionError::BlockhashNotFound)
3438        );
3439
3440        // The first transaction in the first entry was rolled back, so keypair1 didn't lose 1 lamport
3441        assert_eq!(bank.get_balance(&keypair1.pubkey()), 4);
3442        assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);
3443
3444        // Check all accounts are unlocked
3445        let txs1 = entry_1_to_mint.transactions;
3446        let txs2 = entry_2_to_3_mint_to_1.transactions;
3447        let batch1 = bank.prepare_entry_batch(txs1).unwrap();
3448        for result in batch1.lock_results() {
3449            assert!(result.is_ok());
3450        }
3451        // txs1 and txs2 have accounts that conflict, so we must drop txs1 first; see the lock-lifetime sketch after this test
3452        drop(batch1);
3453        let batch2 = bank.prepare_entry_batch(txs2).unwrap();
3454        for result in batch2.lock_results() {
3455            assert!(result.is_ok());
3456        }
3457        drop(batch2);
3458
3459        // ensure good_tx still succeeds; it was only rolled back above because another tx in its entry failed
3460        let entry_3 = next_entry(&entry_2_to_3_mint_to_1.hash, 1, vec![good_tx]);
3461        assert_matches!(
3462            process_entries_for_tests_without_scheduler(&bank, vec![entry_3]),
3463            Ok(())
3464        );
3465        // First transaction in third entry succeeded, so keypair1 lost 1 lamport
3466        assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
3467    }
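
    // A sketch of the lock lifetime rule behind the "drop txs1 first" step
    // above: a prepared batch holds its account locks until it is dropped, so
    // a batch over conflicting accounts can only take its locks afterwards.
    // This models the idea with a guard over a shared HashSet; it is not the
    // real TransactionBatch implementation.
    #[test]
    fn illustrate_batch_lock_lifetime() {
        use std::{cell::RefCell, collections::HashSet, rc::Rc};
        struct BatchGuard {
            locks: Rc<RefCell<HashSet<&'static str>>>,
            accounts: Vec<&'static str>,
        }
        impl BatchGuard {
            fn try_lock(
                locks: &Rc<RefCell<HashSet<&'static str>>>,
                accounts: &[&'static str],
            ) -> Option<Self> {
                let mut set = locks.borrow_mut();
                if accounts.iter().any(|account| set.contains(account)) {
                    return None; // conflicting accounts: like AccountInUse
                }
                for account in accounts {
                    set.insert(*account);
                }
                Some(Self {
                    locks: Rc::clone(locks),
                    accounts: accounts.to_vec(),
                })
            }
        }
        impl Drop for BatchGuard {
            fn drop(&mut self) {
                let mut set = self.locks.borrow_mut();
                for account in &self.accounts {
                    set.remove(account);
                }
            }
        }
        let locks = Rc::new(RefCell::new(HashSet::new()));
        let batch1 = BatchGuard::try_lock(&locks, &["keypair1", "mint"]).unwrap();
        // While batch1 is alive, a conflicting batch cannot take its locks...
        assert!(BatchGuard::try_lock(&locks, &["keypair1"]).is_none());
        drop(batch1);
        // ...but once it is dropped (as the test above does), it can.
        assert!(BatchGuard::try_lock(&locks, &["keypair1"]).is_some());
    }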
3468
3469    #[test]
3470    fn test_transaction_result_does_not_affect_bankhash() {
3471        solana_logger::setup();
3472        let GenesisConfigInfo {
3473            genesis_config,
3474            mint_keypair,
3475            ..
3476        } = create_genesis_config(1000);
3477
3478        fn get_instruction_errors() -> Vec<InstructionError> {
3479            vec![
3480                InstructionError::GenericError,
3481                InstructionError::InvalidArgument,
3482                InstructionError::InvalidInstructionData,
3483                InstructionError::InvalidAccountData,
3484                InstructionError::AccountDataTooSmall,
3485                InstructionError::InsufficientFunds,
3486                InstructionError::IncorrectProgramId,
3487                InstructionError::MissingRequiredSignature,
3488                InstructionError::AccountAlreadyInitialized,
3489                InstructionError::UninitializedAccount,
3490                InstructionError::UnbalancedInstruction,
3491                InstructionError::ModifiedProgramId,
3492                InstructionError::ExternalAccountLamportSpend,
3493                InstructionError::ExternalAccountDataModified,
3494                InstructionError::ReadonlyLamportChange,
3495                InstructionError::ReadonlyDataModified,
3496                InstructionError::DuplicateAccountIndex,
3497                InstructionError::ExecutableModified,
3498                InstructionError::RentEpochModified,
3499                InstructionError::NotEnoughAccountKeys,
3500                InstructionError::AccountDataSizeChanged,
3501                InstructionError::AccountNotExecutable,
3502                InstructionError::AccountBorrowFailed,
3503                InstructionError::AccountBorrowOutstanding,
3504                InstructionError::DuplicateAccountOutOfSync,
3505                InstructionError::Custom(0),
3506                InstructionError::InvalidError,
3507                InstructionError::ExecutableDataModified,
3508                InstructionError::ExecutableLamportChange,
3509                InstructionError::ExecutableAccountNotRentExempt,
3510                InstructionError::UnsupportedProgramId,
3511                InstructionError::CallDepth,
3512                InstructionError::MissingAccount,
3513                InstructionError::ReentrancyNotAllowed,
3514                InstructionError::MaxSeedLengthExceeded,
3515                InstructionError::InvalidSeeds,
3516                InstructionError::InvalidRealloc,
3517                InstructionError::ComputationalBudgetExceeded,
3518                InstructionError::PrivilegeEscalation,
3519                InstructionError::ProgramEnvironmentSetupFailure,
3520                InstructionError::ProgramFailedToComplete,
3521                InstructionError::ProgramFailedToCompile,
3522                InstructionError::Immutable,
3523                InstructionError::IncorrectAuthority,
3524                InstructionError::BorshIoError,
3525                InstructionError::AccountNotRentExempt,
3526                InstructionError::InvalidAccountOwner,
3527                InstructionError::ArithmeticOverflow,
3528                InstructionError::UnsupportedSysvar,
3529                InstructionError::IllegalOwner,
3530                InstructionError::MaxAccountsDataAllocationsExceeded,
3531                InstructionError::MaxAccountsExceeded,
3532                InstructionError::MaxInstructionTraceLengthExceeded,
3533                InstructionError::BuiltinProgramsMustConsumeComputeUnits,
3534            ]
3535        }
3536
3537        declare_process_instruction!(MockBuiltinOk, 1, |_invoke_context| {
3538            // Always succeeds
3539            Ok(())
3540        });
3541
3542        let mock_program_id = Pubkey::new_unique();
3543
3544        let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests(
3545            &genesis_config,
3546            mock_program_id,
3547            MockBuiltinOk::vm,
3548        );
3549
3550        let tx = Transaction::new_signed_with_payer(
3551            &[Instruction::new_with_bincode(
3552                mock_program_id,
3553                &10,
3554                Vec::new(),
3555            )],
3556            Some(&mint_keypair.pubkey()),
3557            &[&mint_keypair],
3558            bank.last_blockhash(),
3559        );
3560
3561        let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
3562        let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]);
3563        bank.freeze();
3564        let ok_bank_details = SlotDetails::new_from_bank(&bank, true).unwrap();
3565        assert!(result.is_ok());
3566
3567        declare_process_instruction!(MockBuiltinErr, 1, |invoke_context| {
3568            let instruction_errors = get_instruction_errors();
3569
3570            let instruction_context = invoke_context
3571                .transaction_context
3572                .get_current_instruction_context()
3573                .expect("Failed to get instruction context");
3574            let err = instruction_context
3575                .get_instruction_data()
3576                .first()
3577                .expect("Failed to get instruction data");
3578            Err(instruction_errors
3579                .get(*err as usize)
3580                .expect("Invalid error index")
3581                .clone())
3582        });
3583
3584        // Store details to compare against subsequent iterations
3585        let mut err_bank_details = None;
3586
3587        (0..get_instruction_errors().len()).for_each(|err| {
3588            let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests(
3589                &genesis_config,
3590                mock_program_id,
3591                MockBuiltinErr::vm,
3592            );
3593
3594            let tx = Transaction::new_signed_with_payer(
3595                &[Instruction::new_with_bincode(
3596                    mock_program_id,
3597                    &(err as u8),
3598                    Vec::new(),
3599                )],
3600                Some(&mint_keypair.pubkey()),
3601                &[&mint_keypair],
3602                bank.last_blockhash(),
3603            );
3604
3605            let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
3606            let bank = Arc::new(bank);
3607            let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]);
3608            assert!(result.is_ok()); // No failing transaction error - only instruction errors
3609            bank.freeze();
3610            let bank_details = SlotDetails::new_from_bank(&bank, true).unwrap();
3611
3612            // Transaction success/failure should not affect block hash ...
3613            assert_eq!(
3614                ok_bank_details
3615                    .bank_hash_components
3616                    .as_ref()
3617                    .unwrap()
3618                    .last_blockhash,
3619                bank_details
3620                    .bank_hash_components
3621                    .as_ref()
3622                    .unwrap()
3623                    .last_blockhash
3624            );
3625            // Though the bank hash is not affected, bank_details should differ.
3626            assert_ne!(ok_bank_details, bank_details);
3627            // Different types of transaction failure should not affect bank hash
3628            if let Some(prev_bank_details) = &err_bank_details {
3629                assert_eq!(
3630                    *prev_bank_details,
3631                    bank_details,
3632                    "bank hash mismatched for tx error: {:?}",
3633                    get_instruction_errors()[err]
3634                );
3635            } else {
3636                err_bank_details = Some(bank_details);
3637            }
3638        });
3639    }
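
    // A sketch of the property asserted above: the bank hash commits to the
    // resulting account state, not to transaction result codes, and a failed
    // instruction's writes are rolled back (fee charges aside), so identical
    // committed state yields an identical hash. DefaultHasher stands in for
    // the real bank hash computation.
    #[test]
    fn illustrate_state_based_hashing() {
        use std::{
            collections::hash_map::DefaultHasher,
            hash::{Hash as _, Hasher},
        };
        fn state_hash(balances: &[u64]) -> u64 {
            let mut hasher = DefaultHasher::new();
            balances.hash(&mut hasher);
            hasher.finish()
        }
        let committed = vec![100u64, 50];
        // A failing instruction's tentative writes...
        let mut tentative = committed.clone();
        tentative[0] -= 10;
        // ...are rolled back, so the committed state (and hash) is unchanged,
        // while actually committing the write would have changed it.
        assert_eq!(state_hash(&committed), state_hash(&committed.clone()));
        assert_ne!(state_hash(&committed), state_hash(&tentative));
    }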
3640
3641    #[test_case(false; "old")]
3642    #[test_case(true; "simd83")]
3643    fn test_process_entries_2nd_entry_collision_with_self_and_error(
3644        relax_intrabatch_account_locks: bool,
3645    ) {
3646        solana_logger::setup();
3647
3648        let GenesisConfigInfo {
3649            genesis_config,
3650            mint_keypair,
3651            ..
3652        } = create_genesis_config(1000);
3653        let mut bank = Bank::new_for_tests(&genesis_config);
3654        if !relax_intrabatch_account_locks {
3655            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
3656        }
3657        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
3658        let keypair1 = Keypair::new();
3659        let keypair2 = Keypair::new();
3660        let keypair3 = Keypair::new();
3661
3662        // fund: put some money in each of 1 and 2
3663        assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
3664        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
3665
3666        // 3 entries: first has a transfer, 2nd has a conflict with 1st, 3rd has a conflict with itself
3667        let entry_1_to_mint = next_entry(
3668            &bank.last_blockhash(),
3669            1,
3670            vec![system_transaction::transfer(
3671                &keypair1,
3672                &mint_keypair.pubkey(),
3673                1,
3674                bank.last_blockhash(),
3675            )],
3676        );
3677        // should now be:
3678        // keypair1=4
3679        // keypair2=4
3680        // keypair3=0
3681
3682        let entry_2_to_3_and_1_to_mint = next_entry(
3683            &entry_1_to_mint.hash,
3684            1,
3685            vec![
3686                system_transaction::transfer(
3687                    &keypair2,
3688                    &keypair3.pubkey(),
3689                    2,
3690                    bank.last_blockhash(),
3691                ), // should be fine
3692                system_transaction::transfer(
3693                    &keypair1,
3694                    &mint_keypair.pubkey(),
3695                    2,
3696                    bank.last_blockhash(),
3697                ), // will collide with preceding entry
3698            ],
3699        );
3700        // should now be:
3701        // keypair1=2
3702        // keypair2=2
3703        // keypair3=2
3704
3705        let entry_conflict_itself = next_entry(
3706            &entry_2_to_3_and_1_to_mint.hash,
3707            1,
3708            vec![
3709                system_transaction::transfer(
3710                    &keypair1,
3711                    &keypair3.pubkey(),
3712                    1,
3713                    bank.last_blockhash(),
3714                ),
3715                system_transaction::transfer(
3716                    &keypair1,
3717                    &keypair2.pubkey(),
3718                    1,
3719                    bank.last_blockhash(),
3720                ), // will collide with preceding transaction
3721            ],
3722        );
3723        // if successful, becomes:
3724        // keypair1=0
3725        // keypair2=3
3726        // keypair3=3
3727
3728        // succeeds following simd83 locking, fails otherwise
3729        let result = process_entries_for_tests_without_scheduler(
3730            &bank,
3731            vec![
3732                entry_1_to_mint,
3733                entry_2_to_3_and_1_to_mint,
3734                entry_conflict_itself,
3735            ],
3736        );
3737
3738        let balances = [
3739            bank.get_balance(&keypair1.pubkey()),
3740            bank.get_balance(&keypair2.pubkey()),
3741            bank.get_balance(&keypair3.pubkey()),
3742        ];
3743
3744        if relax_intrabatch_account_locks {
3745            assert!(result.is_ok());
3746            assert_eq!(balances, [0, 3, 3]);
3747        } else {
3748            assert!(result.is_err());
3749            assert_eq!(balances, [2, 2, 2]);
3750        }
3751    }
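
    // A sketch of the intrabatch locking difference the two cases above
    // exercise: before the relaxation, two transactions in one batch may not
    // share a writable account at all; with relax_intrabatch_account_locks
    // they may, because the batch is executed in order. Hypothetical model,
    // not the real account locking code.
    #[test]
    fn illustrate_intrabatch_lock_relaxation() {
        fn batch_admissible(writable_accounts: &[&str], relax: bool) -> bool {
            use std::collections::HashSet;
            if relax {
                return true; // sequenced execution tolerates shared accounts
            }
            let mut seen = HashSet::new();
            writable_accounts.iter().all(|account| seen.insert(*account))
        }
        // entry_conflict_itself above: two transfers both debiting keypair1
        let writable_accounts = ["keypair1", "keypair3", "keypair1", "keypair2"];
        assert!(!batch_admissible(&writable_accounts, false)); // old: AccountInUse
        assert!(batch_admissible(&writable_accounts, true)); // simd83: succeeds
    }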
3752
3753    #[test_case(false; "old")]
3754    #[test_case(true; "simd83")]
3755    fn test_process_entry_duplicate_transaction(relax_intrabatch_account_locks: bool) {
3756        solana_logger::setup();
3757
3758        let GenesisConfigInfo {
3759            genesis_config,
3760            mint_keypair,
3761            ..
3762        } = create_genesis_config(1000);
3763        let mut bank = Bank::new_for_tests(&genesis_config);
3764        if !relax_intrabatch_account_locks {
3765            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
3766        }
3767        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
3768        let keypair1 = Keypair::new();
3769        let keypair2 = Keypair::new();
3770
3771        // fund: put some money in each of 1 and 2
3772        assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
3773        assert_matches!(bank.transfer(5, &mint_keypair, &keypair2.pubkey()), Ok(_));
3774
3775        // one entry, two instances of the same transaction. this entry is invalid
3776        // without simd83: due to lock conflicts
3777        // with simd83: due to message hash duplication
3778        let entry_1_to_2_twice = next_entry(
3779            &bank.last_blockhash(),
3780            1,
3781            vec![
3782                system_transaction::transfer(
3783                    &keypair1,
3784                    &keypair2.pubkey(),
3785                    1,
3786                    bank.last_blockhash(),
3787                ),
3788                system_transaction::transfer(
3789                    &keypair1,
3790                    &keypair2.pubkey(),
3791                    1,
3792                    bank.last_blockhash(),
3793                ),
3794            ],
3795        );
3796        // should now be:
3797        // keypair1=5
3798        // keypair2=5
3799
3800        // fails in both modes, but with different errors: AlreadyProcessed under simd83 locking, AccountInUse otherwise
3801        let result = process_entries_for_tests_without_scheduler(&bank, vec![entry_1_to_2_twice]);
3802
3803        let balances = [
3804            bank.get_balance(&keypair1.pubkey()),
3805            bank.get_balance(&keypair2.pubkey()),
3806        ];
3807
3808        assert_eq!(balances, [5, 5]);
3809        if relax_intrabatch_account_locks {
3810            assert_eq!(result, Err(TransactionError::AlreadyProcessed));
3811        } else {
3812            assert_eq!(result, Err(TransactionError::AccountInUse));
3813        }
3814    }
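
    // A sketch of the duplicate detection asserted above: with relaxed
    // intrabatch locks the lock check no longer rejects the second copy of a
    // transaction, so a message-hash comparison catches it instead. Message
    // hashes are modeled as plain integers.
    #[test]
    fn illustrate_duplicate_message_hash_check() {
        use std::collections::HashSet;
        let message_hashes = [42u64, 42u64]; // two copies of the same transfer
        let mut seen = HashSet::new();
        let all_unique = message_hashes.iter().all(|hash| seen.insert(*hash));
        // Analogous to Err(TransactionError::AlreadyProcessed) above.
        assert!(!all_unique);
    }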
3815
3816    #[test]
3817    fn test_process_entries_2_entries_par() {
3818        let GenesisConfigInfo {
3819            genesis_config,
3820            mint_keypair,
3821            ..
3822        } = create_genesis_config(1000);
3823        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3824        let keypair1 = Keypair::new();
3825        let keypair2 = Keypair::new();
3826        let keypair3 = Keypair::new();
3827        let keypair4 = Keypair::new();
3828
3829        //load accounts
3830        let tx = system_transaction::transfer(
3831            &mint_keypair,
3832            &keypair1.pubkey(),
3833            1,
3834            bank.last_blockhash(),
3835        );
3836        assert_eq!(bank.process_transaction(&tx), Ok(()));
3837        let tx = system_transaction::transfer(
3838            &mint_keypair,
3839            &keypair2.pubkey(),
3840            1,
3841            bank.last_blockhash(),
3842        );
3843        assert_eq!(bank.process_transaction(&tx), Ok(()));
3844
3845        // ensure bank can process 2 entries that do not have a common account and no tick is registered
3846        let blockhash = bank.last_blockhash();
3847        let tx =
3848            system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash());
3849        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
3850        let tx =
3851            system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash());
3852        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
3853        assert_eq!(
3854            process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]),
3855            Ok(())
3856        );
3857        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
3858        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
3859        assert_eq!(bank.last_blockhash(), blockhash);
3860    }
3861
3862    #[test]
3863    fn test_process_entry_tx_random_execution_with_error() {
3864        let GenesisConfigInfo {
3865            genesis_config,
3866            mint_keypair,
3867            ..
3868        } = create_genesis_config(1_000_000_000);
3869        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3870
3871        const NUM_TRANSFERS_PER_ENTRY: usize = 8;
3872        // NUM_TRANSFERS is large enough to scramble locks and results
3873        const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
3874
3875        let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
3876
3877        // give everybody one lamport
3878        for keypair in &keypairs {
3879            bank.transfer(1, &mint_keypair, &keypair.pubkey())
3880                .expect("funding failed");
3881        }
3882        let mut hash = bank.last_blockhash();
3883
3884        let present_account_key = Keypair::new();
3885        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
3886        bank.store_account(&present_account_key.pubkey(), &present_account);
3887
3888        let entries: Vec<_> = (0..NUM_TRANSFERS)
3889            .step_by(NUM_TRANSFERS_PER_ENTRY)
3890            .map(|i| {
3891                let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY)
3892                    .map(|j| {
3893                        system_transaction::transfer(
3894                            &keypairs[i + j],
3895                            &keypairs[i + j + NUM_TRANSFERS].pubkey(),
3896                            1,
3897                            bank.last_blockhash(),
3898                        )
3899                    })
3900                    .collect::<Vec<_>>();
3901
3902                transactions.push(system_transaction::create_account(
3903                    &mint_keypair,
3904                    &present_account_key, // puts a TX error in results
3905                    bank.last_blockhash(),
3906                    1,
3907                    0,
3908                    &solana_pubkey::new_rand(),
3909                ));
3910
3911                next_entry_mut(&mut hash, 0, transactions)
3912            })
3913            .collect();
3914        assert_eq!(
3915            process_entries_for_tests_without_scheduler(&bank, entries),
3916            Ok(())
3917        );
3918    }
3919
3920    #[test]
3921    fn test_process_entry_tx_random_execution_no_error() {
3922        // The entropy multiplier should be big enough to provide sufficient
3923        // entropy, but small enough that the test does not take too long to run.
3924        let entropy_multiplier: usize = 25;
3925        let initial_lamports = 100;
3926
3927        // The number of accounts needs to be a multiple of 4 for the test
3928        // to execute correctly.
3929        let num_accounts = entropy_multiplier * 4;
3930        let GenesisConfigInfo {
3931            genesis_config,
3932            mint_keypair,
3933            ..
3934        } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports);
3935
3936        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3937
3938        let mut keypairs: Vec<Keypair> = vec![];
3939
3940        for _ in 0..num_accounts {
3941            let keypair = Keypair::new();
3942            let create_account_tx = system_transaction::transfer(
3943                &mint_keypair,
3944                &keypair.pubkey(),
3945                0,
3946                bank.last_blockhash(),
3947            );
3948            assert_eq!(bank.process_transaction(&create_account_tx), Ok(()));
3949            assert_matches!(
3950                bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()),
3951                Ok(_)
3952            );
3953            keypairs.push(keypair);
3954        }
3955
3956        let mut tx_vector: Vec<Transaction> = vec![];
3957
3958        for i in (0..num_accounts).step_by(4) {
3959            tx_vector.append(&mut vec![
3960                system_transaction::transfer(
3961                    &keypairs[i + 1],
3962                    &keypairs[i].pubkey(),
3963                    initial_lamports,
3964                    bank.last_blockhash(),
3965                ),
3966                system_transaction::transfer(
3967                    &keypairs[i + 3],
3968                    &keypairs[i + 2].pubkey(),
3969                    initial_lamports,
3970                    bank.last_blockhash(),
3971                ),
3972            ]);
3973        }
3974
3975        // Transfer lamports to each other
3976        let entry = next_entry(&bank.last_blockhash(), 1, tx_vector);
3977        assert_eq!(
3978            process_entries_for_tests_without_scheduler(&bank, vec![entry]),
3979            Ok(())
3980        );
3981        bank.squash();
3982
3983        // Every even-numbered keypair should have a balance of
3984        // 2 * initial_lamports and every odd-numbered keypair a balance of 0,
3985        // which proves that even with a random order of execution, the overall
3986        // state remains consistent.
3987        for (i, keypair) in keypairs.iter().enumerate() {
3988            if i % 2 == 0 {
3989                assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports);
3990            } else {
3991                assert_eq!(bank.get_balance(&keypair.pubkey()), 0);
3992            }
3993        }
3994    }
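
    // A tiny model of the balance invariant checked above: within each group
    // of four accounts, each odd-indexed account sends its full balance to
    // the even-indexed account before it, so the end state is fixed no
    // matter which execution order the runtime picks.
    #[test]
    fn illustrate_pairwise_transfer_invariant() {
        let initial_lamports: u64 = 100;
        let num_accounts = 8; // any multiple of 4 works
        let mut balances = vec![initial_lamports; num_accounts];
        for i in (0..num_accounts).step_by(4) {
            // keypairs[i + 1] -> keypairs[i], as in tx_vector above
            balances[i] += initial_lamports;
            balances[i + 1] -= initial_lamports;
            // keypairs[i + 3] -> keypairs[i + 2]
            balances[i + 2] += initial_lamports;
            balances[i + 3] -= initial_lamports;
        }
        for (i, balance) in balances.iter().enumerate() {
            let expected = if i % 2 == 0 { 2 * initial_lamports } else { 0 };
            assert_eq!(*balance, expected);
        }
    }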
3995
3996    #[test]
3997    fn test_process_entries_2_entries_tick() {
3998        let GenesisConfigInfo {
3999            genesis_config,
4000            mint_keypair,
4001            ..
4002        } = create_genesis_config(1000);
4003        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
4004        let keypair1 = Keypair::new();
4005        let keypair2 = Keypair::new();
4006        let keypair3 = Keypair::new();
4007        let keypair4 = Keypair::new();
4008
4009        //load accounts
4010        let tx = system_transaction::transfer(
4011            &mint_keypair,
4012            &keypair1.pubkey(),
4013            1,
4014            bank.last_blockhash(),
4015        );
4016        assert_eq!(bank.process_transaction(&tx), Ok(()));
4017        let tx = system_transaction::transfer(
4018            &mint_keypair,
4019            &keypair2.pubkey(),
4020            1,
4021            bank.last_blockhash(),
4022        );
4023        assert_eq!(bank.process_transaction(&tx), Ok(()));
4024
4025        let blockhash = bank.last_blockhash();
4026        while blockhash == bank.last_blockhash() {
4027            bank.register_default_tick_for_test();
4028        }
4029
4030        // ensure bank can process 2 entries that do not have a common account, with a tick registered between them
4031        let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash);
4032        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
4033        let tick = next_entry(&entry_1.hash, 1, vec![]);
4034        let tx =
4035            system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash());
4036        let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
4037        assert_eq!(
4038            process_entries_for_tests_without_scheduler(
4039                &bank,
4040                vec![entry_1, tick, entry_2.clone()],
4041            ),
4042            Ok(())
4043        );
4044        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
4045        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
4046
4047        // ensure that an error is returned for an empty account (keypair2)
4048        let tx =
4049            system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash());
4050        let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
4051        assert_eq!(
4052            process_entries_for_tests_without_scheduler(&bank, vec![entry_3]),
4053            Err(TransactionError::AccountNotFound)
4054        );
4055    }
4056
4057    #[test]
4058    fn test_update_transaction_statuses() {
4059        let GenesisConfigInfo {
4060            genesis_config,
4061            mint_keypair,
4062            ..
4063        } = create_genesis_config(11_000);
4064        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
4065
4066        // Make sure instruction errors still update the signature cache
4067        let pubkey = solana_pubkey::new_rand();
4068        bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
4069        assert_eq!(bank.transaction_count(), 1);
4070        assert_eq!(bank.get_balance(&pubkey), 1_000);
4071        assert_eq!(
4072            bank.transfer(10_001, &mint_keypair, &pubkey),
4073            Err(TransactionError::InstructionError(
4074                0,
4075                SystemError::ResultWithNegativeLamports.into(),
4076            ))
4077        );
4078        assert_eq!(
4079            bank.transfer(10_001, &mint_keypair, &pubkey),
4080            Err(TransactionError::AlreadyProcessed)
4081        );
4082
4083        // Make sure fees-only transactions still update the signature cache
4084        let missing_program_id = Pubkey::new_unique();
4085        let tx = Transaction::new_signed_with_payer(
4086            &[Instruction::new_with_bincode(
4087                missing_program_id,
4088                &10,
4089                Vec::new(),
4090            )],
4091            Some(&mint_keypair.pubkey()),
4092            &[&mint_keypair],
4093            bank.last_blockhash(),
4094        );
4095        // First process attempt will fail but still update status cache
4096        assert_eq!(
4097            bank.process_transaction(&tx),
4098            Err(TransactionError::ProgramAccountNotFound)
4099        );
4100        // Second attempt will be rejected since tx was already in status cache
4101        assert_eq!(
4102            bank.process_transaction(&tx),
4103            Err(TransactionError::AlreadyProcessed)
4104        );
4105
4106        // Make sure other errors don't update the signature cache
4107        let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
4108        let signature = tx.signatures[0];
4109
4110        // Should fail with blockhash not found
4111        assert_eq!(
4112            bank.process_transaction(&tx).map(|_| signature),
4113            Err(TransactionError::BlockhashNotFound)
4114        );
4115
4116        // Should fail again with blockhash not found
4117        assert_eq!(
4118            bank.process_transaction(&tx).map(|_| signature),
4119            Err(TransactionError::BlockhashNotFound)
4120        );
4121    }
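
    // A simplified sketch of the status-cache rule the three cases above walk
    // through: a signature is recorded whenever the transaction was actually
    // processed (success, instruction error, or fee-only failure), but not
    // when it never ran at all (e.g. BlockhashNotFound). The enum is a
    // hypothetical stand-in for the real commit results.
    #[test]
    fn illustrate_status_cache_insertion_rule() {
        enum Outcome {
            Success,
            InstructionError,
            FeeOnlyFailure,
            NotProcessed, // e.g. BlockhashNotFound
        }
        fn recorded_in_status_cache(outcome: &Outcome) -> bool {
            !matches!(outcome, Outcome::NotProcessed)
        }
        // Recorded transactions are rejected as AlreadyProcessed on resubmission,
        // while an unrecorded one can be retried, matching the assertions above.
        assert!(recorded_in_status_cache(&Outcome::Success));
        assert!(recorded_in_status_cache(&Outcome::InstructionError));
        assert!(recorded_in_status_cache(&Outcome::FeeOnlyFailure));
        assert!(!recorded_in_status_cache(&Outcome::NotProcessed));
    }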
4122
4123    #[test_case(false; "old")]
4124    #[test_case(true; "simd83")]
4125    fn test_update_transaction_statuses_fail(relax_intrabatch_account_locks: bool) {
4126        let GenesisConfigInfo {
4127            genesis_config,
4128            mint_keypair,
4129            ..
4130        } = create_genesis_config(11_000);
4131        let mut bank = Bank::new_for_tests(&genesis_config);
4132        if !relax_intrabatch_account_locks {
4133            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
4134        }
4135        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
4136        let keypair1 = Keypair::new();
4137        let keypair2 = Keypair::new();
4138        let success_tx = system_transaction::transfer(
4139            &mint_keypair,
4140            &keypair1.pubkey(),
4141            1,
4142            bank.last_blockhash(),
4143        );
4144        let test_tx = system_transaction::transfer(
4145            &mint_keypair,
4146            &keypair2.pubkey(),
4147            2,
4148            bank.last_blockhash(),
4149        );
4150
4151        let entry_1_to_mint = next_entry(
4152            &bank.last_blockhash(),
4153            1,
4154            vec![
4155                success_tx,
4156                test_tx.clone(), // will collide
4157            ],
4158        );
4159
4160        // succeeds with simd83, fails because of account locking conflict otherwise
4161        assert_eq!(
4162            process_entries_for_tests_without_scheduler(&bank, vec![entry_1_to_mint]),
4163            if relax_intrabatch_account_locks {
4164                Ok(())
4165            } else {
4166                Err(TransactionError::AccountInUse)
4167            }
4168        );
4169
4170        // fails with simd83 as already processed, succeeds otherwise
4171        assert_eq!(
4172            bank.process_transaction(&test_tx),
4173            if relax_intrabatch_account_locks {
4174                Err(TransactionError::AlreadyProcessed)
4175            } else {
4176                Ok(())
4177            }
4178        );
4179    }

    #[test]
    fn test_halt_at_slot_starting_snapshot_root() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);

        // Create roots at slots 0, 1
        let forks = tr(0) / tr(1);
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.add_tree(
            forks,
            false,
            true,
            genesis_config.ticks_per_slot,
            genesis_config.hash(),
        );
        blockstore.set_roots([0, 1].iter()).unwrap();

        // Specify halting at slot 0
        let opts = ProcessOptions {
            run_verification: true,
            halt_at_slot: Some(0),
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
        let bank_forks = bank_forks.read().unwrap();

        // Should be able to fetch slot 0 because we specified halting at slot 0, even
        // if there is a greater root at slot 1.
        assert!(bank_forks.get(0).is_some());
    }

    #[test]
    fn test_process_blockstore_from_root() {
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(123);

        let ticks_per_slot = 1;
        genesis_config.ticks_per_slot = ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        /*
          Build a blockstore in the ledger with the following fork structure:

               slot 0 (all ticks)
                 |
               slot 1 (all ticks)
                 |
               slot 2 (all ticks)
                 |
               slot 3 (all ticks) -> root
                 |
               slot 4 (all ticks)
                 |
               slot 5 (all ticks) -> root
                 |
               slot 6 (all ticks)
        */

        let mut last_hash = blockhash;
        for i in 0..6 {
            last_hash =
                fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
        }
        blockstore.set_roots([3, 5].iter()).unwrap();

        // Set up bank1
        let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
        let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap();
        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let recyclers = VerifyRecyclers::default();
        let replay_tx_thread_pool = create_thread_pool(1);
        process_bank_0(
            &bank0,
            &blockstore,
            &replay_tx_thread_pool,
            &opts,
            None,
            &recyclers,
            None,
        )
        .unwrap();
        let bank0_last_blockhash = bank0.last_blockhash();
        let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent(
            bank0.clone_without_scheduler(),
            &Pubkey::default(),
            1,
        ));
        confirm_full_slot(
            &blockstore,
            &bank1,
            &replay_tx_thread_pool,
            &opts,
            &recyclers,
            &mut ConfirmationProgress::new(bank0_last_blockhash),
            None,
            None,
            None,
            &mut ExecuteTimings::default(),
        )
        .unwrap();
        bank_forks.write().unwrap().set_root(1, None, None).unwrap();

        let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);

        // Test process_blockstore_from_root() from slot 1 onwards
        process_blockstore_from_root(
            &blockstore,
            &bank_forks,
            &leader_schedule_cache,
            &opts,
            None,
            None,
            None, // snapshot_controller
        )
        .unwrap();

        let bank_forks = bank_forks.read().unwrap();

        assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
        assert_eq!(bank_forks.working_bank().slot(), 6);
        assert_eq!(bank_forks.root(), 5);

        // Verify the parents of the head of the fork
        assert_eq!(
            &bank_forks[6]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[5]
        );

        // Check that bank_forks has the correct banks
        verify_fork_infos(&bank_forks);
    }

    #[test]
    #[ignore]
    fn test_process_entries_stress() {
        // this test throws lots of rayon threads at process_entries()
        // to find bugs in very low-level code
        solana_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1_000_000_000);
        let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));

        const NUM_TRANSFERS_PER_ENTRY: usize = 8;
        const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;

        let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

        // give everybody one lamport
        for keypair in &keypairs {
            bank.transfer(1, &mint_keypair, &keypair.pubkey())
                .expect("funding failed");
        }

        let present_account_key = Keypair::new();
        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
        bank.store_account(&present_account_key.pubkey(), &present_account);

        let mut i = 0;
        let mut hash = bank.last_blockhash();
        let mut root: Option<Arc<Bank>> = None;
        loop {
            let entries: Vec<_> = (0..NUM_TRANSFERS)
                .step_by(NUM_TRANSFERS_PER_ENTRY)
                .map(|i| {
                    next_entry_mut(&mut hash, 0, {
                        let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
                            .map(|i| {
                                system_transaction::transfer(
                                    &keypairs[i],
                                    &keypairs[i + NUM_TRANSFERS].pubkey(),
                                    1,
                                    bank.last_blockhash(),
                                )
                            })
                            .collect::<Vec<_>>();

                        transactions.push(system_transaction::create_account(
                            &mint_keypair,
                            &present_account_key, // puts a TX error in results
                            bank.last_blockhash(),
                            100,
                            100,
                            &solana_pubkey::new_rand(),
                        ));
                        transactions
                    })
                })
                .collect();
            info!("paying iteration {i}");
            process_entries_for_tests_without_scheduler(&bank, entries).expect("paying failed");

            let entries: Vec<_> = (0..NUM_TRANSFERS)
                .step_by(NUM_TRANSFERS_PER_ENTRY)
                .map(|i| {
                    next_entry_mut(
                        &mut hash,
                        0,
                        (i..i + NUM_TRANSFERS_PER_ENTRY)
                            .map(|i| {
                                system_transaction::transfer(
                                    &keypairs[i + NUM_TRANSFERS],
                                    &keypairs[i].pubkey(),
                                    1,
                                    bank.last_blockhash(),
                                )
                            })
                            .collect::<Vec<_>>(),
                    )
                })
                .collect();

            info!("refunding iteration {i}");
            process_entries_for_tests_without_scheduler(&bank, entries).expect("refunding failed");

            // advance to next block
            process_entries_for_tests_without_scheduler(
                &bank,
                (0..bank.ticks_per_slot())
                    .map(|_| next_entry_mut(&mut hash, 1, vec![]))
                    .collect::<Vec<_>>(),
            )
            .expect("process ticks failed");

            if i % 16 == 0 {
                if let Some(old_root) = root {
                    old_root.squash();
                }
                root = Some(bank.clone());
            }
            i += 1;

            let slot = bank.slot() + thread_rng().gen_range(1..3);
            bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot));
        }
    }

    #[test]
    fn test_process_ledger_ticks_ordering() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(100);
        let (bank0, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let genesis_hash = genesis_config.hash();
        let keypair = Keypair::new();

        // Simulate a slot of virtual ticks, which creates a new blockhash
        let mut entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash);

        // The new blockhash is going to be the hash of the last tick in the block
        let new_blockhash = entries.last().unwrap().hash;
        // Create a transaction that references the new blockhash; it should
        // still be able to find the blockhash if we process all transactions
        // in the same batch
        let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash);
        let entry = next_entry(&new_blockhash, 1, vec![tx]);
        entries.push(entry);

        process_entries_for_tests_without_scheduler(&bank0, entries).unwrap();
        assert_eq!(bank0.get_balance(&keypair.pubkey()), 1)
    }

    fn get_epoch_schedule(genesis_config: &GenesisConfig) -> EpochSchedule {
        let bank = Bank::new_for_tests(genesis_config);
        bank.epoch_schedule().clone()
    }

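    /// Collects the slots of all frozen banks in `bank_forks`, sorted ascending.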
    fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
        let mut slots: Vec<_> = bank_forks
            .frozen_banks()
            .map(|(slot, _bank)| slot)
            .collect();
        slots.sort_unstable();
        slots
    }

    // Check that `bank_forks` contains all the ancestors and banks for each of
    // its frozen forks
    fn verify_fork_infos(bank_forks: &BankForks) {
        for slot in frozen_bank_slots(bank_forks) {
            let head_bank = &bank_forks[slot];
            let mut parents = head_bank.parents();
            parents.push(head_bank.clone());

            // Ensure the tip of each fork and all its parents are in the given bank_forks
            for parent in parents {
                let parent_bank = &bank_forks[parent.slot()];
                assert_eq!(parent_bank.slot(), parent.slot());
                assert!(parent_bank.is_frozen());
            }
        }
    }

    #[test]
    fn test_get_first_error() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1_000_000_000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);

        let present_account_key = Keypair::new();
        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
        bank.store_account(&present_account_key.pubkey(), &present_account);

        let keypair = Keypair::new();

        // Create two transactions that fail with different errors
        let account_not_found_tx = system_transaction::transfer(
            &keypair,
            &solana_pubkey::new_rand(),
            42,
            bank.last_blockhash(),
        );
        let account_not_found_sig = account_not_found_tx.signatures[0];
        let invalid_blockhash_tx = system_transaction::transfer(
            &mint_keypair,
            &solana_pubkey::new_rand(),
            42,
            Hash::default(),
        );
        let txs = vec![account_not_found_tx, invalid_blockhash_tx];
        let batch = bank.prepare_batch_for_tests(txs);
        let (commit_results, _) = batch.bank().load_execute_and_commit_transactions(
            &batch,
            MAX_PROCESSING_AGE,
            ExecutionRecordingConfig::new_single_setting(false),
            &mut ExecuteTimings::default(),
            None,
        );
        let (err, signature) = do_get_first_error(&batch, &commit_results).unwrap();
        assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
        assert_eq!(signature, account_not_found_sig);
    }
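
    // Illustrative sketch (an assumption mirroring the assertion above):
    // do_get_first_error() reports the error of the first failed transaction in
    // batch order, together with that transaction's signature. Modeled here over
    // plain per-transaction results keyed by signature:
    #[allow(dead_code)]
    fn first_error_sketch(
        results: &[(Signature, Result<()>)],
    ) -> Option<(TransactionError, Signature)> {
        results.iter().find_map(|(signature, result)| {
            result.as_ref().err().map(|err| (err.clone(), *signature))
        })
    }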

    #[test]
    fn test_replay_vote_sender() {
        let validator_keypairs: Vec<_> =
            (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
        let GenesisConfigInfo {
            genesis_config,
            voting_keypair: _,
            ..
        } = create_genesis_config_with_vote_accounts(
            1_000_000_000,
            &validator_keypairs,
            vec![100; validator_keypairs.len()],
        );
        let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        bank0.freeze();

        let bank1 = bank_forks
            .write()
            .unwrap()
            .insert(Bank::new_from_parent(
                bank0.clone(),
                &solana_pubkey::new_rand(),
                1,
            ))
            .clone_without_scheduler();

        // bank1 inherits its last blockhash from its parent
        let bank_1_blockhash = bank1.last_blockhash();

        // Create vote transactions referencing that blockhash: every third one
        // is valid, the rest have either a wrong authorized voter or a vote for
        // a non-existent bank
        let mut expected_successful_voter_pubkeys = BTreeSet::new();
        let vote_txs: Vec<_> = validator_keypairs
            .iter()
            .enumerate()
            .map(|(i, validator_keypairs)| {
                let tower_sync = TowerSync::new_from_slots(vec![0], bank0.hash(), None);
                if i % 3 == 0 {
                    // These votes are correct
                    expected_successful_voter_pubkeys
                        .insert(validator_keypairs.vote_keypair.pubkey());
                    vote_transaction::new_tower_sync_transaction(
                        tower_sync,
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &validator_keypairs.vote_keypair,
                        None,
                    )
                } else if i % 3 == 1 {
                    // These have the wrong authorized voter
                    vote_transaction::new_tower_sync_transaction(
                        tower_sync,
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &Keypair::new(),
                        None,
                    )
                } else {
                    // These have an invalid vote for non-existent bank 2
                    vote_transaction::new_tower_sync_transaction(
                        TowerSync::from(vec![(bank1.slot() + 1, 1)]),
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &validator_keypairs.vote_keypair,
                        None,
                    )
                }
            })
            .collect();
        let entry = next_entry(&bank_1_blockhash, 1, vote_txs);
        let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded();
        let _ = process_entries_for_tests(
            &BankWithScheduler::new_without_scheduler(bank1),
            vec![entry],
            None,
            Some(&replay_vote_sender),
        );
        let successes: BTreeSet<Pubkey> = replay_vote_receiver
            .try_iter()
            .map(|(vote_pubkey, ..)| vote_pubkey)
            .collect();
        assert_eq!(successes, expected_successful_voter_pubkeys);
    }

    fn make_slot_with_vote_tx(
        blockstore: &Blockstore,
        ticks_per_slot: u64,
        tx_landed_slot: Slot,
        parent_slot: Slot,
        parent_blockhash: &Hash,
        vote_tx: Transaction,
        slot_leader_keypair: &Arc<Keypair>,
    ) {
        // Add votes to `last_slot` so that `root` will be confirmed
        let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
        let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
        entries.insert(0, vote_entry);
        blockstore
            .write_entries(
                tx_landed_slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                slot_leader_keypair,
                entries,
                0,
            )
            .unwrap();
    }

    fn run_test_process_blockstore_with_supermajority_root(
        blockstore_root: Option<Slot>,
        blockstore_access_type: AccessType,
    ) {
        solana_logger::setup();
        /*
            Build fork structure:
                 slot 0
                   |
                 slot 1 <- (blockstore root)
                 /    \
            slot 2    |
               |      |
            slot 4    |
                    slot 5
                      |
                `expected_root_slot`
                     /    \
                  ...    minor fork
                  /
            `last_slot`
                 |
            `really_last_slot`
        */
        let starting_fork_slot = 5;
        let mut main_fork = tr(starting_fork_slot);
        let mut main_fork_ref = main_fork.root_mut().get_mut();

        // Make enough slots to make a root slot > blockstore_root
        let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0);
        let really_expected_root_slot = expected_root_slot + 1;
        let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1;
        let really_last_main_fork_slot = last_main_fork_slot + 1;

        // Make `minor_fork`
        let last_minor_fork_slot = really_last_main_fork_slot + 1;
        let minor_fork = tr(last_minor_fork_slot);

        // Make `main_fork`
        for slot in starting_fork_slot + 1..last_main_fork_slot {
            if slot - 1 == expected_root_slot {
                main_fork_ref.push_front(minor_fork.clone());
            }
            main_fork_ref.push_front(tr(slot));
            main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut();
        }
        let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork);
        let validator_keypairs = ValidatorVoteKeypairs::new_rand();
        let GenesisConfigInfo { genesis_config, .. } =
            genesis_utils::create_genesis_config_with_vote_accounts(
                10_000,
                &[&validator_keypairs],
                vec![100],
            );
        let ticks_per_slot = genesis_config.ticks_per_slot();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash());

        if let Some(blockstore_root) = blockstore_root {
            blockstore
                .set_roots(std::iter::once(&blockstore_root))
                .unwrap();
        }

        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };

        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &opts,
            blockstore_access_type.clone(),
        );
        let bank_forks = bank_forks.read().unwrap();

        // prepare to add votes
        let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash();
        let last_vote_blockhash = bank_forks
            .get(last_main_fork_slot - 1)
            .unwrap()
            .last_blockhash();
        let tower_sync = TowerSync::new_from_slot(last_main_fork_slot - 1, last_vote_bank_hash);
        let vote_tx = vote_transaction::new_tower_sync_transaction(
            tower_sync,
            last_vote_blockhash,
            &validator_keypairs.node_keypair,
            &validator_keypairs.vote_keypair,
            &validator_keypairs.vote_keypair,
            None,
        );

        // Add votes to `last_slot` so that `root` will be confirmed
        let leader_keypair = Arc::new(validator_keypairs.node_keypair);
        make_slot_with_vote_tx(
            &blockstore,
            ticks_per_slot,
            last_main_fork_slot,
            last_main_fork_slot - 1,
            &last_vote_blockhash,
            vote_tx,
            &leader_keypair,
        );

        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &opts,
            blockstore_access_type.clone(),
        );
        let bank_forks = bank_forks.read().unwrap();

        assert_eq!(bank_forks.root(), expected_root_slot);
        assert_eq!(
            bank_forks.frozen_banks().count() as u64,
            last_minor_fork_slot - really_expected_root_slot + 1
        );

        // Minor fork at `last_main_fork_slot + 1` was above the `expected_root_slot`
        // so should not have been purged
        //
        // Fork at slot 2 was purged because it was below the `expected_root_slot`
        for slot in 0..=last_minor_fork_slot {
            // this slot will be created below
            if slot == really_last_main_fork_slot {
                continue;
            }
            if slot >= expected_root_slot {
                let bank = bank_forks.get(slot).unwrap();
                assert_eq!(bank.slot(), slot);
                assert!(bank.is_frozen());
            } else {
                assert!(bank_forks.get(slot).is_none());
            }
        }

        // really prepare to add votes
        let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash();
        let last_vote_blockhash = bank_forks
            .get(last_main_fork_slot)
            .unwrap()
            .last_blockhash();
        let tower_sync = TowerSync::new_from_slot(last_main_fork_slot, last_vote_bank_hash);
        let vote_tx = vote_transaction::new_tower_sync_transaction(
            tower_sync,
            last_vote_blockhash,
            &leader_keypair,
            &validator_keypairs.vote_keypair,
            &validator_keypairs.vote_keypair,
            None,
        );

        // Add votes to `really_last_slot` so that `root` will be confirmed again
        make_slot_with_vote_tx(
            &blockstore,
            ticks_per_slot,
            really_last_main_fork_slot,
            last_main_fork_slot,
            &last_vote_blockhash,
            vote_tx,
            &leader_keypair,
        );

        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &opts,
            blockstore_access_type,
        );
        let bank_forks = bank_forks.read().unwrap();

        assert_eq!(bank_forks.root(), really_expected_root_slot);
    }

    #[test]
    fn test_process_blockstore_with_supermajority_root_without_blockstore_root() {
        run_test_process_blockstore_with_supermajority_root(None, AccessType::Primary);
    }

    #[test]
    fn test_process_blockstore_with_supermajority_root_without_blockstore_root_secondary_access() {
        run_test_process_blockstore_with_supermajority_root(None, AccessType::Secondary);
    }

    #[test]
    fn test_process_blockstore_with_supermajority_root_with_blockstore_root() {
        run_test_process_blockstore_with_supermajority_root(Some(1), AccessType::Primary)
    }

    #[test]
    #[allow(clippy::field_reassign_with_default)]
    fn test_supermajority_root_from_vote_accounts() {
        let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> VoteAccountsHashMap {
            roots_stakes
                .into_iter()
                .map(|(root, stake)| {
                    let mut vote_state = VoteStateV3::default();
                    vote_state.root_slot = Some(root);
                    let mut vote_account = AccountSharedData::new(
                        1,
                        VoteStateV3::size_of(),
                        &solana_vote_program::id(),
                    );
                    let versioned = VoteStateVersions::new_v3(vote_state);
                    VoteStateV3::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
                    (
                        solana_pubkey::new_rand(),
                        (stake, VoteAccount::try_from(vote_account).unwrap()),
                    )
                })
                .collect()
        };

        let total_stake = 10;

        // Supermajority root should be None
        assert!(supermajority_root_from_vote_accounts(total_stake, &HashMap::default()).is_none());

        // Supermajority root should be None
        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)];
        let accounts = convert_to_vote_accounts(roots_stakes);
        assert!(supermajority_root_from_vote_accounts(total_stake, &accounts).is_none());

        // Supermajority root should be 4, which has 7/10 of the stake
        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)];
        let accounts = convert_to_vote_accounts(roots_stakes);
        assert_eq!(
            supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(),
            4
        );

        // Supermajority root should be 8, which has 7/10 of the stake
        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)];
        let accounts = convert_to_vote_accounts(roots_stakes);
        assert_eq!(
            supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(),
            8
        );
    }
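
    // Illustrative sketch (an assumption, reconstructed from the cases above):
    // walk the reported root slots from highest to lowest, accumulating stake,
    // and take the first root at which the accumulated stake exceeds the
    // supermajority threshold (VOTE_THRESHOLD_SIZE, i.e. 2/3) of total stake.
    #[allow(dead_code)]
    fn supermajority_root_sketch(total_stake: u64, roots_stakes: &[(Slot, u64)]) -> Option<Slot> {
        let mut sorted = roots_stakes.to_vec();
        // highest roots first, so the chosen root is as recent as possible
        sorted.sort_unstable_by(|a, b| b.0.cmp(&a.0));
        let mut accumulated_stake = 0;
        for (root, stake) in sorted {
            accumulated_stake += stake;
            if accumulated_stake as f64 / total_stake as f64 > VOTE_THRESHOLD_SIZE {
                return Some(root);
            }
        }
        None
    }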

    fn confirm_slot_entries_for_tests(
        bank: &Arc<Bank>,
        slot_entries: Vec<Entry>,
        slot_full: bool,
        prev_entry_hash: Hash,
    ) -> result::Result<(), BlockstoreProcessorError> {
        let replay_tx_thread_pool = create_thread_pool(1);
        confirm_slot_entries(
            &BankWithScheduler::new_without_scheduler(bank.clone()),
            &replay_tx_thread_pool,
            (slot_entries, 0, slot_full),
            &mut ConfirmationTiming::default(),
            &mut ConfirmationProgress::new(prev_entry_hash),
            false,
            None,
            None,
            None,
            &VerifyRecyclers::default(),
            None,
            &PrioritizationFeeCache::new(0u64),
        )
    }

    fn create_test_transactions(
        mint_keypair: &Keypair,
        genesis_hash: &Hash,
    ) -> Vec<RuntimeTransaction<SanitizedTransaction>> {
        let pubkey = solana_pubkey::new_rand();
        let keypair2 = Keypair::new();
        let pubkey2 = solana_pubkey::new_rand();
        let keypair3 = Keypair::new();
        let pubkey3 = solana_pubkey::new_rand();

        vec![
            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                mint_keypair,
                &pubkey,
                1,
                *genesis_hash,
            )),
            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                &keypair2,
                &pubkey2,
                1,
                *genesis_hash,
            )),
            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                &keypair3,
                &pubkey3,
                1,
                *genesis_hash,
            )),
        ]
    }

    #[test]
    fn test_confirm_slot_entries_progress_num_txs_indexes() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(100 * LAMPORTS_PER_SOL);
        let genesis_hash = genesis_config.hash();
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let bank = BankWithScheduler::new_without_scheduler(bank);
        let replay_tx_thread_pool = create_thread_pool(1);
        let mut timing = ConfirmationTiming::default();
        let mut progress = ConfirmationProgress::new(genesis_hash);
        let amount = genesis_config.rent.minimum_balance(0);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();
        bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair1.pubkey())
            .unwrap();
        bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair2.pubkey())
            .unwrap();

        let (transaction_status_sender, transaction_status_receiver) =
            crossbeam_channel::unbounded();
        let transaction_status_sender = TransactionStatusSender {
            sender: transaction_status_sender,
            dependency_tracker: None,
        };

        let blockhash = bank.last_blockhash();
        let tx1 = system_transaction::transfer(
            &keypair1,
            &keypair3.pubkey(),
            amount,
            bank.last_blockhash(),
        );
        let tx2 = system_transaction::transfer(
            &keypair2,
            &keypair4.pubkey(),
            amount,
            bank.last_blockhash(),
        );
        let entry = next_entry(&blockhash, 1, vec![tx1, tx2]);
        let new_hash = entry.hash;

        confirm_slot_entries(
            &bank,
            &replay_tx_thread_pool,
            (vec![entry], 0, false),
            &mut timing,
            &mut progress,
            false,
            Some(&transaction_status_sender),
            None,
            None,
            &VerifyRecyclers::default(),
            None,
            &PrioritizationFeeCache::new(0u64),
        )
        .unwrap();
        assert_eq!(progress.num_txs, 2);
        let batch = transaction_status_receiver.recv().unwrap();
        if let TransactionStatusMessage::Batch((batch, _sequence)) = batch {
            assert_eq!(batch.transactions.len(), 2);
            assert_eq!(batch.transaction_indexes.len(), 2);
            assert_eq!(batch.transaction_indexes, [0, 1]);
        } else {
            panic!("batch should have been sent");
        }

        let tx1 = system_transaction::transfer(
            &keypair1,
            &keypair3.pubkey(),
            amount + 1,
            bank.last_blockhash(),
        );
        let tx2 = system_transaction::transfer(
            &keypair2,
            &keypair4.pubkey(),
            amount + 1,
            bank.last_blockhash(),
        );
        let tx3 = system_transaction::transfer(
            &mint_keypair,
            &Pubkey::new_unique(),
            amount,
            bank.last_blockhash(),
        );
        let entry = next_entry(&new_hash, 1, vec![tx1, tx2, tx3]);

        confirm_slot_entries(
            &bank,
            &replay_tx_thread_pool,
            (vec![entry], 0, false),
            &mut timing,
            &mut progress,
            false,
            Some(&transaction_status_sender),
            None,
            None,
            &VerifyRecyclers::default(),
            None,
            &PrioritizationFeeCache::new(0u64),
        )
        .unwrap();
        assert_eq!(progress.num_txs, 5);
        let batch = transaction_status_receiver.recv().unwrap();
        if let TransactionStatusMessage::Batch((batch, _sequence)) = batch {
            assert_eq!(batch.transactions.len(), 3);
            assert_eq!(batch.transaction_indexes.len(), 3);
            assert_eq!(batch.transaction_indexes, [2, 3, 4]);
        } else {
            panic!("batch should have been sent");
        }
    }
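
    // Illustrative sketch (an assumption matching the two batches observed
    // above): transaction indexes continue from a running per-slot counter, so
    // a batch replayed after `progress.num_txs` earlier transactions is tagged
    // with the indexes [num_txs, num_txs + batch_len).
    #[allow(dead_code)]
    fn batch_transaction_indexes(num_txs_so_far: usize, batch_len: usize) -> Vec<usize> {
        (num_txs_so_far..num_txs_so_far + batch_len).collect()
    }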

    fn do_test_schedule_batches_for_execution(should_succeed: bool) {
        solana_logger::setup();
        let dummy_leader_pubkey = solana_pubkey::new_rand();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let context = SchedulingContext::for_verification(bank.clone());

        let txs = create_test_transactions(&mint_keypair, &genesis_config.hash());

        let mut mocked_scheduler = MockInstalledScheduler::new();
        let seq = Arc::new(Mutex::new(mockall::Sequence::new()));
        let seq_cloned = seq.clone();
        mocked_scheduler
            .expect_context()
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .return_const(context);
        if should_succeed {
            mocked_scheduler
                .expect_schedule_execution()
                .times(txs.len())
                .returning(|_, _| Ok(()));
        } else {
            // mocked_scheduler isn't async, so its short-circuiting behavior is
            // plainly visible: .times(1) is expected instead of .times(txs.len()),
            // unlike in the succeeding case
            mocked_scheduler
                .expect_schedule_execution()
                .times(1)
                .returning(|_, _| Err(SchedulerAborted));
            mocked_scheduler
                .expect_recover_error_after_abort()
                .times(1)
                .returning(|| TransactionError::InsufficientFundsForFee);
        }
        mocked_scheduler
            .expect_wait_for_termination()
            .with(mockall::predicate::eq(true))
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .returning(move |_| {
                let mut mocked_uninstalled_scheduler = MockUninstalledScheduler::new();
                mocked_uninstalled_scheduler
                    .expect_return_to_pool()
                    .times(1)
                    .in_sequence(&mut seq_cloned.lock().unwrap())
                    .returning(|| ());
                (
                    (Ok(()), ExecuteTimings::default()),
                    Box::new(mocked_uninstalled_scheduler),
                )
            });
        let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler)));

        let locked_entry = LockedTransactionsWithIndexes {
            lock_results: bank.try_lock_accounts(&txs),
            transactions: txs,
            starting_index: 0,
        };

        let replay_tx_thread_pool = create_thread_pool(1);
        let mut batch_execution_timing = BatchExecutionTiming::default();
        let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
        let result = process_batches(
            &bank,
            &replay_tx_thread_pool,
            [locked_entry].into_iter(),
            None,
            None,
            &mut batch_execution_timing,
            None,
            &ignored_prioritization_fee_cache,
        );
        if should_succeed {
            assert_matches!(result, Ok(()));
        } else {
            assert_matches!(result, Err(TransactionError::InsufficientFundsForFee));
        }
    }

    #[test]
    fn test_schedule_batches_for_execution_success() {
        do_test_schedule_batches_for_execution(true);
    }

    #[test]
    fn test_schedule_batches_for_execution_failure() {
        do_test_schedule_batches_for_execution(false);
    }

    enum TxResult {
        ExecutedWithSuccess,
        ExecutedWithFailure,
        NotExecuted,
    }

    #[test_matrix(
        [TxResult::ExecutedWithSuccess, TxResult::ExecutedWithFailure, TxResult::NotExecuted],
        [Ok(None), Ok(Some(4)), Err(TransactionError::CommitCancelled)]
    )]
    fn test_execute_batch_pre_commit_callback(
        tx_result: TxResult,
        poh_result: Result<Option<usize>>,
    ) {
        solana_logger::setup();
        let dummy_leader_pubkey = solana_pubkey::new_rand();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
        let bank = Bank::new_for_tests(&genesis_config);
        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
        let bank = Arc::new(bank);
        let pubkey = solana_pubkey::new_rand();
        let (tx, expected_tx_result) = match tx_result {
            TxResult::ExecutedWithSuccess => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    1,
                    genesis_config.hash(),
                )),
                Ok(()),
            ),
            TxResult::ExecutedWithFailure => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    100_000_000,
                    genesis_config.hash(),
                )),
                Ok(()),
            ),
            TxResult::NotExecuted => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    1,
                    Hash::default(),
                )),
                Err(TransactionError::BlockhashNotFound),
            ),
        };
        let mut batch = TransactionBatch::new(
            vec![Ok(()); 1],
            &bank,
            OwnedOrBorrowed::Borrowed(slice::from_ref(&tx)),
        );
        batch.set_needs_unlock(false);
        let poh_with_index = matches!(&poh_result, Ok(Some(_)));
        let batch = TransactionBatchWithIndexes {
            batch,
            transaction_indexes: vec![],
        };
        let prioritization_fee_cache = PrioritizationFeeCache::default();
        let mut timing = ExecuteTimings::default();
        let (sender, receiver) = crossbeam_channel::unbounded();

        assert_eq!(bank.transaction_count(), 0);
        assert_eq!(bank.transaction_error_count(), 0);
        let should_commit = poh_result.is_ok();
        let mut is_called = false;
        let result = execute_batch(
            &batch,
            &bank,
            Some(&TransactionStatusSender {
                sender,
                dependency_tracker: None,
            }),
            None,
            &mut timing,
            None,
            &prioritization_fee_cache,
            Some(|processing_result: &'_ Result<_>| {
                is_called = true;
                let ok = poh_result?;
                if let Err(error) = processing_result {
                    Err(error.clone())?;
                };
                Ok(ok)
            }),
        );

        // pre_commit_callback() should always be called regardless of tx_result
        assert!(is_called);

        if should_commit {
            assert_eq!(result, expected_tx_result);
            if expected_tx_result.is_ok() {
                assert_eq!(bank.transaction_count(), 1);
                if matches!(tx_result, TxResult::ExecutedWithFailure) {
                    assert_eq!(bank.transaction_error_count(), 1);
                } else {
                    assert_eq!(bank.transaction_error_count(), 0);
                }
            } else {
                assert_eq!(bank.transaction_count(), 0);
            }
        } else {
            assert_matches!(result, Err(TransactionError::CommitCancelled));
            assert_eq!(bank.transaction_count(), 0);
        }
        if poh_with_index && expected_tx_result.is_ok() {
            assert_matches!(
                receiver.try_recv(),
                Ok(TransactionStatusMessage::Batch((TransactionStatusBatch{transaction_indexes, ..}, _sequence)))
                    if transaction_indexes == vec![4_usize]
            );
        } else if should_commit && expected_tx_result.is_ok() {
            assert_matches!(
                receiver.try_recv(),
                Ok(TransactionStatusMessage::Batch((TransactionStatusBatch{transaction_indexes, ..}, _sequence)))
                    if transaction_indexes.is_empty()
            );
        } else {
            assert_matches!(receiver.try_recv(), Err(_));
        }
    }
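
    // Illustrative sketch (an assumption restating the contract exercised
    // above): the pre-commit callback runs exactly once per batch. Ok(None)
    // commits without a starting transaction index, Ok(Some(index)) commits and
    // forwards that index to the transaction status sender, and Err(_) cancels
    // the commit, as does a failed processing result.
    #[allow(dead_code)]
    fn pre_commit_sketch(
        poh_result: Result<Option<usize>>,
        processing_result: &Result<()>,
    ) -> Result<Option<usize>> {
        // a PoH recording failure cancels the commit
        let index = poh_result?;
        // a transaction that failed to process also cancels it
        if let Err(error) = processing_result {
            return Err(error.clone());
        }
        Ok(index)
    }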

    #[test]
    fn test_confirm_slot_entries_with_fix() {
        const HASHES_PER_TICK: u64 = 10;
        const TICKS_PER_SLOT: u64 = 2;

        let collector_id = Pubkey::new_unique();

        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
        genesis_config.ticks_per_slot = TICKS_PER_SLOT;
        let genesis_hash = genesis_config.hash();

        let (slot_0_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        assert_eq!(slot_0_bank.slot(), 0);
        assert_eq!(slot_0_bank.tick_height(), 0);
        assert_eq!(slot_0_bank.max_tick_height(), 2);
        assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
        assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));

        let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
        let slot_0_hash = slot_0_entries.last().unwrap().hash;
        confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
        assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
        assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
        assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
        assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));

        let new_bank = Bank::new_from_parent(slot_0_bank, &collector_id, 2);
        let slot_2_bank = bank_forks
            .write()
            .unwrap()
            .insert(new_bank)
            .clone_without_scheduler();
        assert_eq!(slot_2_bank.slot(), 2);
        assert_eq!(slot_2_bank.tick_height(), 2);
        assert_eq!(slot_2_bank.max_tick_height(), 6);
        assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);

        let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
        let slot_1_hash = slot_1_entries.last().unwrap().hash;
        confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
        assert_eq!(slot_2_bank.tick_height(), 4);
        assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
        assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(1));
        assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(0));

        struct TestCase {
            recent_blockhash: Hash,
            expected_result: result::Result<(), BlockstoreProcessorError>,
        }

        let test_cases = [
            TestCase {
                recent_blockhash: slot_1_hash,
                expected_result: Err(BlockstoreProcessorError::InvalidTransaction(
                    TransactionError::BlockhashNotFound,
                )),
            },
            TestCase {
                recent_blockhash: slot_0_hash,
                expected_result: Ok(()),
            },
        ];

        // Check that slot 2 transactions can only use hashes for completed blocks.
        for TestCase {
            recent_blockhash,
            expected_result,
        } in test_cases
        {
            let slot_2_entries = {
                let to_pubkey = Pubkey::new_unique();
                let mut prev_entry_hash = slot_1_hash;
                let mut remaining_entry_hashes = HASHES_PER_TICK;

                let tx =
                    system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_blockhash);
                remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
                let mut entries = vec![next_entry_mut(&mut prev_entry_hash, 1, vec![tx])];

                entries.push(next_entry_mut(
                    &mut prev_entry_hash,
                    remaining_entry_hashes,
                    vec![],
                ));
                entries.push(next_entry_mut(
                    &mut prev_entry_hash,
                    HASHES_PER_TICK,
                    vec![],
                ));

                entries
            };

            let slot_2_hash = slot_2_entries.last().unwrap().hash;
            let result =
                confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash);
            match (result, expected_result) {
                (Ok(()), Ok(())) => {
                    assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
                    assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
                    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
                    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
                    assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
                }
                (
                    Err(BlockstoreProcessorError::InvalidTransaction(err)),
                    Err(BlockstoreProcessorError::InvalidTransaction(expected_err)),
                ) => {
                    assert_eq!(err, expected_err);
                }
                (result, expected_result) => {
                    panic!("actual result {result:?} != expected result {expected_result:?}");
                }
            }
        }
    }

    #[test]
    fn test_check_block_cost_limit() {
        let dummy_leader_pubkey = solana_pubkey::new_rand();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
        let bank = Bank::new_for_tests(&genesis_config);

        let tx = RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
            &mint_keypair,
            &Pubkey::new_unique(),
            1,
            genesis_config.hash(),
        ));
        let mut tx_cost = CostModel::calculate_cost(&tx, &bank.feature_set);
        let actual_execution_cu = 1;
        let actual_loaded_accounts_data_size = 64 * 1024;
        let TransactionCost::Transaction(ref mut usage_cost_details) = tx_cost else {
            unreachable!("test tx is non-vote tx");
        };
        usage_cost_details.programs_execution_cost = actual_execution_cu;
        usage_cost_details.loaded_accounts_data_size_cost =
            CostModel::calculate_loaded_accounts_data_size_cost(
                actual_loaded_accounts_data_size,
                &bank.feature_set,
            );
        // set the block limit so that exactly one transaction fits
        let block_limit = tx_cost.sum();
        bank.write_cost_tracker()
            .unwrap()
            .set_limits(u64::MAX, block_limit, u64::MAX);

        let tx_costs = vec![None, Some(tx_cost), None];
        // The transaction will fit when added the first time
        assert!(check_block_cost_limits(&bank, &tx_costs).is_ok());
        // But adding a second time will exceed the block limit
        assert_eq!(
            Err(TransactionError::WouldExceedMaxBlockCostLimit),
            check_block_cost_limits(&bank, &tx_costs)
        );
        // Adding another None is a no-op (even though the block is already full)
        assert!(check_block_cost_limits(&bank, &tx_costs[0..1]).is_ok());
    }
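
    // Illustrative sketch (an assumption modeling the behavior asserted above):
    // batch costs accumulate against the slot's cost tracker, and a batch fails
    // with WouldExceedMaxBlockCostLimit once the running total would exceed the
    // block limit; `None` entries contribute nothing, so re-checking them on a
    // full block is a no-op.
    #[allow(dead_code)]
    fn try_add_batch_cost(
        accumulated_cost: &mut u64,
        block_limit: u64,
        batch_costs: &[Option<u64>],
    ) -> Result<()> {
        let batch_cost: u64 = batch_costs.iter().flatten().sum();
        if accumulated_cost.saturating_add(batch_cost) > block_limit {
            return Err(TransactionError::WouldExceedMaxBlockCostLimit);
        }
        *accumulated_cost += batch_cost;
        Ok(())
    }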

    #[test]
    fn test_calculate_alpenglow_ticks() {
        let first_alpenglow_slot = 10;
        let ticks_per_slot = 2;

        // Slots before alpenglow don't have alpenglow ticks
        let slot = 9;
        let parent_slot = 8;
        assert!(
            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
                .is_none()
        );

        // First alpenglow slot should only have 1 tick
        let slot = first_alpenglow_slot;
        let parent_slot = first_alpenglow_slot - 1;
        assert_eq!(
            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
                .unwrap(),
            1
        );

        // A first alpenglow slot with skipped non-alpenglow slots needs
        // `ticks_per_slot` ticks per skipped slot, plus one additional
        // tick for the first alpenglow slot itself
        let slot = first_alpenglow_slot;
        let num_skipped_slots = 3;
        let parent_slot = first_alpenglow_slot - num_skipped_slots - 1;
        assert_eq!(
            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
                .unwrap(),
            num_skipped_slots * ticks_per_slot + 1
        );

        // Skipped alpenglow slots don't need any additional ticks
        let slot = first_alpenglow_slot + 2;
        let parent_slot = first_alpenglow_slot;
        assert_eq!(
            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
                .unwrap(),
            1
        );

        // Alpenglow slots skipped alongside non-alpenglow slots need
        // `ticks_per_slot` ticks per skipped non-alpenglow slot only,
        // plus one additional tick for the alpenglow slot itself
        let slot = first_alpenglow_slot + 2;
        let num_skipped_non_alpenglow_slots = 4;
        let parent_slot = first_alpenglow_slot - num_skipped_non_alpenglow_slots - 1;
        assert_eq!(
            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
                .unwrap(),
            num_skipped_non_alpenglow_slots * ticks_per_slot + 1
        );
    }
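
    // Illustrative sketch (an assumption, reconstructed purely from the cases
    // above): slots before `first_alpenglow_slot` get no alpenglow ticks; an
    // alpenglow slot gets `ticks_per_slot` ticks for each skipped non-alpenglow
    // slot between its parent and `first_alpenglow_slot`, plus a single
    // trailing tick, while skipped alpenglow slots add nothing.
    #[allow(dead_code)]
    fn calculate_alpenglow_ticks_sketch(
        slot: Slot,
        first_alpenglow_slot: Slot,
        parent_slot: Slot,
        ticks_per_slot: u64,
    ) -> Option<u64> {
        (slot >= first_alpenglow_slot).then(|| {
            let skipped_non_alpenglow_slots =
                first_alpenglow_slot.saturating_sub(parent_slot + 1);
            skipped_non_alpenglow_slots * ticks_per_slot + 1
        })
    }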
}