// solana_ledger/blockstore_processor.rs

1use {
2    crate::{
3        block_error::BlockError,
4        blockstore::{Blockstore, BlockstoreError},
5        blockstore_meta::SlotMeta,
6        entry_notifier_service::{EntryNotification, EntryNotifierSender},
7        leader_schedule_cache::LeaderScheduleCache,
8        transaction_balances::compile_collected_balances,
9        use_snapshot_archives_at_startup::UseSnapshotArchivesAtStartup,
10    },
11    agave_snapshots::snapshot_config::SnapshotConfig,
12    chrono_humanize::{Accuracy, HumanTime, Tense},
13    crossbeam_channel::Sender,
14    itertools::Itertools,
15    log::*,
16    rayon::{prelude::*, ThreadPool},
17    scopeguard::defer,
18    solana_accounts_db::{
19        accounts_db::AccountsDbConfig, accounts_update_notifier_interface::AccountsUpdateNotifier,
20    },
21    solana_clock::{Slot, MAX_PROCESSING_AGE},
22    solana_cost_model::{cost_model::CostModel, transaction_cost::TransactionCost},
23    solana_entry::entry::{
24        self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
25    },
26    solana_genesis_config::GenesisConfig,
27    solana_hash::Hash,
28    solana_keypair::Keypair,
29    solana_measure::{measure::Measure, measure_us},
30    solana_metrics::datapoint_error,
31    solana_pubkey::Pubkey,
32    solana_runtime::{
33        bank::{Bank, PreCommitResult, TransactionBalancesSet},
34        bank_forks::BankForks,
35        bank_utils,
36        commitment::VOTE_THRESHOLD_SIZE,
37        dependency_tracker::DependencyTracker,
38        installed_scheduler_pool::BankWithScheduler,
39        prioritization_fee_cache::PrioritizationFeeCache,
40        runtime_config::RuntimeConfig,
41        snapshot_controller::SnapshotController,
42        transaction_batch::{OwnedOrBorrowed, TransactionBatch},
43        vote_sender_types::ReplayVoteSender,
44    },
45    solana_runtime_transaction::{
46        runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta,
47    },
48    solana_signature::Signature,
49    solana_svm::{
50        transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions},
51        transaction_processing_result::ProcessedTransaction,
52        transaction_processor::ExecutionRecordingConfig,
53    },
54    solana_svm_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings},
55    solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction},
56    solana_transaction::{
57        sanitized::SanitizedTransaction, versioned::VersionedTransaction,
58        TransactionVerificationMode,
59    },
60    solana_transaction_error::{TransactionError, TransactionResult as Result},
61    solana_transaction_status::token_balances::TransactionTokenBalancesSet,
62    solana_vote::vote_account::VoteAccountsHashMap,
63    std::{
64        borrow::Cow,
65        collections::{HashMap, HashSet},
66        num::Saturating,
67        ops::Index,
68        path::PathBuf,
69        result,
70        sync::{atomic::AtomicBool, Arc, Mutex, RwLock},
71        time::{Duration, Instant},
72        vec::Drain,
73    },
74    thiserror::Error,
75    ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen},
76};
77#[cfg(feature = "dev-context-only-utils")]
78use {qualifier_attr::qualifiers, solana_runtime::bank::HashOverrides};
79
80pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMMessage> {
81    pub batch: TransactionBatch<'a, 'b, Tx>,
82    pub transaction_indexes: Vec<usize>,
83}
84
85// `TransactionBatchWithIndexes` but without the `Drop` that prevents
86// us from nicely unwinding these with manual unlocking.
87pub struct LockedTransactionsWithIndexes<Tx: SVMMessage> {
88    lock_results: Vec<Result<()>>,
89    transactions: Vec<RuntimeTransaction<Tx>>,
90    starting_index: usize,
91}
92
93struct ReplayEntry {
94    entry: EntryType<RuntimeTransaction<SanitizedTransaction>>,
95    starting_index: usize,
96}
97
98fn first_err(results: &[Result<()>]) -> Result<()> {
99    for r in results {
100        if r.is_err() {
101            return r.clone();
102        }
103    }
104    Ok(())
105}
106
107// Includes transaction signature for unit-testing
108fn do_get_first_error<T, Tx: SVMTransaction>(
109    batch: &TransactionBatch<Tx>,
110    results: &[Result<T>],
111) -> Option<(Result<()>, Signature)> {
112    let mut first_err = None;
113    for (result, transaction) in results.iter().zip(batch.sanitized_transactions()) {
114        if let Err(err) = result {
115            if first_err.is_none() {
116                first_err = Some((Err(err.clone()), *transaction.signature()));
117            }
118            warn!("Unexpected validator error: {err:?}, transaction: {transaction:?}");
119            datapoint_error!(
120                "validator_process_entry_error",
121                (
122                    "error",
123                    format!("error: {err:?}, transaction: {transaction:?}"),
124                    String
125                )
126            );
127        }
128    }
129    first_err
130}
131
132fn get_first_error<T, Tx: SVMTransaction>(
133    batch: &TransactionBatch<Tx>,
134    commit_results: &[Result<T>],
135) -> Result<()> {
136    do_get_first_error(batch, commit_results)
137        .map(|(error, _signature)| error)
138        .unwrap_or(Ok(()))
139}
140
141fn create_thread_pool(num_threads: usize) -> ThreadPool {
142    rayon::ThreadPoolBuilder::new()
143        .num_threads(num_threads)
144        .thread_name(|i| format!("solReplayTx{i:02}"))
145        .build()
146        .expect("new rayon threadpool")
147}
148
149pub fn execute_batch<'a>(
150    batch: &'a TransactionBatchWithIndexes<impl TransactionWithMeta>,
151    bank: &'a Arc<Bank>,
152    transaction_status_sender: Option<&'a TransactionStatusSender>,
153    replay_vote_sender: Option<&'a ReplayVoteSender>,
154    timings: &'a mut ExecuteTimings,
155    log_messages_bytes_limit: Option<usize>,
156    prioritization_fee_cache: &'a PrioritizationFeeCache,
157    extra_pre_commit_callback: Option<
158        impl FnOnce(&Result<ProcessedTransaction>) -> Result<Option<usize>>,
159    >,
160) -> Result<()> {
161    let TransactionBatchWithIndexes {
162        batch,
163        transaction_indexes,
164    } = batch;
165
166    // extra_pre_commit_callback allows for reuse of this function between the
167    // unified scheduler block production path and block verification path(s)
168    //   Some(_) => unified scheduler block production path
169    //   None    => block verification path(s)
170    let block_verification = extra_pre_commit_callback.is_none();
171    let record_transaction_meta = transaction_status_sender.is_some();
172    let mut transaction_indexes = Cow::from(transaction_indexes);
173
174    let pre_commit_callback = |_timings: &mut _, processing_results: &_| -> PreCommitResult {
175        match extra_pre_commit_callback {
176            None => {
177                // We're entering into one of the block-verification methods.
178                get_first_error(batch, processing_results)?;
179                Ok(None)
180            }
181            Some(extra_pre_commit_callback) => {
182                // We're entering into the block-production unified scheduler special case...
183                // `processing_results` should always contain exactly only 1 result in that case.
184                let [result] = processing_results else {
185                    panic!("unexpected result count: {}", processing_results.len());
186                };
187                // transaction_indexes is intended to be populated later; so barely-initialized vec
188                // should be provided.
189                assert!(transaction_indexes.is_empty());
190
191                // From now on, we need to freeze-lock the tpu bank, in order to prevent it from
192                // freezing in the middle of this code-path. Otherwise, the assertion at the start
193                // of commit_transactions() would trigger panic because it's fatal runtime
194                // invariant violation.
195                let freeze_lock = bank.freeze_lock();
196
197                // `result` won't be examined at all here. Rather, `extra_pre_commit_callback` is
198                // responsible for all result handling, including the very basic precondition of
199                // successful execution of transactions as well.
200                let committed_index = extra_pre_commit_callback(result)?;
201
202                // The callback succeeded. Optionally, update transaction_indexes as well.
203                // Refer to TaskHandler::handle()'s transaction_indexes initialization for further
204                // background.
205                if let Some(index) = committed_index {
206                    let transaction_indexes = transaction_indexes.to_mut();
207                    // Adjust the empty new vec with the exact needed capacity. Otherwise, excess
208                    // cap would be reserved on `.push()` in it.
209                    transaction_indexes.reserve_exact(1);
210                    transaction_indexes.push(index);
211                }
212                // At this point, poh should have been succeeded so it's guaranteed that the bank
213                // hasn't been frozen yet and we're still holding the lock. So, it's okay to pass
214                // down freeze_lock without any introspection here to be unconditionally dropped
215                // after commit_transactions(). This reasoning is same as
216                // solana_core::banking_stage::Consumer::execute_and_commit_transactions_locked()
217                Ok(Some(freeze_lock))
218            }
219        }
220    };
221
222    let (commit_results, balance_collector) = batch
223        .bank()
224        .load_execute_and_commit_transactions_with_pre_commit_callback(
225            batch,
226            MAX_PROCESSING_AGE,
227            ExecutionRecordingConfig::new_single_setting(transaction_status_sender.is_some()),
228            timings,
229            log_messages_bytes_limit,
230            pre_commit_callback,
231        )?;
232
233    let mut check_block_costs_elapsed = Measure::start("check_block_costs");
234    let tx_costs = if block_verification {
235        // Block verification (including unified scheduler) case;
236        // collect and check transaction costs
237        let tx_costs = get_transaction_costs(bank, &commit_results, batch.sanitized_transactions());
238        check_block_cost_limits(bank, &tx_costs).map(|_| tx_costs)
239    } else if record_transaction_meta {
240        // Unified scheduler block production case;
241        // the scheduler will track costs elsewhere but costs are recalculated
242        // here so they can be recorded with other transaction metadata
243        Ok(get_transaction_costs(
244            bank,
245            &commit_results,
246            batch.sanitized_transactions(),
247        ))
248    } else {
249        // Unified scheduler block production wihout metadata recording
250        Ok(vec![])
251    };
252    check_block_costs_elapsed.stop();
253    timings.saturating_add_in_place(
254        ExecuteTimingType::CheckBlockLimitsUs,
255        check_block_costs_elapsed.as_us(),
256    );
257    let tx_costs = tx_costs?;
258
259    bank_utils::find_and_send_votes(
260        batch.sanitized_transactions(),
261        &commit_results,
262        replay_vote_sender,
263    );
264
265    let committed_transactions = commit_results
266        .iter()
267        .zip(batch.sanitized_transactions())
268        .filter_map(|(commit_result, tx)| commit_result.was_committed().then_some(tx));
269    prioritization_fee_cache.update(bank, committed_transactions);
270
271    if let Some(transaction_status_sender) = transaction_status_sender {
272        let transactions: Vec<SanitizedTransaction> = batch
273            .sanitized_transactions()
274            .iter()
275            .map(|tx| tx.as_sanitized_transaction().into_owned())
276            .collect();
277
278        // There are two cases where balance_collector could be None:
279        // * Balance recording is disabled. If that were the case, there would
280        //   be no TransactionStatusSender, and we would not be in this branch.
281        // * The batch was aborted in its entirety in SVM. In that case, nothing
282        //   would have been committed.
283        // Therefore this should always be true.
284        debug_assert!(balance_collector.is_some());
285
286        let (balances, token_balances) =
287            compile_collected_balances(balance_collector.unwrap_or_default());
288
289        // The length of costs vector needs to be consistent with all other
290        // vectors that are sent over (such as `transactions`). So, replace the
291        // None elements with Some(0)
292        let tx_costs = tx_costs
293            .into_iter()
294            .map(|tx_cost_option| tx_cost_option.map(|tx_cost| tx_cost.sum()).or(Some(0)))
295            .collect();
296
297        transaction_status_sender.send_transaction_status_batch(
298            bank.slot(),
299            transactions,
300            commit_results,
301            balances,
302            token_balances,
303            tx_costs,
304            transaction_indexes.into_owned(),
305        );
306    }
307
308    Ok(())
309}
310
311// Get actual transaction execution costs from transaction commit results
312fn get_transaction_costs<'a, Tx: TransactionWithMeta>(
313    bank: &Bank,
314    commit_results: &[TransactionCommitResult],
315    sanitized_transactions: &'a [Tx],
316) -> Vec<Option<TransactionCost<'a, Tx>>> {
317    assert_eq!(sanitized_transactions.len(), commit_results.len());
318
319    commit_results
320        .iter()
321        .zip(sanitized_transactions)
322        .map(|(commit_result, tx)| {
323            if let Ok(committed_tx) = commit_result {
324                Some(CostModel::calculate_cost_for_executed_transaction(
325                    tx,
326                    committed_tx.executed_units,
327                    committed_tx.loaded_account_stats.loaded_accounts_data_size,
328                    &bank.feature_set,
329                ))
330            } else {
331                None
332            }
333        })
334        .collect()
335}
336
337fn check_block_cost_limits<Tx: TransactionWithMeta>(
338    bank: &Bank,
339    tx_costs: &[Option<TransactionCost<'_, Tx>>],
340) -> Result<()> {
341    let mut cost_tracker = bank.write_cost_tracker().unwrap();
342    for tx_cost in tx_costs.iter().flatten() {
343        cost_tracker
344            .try_add(tx_cost)
345            .map_err(TransactionError::from)?;
346    }
347
348    Ok(())
349}
350
351#[derive(Default)]
352pub struct ExecuteBatchesInternalMetrics {
353    execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>,
354    total_batches_len: u64,
355    execute_batches_us: u64,
356}
357
358impl ExecuteBatchesInternalMetrics {
359    pub fn new_with_timings_from_all_threads(execute_timings: ExecuteTimings) -> Self {
360        const DUMMY_THREAD_INDEX: usize = 999;
361        let mut new = Self::default();
362        new.execution_timings_per_thread.insert(
363            DUMMY_THREAD_INDEX,
364            ThreadExecuteTimings {
365                execute_timings,
366                ..ThreadExecuteTimings::default()
367            },
368        );
369        new
370    }
371}
372
373fn execute_batches_internal(
374    bank: &Arc<Bank>,
375    replay_tx_thread_pool: &ThreadPool,
376    batches: &[TransactionBatchWithIndexes<RuntimeTransaction<SanitizedTransaction>>],
377    transaction_status_sender: Option<&TransactionStatusSender>,
378    replay_vote_sender: Option<&ReplayVoteSender>,
379    log_messages_bytes_limit: Option<usize>,
380    prioritization_fee_cache: &PrioritizationFeeCache,
381) -> Result<ExecuteBatchesInternalMetrics> {
382    assert!(!batches.is_empty());
383    let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
384        Mutex::new(HashMap::new());
385
386    let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed");
387    let results: Vec<Result<()>> = replay_tx_thread_pool.install(|| {
388        batches
389            .into_par_iter()
390            .map(|transaction_batch| {
391                let transaction_count =
392                    transaction_batch.batch.sanitized_transactions().len() as u64;
393                let mut timings = ExecuteTimings::default();
394                let (result, execute_batches_us) = measure_us!(execute_batch(
395                    transaction_batch,
396                    bank,
397                    transaction_status_sender,
398                    replay_vote_sender,
399                    &mut timings,
400                    log_messages_bytes_limit,
401                    prioritization_fee_cache,
402                    None::<fn(&_) -> _>,
403                ));
404
405                let thread_index = replay_tx_thread_pool.current_thread_index().unwrap();
406                execution_timings_per_thread
407                    .lock()
408                    .unwrap()
409                    .entry(thread_index)
410                    .and_modify(|thread_execution_time| {
411                        let ThreadExecuteTimings {
412                            total_thread_us,
413                            total_transactions_executed,
414                            execute_timings: total_thread_execute_timings,
415                        } = thread_execution_time;
416                        *total_thread_us += execute_batches_us;
417                        *total_transactions_executed += transaction_count;
418                        total_thread_execute_timings
419                            .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1);
420                        total_thread_execute_timings.accumulate(&timings);
421                    })
422                    .or_insert(ThreadExecuteTimings {
423                        total_thread_us: Saturating(execute_batches_us),
424                        total_transactions_executed: Saturating(transaction_count),
425                        execute_timings: timings,
426                    });
427                result
428            })
429            .collect()
430    });
431    execute_batches_elapsed.stop();
432
433    first_err(&results)?;
434
435    Ok(ExecuteBatchesInternalMetrics {
436        execution_timings_per_thread: execution_timings_per_thread.into_inner().unwrap(),
437        total_batches_len: batches.len() as u64,
438        execute_batches_us: execute_batches_elapsed.as_us(),
439    })
440}
441
442// This fn diverts the code-path into two variants. Both must provide exactly the same set of
443// validations. For this reason, this fn is deliberately inserted into the code path to be called
444// inside process_entries(), so that Bank::prepare_sanitized_batch() has been called on all of
445// batches already, while minimizing code duplication (thus divergent behavior risk) at the cost of
446// acceptable overhead of meaningless buffering of batches for the scheduler variant.
447//
448// Also note that the scheduler variant can't implement the batch-level sanitization naively, due
449// to the nature of individual tx processing. That's another reason of this particular placement of
450// divergent point in the code-path (i.e. not one layer up with its own prepare_sanitized_batch()
451// invocation).
452fn process_batches(
453    bank: &BankWithScheduler,
454    replay_tx_thread_pool: &ThreadPool,
455    locked_entries: impl ExactSizeIterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
456    transaction_status_sender: Option<&TransactionStatusSender>,
457    replay_vote_sender: Option<&ReplayVoteSender>,
458    batch_execution_timing: &mut BatchExecutionTiming,
459    log_messages_bytes_limit: Option<usize>,
460    prioritization_fee_cache: &PrioritizationFeeCache,
461) -> Result<()> {
462    if bank.has_installed_scheduler() {
463        debug!(
464            "process_batches()/schedule_batches_for_execution({} batches)",
465            locked_entries.len()
466        );
467        // Scheduling usually succeeds (immediately returns `Ok(())`) here without being blocked on
468        // the actual transaction executions.
469        //
470        // As an exception, this code path could propagate the transaction execution _errors of
471        // previously-scheduled transactions_ to notify the replay stage. Then, the replay stage
472        // will bail out the further processing of the malformed (possibly malicious) block
473        // immediately, not to waste any system resources. Note that this propagation is of early
474        // hints. Even if errors won't be propagated in this way, they are guaranteed to be
475        // propagated eventually via the blocking fn called
476        // BankWithScheduler::wait_for_completed_scheduler().
477        //
478        // To recite, the returned error is completely unrelated to the argument's `locked_entries`
479        // at the hand. While being awkward, the _async_ unified scheduler is abusing this existing
480        // error propagation code path to the replay stage for compatibility and ease of
481        // integration, exploiting the fact that the replay stage doesn't care _which transaction
482        // the returned error is originating from_.
483        //
484        // In the future, more proper error propagation mechanism will be introduced once after we
485        // fully transition to the unified scheduler for the block verification. That one would be
486        // a push based one from the unified scheduler to the replay stage to eliminate the current
487        // overhead: 1 read lock per batch in
488        // `BankWithScheduler::schedule_transaction_executions()`.
489        schedule_batches_for_execution(bank, locked_entries)
490    } else {
491        debug!(
492            "process_batches()/execute_batches({} batches)",
493            locked_entries.len()
494        );
495        execute_batches(
496            bank,
497            replay_tx_thread_pool,
498            locked_entries,
499            transaction_status_sender,
500            replay_vote_sender,
501            batch_execution_timing,
502            log_messages_bytes_limit,
503            prioritization_fee_cache,
504        )
505    }
506}
507
508fn schedule_batches_for_execution(
509    bank: &BankWithScheduler,
510    locked_entries: impl Iterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
511) -> Result<()> {
512    // Track the first error encountered in the loop below, if any.
513    // This error will be propagated to the replay stage, or Ok(()).
514    let mut first_err = Ok(());
515
516    for LockedTransactionsWithIndexes {
517        lock_results,
518        transactions,
519        starting_index,
520    } in locked_entries
521    {
522        // unlock before sending to scheduler.
523        bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));
524        // give ownership to scheduler. capture the first error, but continue the loop
525        // to unlock.
526        // scheduling is skipped if we have already detected an error in this loop
527        let indexes = starting_index..starting_index + transactions.len();
528        // Widening usize index to OrderedTaskId (= u128) won't ever fail.
529        let task_ids = indexes.map(|i| i.try_into().unwrap());
530        first_err = first_err.and_then(|()| {
531            bank.schedule_transaction_executions(transactions.into_iter().zip_eq(task_ids))
532        });
533    }
534    first_err
535}
536
537fn execute_batches(
538    bank: &Arc<Bank>,
539    replay_tx_thread_pool: &ThreadPool,
540    locked_entries: impl ExactSizeIterator<Item = LockedTransactionsWithIndexes<SanitizedTransaction>>,
541    transaction_status_sender: Option<&TransactionStatusSender>,
542    replay_vote_sender: Option<&ReplayVoteSender>,
543    timing: &mut BatchExecutionTiming,
544    log_messages_bytes_limit: Option<usize>,
545    prioritization_fee_cache: &PrioritizationFeeCache,
546) -> Result<()> {
547    if locked_entries.len() == 0 {
548        return Ok(());
549    }
550
551    let tx_batches: Vec<_> = locked_entries
552        .into_iter()
553        .map(
554            |LockedTransactionsWithIndexes {
555                 lock_results,
556                 transactions,
557                 starting_index,
558             }| {
559                let ending_index = starting_index + transactions.len();
560                TransactionBatchWithIndexes {
561                    batch: TransactionBatch::new(
562                        lock_results,
563                        bank,
564                        OwnedOrBorrowed::Owned(transactions),
565                    ),
566                    transaction_indexes: (starting_index..ending_index).collect(),
567                }
568            },
569        )
570        .collect();
571
572    let execute_batches_internal_metrics = execute_batches_internal(
573        bank,
574        replay_tx_thread_pool,
575        &tx_batches,
576        transaction_status_sender,
577        replay_vote_sender,
578        log_messages_bytes_limit,
579        prioritization_fee_cache,
580    )?;
581
582    // Pass false because this code-path is never touched by unified scheduler.
583    timing.accumulate(execute_batches_internal_metrics, false);
584    Ok(())
585}
586
587/// Process an ordered list of entries in parallel
588/// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry
589/// 2. Process the locked group in parallel
590/// 3. Register the `Tick` if it's available
591/// 4. Update the leader scheduler, goto 1
592///
593/// This method is for use testing against a single Bank, and assumes `Bank::transaction_count()`
594/// represents the number of transactions executed in this Bank
595pub fn process_entries_for_tests(
596    bank: &BankWithScheduler,
597    entries: Vec<Entry>,
598    transaction_status_sender: Option<&TransactionStatusSender>,
599    replay_vote_sender: Option<&ReplayVoteSender>,
600) -> Result<()> {
601    let replay_tx_thread_pool = create_thread_pool(1);
602    let verify_transaction = {
603        let bank = bank.clone_with_scheduler();
604        move |versioned_tx: VersionedTransaction| -> Result<RuntimeTransaction<SanitizedTransaction>> {
605            bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
606        }
607    };
608
609    let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap();
610    let mut batch_timing = BatchExecutionTiming::default();
611    let replay_entries: Vec<_> = entry::verify_transactions(
612        entries,
613        &replay_tx_thread_pool,
614        Arc::new(verify_transaction),
615    )?
616    .into_iter()
617    .map(|entry| {
618        let starting_index = entry_starting_index;
619        if let EntryType::Transactions(ref transactions) = entry {
620            entry_starting_index = entry_starting_index.saturating_add(transactions.len());
621        }
622        ReplayEntry {
623            entry,
624            starting_index,
625        }
626    })
627    .collect();
628
629    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
630    let result = process_entries(
631        bank,
632        &replay_tx_thread_pool,
633        replay_entries,
634        transaction_status_sender,
635        replay_vote_sender,
636        &mut batch_timing,
637        None,
638        &ignored_prioritization_fee_cache,
639    );
640
641    debug!("process_entries: {batch_timing:?}");
642    result
643}
644
645fn process_entries(
646    bank: &BankWithScheduler,
647    replay_tx_thread_pool: &ThreadPool,
648    entries: Vec<ReplayEntry>,
649    transaction_status_sender: Option<&TransactionStatusSender>,
650    replay_vote_sender: Option<&ReplayVoteSender>,
651    batch_timing: &mut BatchExecutionTiming,
652    log_messages_bytes_limit: Option<usize>,
653    prioritization_fee_cache: &PrioritizationFeeCache,
654) -> Result<()> {
655    // accumulator for entries that can be processed in parallel
656    let mut batches = vec![];
657    let mut tick_hashes = vec![];
658
659    for ReplayEntry {
660        entry,
661        starting_index,
662    } in entries
663    {
664        match entry {
665            EntryType::Tick(hash) => {
666                // If it's a tick, save it for later
667                tick_hashes.push(hash);
668                if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
669                    // If it's a tick that will cause a new blockhash to be created,
670                    // execute the group and register the tick
671                    process_batches(
672                        bank,
673                        replay_tx_thread_pool,
674                        batches.drain(..),
675                        transaction_status_sender,
676                        replay_vote_sender,
677                        batch_timing,
678                        log_messages_bytes_limit,
679                        prioritization_fee_cache,
680                    )?;
681                    for hash in tick_hashes.drain(..) {
682                        bank.register_tick(&hash);
683                    }
684                }
685            }
686            EntryType::Transactions(transactions) => {
687                queue_batches_with_lock_retry(
688                    bank,
689                    starting_index,
690                    transactions,
691                    &mut batches,
692                    |batches| {
693                        process_batches(
694                            bank,
695                            replay_tx_thread_pool,
696                            batches,
697                            transaction_status_sender,
698                            replay_vote_sender,
699                            batch_timing,
700                            log_messages_bytes_limit,
701                            prioritization_fee_cache,
702                        )
703                    },
704                )?;
705            }
706        }
707    }
708    process_batches(
709        bank,
710        replay_tx_thread_pool,
711        batches.into_iter(),
712        transaction_status_sender,
713        replay_vote_sender,
714        batch_timing,
715        log_messages_bytes_limit,
716        prioritization_fee_cache,
717    )?;
718    for hash in tick_hashes {
719        bank.register_tick(&hash);
720    }
721    Ok(())
722}
723
/// Attempts to lock the accounts referenced by `transactions` and, on
/// success, appends them to `batches` as a single locked batch.
///
/// If the initial lock attempt fails, any partially-acquired locks are
/// released, the already-queued `batches` are flushed through
/// `process_batches` (dropping a batch releases its account locks), and the
/// lock is retried once. If the retry succeeds, the batch is pushed into
/// `batches`; if it fails again the entry conflicts with *itself*, which a
/// properly functioning leader should never produce, so the error is
/// reported and returned (marking the block as dead upstream).
fn queue_batches_with_lock_retry(
    bank: &Bank,
    starting_index: usize,
    transactions: Vec<RuntimeTransaction<SanitizedTransaction>>,
    batches: &mut Vec<LockedTransactionsWithIndexes<SanitizedTransaction>>,
    mut process_batches: impl FnMut(
        Drain<LockedTransactionsWithIndexes<SanitizedTransaction>>,
    ) -> Result<()>,
) -> Result<()> {
    // try to lock the accounts
    let lock_results = bank.try_lock_accounts(&transactions);
    let first_lock_err = first_err(&lock_results);
    if first_lock_err.is_ok() {
        batches.push(LockedTransactionsWithIndexes {
            lock_results,
            transactions,
            starting_index,
        });
        return Ok(());
    }

    // We need to unlock the transactions that succeeded to lock before the
    // retry.
    bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));

    // We failed to lock, there are 2 possible reasons:
    // 1. A batch already in `batches` holds the lock.
    // 2. The batch is "self-conflicting" (i.e. the batch has account lock conflicts with itself)

    // Use the callback to process batches, and clear them.
    // Clearing the batches will `Drop` the batches which will unlock the accounts.
    process_batches(batches.drain(..))?;

    // Retry the lock
    let lock_results = bank.try_lock_accounts(&transactions);
    match first_err(&lock_results) {
        Ok(()) => {
            batches.push(LockedTransactionsWithIndexes {
                lock_results,
                transactions,
                starting_index,
            });
            Ok(())
        }
        Err(err) => {
            // We still may have succeeded to lock some accounts, unlock them.
            bank.unlock_accounts(transactions.iter().zip(lock_results.iter()));

            // An entry has account lock conflicts with *itself*, which should not happen
            // if generated by a properly functioning leader
            datapoint_error!(
                "validator_process_entry_error",
                (
                    "error",
                    format!(
                        "Lock accounts error, entry conflicts with itself, txs: {transactions:?}"
                    ),
                    String
                )
            );
            Err(err)
        }
    }
}
794
/// Errors that can arise while replaying the blockstore's contents
/// (entry loading, block/transaction validity, fork selection, hard forks).
#[derive(Error, Debug)]
pub enum BlockstoreProcessorError {
    #[error("failed to load entries, error: {0}")]
    FailedToLoadEntries(#[from] BlockstoreError),

    #[error("failed to load meta")]
    FailedToLoadMeta,

    #[error("failed to replay bank 0, did you forget to provide a snapshot")]
    FailedToReplayBank0,

    #[error("invalid block error: {0}")]
    InvalidBlock(#[from] BlockError),

    #[error("invalid transaction error: {0}")]
    InvalidTransaction(#[from] TransactionError),

    #[error("no valid forks found")]
    NoValidForksFound,

    #[error("invalid hard fork slot {0}")]
    InvalidHardFork(Slot),

    #[error("root bank with mismatched capitalization at {0}")]
    RootBankWithMismatchedCapitalization(Slot),

    #[error("incomplete final fec set")]
    IncompleteFinalFecSet,

    #[error("invalid retransmitter signature final fec set")]
    InvalidRetransmitterSignatureFinalFecSet,
}
827
/// Callback for accessing bank state after each slot is confirmed while
/// processing the blockstore.
pub type ProcessSlotCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

/// Options controlling how ledger contents are (re)processed at startup.
#[derive(Default, Clone)]
pub struct ProcessOptions {
    /// Run PoH, transaction signature and other transaction verifications on the entries.
    pub run_verification: bool,
    // NOTE(review): presumably caches the full leader schedule rather than
    // computing on demand — confirm against LeaderScheduleCache usage.
    pub full_leader_cache: bool,
    // If set, stop replay once this slot is reached — TODO confirm exact
    // semantics (inclusive/exclusive) at the usage site.
    pub halt_at_slot: Option<Slot>,
    /// Invoked with each bank after its slot is confirmed.
    pub slot_callback: Option<ProcessSlotCallback>,
    // Additional hard-fork slots to apply during replay, if any.
    pub new_hard_forks: Option<Vec<Slot>>,
    // Keys passed through to `Bank` construction for debug tracking.
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    pub limit_load_slot_count_from_snapshot: Option<usize>,
    /// Allow replaying slots that are marked dead in the blockstore.
    pub allow_dead_slots: bool,
    pub accounts_db_skip_shrink: bool,
    pub accounts_db_force_initial_clean: bool,
    pub accounts_db_config: AccountsDbConfig,
    pub verify_index: bool,
    pub runtime_config: RuntimeConfig,
    /// true if after processing the contents of the blockstore at startup, we should run an accounts hash calc
    /// This is useful for debugging.
    pub run_final_accounts_hash_calc: bool,
    pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup,
    #[cfg(feature = "dev-context-only-utils")]
    pub hash_overrides: Option<HashOverrides>,
    /// Abort the process instead of returning an error when an invalid block
    /// is encountered.
    pub abort_on_invalid_block: bool,
    /// Set block cost limits to `u64::MAX`, effectively disabling them.
    pub no_block_cost_limits: bool,
}
857
858pub fn test_process_blockstore(
859    genesis_config: &GenesisConfig,
860    blockstore: &Blockstore,
861    opts: &ProcessOptions,
862    exit: Arc<AtomicBool>,
863) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
864    let (bank_forks, leader_schedule_cache, ..) = crate::bank_forks_utils::load_bank_forks(
865        genesis_config,
866        blockstore,
867        Vec::new(),
868        &SnapshotConfig::new_disabled(),
869        opts,
870        None,
871        None,
872        None,
873        exit.clone(),
874    )
875    .unwrap();
876
877    process_blockstore_from_root(
878        blockstore,
879        &bank_forks,
880        &leader_schedule_cache,
881        opts,
882        None,
883        None,
884        None, // snapshots are disabled
885    )
886    .unwrap();
887
888    (bank_forks, leader_schedule_cache)
889}
890
/// Creates the genesis bank (slot 0), replays its entries from the
/// blockstore, and returns the resulting `BankForks`.
///
/// Returns an error if processing bank 0's ledger contents fails.
pub(crate) fn process_blockstore_for_bank_0(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    account_paths: Vec<PathBuf>,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> result::Result<Arc<RwLock<BankForks>>, BlockstoreProcessorError> {
    // Setup bank for slot 0
    let bank0 = Bank::new_from_genesis(
        genesis_config,
        Arc::new(opts.runtime_config.clone()),
        account_paths,
        opts.debug_keys.clone(),
        opts.accounts_db_config.clone(),
        accounts_update_notifier,
        None,
        exit,
        None,
        None,
    );
    let bank0_slot = bank0.slot();
    let bank_forks = BankForks::new_rw_arc(bank0);

    info!("Processing ledger for slot 0...");
    // One-off replay pool sized to the machine's CPU count.
    let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
    process_bank_0(
        &bank_forks
            .read()
            .unwrap()
            .get_with_scheduler(bank0_slot)
            .unwrap(),
        blockstore,
        &replay_tx_thread_pool,
        opts,
        transaction_status_sender,
        &VerifyRecyclers::default(),
        entry_notification_sender,
    )?;

    Ok(bank_forks)
}
935
/// Process blockstore from a known root bank.
///
/// Expects `bank_forks` to contain exactly one (root) bank. Marks the start
/// slot as rooted/connected when this process has primary blockstore access,
/// then replays all frozen forks found in the blockstore, reporting replay
/// metrics when finished.
#[allow(clippy::too_many_arguments)]
pub fn process_blockstore_from_root(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    snapshot_controller: Option<&SnapshotController>,
) -> result::Result<(), BlockstoreProcessorError> {
    let (start_slot, start_slot_hash) = {
        // Starting slot must be a root, and thus has no parents
        assert_eq!(bank_forks.read().unwrap().banks().len(), 1);
        let bank = bank_forks.read().unwrap().root_bank();
        #[cfg(feature = "dev-context-only-utils")]
        if let Some(hash_overrides) = &opts.hash_overrides {
            info!("Will override following slots' hashes: {hash_overrides:#?}");
            bank.set_hash_overrides(hash_overrides.clone());
        }
        if opts.no_block_cost_limits {
            warn!("setting block cost limits to MAX");
            bank.write_cost_tracker()
                .unwrap()
                .set_limits(u64::MAX, u64::MAX, u64::MAX);
        }
        assert!(bank.parent().is_none());
        (bank.slot(), bank.hash())
    };

    info!("Processing ledger from slot {start_slot}...");
    let now = Instant::now();

    // Ensure start_slot is rooted for correct replay; also ensure start_slot and
    // qualifying children are marked as connected
    if blockstore.is_primary_access() {
        blockstore
            .mark_slots_as_if_rooted_normally_at_startup(
                vec![(start_slot, Some(start_slot_hash))],
                true,
            )
            .expect("Couldn't mark start_slot as root in startup");
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .expect("Couldn't mark start_slot as connected during startup")
    } else {
        info!(
            "Start slot {start_slot} isn't a root, and won't be updated due to secondary \
             blockstore access"
        );
    }

    if let Ok(Some(highest_slot)) = blockstore.highest_slot() {
        info!("ledger holds data through slot {highest_slot}");
    }

    let mut timing = ExecuteTimings::default();
    // Only replay if the blockstore has a meta entry for the start slot;
    // otherwise we started from a snapshot outside the ledger's slot range.
    let (num_slots_processed, num_new_roots_found) = if let Some(start_slot_meta) = blockstore
        .meta(start_slot)
        .unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}"))
    {
        let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
        load_frozen_forks(
            bank_forks,
            &start_slot_meta,
            blockstore,
            &replay_tx_thread_pool,
            leader_schedule_cache,
            opts,
            transaction_status_sender,
            entry_notification_sender,
            &mut timing,
            snapshot_controller,
        )?
    } else {
        // If there's no meta in the blockstore for the input `start_slot`,
        // then we started from a snapshot and are unable to process anything.
        //
        // If the ledger has any data at all, the snapshot was likely taken at
        // a slot that is not within the range of ledger min/max slot(s).
        warn!("Starting slot {start_slot} is not in Blockstore, unable to process");
        (0, 0)
    };

    let processing_time = now.elapsed();
    let num_frozen_banks = bank_forks.read().unwrap().frozen_banks().count();
    datapoint_info!(
        "process_blockstore_from_root",
        ("total_time_us", processing_time.as_micros(), i64),
        ("frozen_banks", num_frozen_banks, i64),
        ("slot", bank_forks.read().unwrap().root(), i64),
        ("num_slots_processed", num_slots_processed, i64),
        ("num_new_roots_found", num_new_roots_found, i64),
        ("forks", bank_forks.read().unwrap().banks().len(), i64),
    );

    info!("ledger processing timing: {timing:?}");
    {
        let bank_forks = bank_forks.read().unwrap();
        let mut bank_slots = bank_forks.banks().keys().copied().collect::<Vec<_>>();
        bank_slots.sort_unstable();

        info!(
            "ledger processed in {}. root slot is {}, {} bank{}: {}",
            HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
                .to_text_en(Accuracy::Precise, Tense::Present),
            bank_forks.root(),
            bank_slots.len(),
            if bank_slots.len() > 1 { "s" } else { "" },
            bank_slots.iter().map(|slot| slot.to_string()).join(", "),
        );
        // Replay must leave no partially-processed banks behind.
        assert!(bank_forks.active_bank_slots().is_empty());
    }

    Ok(())
}
1052
/// Verify that a segment of entries has the correct number of ticks and hashes
///
/// `entries` is the latest segment for the slot, `slot_full` indicates whether
/// the blockstore has marked the slot complete, and `tick_hash_count` carries
/// the running hash count across calls for the same slot.
fn verify_ticks(
    bank: &Bank,
    mut entries: &[Entry],
    slot_full: bool,
    tick_hash_count: &mut u64,
) -> std::result::Result<(), BlockError> {
    let next_bank_tick_height = bank.tick_height() + entries.tick_count();
    let max_bank_tick_height = bank.max_tick_height();

    if next_bank_tick_height > max_bank_tick_height {
        warn!("Too many entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooManyTicks);
    }

    if next_bank_tick_height < max_bank_tick_height && slot_full {
        info!("Too few entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooFewTicks);
    }

    if next_bank_tick_height == max_bank_tick_height {
        // The slot's final entry must be a tick.
        let has_trailing_entry = entries.last().map(|e| !e.is_tick()).unwrap_or_default();
        if has_trailing_entry {
            warn!("Slot: {} did not end with a tick entry", bank.slot());
            return Err(BlockError::TrailingEntry);
        }

        if !slot_full {
            warn!("Slot: {} was not marked full", bank.slot());
            return Err(BlockError::InvalidLastTick);
        }
    }

    if let Some(first_alpenglow_slot) = bank
        .feature_set
        .activated_slot(&agave_feature_set::alpenglow::id())
    {
        if bank.parent_slot() >= first_alpenglow_slot {
            // If both the parent and the bank slot are in an epoch post alpenglow activation,
            // no tick verification is needed
            return Ok(());
        }

        // If the bank is in the alpenglow epoch, but the parent is from an epoch
        // where the feature flag is not active, we must verify ticks that correspond
        // to the epoch in which PoH is active. This verification is critical, as otherwise
        // a leader could jump the gun and publish a block in the alpenglow epoch without waiting
        // the appropriate time as determined by PoH in the prior epoch.
        if bank.slot() >= first_alpenglow_slot && next_bank_tick_height == max_bank_tick_height {
            if entries.is_empty() {
                // This shouldn't happen, but good to double check
                error!("Processing empty entries in verify_ticks()");
                return Ok(());
            }
            // last entry must be a tick, as verified by the `has_trailing_entry`
            // check above. Because in Alpenglow the last tick does not have any
            // hashing guarantees, we pass everything but that last tick to the
            // entry verification.
            entries = &entries[..entries.len() - 1];
        }
    }

    let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
    if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
        warn!(
            "Tick with invalid number of hashes found in slot: {}",
            bank.slot()
        );
        return Err(BlockError::InvalidTickHashCount);
    }

    Ok(())
}
1126
/// Replays all currently-available entries for `bank`'s slot via
/// [`confirm_slot`] and then verifies the bank reached its final tick,
/// returning `BlockError::Incomplete` otherwise. Verification is skipped
/// when `opts.run_verification` is false.
#[allow(clippy::too_many_arguments)]
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn confirm_full_slot(
    blockstore: &Blockstore,
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    let mut confirmation_timing = ConfirmationTiming::default();
    let skip_verification = !opts.run_verification;
    // Prioritization fees are irrelevant during startup replay; use a
    // zero-capacity cache that is discarded afterwards.
    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);

    confirm_slot(
        blockstore,
        bank,
        replay_tx_thread_pool,
        &mut confirmation_timing,
        progress,
        skip_verification,
        transaction_status_sender,
        entry_notification_sender,
        replay_vote_sender,
        recyclers,
        opts.allow_dead_slots,
        opts.runtime_config.log_messages_bytes_limit,
        &ignored_prioritization_fee_cache,
    )?;

    // Fold the per-slot execution timings into the caller's accumulator.
    timing.accumulate(&confirmation_timing.batch_execute.totals);

    if !bank.is_complete() {
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::Incomplete,
        ))
    } else {
        Ok(())
    }
}
1171
/// Measures different parts of the slot confirmation processing pipeline.
#[derive(Debug)]
pub struct ConfirmationTiming {
    /// Moment when the `ConfirmationTiming` instance was created.  Used to track the total wall
    /// clock time from the moment the first shred for the slot is received and to the moment the
    /// slot is complete.
    pub started: Instant,

    /// Wall clock time used by the slot confirmation code, including PoH/signature verification,
    /// and replay.  As replay can run in parallel with the verification, this value can not be
    /// recovered from the `replay_elapsed` and or `{poh,transaction}_verify_elapsed`.  This
    /// includes failed cases, when `confirm_slot_entries` exits with an error.  In microseconds.
    /// When unified scheduler is enabled, replay excludes the transaction execution, only
    /// accounting for task creation and submission to the scheduler.
    pub confirmation_elapsed: u64,

    /// Wall clock time used by the entry replay code.  Does not include the PoH or the transaction
    /// signature/precompiles verification, but can overlap with the PoH and signature verification.
    /// In microseconds.
    /// When unified scheduler is enabled, replay excludes the transaction execution, only
    /// accounting for task creation and submission to the scheduler.
    pub replay_elapsed: u64,

    /// Wall clock times, used for the PoH verification of entries.  In microseconds.
    pub poh_verify_elapsed: u64,

    /// Wall clock time, used for the signature verification as well as precompiles verification.
    /// In microseconds.
    pub transaction_verify_elapsed: u64,

    /// Wall clock time spent loading data sets (and entries) from the blockstore.  This does not
    /// include the case when the blockstore load failed.  In microseconds.
    pub fetch_elapsed: u64,

    /// Same as `fetch_elapsed` above, but for the case when the blockstore load fails.  In
    /// microseconds.
    pub fetch_fail_elapsed: u64,

    /// `batch_execute()` measurements.
    pub batch_execute: BatchExecutionTiming,
}
1213
1214impl Default for ConfirmationTiming {
1215    fn default() -> Self {
1216        Self {
1217            started: Instant::now(),
1218            confirmation_elapsed: 0,
1219            replay_elapsed: 0,
1220            poh_verify_elapsed: 0,
1221            transaction_verify_elapsed: 0,
1222            fetch_elapsed: 0,
1223            fetch_fail_elapsed: 0,
1224            batch_execute: BatchExecutionTiming::default(),
1225        }
1226    }
1227}
1228
/// Measures times related to transaction execution in a slot.
///
/// Updated via [`BatchExecutionTiming::accumulate`] as each group of batches
/// finishes replay.
#[derive(Debug, Default)]
pub struct BatchExecutionTiming {
    /// Time used by transaction execution.  Accumulated across multiple threads that are running
    /// `execute_batch()`.
    pub totals: ExecuteTimings,

    /// Wall clock time used by the transaction execution part of pipeline.
    /// [`ConfirmationTiming::replay_elapsed`] includes this time.  In microseconds.
    wall_clock_us: Saturating<u64>,

    /// Time used to execute transactions, via `execute_batch()`, in the thread that consumed the
    /// most time (in terms of total_thread_us) among rayon threads. Note that the slowest thread
    /// is determined each time a given group of batches is newly processed. So, this is a coarse
    /// approximation of wall-time single-threaded linearized metrics, discarding all metrics other
    /// than the arbitrary set of batches mixed with various transactions, which replayed slowest
    /// as a whole for each rayon processing session.
    ///
    /// When unified scheduler is enabled, this field isn't maintained, because it's not batched at
    /// all.
    slowest_thread: ThreadExecuteTimings,
}
1251
impl BatchExecutionTiming {
    /// Folds metrics from one freshly executed group of batches into the
    /// running totals. Batch-level fields (wall clock, batch counts, slowest
    /// thread) are skipped when the unified scheduler is enabled, since work
    /// is not batched under that scheduler.
    pub fn accumulate(
        &mut self,
        new_batch: ExecuteBatchesInternalMetrics,
        is_unified_scheduler_enabled: bool,
    ) {
        let Self {
            totals,
            wall_clock_us,
            slowest_thread,
        } = self;

        // These metric fields aren't applicable for the unified scheduler
        if !is_unified_scheduler_enabled {
            *wall_clock_us += new_batch.execute_batches_us;

            totals.saturating_add_in_place(TotalBatchesLen, new_batch.total_batches_len);
            totals.saturating_add_in_place(NumExecuteBatches, 1);
        }

        // Per-thread execution timings always contribute to the totals.
        for thread_times in new_batch.execution_timings_per_thread.values() {
            totals.accumulate(&thread_times.execute_timings);
        }

        // This whole metric (replay-slot-end-to-end-stats) isn't applicable for the unified
        // scheduler.
        if !is_unified_scheduler_enabled {
            let slowest = new_batch
                .execution_timings_per_thread
                .values()
                .max_by_key(|thread_times| thread_times.total_thread_us);

            if let Some(slowest) = slowest {
                slowest_thread.accumulate(slowest);
                slowest_thread
                    .execute_timings
                    .saturating_add_in_place(NumExecuteBatches, 1);
            };
        }
    }
}
1293
/// Execution timings accumulated by a single replay worker thread.
#[derive(Debug, Default)]
pub struct ThreadExecuteTimings {
    /// Total wall clock time spent by this thread, in microseconds.
    pub total_thread_us: Saturating<u64>,
    /// Number of transactions this thread executed.
    pub total_transactions_executed: Saturating<u64>,
    /// Detailed per-stage execution timings for this thread.
    pub execute_timings: ExecuteTimings,
}
1300
impl ThreadExecuteTimings {
    /// Reports this thread's accumulated timings as the
    /// `replay-slot-end-to-end-stats` datapoint for `slot`.
    pub fn report_stats(&self, slot: Slot) {
        lazy! {
            datapoint_info!(
                "replay-slot-end-to-end-stats",
                ("slot", slot as i64, i64),
                ("total_thread_us", self.total_thread_us.0 as i64, i64),
                ("total_transactions_executed", self.total_transactions_executed.0 as i64, i64),
                // Everything inside the `eager!` block will be eagerly expanded before
                // evaluation of the rest of the surrounding macro.
                // Pass false because this code-path is never touched by unified scheduler.
                eager!{report_execute_timings!(self.execute_timings, false)}
            );
        };
    }

    /// Adds `other`'s timings into `self`.
    pub fn accumulate(&mut self, other: &ThreadExecuteTimings) {
        self.execute_timings.accumulate(&other.execute_timings);
        self.total_thread_us += other.total_thread_us;
        self.total_transactions_executed += other.total_transactions_executed;
    }
}
1323
/// Per-slot replay statistics: a newtype over [`ConfirmationTiming`] that
/// adds reporting helpers while dereferencing to the inner timings.
#[derive(Default)]
pub struct ReplaySlotStats(ConfirmationTiming);
impl std::ops::Deref for ReplaySlotStats {
    type Target = ConfirmationTiming;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl std::ops::DerefMut for ReplaySlotStats {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
1337
impl ReplaySlotStats {
    /// Emits the `replay-slot-stats` datapoint for `slot`, plus (when the
    /// unified scheduler is disabled) the slowest-thread end-to-end stats,
    /// and per-program timing datapoints when trace logging is enabled.
    pub fn report_stats(
        &self,
        slot: Slot,
        num_txs: usize,
        num_entries: usize,
        num_shreds: u64,
        bank_complete_time_us: u64,
        is_unified_scheduler_enabled: bool,
    ) {
        // Several metric names/fields differ under the unified scheduler,
        // where replay time excludes transaction execution.
        let confirmation_elapsed = if is_unified_scheduler_enabled {
            "confirmation_without_replay_us"
        } else {
            "confirmation_time_us"
        };
        let replay_elapsed = if is_unified_scheduler_enabled {
            "task_submission_us"
        } else {
            "replay_time"
        };
        let execute_batches_us = if is_unified_scheduler_enabled {
            None
        } else {
            Some(self.batch_execute.wall_clock_us.0 as i64)
        };

        lazy! {
            datapoint_info!(
                "replay-slot-stats",
                ("slot", slot as i64, i64),
                ("fetch_entries_time", self.fetch_elapsed as i64, i64),
                (
                    "fetch_entries_fail_time",
                    self.fetch_fail_elapsed as i64,
                    i64
                ),
                (
                    "entry_poh_verification_time",
                    self.poh_verify_elapsed as i64,
                    i64
                ),
                (
                    "entry_transaction_verification_time",
                    self.transaction_verify_elapsed as i64,
                    i64
                ),
                (confirmation_elapsed, self.confirmation_elapsed as i64, i64),
                (replay_elapsed, self.replay_elapsed as i64, i64),
                ("execute_batches_us", execute_batches_us, Option<i64>),
                (
                    "replay_total_elapsed",
                    self.started.elapsed().as_micros() as i64,
                    i64
                ),
                ("bank_complete_time_us", bank_complete_time_us, i64),
                ("total_transactions", num_txs as i64, i64),
                ("total_entries", num_entries as i64, i64),
                ("total_shreds", num_shreds as i64, i64),
                // Everything inside the `eager!` block will be eagerly expanded before
                // evaluation of the rest of the surrounding macro.
                eager!{report_execute_timings!(self.batch_execute.totals, is_unified_scheduler_enabled)}
            );
        };

        // Skip reporting replay-slot-end-to-end-stats entirely if unified scheduler is enabled,
        // because the whole metrics itself is only meaningful for rayon-based worker threads.
        //
        // See slowest_thread doc comment for details.
        if !is_unified_scheduler_enabled {
            self.batch_execute.slowest_thread.report_stats(slot);
        }

        // per_program_timings datapoints are only reported at the trace level, and all preparations
        // required to generate them can only occur when trace level is enabled.
        if log::log_enabled!(log::Level::Trace) {
            // Sort programs by accumulated execution time, descending.
            let mut per_pubkey_timings: Vec<_> = self
                .batch_execute
                .totals
                .details
                .per_program_timings
                .iter()
                .collect();
            per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us));
            let (total_us, total_units, total_count, total_errored_units, total_errored_count) =
                per_pubkey_timings.iter().fold(
                    (0, 0, 0, 0, 0),
                    |(sum_us, sum_units, sum_count, sum_errored_units, sum_errored_count), a| {
                        (
                            sum_us + a.1.accumulated_us.0,
                            sum_units + a.1.accumulated_units.0,
                            sum_count + a.1.count.0,
                            sum_errored_units + a.1.total_errored_units.0,
                            sum_errored_count + a.1.errored_txs_compute_consumed.len(),
                        )
                    },
                );

            // Report the five most expensive programs individually ...
            for (pubkey, time) in per_pubkey_timings.iter().take(5) {
                datapoint_trace!(
                    "per_program_timings",
                    ("slot", slot as i64, i64),
                    ("pubkey", pubkey.to_string(), String),
                    ("execute_us", time.accumulated_us.0, i64),
                    ("accumulated_units", time.accumulated_units.0, i64),
                    ("errored_units", time.total_errored_units.0, i64),
                    ("count", time.count.0, i64),
                    (
                        "errored_count",
                        time.errored_txs_compute_consumed.len(),
                        i64
                    ),
                );
            }
            // ... and one aggregate datapoint covering all programs.
            datapoint_info!(
                "per_program_timings",
                ("slot", slot as i64, i64),
                ("pubkey", "all", String),
                ("execute_us", total_us, i64),
                ("accumulated_units", total_units, i64),
                ("count", total_count, i64),
                ("errored_units", total_errored_units, i64),
                ("errored_count", total_errored_count, i64)
            );
        }
    }
}
1464
/// Tracks how far replay has progressed within a slot so repeated
/// `confirm_slot` calls can resume where the previous call left off.
#[derive(Default)]
pub struct ConfirmationProgress {
    // Hash of the most recently processed entry.
    pub last_entry: Hash,
    // Running hash count carried across tick verification calls
    // (see `verify_ticks`).
    pub tick_hash_count: u64,
    // Number of shreds already consumed from the blockstore for this slot.
    pub num_shreds: u64,
    // Number of entries already processed in this slot.
    pub num_entries: usize,
    // Number of transactions already processed in this slot.
    pub num_txs: usize,
}
1473
1474impl ConfirmationProgress {
1475    pub fn new(last_entry: Hash) -> Self {
1476        Self {
1477            last_entry,
1478            ..Self::default()
1479        }
1480    }
1481}
1482
1483#[allow(clippy::too_many_arguments)]
1484pub fn confirm_slot(
1485    blockstore: &Blockstore,
1486    bank: &BankWithScheduler,
1487    replay_tx_thread_pool: &ThreadPool,
1488    timing: &mut ConfirmationTiming,
1489    progress: &mut ConfirmationProgress,
1490    skip_verification: bool,
1491    transaction_status_sender: Option<&TransactionStatusSender>,
1492    entry_notification_sender: Option<&EntryNotifierSender>,
1493    replay_vote_sender: Option<&ReplayVoteSender>,
1494    recyclers: &VerifyRecyclers,
1495    allow_dead_slots: bool,
1496    log_messages_bytes_limit: Option<usize>,
1497    prioritization_fee_cache: &PrioritizationFeeCache,
1498) -> result::Result<(), BlockstoreProcessorError> {
1499    let slot = bank.slot();
1500
1501    let slot_entries_load_result = {
1502        let mut load_elapsed = Measure::start("load_elapsed");
1503        let load_result = blockstore
1504            .get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
1505            .map_err(BlockstoreProcessorError::FailedToLoadEntries);
1506        load_elapsed.stop();
1507        if load_result.is_err() {
1508            timing.fetch_fail_elapsed += load_elapsed.as_us();
1509        } else {
1510            timing.fetch_elapsed += load_elapsed.as_us();
1511        }
1512        load_result
1513    }?;
1514
1515    confirm_slot_entries(
1516        bank,
1517        replay_tx_thread_pool,
1518        slot_entries_load_result,
1519        timing,
1520        progress,
1521        skip_verification,
1522        transaction_status_sender,
1523        entry_notification_sender,
1524        replay_vote_sender,
1525        recyclers,
1526        log_messages_bytes_limit,
1527        prioritization_fee_cache,
1528    )
1529}
1530
/// Verifies and replays one batch of entries for `bank`'s slot.
///
/// Unless `skip_verification` is set, this runs tick/hash-count checks
/// (`verify_ticks`), PoH entry-chain verification, and transaction signature
/// verification, then replays the transactions via `process_entries()`.
/// Timing results are accumulated into `timing`; on success, replay position
/// (shreds/entries/txs consumed and the last entry hash) is recorded in
/// `progress`. Each entry is also reported to `entry_notification_sender`,
/// when one is provided.
#[allow(clippy::too_many_arguments)]
fn confirm_slot_entries(
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    slot_entries_load_result: (Vec<Entry>, u64, bool),
    timing: &mut ConfirmationTiming,
    progress: &mut ConfirmationProgress,
    skip_verification: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    recyclers: &VerifyRecyclers,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> result::Result<(), BlockstoreProcessorError> {
    let ConfirmationTiming {
        confirmation_elapsed,
        replay_elapsed,
        poh_verify_elapsed,
        transaction_verify_elapsed,
        batch_execute: batch_execute_timing,
        ..
    } = timing;

    let confirmation_elapsed_timer = Measure::start("confirmation_elapsed");
    // Charge the total wall time of this function to `confirmation_elapsed`,
    // including the early-return error paths below.
    defer! {
        *confirmation_elapsed += confirmation_elapsed_timer.end_as_us();
    };

    let slot = bank.slot();
    let (entries, num_shreds, slot_full) = slot_entries_load_result;
    let num_entries = entries.len();
    let mut entry_tx_starting_indexes = Vec::with_capacity(num_entries);
    let mut entry_tx_starting_index = progress.num_txs;
    // Count transactions while recording, for each entry, the global index of
    // its first transaction within the slot; optionally notify listeners of
    // every entry as it is scanned.
    let num_txs = entries
        .iter()
        .enumerate()
        .map(|(i, entry)| {
            if let Some(entry_notification_sender) = entry_notification_sender {
                let entry_index = progress.num_entries.saturating_add(i);
                if let Err(err) = entry_notification_sender.send(EntryNotification {
                    slot,
                    index: entry_index,
                    entry: entry.into(),
                    starting_transaction_index: entry_tx_starting_index,
                }) {
                    warn!(
                        "Slot {slot}, entry {entry_index} entry_notification_sender send failed: \
                         {err:?}"
                    );
                }
            }
            let num_txs = entry.transactions.len();
            let next_tx_starting_index = entry_tx_starting_index.saturating_add(num_txs);
            entry_tx_starting_indexes.push(entry_tx_starting_index);
            entry_tx_starting_index = next_tx_starting_index;
            num_txs
        })
        .sum::<usize>();
    trace!(
        "Fetched entries for slot {slot}, num_entries: {num_entries}, num_shreds: {num_shreds}, \
         num_txs: {num_txs}, slot_full: {slot_full}",
    );

    if !skip_verification {
        let tick_hash_count = &mut progress.tick_hash_count;
        verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| {
            warn!(
                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: \
                 {}, shred_index: {}, slot_full: {}",
                err,
                slot,
                num_entries,
                bank.tick_height(),
                progress.last_entry,
                bank.last_blockhash(),
                num_shreds,
                slot_full,
            );
            err
        })?;
    }

    let last_entry_hash = entries.last().map(|e| e.hash);
    // Kick off PoH verification of the entry chain against the previous
    // batch's last entry hash. An immediate `Failure` status means the check
    // already failed; otherwise completion is awaited via `finish_verify`
    // further below.
    let verifier = if !skip_verification {
        datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
        let entry_state = entries.start_verify(
            &progress.last_entry,
            replay_tx_thread_pool,
            recyclers.clone(),
        );
        if entry_state.status() == EntryVerificationStatus::Failure {
            warn!("Ledger proof of history failed at slot: {slot}");
            return Err(BlockError::InvalidEntryHash.into());
        }
        Some(entry_state)
    } else {
        None
    };

    // Closure handed to transaction verification: sanitizes/verifies each
    // versioned transaction against the bank.
    let verify_transaction = {
        let bank = bank.clone_with_scheduler();
        move |versioned_tx: VersionedTransaction,
              verification_mode: TransactionVerificationMode|
              -> Result<RuntimeTransaction<SanitizedTransaction>> {
            bank.verify_transaction(versioned_tx, verification_mode)
        }
    };

    let transaction_verification_start = Instant::now();
    let transaction_verification_result = entry::start_verify_transactions(
        entries,
        skip_verification,
        replay_tx_thread_pool,
        recyclers.clone(),
        Arc::new(verify_transaction),
    );
    let transaction_cpu_duration_us = transaction_verification_start.elapsed().as_micros() as u64;

    let mut transaction_verification_result = match transaction_verification_result {
        Ok(transaction_verification_result) => transaction_verification_result,
        Err(err) => {
            warn!(
                "Ledger transaction signature verification failed at slot: {}",
                bank.slot()
            );
            return Err(err.into());
        }
    };

    let entries = transaction_verification_result
        .entries()
        .expect("Transaction verification generates entries");

    // Replay the (sanitized) entries while signature/PoH verification may
    // still be completing in the background.
    let mut replay_timer = Measure::start("replay_elapsed");
    let replay_entries: Vec<_> = entries
        .into_iter()
        .zip(entry_tx_starting_indexes)
        .map(|(entry, tx_starting_index)| ReplayEntry {
            entry,
            starting_index: tx_starting_index,
        })
        .collect();
    let process_result = process_entries(
        bank,
        replay_tx_thread_pool,
        replay_entries,
        transaction_status_sender,
        replay_vote_sender,
        batch_execute_timing,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )
    .map_err(BlockstoreProcessorError::from);
    replay_timer.stop();
    *replay_elapsed += replay_timer.as_us();

    {
        // If running signature verification on the GPU, wait for that computation to finish, and
        // get the result of it. If we did the signature verification on the CPU, this just returns
        // the already-computed result produced in start_verify_transactions.  Either way, check the
        // result of the signature verification.
        let valid = transaction_verification_result.finish_verify();

        // The GPU Entry verification (if any) is kicked off right when the CPU-side Entry
        // verification finishes, so these times should be disjoint
        *transaction_verify_elapsed +=
            transaction_cpu_duration_us + transaction_verification_result.gpu_verify_duration();

        if !valid {
            warn!(
                "Ledger transaction signature verification failed at slot: {}",
                bank.slot()
            );
            return Err(TransactionError::SignatureFailure.into());
        }
    }

    // Await PoH verification (skipped entirely when `skip_verification`).
    if let Some(mut verifier) = verifier {
        let verified = verifier.finish_verify(replay_tx_thread_pool);
        *poh_verify_elapsed += verifier.poh_duration_us();
        if !verified {
            warn!("Ledger proof of history failed at slot: {}", bank.slot());
            return Err(BlockError::InvalidEntryHash.into());
        }
    }

    // Surface any replay error only after both verifications have passed.
    process_result?;

    // All checks passed: advance the confirmation progress for this slot.
    progress.num_shreds += num_shreds;
    progress.num_entries += num_entries;
    progress.num_txs += num_txs;
    if let Some(last_entry_hash) = last_entry_hash {
        progress.last_entry = last_entry_hash;
    }

    Ok(())
}
1729
/// Special handling required for processing the entries in slot 0.
///
/// Replays slot 0 into `bank0` (without vote/status senders for the replay
/// itself), waits for any installed scheduler to complete, freezes the bank,
/// and records the bank hash in the blockstore when running with primary
/// access. A freeze message is sent to `transaction_status_sender` if one is
/// provided. Any replay failure is mapped to
/// `BlockstoreProcessorError::FailedToReplayBank0`.
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn process_bank_0(
    bank0: &BankWithScheduler,
    blockstore: &Blockstore,
    replay_tx_thread_pool: &ThreadPool,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    recyclers: &VerifyRecyclers,
    entry_notification_sender: Option<&EntryNotifierSender>,
) -> result::Result<(), BlockstoreProcessorError> {
    assert_eq!(bank0.slot(), 0);
    let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
    confirm_full_slot(
        blockstore,
        bank0,
        replay_tx_thread_pool,
        opts,
        recyclers,
        &mut progress,
        None,
        entry_notification_sender,
        None,
        &mut ExecuteTimings::default(),
    )
    .map_err(|_| BlockstoreProcessorError::FailedToReplayBank0)?;
    if let Some((result, _timings)) = bank0.wait_for_completed_scheduler() {
        // Replay of slot 0 must succeed; a scheduler error here is fatal.
        result.unwrap();
    }
    bank0.freeze();
    // Only a primary-access blockstore may be written to.
    if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
    }

    if let Some(transaction_status_sender) = transaction_status_sender {
        transaction_status_sender.send_transaction_status_freeze_message(bank0);
    }

    Ok(())
}
1770
/// Given a bank, add its children to the pending slots queue if those children
/// slots are complete.
///
/// Children beyond `opts.halt_at_slot` are skipped, as are dead slots unless
/// `opts.allow_dead_slots` is set. `pending_slots` is left reverse-sorted by
/// slot so the next slot to process can be popped off the end.
fn process_next_slots(
    bank: &Arc<Bank>,
    meta: &SlotMeta,
    blockstore: &Blockstore,
    leader_schedule_cache: &LeaderScheduleCache,
    pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>,
    opts: &ProcessOptions,
) -> result::Result<(), BlockstoreProcessorError> {
    if meta.next_slots.is_empty() {
        return Ok(());
    }

    // This is a fork point if there are multiple children, create a new child bank for each fork
    for next_slot in &meta.next_slots {
        // Don't descend past the requested halt slot.
        if opts
            .halt_at_slot
            .is_some_and(|halt_at_slot| *next_slot > halt_at_slot)
        {
            continue;
        }
        if !opts.allow_dead_slots && blockstore.is_dead(*next_slot) {
            continue;
        }

        // NOTE(review): the final unwrap assumes that a slot listed in
        // `meta.next_slots` always has a SlotMeta in the blockstore —
        // presumably a blockstore invariant; confirm before relying on it.
        let next_meta = blockstore
            .meta(*next_slot)
            .map_err(|err| {
                warn!("Failed to load meta for slot {next_slot}: {err:?}");
                BlockstoreProcessorError::FailedToLoadMeta
            })?
            .unwrap();

        // Only process full slots in blockstore_processor, replay_stage
        // handles any partials
        if next_meta.is_full() {
            let next_bank = Bank::new_from_parent(
                bank.clone(),
                &leader_schedule_cache
                    .slot_leader_at(*next_slot, Some(bank))
                    .unwrap(),
                *next_slot,
            );
            set_alpenglow_ticks(&next_bank);
            trace!(
                "New bank for slot {}, parent slot is {}",
                next_slot,
                bank.slot(),
            );
            pending_slots.push((next_meta, next_bank, bank.last_blockhash()));
        }
    }

    // Reverse sort by slot, so the next slot to be processed can be popped
    pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot()));
    Ok(())
}
1829
1830/// Set alpenglow bank tick height.
1831///
1832/// For alpenglow banks this tick height is `max_tick_height` - 1, for a bank on the epoch boundary
1833/// of feature activation, we need ticks_per_slot for each slot between the parent and epoch boundary
1834/// and one extra tick for the alpenglow bank
1835pub fn set_alpenglow_ticks(bank: &Bank) {
1836    let Some(first_alpenglow_slot) = bank
1837        .feature_set
1838        .activated_slot(&agave_feature_set::alpenglow::id())
1839    else {
1840        return;
1841    };
1842
1843    let Some(alpenglow_ticks) = calculate_alpenglow_ticks(
1844        bank.slot(),
1845        first_alpenglow_slot,
1846        bank.parent_slot(),
1847        bank.ticks_per_slot(),
1848    ) else {
1849        return;
1850    };
1851
1852    info!(
1853        "Alpenglow: Setting tick height for slot {} to {}",
1854        bank.slot(),
1855        bank.max_tick_height() - alpenglow_ticks
1856    );
1857    bank.set_tick_height(bank.max_tick_height() - alpenglow_ticks);
1858}
1859
1860/// Calculates how many ticks are needed for a block at `slot` with parent `parent_slot`
1861///
1862/// If both `parent_slot` and `slot` are greater than or equal to `first_alpenglow_slot`, then
1863/// only 1 tick is needed. This tick has no hashing guarantees, it is simply used as a signal
1864/// for the end of the block.
1865///
1866/// If both `parent_slot` and `slot` are less than `first_alpenglow_slot`, we need the
1867/// appropriate amount of PoH ticks, indicated by a None return value.
1868///
1869/// If `parent_slot` is less than `first_alpenglow_slot` and `slot` is greater than or equal
1870/// to `first_alpenglow_slot` (A block that "straddles" the activation epoch boundary) then:
1871///
1872/// 1. All slots between `parent_slot` and `first_alpenglow_slot` need to have `ticks_per_slot` ticks
1873/// 2. One extra tick for the actual alpenglow slot
1874/// 3. There are no ticks for any skipped alpenglow slots
1875fn calculate_alpenglow_ticks(
1876    slot: Slot,
1877    first_alpenglow_slot: Slot,
1878    parent_slot: Slot,
1879    ticks_per_slot: u64,
1880) -> Option<u64> {
1881    // Slots before alpenglow shouldn't have alpenglow ticks
1882    if slot < first_alpenglow_slot {
1883        return None;
1884    }
1885
1886    let alpenglow_ticks = if parent_slot < first_alpenglow_slot && slot >= first_alpenglow_slot {
1887        (first_alpenglow_slot - parent_slot - 1) * ticks_per_slot + 1
1888    } else {
1889        1
1890    };
1891
1892    Some(alpenglow_ticks)
1893}
1894
/// Starting with the root slot corresponding to `start_slot_meta`, iteratively
/// find and process children slots from the blockstore.
///
/// Returns a tuple (a, b) where a is the number of slots processed and b is
/// the number of newly found cluster roots.
#[allow(clippy::too_many_arguments)]
fn load_frozen_forks(
    bank_forks: &RwLock<BankForks>,
    start_slot_meta: &SlotMeta,
    blockstore: &Blockstore,
    replay_tx_thread_pool: &ThreadPool,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    timing: &mut ExecuteTimings,
    snapshot_controller: Option<&SnapshotController>,
) -> result::Result<(u64, usize), BlockstoreProcessorError> {
    let blockstore_max_root = blockstore.max_root();
    let mut root = bank_forks.read().unwrap().root();
    let max_root = std::cmp::max(root, blockstore_max_root);
    info!(
        "load_frozen_forks() latest root from blockstore: {blockstore_max_root}, max_root: \
         {max_root}",
    );

    // The total number of slots processed
    let mut total_slots_processed = 0;
    // The total number of newly identified root slots
    let mut total_rooted_slots = 0;

    // Seed the work queue with the completed children of the start slot.
    let mut pending_slots = vec![];
    process_next_slots(
        &bank_forks
            .read()
            .unwrap()
            .get(start_slot_meta.slot)
            .unwrap(),
        start_slot_meta,
        blockstore,
        leader_schedule_cache,
        &mut pending_slots,
        opts,
    )?;

    // If the current root already equals the requested halt slot there is
    // nothing to replay.
    if Some(bank_forks.read().unwrap().root()) != opts.halt_at_slot {
        let recyclers = VerifyRecyclers::default();
        let mut all_banks = HashMap::new();

        const STATUS_REPORT_INTERVAL: Duration = Duration::from_secs(2);
        let mut last_status_report = Instant::now();
        // Per-reporting-interval counters, reset after each status report.
        let mut slots_processed = 0;
        let mut txs = 0;
        let mut set_root_us = 0;
        let mut root_retain_us = 0;
        let mut process_single_slot_us = 0;
        let mut voting_us = 0;

        while !pending_slots.is_empty() {
            timing.details.per_program_timings.clear();
            let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
            let slot = bank.slot();
            // Periodically report replay progress and timing breakdown.
            if last_status_report.elapsed() > STATUS_REPORT_INTERVAL {
                let secs = last_status_report.elapsed().as_secs() as f32;
                let slots_per_sec = slots_processed as f32 / secs;
                let txs_per_sec = txs as f32 / secs;
                info!(
                    "processing ledger: slot={slot}, root_slot={root} slots={slots_processed}, \
                     slots/s={slots_per_sec}, txs/s={txs_per_sec}"
                );
                debug!(
                    "processing ledger timing: set_root_us={set_root_us}, \
                     root_retain_us={root_retain_us}, \
                     process_single_slot_us:{process_single_slot_us}, voting_us: {voting_us}"
                );

                last_status_report = Instant::now();
                slots_processed = 0;
                txs = 0;
                set_root_us = 0;
                root_retain_us = 0;
                process_single_slot_us = 0;
                voting_us = 0;
            }

            let mut progress = ConfirmationProgress::new(last_entry_hash);
            let mut m = Measure::start("process_single_slot");
            let bank = bank_forks.write().unwrap().insert_from_ledger(bank);
            // A failed slot is removed from bank_forks and skipped (or aborts
            // the whole load when `abort_on_invalid_block` is set).
            if let Err(error) = process_single_slot(
                blockstore,
                &bank,
                replay_tx_thread_pool,
                opts,
                &recyclers,
                &mut progress,
                transaction_status_sender,
                entry_notification_sender,
                None,
                timing,
            ) {
                assert!(bank_forks.write().unwrap().remove(bank.slot()).is_some());
                if opts.abort_on_invalid_block {
                    Err(error)?
                }
                continue;
            }
            txs += progress.num_txs;

            // Block must be frozen by this point; otherwise,
            // process_single_slot() would have errored above.
            assert!(bank.is_frozen());
            all_banks.insert(bank.slot(), bank.clone_with_scheduler());
            m.stop();
            process_single_slot_us += m.as_us();

            let mut m = Measure::start("voting");
            // If we've reached the last known root in blockstore, start looking
            // for newer cluster confirmed roots
            let new_root_bank = {
                if bank_forks.read().unwrap().root() >= max_root {
                    supermajority_root_from_vote_accounts(
                        bank.total_epoch_stake(),
                        &bank.vote_accounts(),
                    ).and_then(|supermajority_root| {
                        if supermajority_root > root {
                            // If there's a cluster confirmed root greater than our last
                            // replayed root, then because the cluster confirmed root should
                            // be descended from our last root, it must exist in `all_banks`
                            let cluster_root_bank = all_banks.get(&supermajority_root).unwrap();

                            // cluster root must be a descendant of our root, otherwise something
                            // is drastically wrong
                            assert!(cluster_root_bank.ancestors.contains_key(&root));
                            info!(
                                "blockstore processor found new cluster confirmed root: {}, observed in bank: {}",
                                cluster_root_bank.slot(), bank.slot()
                            );

                            // Ensure cluster-confirmed root and parents are set as root in blockstore
                            let mut rooted_slots = vec![];
                            let mut new_root_bank = cluster_root_bank.clone_without_scheduler();
                            loop {
                                if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay!
                                assert!(new_root_bank.slot() > root);

                                rooted_slots.push((new_root_bank.slot(), Some(new_root_bank.hash())));
                                // As noted, the cluster confirmed root should be descended from
                                // our last root; therefore parent should be set
                                new_root_bank = new_root_bank.parent().unwrap();
                            }
                            total_rooted_slots += rooted_slots.len();
                            if blockstore.is_primary_access() {
                                blockstore
                                    .mark_slots_as_if_rooted_normally_at_startup(rooted_slots, true)
                                    .expect("Blockstore::mark_slots_as_if_rooted_normally_at_startup() should succeed");
                            }
                            Some(cluster_root_bank)
                        } else {
                            None
                        }
                    })
                } else if blockstore.is_root(slot) {
                    Some(&bank)
                } else {
                    None
                }
            };
            m.stop();
            voting_us += m.as_us();

            if let Some(new_root_bank) = new_root_bank {
                let mut m = Measure::start("set_root");
                root = new_root_bank.slot();

                leader_schedule_cache.set_root(new_root_bank);
                new_root_bank.prune_program_cache(root, new_root_bank.epoch());
                let _ = bank_forks
                    .write()
                    .unwrap()
                    .set_root(root, snapshot_controller, None);
                m.stop();
                set_root_us += m.as_us();

                // Filter out all non descendants of the new root
                let mut m = Measure::start("filter pending slots");
                pending_slots
                    .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(&root));
                all_banks.retain(|_, bank| bank.ancestors.contains_key(&root));
                m.stop();
                root_retain_us += m.as_us();
            }

            slots_processed += 1;
            total_slots_processed += 1;

            trace!(
                "Bank for {}slot {} is complete",
                if root == slot { "root " } else { "" },
                slot,
            );

            // Stop once the halt slot (if any) has been reached.
            let done_processing = opts
                .halt_at_slot
                .map(|halt_at_slot| slot >= halt_at_slot)
                .unwrap_or(false);
            if done_processing {
                if opts.run_final_accounts_hash_calc {
                    bank.run_final_hash_calc();
                }
                break;
            }

            // Enqueue this slot's completed children for processing.
            process_next_slots(
                &bank,
                &meta,
                blockstore,
                leader_schedule_cache,
                &mut pending_slots,
                opts,
            )?;
        }
    } else if opts.run_final_accounts_hash_calc {
        bank_forks.read().unwrap().root_bank().run_final_hash_calc();
    }

    Ok((total_slots_processed, total_rooted_slots))
}
2122
2123// `roots` is sorted largest to smallest by root slot
2124fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> {
2125    if roots.is_empty() {
2126        return None;
2127    }
2128
2129    // Find latest root
2130    let mut total = 0;
2131    let mut prev_root = roots[0].0;
2132    for (root, stake) in roots.iter() {
2133        assert!(*root <= prev_root);
2134        total += stake;
2135        if total as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE {
2136            return Some(*root);
2137        }
2138        prev_root = *root;
2139    }
2140
2141    None
2142}
2143
2144fn supermajority_root_from_vote_accounts(
2145    total_epoch_stake: u64,
2146    vote_accounts: &VoteAccountsHashMap,
2147) -> Option<Slot> {
2148    let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
2149        .values()
2150        .filter_map(|(stake, account)| {
2151            if *stake == 0 {
2152                return None;
2153            }
2154
2155            Some((account.vote_state_view().root_slot()?, *stake))
2156        })
2157        .collect();
2158
2159    // Sort from greatest to smallest slot
2160    roots_stakes.sort_unstable_by(|a, b| a.0.cmp(&b.0).reverse());
2161
2162    // Find latest root
2163    supermajority_root(&roots_stakes, total_epoch_stake)
2164}
2165
/// Processes and replays the contents of a single slot, returns Error
/// if failed to play the slot.
///
/// On replay or FEC-set-check failure the slot is marked dead in the
/// blockstore (primary access only) so validators don't replay it and see
/// AlreadyProcessed errors later in ReplayStage. On success the bank is
/// frozen, its hash recorded in the blockstore (primary access only), the
/// optional `slot_callback` invoked, and a freeze message sent to
/// `transaction_status_sender` if provided.
#[allow(clippy::too_many_arguments)]
pub fn process_single_slot(
    blockstore: &Blockstore,
    bank: &BankWithScheduler,
    replay_tx_thread_pool: &ThreadPool,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    let slot = bank.slot();
    // Mark corrupt slots as dead so validators don't replay this slot and
    // see AlreadyProcessed errors later in ReplayStage
    confirm_full_slot(
        blockstore,
        bank,
        replay_tx_thread_pool,
        opts,
        recyclers,
        progress,
        transaction_status_sender,
        entry_notification_sender,
        replay_vote_sender,
        timing,
    )
    .and_then(|()| {
        // Fold any installed scheduler's result (and timings) into the slot's
        // overall outcome.
        if let Some((result, completed_timings)) = bank.wait_for_completed_scheduler() {
            timing.accumulate(&completed_timings);
            result?
        }
        Ok(())
    })
    .map_err(|err| {
        warn!("slot {slot} failed to verify: {err}");
        if blockstore.is_primary_access() {
            blockstore
                .set_dead_slot(slot)
                .expect("Failed to mark slot as dead in blockstore");
        } else {
            info!(
                "Failed slot {slot} won't be marked dead due to being secondary blockstore access"
            );
        }
        err
    })?;

    // NOTE(review): the scheduler was already awaited in the success path
    // above; this second wait looks defensive — confirm whether it is
    // intentionally kept for a path where the first wait returns None.
    if let Some((result, _timings)) = bank.wait_for_completed_scheduler() {
        result?
    }

    let block_id = blockstore
        .check_last_fec_set_and_get_block_id(slot, bank.hash(), &bank.feature_set)
        .inspect_err(|err| {
            warn!("slot {slot} failed last fec set checks: {err}");
            if blockstore.is_primary_access() {
                blockstore
                    .set_dead_slot(slot)
                    .expect("Failed to mark slot as dead in blockstore");
            } else {
                info!(
                    "Failed last fec set checks slot {slot} won't be marked dead due to being \
                     secondary blockstore access"
                );
            }
        })?;
    bank.set_block_id(block_id);
    bank.freeze(); // all banks handled by this routine are created from complete slots

    if let Some(slot_callback) = &opts.slot_callback {
        slot_callback(bank);
    }

    if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
    }

    if let Some(transaction_status_sender) = transaction_status_sender {
        transaction_status_sender.send_transaction_status_freeze_message(bank);
    }

    Ok(())
}
2253
/// Sequence number obtained from `DependencyTracker::declare_work()` and
/// attached to each `TransactionStatusMessage::Batch`.
type WorkSequence = u64;
2255
/// Message forwarded over a `TransactionStatusSender`'s channel.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum TransactionStatusMessage {
    /// A batch of replayed transactions plus an optional work sequence number
    /// (present when the sender has a `DependencyTracker` configured).
    Batch((TransactionStatusBatch, Option<WorkSequence>)),
    /// Sent after the given bank has been frozen.
    Freeze(Arc<Bank>),
}
2262
/// Per-slot payload describing a batch of replayed transactions together with
/// their commit results, balances, and costs.
#[derive(Debug)]
pub struct TransactionStatusBatch {
    pub slot: Slot,
    pub transactions: Vec<SanitizedTransaction>,
    // NOTE(review): the Vec fields below appear to parallel `transactions`
    // index-for-index — confirm against the producer before relying on it.
    pub commit_results: Vec<TransactionCommitResult>,
    pub balances: TransactionBalancesSet,
    pub token_balances: TransactionTokenBalancesSet,
    pub costs: Vec<Option<u64>>,
    pub transaction_indexes: Vec<usize>,
}
2273
/// Handle for forwarding `TransactionStatusMessage`s over a crossbeam channel.
#[derive(Clone, Debug)]
pub struct TransactionStatusSender {
    pub sender: Sender<TransactionStatusMessage>,
    /// When present, each batch is tagged with a work sequence obtained from
    /// `DependencyTracker::declare_work()`.
    pub dependency_tracker: Option<Arc<DependencyTracker>>,
}
2279
2280impl TransactionStatusSender {
2281    pub fn send_transaction_status_batch(
2282        &self,
2283        slot: Slot,
2284        transactions: Vec<SanitizedTransaction>,
2285        commit_results: Vec<TransactionCommitResult>,
2286        balances: TransactionBalancesSet,
2287        token_balances: TransactionTokenBalancesSet,
2288        costs: Vec<Option<u64>>,
2289        transaction_indexes: Vec<usize>,
2290    ) {
2291        let work_sequence = self
2292            .dependency_tracker
2293            .as_ref()
2294            .map(|dependency_tracker| dependency_tracker.declare_work());
2295
2296        if let Err(e) = self.sender.send(TransactionStatusMessage::Batch((
2297            TransactionStatusBatch {
2298                slot,
2299                transactions,
2300                commit_results,
2301                balances,
2302                token_balances,
2303                costs,
2304                transaction_indexes,
2305            },
2306            work_sequence,
2307        ))) {
2308            trace!("Slot {slot} transaction_status send batch failed: {e:?}");
2309        }
2310    }
2311
2312    pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) {
2313        if let Err(e) = self
2314            .sender
2315            .send(TransactionStatusMessage::Freeze(bank.clone()))
2316        {
2317            let slot = bank.slot();
2318            warn!("Slot {slot} transaction_status send freeze message failed: {e:?}");
2319        }
2320    }
2321}
2322
2323// used for tests only
2324pub fn fill_blockstore_slot_with_ticks(
2325    blockstore: &Blockstore,
2326    ticks_per_slot: u64,
2327    slot: u64,
2328    parent_slot: u64,
2329    last_entry_hash: Hash,
2330) -> Hash {
2331    // Only slot 0 can be equal to the parent_slot
2332    assert!(slot.saturating_sub(1) >= parent_slot);
2333    let num_slots = (slot - parent_slot).max(1);
2334    let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
2335    let last_entry_hash = entries.last().unwrap().hash;
2336
2337    blockstore
2338        .write_entries(
2339            slot,
2340            0,
2341            0,
2342            ticks_per_slot,
2343            Some(parent_slot),
2344            true,
2345            &Arc::new(Keypair::new()),
2346            entries,
2347            0,
2348        )
2349        .unwrap();
2350
2351    last_entry_hash
2352}
2353
2354#[cfg(test)]
2355pub mod tests {
2356    use {
2357        super::*,
2358        crate::{
2359            blockstore_options::{AccessType, BlockstoreOptions},
2360            genesis_utils::{
2361                create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
2362            },
2363        },
2364        assert_matches::assert_matches,
2365        rand::{thread_rng, Rng},
2366        solana_account::{AccountSharedData, WritableAccount},
2367        solana_cost_model::transaction_cost::TransactionCost,
2368        solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
2369        solana_epoch_schedule::EpochSchedule,
2370        solana_hash::Hash,
2371        solana_instruction::{error::InstructionError, Instruction},
2372        solana_keypair::Keypair,
2373        solana_native_token::LAMPORTS_PER_SOL,
2374        solana_program_runtime::declare_process_instruction,
2375        solana_pubkey::Pubkey,
2376        solana_runtime::{
2377            bank::bank_hash_details::SlotDetails,
2378            genesis_utils::{
2379                self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
2380            },
2381            installed_scheduler_pool::{
2382                MockInstalledScheduler, MockUninstalledScheduler, SchedulerAborted,
2383                SchedulingContext,
2384            },
2385        },
2386        solana_signer::Signer,
2387        solana_svm::transaction_processor::ExecutionRecordingConfig,
2388        solana_system_interface::error::SystemError,
2389        solana_system_transaction as system_transaction,
2390        solana_transaction::Transaction,
2391        solana_transaction_error::TransactionError,
2392        solana_vote::{vote_account::VoteAccount, vote_transaction},
2393        solana_vote_program::{
2394            self,
2395            vote_state::{TowerSync, VoteStateV4, VoteStateVersions, MAX_LOCKOUT_HISTORY},
2396        },
2397        std::{collections::BTreeSet, slice, sync::RwLock},
2398        test_case::{test_case, test_matrix},
2399        trees::tr,
2400    };
2401
2402    // Convenience wrapper to optionally process blockstore with Secondary access.
2403    //
2404    // Setting up the ledger for a test requires Primary access as items will need to be inserted.
2405    // However, once a Secondary access has been opened, it won't automatically see updates made by
2406    // the Primary access. So, open (and close) the Secondary access within this function to ensure
2407    // that "stale" Secondary accesses don't propagate.
2408    fn test_process_blockstore_with_custom_options(
2409        genesis_config: &GenesisConfig,
2410        blockstore: &Blockstore,
2411        opts: &ProcessOptions,
2412        access_type: AccessType,
2413    ) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
2414        match access_type {
2415            AccessType::Primary | AccessType::PrimaryForMaintenance => {
2416                // Attempting to open a second Primary access would fail, so
2417                // just pass the original session if it is a Primary variant
2418                test_process_blockstore(genesis_config, blockstore, opts, Arc::default())
2419            }
2420            AccessType::Secondary => {
2421                let secondary_blockstore = Blockstore::open_with_options(
2422                    blockstore.ledger_path(),
2423                    BlockstoreOptions {
2424                        access_type,
2425                        ..BlockstoreOptions::default()
2426                    },
2427                )
2428                .expect("Unable to open access to blockstore");
2429                test_process_blockstore(genesis_config, &secondary_blockstore, opts, Arc::default())
2430            }
2431        }
2432    }
2433
2434    fn process_entries_for_tests_without_scheduler(
2435        bank: &Arc<Bank>,
2436        entries: Vec<Entry>,
2437    ) -> Result<()> {
2438        process_entries_for_tests(
2439            &BankWithScheduler::new_without_scheduler(bank.clone()),
2440            entries,
2441            None,
2442            None,
2443        )
2444    }
2445
    // Runs the missing-hashes scenario with read-write (Primary) blockstore access.
    #[test]
    fn test_process_blockstore_with_missing_hashes() {
        do_test_process_blockstore_with_missing_hashes(AccessType::Primary);
    }
2450
    // Runs the missing-hashes scenario with read-only (Secondary) blockstore access.
    #[test]
    fn test_process_blockstore_with_missing_hashes_secondary_access() {
        do_test_process_blockstore_with_missing_hashes(AccessType::Secondary);
    }
2455
    // Intentionally make slot 1 faulty and ensure that processing sees it as dead
    fn do_test_process_blockstore_with_missing_hashes(blockstore_access_type: AccessType) {
        agave_logger::setup();

        let hashes_per_tick = 2;
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        // Each tick carries one hash fewer than genesis demands
        // (hashes_per_tick - 1), so verification of slot 1 should fail.
        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            blockstore_access_type.clone(),
        );
        // Only the genesis slot freezes; faulty slot 1 is rejected.
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);

        let dead_slots: Vec<Slot> = blockstore.dead_slots_iterator(0).unwrap().collect();
        match blockstore_access_type {
            // Secondary access is immutable so even though a dead slot
            // will be identified, it won't actually be marked dead.
            AccessType::Secondary => {
                assert_eq!(dead_slots.len(), 0);
            }
            AccessType::Primary | AccessType::PrimaryForMaintenance => {
                assert_eq!(&dead_slots, &[1]);
            }
        }
    }
2511
    // A slot containing fewer ticks than expected should be rejected, while a
    // well-formed sibling fork must still process.
    #[test]
    fn test_process_blockstore_with_invalid_slot_tick_count() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1 with one tick missing
        let parent_slot = 0;
        let slot = 1;
        let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        // Should return slot 0, the last slot on the fork that is valid
        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            Arc::default(),
        );
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);

        // Write slot 2 fully; it chains directly off slot 0, not the bad slot 1.
        let _last_slot2_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);

        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            Arc::default(),
        );

        // One valid fork, one bad fork.  process_blockstore() should only return the valid fork
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 2]);
        assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 2);
        assert_eq!(bank_forks.read().unwrap().root(), 0);
    }
2573
    // A slot whose final tick is followed by an extra transaction entry must
    // be rejected: only slot 0 ends up frozen.
    #[test]
    fn test_process_blockstore_with_slot_with_trailing_entry() {
        agave_logger::setup();

        let GenesisConfigInfo {
            mint_keypair,
            genesis_config,
            ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
        // A transfer entry appended after the slot's last tick.
        let trailing_entry = {
            let keypair = Keypair::new();
            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
            next_entry(&blockhash, 1, vec![tx])
        };
        entries.push(trailing_entry);

        // Tricks blockstore into writing the trailing entry by lying that there is one more tick
        // per slot.
        let parent_slot = 0;
        let slot = 1;
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot + 1,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
    }
2623
    // An incomplete slot (not all ticks present) blocks its descendants from
    // processing, but does not affect other complete forks off the same parent.
    #[test]
    fn test_process_blockstore_with_incomplete_slot() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        /*
          Build a blockstore in the ledger with the following fork structure:

               slot 0 (all ticks)
                 |
               slot 1 (all ticks but one)
                 |
               slot 2 (all ticks)

           where slot 1 is incomplete (missing 1 tick at the end)
        */

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {ledger_path:?}");

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1
        // slot 1, points at slot 0.  Missing one tick
        {
            let parent_slot = 0;
            let slot = 1;
            let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
            // Capture the hash of the *full* tick set before dropping the last
            // tick, so later slots can still chain off the complete PoH stream.
            blockhash = entries.last().unwrap().hash;

            // throw away last one
            entries.pop();

            assert_matches!(
                blockstore.write_entries(
                    slot,
                    0,
                    0,
                    ticks_per_slot,
                    Some(parent_slot),
                    false,
                    &Arc::new(Keypair::new()),
                    entries,
                    0,
                ),
                Ok(_)
            );
        }

        // slot 2, points at slot 1
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);

        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());

        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]); // slot 1 isn't "full", we stop at slot zero

        /* Add a complete slot such that the store looks like:

                                 slot 0 (all ticks)
                               /                  \
               slot 1 (all ticks but one)        slot 3 (all ticks)
                      |
               slot 2 (all ticks)
        */
        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
        // Reprocess: slot 3 is complete and chains directly off slot 0, so it freezes.
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());

        // slot 1 isn't "full", we stop at slot zero
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 3]);
    }
2708
    // Rooting a slot on one fork should drop the other fork entirely and
    // squash away the rooted bank's ancestors.
    #[test]
    fn test_process_blockstore_with_two_forks_and_squash() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {ledger_path:?}");
        let mut last_entry_hash = blockhash;

        /*
            Build a blockstore in the ledger with the following fork structure:

                 slot 0
                   |
                 slot 1
                 /   \
            slot 2   |
               /     |
            slot 3   |
                     |
                   slot 4 <-- set_root(true)

        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Fork 1, ending at slot 3
        let last_slot1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
        last_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            2,
            1,
            last_slot1_entry_hash,
        );
        let last_fork1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);

        // Fork 2, ending at slot 4
        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            4,
            1,
            last_slot1_entry_hash,
        );

        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");

        // Rooting slot 4 means fork 1 (slots 2 and 3) no longer descends from a root.
        blockstore.set_roots([0, 1, 4].iter()).unwrap();

        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
        let bank_forks = bank_forks.read().unwrap();

        // One fork, other one is ignored b/c not a descendant of the root
        assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);

        // The root bank should report no parents (its ancestors were squashed away).
        assert!(&bank_forks[4]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .next()
            .is_none());

        // Ensure bank_forks holds the right banks
        verify_fork_infos(&bank_forks);

        assert_eq!(bank_forks.root(), 4);
    }
2787
2788    #[test]
2789    fn test_process_blockstore_with_two_forks() {
2790        agave_logger::setup();
2791
2792        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2793        let ticks_per_slot = genesis_config.ticks_per_slot;
2794
2795        // Create a new ledger with slot 0 full of ticks
2796        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2797        debug!("ledger_path: {ledger_path:?}");
2798        let mut last_entry_hash = blockhash;
2799
2800        /*
2801            Build a blockstore in the ledger with the following fork structure:
2802
2803                 slot 0
2804                   |
2805                 slot 1  <-- set_root(true)
2806                 /   \
2807            slot 2   |
2808               /     |
2809            slot 3   |
2810                     |
2811                   slot 4
2812
2813        */
2814        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2815
2816        // Fork 1, ending at slot 3
2817        let last_slot1_entry_hash =
2818            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
2819        last_entry_hash = fill_blockstore_slot_with_ticks(
2820            &blockstore,
2821            ticks_per_slot,
2822            2,
2823            1,
2824            last_slot1_entry_hash,
2825        );
2826        let last_fork1_entry_hash =
2827            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
2828
2829        // Fork 2, ending at slot 4
2830        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
2831            &blockstore,
2832            ticks_per_slot,
2833            4,
2834            1,
2835            last_slot1_entry_hash,
2836        );
2837
2838        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
2839        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");
2840
2841        blockstore.set_roots([0, 1].iter()).unwrap();
2842
2843        let opts = ProcessOptions {
2844            run_verification: true,
2845            ..ProcessOptions::default()
2846        };
2847        let (bank_forks, ..) =
2848            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
2849        let bank_forks = bank_forks.read().unwrap();
2850
2851        assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
2852        assert_eq!(bank_forks.working_bank().slot(), 4);
2853        assert_eq!(bank_forks.root(), 1);
2854
2855        assert_eq!(
2856            &bank_forks[3]
2857                .parents()
2858                .iter()
2859                .map(|bank| bank.slot())
2860                .collect::<Vec<_>>(),
2861            &[2, 1]
2862        );
2863        assert_eq!(
2864            &bank_forks[4]
2865                .parents()
2866                .iter()
2867                .map(|bank| bank.slot())
2868                .collect::<Vec<_>>(),
2869            &[1]
2870        );
2871
2872        assert_eq!(bank_forks.root(), 1);
2873
2874        // Ensure bank_forks holds the right banks
2875        verify_fork_infos(&bank_forks);
2876    }
2877
    // A slot marked dead is skipped, but its live sibling fork (slot 3) still
    // processes to completion.
    #[test]
    fn test_process_blockstore_with_dead_slot() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {ledger_path:?}");

        /*
                   slot 0
                     |
                   slot 1
                  /     \
                 /       \
           slot 2 (dead)  \
                           \
                        slot 3
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let slot1_blockhash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
        blockstore.set_dead_slot(2).unwrap();
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);

        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions::default(),
            Arc::default(),
        );
        let bank_forks = bank_forks.read().unwrap();

        // Dead slot 2 is absent from the frozen set.
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]);
        assert_eq!(bank_forks.working_bank().slot(), 3);
        assert_eq!(
            &bank_forks[3]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[1, 0]
        );
        verify_fork_infos(&bank_forks);
    }
2924
2925    #[test]
2926    fn test_process_blockstore_with_dead_child() {
2927        agave_logger::setup();
2928
2929        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
2930        let ticks_per_slot = genesis_config.ticks_per_slot;
2931        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
2932        debug!("ledger_path: {ledger_path:?}");
2933
2934        /*
2935                   slot 0
2936                     |
2937                   slot 1
2938                  /     \
2939                 /       \
2940              slot 2      \
2941               /           \
2942           slot 4 (dead)   slot 3
2943        */
2944        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2945        let slot1_blockhash =
2946            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
2947        let slot2_blockhash =
2948            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
2949        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
2950        blockstore.set_dead_slot(4).unwrap();
2951        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
2952
2953        let (bank_forks, ..) = test_process_blockstore(
2954            &genesis_config,
2955            &blockstore,
2956            &ProcessOptions::default(),
2957            Arc::default(),
2958        );
2959        let bank_forks = bank_forks.read().unwrap();
2960
2961        // Should see the parent of the dead child
2962        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]);
2963        assert_eq!(bank_forks.working_bank().slot(), 3);
2964
2965        assert_eq!(
2966            &bank_forks[3]
2967                .parents()
2968                .iter()
2969                .map(|bank| bank.slot())
2970                .collect::<Vec<_>>(),
2971            &[1, 0]
2972        );
2973        assert_eq!(
2974            &bank_forks[2]
2975                .parents()
2976                .iter()
2977                .map(|bank| bank.slot())
2978                .collect::<Vec<_>>(),
2979            &[1, 0]
2980        );
2981        assert_eq!(bank_forks.working_bank().slot(), 3);
2982        verify_fork_infos(&bank_forks);
2983    }
2984
    // If every child of the root is dead, only the root itself (slot 0) freezes.
    #[test]
    fn test_root_with_all_dead_children() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {ledger_path:?}");

        /*
                   slot 0
                 /        \
                /          \
           slot 1 (dead)  slot 2 (dead)
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
        blockstore.set_dead_slot(1).unwrap();
        blockstore.set_dead_slot(2).unwrap();
        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions::default(),
            Arc::default(),
        );
        let bank_forks = bank_forks.read().unwrap();

        // Should see only the parent of the dead children
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
        verify_fork_infos(&bank_forks);
    }
3017
    // A root that crosses an epoch boundary must not break ledger replay or
    // leader-schedule construction on restart.
    #[test]
    fn test_process_blockstore_epoch_boundary_root() {
        agave_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let mut last_entry_hash = blockhash;

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Let `last_slot` be the number of slots in the first two epochs
        let epoch_schedule = get_epoch_schedule(&genesis_config);
        let last_slot = epoch_schedule.get_last_slot_in_epoch(1);

        // Create a single chain of slots with all indexes in the range [0, last_slot + 1]
        for i in 1..=last_slot + 1 {
            last_entry_hash = fill_blockstore_slot_with_ticks(
                &blockstore,
                ticks_per_slot,
                i,
                i - 1,
                last_entry_hash,
            );
        }

        // Set a root on the last slot of the last confirmed epoch
        let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
        blockstore.set_roots(rooted_slots.iter()).unwrap();

        // Set a root on the next slot of the confirmed epoch
        blockstore
            .set_roots(std::iter::once(&(last_slot + 1)))
            .unwrap();

        // Check that we can properly restart the ledger / leader scheduler doesn't fail
        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
        let bank_forks = bank_forks.read().unwrap();

        // There is one fork, head is last_slot + 1
        assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);

        // The latest root should have purged all its parents
        assert!(&bank_forks[last_slot + 1]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .next()
            .is_none());
    }
3075
3076    #[test]
3077    fn test_first_err() {
3078        assert_eq!(first_err(&[Ok(())]), Ok(()));
3079        assert_eq!(
3080            first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
3081            Err(TransactionError::AlreadyProcessed)
3082        );
3083        assert_eq!(
3084            first_err(&[
3085                Ok(()),
3086                Err(TransactionError::AlreadyProcessed),
3087                Err(TransactionError::AccountInUse)
3088            ]),
3089            Err(TransactionError::AlreadyProcessed)
3090        );
3091        assert_eq!(
3092            first_err(&[
3093                Ok(()),
3094                Err(TransactionError::AccountInUse),
3095                Err(TransactionError::AlreadyProcessed)
3096            ]),
3097            Err(TransactionError::AccountInUse)
3098        );
3099        assert_eq!(
3100            first_err(&[
3101                Err(TransactionError::AccountInUse),
3102                Ok(()),
3103                Err(TransactionError::AlreadyProcessed)
3104            ]),
3105            Err(TransactionError::AccountInUse)
3106        );
3107    }
3108
    // A hash produced by a tick-only (empty) entry must become a usable recent
    // blockhash once the entries are processed.
    #[test]
    fn test_process_empty_entry_is_registered() {
        agave_logger::setup();

        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(2);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let keypair = Keypair::new();
        let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash());
        // Transaction referencing the hash of the final (empty) tick entry.
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair.pubkey(),
            1,
            slot_entries.last().unwrap().hash,
        );

        // First, ensure the TX is rejected because of the unregistered last ID
        assert_eq!(
            bank.process_transaction(&tx),
            Err(TransactionError::BlockhashNotFound)
        );

        // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
        process_entries_for_tests_without_scheduler(&bank, slot_entries).unwrap();
        assert_eq!(bank.process_transaction(&tx), Ok(()));
    }
3138
    #[test]
    fn test_process_ledger_simple() {
        // End-to-end replay check: write a slot mixing successful transfers,
        // failing transfers, and ticks into a blockstore, process it, and verify
        // the resulting bank's balances, tick height, and blockhash.
        agave_logger::setup();
        let leader_pubkey = solana_pubkey::new_rand();
        let mint = 100;
        let hashes_per_tick = 10;
        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(mint, &leader_pubkey, 50);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let (ledger_path, mut last_entry_hash) =
            create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {ledger_path:?}");

        let deducted_from_mint = 3;
        let mut entries = vec![];
        let blockhash = genesis_config.hash();
        for _ in 0..deducted_from_mint {
            // Transfer one token from the mint to a random account
            let keypair = Keypair::new();
            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
            entries.push(entry);

            // Add a second Transaction that will produce an
            // InstructionError<0, ResultWithNegativeLamports> error when processed
            let keypair2 = Keypair::new();
            let tx =
                system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
            entries.push(entry);
        }

        // Pad the first tick with the hashes not consumed by the entries above so
        // the slot stays consistent with hashes_per_tick.
        let remaining_hashes = hashes_per_tick - entries.len() as u64;
        let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
        entries.push(tick_entry);

        // Fill up the rest of slot 1 with ticks
        entries.extend(create_ticks(
            genesis_config.ticks_per_slot - 1,
            genesis_config.poh_config.hashes_per_tick.unwrap(),
            last_entry_hash,
        ));
        let last_blockhash = entries.last().unwrap().hash;

        // Persist the entries as slot 1 (child of genesis slot 0) and replay them.
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore
            .write_entries(
                1,
                0,
                0,
                genesis_config.ticks_per_slot,
                None,
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();
        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
        let bank_forks = bank_forks.read().unwrap();

        // Slots 0 and 1 are frozen, root stays at 0, and slot 1 is the working bank.
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
        assert_eq!(bank_forks.root(), 0);
        assert_eq!(bank_forks.working_bank().slot(), 1);

        let bank = bank_forks[1].clone();
        // Only the successful 1-lamport transfers debit the mint; the 101-lamport
        // transfers fail and leave the mint balance untouched.
        assert_eq!(
            bank.get_balance(&mint_keypair.pubkey()),
            mint - deducted_from_mint
        );
        assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot);
        assert_eq!(bank.last_blockhash(), last_blockhash);
    }
3220
3221    #[test]
3222    fn test_process_ledger_with_one_tick_per_slot() {
3223        let GenesisConfigInfo {
3224            mut genesis_config, ..
3225        } = create_genesis_config(123);
3226        genesis_config.ticks_per_slot = 1;
3227        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
3228
3229        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3230        let opts = ProcessOptions {
3231            run_verification: true,
3232            ..ProcessOptions::default()
3233        };
3234        let (bank_forks, ..) =
3235            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3236        let bank_forks = bank_forks.read().unwrap();
3237
3238        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
3239        let bank = bank_forks[0].clone();
3240        assert_eq!(bank.tick_height(), 1);
3241    }
3242
3243    #[test]
3244    fn test_process_ledger_options_full_leader_cache() {
3245        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
3246        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
3247
3248        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3249        let opts = ProcessOptions {
3250            full_leader_cache: true,
3251            ..ProcessOptions::default()
3252        };
3253        let (_bank_forks, leader_schedule) =
3254            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
3255        assert_eq!(leader_schedule.max_schedules(), usize::MAX);
3256    }
3257
3258    #[test]
3259    fn test_process_entries_tick() {
3260        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
3261        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
3262
3263        // ensure bank can process a tick
3264        assert_eq!(bank.tick_height(), 0);
3265        let tick = next_entry(&genesis_config.hash(), 1, vec![]);
3266        assert_eq!(
3267            process_entries_for_tests_without_scheduler(&bank, vec![tick]),
3268            Ok(())
3269        );
3270        assert_eq!(bank.tick_height(), 1);
3271    }
3272
3273    #[test]
3274    fn test_process_entries_2_entries_collision() {
3275        let GenesisConfigInfo {
3276            genesis_config,
3277            mint_keypair,
3278            ..
3279        } = create_genesis_config(1000);
3280        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3281        let keypair1 = Keypair::new();
3282        let keypair2 = Keypair::new();
3283
3284        let blockhash = bank.last_blockhash();
3285
3286        // ensure bank can process 2 entries that have a common account and no tick is registered
3287        let tx = system_transaction::transfer(
3288            &mint_keypair,
3289            &keypair1.pubkey(),
3290            2,
3291            bank.last_blockhash(),
3292        );
3293        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
3294        let tx = system_transaction::transfer(
3295            &mint_keypair,
3296            &keypair2.pubkey(),
3297            2,
3298            bank.last_blockhash(),
3299        );
3300        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
3301        assert_eq!(
3302            process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]),
3303            Ok(())
3304        );
3305        assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
3306        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
3307        assert_eq!(bank.last_blockhash(), blockhash);
3308    }
3309
    #[test]
    fn test_process_entries_2_txes_collision() {
        // Verify that an entry whose second transaction conflicts (on account
        // locks) with the previous entry's transaction is still processed
        // correctly across the entry boundary.
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
            )],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide
            ],
        );

        assert_eq!(
            process_entries_for_tests_without_scheduler(
                &bank,
                vec![entry_1_to_mint, entry_2_to_3_mint_to_1],
            ),
            Ok(())
        );

        // keypair1 paid 1 then 2 lamports back to the mint (4 -> 1); keypair2
        // paid 2 lamports to keypair3 (4 -> 2).
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
    }
3369
    #[test]
    fn test_process_entries_2_txes_collision_and_error() {
        // Verify that when one transaction in a batch fails with
        // BlockhashNotFound, the whole batch errors, already-applied siblings
        // are rolled back, and every account lock is released so the same
        // transactions can be re-prepared afterwards.
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));

        let good_tx = system_transaction::transfer(
            &keypair1,
            &mint_keypair.pubkey(),
            1,
            bank.last_blockhash(),
        );

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![
                good_tx.clone(),
                system_transaction::transfer(
                    &keypair4,
                    &keypair4.pubkey(),
                    1,
                    Hash::default(), // Should cause a transaction failure with BlockhashNotFound
                ),
            ],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide
            ],
        );

        assert_matches!(
            process_entries_for_tests_without_scheduler(
                &bank,
                vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()],
            ),
            Err(TransactionError::BlockhashNotFound)
        );

        // First transaction in first entry was rolled-back, so keypair1 didn't lose 1 lamport
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 4);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);

        // Check all accounts are unlocked
        let txs1 = entry_1_to_mint.transactions;
        let txs2 = entry_2_to_3_mint_to_1.transactions;
        let batch1 = bank.prepare_entry_batch(txs1).unwrap();
        for result in batch1.lock_results() {
            assert!(result.is_ok());
        }
        // txs1 and txs2 have accounts that conflict, so we must drop txs1 first
        drop(batch1);
        let batch2 = bank.prepare_entry_batch(txs2).unwrap();
        for result in batch2.lock_results() {
            assert!(result.is_ok());
        }
        drop(batch2);

        // ensure good_tx will succeed and was just rolled back above due to other failing tx
        let entry_3 = next_entry(&entry_2_to_3_mint_to_1.hash, 1, vec![good_tx]);
        assert_matches!(
            process_entries_for_tests_without_scheduler(&bank, vec![entry_3]),
            Ok(())
        );
        // First transaction in third entry succeeded, so keypair1 lost 1 lamport
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
    }
3465
    #[test]
    fn test_transaction_result_does_not_affect_bankhash() {
        // Verify that whether a transaction's instruction succeeds or fails
        // (with any InstructionError variant) does not change the frozen bank's
        // last_blockhash component, and that all failure variants produce
        // identical bank details to each other.
        agave_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);

        // Every InstructionError variant exercised below, indexed by position.
        fn get_instruction_errors() -> Vec<InstructionError> {
            vec![
                InstructionError::GenericError,
                InstructionError::InvalidArgument,
                InstructionError::InvalidInstructionData,
                InstructionError::InvalidAccountData,
                InstructionError::AccountDataTooSmall,
                InstructionError::InsufficientFunds,
                InstructionError::IncorrectProgramId,
                InstructionError::MissingRequiredSignature,
                InstructionError::AccountAlreadyInitialized,
                InstructionError::UninitializedAccount,
                InstructionError::UnbalancedInstruction,
                InstructionError::ModifiedProgramId,
                InstructionError::ExternalAccountLamportSpend,
                InstructionError::ExternalAccountDataModified,
                InstructionError::ReadonlyLamportChange,
                InstructionError::ReadonlyDataModified,
                InstructionError::DuplicateAccountIndex,
                InstructionError::ExecutableModified,
                InstructionError::RentEpochModified,
                InstructionError::NotEnoughAccountKeys,
                InstructionError::AccountDataSizeChanged,
                InstructionError::AccountNotExecutable,
                InstructionError::AccountBorrowFailed,
                InstructionError::AccountBorrowOutstanding,
                InstructionError::DuplicateAccountOutOfSync,
                InstructionError::Custom(0),
                InstructionError::InvalidError,
                InstructionError::ExecutableDataModified,
                InstructionError::ExecutableLamportChange,
                InstructionError::ExecutableAccountNotRentExempt,
                InstructionError::UnsupportedProgramId,
                InstructionError::CallDepth,
                InstructionError::MissingAccount,
                InstructionError::ReentrancyNotAllowed,
                InstructionError::MaxSeedLengthExceeded,
                InstructionError::InvalidSeeds,
                InstructionError::InvalidRealloc,
                InstructionError::ComputationalBudgetExceeded,
                InstructionError::PrivilegeEscalation,
                InstructionError::ProgramEnvironmentSetupFailure,
                InstructionError::ProgramFailedToComplete,
                InstructionError::ProgramFailedToCompile,
                InstructionError::Immutable,
                InstructionError::IncorrectAuthority,
                InstructionError::BorshIoError,
                InstructionError::AccountNotRentExempt,
                InstructionError::InvalidAccountOwner,
                InstructionError::ArithmeticOverflow,
                InstructionError::UnsupportedSysvar,
                InstructionError::IllegalOwner,
                InstructionError::MaxAccountsDataAllocationsExceeded,
                InstructionError::MaxAccountsExceeded,
                InstructionError::MaxInstructionTraceLengthExceeded,
                InstructionError::BuiltinProgramsMustConsumeComputeUnits,
            ]
        }

        declare_process_instruction!(MockBuiltinOk, 1, |_invoke_context| {
            // Always succeeds
            Ok(())
        });

        let mock_program_id = Pubkey::new_unique();

        // Baseline: run a transaction against the always-succeeding builtin and
        // capture the frozen bank's details for later comparison.
        let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests(
            &genesis_config,
            mock_program_id,
            MockBuiltinOk::vm,
        );

        let tx = Transaction::new_signed_with_payer(
            &[Instruction::new_with_bincode(
                mock_program_id,
                &10,
                Vec::new(),
            )],
            Some(&mint_keypair.pubkey()),
            &[&mint_keypair],
            bank.last_blockhash(),
        );

        let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
        let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]);
        bank.freeze();
        let ok_bank_details = SlotDetails::new_from_bank(&bank, true).unwrap();
        assert!(result.is_ok());

        // Builtin that fails with the InstructionError selected by the first
        // byte of the instruction data (an index into get_instruction_errors()).
        declare_process_instruction!(MockBuiltinErr, 1, |invoke_context| {
            let instruction_errors = get_instruction_errors();

            let instruction_context = invoke_context
                .transaction_context
                .get_current_instruction_context()
                .expect("Failed to get instruction context");
            let err = instruction_context
                .get_instruction_data()
                .first()
                .expect("Failed to get instruction data");
            Err(instruction_errors
                .get(*err as usize)
                .expect("Invalid error index")
                .clone())
        });

        // Store details to compare against subsequent iterations
        let mut err_bank_details = None;

        (0..get_instruction_errors().len()).for_each(|err| {
            // Fresh bank per error variant so each run starts from genesis.
            let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests(
                &genesis_config,
                mock_program_id,
                MockBuiltinErr::vm,
            );

            let tx = Transaction::new_signed_with_payer(
                &[Instruction::new_with_bincode(
                    mock_program_id,
                    &(err as u8),
                    Vec::new(),
                )],
                Some(&mint_keypair.pubkey()),
                &[&mint_keypair],
                bank.last_blockhash(),
            );

            let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
            let bank = Arc::new(bank);
            let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]);
            assert!(result.is_ok()); // No failing transaction error - only instruction errors
            bank.freeze();
            let bank_details = SlotDetails::new_from_bank(&bank, true).unwrap();

            // Transaction success/failure should not affect block hash ...
            assert_eq!(
                ok_bank_details
                    .bank_hash_components
                    .as_ref()
                    .unwrap()
                    .last_blockhash,
                bank_details
                    .bank_hash_components
                    .as_ref()
                    .unwrap()
                    .last_blockhash
            );
            // Though bankhash is not affected, bank_details should be different.
            assert_ne!(ok_bank_details, bank_details);
            // Different types of transaction failure should not affect bank hash
            if let Some(prev_bank_details) = &err_bank_details {
                assert_eq!(
                    *prev_bank_details,
                    bank_details,
                    "bank hash mismatched for tx error: {:?}",
                    get_instruction_errors()[err]
                );
            } else {
                err_bank_details = Some(bank_details);
            }
        });
    }
3637
    #[test_case(false; "old")]
    #[test_case(true; "simd83")]
    fn test_process_entries_2nd_entry_collision_with_self_and_error(
        relax_intrabatch_account_locks: bool,
    ) {
        // Verify intra-entry lock-conflict behavior with and without the simd83
        // relax_intrabatch_account_locks feature: an entry whose transactions
        // conflict with each other succeeds under simd83 locking and fails under
        // the old locking rules.
        agave_logger::setup();

        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let mut bank = Bank::new_for_tests(&genesis_config);
        if !relax_intrabatch_account_locks {
            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
        }
        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put some money in each of 1 and 2
        assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

        // 3 entries: first has a transfer, 2nd has a conflict with 1st, 3rd has a conflict with itself
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
            )],
        );
        // should now be:
        // keypair1=4
        // keypair2=4
        // keypair3=0

        let entry_2_to_3_and_1_to_mint = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide with preceding entry
            ],
        );
        // should now be:
        // keypair1=2
        // keypair2=2
        // keypair3=2

        let entry_conflict_itself = next_entry(
            &entry_2_to_3_and_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair1,
                    &keypair3.pubkey(),
                    1,
                    bank.last_blockhash(),
                ),
                system_transaction::transfer(
                    &keypair1,
                    &keypair2.pubkey(),
                    1,
                    bank.last_blockhash(),
                ), // will collide with preceding transaction
            ],
        );
        // if successful, becomes:
        // keypair1=0
        // keypair2=3
        // keypair3=3

        // succeeds following simd83 locking, fails otherwise
        let result = process_entries_for_tests_without_scheduler(
            &bank,
            vec![
                entry_1_to_mint,
                entry_2_to_3_and_1_to_mint,
                entry_conflict_itself,
            ],
        );

        let balances = [
            bank.get_balance(&keypair1.pubkey()),
            bank.get_balance(&keypair2.pubkey()),
            bank.get_balance(&keypair3.pubkey()),
        ];

        if relax_intrabatch_account_locks {
            // simd83: the self-conflicting entry landed in full.
            assert!(result.is_ok());
            assert_eq!(balances, [0, 3, 3]);
        } else {
            // old locking: the self-conflicting entry failed; balances reflect
            // only the first two entries.
            assert!(result.is_err());
            assert_eq!(balances, [2, 2, 2]);
        }
    }
3749
3750    #[test_case(false; "old")]
3751    #[test_case(true; "simd83")]
3752    fn test_process_entry_duplicate_transaction(relax_intrabatch_account_locks: bool) {
3753        agave_logger::setup();
3754
3755        let GenesisConfigInfo {
3756            genesis_config,
3757            mint_keypair,
3758            ..
3759        } = create_genesis_config(1000);
3760        let mut bank = Bank::new_for_tests(&genesis_config);
3761        if !relax_intrabatch_account_locks {
3762            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
3763        }
3764        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
3765        let keypair1 = Keypair::new();
3766        let keypair2 = Keypair::new();
3767
3768        // fund: put some money in each of 1 and 2
3769        assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
3770        assert_matches!(bank.transfer(5, &mint_keypair, &keypair2.pubkey()), Ok(_));
3771
3772        // one entry, two instances of the same transaction. this entry is invalid
3773        // without simd83: due to lock conflicts
3774        // with simd83: due to message hash duplication
3775        let entry_1_to_2_twice = next_entry(
3776            &bank.last_blockhash(),
3777            1,
3778            vec![
3779                system_transaction::transfer(
3780                    &keypair1,
3781                    &keypair2.pubkey(),
3782                    1,
3783                    bank.last_blockhash(),
3784                ),
3785                system_transaction::transfer(
3786                    &keypair1,
3787                    &keypair2.pubkey(),
3788                    1,
3789                    bank.last_blockhash(),
3790                ),
3791            ],
3792        );
3793        // should now be:
3794        // keypair1=5
3795        // keypair2=5
3796
3797        // succeeds following simd83 locking, fails otherwise
3798        let result = process_entries_for_tests_without_scheduler(&bank, vec![entry_1_to_2_twice]);
3799
3800        let balances = [
3801            bank.get_balance(&keypair1.pubkey()),
3802            bank.get_balance(&keypair2.pubkey()),
3803        ];
3804
3805        assert_eq!(balances, [5, 5]);
3806        if relax_intrabatch_account_locks {
3807            assert_eq!(result, Err(TransactionError::AlreadyProcessed));
3808        } else {
3809            assert_eq!(result, Err(TransactionError::AccountInUse));
3810        }
3811    }
3812
3813    #[test]
3814    fn test_process_entries_2_entries_par() {
3815        let GenesisConfigInfo {
3816            genesis_config,
3817            mint_keypair,
3818            ..
3819        } = create_genesis_config(1000);
3820        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3821        let keypair1 = Keypair::new();
3822        let keypair2 = Keypair::new();
3823        let keypair3 = Keypair::new();
3824        let keypair4 = Keypair::new();
3825
3826        //load accounts
3827        let tx = system_transaction::transfer(
3828            &mint_keypair,
3829            &keypair1.pubkey(),
3830            1,
3831            bank.last_blockhash(),
3832        );
3833        assert_eq!(bank.process_transaction(&tx), Ok(()));
3834        let tx = system_transaction::transfer(
3835            &mint_keypair,
3836            &keypair2.pubkey(),
3837            1,
3838            bank.last_blockhash(),
3839        );
3840        assert_eq!(bank.process_transaction(&tx), Ok(()));
3841
3842        // ensure bank can process 2 entries that do not have a common account and no tick is registered
3843        let blockhash = bank.last_blockhash();
3844        let tx =
3845            system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash());
3846        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
3847        let tx =
3848            system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash());
3849        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
3850        assert_eq!(
3851            process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]),
3852            Ok(())
3853        );
3854        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
3855        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
3856        assert_eq!(bank.last_blockhash(), blockhash);
3857    }
3858
    #[test]
    fn test_process_entry_tx_random_execution_with_error() {
        // Build many entries of disjoint transfers, each entry also carrying one
        // transaction guaranteed to fail (create_account on an already-existing
        // account), and verify the whole batch still processes with Ok(()).
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1_000_000_000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);

        const NUM_TRANSFERS_PER_ENTRY: usize = 8;
        const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
        // large enough to scramble locks and results

        let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

        // give everybody one lamport
        for keypair in &keypairs {
            bank.transfer(1, &mint_keypair, &keypair.pubkey())
                .expect("funding failed");
        }
        let mut hash = bank.last_blockhash();

        // Pre-store an account so that the create_account below always fails.
        let present_account_key = Keypair::new();
        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
        bank.store_account(&present_account_key.pubkey(), &present_account);

        // Each entry transfers from keypairs[i..i+8] to keypairs[i+N..i+N+8],
        // so transactions within and across entries touch disjoint accounts.
        let entries: Vec<_> = (0..NUM_TRANSFERS)
            .step_by(NUM_TRANSFERS_PER_ENTRY)
            .map(|i| {
                let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY)
                    .map(|j| {
                        system_transaction::transfer(
                            &keypairs[i + j],
                            &keypairs[i + j + NUM_TRANSFERS].pubkey(),
                            1,
                            bank.last_blockhash(),
                        )
                    })
                    .collect::<Vec<_>>();

                transactions.push(system_transaction::create_account(
                    &mint_keypair,
                    &present_account_key, // puts a TX error in results
                    bank.last_blockhash(),
                    1,
                    0,
                    &solana_pubkey::new_rand(),
                ));

                next_entry_mut(&mut hash, 0, transactions)
            })
            .collect();
        // Per-transaction failures do not fail the batch as a whole.
        assert_eq!(
            process_entries_for_tests_without_scheduler(&bank, entries),
            Ok(())
        );
    }
3916
3917    #[test]
3918    fn test_process_entry_tx_random_execution_no_error() {
3919        // entropy multiplier should be big enough to provide sufficient entropy
3920        // but small enough to not take too much time while executing the test.
3921        let entropy_multiplier: usize = 25;
3922        let initial_lamports = 100;
3923
3924        // number of accounts need to be in multiple of 4 for correct
3925        // execution of the test.
3926        let num_accounts = entropy_multiplier * 4;
3927        let GenesisConfigInfo {
3928            genesis_config,
3929            mint_keypair,
3930            ..
3931        } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports);
3932
3933        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
3934
3935        let mut keypairs: Vec<Keypair> = vec![];
3936
3937        for _ in 0..num_accounts {
3938            let keypair = Keypair::new();
3939            let create_account_tx = system_transaction::transfer(
3940                &mint_keypair,
3941                &keypair.pubkey(),
3942                0,
3943                bank.last_blockhash(),
3944            );
3945            assert_eq!(bank.process_transaction(&create_account_tx), Ok(()));
3946            assert_matches!(
3947                bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()),
3948                Ok(_)
3949            );
3950            keypairs.push(keypair);
3951        }
3952
3953        let mut tx_vector: Vec<Transaction> = vec![];
3954
3955        for i in (0..num_accounts).step_by(4) {
3956            tx_vector.append(&mut vec![
3957                system_transaction::transfer(
3958                    &keypairs[i + 1],
3959                    &keypairs[i].pubkey(),
3960                    initial_lamports,
3961                    bank.last_blockhash(),
3962                ),
3963                system_transaction::transfer(
3964                    &keypairs[i + 3],
3965                    &keypairs[i + 2].pubkey(),
3966                    initial_lamports,
3967                    bank.last_blockhash(),
3968                ),
3969            ]);
3970        }
3971
3972        // Transfer lamports to each other
3973        let entry = next_entry(&bank.last_blockhash(), 1, tx_vector);
3974        assert_eq!(
3975            process_entries_for_tests_without_scheduler(&bank, vec![entry]),
3976            Ok(())
3977        );
3978        bank.squash();
3979
3980        // Even number keypair should have balance of 2 * initial_lamports and
3981        // odd number keypair should have balance of 0, which proves
3982        // that even in case of random order of execution, overall state remains
3983        // consistent.
3984        for (i, keypair) in keypairs.iter().enumerate() {
3985            if i % 2 == 0 {
3986                assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports);
3987            } else {
3988                assert_eq!(bank.get_balance(&keypair.pubkey()), 0);
3989            }
3990        }
3991    }
3992
    // A batch of [tx entry, tick entry, tx entry] whose transactions touch
    // disjoint accounts must process cleanly, and a follow-up transfer from a
    // drained account must surface AccountNotFound.
    #[test]
    fn test_process_entries_2_entries_tick() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        //load accounts
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair2.pubkey(),
            1,
            bank.last_blockhash(),
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));

        // Register ticks until the bank's blockhash advances, so that
        // `blockhash` becomes an older (but still valid) blockhash.
        let blockhash = bank.last_blockhash();
        while blockhash == bank.last_blockhash() {
            bank.register_default_tick_for_test();
        }

        // ensure bank can process 2 entries that do not have a common account and tick is registered
        let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash);
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tick = next_entry(&entry_1.hash, 1, vec![]);
        let tx =
            system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash());
        let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
        assert_eq!(
            process_entries_for_tests_without_scheduler(
                &bank,
                vec![entry_1, tick, entry_2.clone()],
            ),
            Ok(())
        );
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);

        // ensure that an error is returned for an empty account (keypair2)
        let tx =
            system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash());
        let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
        assert_eq!(
            process_entries_for_tests_without_scheduler(&bank, vec![entry_3]),
            Err(TransactionError::AccountNotFound)
        );
    }
4053
    // Exercises which transaction outcomes land in the status (signature)
    // cache: executed-with-instruction-error and fees-only failures do,
    // while pre-execution failures like BlockhashNotFound do not.
    #[test]
    fn test_update_transaction_statuses() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(11_000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);

        // Make sure instruction errors still update the signature cache
        let pubkey = solana_pubkey::new_rand();
        bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
        assert_eq!(bank.transaction_count(), 1);
        assert_eq!(bank.get_balance(&pubkey), 1_000);
        // Overdraw the mint (10_000 remaining): executes, fails in the
        // instruction...
        assert_eq!(
            bank.transfer(10_001, &mint_keypair, &pubkey),
            Err(TransactionError::InstructionError(
                0,
                SystemError::ResultWithNegativeLamports.into(),
            ))
        );
        // ...and an identical retry is rejected by the status cache.
        assert_eq!(
            bank.transfer(10_001, &mint_keypair, &pubkey),
            Err(TransactionError::AlreadyProcessed)
        );

        // Make sure fees-only transactions still update the signature cache
        let missing_program_id = Pubkey::new_unique();
        let tx = Transaction::new_signed_with_payer(
            &[Instruction::new_with_bincode(
                missing_program_id,
                &10,
                Vec::new(),
            )],
            Some(&mint_keypair.pubkey()),
            &[&mint_keypair],
            bank.last_blockhash(),
        );
        // First process attempt will fail but still update status cache
        assert_eq!(
            bank.process_transaction(&tx),
            Err(TransactionError::ProgramAccountNotFound)
        );
        // Second attempt will be rejected since tx was already in status cache
        assert_eq!(
            bank.process_transaction(&tx),
            Err(TransactionError::AlreadyProcessed)
        );

        // Make sure other errors don't update the signature cache
        let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
        let signature = tx.signatures[0];

        // Should fail with blockhash not found
        assert_eq!(
            bank.process_transaction(&tx).map(|_| signature),
            Err(TransactionError::BlockhashNotFound)
        );

        // Should fail again with blockhash not found
        assert_eq!(
            bank.process_transaction(&tx).map(|_| signature),
            Err(TransactionError::BlockhashNotFound)
        );
    }
4119
    // Two transactions from the same payer (the mint) inside one entry:
    // without SIMD-83 (relax_intrabatch_account_locks) the batch fails on the
    // intrabatch account-lock conflict; with it, both land and a later replay
    // of the second transaction is rejected as already processed.
    #[test_case(false; "old")]
    #[test_case(true; "simd83")]
    fn test_update_transaction_statuses_fail(relax_intrabatch_account_locks: bool) {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(11_000);
        let mut bank = Bank::new_for_tests(&genesis_config);
        // Deactivate the feature to exercise the legacy locking behavior.
        if !relax_intrabatch_account_locks {
            bank.deactivate_feature(&agave_feature_set::relax_intrabatch_account_locks::id());
        }
        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let success_tx = system_transaction::transfer(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
        );
        let test_tx = system_transaction::transfer(
            &mint_keypair,
            &keypair2.pubkey(),
            2,
            bank.last_blockhash(),
        );

        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![
                success_tx,
                test_tx.clone(), // will collide
            ],
        );

        // succeeds with simd83, fails because of account locking conflict otherwise
        assert_eq!(
            process_entries_for_tests_without_scheduler(&bank, vec![entry_1_to_mint]),
            if relax_intrabatch_account_locks {
                Ok(())
            } else {
                Err(TransactionError::AccountInUse)
            }
        );

        // fails with simd83 as already processed, succeeds otherwise
        assert_eq!(
            bank.process_transaction(&test_tx),
            if relax_intrabatch_account_locks {
                Err(TransactionError::AlreadyProcessed)
            } else {
                Ok(())
            }
        );
    }
4177
4178    #[test]
4179    fn test_halt_at_slot_starting_snapshot_root() {
4180        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
4181
4182        // Create roots at slots 0, 1
4183        let forks = tr(0) / tr(1);
4184        let ledger_path = get_tmp_ledger_path_auto_delete!();
4185        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
4186        blockstore.add_tree(
4187            forks,
4188            false,
4189            true,
4190            genesis_config.ticks_per_slot,
4191            genesis_config.hash(),
4192        );
4193        blockstore.set_roots([0, 1].iter()).unwrap();
4194
4195        // Specify halting at slot 0
4196        let opts = ProcessOptions {
4197            run_verification: true,
4198            halt_at_slot: Some(0),
4199            ..ProcessOptions::default()
4200        };
4201        let (bank_forks, ..) =
4202            test_process_blockstore(&genesis_config, &blockstore, &opts, Arc::default());
4203        let bank_forks = bank_forks.read().unwrap();
4204
4205        // Should be able to fetch slot 0 because we specified halting at slot 0, even
4206        // if there is a greater root at slot 1.
4207        assert!(bank_forks.get(0).is_some());
4208    }
4209
    // Replays a linear chain with intermediate blockstore roots and verifies
    // that process_blockstore_from_root() resumes from an existing root bank
    // and prunes everything below the final root.
    #[test]
    fn test_process_blockstore_from_root() {
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(123);

        let ticks_per_slot = 1;
        genesis_config.ticks_per_slot = ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        /*
          Build a blockstore in the ledger with the following fork structure:

               slot 0 (all ticks)
                 |
               slot 1 (all ticks)
                 |
               slot 2 (all ticks)
                 |
               slot 3 (all ticks) -> root
                 |
               slot 4 (all ticks)
                 |
               slot 5 (all ticks) -> root
                 |
               slot 6 (all ticks)
        */

        // Write tick-only slots 1..=6, each chaining off the previous slot.
        let mut last_hash = blockhash;
        for i in 0..6 {
            last_hash =
                fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
        }
        blockstore.set_roots([3, 5].iter()).unwrap();

        // Set up bank1
        let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
        let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap();
        let opts = ProcessOptions {
            run_verification: true,
            ..ProcessOptions::default()
        };
        let recyclers = VerifyRecyclers::default();
        let replay_tx_thread_pool = create_thread_pool(1);
        // Replay slot 0 first so that bank1 can be built on top of it.
        process_bank_0(
            &bank0,
            &blockstore,
            &replay_tx_thread_pool,
            &opts,
            None,
            &recyclers,
            None,
        )
        .unwrap();
        let bank0_last_blockhash = bank0.last_blockhash();
        let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent(
            bank0.clone_without_scheduler(),
            &Pubkey::default(),
            1,
        ));
        // Manually replay slot 1 into bank1...
        confirm_full_slot(
            &blockstore,
            &bank1,
            &replay_tx_thread_pool,
            &opts,
            &recyclers,
            &mut ConfirmationProgress::new(bank0_last_blockhash),
            None,
            None,
            None,
            &mut ExecuteTimings::default(),
        )
        .unwrap();
        // ...and make slot 1 the root so processing below starts from it.
        bank_forks.write().unwrap().set_root(1, None, None);

        let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);

        // Test process_blockstore_from_root() from slot 1 onwards
        process_blockstore_from_root(
            &blockstore,
            &bank_forks,
            &leader_schedule_cache,
            &opts,
            None,
            None,
            None, // snapshot_controller
        )
        .unwrap();

        let bank_forks = bank_forks.read().unwrap();

        // Only the highest blockstore root (5) and its descendant (6) remain
        // frozen; everything below the root was pruned.
        assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
        assert_eq!(bank_forks.working_bank().slot(), 6);
        assert_eq!(bank_forks.root(), 5);

        // Verify the parents of the head of the fork
        assert_eq!(
            &bank_forks[6]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[5]
        );

        // Check that bank forks has the correct banks
        verify_fork_infos(&bank_forks);
    }
4319
    // Stress test: repeatedly pay, refund, and tick across fresh child banks.
    // The loop has no exit — this test runs until killed, hence #[ignore]
    // (run manually).
    #[test]
    #[ignore]
    fn test_process_entries_stress() {
        // this test throws lots of rayon threads at process_entries()
        //  finds bugs in very low-layer stuff
        agave_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1_000_000_000);
        let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));

        const NUM_TRANSFERS_PER_ENTRY: usize = 8;
        const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;

        // First half of the keypairs pay the second half, then get refunded.
        let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

        // give everybody one lamport
        for keypair in &keypairs {
            bank.transfer(1, &mint_keypair, &keypair.pubkey())
                .expect("funding failed");
        }

        // Pre-store this account so the create_account transactions below
        // fail, deliberately injecting a TX error into each entry's results.
        let present_account_key = Keypair::new();
        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
        bank.store_account(&present_account_key.pubkey(), &present_account);

        let mut i = 0;
        let mut hash = bank.last_blockhash();
        let mut root: Option<Arc<Bank>> = None;
        loop {
            // "Pay" entries: keypairs[i] -> keypairs[i + NUM_TRANSFERS].
            let entries: Vec<_> = (0..NUM_TRANSFERS)
                .step_by(NUM_TRANSFERS_PER_ENTRY)
                .map(|i| {
                    next_entry_mut(&mut hash, 0, {
                        let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
                            .map(|i| {
                                system_transaction::transfer(
                                    &keypairs[i],
                                    &keypairs[i + NUM_TRANSFERS].pubkey(),
                                    1,
                                    bank.last_blockhash(),
                                )
                            })
                            .collect::<Vec<_>>();

                        transactions.push(system_transaction::create_account(
                            &mint_keypair,
                            &present_account_key, // puts a TX error in results
                            bank.last_blockhash(),
                            100,
                            100,
                            &solana_pubkey::new_rand(),
                        ));
                        transactions
                    })
                })
                .collect();
            info!("paying iteration {i}");
            process_entries_for_tests_without_scheduler(&bank, entries).expect("paying failed");

            // "Refund" entries: send the lamport back the other way.
            let entries: Vec<_> = (0..NUM_TRANSFERS)
                .step_by(NUM_TRANSFERS_PER_ENTRY)
                .map(|i| {
                    next_entry_mut(
                        &mut hash,
                        0,
                        (i..i + NUM_TRANSFERS_PER_ENTRY)
                            .map(|i| {
                                system_transaction::transfer(
                                    &keypairs[i + NUM_TRANSFERS],
                                    &keypairs[i].pubkey(),
                                    1,
                                    bank.last_blockhash(),
                                )
                            })
                            .collect::<Vec<_>>(),
                    )
                })
                .collect();

            info!("refunding iteration {i}");
            process_entries_for_tests_without_scheduler(&bank, entries).expect("refunding failed");

            // advance to next block
            process_entries_for_tests_without_scheduler(
                &bank,
                (0..bank.ticks_per_slot())
                    .map(|_| next_entry_mut(&mut hash, 1, vec![]))
                    .collect::<Vec<_>>(),
            )
            .expect("process ticks failed");

            // Every 16 iterations, squash the previously-kept root and keep
            // the current bank as the next root candidate.
            if i % 16 == 0 {
                if let Some(old_root) = root {
                    old_root.squash();
                }
                root = Some(bank.clone());
            }
            i += 1;

            // Hop 1 or 2 slots ahead onto a fresh child bank.
            let slot = bank.slot() + thread_rng().gen_range(1..3);
            bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot));
        }
    }
4426
4427    #[test]
4428    fn test_process_ledger_ticks_ordering() {
4429        let GenesisConfigInfo {
4430            genesis_config,
4431            mint_keypair,
4432            ..
4433        } = create_genesis_config(100);
4434        let (bank0, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
4435        let genesis_hash = genesis_config.hash();
4436        let keypair = Keypair::new();
4437
4438        // Simulate a slot of virtual ticks, creates a new blockhash
4439        let mut entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash);
4440
4441        // The new blockhash is going to be the hash of the last tick in the block
4442        let new_blockhash = entries.last().unwrap().hash;
4443        // Create an transaction that references the new blockhash, should still
4444        // be able to find the blockhash if we process transactions all in the same
4445        // batch
4446        let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash);
4447        let entry = next_entry(&new_blockhash, 1, vec![tx]);
4448        entries.push(entry);
4449
4450        process_entries_for_tests_without_scheduler(&bank0, entries).unwrap();
4451        assert_eq!(bank0.get_balance(&keypair.pubkey()), 1)
4452    }
4453
4454    fn get_epoch_schedule(genesis_config: &GenesisConfig) -> EpochSchedule {
4455        let bank = Bank::new_for_tests(genesis_config);
4456        bank.epoch_schedule().clone()
4457    }
4458
4459    fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
4460        let mut slots: Vec<_> = bank_forks
4461            .frozen_banks()
4462            .map(|(slot, _bank)| slot)
4463            .collect();
4464        slots.sort_unstable();
4465        slots
4466    }
4467
4468    // Check that `bank_forks` contains all the ancestors and banks for each fork identified in
4469    // `bank_forks_info`
4470    fn verify_fork_infos(bank_forks: &BankForks) {
4471        for slot in frozen_bank_slots(bank_forks) {
4472            let head_bank = &bank_forks[slot];
4473            let mut parents = head_bank.parents();
4474            parents.push(head_bank.clone());
4475
4476            // Ensure the tip of each fork and all its parents are in the given bank_forks
4477            for parent in parents {
4478                let parent_bank = &bank_forks[parent.slot()];
4479                assert_eq!(parent_bank.slot(), parent.slot());
4480                assert!(parent_bank.is_frozen());
4481            }
4482        }
4483    }
4484
    // do_get_first_error() must surface the error and signature of the first
    // failed transaction in batch order.
    #[test]
    fn test_get_first_error() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1_000_000_000);
        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);

        // Store a funded account up front.
        let present_account_key = Keypair::new();
        let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
        bank.store_account(&present_account_key.pubkey(), &present_account);

        // Unfunded fee payer for the first transaction below.
        let keypair = Keypair::new();

        // Create array of two transactions which throw different errors
        let account_not_found_tx = system_transaction::transfer(
            &keypair,
            &solana_pubkey::new_rand(),
            42,
            bank.last_blockhash(),
        );
        let account_not_found_sig = account_not_found_tx.signatures[0];
        // Hash::default() is not a recent blockhash, so this one fails too.
        let invalid_blockhash_tx = system_transaction::transfer(
            &mint_keypair,
            &solana_pubkey::new_rand(),
            42,
            Hash::default(),
        );
        let txs = vec![account_not_found_tx, invalid_blockhash_tx];
        let batch = bank.prepare_batch_for_tests(txs);
        let (commit_results, _) = batch.bank().load_execute_and_commit_transactions(
            &batch,
            MAX_PROCESSING_AGE,
            ExecutionRecordingConfig::new_single_setting(false),
            &mut ExecuteTimings::default(),
            None,
        );
        // The first failure in batch order wins: AccountNotFound from the
        // unfunded payer, paired with that transaction's signature.
        let (err, signature) = do_get_first_error(&batch, &commit_results).unwrap();
        assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
        assert_eq!(signature, account_not_found_sig);
    }
4527
    // Only well-formed, correctly-signed votes replayed through
    // process_entries_for_tests() should be forwarded on the replay vote
    // channel.
    #[test]
    fn test_replay_vote_sender() {
        let validator_keypairs: Vec<_> =
            (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
        let GenesisConfigInfo {
            genesis_config,
            voting_keypair: _,
            ..
        } = create_genesis_config_with_vote_accounts(
            1_000_000_000,
            &validator_keypairs,
            vec![100; validator_keypairs.len()],
        );
        let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        bank0.freeze();

        let bank1 = bank_forks
            .write()
            .unwrap()
            .insert(Bank::new_from_parent(
                bank0.clone(),
                &solana_pubkey::new_rand(),
                1,
            ))
            .clone_without_scheduler();

        // Recent blockhash for the vote transactions below.
        let bank_1_blockhash = bank1.last_blockhash();

        // Build one vote transaction per validator, cycling through three
        // cases; only the first (fully valid) kind should be forwarded on
        // the replay vote channel.
        let mut expected_successful_voter_pubkeys = BTreeSet::new();
        let vote_txs: Vec<_> = validator_keypairs
            .iter()
            .enumerate()
            .map(|(i, validator_keypairs)| {
                let tower_sync = TowerSync::new_from_slots(vec![0], bank0.hash(), None);
                if i % 3 == 0 {
                    // These votes are correct
                    expected_successful_voter_pubkeys
                        .insert(validator_keypairs.vote_keypair.pubkey());
                    vote_transaction::new_tower_sync_transaction(
                        tower_sync,
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &validator_keypairs.vote_keypair,
                        None,
                    )
                } else if i % 3 == 1 {
                    // These have the wrong authorized voter
                    vote_transaction::new_tower_sync_transaction(
                        tower_sync,
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &Keypair::new(),
                        None,
                    )
                } else {
                    // These have an invalid vote for non-existent bank 2
                    vote_transaction::new_tower_sync_transaction(
                        TowerSync::from(vec![(bank1.slot() + 1, 1)]),
                        bank_1_blockhash,
                        &validator_keypairs.node_keypair,
                        &validator_keypairs.vote_keypair,
                        &validator_keypairs.vote_keypair,
                        None,
                    )
                }
            })
            .collect();
        // Replay all votes in a single entry, then collect what was forwarded.
        let entry = next_entry(&bank_1_blockhash, 1, vote_txs);
        let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded();
        let _ = process_entries_for_tests(
            &BankWithScheduler::new_without_scheduler(bank1),
            vec![entry],
            None,
            Some(&replay_vote_sender),
        );
        let successes: BTreeSet<Pubkey> = replay_vote_receiver
            .try_iter()
            .map(|(vote_pubkey, ..)| vote_pubkey)
            .collect();
        assert_eq!(successes, expected_successful_voter_pubkeys);
    }
4615
4616    fn make_slot_with_vote_tx(
4617        blockstore: &Blockstore,
4618        ticks_per_slot: u64,
4619        tx_landed_slot: Slot,
4620        parent_slot: Slot,
4621        parent_blockhash: &Hash,
4622        vote_tx: Transaction,
4623        slot_leader_keypair: &Arc<Keypair>,
4624    ) {
4625        // Add votes to `last_slot` so that `root` will be confirmed
4626        let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
4627        let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
4628        entries.insert(0, vote_entry);
4629        blockstore
4630            .write_entries(
4631                tx_landed_slot,
4632                0,
4633                0,
4634                ticks_per_slot,
4635                Some(parent_slot),
4636                true,
4637                slot_leader_keypair,
4638                entries,
4639                0,
4640            )
4641            .unwrap();
4642    }
4643
4644    fn run_test_process_blockstore_with_supermajority_root(
4645        blockstore_root: Option<Slot>,
4646        blockstore_access_type: AccessType,
4647    ) {
4648        agave_logger::setup();
4649        /*
4650            Build fork structure:
4651                 slot 0
4652                   |
4653                 slot 1 <- (blockstore root)
4654                 /    \
4655            slot 2    |
4656               |      |
4657            slot 4    |
4658                    slot 5
4659                      |
4660                `expected_root_slot`
4661                     /    \
4662                  ...    minor fork
4663                  /
4664            `last_slot`
4665                 |
4666            `really_last_slot`
4667        */
4668        let starting_fork_slot = 5;
4669        let mut main_fork = tr(starting_fork_slot);
4670        let mut main_fork_ref = main_fork.root_mut().get_mut();
4671
4672        // Make enough slots to make a root slot > blockstore_root
4673        let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0);
4674        let really_expected_root_slot = expected_root_slot + 1;
4675        let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1;
4676        let really_last_main_fork_slot = last_main_fork_slot + 1;
4677
4678        // Make `minor_fork`
4679        let last_minor_fork_slot = really_last_main_fork_slot + 1;
4680        let minor_fork = tr(last_minor_fork_slot);
4681
4682        // Make 'main_fork`
4683        for slot in starting_fork_slot + 1..last_main_fork_slot {
4684            if slot - 1 == expected_root_slot {
4685                main_fork_ref.push_front(minor_fork.clone());
4686            }
4687            main_fork_ref.push_front(tr(slot));
4688            main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut();
4689        }
4690        let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork);
4691        let validator_keypairs = ValidatorVoteKeypairs::new_rand();
4692        let GenesisConfigInfo { genesis_config, .. } =
4693            genesis_utils::create_genesis_config_with_vote_accounts(
4694                10_000,
4695                &[&validator_keypairs],
4696                vec![100],
4697            );
4698        let ticks_per_slot = genesis_config.ticks_per_slot();
4699        let ledger_path = get_tmp_ledger_path_auto_delete!();
4700        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
4701        blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash());
4702
4703        if let Some(blockstore_root) = blockstore_root {
4704            blockstore
4705                .set_roots(std::iter::once(&blockstore_root))
4706                .unwrap();
4707        }
4708
4709        let opts = ProcessOptions {
4710            run_verification: true,
4711            ..ProcessOptions::default()
4712        };
4713
4714        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
4715            &genesis_config,
4716            &blockstore,
4717            &opts,
4718            blockstore_access_type.clone(),
4719        );
4720        let bank_forks = bank_forks.read().unwrap();
4721
4722        // prepare to add votes
4723        let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash();
4724        let last_vote_blockhash = bank_forks
4725            .get(last_main_fork_slot - 1)
4726            .unwrap()
4727            .last_blockhash();
4728        let tower_sync = TowerSync::new_from_slot(last_main_fork_slot - 1, last_vote_bank_hash);
4729        let vote_tx = vote_transaction::new_tower_sync_transaction(
4730            tower_sync,
4731            last_vote_blockhash,
4732            &validator_keypairs.node_keypair,
4733            &validator_keypairs.vote_keypair,
4734            &validator_keypairs.vote_keypair,
4735            None,
4736        );
4737
4738        // Add votes to `last_slot` so that `root` will be confirmed
4739        let leader_keypair = Arc::new(validator_keypairs.node_keypair);
4740        make_slot_with_vote_tx(
4741            &blockstore,
4742            ticks_per_slot,
4743            last_main_fork_slot,
4744            last_main_fork_slot - 1,
4745            &last_vote_blockhash,
4746            vote_tx,
4747            &leader_keypair,
4748        );
4749
4750        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
4751            &genesis_config,
4752            &blockstore,
4753            &opts,
4754            blockstore_access_type.clone(),
4755        );
4756        let bank_forks = bank_forks.read().unwrap();
4757
4758        assert_eq!(bank_forks.root(), expected_root_slot);
4759        assert_eq!(
4760            bank_forks.frozen_banks().count() as u64,
4761            last_minor_fork_slot - really_expected_root_slot + 1
4762        );
4763
4764        // Minor fork at `last_main_fork_slot + 1` was above the `expected_root_slot`
4765        // so should not have been purged
4766        //
4767        // Fork at slot 2 was purged because it was below the `expected_root_slot`
4768        for slot in 0..=last_minor_fork_slot {
4769            // this slot will be created below
4770            if slot == really_last_main_fork_slot {
4771                continue;
4772            }
4773            if slot >= expected_root_slot {
4774                let bank = bank_forks.get(slot).unwrap();
4775                assert_eq!(bank.slot(), slot);
4776                assert!(bank.is_frozen());
4777            } else {
4778                assert!(bank_forks.get(slot).is_none());
4779            }
4780        }
4781
4782        // really prepare to add votes
4783        let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash();
4784        let last_vote_blockhash = bank_forks
4785            .get(last_main_fork_slot)
4786            .unwrap()
4787            .last_blockhash();
4788        let tower_sync = TowerSync::new_from_slot(last_main_fork_slot, last_vote_bank_hash);
4789        let vote_tx = vote_transaction::new_tower_sync_transaction(
4790            tower_sync,
4791            last_vote_blockhash,
4792            &leader_keypair,
4793            &validator_keypairs.vote_keypair,
4794            &validator_keypairs.vote_keypair,
4795            None,
4796        );
4797
4798        // Add votes to `really_last_slot` so that `root` will be confirmed again
4799        make_slot_with_vote_tx(
4800            &blockstore,
4801            ticks_per_slot,
4802            really_last_main_fork_slot,
4803            last_main_fork_slot,
4804            &last_vote_blockhash,
4805            vote_tx,
4806            &leader_keypair,
4807        );
4808
4809        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
4810            &genesis_config,
4811            &blockstore,
4812            &opts,
4813            blockstore_access_type,
4814        );
4815        let bank_forks = bank_forks.read().unwrap();
4816
4817        assert_eq!(bank_forks.root(), really_expected_root_slot);
4818    }
4819
    #[test]
    fn test_process_blockstore_with_supermajority_root_without_blockstore_root() {
        // No pre-existing blockstore root; Primary (read-write) blockstore access.
        run_test_process_blockstore_with_supermajority_root(None, AccessType::Primary);
    }
4824
    #[test]
    fn test_process_blockstore_with_supermajority_root_without_blockstore_root_secondary_access() {
        // Same scenario, but opening the blockstore in read-only Secondary mode.
        run_test_process_blockstore_with_supermajority_root(None, AccessType::Secondary);
    }
4829
    #[test]
    fn test_process_blockstore_with_supermajority_root_with_blockstore_root() {
        // Blockstore already has slot 1 marked as root before processing.
        run_test_process_blockstore_with_supermajority_root(Some(1), AccessType::Primary)
    }
4834
4835    #[test]
4836    #[allow(clippy::field_reassign_with_default)]
4837    fn test_supermajority_root_from_vote_accounts() {
4838        let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> VoteAccountsHashMap {
4839            roots_stakes
4840                .into_iter()
4841                .map(|(root, stake)| {
4842                    let mut vote_state = VoteStateV4::default();
4843                    vote_state.root_slot = Some(root);
4844                    let mut vote_account = AccountSharedData::new(
4845                        1,
4846                        VoteStateV4::size_of(),
4847                        &solana_vote_program::id(),
4848                    );
4849                    let versioned = VoteStateVersions::new_v4(vote_state);
4850                    VoteStateV4::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
4851                    (
4852                        solana_pubkey::new_rand(),
4853                        (stake, VoteAccount::try_from(vote_account).unwrap()),
4854                    )
4855                })
4856                .collect()
4857        };
4858
4859        let total_stake = 10;
4860
4861        // Supermajority root should be None
4862        assert!(supermajority_root_from_vote_accounts(total_stake, &HashMap::default()).is_none());
4863
4864        // Supermajority root should be None
4865        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)];
4866        let accounts = convert_to_vote_accounts(roots_stakes);
4867        assert!(supermajority_root_from_vote_accounts(total_stake, &accounts).is_none());
4868
4869        // Supermajority root should be 4, has 7/10 of the stake
4870        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)];
4871        let accounts = convert_to_vote_accounts(roots_stakes);
4872        assert_eq!(
4873            supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(),
4874            4
4875        );
4876
4877        // Supermajority root should be 8, it has 7/10 of the stake
4878        let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)];
4879        let accounts = convert_to_vote_accounts(roots_stakes);
4880        assert_eq!(
4881            supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(),
4882            8
4883        );
4884    }
4885
    /// Test helper: replays `slot_entries` into `bank` via `confirm_slot_entries`
    /// using a single-threaded replay pool and fresh timing/progress state, with
    /// every optional hook passed as `None`/default.
    fn confirm_slot_entries_for_tests(
        bank: &Arc<Bank>,
        slot_entries: Vec<Entry>,
        slot_full: bool,
        prev_entry_hash: Hash,
    ) -> result::Result<(), BlockstoreProcessorError> {
        let replay_tx_thread_pool = create_thread_pool(1);
        confirm_slot_entries(
            &BankWithScheduler::new_without_scheduler(bank.clone()),
            &replay_tx_thread_pool,
            (slot_entries, 0, slot_full),
            &mut ConfirmationTiming::default(),
            &mut ConfirmationProgress::new(prev_entry_hash),
            false,
            None,
            None,
            None,
            &VerifyRecyclers::default(),
            None,
            &PrioritizationFeeCache::new(0u64),
        )
    }
4908
4909    fn create_test_transactions(
4910        mint_keypair: &Keypair,
4911        genesis_hash: &Hash,
4912    ) -> Vec<RuntimeTransaction<SanitizedTransaction>> {
4913        let pubkey = solana_pubkey::new_rand();
4914        let keypair2 = Keypair::new();
4915        let pubkey2 = solana_pubkey::new_rand();
4916        let keypair3 = Keypair::new();
4917        let pubkey3 = solana_pubkey::new_rand();
4918
4919        vec![
4920            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
4921                mint_keypair,
4922                &pubkey,
4923                1,
4924                *genesis_hash,
4925            )),
4926            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
4927                &keypair2,
4928                &pubkey2,
4929                1,
4930                *genesis_hash,
4931            )),
4932            RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
4933                &keypair3,
4934                &pubkey3,
4935                1,
4936                *genesis_hash,
4937            )),
4938        ]
4939    }
4940
4941    #[test]
4942    fn test_confirm_slot_entries_progress_num_txs_indexes() {
4943        let GenesisConfigInfo {
4944            genesis_config,
4945            mint_keypair,
4946            ..
4947        } = create_genesis_config(100 * LAMPORTS_PER_SOL);
4948        let genesis_hash = genesis_config.hash();
4949        let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
4950        let bank = BankWithScheduler::new_without_scheduler(bank);
4951        let replay_tx_thread_pool = create_thread_pool(1);
4952        let mut timing = ConfirmationTiming::default();
4953        let mut progress = ConfirmationProgress::new(genesis_hash);
4954        let amount = genesis_config.rent.minimum_balance(0);
4955        let keypair1 = Keypair::new();
4956        let keypair2 = Keypair::new();
4957        let keypair3 = Keypair::new();
4958        let keypair4 = Keypair::new();
4959        bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair1.pubkey())
4960            .unwrap();
4961        bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair2.pubkey())
4962            .unwrap();
4963
4964        let (transaction_status_sender, transaction_status_receiver) =
4965            crossbeam_channel::unbounded();
4966        let transaction_status_sender = TransactionStatusSender {
4967            sender: transaction_status_sender,
4968            dependency_tracker: None,
4969        };
4970
4971        let blockhash = bank.last_blockhash();
4972        let tx1 = system_transaction::transfer(
4973            &keypair1,
4974            &keypair3.pubkey(),
4975            amount,
4976            bank.last_blockhash(),
4977        );
4978        let tx2 = system_transaction::transfer(
4979            &keypair2,
4980            &keypair4.pubkey(),
4981            amount,
4982            bank.last_blockhash(),
4983        );
4984        let entry = next_entry(&blockhash, 1, vec![tx1, tx2]);
4985        let new_hash = entry.hash;
4986
4987        confirm_slot_entries(
4988            &bank,
4989            &replay_tx_thread_pool,
4990            (vec![entry], 0, false),
4991            &mut timing,
4992            &mut progress,
4993            false,
4994            Some(&transaction_status_sender),
4995            None,
4996            None,
4997            &VerifyRecyclers::default(),
4998            None,
4999            &PrioritizationFeeCache::new(0u64),
5000        )
5001        .unwrap();
5002        assert_eq!(progress.num_txs, 2);
5003        let batch = transaction_status_receiver.recv().unwrap();
5004        if let TransactionStatusMessage::Batch((batch, _sequence)) = batch {
5005            assert_eq!(batch.transactions.len(), 2);
5006            assert_eq!(batch.transaction_indexes.len(), 2);
5007            assert_eq!(batch.transaction_indexes, [0, 1]);
5008        } else {
5009            panic!("batch should have been sent");
5010        }
5011
5012        let tx1 = system_transaction::transfer(
5013            &keypair1,
5014            &keypair3.pubkey(),
5015            amount + 1,
5016            bank.last_blockhash(),
5017        );
5018        let tx2 = system_transaction::transfer(
5019            &keypair2,
5020            &keypair4.pubkey(),
5021            amount + 1,
5022            bank.last_blockhash(),
5023        );
5024        let tx3 = system_transaction::transfer(
5025            &mint_keypair,
5026            &Pubkey::new_unique(),
5027            amount,
5028            bank.last_blockhash(),
5029        );
5030        let entry = next_entry(&new_hash, 1, vec![tx1, tx2, tx3]);
5031
5032        confirm_slot_entries(
5033            &bank,
5034            &replay_tx_thread_pool,
5035            (vec![entry], 0, false),
5036            &mut timing,
5037            &mut progress,
5038            false,
5039            Some(&transaction_status_sender),
5040            None,
5041            None,
5042            &VerifyRecyclers::default(),
5043            None,
5044            &PrioritizationFeeCache::new(0u64),
5045        )
5046        .unwrap();
5047        assert_eq!(progress.num_txs, 5);
5048        let batch = transaction_status_receiver.recv().unwrap();
5049        if let TransactionStatusMessage::Batch((batch, _sequnce)) = batch {
5050            assert_eq!(batch.transactions.len(), 3);
5051            assert_eq!(batch.transaction_indexes.len(), 3);
5052            assert_eq!(batch.transaction_indexes, [2, 3, 4]);
5053        } else {
5054            panic!("batch should have been sent");
5055        }
5056    }
5057
    /// Drives `process_batches` through a fully mocked scheduler installed on the bank.
    ///
    /// With `should_succeed == true` every `schedule_execution` call returns Ok and
    /// `process_batches` must return Ok. Otherwise the first call aborts, the mock
    /// recovers `InsufficientFundsForFee`, and that error must surface from
    /// `process_batches`.
    fn do_test_schedule_batches_for_execution(should_succeed: bool) {
        agave_logger::setup();
        let dummy_leader_pubkey = solana_pubkey::new_rand();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let context = SchedulingContext::for_verification(bank.clone());

        let txs = create_test_transactions(&mint_keypair, &genesis_config.hash());

        let mut mocked_scheduler = MockInstalledScheduler::new();
        // Shared mockall sequence so `context()` is observed before
        // `wait_for_termination()` (and `return_to_pool()` after both).
        let seq = Arc::new(Mutex::new(mockall::Sequence::new()));
        let seq_cloned = seq.clone();
        mocked_scheduler
            .expect_context()
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .return_const(context);
        if should_succeed {
            mocked_scheduler
                .expect_schedule_execution()
                .times(txs.len())
                .returning(|_, _| Ok(()));
        } else {
            // mocked_scheduler isn't async; so short-circuiting behavior is quite visible in that
            // .times(1) is called instead of .times(txs.len()), not like the succeeding case
            mocked_scheduler
                .expect_schedule_execution()
                .times(1)
                .returning(|_, _| Err(SchedulerAborted));
            mocked_scheduler
                .expect_recover_error_after_abort()
                .times(1)
                .returning(|| TransactionError::InsufficientFundsForFee);
        }
        // Termination hands back a mocked uninstalled scheduler, which must itself
        // be returned to the pool exactly once.
        mocked_scheduler
            .expect_wait_for_termination()
            .with(mockall::predicate::eq(true))
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .returning(move |_| {
                let mut mocked_uninstalled_scheduler = MockUninstalledScheduler::new();
                mocked_uninstalled_scheduler
                    .expect_return_to_pool()
                    .times(1)
                    .in_sequence(&mut seq_cloned.lock().unwrap())
                    .returning(|| ());
                (
                    (Ok(()), ExecuteTimings::default()),
                    Box::new(mocked_uninstalled_scheduler),
                )
            });
        let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler)));

        let locked_entry = LockedTransactionsWithIndexes {
            lock_results: bank.try_lock_accounts(&txs),
            transactions: txs,
            starting_index: 0,
        };

        let replay_tx_thread_pool = create_thread_pool(1);
        let mut batch_execution_timing = BatchExecutionTiming::default();
        let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
        let result = process_batches(
            &bank,
            &replay_tx_thread_pool,
            [locked_entry].into_iter(),
            None,
            None,
            &mut batch_execution_timing,
            None,
            &ignored_prioritization_fee_cache,
        );
        if should_succeed {
            assert_matches!(result, Ok(()));
        } else {
            assert_matches!(result, Err(TransactionError::InsufficientFundsForFee));
        }
    }
5140
    #[test]
    fn test_schedule_batches_for_execution_success() {
        // All scheduled executions succeed; `process_batches` returns Ok.
        do_test_schedule_batches_for_execution(true);
    }
5145
    #[test]
    fn test_schedule_batches_for_execution_failure() {
        // First scheduled execution aborts; the recovered error is propagated.
        do_test_schedule_batches_for_execution(false);
    }
5150
    /// Expected processing outcome for the single transaction in
    /// `test_execute_batch_pre_commit_callback`.
    enum TxResult {
        // Valid transfer that executes and commits without error.
        ExecutedWithSuccess,
        // Transfer whose execution fails (amount exceeds available funds) but is
        // still committed, incrementing the bank's transaction error count.
        ExecutedWithFailure,
        // Transaction that cannot execute at all (unknown recent blockhash).
        NotExecuted,
    }
5156
    // Cross-product: every transaction outcome against every pre-commit (poh) outcome.
    #[test_matrix(
        [TxResult::ExecutedWithSuccess, TxResult::ExecutedWithFailure, TxResult::NotExecuted],
        [Ok(None), Ok(Some(4)), Err(TransactionError::CommitCancelled)]
    )]
    fn test_execute_batch_pre_commit_callback(
        tx_result: TxResult,
        poh_result: Result<Option<usize>>,
    ) {
        agave_logger::setup();
        let dummy_leader_pubkey = solana_pubkey::new_rand();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
        let bank = Bank::new_for_tests(&genesis_config);
        let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests();
        let bank = Arc::new(bank);
        let pubkey = solana_pubkey::new_rand();
        let (tx, expected_tx_result) = match tx_result {
            TxResult::ExecutedWithSuccess => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    1,
                    genesis_config.hash(),
                )),
                Ok(()),
            ),
            // Transfer amount far exceeds the mint's funding, so execution fails
            // but the batch result stays Ok (tx is still committed, as an error).
            TxResult::ExecutedWithFailure => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    100000000,
                    genesis_config.hash(),
                )),
                Ok(()),
            ),
            // Unknown recent blockhash prevents execution entirely.
            TxResult::NotExecuted => (
                RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
                    &mint_keypair,
                    &pubkey,
                    1,
                    Hash::default(),
                )),
                Err(TransactionError::BlockhashNotFound),
            ),
        };
        let mut batch = TransactionBatch::new(
            vec![Ok(()); 1],
            &bank,
            OwnedOrBorrowed::Borrowed(slice::from_ref(&tx)),
        );
        // Batch was hand-built rather than produced by bank account locking, so
        // skip the unlock bookkeeping on drop.
        batch.set_needs_unlock(false);
        // Ok(Some(_)) means the pre-commit callback supplies a starting tx index.
        let poh_with_index = matches!(&poh_result, Ok(Some(_)));
        let batch = TransactionBatchWithIndexes {
            batch,
            transaction_indexes: vec![],
        };
        let prioritization_fee_cache = PrioritizationFeeCache::default();
        let mut timing = ExecuteTimings::default();
        let (sender, receiver) = crossbeam_channel::unbounded();

        assert_eq!(bank.transaction_count(), 0);
        assert_eq!(bank.transaction_error_count(), 0);
        let should_commit = poh_result.is_ok();
        let mut is_called = false;
        let result = execute_batch(
            &batch,
            &bank,
            Some(&TransactionStatusSender {
                sender,
                dependency_tracker: None,
            }),
            None,
            &mut timing,
            None,
            &prioritization_fee_cache,
            // Pre-commit callback: propagates `poh_result`, additionally failing
            // when the processing result itself is an error.
            Some(|processing_result: &'_ Result<_>| {
                is_called = true;
                let ok = poh_result?;
                if let Err(error) = processing_result {
                    Err(error.clone())?;
                };
                Ok(ok)
            }),
        );

        // pre_commit_callback() should always be called regardless of tx_result
        assert!(is_called);

        if should_commit {
            assert_eq!(result, expected_tx_result);
            if expected_tx_result.is_ok() {
                assert_eq!(bank.transaction_count(), 1);
                if matches!(tx_result, TxResult::ExecutedWithFailure) {
                    assert_eq!(bank.transaction_error_count(), 1);
                } else {
                    assert_eq!(bank.transaction_error_count(), 0);
                }
            } else {
                // Not executed: nothing was committed.
                assert_eq!(bank.transaction_count(), 0);
            }
        } else {
            // Callback returned Err: the commit is cancelled.
            assert_matches!(result, Err(TransactionError::CommitCancelled));
            assert_eq!(bank.transaction_count(), 0);
        }
        // A status batch is only emitted for committed transactions; when the
        // callback supplied starting index 4, that index must appear in the batch.
        if poh_with_index && expected_tx_result.is_ok() {
            assert_matches!(
                receiver.try_recv(),
                Ok(TransactionStatusMessage::Batch((TransactionStatusBatch{transaction_indexes, ..}, _sequence)))
                    if transaction_indexes == vec![4_usize]
            );
        } else if should_commit && expected_tx_result.is_ok() {
            assert_matches!(
                receiver.try_recv(),
                Ok(TransactionStatusMessage::Batch((TransactionStatusBatch{transaction_indexes, ..}, _sequence)))
                    if transaction_indexes.is_empty()
            );
        } else {
            assert_matches!(receiver.try_recv(), Err(_));
        }
    }
5280
    // Verifies that a transaction replayed into slot 2 may only reference the
    // blockhash of a *completed* block (slot 0's hash), not a hash produced
    // mid-slot (`slot_1_hash` below).
    #[test]
    fn test_confirm_slot_entries_with_fix() {
        const HASHES_PER_TICK: u64 = 10;
        const TICKS_PER_SLOT: u64 = 2;

        let collector_id = Pubkey::new_unique();

        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
        genesis_config.ticks_per_slot = TICKS_PER_SLOT;
        let genesis_hash = genesis_config.hash();

        let (slot_0_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
        assert_eq!(slot_0_bank.slot(), 0);
        assert_eq!(slot_0_bank.tick_height(), 0);
        assert_eq!(slot_0_bank.max_tick_height(), 2);
        assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
        assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));

        // Fully replay slot 0: its last entry hash becomes the new blockhash.
        let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
        let slot_0_hash = slot_0_entries.last().unwrap().hash;
        confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
        assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
        assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
        assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
        assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));

        // Slot 1 is skipped: slot 2 builds directly on slot 0 and therefore spans
        // two slots' worth of ticks (tick_height 2 up to max_tick_height 6).
        let new_bank = Bank::new_from_parent(slot_0_bank, &collector_id, 2);
        let slot_2_bank = bank_forks
            .write()
            .unwrap()
            .insert(new_bank)
            .clone_without_scheduler();
        assert_eq!(slot_2_bank.slot(), 2);
        assert_eq!(slot_2_bank.tick_height(), 2);
        assert_eq!(slot_2_bank.max_tick_height(), 6);
        assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);

        // Replay the first portion of slot 2's entries without marking the slot
        // full: tick height advances to 4 but the bank's blockhash is unchanged,
        // so `slot_1_hash` is not yet a valid recent blockhash.
        let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
        let slot_1_hash = slot_1_entries.last().unwrap().hash;
        confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
        assert_eq!(slot_2_bank.tick_height(), 4);
        assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
        assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(1));
        assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(0));

        struct TestCase {
            recent_blockhash: Hash,
            expected_result: result::Result<(), BlockstoreProcessorError>,
        }

        let test_cases = [
            TestCase {
                recent_blockhash: slot_1_hash,
                expected_result: Err(BlockstoreProcessorError::InvalidTransaction(
                    TransactionError::BlockhashNotFound,
                )),
            },
            TestCase {
                recent_blockhash: slot_0_hash,
                expected_result: Ok(()),
            },
        ];

        // Check that slot 2 transactions can only use hashes for completed blocks.
        for TestCase {
            recent_blockhash,
            expected_result,
        } in test_cases
        {
            // One transaction entry, then entries consuming the remaining hashes
            // so the slot's tick budget is exactly filled.
            let slot_2_entries = {
                let to_pubkey = Pubkey::new_unique();
                let mut prev_entry_hash = slot_1_hash;
                let mut remaining_entry_hashes = HASHES_PER_TICK;

                let tx =
                    system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_blockhash);
                remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
                let mut entries = vec![next_entry_mut(&mut prev_entry_hash, 1, vec![tx])];

                entries.push(next_entry_mut(
                    &mut prev_entry_hash,
                    remaining_entry_hashes,
                    vec![],
                ));
                entries.push(next_entry_mut(
                    &mut prev_entry_hash,
                    HASHES_PER_TICK,
                    vec![],
                ));

                entries
            };

            let slot_2_hash = slot_2_entries.last().unwrap().hash;
            let result =
                confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash);
            match (result, expected_result) {
                (Ok(()), Ok(())) => {
                    // Slot completed: its final entry hash is the new blockhash.
                    assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
                    assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
                    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
                    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
                    assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
                }
                (
                    Err(BlockstoreProcessorError::InvalidTransaction(err)),
                    Err(BlockstoreProcessorError::InvalidTransaction(expected_err)),
                ) => {
                    assert_eq!(err, expected_err);
                }
                (result, expected_result) => {
                    panic!("actual result {result:?} != expected result {expected_result:?}");
                }
            }
        }
    }
5402
5403    #[test]
5404    fn test_check_block_cost_limit() {
5405        let dummy_leader_pubkey = solana_pubkey::new_rand();
5406        let GenesisConfigInfo {
5407            genesis_config,
5408            mint_keypair,
5409            ..
5410        } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
5411        let bank = Bank::new_for_tests(&genesis_config);
5412
5413        let tx = RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer(
5414            &mint_keypair,
5415            &Pubkey::new_unique(),
5416            1,
5417            genesis_config.hash(),
5418        ));
5419        let mut tx_cost = CostModel::calculate_cost(&tx, &bank.feature_set);
5420        let actual_execution_cu = 1;
5421        let actual_loaded_accounts_data_size = 64 * 1024;
5422        let TransactionCost::Transaction(ref mut usage_cost_details) = tx_cost else {
5423            unreachable!("test tx is non-vote tx");
5424        };
5425        usage_cost_details.programs_execution_cost = actual_execution_cu;
5426        usage_cost_details.loaded_accounts_data_size_cost =
5427            CostModel::calculate_loaded_accounts_data_size_cost(
5428                actual_loaded_accounts_data_size,
5429                &bank.feature_set,
5430            );
5431        // set block-limit to be able to just have one transaction
5432        let block_limit = tx_cost.sum();
5433        bank.write_cost_tracker()
5434            .unwrap()
5435            .set_limits(u64::MAX, block_limit, u64::MAX);
5436
5437        let tx_costs = vec![None, Some(tx_cost), None];
5438        // The transaction will fit when added the first time
5439        assert!(check_block_cost_limits(&bank, &tx_costs).is_ok());
5440        // But adding a second time will exceed the block limit
5441        assert_eq!(
5442            Err(TransactionError::WouldExceedMaxBlockCostLimit),
5443            check_block_cost_limits(&bank, &tx_costs)
5444        );
5445        // Adding another None will noop (even though the block is already full)
5446        assert!(check_block_cost_limits(&bank, &tx_costs[0..1]).is_ok());
5447    }
5448
5449    #[test]
5450    fn test_calculate_alpenglow_ticks() {
5451        let first_alpenglow_slot = 10;
5452        let ticks_per_slot = 2;
5453
5454        // Slots before alpenglow don't have alpenglow ticks
5455        let slot = 9;
5456        let parent_slot = 8;
5457        assert!(
5458            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
5459                .is_none()
5460        );
5461
5462        // First alpenglow slot should only have 1 tick
5463        let slot = first_alpenglow_slot;
5464        let parent_slot = first_alpenglow_slot - 1;
5465        assert_eq!(
5466            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
5467                .unwrap(),
5468            1
5469        );
5470
5471        // First alpenglow slot with skipped non-alpenglow slots
5472        // need to have `ticks_per_slot` ticks per skipped slot and
5473        // then one additional tick for the first alpenglow slot
5474        let slot = first_alpenglow_slot;
5475        let num_skipped_slots = 3;
5476        let parent_slot = first_alpenglow_slot - num_skipped_slots - 1;
5477        assert_eq!(
5478            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
5479                .unwrap(),
5480            num_skipped_slots * ticks_per_slot + 1
5481        );
5482
5483        // Skipped alpenglow slots don't need any additional ticks
5484        let slot = first_alpenglow_slot + 2;
5485        let parent_slot = first_alpenglow_slot;
5486        assert_eq!(
5487            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
5488                .unwrap(),
5489            1
5490        );
5491
5492        // Skipped alpenglow slots along skipped non-alpenglow slots
5493        // need to have `ticks_per_slot` ticks per skipped non-alpenglow
5494        // slot only and then one additional tick for the alpenglow slot
5495        let slot = first_alpenglow_slot + 2;
5496        let num_skipped_non_alpenglow_slots = 4;
5497        let parent_slot = first_alpenglow_slot - num_skipped_non_alpenglow_slots - 1;
5498        assert_eq!(
5499            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
5500                .unwrap(),
5501            num_skipped_non_alpenglow_slots * ticks_per_slot + 1
5502        );
5503    }
5504}