use {
crate::{
block_error::BlockError, blockstore::Blockstore, blockstore_db::BlockstoreError,
blockstore_meta::SlotMeta, leader_schedule_cache::LeaderScheduleCache,
token_balances::collect_token_balances,
},
chrono_humanize::{Accuracy, HumanTime, Tense},
crossbeam_channel::Sender,
itertools::Itertools,
log::*,
rand::{seq::SliceRandom, thread_rng},
rayon::{prelude::*, ThreadPool},
solana_entry::entry::{
self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
},
solana_measure::{measure, measure::Measure},
solana_metrics::{datapoint_error, inc_new_counter_debug},
solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings},
solana_rayon_threadlimit::{get_max_thread_count, get_thread_count},
solana_runtime::{
accounts_background_service::AbsRequestSender,
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
bank::{
Bank, RentDebits, TransactionBalancesSet, TransactionExecutionDetails,
TransactionExecutionResult, TransactionResults, VerifyBankHash,
},
bank_forks::BankForks,
bank_utils,
block_cost_limits::*,
commitment::VOTE_THRESHOLD_SIZE,
cost_model::CostModel,
prioritization_fee_cache::PrioritizationFeeCache,
runtime_config::RuntimeConfig,
transaction_batch::TransactionBatch,
vote_account::VoteAccountsHashMap,
vote_sender_types::ReplayVoteSender,
},
solana_sdk::{
clock::{Slot, MAX_PROCESSING_AGE},
feature_set,
genesis_config::GenesisConfig,
hash::Hash,
instruction::InstructionError,
pubkey::Pubkey,
signature::{Keypair, Signature},
timing,
transaction::{
Result, SanitizedTransaction, TransactionError, TransactionVerificationMode,
VersionedTransaction,
},
},
solana_transaction_status::token_balances::TransactionTokenBalancesSet,
std::{
borrow::Cow,
collections::{HashMap, HashSet},
path::PathBuf,
result,
sync::{Arc, Mutex, RwLock},
time::{Duration, Instant},
},
thiserror::Error,
};
/// A transaction batch paired with each transaction's index within the slot,
/// so downstream consumers (e.g. transaction-status reporting) can recover the
/// original ordering even after batches are shuffled or rebatched.
struct TransactionBatchWithIndexes<'a, 'b> {
    pub batch: TransactionBatch<'a, 'b>,
    pub transaction_indexes: Vec<usize>,
}
/// A verified entry paired with the slot-relative index of its first
/// transaction.
struct ReplayEntry {
    entry: EntryType,
    starting_index: usize,
}
// Shared rayon pool used to execute transaction batches in parallel during
// replay; sized to the process-wide maximum thread count and given named
// threads for debuggability.
lazy_static! {
    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(get_max_thread_count())
        .thread_name(|ix| format!("solBstoreProc{:02}", ix))
        .build()
        .unwrap();
}
/// Returns a clone of the first `Err` in `results`, or `Ok(())` when every
/// result succeeded.
fn first_err(results: &[Result<()>]) -> Result<()> {
    results
        .iter()
        .find(|result| result.is_err())
        .cloned()
        .unwrap_or(Ok(()))
}
/// Scans the per-transaction fee collection results, logging a warning and
/// emitting a datapoint for every error encountered, and returns the first
/// error together with the offending transaction's signature (or `None` if
/// all succeeded).
fn get_first_error(
    batch: &TransactionBatch,
    fee_collection_results: Vec<Result<()>>,
) -> Option<(Result<()>, Signature)> {
    let mut first_failure = None;
    let transactions = batch.sanitized_transactions();
    for (result, transaction) in fee_collection_results.iter().zip(transactions) {
        if let Err(ref err) = result {
            // Remember only the first failure, but report all of them.
            first_failure.get_or_insert_with(|| (result.clone(), *transaction.signature()));
            warn!(
                "Unexpected validator error: {:?}, transaction: {:?}",
                err, transaction
            );
            datapoint_error!(
                "validator_process_entry_error",
                (
                    "error",
                    format!("error: {:?}, transaction: {:?}", err, transaction),
                    String
                )
            );
        }
    }
    first_failure
}
/// Executes a single batch of sanitized transactions against `bank`, commits
/// the results, forwards any votes to `replay_vote_sender`, and optionally
/// sends per-transaction status (including token balances) through
/// `transaction_status_sender`.
///
/// Returns the accounts-data-size check error or the first fee-collection
/// error, otherwise `Ok(())`.
fn execute_batch(
    batch: &TransactionBatchWithIndexes,
    bank: &Arc<Bank>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timings: &mut ExecuteTimings,
    log_messages_bytes_limit: Option<usize>,
) -> Result<()> {
    let TransactionBatchWithIndexes {
        batch,
        transaction_indexes,
    } = batch;
    // Token balances are only worth collecting when a status sender exists to
    // consume them.
    let record_token_balances = transaction_status_sender.is_some();
    let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
    let pre_token_balances = if record_token_balances {
        collect_token_balances(bank, batch, &mut mint_decimals)
    } else {
        vec![]
    };
    // The four identical `is_some()` flags request optional collection
    // (balances, logs, etc.) only when there is a status sender to receive it.
    let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions(
        batch,
        MAX_PROCESSING_AGE,
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        timings,
        log_messages_bytes_limit,
    );
    bank_utils::find_and_send_votes(
        batch.sanitized_transactions(),
        &tx_results,
        replay_vote_sender,
    );
    let TransactionResults {
        fee_collection_results,
        execution_results,
        rent_debits,
        ..
    } = tx_results;
    // Abort before status reporting if accounts-data-size limits were violated.
    check_accounts_data_size(bank, &execution_results)?;
    if let Some(transaction_status_sender) = transaction_status_sender {
        let transactions = batch.sanitized_transactions().to_vec();
        let post_token_balances = if record_token_balances {
            collect_token_balances(bank, batch, &mut mint_decimals)
        } else {
            vec![]
        };
        let token_balances =
            TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances);
        transaction_status_sender.send_transaction_status_batch(
            bank.clone(),
            transactions,
            execution_results,
            balances,
            token_balances,
            rent_debits,
            transaction_indexes.to_vec(),
        );
    }
    let first_err = get_first_error(batch, fee_collection_results);
    first_err.map(|(result, _)| result).unwrap_or(Ok(()))
}
/// Timing metrics gathered by `execute_batches_internal`, keyed by rayon
/// worker-thread index so per-thread execution time can be aggregated later.
#[derive(Default)]
struct ExecuteBatchesInternalMetrics {
    /// Accumulated timings for each worker thread that executed batches.
    execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>,
    /// Number of batches executed in this invocation.
    total_batches_len: u64,
    /// Wall-clock microseconds spent executing all batches.
    execute_batches_us: u64,
}
/// Executes `batches` in parallel on `PAR_THREAD_POOL`, invoking
/// `entry_callback` (if any) after each batch and accumulating per-thread
/// timings under a shared lock. Returns the first execution error, or the
/// collected timing metrics on success.
fn execute_batches_internal(
    bank: &Arc<Bank>,
    batches: &[TransactionBatchWithIndexes],
    entry_callback: Option<&ProcessCallback>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    log_messages_bytes_limit: Option<usize>,
) -> Result<ExecuteBatchesInternalMetrics> {
    inc_new_counter_debug!("bank-par_execute_entries-count", batches.len());
    // Keyed by rayon worker-thread index; each worker folds its own timing
    // into its entry while holding the mutex briefly.
    let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
        Mutex::new(HashMap::new());
    let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed");
    let results: Vec<Result<()>> = PAR_THREAD_POOL.install(|| {
        batches
            .into_par_iter()
            .map(|transaction_batch| {
                let transaction_count =
                    transaction_batch.batch.sanitized_transactions().len() as u64;
                let mut timings = ExecuteTimings::default();
                let (result, execute_batches_time): (Result<()>, Measure) = measure!(
                    {
                        let result = execute_batch(
                            transaction_batch,
                            bank,
                            transaction_status_sender,
                            replay_vote_sender,
                            &mut timings,
                            log_messages_bytes_limit,
                        );
                        if let Some(entry_callback) = entry_callback {
                            entry_callback(bank);
                        }
                        result
                    },
                    "execute_batch",
                );
                // This closure always runs on a pool worker (we are inside
                // `install`), so a thread index is always available.
                let thread_index = PAR_THREAD_POOL.current_thread_index().unwrap();
                execution_timings_per_thread
                    .lock()
                    .unwrap()
                    .entry(thread_index)
                    .and_modify(|thread_execution_time| {
                        let ThreadExecuteTimings {
                            total_thread_us,
                            total_transactions_executed,
                            execute_timings: total_thread_execute_timings,
                        } = thread_execution_time;
                        *total_thread_us += execute_batches_time.as_us();
                        *total_transactions_executed += transaction_count;
                        total_thread_execute_timings
                            .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1);
                        total_thread_execute_timings.accumulate(&timings);
                    })
                    .or_insert(ThreadExecuteTimings {
                        total_thread_us: execute_batches_time.as_us(),
                        total_transactions_executed: transaction_count,
                        execute_timings: timings,
                    });
                result
            })
            .collect()
    });
    execute_batches_elapsed.stop();
    // Propagate the first batch failure, if any.
    first_err(&results)?;
    Ok(ExecuteBatchesInternalMetrics {
        execution_timings_per_thread: execution_timings_per_thread.into_inner().unwrap(),
        total_batches_len: batches.len() as u64,
        execute_batches_us: execute_batches_elapsed.as_us(),
    })
}
/// Builds a `TransactionBatchWithIndexes` covering the inclusive slice
/// `[start, end]` of already-locked transactions. The produced batch is marked
/// as not needing unlock, because lock ownership stays with the original
/// batches the transactions came from.
fn rebatch_transactions<'a>(
    lock_results: &'a [Result<()>],
    bank: &'a Arc<Bank>,
    sanitized_txs: &'a [SanitizedTransaction],
    start: usize,
    end: usize,
    transaction_indexes: &'a [usize],
) -> TransactionBatchWithIndexes<'a, 'a> {
    let tx_slice = &sanitized_txs[start..=end];
    let result_slice = &lock_results[start..=end];
    let index_slice = &transaction_indexes[start..=end];
    let mut batch = TransactionBatch::new(result_slice.to_vec(), bank, Cow::from(tx_slice));
    // Account locks are released by the original batches, not this view.
    batch.set_needs_unlock(false);
    TransactionBatchWithIndexes {
        batch,
        transaction_indexes: index_slice.to_vec(),
    }
}
/// Flattens `batches`, estimates each transaction's cost via `cost_model`, and
/// regroups the transactions into roughly even-cost chunks (targeting one
/// chunk per worker thread) before executing them in parallel. Falls back to
/// the original batches when rebatching would not improve load balance.
fn execute_batches(
    bank: &Arc<Bank>,
    batches: &[TransactionBatchWithIndexes],
    entry_callback: Option<&ProcessCallback>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    confirmation_timing: &mut ConfirmationTiming,
    cost_model: &CostModel,
    log_messages_bytes_limit: Option<usize>,
) -> Result<()> {
    // Flatten all batches into parallel vectors of lock results, transactions,
    // and original transaction indexes.
    let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = batches
        .iter()
        .flat_map(|batch| {
            batch
                .batch
                .lock_results()
                .iter()
                .cloned()
                .zip(batch.batch.sanitized_transactions().to_vec())
                .zip(batch.transaction_indexes.to_vec())
        })
        .unzip();
    let mut minimal_tx_cost = u64::MAX;
    let mut total_cost: u64 = 0;
    // NOTE(review): the "without_bpf" totals below are accumulated but never
    // consulted by the batch-splitting decision; only `cost` drives splits.
    let mut total_cost_without_bpf: u64 = 0;
    #[allow(clippy::needless_collect)]
    let tx_costs = sanitized_txs
        .iter()
        .map(|tx| {
            let tx_cost = cost_model.calculate_cost(tx);
            let cost = tx_cost.sum();
            let cost_without_bpf = tx_cost.sum_without_bpf();
            minimal_tx_cost = std::cmp::min(minimal_tx_cost, cost);
            total_cost = total_cost.saturating_add(cost);
            total_cost_without_bpf = total_cost_without_bpf.saturating_add(cost_without_bpf);
            (cost, cost_without_bpf)
        })
        .collect::<Vec<_>>();
    let target_batch_count = get_thread_count() as u64;
    let mut tx_batches: Vec<TransactionBatchWithIndexes> = vec![];
    // Only rebatch when the total cost is large enough that every thread could
    // be given more work than the single cheapest transaction.
    let rebatched_txs = if total_cost > target_batch_count.saturating_mul(minimal_tx_cost) {
        let target_batch_cost = total_cost / target_batch_count;
        let mut batch_cost: u64 = 0;
        let mut batch_cost_without_bpf: u64 = 0;
        let mut slice_start = 0;
        tx_costs
            .into_iter()
            .enumerate()
            .for_each(|(index, cost_pair)| {
                let next_index = index + 1;
                batch_cost = batch_cost.saturating_add(cost_pair.0);
                batch_cost_without_bpf = batch_cost_without_bpf.saturating_add(cost_pair.1);
                // Close the current chunk once it reaches the target cost or
                // the final transaction has been consumed.
                if batch_cost >= target_batch_cost || next_index == sanitized_txs.len() {
                    let tx_batch = rebatch_transactions(
                        &lock_results,
                        bank,
                        &sanitized_txs,
                        slice_start,
                        index,
                        &transaction_indexes,
                    );
                    slice_start = next_index;
                    tx_batches.push(tx_batch);
                    batch_cost = 0;
                    batch_cost_without_bpf = 0;
                }
            });
        &tx_batches[..]
    } else {
        batches
    };
    let execute_batches_internal_metrics = execute_batches_internal(
        bank,
        rebatched_txs,
        entry_callback,
        transaction_status_sender,
        replay_vote_sender,
        log_messages_bytes_limit,
    )?;
    confirmation_timing.process_execute_batches_internal_metrics(execute_batches_internal_metrics);
    Ok(())
}
/// Test-only entry point: fully verifies `entries` against `bank` and replays
/// them, assigning each transaction an index that continues from the bank's
/// current transaction count.
pub fn process_entries_for_tests(
    bank: &Arc<Bank>,
    entries: Vec<Entry>,
    randomize: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
        }
    };
    // Transaction indexes continue from those already recorded in the bank.
    let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap();
    let mut confirmation_timing = ConfirmationTiming::default();
    let mut replay_entries: Vec<_> =
        entry::verify_transactions(entries, Arc::new(verify_transaction))?
            .into_iter()
            .map(|entry| {
                let starting_index = entry_starting_index;
                if let EntryType::Transactions(ref transactions) = entry {
                    entry_starting_index = entry_starting_index.saturating_add(transactions.len());
                }
                ReplayEntry {
                    entry,
                    starting_index,
                }
            })
            .collect();
    // Tests do not consume prioritization-fee data; a throwaway cache suffices.
    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
    let result = process_entries_with_callback(
        bank,
        &mut replay_entries,
        randomize,
        None,
        transaction_status_sender,
        replay_vote_sender,
        &mut confirmation_timing,
        None,
        &_ignored_prioritization_fee_cache,
    );
    debug!("process_entries: {:?}", confirmation_timing);
    result
}
/// Core replay loop: walks `entries`, queueing non-conflicting transaction
/// batches for parallel execution and registering ticks. Pending batches are
/// flushed (executed) when an account-lock conflict arises, at each block
/// boundary, and once more at the end.
#[allow(clippy::too_many_arguments)]
fn process_entries_with_callback(
    bank: &Arc<Bank>,
    entries: &mut [ReplayEntry],
    randomize: bool,
    entry_callback: Option<&ProcessCallback>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    confirmation_timing: &mut ConfirmationTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    // Batches accumulated since the last flush; all hold non-conflicting
    // account locks.
    let mut batches = vec![];
    let mut tick_hashes = vec![];
    let mut rng = thread_rng();
    let cost_model = CostModel::new();
    for ReplayEntry {
        entry,
        starting_index,
    } in entries
    {
        match entry {
            EntryType::Tick(hash) => {
                tick_hashes.push(hash);
                // At a block boundary, execute pending batches before
                // registering the accumulated ticks.
                if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
                    execute_batches(
                        bank,
                        &batches,
                        entry_callback,
                        transaction_status_sender,
                        replay_vote_sender,
                        confirmation_timing,
                        &cost_model,
                        log_messages_bytes_limit,
                    )?;
                    batches.clear();
                    for hash in &tick_hashes {
                        bank.register_tick(hash);
                    }
                    tick_hashes.clear();
                }
            }
            EntryType::Transactions(transactions) => {
                let starting_index = *starting_index;
                let transaction_indexes = if randomize {
                    // Shuffle execution order while keeping each transaction
                    // paired with its original slot-relative index.
                    let mut transactions_and_indexes: Vec<(SanitizedTransaction, usize)> =
                        transactions.drain(..).zip(starting_index..).collect();
                    transactions_and_indexes.shuffle(&mut rng);
                    let (txs, indexes): (Vec<_>, Vec<_>) =
                        transactions_and_indexes.into_iter().unzip();
                    *transactions = txs;
                    indexes
                } else {
                    (starting_index..starting_index.saturating_add(transactions.len())).collect()
                };
                loop {
                    // Try to take account locks for this entry's transactions.
                    let batch = bank.prepare_sanitized_batch(transactions);
                    let first_lock_err = first_err(batch.lock_results());
                    if first_lock_err.is_ok() {
                        batches.push(TransactionBatchWithIndexes {
                            batch,
                            transaction_indexes,
                        });
                        prioritization_fee_cache.update(bank.clone(), transactions.iter());
                        break;
                    }
                    if batches.is_empty() {
                        // No pending batch holds the conflicting locks, so the
                        // entry conflicts with itself: fatal replay error.
                        datapoint_error!(
                            "validator_process_entry_error",
                            (
                                "error",
                                format!(
                                    "Lock accounts error, entry conflicts with itself, txs: {:?}",
                                    transactions
                                ),
                                String
                            )
                        );
                        first_lock_err?;
                    } else {
                        // Flush pending batches to release their locks, then
                        // retry locking this entry's transactions.
                        execute_batches(
                            bank,
                            &batches,
                            entry_callback,
                            transaction_status_sender,
                            replay_vote_sender,
                            confirmation_timing,
                            &cost_model,
                            log_messages_bytes_limit,
                        )?;
                        batches.clear();
                    }
                }
            }
        }
    }
    // Execute whatever remains, then register trailing ticks.
    execute_batches(
        bank,
        &batches,
        entry_callback,
        transaction_status_sender,
        replay_vote_sender,
        confirmation_timing,
        &cost_model,
        log_messages_bytes_limit,
    )?;
    for hash in tick_hashes {
        bank.register_tick(hash);
    }
    Ok(())
}
/// Errors surfaced while processing (replaying) ledger contents from the
/// blockstore.
#[derive(Error, Debug)]
pub enum BlockstoreProcessorError {
    #[error("failed to load entries, error: {0}")]
    FailedToLoadEntries(#[from] BlockstoreError),
    #[error("failed to load meta")]
    FailedToLoadMeta,
    #[error("invalid block error: {0}")]
    InvalidBlock(#[from] BlockError),
    #[error("invalid transaction error: {0}")]
    InvalidTransaction(#[from] TransactionError),
    #[error("no valid forks found")]
    NoValidForksFound,
    #[error("invalid hard fork slot {0}")]
    InvalidHardFork(Slot),
    #[error("root bank with mismatched capitalization at {0}")]
    RootBankWithMismatchedCapitalization(Slot),
}
/// Callback invoked after each transaction batch is executed during replay.
pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

/// Options controlling how the ledger/blockstore is processed at startup.
#[derive(Default, Clone)]
pub struct ProcessOptions {
    /// Verify proof-of-history while replaying (skipped when false).
    pub poh_verify: bool,
    pub full_leader_cache: bool,
    /// Stop processing at this slot, if set.
    pub halt_at_slot: Option<Slot>,
    /// Invoked after each executed batch; see `ProcessCallback`.
    pub entry_callback: Option<ProcessCallback>,
    pub new_hard_forks: Option<Vec<Slot>>,
    /// Keys forwarded to bank construction for debug tracking.
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    // The fields below are forwarded to accounts-db / bank construction.
    pub account_indexes: AccountSecondaryIndexes,
    pub accounts_db_caching_enabled: bool,
    pub limit_load_slot_count_from_snapshot: Option<usize>,
    /// Permit replaying slots marked dead in the blockstore.
    pub allow_dead_slots: bool,
    pub accounts_db_test_hash_calculation: bool,
    pub accounts_db_skip_shrink: bool,
    pub accounts_db_config: Option<AccountsDbConfig>,
    pub verify_index: bool,
    pub shrink_ratio: AccountShrinkThreshold,
    pub runtime_config: RuntimeConfig,
    pub on_halt_store_hash_raw_data_for_debug: bool,
    pub run_final_accounts_hash_calc: bool,
}
/// Test helper: loads bank forks from genesis + blockstore, fully processes
/// the ledger from the root bank, and returns the resulting forks and leader
/// schedule cache. Panics if processing fails.
pub fn test_process_blockstore(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
    let (bank_forks, leader_schedule_cache, ..) = crate::bank_forks_utils::load_bank_forks(
        genesis_config,
        blockstore,
        Vec::new(),
        None,
        None,
        opts,
        None,
        None,
    );
    process_blockstore_from_root(
        blockstore,
        &bank_forks,
        &leader_schedule_cache,
        opts,
        None,
        None,
        &AbsRequestSender::default(),
    )
    .unwrap();
    (bank_forks, leader_schedule_cache)
}
/// Creates bank 0 from `genesis_config` (using the configured accounts-db
/// options), replays slot 0 from the blockstore, and returns the resulting
/// `BankForks`.
pub(crate) fn process_blockstore_for_bank_0(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    account_paths: Vec<PathBuf>,
    opts: &ProcessOptions,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> Arc<RwLock<BankForks>> {
    let mut bank0 = Bank::new_with_paths(
        genesis_config,
        account_paths,
        opts.debug_keys.clone(),
        Some(&crate::builtins::get(opts.runtime_config.bpf_jit)),
        opts.account_indexes.clone(),
        opts.accounts_db_caching_enabled,
        opts.shrink_ratio,
        false,
        opts.accounts_db_config.clone(),
        accounts_update_notifier,
    );
    bank0.set_compute_budget(opts.runtime_config.compute_budget);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
    info!("Processing ledger for slot 0...");
    process_bank_0(
        &bank_forks.read().unwrap().root_bank(),
        blockstore,
        opts,
        &VerifyRecyclers::default(),
        cache_block_meta_sender,
    );
    bank_forks
}
/// Replays blockstore contents starting from the single parentless root bank
/// already in `bank_forks` (bank 0 or a snapshot bank), freezing banks and
/// advancing the root as it goes, then logs summary metrics.
#[allow(clippy::too_many_arguments)]
pub fn process_blockstore_from_root(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    accounts_background_request_sender: &AbsRequestSender,
) -> result::Result<(), BlockstoreProcessorError> {
    // Processing must start from exactly one parentless bank.
    assert_eq!(bank_forks.read().unwrap().banks().len(), 1);
    let bank = bank_forks.read().unwrap().root_bank();
    assert!(bank.parent().is_none());
    let start_slot = bank.slot();
    info!("Processing ledger from slot {}...", start_slot);
    let now = Instant::now();
    // Only a primary (read-write) blockstore can persist root markings.
    if blockstore.is_primary_access() {
        blockstore
            .mark_slots_as_if_rooted_normally_at_startup(
                vec![(bank.slot(), Some(bank.hash()))],
                true,
            )
            .expect("Couldn't mark start_slot as root on startup");
    } else {
        info!(
            "Starting slot {} isn't root and won't be updated due to being secondary blockstore access",
            start_slot
        );
    }
    if let Ok(Some(highest_slot)) = blockstore.highest_slot() {
        info!("ledger holds data through slot {}", highest_slot);
    }
    let mut timing = ExecuteTimings::default();
    let mut num_slots_processed = 0;
    if let Some(start_slot_meta) = blockstore
        .meta(start_slot)
        .unwrap_or_else(|_| panic!("Failed to get meta for slot {}", start_slot))
    {
        num_slots_processed = load_frozen_forks(
            bank_forks,
            start_slot,
            &start_slot_meta,
            blockstore,
            leader_schedule_cache,
            opts,
            transaction_status_sender,
            cache_block_meta_sender,
            &mut timing,
            accounts_background_request_sender,
        )?;
    } else {
        // Tolerated: e.g. the snapshot slot may be newer than the ledger data.
        warn!(
            "Starting slot {} is not in Blockstore, unable to process",
            start_slot
        );
    };
    let processing_time = now.elapsed();
    datapoint_info!(
        "process_blockstore_from_root",
        ("total_time_us", processing_time.as_micros(), i64),
        (
            "frozen_banks",
            bank_forks.read().unwrap().frozen_banks().len(),
            i64
        ),
        ("slot", bank_forks.read().unwrap().root(), i64),
        ("num_slots_processed", num_slots_processed, i64),
        ("forks", bank_forks.read().unwrap().banks().len(), i64),
    );
    info!("ledger processing timing: {:?}", timing);
    {
        let bank_forks = bank_forks.read().unwrap();
        let mut bank_slots = bank_forks.banks().keys().copied().collect::<Vec<_>>();
        bank_slots.sort_unstable();
        info!(
            "ledger processed in {}. root slot is {}, {} bank{}: {}",
            HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
                .to_text_en(Accuracy::Precise, Tense::Present),
            bank_forks.root(),
            bank_slots.len(),
            if bank_slots.len() > 1 { "s" } else { "" },
            bank_slots.iter().map(|slot| slot.to_string()).join(", "),
        );
        // Startup replay must leave no bank mid-processing.
        assert!(bank_forks.active_bank_slots().is_empty());
    }
    Ok(())
}
/// Validates the ticks in `entries` against `bank`'s tick-height bounds:
/// rejects too many ticks, too few ticks in a full slot, a trailing
/// non-tick entry or an unmarked-full slot at the boundary, and an invalid
/// per-tick hash count.
fn verify_ticks(
    bank: &Bank,
    entries: &[Entry],
    slot_full: bool,
    tick_hash_count: &mut u64,
) -> std::result::Result<(), BlockError> {
    let projected_tick_height = bank.tick_height() + entries.tick_count();
    let max_tick_height = bank.max_tick_height();

    if projected_tick_height > max_tick_height {
        warn!("Too many entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooManyTicks);
    }

    if slot_full && projected_tick_height < max_tick_height {
        info!("Too few entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooFewTicks);
    }

    if projected_tick_height == max_tick_height {
        // The final entry of a slot must itself be a tick.
        if let Some(last_entry) = entries.last() {
            if !last_entry.is_tick() {
                warn!("Slot: {} did not end with a tick entry", bank.slot());
                return Err(BlockError::TrailingEntry);
            }
        }
        if !slot_full {
            warn!("Slot: {} was not marked full", bank.slot());
            return Err(BlockError::InvalidLastTick);
        }
    }

    let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
    if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
        warn!(
            "Tick with invalid number of hashes found in slot: {}",
            bank.slot()
        );
        return Err(BlockError::InvalidTickHashCount);
    }

    Ok(())
}
/// Replays all remaining entries of `bank`'s slot from the blockstore and
/// requires the slot to be complete afterwards. Execution timings are folded
/// into `timing`.
fn confirm_full_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    let mut confirmation_timing = ConfirmationTiming::default();
    // PoH verification is governed by the process options.
    let skip_verification = !opts.poh_verify;
    // Startup replay does not consume prioritization-fee data.
    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
    confirm_slot(
        blockstore,
        bank,
        &mut confirmation_timing,
        progress,
        skip_verification,
        transaction_status_sender,
        replay_vote_sender,
        opts.entry_callback.as_ref(),
        recyclers,
        opts.allow_dead_slots,
        opts.runtime_config.log_messages_bytes_limit,
        &_ignored_prioritization_fee_cache,
    )?;
    timing.accumulate(&confirmation_timing.execute_timings);
    if !bank.is_complete() {
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::Incomplete,
        ))
    } else {
        Ok(())
    }
}
/// Timing breakdown for confirming (replaying and verifying) a slot.
#[derive(Debug)]
pub struct ConfirmationTiming {
    /// When this timing record was created.
    pub started: Instant,
    /// Microseconds spent replaying entries.
    pub replay_elapsed: u64,
    /// Microseconds spent inside `execute_batches` calls.
    pub execute_batches_us: u64,
    /// Microseconds spent on proof-of-history verification.
    pub poh_verify_elapsed: u64,
    /// Microseconds spent on transaction signature/sanitization verification.
    pub transaction_verify_elapsed: u64,
    /// Microseconds spent successfully fetching entries from the blockstore.
    pub fetch_elapsed: u64,
    /// Microseconds spent on entry fetches that failed.
    pub fetch_fail_elapsed: u64,
    /// Cumulative execution timings across all threads.
    pub execute_timings: ExecuteTimings,
    /// Timings of the slowest thread per batch-set, approximating the
    /// end-to-end critical path of execution.
    pub end_to_end_execute_timings: ThreadExecuteTimings,
}
impl ConfirmationTiming {
    /// Folds the metrics from one `execute_batches` invocation into these
    /// cumulative timings, and accumulates the slowest thread's timings into
    /// `end_to_end_execute_timings` as an approximation of the wall-clock
    /// critical path.
    fn process_execute_batches_internal_metrics(
        &mut self,
        execute_batches_internal_metrics: ExecuteBatchesInternalMetrics,
    ) {
        let ConfirmationTiming {
            execute_timings: ref mut cumulative_execute_timings,
            execute_batches_us: ref mut cumulative_execute_batches_us,
            ref mut end_to_end_execute_timings,
            ..
        } = self;
        saturating_add_assign!(
            *cumulative_execute_batches_us,
            execute_batches_internal_metrics.execute_batches_us
        );
        cumulative_execute_timings.saturating_add_in_place(
            ExecuteTimingType::TotalBatchesLen,
            execute_batches_internal_metrics.total_batches_len,
        );
        cumulative_execute_timings.saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1);
        // Track the thread that spent the most total time; its timings best
        // approximate the end-to-end (critical-path) execution cost.
        let mut current_max_thread_execution_time: Option<ThreadExecuteTimings> = None;
        for (_, thread_execution_time) in execute_batches_internal_metrics
            .execution_timings_per_thread
            .into_iter()
        {
            let ThreadExecuteTimings {
                total_thread_us,
                execute_timings,
                ..
            } = &thread_execution_time;
            cumulative_execute_timings.accumulate(execute_timings);
            if *total_thread_us
                > current_max_thread_execution_time
                    .as_ref()
                    .map(|thread_execution_time| thread_execution_time.total_thread_us)
                    .unwrap_or(0)
            {
                current_max_thread_execution_time = Some(thread_execution_time);
            }
        }
        if let Some(current_max_thread_execution_time) = current_max_thread_execution_time {
            // Fix: the reference here had been corrupted by an HTML-entity
            // mangling ("&curren" -> "¤"); restore the `&` borrow of
            // `current_max_thread_execution_time` so the code compiles.
            end_to_end_execute_timings.accumulate(&current_max_thread_execution_time);
            end_to_end_execute_timings
                .execute_timings
                .saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1);
        };
    }
}
impl Default for ConfirmationTiming {
    /// Starts the clock at construction time; every counter and timing
    /// accumulator begins at zero/default.
    fn default() -> Self {
        let started = Instant::now();
        Self {
            started,
            execute_timings: ExecuteTimings::default(),
            end_to_end_execute_timings: ThreadExecuteTimings::default(),
            replay_elapsed: 0,
            execute_batches_us: 0,
            poh_verify_elapsed: 0,
            transaction_verify_elapsed: 0,
            fetch_elapsed: 0,
            fetch_fail_elapsed: 0,
        }
    }
}
/// Cumulative progress through a slot's entries across repeated
/// `confirm_slot` calls.
#[derive(Default)]
pub struct ConfirmationProgress {
    /// Hash of the last processed entry; PoH chain verification resumes here.
    pub last_entry: Hash,
    /// Running tick hash count fed into tick verification.
    pub tick_hash_count: u64,
    /// Shreds consumed so far; used as the resume point for entry fetches.
    pub num_shreds: u64,
    /// Entries processed so far.
    pub num_entries: usize,
    /// Transactions processed so far.
    pub num_txs: usize,
}
impl ConfirmationProgress {
    /// Creates a fresh progress tracker whose entry-chain verification starts
    /// from `last_entry`; all counters begin at zero.
    pub fn new(last_entry: Hash) -> Self {
        let mut progress = Self::default();
        progress.last_entry = last_entry;
        progress
    }
}
/// Loads the slot's remaining entries from `blockstore` (accounting fetch time
/// as success or failure) and confirms them against `bank`.
#[allow(clippy::too_many_arguments)]
pub fn confirm_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    timing: &mut ConfirmationTiming,
    progress: &mut ConfirmationProgress,
    skip_verification: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    entry_callback: Option<&ProcessCallback>,
    recyclers: &VerifyRecyclers,
    allow_dead_slots: bool,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> result::Result<(), BlockstoreProcessorError> {
    let slot = bank.slot();
    let slot_entries_load_result = {
        let mut load_elapsed = Measure::start("load_elapsed");
        // Resume from the number of shreds already consumed for this slot.
        let load_result = blockstore
            .get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
            .map_err(BlockstoreProcessorError::FailedToLoadEntries);
        load_elapsed.stop();
        if load_result.is_err() {
            timing.fetch_fail_elapsed += load_elapsed.as_us();
        } else {
            timing.fetch_elapsed += load_elapsed.as_us();
        }
        load_result
    }?;
    confirm_slot_entries(
        bank,
        slot_entries_load_result,
        timing,
        progress,
        skip_verification,
        transaction_status_sender,
        replay_vote_sender,
        entry_callback,
        recyclers,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )
}
/// Verifies and replays already-loaded entries for `bank`'s slot: checks tick
/// sanity, starts PoH and transaction verification, replays the transactions
/// while verification runs, then requires both verifications to succeed
/// before updating `progress`.
#[allow(clippy::too_many_arguments)]
fn confirm_slot_entries(
    bank: &Arc<Bank>,
    slot_entries_load_result: (Vec<Entry>, u64, bool),
    timing: &mut ConfirmationTiming,
    progress: &mut ConfirmationProgress,
    skip_verification: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    entry_callback: Option<&ProcessCallback>,
    recyclers: &VerifyRecyclers,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> result::Result<(), BlockstoreProcessorError> {
    let slot = bank.slot();
    let (entries, num_shreds, slot_full) = slot_entries_load_result;
    let num_entries = entries.len();
    // Compute each entry's starting transaction index within the slot,
    // continuing from the transactions confirmed so far.
    let mut entry_starting_indexes = Vec::with_capacity(num_entries);
    let mut entry_starting_index = progress.num_txs;
    let num_txs = entries
        .iter()
        .map(|e| {
            let num_txs = e.transactions.len();
            let next_starting_index = entry_starting_index.saturating_add(num_txs);
            entry_starting_indexes.push(entry_starting_index);
            entry_starting_index = next_starting_index;
            num_txs
        })
        .sum::<usize>();
    trace!(
        "Fetched entries for slot {}, num_entries: {}, num_shreds: {}, num_txs: {}, slot_full: {}",
        slot,
        num_entries,
        num_shreds,
        num_txs,
        slot_full,
    );
    if !skip_verification {
        let tick_hash_count = &mut progress.tick_hash_count;
        verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| {
            warn!(
                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: {}, shred_index: {}, slot_full: {}",
                err,
                slot,
                num_entries,
                bank.tick_height(),
                progress.last_entry,
                bank.last_blockhash(),
                num_shreds,
                slot_full,
            );
            err
        })?;
    }
    let last_entry_hash = entries.last().map(|e| e.hash);
    // Kick off PoH verification; completion is checked after replay below.
    let verifier = if !skip_verification {
        datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
        let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone());
        if entry_state.status() == EntryVerificationStatus::Failure {
            warn!("Ledger proof of history failed at slot: {}", slot);
            return Err(BlockError::InvalidEntryHash.into());
        }
        Some(entry_state)
    } else {
        None
    };
    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction,
              verification_mode: TransactionVerificationMode|
              -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, verification_mode)
        }
    };
    let check_start = Instant::now();
    // Transaction verification is also started here and finished after replay.
    let check_result = entry::start_verify_transactions(
        entries,
        skip_verification,
        recyclers.clone(),
        Arc::new(verify_transaction),
    );
    let transaction_cpu_duration_us = timing::duration_as_us(&check_start.elapsed());
    match check_result {
        Ok(mut check_result) => {
            let entries = check_result.entries();
            assert!(entries.is_some());
            let mut replay_elapsed = Measure::start("replay_elapsed");
            let mut replay_entries: Vec<_> = entries
                .unwrap()
                .into_iter()
                .zip(entry_starting_indexes)
                .map(|(entry, starting_index)| ReplayEntry {
                    entry,
                    starting_index,
                })
                .collect();
            // Replay (with randomized batch order) while verification finishes
            // in the background.
            let process_result = process_entries_with_callback(
                bank,
                &mut replay_entries,
                true,
                entry_callback,
                transaction_status_sender,
                replay_vote_sender,
                timing,
                log_messages_bytes_limit,
                prioritization_fee_cache,
            )
            .map_err(BlockstoreProcessorError::from);
            replay_elapsed.stop();
            timing.replay_elapsed += replay_elapsed.as_us();
            // Transaction verification must complete successfully...
            if !check_result.finish_verify() {
                warn!("Ledger proof of history failed at slot: {}", bank.slot());
                return Err(TransactionError::SignatureFailure.into());
            }
            // ...and so must PoH verification, when it was requested.
            if let Some(mut verifier) = verifier {
                let verified = verifier.finish_verify();
                timing.poh_verify_elapsed += verifier.poh_duration_us();
                timing.transaction_verify_elapsed +=
                    transaction_cpu_duration_us + check_result.gpu_verify_duration();
                if !verified {
                    warn!("Ledger proof of history failed at slot: {}", bank.slot());
                    return Err(BlockError::InvalidEntryHash.into());
                }
            }
            // Surface replay errors only after verification bookkeeping.
            process_result?;
            progress.num_shreds += num_shreds;
            progress.num_entries += num_entries;
            progress.num_txs += num_txs;
            if let Some(last_entry_hash) = last_entry_hash {
                progress.last_entry = last_entry_hash;
            }
            Ok(())
        }
        Err(err) => {
            warn!("Ledger proof of history failed at slot: {}", bank.slot());
            Err(err.into())
        }
    }
}
/// Replays slot 0 into `bank0`, freezes it, records its bank hash (primary
/// blockstore access only), and caches its block metadata. Panics if slot 0
/// cannot be processed.
fn process_bank_0(
    bank0: &Arc<Bank>,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) {
    assert_eq!(bank0.slot(), 0);
    let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
    confirm_full_slot(
        blockstore,
        bank0,
        opts,
        recyclers,
        &mut progress,
        None,
        None,
        &mut ExecuteTimings::default(),
    )
    .expect("Failed to process bank 0 from ledger. Did you forget to provide a snapshot?");
    bank0.freeze();
    if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
    }
    cache_block_meta(bank0, cache_block_meta_sender);
}
/// Pushes a child bank onto `pending_slots` for every full next-slot of
/// `meta`, skipping slots beyond `halt_at_slot`. The list is kept sorted
/// descending by slot so `pop()` yields the lowest slot first.
fn process_next_slots(
    bank: &Arc<Bank>,
    meta: &SlotMeta,
    blockstore: &Blockstore,
    leader_schedule_cache: &LeaderScheduleCache,
    pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>,
    halt_at_slot: Option<Slot>,
) -> result::Result<(), BlockstoreProcessorError> {
    if meta.next_slots.is_empty() {
        return Ok(());
    }
    for next_slot in &meta.next_slots {
        let skip_next_slot = halt_at_slot
            .map(|halt_at_slot| *next_slot > halt_at_slot)
            .unwrap_or(false);
        if skip_next_slot {
            continue;
        }
        let next_meta = blockstore
            .meta(*next_slot)
            .map_err(|err| {
                warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
                BlockstoreProcessorError::FailedToLoadMeta
            })?
            // NOTE(review): a slot advertised in the parent's `next_slots` is
            // assumed to have its own meta; this panics otherwise — confirm.
            .unwrap();
        // Only fully-shredded slots are ready to be replayed.
        if next_meta.is_full() {
            let next_bank = Bank::new_from_parent(
                bank,
                &leader_schedule_cache
                    .slot_leader_at(*next_slot, Some(bank))
                    .unwrap(),
                *next_slot,
            );
            trace!(
                "New bank for slot {}, parent slot is {}",
                next_slot,
                bank.slot(),
            );
            pending_slots.push((next_meta, next_bank, bank.last_blockhash()));
        }
    }
    // Sort descending so pop() processes the lowest slots first.
    pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot()));
    Ok(())
}
/// Replays all frozen (complete) forks in `blockstore` starting from
/// `start_slot`, inserting each successfully replayed bank into `bank_forks`.
///
/// As replay proceeds, the root is advanced whenever either (a) a
/// supermajority of epoch stake is observed (via vote accounts) to have
/// rooted a slot already replayed here, or (b) the blockstore itself marks
/// the slot as rooted. Replay stops early at `opts.halt_at_slot`, if set.
///
/// Returns the total number of slots replayed.
#[allow(clippy::too_many_arguments)]
fn load_frozen_forks(
    bank_forks: &RwLock<BankForks>,
    start_slot: Slot,
    start_slot_meta: &SlotMeta,
    blockstore: &Blockstore,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    timing: &mut ExecuteTimings,
    accounts_background_request_sender: &AbsRequestSender,
) -> result::Result<u64, BlockstoreProcessorError> {
    let recyclers = VerifyRecyclers::default();
    // All banks replayed so far, keyed by slot; pruned as the root advances.
    let mut all_banks = HashMap::new();
    let mut last_status_report = Instant::now();
    // Work stack of (SlotMeta, Bank, parent last_blockhash); kept sorted so
    // the lowest slot is popped first (see process_next_slots()).
    let mut pending_slots = vec![];
    let mut total_slots_elapsed = 0;
    let mut slots_elapsed = 0;
    let mut txs = 0;
    let blockstore_max_root = blockstore.max_root();
    let mut root = bank_forks.read().unwrap().root();
    // Don't advance the root below whichever is larger: the bank_forks root
    // or the blockstore's max root.
    let max_root = std::cmp::max(root, blockstore_max_root);
    info!(
        "load_frozen_forks() latest root from blockstore: {}, max_root: {}",
        blockstore_max_root, max_root,
    );
    // Seed the work stack with the full children of the start slot.
    process_next_slots(
        &bank_forks.read().unwrap().get(start_slot).unwrap(),
        start_slot_meta,
        blockstore,
        leader_schedule_cache,
        &mut pending_slots,
        opts.halt_at_slot,
    )?;
    let on_halt_store_hash_raw_data_for_debug = opts.on_halt_store_hash_raw_data_for_debug;
    // If the starting root already is the halt slot, skip replay entirely.
    if Some(bank_forks.read().unwrap().root()) != opts.halt_at_slot {
        while !pending_slots.is_empty() {
            timing.details.per_program_timings.clear();
            let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
            let slot = bank.slot();
            // Periodic progress logging (at most every 2 seconds).
            if last_status_report.elapsed() > Duration::from_secs(2) {
                let secs = last_status_report.elapsed().as_secs() as f32;
                last_status_report = Instant::now();
                info!(
                    "processing ledger: slot={}, last root slot={} slots={} slots/s={:?} txs/s={}",
                    slot,
                    root,
                    slots_elapsed,
                    slots_elapsed as f32 / secs,
                    txs as f32 / secs,
                );
                slots_elapsed = 0;
                txs = 0;
            }
            let mut progress = ConfirmationProgress::new(last_entry_hash);
            let bank = bank_forks.write().unwrap().insert(bank);
            if process_single_slot(
                blockstore,
                &bank,
                opts,
                &recyclers,
                &mut progress,
                transaction_status_sender,
                cache_block_meta_sender,
                None,
                timing,
            )
            .is_err()
            {
                // Failed replay: drop the bank from bank_forks and move on;
                // its descendants were never pushed so the fork dies here.
                assert!(bank_forks.write().unwrap().remove(bank.slot()).is_some());
                continue;
            }
            txs += progress.num_txs;
            assert!(bank.is_frozen());
            all_banks.insert(bank.slot(), bank.clone());
            // Decide whether this slot lets us advance the root.
            let new_root_bank = {
                if bank_forks.read().unwrap().root() >= max_root {
                    // Past the blockstore's roots: derive new roots from the
                    // supermajority of stake observed in this bank's votes.
                    supermajority_root_from_vote_accounts(
                        bank.slot(),
                        bank.total_epoch_stake(),
                        &bank.vote_accounts(),
                    ).and_then(|supermajority_root| {
                        if supermajority_root > root {
                            // The cluster-confirmed root must be a slot we
                            // replayed and a descendant of the current root.
                            let cluster_root_bank = all_banks.get(&supermajority_root).unwrap();
                            assert!(cluster_root_bank.ancestors.contains_key(&root));
                            info!(
                                "blockstore processor found new cluster confirmed root: {}, observed in bank: {}",
                                cluster_root_bank.slot(), bank.slot()
                            );
                            // Collect (slot, hash) for every newly rooted
                            // ancestor, walking from the new root back down
                            // to (but excluding) the old root.
                            let mut rooted_slots = vec![];
                            let mut new_root_bank = cluster_root_bank.clone();
                            loop {
                                if new_root_bank.slot() == root { break; } assert!(new_root_bank.slot() > root);
                                rooted_slots.push((new_root_bank.slot(), Some(new_root_bank.hash())));
                                new_root_bank = new_root_bank.parent().unwrap();
                            }
                            inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len());
                            // Only a primary-access blockstore may be written.
                            if blockstore.is_primary_access() {
                                blockstore
                                    .mark_slots_as_if_rooted_normally_at_startup(rooted_slots, true)
                                    .expect("Blockstore::mark_slots_as_if_rooted_normally_at_startup() should succeed");
                            }
                            Some(cluster_root_bank)
                        } else {
                            None
                        }
                    })
                } else if blockstore.is_root(slot) {
                    // Still within the blockstore's known roots.
                    Some(&bank)
                } else {
                    None
                }
            };
            if let Some(new_root_bank) = new_root_bank {
                root = new_root_bank.slot();
                leader_schedule_cache.set_root(new_root_bank);
                let _ = bank_forks.write().unwrap().set_root(
                    root,
                    accounts_background_request_sender,
                    None,
                );
                // Prune pending work and cached banks that are no longer on
                // the rooted fork.
                pending_slots
                    .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(&root));
                all_banks.retain(|_, bank| bank.ancestors.contains_key(&root));
            }
            slots_elapsed += 1;
            total_slots_elapsed += 1;
            trace!(
                "Bank for {}slot {} is complete",
                if root == slot { "root " } else { "" },
                slot,
            );
            let done_processing = opts
                .halt_at_slot
                .map(|halt_at_slot| slot >= halt_at_slot)
                .unwrap_or(false);
            if done_processing {
                if opts.run_final_accounts_hash_calc {
                    run_final_hash_calc(&bank, on_halt_store_hash_raw_data_for_debug);
                }
                break;
            }
            // Queue this slot's full children for replay.
            process_next_slots(
                &bank,
                &meta,
                blockstore,
                leader_schedule_cache,
                &mut pending_slots,
                opts.halt_at_slot,
            )?;
        }
    } else if on_halt_store_hash_raw_data_for_debug {
        run_final_hash_calc(
            &bank_forks.read().unwrap().root_bank(),
            on_halt_store_hash_raw_data_for_debug,
        );
    }
    Ok(total_slots_elapsed)
}
/// Flushes the accounts cache and runs one final bank-hash calculation,
/// optionally storing the raw hash input data for offline debugging.
/// Hash mismatches are deliberately ignored; the result is discarded.
fn run_final_hash_calc(bank: &Bank, on_halt_store_hash_raw_data_for_debug: bool) {
    bank.force_flush_accounts_cache();
    let _ = bank.verify_bank_hash(VerifyBankHash {
        test_hash_calculation: false,
        ignore_mismatch: true,
        require_rooted_bank: false,
        run_in_background: false,
        // Cached slots may still hold unflushed data at this point.
        can_cached_slot_be_unflushed: true,
        store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug,
    });
}
/// Given `(root, stake)` pairs sorted from highest root to lowest, returns
/// the highest root at which the accumulated stake exceeds the supermajority
/// threshold (`VOTE_THRESHOLD_SIZE`) of `total_epoch_stake`, if any.
fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> {
    if roots.is_empty() {
        return None;
    }
    let mut accumulated_stake = 0;
    let mut last_root_seen = roots[0].0;
    for &(root, stake) in roots {
        // Input must be ordered from highest root to lowest.
        assert!(root <= last_root_seen);
        accumulated_stake += stake;
        if accumulated_stake as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE {
            return Some(root);
        }
        last_root_seen = root;
    }
    None
}
/// Extracts each staked vote account's rooted slot and feeds the resulting
/// stake-weighted roots (sorted descending) to `supermajority_root()`.
/// Vote accounts with zero stake, an unreadable vote state, or no root slot
/// are skipped; unreadable vote states are logged.
fn supermajority_root_from_vote_accounts(
    bank_slot: Slot,
    total_epoch_stake: u64,
    vote_accounts: &VoteAccountsHashMap,
) -> Option<Slot> {
    let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
        .iter()
        .filter_map(|(key, (stake, account))| {
            // Zero-stake accounts cannot contribute to a supermajority.
            if *stake == 0 {
                return None;
            }
            match account.vote_state().as_ref() {
                Ok(vote_state) => vote_state.root_slot.map(|root_slot| (root_slot, *stake)),
                Err(_) => {
                    warn!(
                        "Unable to get vote_state from account {} in bank: {}",
                        key, bank_slot
                    );
                    None
                }
            }
        })
        .collect();
    // Order from highest root to lowest, as supermajority_root() requires.
    roots_stakes.sort_unstable_by_key(|(root, _)| std::cmp::Reverse(*root));
    supermajority_root(&roots_stakes, total_epoch_stake)
}
/// Replays the full contents of one slot against `bank`, then freezes it.
///
/// On replay failure the slot is marked dead in the blockstore (only when
/// this process holds primary, i.e. writable, access) and the error is
/// propagated. On success the bank is frozen, its hash recorded in the
/// blockstore (primary access only), and the bank forwarded for block-meta
/// caching.
fn process_single_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    confirm_full_slot(
        blockstore,
        bank,
        opts,
        recyclers,
        progress,
        transaction_status_sender,
        replay_vote_sender,
        timing,
    )
    .map_err(|err| {
        // Replay failed: record the slot as dead if we are allowed to write.
        let slot = bank.slot();
        warn!("slot {} failed to verify: {}", slot, err);
        if blockstore.is_primary_access() {
            blockstore
                .set_dead_slot(slot)
                .expect("Failed to mark slot as dead in blockstore");
        } else {
            info!(
                "Failed slot {} won't be marked dead due to being secondary blockstore access",
                slot
            );
        }
        err
    })?;
    // Freeze first so bank.hash() below reflects the final state.
    bank.freeze(); if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
    }
    cache_block_meta(bank, cache_block_meta_sender);
    Ok(())
}
/// Messages sent over a `TransactionStatusSender`'s channel.
// `Batch` is much larger than `Freeze`, hence the allow below.
#[allow(clippy::large_enum_variant)]
pub enum TransactionStatusMessage {
    /// A batch of processed transactions with statuses and balance sets.
    Batch(TransactionStatusBatch),
    /// Notification that the bank for the given slot was frozen.
    Freeze(Slot),
}
/// Per-batch payload of `TransactionStatusMessage::Batch`: the bank the
/// transactions ran against plus parallel vectors describing each
/// transaction's execution details, balances, and position in the block.
pub struct TransactionStatusBatch {
    pub bank: Arc<Bank>,
    pub transactions: Vec<SanitizedTransaction>,
    /// `None` for transactions that were not executed.
    pub execution_results: Vec<Option<TransactionExecutionDetails>>,
    pub balances: TransactionBalancesSet,
    pub token_balances: TransactionTokenBalancesSet,
    pub rent_debits: Vec<RentDebits>,
    pub transaction_indexes: Vec<usize>,
}
/// Cloneable handle for publishing `TransactionStatusMessage`s to whatever
/// receiver was paired with `sender`.
#[derive(Clone)]
pub struct TransactionStatusSender {
    pub sender: Sender<TransactionStatusMessage>,
}
impl TransactionStatusSender {
    /// Converts `execution_results` into per-transaction detail options and
    /// forwards the whole batch over the channel. A failed send is only
    /// traced, never propagated.
    pub fn send_transaction_status_batch(
        &self,
        bank: Arc<Bank>,
        transactions: Vec<SanitizedTransaction>,
        execution_results: Vec<TransactionExecutionResult>,
        balances: TransactionBalancesSet,
        token_balances: TransactionTokenBalancesSet,
        rent_debits: Vec<RentDebits>,
        transaction_indexes: Vec<usize>,
    ) {
        let slot = bank.slot();
        // Executed transactions keep their details; unexecuted map to None.
        let execution_results = execution_results
            .into_iter()
            .map(|result| match result {
                TransactionExecutionResult::Executed { details, .. } => Some(details),
                TransactionExecutionResult::NotExecuted(_) => None,
            })
            .collect();
        let message = TransactionStatusMessage::Batch(TransactionStatusBatch {
            bank,
            transactions,
            execution_results,
            balances,
            token_balances,
            rent_debits,
            transaction_indexes,
        });
        if let Err(e) = self.sender.send(message) {
            trace!(
                "Slot {} transaction_status send batch failed: {:?}",
                slot,
                e
            );
        }
    }

    /// Notifies the receiver that `bank`'s slot has been frozen. A failed
    /// send is only traced.
    pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) {
        let slot = bank.slot();
        let freeze_message = TransactionStatusMessage::Freeze(slot);
        if let Err(e) = self.sender.send(freeze_message) {
            trace!(
                "Slot {} transaction_status send freeze message failed: {:?}",
                slot,
                e
            );
        }
    }
}
/// Channel over which frozen banks are forwarded for block-meta caching
/// (see `cache_block_meta()`).
pub type CacheBlockMetaSender = Sender<Arc<Bank>>;
/// Forwards `bank` over the block-meta cache channel, if one was provided.
/// A failed send is logged as a warning rather than propagated.
pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) {
    if let Some(sender) = cache_block_meta_sender {
        if let Err(err) = sender.send(bank.clone()) {
            warn!("cache_block_meta_sender failed: {:?}", err);
        }
    }
}
/// Writes `slot` to the blockstore as a full slot of ticks chained from
/// `last_entry_hash`, covering every slot between `parent_slot` and `slot`,
/// and returns the hash of the final entry written.
pub fn fill_blockstore_slot_with_ticks(
    blockstore: &Blockstore,
    ticks_per_slot: u64,
    slot: u64,
    parent_slot: u64,
    last_entry_hash: Hash,
) -> Hash {
    // The parent must precede `slot` (slot 0 may parent itself).
    assert!(slot.saturating_sub(1) >= parent_slot);
    let num_slots = (slot - parent_slot).max(1);
    let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
    let final_entry_hash = entries.last().unwrap().hash;
    blockstore
        .write_entries(
            slot,
            0,
            0,
            ticks_per_slot,
            Some(parent_slot),
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();
    final_entry_hash
}
/// Runs both accounts-data size checks: the per-block delta limit first,
/// then the total size limit over `execution_results`.
fn check_accounts_data_size<'a>(
    bank: &Bank,
    execution_results: impl IntoIterator<Item = &'a TransactionExecutionResult>,
) -> Result<()> {
    check_accounts_data_block_size(bank)
        .and_then(|_| check_accounts_data_total_size(bank, execution_results))
}
/// When the per-block accounts-data cap feature is active, errors if this
/// bank's on-chain accounts-data size delta exceeds the block limit.
fn check_accounts_data_block_size(bank: &Bank) -> Result<()> {
    if bank
        .feature_set
        .is_active(&feature_set::cap_accounts_data_size_per_block::id())
    {
        // The limit must fit in i64 for the comparison below to be sound.
        debug_assert!(MAX_ACCOUNT_DATA_BLOCK_LEN <= i64::MAX as u64);
        if bank.load_accounts_data_size_delta_on_chain() > MAX_ACCOUNT_DATA_BLOCK_LEN as i64 {
            return Err(TransactionError::WouldExceedAccountDataBlockLimit);
        }
    }
    Ok(())
}
/// When the total accounts-data cap feature is active, surfaces the first
/// `MaxAccountsDataSizeExceeded` instruction error found among
/// `execution_results`, if any.
fn check_accounts_data_total_size<'a>(
    bank: &Bank,
    execution_results: impl IntoIterator<Item = &'a TransactionExecutionResult>,
) -> Result<()> {
    if !bank
        .feature_set
        .is_active(&feature_set::cap_accounts_data_len::id())
    {
        return Ok(());
    }
    execution_results
        .into_iter()
        .map(|execution_result| execution_result.flattened_result())
        .find(|result| {
            matches!(
                result,
                Err(TransactionError::InstructionError(
                    _,
                    InstructionError::MaxAccountsDataSizeExceeded
                ))
            )
        })
        .unwrap_or(Ok(()))
}
#[cfg(test)]
pub mod tests {
use {
super::*,
crate::{
blockstore_options::{AccessType, BlockstoreOptions},
genesis_utils::{
create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
},
},
matches::assert_matches,
rand::{thread_rng, Rng},
solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
solana_program_runtime::{
accounts_data_meter::MAX_ACCOUNTS_DATA_LEN, invoke_context::InvokeContext,
},
solana_runtime::{
genesis_utils::{
self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
},
vote_account::VoteAccount,
},
solana_sdk::{
account::{AccountSharedData, WritableAccount},
epoch_schedule::EpochSchedule,
hash::Hash,
instruction::InstructionError,
native_token::LAMPORTS_PER_SOL,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction::{SystemError, MAX_PERMITTED_DATA_LENGTH},
system_transaction,
transaction::{Transaction, TransactionError},
},
solana_vote_program::{
self,
vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY},
vote_transaction,
},
std::{collections::BTreeSet, sync::RwLock},
trees::tr,
};
    /// Runs `test_process_blockstore` with the requested blockstore
    /// `access_type`: primary variants reuse the provided handle directly,
    /// while `Secondary` opens a fresh secondary handle on the same ledger.
    fn test_process_blockstore_with_custom_options(
        genesis_config: &GenesisConfig,
        blockstore: &Blockstore,
        opts: &ProcessOptions,
        access_type: AccessType,
    ) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
        match access_type {
            AccessType::Primary | AccessType::PrimaryForMaintenance => {
                test_process_blockstore(genesis_config, blockstore, opts)
            }
            AccessType::Secondary => {
                let secondary_blockstore = Blockstore::open_with_options(
                    blockstore.ledger_path(),
                    BlockstoreOptions {
                        access_type,
                        ..BlockstoreOptions::default()
                    },
                )
                .expect("Unable to open access to blockstore");
                test_process_blockstore(genesis_config, &secondary_blockstore, opts)
            }
        }
    }
#[test]
fn test_process_blockstore_with_missing_hashes() {
do_test_process_blockstore_with_missing_hashes(AccessType::Primary);
}
#[test]
fn test_process_blockstore_with_missing_hashes_secondary_access() {
do_test_process_blockstore_with_missing_hashes(AccessType::Secondary);
}
    /// Writes slot 1 whose ticks carry one fewer hash per tick than genesis
    /// requires, so PoH verification rejects the slot. Only primary access
    /// may then mark the slot dead in the blockstore.
    fn do_test_process_blockstore_with_missing_hashes(blockstore_access_type: AccessType) {
        solana_logger::setup();
        let hashes_per_tick = 2;
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let parent_slot = 0;
        let slot = 1;
        // hashes_per_tick - 1: deliberately short of the configured count.
        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );
        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                poh_verify: true,
                ..ProcessOptions::default()
            },
            blockstore_access_type.clone(),
        );
        // Slot 1 failed verification, so only the genesis slot is frozen.
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
        let dead_slots: Vec<Slot> = blockstore.dead_slots_iterator(0).unwrap().collect();
        match blockstore_access_type {
            AccessType::Secondary => {
                // Secondary access cannot write, so nothing was marked dead.
                assert_eq!(dead_slots.len(), 0);
            }
            AccessType::Primary | AccessType::PrimaryForMaintenance => {
                assert_eq!(&dead_slots, &[1]);
            }
        }
    }
#[test]
fn test_process_blockstore_with_invalid_slot_tick_count() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let parent_slot = 0;
let slot = 1;
let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
assert_matches!(
blockstore.write_entries(
slot,
0,
0,
ticks_per_slot,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
entries,
0,
),
Ok(_)
);
let (bank_forks, ..) = test_process_blockstore(
&genesis_config,
&blockstore,
&ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
},
);
assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
let _last_slot2_entry_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
let (bank_forks, ..) = test_process_blockstore(
&genesis_config,
&blockstore,
&ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
},
);
assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 2]);
assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 2);
assert_eq!(bank_forks.read().unwrap().root(), 0);
}
#[test]
fn test_process_blockstore_with_slot_with_trailing_entry() {
solana_logger::setup();
let GenesisConfigInfo {
mint_keypair,
genesis_config,
..
} = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
let trailing_entry = {
let keypair = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
next_entry(&blockhash, 1, vec![tx])
};
entries.push(trailing_entry);
let parent_slot = 0;
let slot = 1;
assert_matches!(
blockstore.write_entries(
slot,
0,
0,
ticks_per_slot + 1,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
entries,
0,
),
Ok(_)
);
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
}
#[test]
fn test_process_blockstore_with_incomplete_slot() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
{
let parent_slot = 0;
let slot = 1;
let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
blockhash = entries.last().unwrap().hash;
entries.pop();
assert_matches!(
blockstore.write_entries(
slot,
0,
0,
ticks_per_slot,
Some(parent_slot),
false,
&Arc::new(Keypair::new()),
entries,
0,
),
Ok(_)
);
}
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 3]);
}
#[test]
fn test_process_blockstore_with_two_forks_and_squash() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let mut last_entry_hash = blockhash;
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let last_slot1_entry_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
last_entry_hash = fill_blockstore_slot_with_ticks(
&blockstore,
ticks_per_slot,
2,
1,
last_slot1_entry_hash,
);
let last_fork1_entry_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
&blockstore,
ticks_per_slot,
4,
1,
last_slot1_entry_hash,
);
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blockstore.set_roots(vec![0, 1, 4].iter()).unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);
assert!(&bank_forks[4]
.parents()
.iter()
.map(|bank| bank.slot())
.next()
.is_none());
verify_fork_infos(&bank_forks);
assert_eq!(bank_forks.root(), 4);
}
#[test]
fn test_process_blockstore_with_two_forks() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let mut last_entry_hash = blockhash;
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let last_slot1_entry_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
last_entry_hash = fill_blockstore_slot_with_ticks(
&blockstore,
ticks_per_slot,
2,
1,
last_slot1_entry_hash,
);
let last_fork1_entry_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
&blockstore,
ticks_per_slot,
4,
1,
last_slot1_entry_hash,
);
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blockstore.set_roots(vec![0, 1].iter()).unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
assert_eq!(bank_forks.working_bank().slot(), 4);
assert_eq!(bank_forks.root(), 1);
assert_eq!(
&bank_forks[3]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[2, 1]
);
assert_eq!(
&bank_forks[4]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[1]
);
assert_eq!(bank_forks.root(), 1);
verify_fork_infos(&bank_forks);
}
#[test]
fn test_process_blockstore_with_dead_slot() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let slot1_blockhash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
blockstore.set_dead_slot(2).unwrap();
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
let (bank_forks, ..) =
test_process_blockstore(&genesis_config, &blockstore, &ProcessOptions::default());
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]);
assert_eq!(bank_forks.working_bank().slot(), 3);
assert_eq!(
&bank_forks[3]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[1, 0]
);
verify_fork_infos(&bank_forks);
}
#[test]
fn test_process_blockstore_with_dead_child() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let slot1_blockhash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
let slot2_blockhash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
blockstore.set_dead_slot(4).unwrap();
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
let (bank_forks, ..) =
test_process_blockstore(&genesis_config, &blockstore, &ProcessOptions::default());
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]);
assert_eq!(bank_forks.working_bank().slot(), 3);
assert_eq!(
&bank_forks[3]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[1, 0]
);
assert_eq!(
&bank_forks[2]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[1, 0]
);
assert_eq!(bank_forks.working_bank().slot(), 3);
verify_fork_infos(&bank_forks);
}
#[test]
fn test_root_with_all_dead_children() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
blockstore.set_dead_slot(1).unwrap();
blockstore.set_dead_slot(2).unwrap();
let (bank_forks, ..) =
test_process_blockstore(&genesis_config, &blockstore, &ProcessOptions::default());
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
verify_fork_infos(&bank_forks);
}
#[test]
fn test_process_blockstore_epoch_boundary_root() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let mut last_entry_hash = blockhash;
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new());
let last_slot = epoch_schedule.get_last_slot_in_epoch(1);
for i in 1..=last_slot + 1 {
last_entry_hash = fill_blockstore_slot_with_ticks(
&blockstore,
ticks_per_slot,
i,
i - 1,
last_entry_hash,
);
}
let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
blockstore.set_roots(rooted_slots.iter()).unwrap();
blockstore
.set_roots(std::iter::once(&(last_slot + 1)))
.unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);
assert!(&bank_forks[last_slot + 1]
.parents()
.iter()
.map(|bank| bank.slot())
.next()
.is_none());
}
#[test]
fn test_first_err() {
assert_eq!(first_err(&[Ok(())]), Ok(()));
assert_eq!(
first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
Err(TransactionError::AlreadyProcessed)
);
assert_eq!(
first_err(&[
Ok(()),
Err(TransactionError::AlreadyProcessed),
Err(TransactionError::AccountInUse)
]),
Err(TransactionError::AlreadyProcessed)
);
assert_eq!(
first_err(&[
Ok(()),
Err(TransactionError::AccountInUse),
Err(TransactionError::AlreadyProcessed)
]),
Err(TransactionError::AccountInUse)
);
assert_eq!(
first_err(&[
Err(TransactionError::AccountInUse),
Ok(()),
Err(TransactionError::AlreadyProcessed)
]),
Err(TransactionError::AccountInUse)
);
}
#[test]
fn test_process_empty_entry_is_registered() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(2);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair = Keypair::new();
let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash());
let tx = system_transaction::transfer(
&mint_keypair,
&keypair.pubkey(),
1,
slot_entries.last().unwrap().hash,
);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::BlockhashNotFound)
);
process_entries_for_tests(&bank, slot_entries, true, None, None).unwrap();
assert_eq!(bank.process_transaction(&tx), Ok(()));
}
#[test]
fn test_process_ledger_simple() {
solana_logger::setup();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let mint = 100;
let hashes_per_tick = 10;
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(mint, &leader_pubkey, 50);
genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
let (ledger_path, mut last_entry_hash) =
create_new_tmp_ledger_auto_delete!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path);
let deducted_from_mint = 3;
let mut entries = vec![];
let blockhash = genesis_config.hash();
for _ in 0..deducted_from_mint {
let keypair = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
entries.push(entry);
let keypair2 = Keypair::new();
let tx =
system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
entries.push(entry);
}
let remaining_hashes = hashes_per_tick - entries.len() as u64;
let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
entries.push(tick_entry);
entries.extend(create_ticks(
genesis_config.ticks_per_slot - 1,
genesis_config.poh_config.hashes_per_tick.unwrap(),
last_entry_hash,
));
let last_blockhash = entries.last().unwrap().hash;
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
blockstore
.write_entries(
1,
0,
0,
genesis_config.ticks_per_slot,
None,
true,
&Arc::new(Keypair::new()),
entries,
0,
)
.unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
assert_eq!(bank_forks.root(), 0);
assert_eq!(bank_forks.working_bank().slot(), 1);
let bank = bank_forks[1].clone();
assert_eq!(
bank.get_balance(&mint_keypair.pubkey()),
mint - deducted_from_mint
);
assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot);
assert_eq!(bank.last_blockhash(), last_blockhash);
}
#[test]
fn test_process_ledger_with_one_tick_per_slot() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(123);
genesis_config.ticks_per_slot = 1;
let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
let bank = bank_forks[0].clone();
assert_eq!(bank.tick_height(), 1);
}
#[test]
fn test_process_ledger_options_full_leader_cache() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let opts = ProcessOptions {
full_leader_cache: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let (_bank_forks, leader_schedule) =
test_process_blockstore(&genesis_config, &blockstore, &opts);
assert_eq!(leader_schedule.max_schedules(), std::usize::MAX);
}
#[test]
fn test_process_ledger_options_entry_callback() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(100);
let (ledger_path, last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let blockhash = genesis_config.hash();
let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()];
let tx = system_transaction::transfer(&mint_keypair, &keypairs[0].pubkey(), 1, blockhash);
let entry_1 = next_entry(&last_entry_hash, 1, vec![tx]);
let tx = system_transaction::transfer(&mint_keypair, &keypairs[1].pubkey(), 1, blockhash);
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
let mut entries = vec![entry_1, entry_2];
entries.extend(create_ticks(
genesis_config.ticks_per_slot,
0,
last_entry_hash,
));
blockstore
.write_entries(
1,
0,
0,
genesis_config.ticks_per_slot,
None,
true,
&Arc::new(Keypair::new()),
entries,
0,
)
.unwrap();
let callback_counter: Arc<RwLock<usize>> = Arc::default();
let entry_callback = {
let counter = callback_counter.clone();
let pubkeys: Vec<Pubkey> = keypairs.iter().map(|k| k.pubkey()).collect();
Arc::new(move |bank: &Bank| {
let mut counter = counter.write().unwrap();
assert_eq!(bank.get_balance(&pubkeys[*counter]), 1);
assert_eq!(bank.get_balance(&pubkeys[*counter + 1]), 0);
*counter += 1;
})
};
let opts = ProcessOptions {
entry_callback: Some(entry_callback),
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
test_process_blockstore(&genesis_config, &blockstore, &opts);
assert_eq!(*callback_counter.write().unwrap(), 2);
}
#[test]
fn test_process_entries_tick() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.tick_height(), 0);
let tick = next_entry(&genesis_config.hash(), 1, vec![]);
assert_eq!(
process_entries_for_tests(&bank, vec![tick], true, None, None),
Ok(())
);
assert_eq!(bank.tick_height(), 1);
}
#[test]
fn test_process_entries_2_entries_collision() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let blockhash = bank.last_blockhash();
let tx = system_transaction::transfer(
&mint_keypair,
&keypair1.pubkey(),
2,
bank.last_blockhash(),
);
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tx = system_transaction::transfer(
&mint_keypair,
&keypair2.pubkey(),
2,
bank.last_blockhash(),
);
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
assert_eq!(
process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None),
Ok(())
);
assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
assert_eq!(bank.last_blockhash(), blockhash);
}
#[test]
// Transactions in consecutive entries both write-lock keypair1's account;
// serialized replay must still produce the expected balances.
fn test_process_entries_2_txes_collision() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
// Fund the two sending accounts.
assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
1,
bank.last_blockhash(),
)],
);
// Second entry pays keypair3 from keypair2 and, colliding with the first
// entry, sends more of keypair1's lamports back to the mint.
let entry_2_to_3_mint_to_1 = next_entry(
&entry_1_to_mint.hash,
1,
vec![
system_transaction::transfer(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
), system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
2,
bank.last_blockhash(),
), ],
);
assert_eq!(
process_entries_for_tests(
&bank,
vec![entry_1_to_mint, entry_2_to_3_mint_to_1],
false,
None,
None,
),
Ok(())
);
// keypair1: 4 - 1 - 2 = 1; keypair2: 4 - 2 = 2; keypair3 received 2.
assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
}
#[test]
// Like the collision test above, but the first entry also carries a
// transaction with a bogus blockhash. Replay fails overall, and the
// account locks taken for the failed batch must be released afterwards.
fn test_process_entries_2_txes_collision_and_error() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let keypair4 = Keypair::new();
assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![
system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
1,
bank.last_blockhash(),
),
// Hash::default() is not a recent blockhash, so this tx errors.
system_transaction::transfer(
&keypair4,
&keypair4.pubkey(),
1,
Hash::default(), ),
],
);
let entry_2_to_3_mint_to_1 = next_entry(
&entry_1_to_mint.hash,
1,
vec![
system_transaction::transfer(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
), system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
2,
bank.last_blockhash(),
), ],
);
assert!(process_entries_for_tests(
&bank,
vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()],
false,
None,
None,
)
.is_err());
// The first entry's good transfer landed (keypair1: 4 - 1 = 3); the
// second entry's effects did not (keypair2 still holds 4).
assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);
// Verify all account locks were released: re-locking each entry's
// transactions must succeed.
let txs1 = entry_1_to_mint.transactions;
let txs2 = entry_2_to_3_mint_to_1.transactions;
let batch1 = bank.prepare_entry_batch(txs1).unwrap();
for result in batch1.lock_results() {
assert!(result.is_ok());
}
// Drop batch1 so its locks can't conflict with batch2 below.
drop(batch1);
let batch2 = bank.prepare_entry_batch(txs2).unwrap();
for result in batch2.lock_results() {
assert!(result.is_ok());
}
}
#[test]
// A third entry whose two transactions both write-lock keypair1 conflicts
// with itself; replay must fail, leaving only the first two entries'
// effects applied.
fn test_process_entries_2nd_entry_collision_with_self_and_error() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
1,
bank.last_blockhash(),
)],
);
let entry_2_to_3_and_1_to_mint = next_entry(
&entry_1_to_mint.hash,
1,
vec![
system_transaction::transfer(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
), system_transaction::transfer(
&keypair1,
&mint_keypair.pubkey(),
2,
bank.last_blockhash(),
), ],
);
// Both transactions below debit keypair1 — an intra-entry lock conflict.
let entry_conflict_itself = next_entry(
&entry_2_to_3_and_1_to_mint.hash,
1,
vec![
system_transaction::transfer(
&keypair1,
&keypair3.pubkey(),
1,
bank.last_blockhash(),
),
system_transaction::transfer(
&keypair1,
&keypair2.pubkey(),
1,
bank.last_blockhash(),
), ],
);
assert!(process_entries_for_tests(
&bank,
vec![
entry_1_to_mint,
entry_2_to_3_and_1_to_mint,
entry_conflict_itself,
],
false,
None,
None,
)
.is_err());
// Only the first two entries were applied:
// keypair1: 5 - 1 - 2 = 2; keypair2: 4 - 2 = 2; keypair3 received 2.
assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
}
#[test]
// Two entries whose transactions touch disjoint accounts replay cleanly
// (they are eligible for parallel execution) without affecting each other.
fn test_process_entries_2_entries_par() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let keypair4 = Keypair::new();
// Fund the two independent senders.
let tx = system_transaction::transfer(
&mint_keypair,
&keypair1.pubkey(),
1,
bank.last_blockhash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let tx = system_transaction::transfer(
&mint_keypair,
&keypair2.pubkey(),
1,
bank.last_blockhash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let blockhash = bank.last_blockhash();
// keypair1 -> keypair3 and keypair2 -> keypair4: no shared accounts.
let tx =
system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash());
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tx =
system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash());
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
assert_eq!(
process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None),
Ok(())
);
assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
// No ticks were processed, so the blockhash is unchanged.
assert_eq!(bank.last_blockhash(), blockhash);
}
#[test]
// Entries of independent transfers, each with an appended create_account
// that targets an account stored beforehand — presumably failing because
// the account already exists (hence `_with_error`; TODO confirm against
// the system program) — should still replay to Ok overall.
fn test_process_entry_tx_random_execution_with_error() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1_000_000_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
const NUM_TRANSFERS_PER_ENTRY: usize = 8;
const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
// First half of the keypairs are senders, second half receivers.
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
for keypair in &keypairs {
bank.transfer(1, &mint_keypair, &keypair.pubkey())
.expect("funding failed");
}
let mut hash = bank.last_blockhash();
// Pre-store an account so the create_account below collides with it.
let present_account_key = Keypair::new();
let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
bank.store_account(&present_account_key.pubkey(), &present_account);
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY)
.map(|j| {
system_transaction::transfer(
&keypairs[i + j],
&keypairs[i + j + NUM_TRANSFERS].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>();
// Targets the pre-stored account above.
transactions.push(system_transaction::create_account(
&mint_keypair,
&present_account_key, bank.last_blockhash(),
1,
0,
&solana_sdk::pubkey::new_rand(),
));
next_entry_mut(&mut hash, 0, transactions)
})
.collect();
assert_eq!(
process_entries_for_tests(&bank, entries, true, None, None),
Ok(())
);
}
#[test]
// One large entry of pairwise transfers between disjoint account groups;
// after processing, odd-indexed accounts are drained into even-indexed ones.
fn test_process_entry_tx_random_execution_no_error() {
let entropy_multiplier: usize = 25;
let initial_lamports = 100;
let num_accounts = entropy_multiplier * 4;
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config((num_accounts + 1) as u64 * initial_lamports);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let mut keypairs: Vec<Keypair> = vec![];
for _ in 0..num_accounts {
let keypair = Keypair::new();
// Zero-lamport transfer first so the account exists, then fund it.
let create_account_tx = system_transaction::transfer(
&mint_keypair,
&keypair.pubkey(),
0,
bank.last_blockhash(),
);
assert_eq!(bank.process_transaction(&create_account_tx), Ok(()));
assert_matches!(
bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()),
Ok(_)
);
keypairs.push(keypair);
}
// In each group of four, account i+1 pays account i and i+3 pays i+2,
// so every transaction touches a disjoint pair of accounts.
let mut tx_vector: Vec<Transaction> = vec![];
for i in (0..num_accounts).step_by(4) {
tx_vector.append(&mut vec![
system_transaction::transfer(
&keypairs[i + 1],
&keypairs[i].pubkey(),
initial_lamports,
bank.last_blockhash(),
),
system_transaction::transfer(
&keypairs[i + 3],
&keypairs[i + 2].pubkey(),
initial_lamports,
bank.last_blockhash(),
),
]);
}
let entry = next_entry(&bank.last_blockhash(), 1, tx_vector);
assert_eq!(
process_entries_for_tests(&bank, vec![entry], true, None, None),
Ok(())
);
bank.squash();
// Even indices received a second initial_lamports; odd ones were drained.
for (i, keypair) in keypairs.iter().enumerate() {
if i % 2 == 0 {
assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports);
} else {
assert_eq!(bank.get_balance(&keypair.pubkey()), 0);
}
}
}
#[test]
// Entries interleaved with a tick, where transactions reference a
// blockhash that is still recent but no longer the bank's latest.
fn test_process_entries_2_entries_tick() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let keypair4 = Keypair::new();
let tx = system_transaction::transfer(
&mint_keypair,
&keypair1.pubkey(),
1,
bank.last_blockhash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let tx = system_transaction::transfer(
&mint_keypair,
&keypair2.pubkey(),
1,
bank.last_blockhash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let blockhash = bank.last_blockhash();
// Register ticks until the bank's blockhash rotates past `blockhash`.
while blockhash == bank.last_blockhash() {
bank.register_tick(&Hash::default());
}
// entry_1's transaction uses the older (but still recent) blockhash.
let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash);
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tick = next_entry(&entry_1.hash, 1, vec![]);
let tx =
system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash());
let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
assert_eq!(
process_entries_for_tests(
&bank,
vec![entry_1, tick, entry_2.clone()],
true,
None,
None
),
Ok(())
);
assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
// keypair2 was drained to zero by entry_1, so a further transfer from it
// fails to load the account.
let tx =
system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash());
let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
assert_eq!(
process_entries_for_tests(&bank, vec![entry_3], true, None, None),
Err(TransactionError::AccountNotFound)
);
}
#[test]
// Checks which failures are recorded as "processed": an InstructionError
// is recorded (retry => AlreadyProcessed) while BlockhashNotFound is not
// (retry => the same error again).
fn test_update_transaction_statuses() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(11_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let pubkey = solana_sdk::pubkey::new_rand();
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(bank.get_balance(&pubkey), 1_000);
// Overspending fails with an InstructionError...
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
Err(TransactionError::InstructionError(
0,
SystemError::ResultWithNegativeLamports.into(),
))
);
// ...and its signature was recorded, so the identical retry is rejected.
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
Err(TransactionError::AlreadyProcessed)
);
// A transaction with an unknown blockhash is NOT recorded as processed:
// submitting it twice yields BlockhashNotFound both times.
let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
let signature = tx.signatures[0];
assert_eq!(
bank.process_transaction(&tx).map(|_| signature),
Err(TransactionError::BlockhashNotFound)
);
assert_eq!(
bank.process_transaction(&tx).map(|_| signature),
Err(TransactionError::BlockhashNotFound)
);
}
#[test]
// An entry whose transactions conflict on account locks fails replay with
// AccountInUse; the lock-failed transaction must not be marked processed,
// so it can still succeed when submitted on its own afterwards.
fn test_update_transaction_statuses_fail() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(11_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
// Both transactions debit the mint, so they conflict within one entry.
let success_tx = system_transaction::transfer(
&mint_keypair,
&keypair1.pubkey(),
1,
bank.last_blockhash(),
);
let fail_tx = system_transaction::transfer(
&mint_keypair,
&keypair2.pubkey(),
2,
bank.last_blockhash(),
);
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![
success_tx,
fail_tx.clone(), ],
);
assert_eq!(
process_entries_for_tests(&bank, vec![entry_1_to_mint], false, None, None),
Err(TransactionError::AccountInUse)
);
// The lock failure did not record fail_tx, so it still goes through.
assert_eq!(bank.process_transaction(&fail_tx), Ok(()));
}
#[test]
fn test_halt_at_slot_starting_snapshot_root() {
    // Halting replay at slot 0 must still leave a bank for that slot in
    // bank_forks, even though slot 1 is also rooted in the blockstore.
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);

    // Blockstore containing two rooted slots: 0 -> 1.
    let forks = tr(0) / tr(1);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.add_tree(
        forks,
        false,
        true,
        genesis_config.ticks_per_slot,
        genesis_config.hash(),
    );
    blockstore.set_roots([0, 1].iter()).unwrap();

    // Replay the blockstore, but stop at slot 0.
    let opts = ProcessOptions {
        poh_verify: true,
        halt_at_slot: Some(0),
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, &opts);

    // The halt slot itself should have produced a bank.
    assert!(bank_forks.read().unwrap().get(0).is_some());
}
#[test]
// Builds a linear chain of tick-only slots 1..=6, roots slots 3 and 5 in
// the blockstore, manually replays slots 0 and 1, then verifies that
// process_blockstore_from_root advances the root to 5 and freezes 5 and 6.
fn test_process_blockstore_from_root() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(123);
let ticks_per_slot = 1;
genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
// Chain slots 1..=6, each the child of the previous slot.
let mut last_hash = blockhash;
for i in 0..6 {
last_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
}
blockstore.set_roots(vec![3, 5].iter()).unwrap();
let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config));
let bank0 = bank_forks.get(0).unwrap();
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
let recyclers = VerifyRecyclers::default();
// Replay slots 0 and 1 by hand and root slot 1, so that
// process_blockstore_from_root starts from a non-genesis root.
process_bank_0(&bank0, &blockstore, &opts, &recyclers, None);
let bank1 = bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
confirm_full_slot(
&blockstore,
&bank1,
&opts,
&recyclers,
&mut ConfirmationProgress::new(bank0.last_blockhash()),
None,
None,
&mut ExecuteTimings::default(),
)
.unwrap();
bank_forks.set_root(
1,
&solana_runtime::accounts_background_service::AbsRequestSender::default(),
None,
);
let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);
let bank_forks = RwLock::new(bank_forks);
process_blockstore_from_root(
&blockstore,
&bank_forks,
&leader_schedule_cache,
&opts,
None,
None,
&AbsRequestSender::default(),
)
.unwrap();
let bank_forks = bank_forks.read().unwrap();
// Replay should have advanced the root to the last blockstore root (5),
// leaving only slots 5 and 6 alive, with 6 as the working bank and 5 as
// its sole recorded parent.
assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
assert_eq!(bank_forks.working_bank().slot(), 6);
assert_eq!(bank_forks.root(), 5);
assert_eq!(
&bank_forks[6]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[5]
);
verify_fork_infos(&bank_forks);
}
#[test]
#[ignore]
// Long-running stress test (ignored by default): endlessly pays and then
// refunds batches of transfers across successive child banks, squashing a
// root every 16 iterations. Never terminates on its own.
fn test_process_entries_stress() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1_000_000_000);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
const NUM_TRANSFERS_PER_ENTRY: usize = 8;
const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
// First half of the keypairs pay the second half, then get refunded.
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
for keypair in &keypairs {
bank.transfer(1, &mint_keypair, &keypair.pubkey())
.expect("funding failed");
}
// Pre-existing account targeted by the create_account calls below.
let present_account_key = Keypair::new();
let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
bank.store_account(&present_account_key.pubkey(), &present_account);
let mut i = 0;
let mut hash = bank.last_blockhash();
let mut root: Option<Arc<Bank>> = None;
loop {
// "Paying" phase: keypairs[i] -> keypairs[i + NUM_TRANSFERS].
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
next_entry_mut(&mut hash, 0, {
let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
system_transaction::transfer(
&keypairs[i],
&keypairs[i + NUM_TRANSFERS].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>();
transactions.push(system_transaction::create_account(
&mint_keypair,
&present_account_key, bank.last_blockhash(),
100,
100,
&solana_sdk::pubkey::new_rand(),
));
transactions
})
})
.collect();
info!("paying iteration {}", i);
process_entries_for_tests(&bank, entries, true, None, None).expect("paying failed");
// "Refunding" phase: the second half pays everything back.
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
next_entry_mut(
&mut hash,
0,
(i..i + NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
system_transaction::transfer(
&keypairs[i + NUM_TRANSFERS],
&keypairs[i].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>(),
)
})
.collect();
info!("refunding iteration {}", i);
process_entries_for_tests(&bank, entries, true, None, None).expect("refunding failed");
// Finish the slot with a full complement of ticks.
process_entries_for_tests(
&bank,
(0..bank.ticks_per_slot())
.map(|_| next_entry_mut(&mut hash, 1, vec![]))
.collect::<Vec<_>>(),
true,
None,
None,
)
.expect("process ticks failed");
// Periodically squash an older bank to exercise rooting.
if i % 16 == 0 {
if let Some(old_root) = root {
old_root.squash();
}
root = Some(bank.clone());
}
i += 1;
// Advance to a child bank at a randomly chosen nearby slot.
bank = Arc::new(Bank::new_from_parent(
&bank,
&Pubkey::default(),
bank.slot() + thread_rng().gen_range(1, 3),
));
}
}
#[test]
fn test_process_ledger_ticks_ordering() {
    // A transfer referencing the blockhash produced by a full slot of
    // ticks must be processable once those ticks have been replayed first.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(100);
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    let genesis_hash = genesis_config.hash();
    let keypair = Keypair::new();

    // One slot's worth of ticks, followed by a transfer entry that uses
    // the final tick's hash as its recent blockhash.
    let mut entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash);
    let new_blockhash = entries.last().unwrap().hash;
    let transfer_tx =
        system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash);
    entries.push(next_entry(&new_blockhash, 1, vec![transfer_tx]));

    process_entries_for_tests(&bank0, entries, true, None, None).unwrap();
    assert_eq!(bank0.get_balance(&keypair.pubkey()), 1)
}
// Constructs a throwaway bank over `account_paths` just to read back the
// epoch schedule derived from `genesis_config`.
fn get_epoch_schedule(
genesis_config: &GenesisConfig,
account_paths: Vec<PathBuf>,
) -> EpochSchedule {
let bank = Bank::new_with_paths_for_tests(
genesis_config,
account_paths,
None,
None,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
false,
None,
);
*bank.epoch_schedule()
}
/// Returns the slots of all frozen banks in `bank_forks`, in ascending order.
fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
    // `Slot` is `Copy`, so `copied()` is the idiomatic form (avoids the
    // clippy `clone_on_copy`-style `cloned()` call).
    let mut slots: Vec<_> = bank_forks.frozen_banks().keys().copied().collect();
    slots.sort_unstable();
    slots
}
/// Asserts that every frozen bank in `bank_forks` — and every bank on its
/// parent chain — is recorded in the forks structure and is itself frozen.
fn verify_fork_infos(bank_forks: &BankForks) {
    for slot in frozen_bank_slots(bank_forks) {
        let head_bank = &bank_forks[slot];
        // Check the full lineage: all parents plus the head bank itself.
        let mut lineage = head_bank.parents();
        lineage.push(head_bank.clone());
        for ancestor in lineage {
            let stored_bank = &bank_forks[ancestor.slot()];
            assert_eq!(stored_bank.slot(), ancestor.slot());
            assert!(stored_bank.is_frozen());
        }
    }
}
#[test]
// get_first_error should report the error and signature of the first
// failing transaction in a batch, in batch order.
fn test_get_first_error() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(1_000_000_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let present_account_key = Keypair::new();
let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
bank.store_account(&present_account_key.pubkey(), &present_account);
// First tx: the fee-payer account was never funded -> AccountNotFound.
let keypair = Keypair::new();
let account_not_found_tx = system_transaction::transfer(
&keypair,
&solana_sdk::pubkey::new_rand(),
42,
bank.last_blockhash(),
);
let account_not_found_sig = account_not_found_tx.signatures[0];
// Second tx: unknown blockhash, also fails.
let invalid_blockhash_tx = system_transaction::transfer(
&mint_keypair,
&solana_sdk::pubkey::new_rand(),
42,
Hash::default(),
);
let txs = vec![account_not_found_tx, invalid_blockhash_tx];
let batch = bank.prepare_batch_for_tests(txs);
let (
TransactionResults {
fee_collection_results,
..
},
_balances,
) = batch.bank().load_execute_and_commit_transactions(
&batch,
MAX_PROCESSING_AGE,
false,
false,
false,
false,
&mut ExecuteTimings::default(),
None,
);
// The first error in batch order belongs to account_not_found_tx.
let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();
assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
assert_eq!(signature, account_not_found_sig);
}
#[test]
// Only votes that execute successfully should be forwarded on the replay
// vote channel: every third validator sends a valid vote, the others send
// a vote signed by the wrong authorized voter or for a nonexistent slot.
fn test_replay_vote_sender() {
let validator_keypairs: Vec<_> =
(0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
let GenesisConfigInfo {
genesis_config,
voting_keypair: _,
..
} = create_genesis_config_with_vote_accounts(
1_000_000_000,
&validator_keypairs,
vec![100; validator_keypairs.len()],
);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
bank0.freeze();
let bank1 = Arc::new(Bank::new_from_parent(
&bank0,
&solana_sdk::pubkey::new_rand(),
1,
));
let bank_1_blockhash = bank1.last_blockhash();
let mut expected_successful_voter_pubkeys = BTreeSet::new();
let vote_txs: Vec<_> = validator_keypairs
.iter()
.enumerate()
.map(|(i, validator_keypairs)| {
if i % 3 == 0 {
// Well-formed vote; expected to succeed and be forwarded.
expected_successful_voter_pubkeys
.insert(validator_keypairs.vote_keypair.pubkey());
vote_transaction::new_vote_transaction(
vec![0],
bank0.hash(),
bank_1_blockhash,
&validator_keypairs.node_keypair,
&validator_keypairs.vote_keypair,
&validator_keypairs.vote_keypair,
None,
)
} else if i % 3 == 1 {
// Signed by a random keypair instead of the authorized voter.
vote_transaction::new_vote_transaction(
vec![0],
bank0.hash(),
bank_1_blockhash,
&validator_keypairs.node_keypair,
&validator_keypairs.vote_keypair,
&Keypair::new(),
None,
)
} else {
// Votes for a slot beyond the current tip.
vote_transaction::new_vote_transaction(
vec![bank1.slot() + 1],
bank0.hash(),
bank_1_blockhash,
&validator_keypairs.node_keypair,
&validator_keypairs.vote_keypair,
&validator_keypairs.vote_keypair,
None,
)
}
})
.collect();
let entry = next_entry(&bank_1_blockhash, 1, vote_txs);
let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded();
let _ =
process_entries_for_tests(&bank1, vec![entry], true, None, Some(&replay_vote_sender));
// Only the valid votes should have been forwarded on the channel.
let successes: BTreeSet<Pubkey> = replay_vote_receiver
.try_iter()
.map(|(vote_pubkey, ..)| vote_pubkey)
.collect();
assert_eq!(successes, expected_successful_voter_pubkeys);
}
// Writes slot `tx_landed_slot` to `blockstore`: a first entry carrying
// `vote_tx` (chained off `parent_blockhash`), followed by a full slot of
// ticks, signed by `slot_leader_keypair`.
fn make_slot_with_vote_tx(
blockstore: &Blockstore,
ticks_per_slot: u64,
tx_landed_slot: Slot,
parent_slot: Slot,
parent_blockhash: &Hash,
vote_tx: Transaction,
slot_leader_keypair: &Arc<Keypair>,
) {
// Build the tick entries off the vote entry's hash, then put the vote
// entry first so the slot starts with the vote.
let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
entries.insert(0, vote_entry);
blockstore
.write_entries(
tx_landed_slot,
0,
0,
ticks_per_slot,
Some(parent_slot),
true,
slot_leader_keypair,
entries,
0,
)
.unwrap();
}
// Drives a full supermajority-root scenario: builds a long main fork plus
// a minor fork, lands votes from a single validator holding all the stake,
// and checks that replaying the blockstore roots the expected slots.
fn run_test_process_blockstore_with_supermajority_root(
blockstore_root: Option<Slot>,
blockstore_access_type: AccessType,
) {
solana_logger::setup();
// Build the fork structure: a main fork extending past the lockout
// history beyond the expected root, with a minor fork branching off
// right after the expected root slot.
let starting_fork_slot = 5;
let mut main_fork = tr(starting_fork_slot);
let mut main_fork_ref = main_fork.root_mut().get_mut();
let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0);
let really_expected_root_slot = expected_root_slot + 1;
let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1;
let really_last_main_fork_slot = last_main_fork_slot + 1;
let last_minor_fork_slot = really_last_main_fork_slot + 1;
let minor_fork = tr(last_minor_fork_slot);
for slot in starting_fork_slot + 1..last_main_fork_slot {
if slot - 1 == expected_root_slot {
main_fork_ref.push_front(minor_fork.clone());
}
main_fork_ref.push_front(tr(slot));
main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut();
}
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork);
// A single validator holds all the vote stake, so its vote alone is a
// supermajority.
let validator_keypairs = ValidatorVoteKeypairs::new_rand();
let GenesisConfigInfo { genesis_config, .. } =
genesis_utils::create_genesis_config_with_vote_accounts(
10_000,
&[&validator_keypairs],
vec![100],
);
let ticks_per_slot = genesis_config.ticks_per_slot();
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash());
if let Some(blockstore_root) = blockstore_root {
blockstore
.set_roots(std::iter::once(&blockstore_root))
.unwrap();
}
let opts = ProcessOptions {
poh_verify: true,
accounts_db_test_hash_calculation: true,
..ProcessOptions::default()
};
// First replay: no votes have landed yet; we only need the tip's hashes
// in order to craft a vote for it.
let (bank_forks, ..) = test_process_blockstore_with_custom_options(
&genesis_config,
&blockstore,
&opts,
blockstore_access_type.clone(),
);
let bank_forks = bank_forks.read().unwrap();
let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash();
let last_vote_blockhash = bank_forks
.get(last_main_fork_slot - 1)
.unwrap()
.last_blockhash();
let slots: Vec<_> = (expected_root_slot..last_main_fork_slot).collect();
let vote_tx = vote_transaction::new_vote_transaction(
slots,
last_vote_bank_hash,
last_vote_blockhash,
&validator_keypairs.node_keypair,
&validator_keypairs.vote_keypair,
&validator_keypairs.vote_keypair,
None,
);
let leader_keypair = Arc::new(validator_keypairs.node_keypair);
// Land the vote in a new slot at the tip of the main fork.
make_slot_with_vote_tx(
&blockstore,
ticks_per_slot,
last_main_fork_slot,
last_main_fork_slot - 1,
&last_vote_blockhash,
vote_tx,
&leader_keypair,
);
// Second replay: the landed vote should root expected_root_slot and
// prune every slot below it.
let (bank_forks, ..) = test_process_blockstore_with_custom_options(
&genesis_config,
&blockstore,
&opts,
blockstore_access_type.clone(),
);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(bank_forks.root(), expected_root_slot);
assert_eq!(
bank_forks.frozen_banks().len() as u64,
last_minor_fork_slot - really_expected_root_slot + 1
);
// really_last_main_fork_slot doesn't exist in the blockstore yet, so it
// is skipped in the scan below.
for slot in 0..=last_minor_fork_slot {
if slot == really_last_main_fork_slot {
continue;
}
if slot >= expected_root_slot {
let bank = bank_forks.get(slot).unwrap();
assert_eq!(bank.slot(), slot);
assert!(bank.is_frozen());
} else {
assert!(bank_forks.get(slot).is_none());
}
}
// Land one more vote — this time for the slot that carried the first
// vote — which should advance the root one further on the next replay.
let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash();
let last_vote_blockhash = bank_forks
.get(last_main_fork_slot)
.unwrap()
.last_blockhash();
let slots: Vec<_> = vec![last_main_fork_slot];
let vote_tx = vote_transaction::new_vote_transaction(
slots,
last_vote_bank_hash,
last_vote_blockhash,
&leader_keypair,
&validator_keypairs.vote_keypair,
&validator_keypairs.vote_keypair,
None,
);
make_slot_with_vote_tx(
&blockstore,
ticks_per_slot,
really_last_main_fork_slot,
last_main_fork_slot,
&last_vote_blockhash,
vote_tx,
&leader_keypair,
);
// Third replay: root advances to really_expected_root_slot.
let (bank_forks, ..) = test_process_blockstore_with_custom_options(
&genesis_config,
&blockstore,
&opts,
blockstore_access_type,
);
let bank_forks = bank_forks.read().unwrap();
assert_eq!(bank_forks.root(), really_expected_root_slot);
}
#[test]
// No explicit blockstore root; primary (read-write) blockstore access.
fn test_process_blockstore_with_supermajority_root_without_blockstore_root() {
run_test_process_blockstore_with_supermajority_root(None, AccessType::Primary);
}
#[test]
// No explicit blockstore root; secondary (read-only) blockstore access.
fn test_process_blockstore_with_supermajority_root_without_blockstore_root_secondary_access() {
run_test_process_blockstore_with_supermajority_root(None, AccessType::Secondary);
}
#[test]
// Blockstore pre-rooted at slot 1, shifting the expected supermajority root.
fn test_process_blockstore_with_supermajority_root_with_blockstore_root() {
run_test_process_blockstore_with_supermajority_root(Some(1), AccessType::Primary)
}
#[test]
#[allow(clippy::field_reassign_with_default)]
// supermajority_root_from_vote_accounts should return the highest root
// such that enough of `total_stake` (per the vote threshold) has rooted
// that slot or higher.
fn test_supermajority_root_from_vote_accounts() {
// Helper: build a vote-accounts map from (root_slot, stake) pairs.
let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> VoteAccountsHashMap {
roots_stakes
.into_iter()
.map(|(root, stake)| {
let mut vote_state = VoteState::default();
vote_state.root_slot = Some(root);
let mut vote_account =
AccountSharedData::new(1, VoteState::size_of(), &solana_vote_program::id());
let versioned = VoteStateVersions::new_current(vote_state);
VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
(
solana_sdk::pubkey::new_rand(),
(stake, VoteAccount::try_from(vote_account).unwrap()),
)
})
.collect()
};
let total_stake = 10;
let slot = 100;
// No vote accounts at all -> no supermajority root.
assert!(
supermajority_root_from_vote_accounts(slot, total_stake, &HashMap::default()).is_none()
);
// Only 4 of 10 stake has voted — below the threshold for any root.
let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)];
let accounts = convert_to_vote_accounts(roots_stakes);
assert!(supermajority_root_from_vote_accounts(slot, total_stake, &accounts).is_none());
// 7/10 stake has rooted slot 4 or higher, but only 6/10 has rooted 8.
let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)];
let accounts = convert_to_vote_accounts(roots_stakes);
assert_eq!(
supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(),
4
);
// Now 7/10 stake has rooted slot 8 or higher.
let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)];
let accounts = convert_to_vote_accounts(roots_stakes);
assert_eq!(
supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(),
8
);
}
// Thin test wrapper around confirm_slot_entries supplying fresh default
// timing/progress state and no senders, recorders, or cost tracking.
fn confirm_slot_entries_for_tests(
bank: &Arc<Bank>,
slot_entries: Vec<Entry>,
slot_full: bool,
prev_entry_hash: Hash,
) -> result::Result<(), BlockstoreProcessorError> {
confirm_slot_entries(
bank,
(slot_entries, 0, slot_full),
&mut ConfirmationTiming::default(),
&mut ConfirmationProgress::new(prev_entry_hash),
false,
None,
None,
None,
&VerifyRecyclers::default(),
None,
&PrioritizationFeeCache::new(0u64),
)
}
#[test]
// Exercises blockhash-age bookkeeping during slot confirmation with the
// fix_recent_blockhashes feature deactivated, including replaying entries
// of a skipped slot (slot 1) into a slot-2 bank.
fn test_confirm_slot_entries_without_fix() {
const HASHES_PER_TICK: u64 = 10;
const TICKS_PER_SLOT: u64 = 2;
let collector_id = Pubkey::new_unique();
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config(10_000);
genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
genesis_config.ticks_per_slot = TICKS_PER_SLOT;
let genesis_hash = genesis_config.hash();
let mut slot_0_bank = Bank::new_for_tests(&genesis_config);
// This test specifically covers pre-fix behavior.
slot_0_bank.deactivate_feature(&feature_set::fix_recent_blockhashes::id());
let slot_0_bank = Arc::new(slot_0_bank);
assert_eq!(slot_0_bank.slot(), 0);
assert_eq!(slot_0_bank.tick_height(), 0);
assert_eq!(slot_0_bank.max_tick_height(), 2);
assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));
// Confirm a tick-only slot 0; its final hash becomes the blockhash.
let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
let slot_0_hash = slot_0_entries.last().unwrap().hash;
confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));
// Slot 1 is skipped: slot 2 is built directly on slot 0.
let slot_2_bank = Arc::new(Bank::new_from_parent(&slot_0_bank, &collector_id, 2));
assert_eq!(slot_2_bank.slot(), 2);
assert_eq!(slot_2_bank.tick_height(), 2);
assert_eq!(slot_2_bank.max_tick_height(), 6);
assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
// Replay the skipped slot 1's (tick-only) entries into the slot-2 bank;
// each tick ages the older hashes by one.
let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
let slot_1_hash = slot_1_entries.last().unwrap().hash;
confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
assert_eq!(slot_2_bank.tick_height(), 4);
assert_eq!(slot_2_bank.last_blockhash(), slot_1_hash);
assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(0));
// Slot 2 proper: three transfer entries, each using a different (still
// recent) blockhash, padded with hashes/ticks to complete the slot.
let slot_2_entries = {
let to_pubkey = Pubkey::new_unique();
let mut prev_entry_hash = slot_1_hash;
let mut remaining_entry_hashes = HASHES_PER_TICK;
let mut entries: Vec<Entry> = [genesis_hash, slot_0_hash, slot_1_hash]
.into_iter()
.map(|recent_hash| {
let tx =
system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_hash);
remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
next_entry_mut(&mut prev_entry_hash, 1, vec![tx])
})
.collect();
// First padding entry consumes the tick's leftover hashes; the
// second is a full tick.
entries.push(next_entry_mut(
&mut prev_entry_hash,
remaining_entry_hashes,
vec![],
));
entries.push(next_entry_mut(
&mut prev_entry_hash,
HASHES_PER_TICK,
vec![],
));
entries
};
let slot_2_hash = slot_2_entries.last().unwrap().hash;
confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash).unwrap();
assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(3));
assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(2));
assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(1));
assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
}
#[test]
fn test_confirm_slot_entries_progress_num_txs_indexes() {
    // Verifies that `ConfirmationProgress::num_txs` accumulates across calls to
    // confirm_slot_entries() and that the transaction-status batches carry
    // globally increasing transaction indexes.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(100 * LAMPORTS_PER_SOL);
    let genesis_hash = genesis_config.hash();
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let mut timing = ConfirmationTiming::default();
    let mut progress = ConfirmationProgress::new(genesis_hash);
    let amount = genesis_config.rent.minimum_balance(0);

    // Two funded payers and two recipients.
    let payer_a = Keypair::new();
    let payer_b = Keypair::new();
    let recipient_a = Keypair::new();
    let recipient_b = Keypair::new();
    bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &payer_a.pubkey())
        .unwrap();
    bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &payer_b.pubkey())
        .unwrap();

    let (status_sender, status_receiver) = crossbeam_channel::unbounded();
    let transaction_status_sender = TransactionStatusSender {
        sender: status_sender,
    };

    // First entry holds two transactions.
    let blockhash = bank.last_blockhash();
    let tx1 = system_transaction::transfer(
        &payer_a,
        &recipient_a.pubkey(),
        amount,
        bank.last_blockhash(),
    );
    let tx2 = system_transaction::transfer(
        &payer_b,
        &recipient_b.pubkey(),
        amount,
        bank.last_blockhash(),
    );
    let entry = next_entry(&blockhash, 1, vec![tx1, tx2]);
    let new_hash = entry.hash;

    confirm_slot_entries(
        &bank,
        (vec![entry], 0, false),
        &mut timing,
        &mut progress,
        false,
        Some(&transaction_status_sender),
        None,
        None,
        &VerifyRecyclers::default(),
        None,
        &PrioritizationFeeCache::new(0u64),
    )
    .unwrap();
    assert_eq!(progress.num_txs, 2);
    match status_receiver.recv().unwrap() {
        TransactionStatusMessage::Batch(batch) => {
            assert_eq!(batch.transactions.len(), 2);
            assert_eq!(batch.transaction_indexes.len(), 2);
            // Indexes for the first entry start at zero.
            for index in [0, 1] {
                assert!(batch.transaction_indexes.contains(&index));
            }
        }
        _ => panic!("batch should have been sent"),
    }

    // Second entry holds three more transactions; its status batch must
    // continue numbering where the first batch left off.
    let tx1 = system_transaction::transfer(
        &payer_a,
        &recipient_a.pubkey(),
        amount + 1,
        bank.last_blockhash(),
    );
    let tx2 = system_transaction::transfer(
        &payer_b,
        &recipient_b.pubkey(),
        amount + 1,
        bank.last_blockhash(),
    );
    let tx3 = system_transaction::transfer(
        &mint_keypair,
        &Pubkey::new_unique(),
        amount,
        bank.last_blockhash(),
    );
    let entry = next_entry(&new_hash, 1, vec![tx1, tx2, tx3]);
    confirm_slot_entries(
        &bank,
        (vec![entry], 0, false),
        &mut timing,
        &mut progress,
        false,
        Some(&transaction_status_sender),
        None,
        None,
        &VerifyRecyclers::default(),
        None,
        &PrioritizationFeeCache::new(0u64),
    )
    .unwrap();
    assert_eq!(progress.num_txs, 5);
    match status_receiver.recv().unwrap() {
        TransactionStatusMessage::Batch(batch) => {
            assert_eq!(batch.transactions.len(), 3);
            assert_eq!(batch.transaction_indexes.len(), 3);
            for index in [2, 3, 4] {
                assert!(batch.transaction_indexes.contains(&index));
            }
        }
        _ => panic!("batch should have been sent"),
    }
}
#[test]
fn test_rebatch_transactions() {
    // rebatch_transactions() must produce sub-batches that borrow the locks of
    // the original batch (and therefore never unlock them) and must slice the
    // caller-provided transaction indexes down to the requested sub-range.
    let dummy_leader_pubkey = solana_sdk::pubkey::new_rand();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));

    // Three independent transfers from three distinct payers.
    let recipient_a = solana_sdk::pubkey::new_rand();
    let payer_b = Keypair::new();
    let recipient_b = solana_sdk::pubkey::new_rand();
    let payer_c = Keypair::new();
    let recipient_c = solana_sdk::pubkey::new_rand();
    let txs: Vec<_> = [
        (&mint_keypair, &recipient_a),
        (&payer_b, &recipient_b),
        (&payer_c, &recipient_c),
    ]
    .into_iter()
    .map(|(payer, recipient)| {
        SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
            payer,
            recipient,
            1,
            genesis_config.hash(),
        ))
    })
    .collect();

    let batch = bank.prepare_sanitized_batch(&txs);
    assert!(batch.needs_unlock());
    let transaction_indexes = vec![42, 43, 44];

    // Sub-batch covering only the first transaction (range [0, 0]).
    let batch2 = rebatch_transactions(
        batch.lock_results(),
        &bank,
        batch.sanitized_transactions(),
        0,
        0,
        &transaction_indexes,
    );
    // The original batch still owns the locks; the sub-batch must not unlock.
    assert!(batch.needs_unlock());
    assert!(!batch2.batch.needs_unlock());
    assert_eq!(batch2.transaction_indexes, vec![42]);

    // Sub-batch covering the last two transactions (range [1, 2]).
    let batch3 = rebatch_transactions(
        batch.lock_results(),
        &bank,
        batch.sanitized_transactions(),
        1,
        2,
        &transaction_indexes,
    );
    assert!(!batch3.batch.needs_unlock());
    assert_eq!(batch3.transaction_indexes, vec![43, 44]);
}
#[test]
fn test_confirm_slot_entries_with_fix() {
    // Exercises recent-blockhash validation across a skipped slot: after slot 1
    // is replayed as a skipped slot inside slot 2's bank, a transaction whose
    // recent blockhash is slot 1's final tick hash must be rejected with
    // BlockhashNotFound, while one using slot 0's hash must still succeed.
    const HASHES_PER_TICK: u64 = 10;
    const TICKS_PER_SLOT: u64 = 2;
    let collector_id = Pubkey::new_unique();
    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
    genesis_config.ticks_per_slot = TICKS_PER_SLOT;
    let genesis_hash = genesis_config.hash();

    // Slot 0: confirm a full slot of ticks rooted at the genesis hash.
    let slot_0_bank = Arc::new(Bank::new_for_tests(&genesis_config));
    assert_eq!(slot_0_bank.slot(), 0);
    assert_eq!(slot_0_bank.tick_height(), 0);
    assert_eq!(slot_0_bank.max_tick_height(), 2);
    assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));
    let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
    let slot_0_hash = slot_0_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
    assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
    assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
    assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));

    // Slot 2 is built directly on slot 0, so slot 1 is skipped; slot 1's ticks
    // are replayed through the slot-2 bank with full_slot = false.
    let slot_2_bank = Arc::new(Bank::new_from_parent(&slot_0_bank, &collector_id, 2));
    assert_eq!(slot_2_bank.slot(), 2);
    assert_eq!(slot_2_bank.tick_height(), 2);
    assert_eq!(slot_2_bank.max_tick_height(), 6);
    assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
    let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
    let slot_1_hash = slot_1_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
    // The skipped slot's ticks advance the tick height but must NOT register
    // slot 1's hash in the blockhash queue (last_blockhash stays at slot 0).
    assert_eq!(slot_2_bank.tick_height(), 4);
    assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(1));
    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(0));

    // Each case submits one transfer whose recent blockhash varies.
    struct TestCase {
        recent_blockhash: Hash,
        expected_result: result::Result<(), BlockstoreProcessorError>,
    }
    let test_cases = [
        // A skipped slot's hash must not be accepted as a recent blockhash.
        TestCase {
            recent_blockhash: slot_1_hash,
            expected_result: Err(BlockstoreProcessorError::InvalidTransaction(
                TransactionError::BlockhashNotFound,
            )),
        },
        // The parent slot's hash remains valid.
        TestCase {
            recent_blockhash: slot_0_hash,
            expected_result: Ok(()),
        },
    ];
    for TestCase {
        recent_blockhash,
        expected_result,
    } in test_cases
    {
        // Build slot 2's entries: one transaction entry, a padding entry to
        // finish the first tick's hash budget, then a full tick entry.
        let slot_2_entries = {
            let to_pubkey = Pubkey::new_unique();
            let mut prev_entry_hash = slot_1_hash;
            let mut remaining_entry_hashes = HASHES_PER_TICK;
            let tx =
                system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_blockhash);
            remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
            let mut entries = vec![next_entry_mut(&mut prev_entry_hash, 1, vec![tx])];
            entries.push(next_entry_mut(
                &mut prev_entry_hash,
                remaining_entry_hashes,
                vec![],
            ));
            entries.push(next_entry_mut(
                &mut prev_entry_hash,
                HASHES_PER_TICK,
                vec![],
            ));
            entries
        };
        let slot_2_hash = slot_2_entries.last().unwrap().hash;
        let result =
            confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash);
        match (result, expected_result) {
            (Ok(()), Ok(())) => {
                // Success: the slot completed and slot 2's hash is now the
                // newest entry in the blockhash queue.
                assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
                assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
                assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
                assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
                assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
            }
            (
                Err(BlockstoreProcessorError::InvalidTransaction(err)),
                Err(BlockstoreProcessorError::InvalidTransaction(expected_err)),
            ) => {
                assert_eq!(err, expected_err);
            }
            (result, expected_result) => {
                panic!(
                    "actual result {:?} != expected result {:?}",
                    result, expected_result
                );
            }
        }
    }
}
#[test]
fn test_check_accounts_data_block_size() {
    // Verifies the per-block accounts-data cap: filling a block with exactly
    // NUM_ACCOUNTS maximally-sized allocations succeeds, and one more fails
    // with WouldExceedAccountDataBlockLimit.
    const ACCOUNT_SIZE: u64 = MAX_PERMITTED_DATA_LENGTH;
    const NUM_ACCOUNTS: u64 = MAX_ACCOUNT_DATA_BLOCK_LEN / ACCOUNT_SIZE;

    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config((1_000_000 + NUM_ACCOUNTS + 1) * LAMPORTS_PER_SOL);
    let mut bank = Bank::new_for_tests(&genesis_config);
    bank.deactivate_feature(
        &feature_set::enable_early_verification_of_account_modifications::id(),
    );
    let bank = Arc::new(bank);
    assert!(bank
        .feature_set
        .is_active(&feature_set::cap_accounts_data_size_per_block::id()));

    // Builds a single-transaction entry that allocates a fresh account of
    // ACCOUNT_SIZE bytes.
    let create_account_entry = || {
        let transaction = system_transaction::create_account(
            &mint_keypair,
            &Keypair::new(),
            bank.last_blockhash(),
            LAMPORTS_PER_SOL,
            ACCOUNT_SIZE,
            &solana_sdk::system_program::id(),
        );
        next_entry(&bank.last_blockhash(), 1, vec![transaction])
    };

    // Allocations up to the per-block limit all succeed.
    for _ in 0..NUM_ACCOUNTS {
        let entry = create_account_entry();
        assert_eq!(
            process_entries_for_tests(&bank, vec![entry], true, None, None),
            Ok(()),
        );
    }

    // One more allocation pushes past the limit and must be rejected.
    let entry = create_account_entry();
    assert_eq!(
        process_entries_for_tests(&bank, vec![entry], true, None, None),
        Err(TransactionError::WouldExceedAccountDataBlockLimit)
    );
}
#[test]
fn test_check_accounts_data_total_size() {
    // Verifies the total (cluster-wide) accounts-data cap: allocations count
    // against MAX_ACCOUNTS_DATA_LEN, shrinking reallocations free budget, and
    // an allocation that would exceed the cap fails with
    // InstructionError::MaxAccountsDataSizeExceeded.
    //
    // The bank's initial accounts-data size is set so that exactly
    // REMAINING_ACCOUNTS_DATA_SIZE of budget is left for the test to consume.
    const REMAINING_ACCOUNTS_DATA_SIZE: u64 =
        MAX_ACCOUNT_DATA_BLOCK_LEN - MAX_PERMITTED_DATA_LENGTH;
    const INITIAL_ACCOUNTS_DATA_SIZE: u64 =
        MAX_ACCOUNTS_DATA_LEN - REMAINING_ACCOUNTS_DATA_SIZE;
    const ACCOUNT_SIZE: u64 = MAX_PERMITTED_DATA_LENGTH;
    const SHRINK_SIZE: u64 = 5678;
    // Each iteration allocates ACCOUNT_SIZE then shrinks by SHRINK_SIZE, for a
    // net growth of ACCOUNTS_DATA_SIZE_DELTA_PER_ITERATION.
    const ACCOUNTS_DATA_SIZE_DELTA_PER_ITERATION: u64 = ACCOUNT_SIZE - SHRINK_SIZE;
    const NUM_ITERATIONS: u64 =
        REMAINING_ACCOUNTS_DATA_SIZE / ACCOUNTS_DATA_SIZE_DELTA_PER_ITERATION;
    const ACCOUNT_BALANCE: u64 = 70 * LAMPORTS_PER_SOL;
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config((1_000_000 + NUM_ITERATIONS + 1) * ACCOUNT_BALANCE);
    let mut bank = Bank::new_for_tests(&genesis_config);
    // Install the mock builtin that resizes an account on demand.
    let mock_realloc_program_id = Pubkey::new_unique();
    bank.add_builtin(
        "mock_realloc_program",
        &mock_realloc_program_id,
        mock_realloc::process_instruction,
    );
    bank.set_accounts_data_size_initial_for_tests(INITIAL_ACCOUNTS_DATA_SIZE);
    bank.deactivate_feature(
        &feature_set::enable_early_verification_of_account_modifications::id(),
    );
    let bank = Arc::new(bank);
    // Advance to a child bank so the test runs against a non-genesis slot.
    let bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::new_unique(), 1));
    assert!(bank
        .feature_set
        .is_active(&feature_set::cap_accounts_data_len::id()));
    for _ in 0..NUM_ITERATIONS {
        let accounts_data_size_before = bank.load_accounts_data_size();

        // Allocate a maximally-sized account; the tracked accounts-data size
        // must grow by exactly ACCOUNT_SIZE.
        let new_account = Keypair::new();
        let transaction = system_transaction::create_account(
            &mint_keypair,
            &new_account,
            bank.last_blockhash(),
            ACCOUNT_BALANCE,
            ACCOUNT_SIZE,
            &mock_realloc_program_id,
        );
        let entry = next_entry(&bank.last_blockhash(), 1, vec![transaction]);
        assert_eq!(
            process_entries_for_tests(&bank, vec![entry], true, None, None),
            Ok(()),
        );
        let accounts_data_size_after = bank.load_accounts_data_size();
        assert_eq!(
            accounts_data_size_after - accounts_data_size_before,
            ACCOUNT_SIZE,
        );

        // Shrink the account via the mock realloc program; the net growth for
        // this iteration must drop to ACCOUNT_SIZE - SHRINK_SIZE.
        let new_size = ACCOUNT_SIZE - SHRINK_SIZE;
        let transaction = mock_realloc::create_transaction(
            &mint_keypair,
            &new_account.pubkey(),
            new_size as usize,
            mock_realloc_program_id,
            bank.last_blockhash(),
        );
        let entry = next_entry(&bank.last_blockhash(), 1, vec![transaction]);
        assert_eq!(
            process_entries_for_tests(&bank, vec![entry], true, None, None),
            Ok(()),
        );
        let accounts_data_size_after = bank.load_accounts_data_size();
        assert_eq!(
            accounts_data_size_after - accounts_data_size_before,
            new_size,
        );
    }

    // The remaining budget is now smaller than ACCOUNT_SIZE, so one more
    // full-size allocation must exceed the total cap.
    let transaction = system_transaction::create_account(
        &mint_keypair,
        &Keypair::new(),
        bank.last_blockhash(),
        ACCOUNT_BALANCE,
        ACCOUNT_SIZE,
        &solana_sdk::system_program::id(),
    );
    let entry = next_entry(&bank.last_blockhash(), 1, vec![transaction]);
    assert!(matches!(
        process_entries_for_tests(&bank, vec![entry], true, None, None),
        Err(TransactionError::InstructionError(
            _,
            InstructionError::MaxAccountsDataSizeExceeded,
        ))
    ));
}
mod mock_realloc {
    use {
        super::*,
        serde::{Deserialize, Serialize},
    };

    /// Instructions understood by the mock realloc program.
    #[derive(Debug, Serialize, Deserialize)]
    enum Instruction {
        Realloc { new_size: usize },
    }

    /// Builtin entrypoint: deserializes a `Realloc` instruction and resizes
    /// instruction account 0 to the requested length. Undecodable instruction
    /// data yields `InstructionError::InvalidInstructionData`.
    pub fn process_instruction(
        _first_instruction_account: usize,
        invoke_context: &mut InvokeContext,
    ) -> result::Result<(), InstructionError> {
        let transaction_context = &invoke_context.transaction_context;
        let instruction_context = transaction_context.get_current_instruction_context()?;
        let instruction_data = instruction_context.get_instruction_data();
        match bincode::deserialize(instruction_data) {
            Ok(Instruction::Realloc { new_size }) => instruction_context
                .try_borrow_instruction_account(transaction_context, 0)?
                .set_data_length(new_size),
            Err(_) => Err(InstructionError::InvalidInstructionData),
        }
    }

    /// Builds a signed transaction carrying one `Realloc` instruction that
    /// targets `reallocd`, paid for and signed by `payer`.
    pub fn create_transaction(
        payer: &Keypair,
        reallocd: &Pubkey,
        new_size: usize,
        mock_realloc_program_id: Pubkey,
        recent_blockhash: Hash,
    ) -> Transaction {
        let account_metas = vec![solana_sdk::instruction::AccountMeta::new(*reallocd, false)];
        let instruction = solana_sdk::instruction::Instruction::new_with_bincode(
            mock_realloc_program_id,
            &Instruction::Realloc { new_size },
            account_metas,
        );
        Transaction::new_signed_with_payer(
            &[instruction],
            Some(&payer.pubkey()),
            &[payer],
            recent_blockhash,
        )
    }
}
}