solana_runtime/
bank.rs

1//! The `bank` module tracks client accounts and the progress of on-chain
2//! programs.
3//!
4//! A single bank relates to a block produced by a single leader and each bank
5//! except for the genesis bank points back to a parent bank.
6//!
7//! The bank is the main entrypoint for processing verified transactions with the function
8//! `Bank::process_transactions`
9//!
10//! It does this by loading the accounts using the reference it holds on the account store,
11//! and then passing those to an InvokeContext which handles loading the programs specified
12//! by the Transaction and executing it.
13//!
14//! The bank then stores the results to the accounts store.
15//!
16//! It then has APIs for retrieving whether a transaction has been processed and its status.
17//! See `get_signature_status` et al.
18//!
19//! Bank lifecycle:
20//!
21//! A bank is newly created and open to transactions. Transactions are applied
22//! until either the bank reaches the tick count when the node is the leader for that slot, or the
23//! node has applied all transactions present in all `Entry`s in the slot.
24//!
25//! Once it is complete, the bank can then be frozen. After frozen, no more transactions can
26//! be applied or state changes made. At the frozen step, rent will be applied and various
27//! sysvar special accounts are updated to reflect the new state of the system.
28//!
29//! After frozen, and the bank has had the appropriate number of votes on it, then it can become
30//! rooted. At this point, it will not be able to be removed from the chain and the
31//! state is finalized.
32//!
33//! It offers a high-level API that signs transactions
34//! on behalf of the caller, and a low-level API for when they have
35//! already been signed and verified.
36use {
37    crate::{
38        account_saver::collect_accounts_to_store,
39        bank::{
40            metrics::*,
41            partitioned_epoch_rewards::{EpochRewardStatus, VoteRewardsAccounts},
42        },
43        bank_forks::BankForks,
44        epoch_stakes::{NodeVoteAccounts, VersionedEpochStakes},
45        inflation_rewards::points::InflationPointCalculationEvent,
46        installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock},
47        rent_collector::RentCollector,
48        runtime_config::RuntimeConfig,
49        snapshot_hash::SnapshotHash,
50        stake_account::StakeAccount,
51        stake_weighted_timestamp::{
52            calculate_stake_weighted_timestamp, MaxAllowableDrift,
53            MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
54        },
55        stakes::{SerdeStakesToStakeFormat, Stakes, StakesCache},
56        status_cache::{SlotDelta, StatusCache},
57        transaction_batch::{OwnedOrBorrowed, TransactionBatch},
58    },
59    accounts_lt_hash::{CacheValue as AccountsLtHashCacheValue, Stats as AccountsLtHashStats},
60    agave_feature_set::{self as feature_set, raise_cpi_nesting_limit_to_8, FeatureSet},
61    agave_precompiles::{get_precompile, get_precompiles, is_precompile},
62    agave_reserved_account_keys::ReservedAccountKeys,
63    agave_syscalls::{
64        create_program_runtime_environment_v1, create_program_runtime_environment_v2,
65    },
66    ahash::{AHashSet, RandomState},
67    dashmap::DashMap,
68    log::*,
69    partitioned_epoch_rewards::PartitionedRewardsCalculation,
70    rayon::ThreadPoolBuilder,
71    serde::Serialize,
72    solana_account::{
73        create_account_shared_data_with_fields as create_account, from_account, Account,
74        AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount,
75    },
76    solana_accounts_db::{
77        account_locks::validate_account_locks,
78        accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot},
79        accounts_db::{self, AccountStorageEntry, AccountsDb, AccountsDbConfig, DuplicatesLtHash},
80        accounts_hash::AccountsLtHash,
81        accounts_index::{IndexKey, ScanConfig, ScanResult},
82        accounts_update_notifier_interface::AccountsUpdateNotifier,
83        ancestors::{Ancestors, AncestorsForSerialization},
84        blockhash_queue::BlockhashQueue,
85        storable_accounts::StorableAccounts,
86    },
87    solana_builtins::{prototype::BuiltinPrototype, BUILTINS, STATELESS_BUILTINS},
88    solana_clock::{
89        BankId, Epoch, Slot, SlotIndex, UnixTimestamp, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE,
90        MAX_TRANSACTION_FORWARDING_DELAY,
91    },
92    solana_cluster_type::ClusterType,
93    solana_compute_budget::compute_budget::ComputeBudget,
94    solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions,
95    solana_cost_model::{block_cost_limits::simd_0286_block_limits, cost_tracker::CostTracker},
96    solana_epoch_info::EpochInfo,
97    solana_epoch_schedule::EpochSchedule,
98    solana_feature_gate_interface as feature,
99    solana_fee::FeeFeatures,
100    solana_fee_calculator::FeeRateGovernor,
101    solana_fee_structure::{FeeBudgetLimits, FeeDetails, FeeStructure},
102    solana_genesis_config::GenesisConfig,
103    solana_hard_forks::HardForks,
104    solana_hash::Hash,
105    solana_inflation::Inflation,
106    solana_keypair::Keypair,
107    solana_lattice_hash::lt_hash::LtHash,
108    solana_measure::{meas_dur, measure::Measure, measure_time, measure_us},
109    solana_message::{inner_instruction::InnerInstructions, AccountKeys, SanitizedMessage},
110    solana_native_token::LAMPORTS_PER_SOL,
111    solana_packet::PACKET_DATA_SIZE,
112    solana_precompile_error::PrecompileError,
113    solana_program_runtime::{
114        invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry,
115    },
116    solana_pubkey::Pubkey,
117    solana_reward_info::RewardInfo,
118    solana_runtime_transaction::{
119        runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta,
120    },
121    solana_sdk_ids::{bpf_loader_upgradeable, incinerator, native_loader},
122    solana_sha256_hasher::hashv,
123    solana_signature::Signature,
124    solana_slot_hashes::SlotHashes,
125    solana_slot_history::{Check, SlotHistory},
126    solana_stake_interface::{
127        stake_history::StakeHistory, state::Delegation, sysvar::stake_history,
128    },
129    solana_svm::{
130        account_loader::LoadedTransaction,
131        account_overrides::AccountOverrides,
132        program_loader::load_program_with_pubkey,
133        transaction_balances::{BalanceCollector, SvmTokenInfo},
134        transaction_commit_result::{CommittedTransaction, TransactionCommitResult},
135        transaction_error_metrics::TransactionErrorMetrics,
136        transaction_execution_result::{
137            TransactionExecutionDetails, TransactionLoadedAccountsStats,
138        },
139        transaction_processing_result::{
140            ProcessedTransaction, TransactionProcessingResult,
141            TransactionProcessingResultExtensions,
142        },
143        transaction_processor::{
144            ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages,
145            TransactionProcessingConfig, TransactionProcessingEnvironment,
146        },
147    },
148    solana_svm_callback::{AccountState, InvokeContextCallback, TransactionProcessingCallback},
149    solana_svm_timings::{ExecuteTimingType, ExecuteTimings},
150    solana_svm_transaction::svm_message::SVMMessage,
151    solana_system_transaction as system_transaction,
152    solana_sysvar::{self as sysvar, last_restart_slot::LastRestartSlot, SysvarSerialize},
153    solana_sysvar_id::SysvarId,
154    solana_time_utils::years_as_slots,
155    solana_transaction::{
156        sanitized::{MessageHash, SanitizedTransaction, MAX_TX_ACCOUNT_LOCKS},
157        versioned::VersionedTransaction,
158        Transaction, TransactionVerificationMode,
159    },
160    solana_transaction_context::{TransactionAccount, TransactionReturnData},
161    solana_transaction_error::{TransactionError, TransactionResult as Result},
162    solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap},
163    std::{
164        collections::{HashMap, HashSet},
165        fmt,
166        num::NonZeroUsize,
167        ops::{AddAssign, RangeFull},
168        path::PathBuf,
169        slice,
170        sync::{
171            atomic::{
172                AtomicBool, AtomicI64, AtomicU64,
173                Ordering::{self, AcqRel, Acquire, Relaxed},
174            },
175            Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak,
176        },
177        thread::Builder,
178        time::{Duration, Instant},
179    },
180};
181#[cfg(feature = "dev-context-only-utils")]
182use {
183    dashmap::DashSet,
184    rayon::iter::{IntoParallelRefIterator, ParallelIterator},
185    solana_accounts_db::accounts_db::{
186        ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
187    },
188    solana_nonce as nonce,
189    solana_nonce_account::{get_system_account_kind, SystemAccountKind},
190    solana_program_runtime::{loaded_programs::ProgramCacheForTxBatch, sysvar_cache::SysvarCache},
191};
192pub use {partitioned_epoch_rewards::KeyedRewardsAndNumPartitions, solana_reward_info::RewardType};
193
/// params to `verify_accounts_hash`
struct VerifyAccountsHashConfig {
    /// when true, the bank must be rooted before its accounts hash is verified
    require_rooted_bank: bool,
    /// when true, run the verification on a background thread rather than
    /// blocking the caller
    run_in_background: bool,
}
199
200mod accounts_lt_hash;
201mod address_lookup_table;
202pub mod bank_hash_details;
203mod builtin_programs;
204pub mod builtins;
205mod check_transactions;
206mod fee_distribution;
207mod metrics;
208pub(crate) mod partitioned_epoch_rewards;
209mod recent_blockhashes_account;
210mod serde_snapshot;
211mod sysvar_cache;
212pub(crate) mod tests;
213
/// Seconds in a Julian year (365.25 days).
pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;

/// Number of epochs of stakes retained for leader-schedule purposes
/// (see `epoch_stakes` usage elsewhere in this crate).
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;

/// Status cache of transaction results for this bank.
pub type BankStatusCache = StatusCache<Result<()>>;
#[cfg_attr(
    feature = "frozen-abi",
    frozen_abi(digest = "2mR2EKFguLhheKtDzbFxoQonSmUtM9svd8kkgeKpe2vu")
)]
pub type BankSlotDelta = SlotDelta<Result<()>>;
224
/// Timing breakdown (in milliseconds) of the work done while squashing a bank.
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]
pub struct SquashTiming {
    pub squash_accounts_ms: u64,
    pub squash_accounts_cache_ms: u64,
    pub squash_accounts_index_ms: u64,
    pub squash_accounts_store_ms: u64,

    pub squash_cache_ms: u64,
}

impl AddAssign for SquashTiming {
    /// Accumulate another timing sample into this one, field by field.
    fn add_assign(&mut self, rhs: Self) {
        // Destructure rhs so a newly added field causes a compile error here.
        let Self {
            squash_accounts_ms,
            squash_accounts_cache_ms,
            squash_accounts_index_ms,
            squash_accounts_store_ms,
            squash_cache_ms,
        } = rhs;
        self.squash_accounts_ms += squash_accounts_ms;
        self.squash_accounts_cache_ms += squash_accounts_cache_ms;
        self.squash_accounts_index_ms += squash_accounts_index_ms;
        self.squash_accounts_store_ms += squash_accounts_store_ms;
        self.squash_cache_ms += squash_cache_ms;
    }
}
244
/// Fees accumulated by this bank's fee collector, split by component.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CollectorFeeDetails {
    // base transaction fees accumulated (see `accumulate`)
    transaction_fee: u64,
    // prioritization fees accumulated (see `accumulate`)
    priority_fee: u64,
}
250
251impl CollectorFeeDetails {
252    pub(crate) fn accumulate(&mut self, fee_details: &FeeDetails) {
253        self.transaction_fee = self
254            .transaction_fee
255            .saturating_add(fee_details.transaction_fee());
256        self.priority_fee = self
257            .priority_fee
258            .saturating_add(fee_details.prioritization_fee());
259    }
260
261    pub fn total_transaction_fee(&self) -> u64 {
262        self.transaction_fee.saturating_add(self.priority_fee)
263    }
264
265    pub fn total_priority_fee(&self) -> u64 {
266        self.priority_fee
267    }
268}
269
270impl From<FeeDetails> for CollectorFeeDetails {
271    fn from(fee_details: FeeDetails) -> Self {
272        CollectorFeeDetails {
273            transaction_fee: fee_details.transaction_fee(),
274            priority_fee: fee_details.prioritization_fee(),
275        }
276    }
277}
278
/// Shared, reference-counted state behind a `Bank`: the account store,
/// a link to the parent bank, and the bank-id counter.
#[derive(Debug)]
pub struct BankRc {
    /// where all the Accounts are stored
    pub accounts: Arc<Accounts>,

    /// Previous checkpoint of this bank
    pub(crate) parent: RwLock<Option<Arc<Bank>>>,

    /// Shared counter used to hand out bank ids (starts at 0 in `new`)
    pub(crate) bank_id_generator: Arc<AtomicU64>,
}
289
290impl BankRc {
291    pub(crate) fn new(accounts: Accounts) -> Self {
292        Self {
293            accounts: Arc::new(accounts),
294            parent: RwLock::new(None),
295            bank_id_generator: Arc::new(AtomicU64::new(0)),
296        }
297    }
298}
299
/// Output of `Bank`'s transaction load-and-execute stage.
pub struct LoadAndExecuteTransactionsOutput {
    /// Vector of results indicating whether a transaction was processed or could not
    /// be processed. Note processed transactions can still have failed!
    pub processing_results: Vec<TransactionProcessingResult>,
    /// Processed transaction counts used to update bank transaction counts and
    /// for metrics reporting.
    pub processed_counts: ProcessedTransactionCounts,
    /// Balances accumulated for TransactionStatusSender when transaction
    /// balance recording is enabled.
    pub balance_collector: Option<BalanceCollector>,
}
311
/// Everything produced by simulating (not committing) a single transaction.
#[derive(Debug, PartialEq)]
pub struct TransactionSimulationResult {
    /// Overall success/failure of the simulated execution
    pub result: Result<()>,
    /// Log messages emitted during simulation
    pub logs: TransactionLogMessages,
    /// Account states after the simulated execution
    pub post_simulation_accounts: Vec<TransactionAccount>,
    /// Compute units consumed by the simulation
    pub units_consumed: u64,
    /// Size of the accounts data loaded for this transaction, in bytes
    pub loaded_accounts_data_size: u32,
    /// Return data set by the transaction, if any
    pub return_data: Option<TransactionReturnData>,
    /// Inner (CPI) instructions, when recording was enabled
    pub inner_instructions: Option<Vec<InnerInstructions>>,
    /// Fee for the transaction, when available
    pub fee: Option<u64>,
    /// Per-account balances before execution, when recorded
    pub pre_balances: Option<Vec<u64>>,
    /// Per-account balances after execution, when recorded
    pub post_balances: Option<Vec<u64>>,
    /// Token balances before execution, when recorded
    pub pre_token_balances: Option<Vec<SvmTokenInfo>>,
    /// Token balances after execution, when recorded
    pub post_token_balances: Option<Vec<SvmTokenInfo>>,
}
327
/// Paired pre-/post-execution balances for a batch of transactions.
#[derive(Clone, Debug)]
pub struct TransactionBalancesSet {
    pub pre_balances: TransactionBalances,
    pub post_balances: TransactionBalances,
}

impl TransactionBalancesSet {
    /// Pair up pre and post balances.
    ///
    /// # Panics
    /// Panics if the two sides do not have the same number of transactions.
    pub fn new(pre_balances: TransactionBalances, post_balances: TransactionBalances) -> Self {
        assert_eq!(pre_balances.len(), post_balances.len());
        TransactionBalancesSet {
            pre_balances,
            post_balances,
        }
    }
}
/// Per-transaction, per-account balances (outer: transaction, inner: account).
pub type TransactionBalances = Vec<Vec<u64>>;
344
/// Result of a pre-commit step; on success it may carry a read guard on the
/// bank's hash (NOTE(review): presumably held so the bank cannot freeze while
/// the commit proceeds — confirm at the call sites).
pub type PreCommitResult<'a> = Result<Option<RwLockReadGuard<'a, Hash>>>;
346
347#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
348pub enum TransactionLogCollectorFilter {
349    All,
350    AllWithVotes,
351    None,
352    OnlyMentionedAddresses,
353}
354
355impl Default for TransactionLogCollectorFilter {
356    fn default() -> Self {
357        Self::None
358    }
359}
360
/// Global configuration for how transaction logs should be collected across all banks.
#[derive(Debug, Default)]
pub struct TransactionLogCollectorConfig {
    /// Addresses whose mention makes a transaction's logs eligible for
    /// collection (used with `TransactionLogCollectorFilter::OnlyMentionedAddresses`)
    pub mentioned_addresses: HashSet<Pubkey>,
    /// Which transactions to collect logs for
    pub filter: TransactionLogCollectorFilter,
}
366
/// A single collected transaction log entry.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TransactionLogInfo {
    /// Signature of the logged transaction
    pub signature: Signature,
    /// Execution result of the logged transaction
    pub result: Result<()>,
    /// Whether the transaction was a vote
    pub is_vote: bool,
    /// Log messages emitted by the transaction
    pub log_messages: TransactionLogMessages,
}
374
/// Accumulates transaction logs for a bank, with an index by mentioned address.
#[derive(Default, Debug)]
pub struct TransactionLogCollector {
    /// All the logs collected from this Bank.  Exact contents depend on the
    /// active `TransactionLogCollectorFilter`
    pub logs: Vec<TransactionLogInfo>,

    /// For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
    /// locate the logs from transactions that included the mentioned addresses.
    pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
}
385
386impl TransactionLogCollector {
387    pub fn get_logs_for_address(
388        &self,
389        address: Option<&Pubkey>,
390    ) -> Option<Vec<TransactionLogInfo>> {
391        match address {
392            None => Some(self.logs.clone()),
393            Some(address) => self.mentioned_address_map.get(address).map(|log_indices| {
394                log_indices
395                    .iter()
396                    .filter_map(|i| self.logs.get(*i).cloned())
397                    .collect()
398            }),
399        }
400    }
401}
402
/// Bank's common fields shared by all supported snapshot versions for deserialization.
/// Sync fields with BankFieldsToSerialize! This is paired with it.
/// All members are crate-public so that Bank's own members can stay private while the
/// versioned deserializer can still populate this struct.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
pub struct BankFieldsToDeserialize {
    pub(crate) blockhash_queue: BlockhashQueue,
    pub(crate) ancestors: AncestorsForSerialization,
    pub(crate) hash: Hash,
    pub(crate) parent_hash: Hash,
    pub(crate) parent_slot: Slot,
    pub(crate) hard_forks: HardForks,
    pub(crate) transaction_count: u64,
    pub(crate) tick_height: u64,
    pub(crate) signature_count: u64,
    pub(crate) capitalization: u64,
    pub(crate) max_tick_height: u64,
    pub(crate) hashes_per_tick: Option<u64>,
    pub(crate) ticks_per_slot: u64,
    pub(crate) ns_per_slot: u128,
    pub(crate) genesis_creation_time: UnixTimestamp,
    pub(crate) slots_per_year: f64,
    pub(crate) slot: Slot,
    pub(crate) epoch: Epoch,
    pub(crate) block_height: u64,
    pub(crate) collector_id: Pubkey,
    pub(crate) collector_fees: u64,
    pub(crate) fee_rate_governor: FeeRateGovernor,
    pub(crate) rent_collector: RentCollector,
    pub(crate) epoch_schedule: EpochSchedule,
    pub(crate) inflation: Inflation,
    pub(crate) stakes: Stakes<Delegation>,
    pub(crate) versioned_epoch_stakes: HashMap<Epoch, VersionedEpochStakes>,
    pub(crate) is_delta: bool,
    pub(crate) accounts_data_len: u64,
    pub(crate) accounts_lt_hash: AccountsLtHash,
    pub(crate) bank_hash_stats: BankHashStats,
}
445
/// Bank's common fields shared by all supported snapshot versions for serialization.
/// This was separated from BankFieldsToDeserialize to avoid cloning by using refs.
/// So, sync fields with BankFieldsToDeserialize!
/// All members are public so that Bank can stay private while the versioned serializer
/// can still read this struct.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Debug)]
pub struct BankFieldsToSerialize {
    pub blockhash_queue: BlockhashQueue,
    pub ancestors: AncestorsForSerialization,
    pub hash: Hash,
    pub parent_hash: Hash,
    pub parent_slot: Slot,
    pub hard_forks: HardForks,
    pub transaction_count: u64,
    pub tick_height: u64,
    pub signature_count: u64,
    pub capitalization: u64,
    pub max_tick_height: u64,
    pub hashes_per_tick: Option<u64>,
    pub ticks_per_slot: u64,
    pub ns_per_slot: u128,
    pub genesis_creation_time: UnixTimestamp,
    pub slots_per_year: f64,
    pub slot: Slot,
    pub epoch: Epoch,
    pub block_height: u64,
    pub collector_id: Pubkey,
    pub collector_fees: u64,
    pub fee_rate_governor: FeeRateGovernor,
    pub rent_collector: RentCollector,
    pub epoch_schedule: EpochSchedule,
    pub inflation: Inflation,
    pub stakes: Stakes<StakeAccount<Delegation>>,
    pub is_delta: bool,
    pub accounts_data_len: u64,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    pub accounts_lt_hash: AccountsLtHash,
}
487
// Can't derive PartialEq because RwLock doesn't implement PartialEq
#[cfg(feature = "dev-context-only-utils")]
impl PartialEq for Bank {
    /// Test-only structural equality over a chosen subset of Bank fields.
    /// The exhaustive destructuring below (no `..`) means adding a field to
    /// `Bank` is a compile error here, forcing this impl to be revisited.
    fn eq(&self, other: &Self) -> bool {
        // Fast path: the very same Bank instance.
        if std::ptr::eq(self, other) {
            return true;
        }
        // Suppress rustfmt until https://github.com/rust-lang/rustfmt/issues/5920 is fixed ...
        #[rustfmt::skip]
        let Self {
            rc: _,
            status_cache: _,
            blockhash_queue,
            ancestors,
            hash,
            parent_hash,
            parent_slot,
            hard_forks,
            transaction_count,
            non_vote_transaction_count_since_restart: _,
            transaction_error_count: _,
            transaction_entries_count: _,
            transactions_per_entry_max: _,
            tick_height,
            signature_count,
            capitalization,
            max_tick_height,
            hashes_per_tick,
            ticks_per_slot,
            ns_per_slot,
            genesis_creation_time,
            slots_per_year,
            slot,
            bank_id: _,
            epoch,
            block_height,
            collector_id,
            collector_fees,
            fee_rate_governor,
            rent_collector,
            epoch_schedule,
            inflation,
            stakes_cache,
            epoch_stakes,
            is_delta,
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides,
            accounts_lt_hash,
            // TODO: Confirm if all these fields are intentionally ignored!
            rewards: _,
            cluster_type: _,
            rewards_pool_pubkeys: _,
            transaction_debug_keys: _,
            transaction_log_collector_config: _,
            transaction_log_collector: _,
            feature_set: _,
            reserved_account_keys: _,
            drop_callback: _,
            freeze_started: _,
            vote_only_bank: _,
            cost_tracker: _,
            accounts_data_size_initial: _,
            accounts_data_size_delta_on_chain: _,
            accounts_data_size_delta_off_chain: _,
            epoch_reward_status: _,
            transaction_processor: _,
            check_program_modification_slot: _,
            collector_fee_details: _,
            compute_budget: _,
            transaction_account_lock_limit: _,
            fee_structure: _,
            cache_for_accounts_lt_hash: _,
            stats_for_accounts_lt_hash: _,
            block_id,
            bank_hash_stats: _,
            epoch_rewards_calculation_cache: _,
            // Ignore new fields explicitly if they do not impact PartialEq.
            // Adding ".." will remove compile-time checks that if a new field
            // is added to the struct, this PartialEq is accordingly updated.
        } = self;
        *blockhash_queue.read().unwrap() == *other.blockhash_queue.read().unwrap()
            && ancestors == &other.ancestors
            && *hash.read().unwrap() == *other.hash.read().unwrap()
            && parent_hash == &other.parent_hash
            && parent_slot == &other.parent_slot
            && *hard_forks.read().unwrap() == *other.hard_forks.read().unwrap()
            && transaction_count.load(Relaxed) == other.transaction_count.load(Relaxed)
            && tick_height.load(Relaxed) == other.tick_height.load(Relaxed)
            && signature_count.load(Relaxed) == other.signature_count.load(Relaxed)
            && capitalization.load(Relaxed) == other.capitalization.load(Relaxed)
            && max_tick_height == &other.max_tick_height
            && hashes_per_tick == &other.hashes_per_tick
            && ticks_per_slot == &other.ticks_per_slot
            && ns_per_slot == &other.ns_per_slot
            && genesis_creation_time == &other.genesis_creation_time
            && slots_per_year == &other.slots_per_year
            && slot == &other.slot
            && epoch == &other.epoch
            && block_height == &other.block_height
            && collector_id == &other.collector_id
            && collector_fees.load(Relaxed) == other.collector_fees.load(Relaxed)
            && fee_rate_governor == &other.fee_rate_governor
            && rent_collector == &other.rent_collector
            && epoch_schedule == &other.epoch_schedule
            && *inflation.read().unwrap() == *other.inflation.read().unwrap()
            && *stakes_cache.stakes() == *other.stakes_cache.stakes()
            && epoch_stakes == &other.epoch_stakes
            && is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
            // No deadlock is possible, when Arc::ptr_eq() returns false, because of being
            // different Mutexes.
            && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) ||
                *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap())
            && *accounts_lt_hash.lock().unwrap() == *other.accounts_lt_hash.lock().unwrap()
            && *block_id.read().unwrap() == *other.block_id.read().unwrap()
    }
}
604
#[cfg(feature = "dev-context-only-utils")]
impl BankFieldsToSerialize {
    /// Create a new BankFieldsToSerialize where basically every field is defaulted.
    /// Only use for tests; many of the fields are invalid!
    pub fn default_for_tests() -> Self {
        Self {
            blockhash_queue: BlockhashQueue::default(),
            ancestors: AncestorsForSerialization::default(),
            hash: Hash::default(),
            parent_hash: Hash::default(),
            parent_slot: 0,
            hard_forks: HardForks::default(),
            transaction_count: 0,
            tick_height: 0,
            signature_count: 0,
            capitalization: 0,
            max_tick_height: 0,
            hashes_per_tick: None,
            ticks_per_slot: 0,
            ns_per_slot: 0,
            genesis_creation_time: 0,
            slots_per_year: 0.0,
            slot: 0,
            epoch: 0,
            block_height: 0,
            collector_id: Pubkey::default(),
            collector_fees: 0,
            fee_rate_governor: FeeRateGovernor::default(),
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Inflation::default(),
            stakes: Stakes::default(),
            is_delta: false,
            accounts_data_len: 0,
            versioned_epoch_stakes: HashMap::default(),
            // Arbitrary non-zero sentinel so the lt-hash is distinguishable in tests.
            accounts_lt_hash: AccountsLtHash(LtHash([0x7E57; LtHash::NUM_ELEMENTS])),
        }
    }
}
644
/// Events emitted during reward calculation, consumed by a `RewardCalcTracer`.
#[derive(Debug)]
pub enum RewardCalculationEvent<'a, 'b> {
    /// A staking event for the given pubkey with its inflation-point calculation details.
    Staking(&'a Pubkey, &'b InflationPointCalculationEvent),
}
649
/// type alias is not supported for trait in rust yet. As a workaround, we define the
/// `RewardCalcTracer` trait explicitly and implement it on any type that implements
/// `Fn(&RewardCalculationEvent) + Send + Sync`.
pub trait RewardCalcTracer: Fn(&RewardCalculationEvent) + Send + Sync {}

/// Blanket impl: any suitable closure or fn is automatically a `RewardCalcTracer`.
impl<T: Fn(&RewardCalculationEvent) + Send + Sync> RewardCalcTracer for T {}
656
657fn null_tracer() -> Option<impl RewardCalcTracer> {
658    None::<fn(&RewardCalculationEvent)>
659}
660
/// Callback associated with a `Bank` (by name, invoked around bank drop —
/// NOTE(review): confirm at the `Drop` impl, which is outside this view).
pub trait DropCallback: fmt::Debug {
    /// Invoked with the bank in question.
    fn callback(&self, b: &Bank);
    /// Clone this callback into a fresh boxed trait object.
    fn clone_box(&self) -> Box<dyn DropCallback + Send + Sync>;
}
665
/// An optionally-installed boxed `DropCallback`; `None` (the default) means no callback.
#[derive(Debug, Default)]
pub struct OptionalDropCallback(Option<Box<dyn DropCallback + Send + Sync>>);
668
/// Dev/test-only map of per-slot blockhash and bank-hash overrides.
#[derive(Default, Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
pub struct HashOverrides {
    // slot -> override entry (each slot may be registered at most once)
    hashes: HashMap<Slot, HashOverride>,
}
674
#[cfg(feature = "dev-context-only-utils")]
impl HashOverrides {
    /// Full override entry for `slot`, if one was registered.
    fn get_hash_override(&self, slot: Slot) -> Option<&HashOverride> {
        self.hashes.get(&slot)
    }

    /// Blockhash override for `slot`, if any.
    fn get_blockhash_override(&self, slot: Slot) -> Option<&Hash> {
        self.get_hash_override(slot).map(|o| &o.blockhash)
    }

    /// Bank-hash override for `slot`, if any.
    fn get_bank_hash_override(&self, slot: Slot) -> Option<&Hash> {
        self.get_hash_override(slot).map(|o| &o.bank_hash)
    }

    /// Register an override for `slot`.
    ///
    /// # Panics
    /// Panics if an override for `slot` was already registered.
    pub fn add_override(&mut self, slot: Slot, blockhash: Hash, bank_hash: Hash) {
        let is_new = self
            .hashes
            .insert(
                slot,
                HashOverride {
                    blockhash,
                    bank_hash,
                },
            )
            .is_none();
        assert!(is_new);
    }
}
705
/// A blockhash/bank-hash pair used to override the computed hashes for one slot.
#[derive(Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
struct HashOverride {
    // overriding blockhash for the slot
    blockhash: Hash,
    // overriding bank hash for the slot
    bank_hash: Hash,
}
712
713/// Manager for the state of all accounts and programs after processing its entries.
714pub struct Bank {
715    /// References to accounts, parent and signature status
716    pub rc: BankRc,
717
718    /// A cache of signature statuses
719    pub status_cache: Arc<RwLock<BankStatusCache>>,
720
721    /// FIFO queue of `recent_blockhash` items
722    blockhash_queue: RwLock<BlockhashQueue>,
723
724    /// The set of parents including this bank
725    pub ancestors: Ancestors,
726
727    /// Hash of this Bank's state. Only meaningful after freezing.
728    hash: RwLock<Hash>,
729
730    /// Hash of this Bank's parent's state
731    parent_hash: Hash,
732
733    /// parent's slot
734    parent_slot: Slot,
735
736    /// slots to hard fork at
737    hard_forks: Arc<RwLock<HardForks>>,
738
739    /// The number of committed transactions since genesis.
740    transaction_count: AtomicU64,
741
742    /// The number of non-vote transactions committed since the most
743    /// recent boot from snapshot or genesis. This value is only stored in
744    /// blockstore for the RPC method "getPerformanceSamples". It is not
745    /// retained within snapshots, but is preserved in `Bank::new_from_parent`.
746    non_vote_transaction_count_since_restart: AtomicU64,
747
748    /// The number of transaction errors in this slot
749    transaction_error_count: AtomicU64,
750
751    /// The number of transaction entries in this slot
752    transaction_entries_count: AtomicU64,
753
754    /// The max number of transaction in an entry in this slot
755    transactions_per_entry_max: AtomicU64,
756
757    /// Bank tick height
758    tick_height: AtomicU64,
759
760    /// The number of signatures from valid transactions in this slot
761    signature_count: AtomicU64,
762
763    /// Total capitalization, used to calculate inflation
764    capitalization: AtomicU64,
765
766    // Bank max_tick_height
767    max_tick_height: u64,
768
769    /// The number of hashes in each tick. None value means hashing is disabled.
770    hashes_per_tick: Option<u64>,
771
772    /// The number of ticks in each slot.
773    ticks_per_slot: u64,
774
775    /// length of a slot in ns
776    pub ns_per_slot: u128,
777
778    /// genesis time, used for computed clock
779    genesis_creation_time: UnixTimestamp,
780
781    /// The number of slots per year, used for inflation
782    slots_per_year: f64,
783
784    /// Bank slot (i.e. block)
785    slot: Slot,
786
787    bank_id: BankId,
788
789    /// Bank epoch
790    epoch: Epoch,
791
792    /// Bank block_height
793    block_height: u64,
794
795    /// The pubkey to send transactions fees to.
796    collector_id: Pubkey,
797
798    /// Fees that have been collected
799    collector_fees: AtomicU64,
800
801    /// Track cluster signature throughput and adjust fee rate
802    pub(crate) fee_rate_governor: FeeRateGovernor,
803
804    /// latest rent collector, knows the epoch
805    rent_collector: RentCollector,
806
807    /// initialized from genesis
808    pub(crate) epoch_schedule: EpochSchedule,
809
810    /// inflation specs
811    inflation: Arc<RwLock<Inflation>>,
812
813    /// cache of vote_account and stake_account state for this fork
814    stakes_cache: StakesCache,
815
816    /// staked nodes on epoch boundaries, saved off when a bank.slot() is at
817    ///   a leader schedule calculation boundary
818    epoch_stakes: HashMap<Epoch, VersionedEpochStakes>,
819
820    /// A boolean reflecting whether any entries were recorded into the PoH
821    /// stream for the slot == self.slot
822    is_delta: AtomicBool,
823
824    /// Protocol-level rewards that were distributed by this bank
825    pub rewards: RwLock<Vec<(Pubkey, RewardInfo)>>,
826
827    pub cluster_type: Option<ClusterType>,
828
829    // this is temporary field only to remove rewards_pool entirely
830    pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,
831
832    transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,
833
834    // Global configuration for how transaction logs should be collected across all banks
835    pub transaction_log_collector_config: Arc<RwLock<TransactionLogCollectorConfig>>,
836
837    // Logs from transactions that this Bank executed collected according to the criteria in
838    // `transaction_log_collector_config`
839    pub transaction_log_collector: Arc<RwLock<TransactionLogCollector>>,
840
841    pub feature_set: Arc<FeatureSet>,
842
843    /// Set of reserved account keys that cannot be write locked
844    reserved_account_keys: Arc<ReservedAccountKeys>,
845
846    /// callback function only to be called when dropping and should only be called once
847    pub drop_callback: RwLock<OptionalDropCallback>,
848
849    pub freeze_started: AtomicBool,
850
851    vote_only_bank: bool,
852
853    cost_tracker: RwLock<CostTracker>,
854
855    /// The initial accounts data size at the start of this Bank, before processing any transactions/etc
856    accounts_data_size_initial: u64,
857    /// The change to accounts data size in this Bank, due on-chain events (i.e. transactions)
858    accounts_data_size_delta_on_chain: AtomicI64,
859    /// The change to accounts data size in this Bank, due to off-chain events (i.e. rent collection)
860    accounts_data_size_delta_off_chain: AtomicI64,
861
862    epoch_reward_status: EpochRewardStatus,
863
864    transaction_processor: TransactionBatchProcessor<BankForks>,
865
866    check_program_modification_slot: bool,
867
868    /// Collected fee details
869    collector_fee_details: RwLock<CollectorFeeDetails>,
870
871    /// The compute budget to use for transaction execution.
872    compute_budget: Option<ComputeBudget>,
873
874    /// The max number of accounts that a transaction may lock.
875    transaction_account_lock_limit: Option<usize>,
876
877    /// Fee structure to use for assessing transaction fees.
878    fee_structure: FeeStructure,
879
880    /// blockhash and bank_hash overrides keyed by slot for simulated block production.
881    /// This _field_ was needed to be DCOU-ed to avoid 2 locks per bank freezing...
882    #[cfg(feature = "dev-context-only-utils")]
883    hash_overrides: Arc<Mutex<HashOverrides>>,
884
885    /// The lattice hash of all accounts
886    ///
887    /// The value is only meaningful after freezing.
888    accounts_lt_hash: Mutex<AccountsLtHash>,
889
890    /// A cache of *the initial state* of accounts modified in this slot
891    ///
892    /// The accounts lt hash needs both the initial and final state of each
893    /// account that was modified in this slot.  Cache the initial state here.
894    ///
895    /// Note: The initial state must be strictly from an ancestor,
896    /// and not an intermediate state within this slot.
897    cache_for_accounts_lt_hash: DashMap<Pubkey, AccountsLtHashCacheValue, ahash::RandomState>,
898
899    /// Stats related to the accounts lt hash
900    stats_for_accounts_lt_hash: AccountsLtHashStats,
901
902    /// The unique identifier for the corresponding block for this bank.
903    /// None for banks that have not yet completed replay or for leader banks as we cannot populate block_id
904    /// until bankless leader. Can be computed directly from shreds without needing to execute transactions.
905    block_id: RwLock<Option<Hash>>,
906
907    /// Accounts stats for computing the bank hash
908    bank_hash_stats: AtomicBankHashStats,
909
910    /// The cache of epoch rewards calculation results
911    /// This is used to avoid recalculating the same epoch rewards at epoch boundary.
912    /// The hashmap is keyed by parent_hash.
913    epoch_rewards_calculation_cache: Arc<Mutex<HashMap<Hash, Arc<PartitionedRewardsCalculation>>>>,
914}
915
/// Reward details for a single vote account, gathered during epoch reward
/// calculation/distribution.
#[derive(Debug)]
struct VoteReward {
    /// The vote account's state to be stored back.
    /// NOTE(review): presumably post-reward state — confirm at the use site.
    vote_account: AccountSharedData,
    /// Validator commission, in percent (0-100), taken from the vote state.
    commission: u8,
    /// Lamports awarded to this vote account.
    vote_rewards: u64,
}

/// Per-vote-account rewards, keyed by vote account pubkey. `DashMap` permits
/// concurrent insertion while rewards are being computed.
type VoteRewards = DashMap<Pubkey, VoteReward, RandomState>;
924
/// Options for constructing a child bank; see
/// `Bank::new_from_parent_with_options`.
#[derive(Debug, Default)]
pub struct NewBankOptions {
    /// When true, the new bank is marked vote-only (stored on
    /// `Bank::vote_only_bank`).
    pub vote_only_bank: bool,
}
929
/// Configuration for constructing test banks; only available with the
/// "dev-context-only-utils" feature.
#[cfg(feature = "dev-context-only-utils")]
#[derive(Debug)]
pub struct BankTestConfig {
    /// Accounts-db configuration the test bank should be created with.
    pub accounts_db_config: AccountsDbConfig,
}
935
#[cfg(feature = "dev-context-only-utils")]
impl Default for BankTestConfig {
    /// Default test config: the canned accounts-db configuration for tests.
    fn default() -> Self {
        let accounts_db_config = ACCOUNTS_DB_CONFIG_FOR_TESTING;
        Self { accounts_db_config }
    }
}
944
/// Inflation reward parameters derived for the previous epoch.
/// NOTE(review): field semantics inferred from names — confirm at the
/// computation site (not visible in this chunk).
#[derive(Debug)]
struct PrevEpochInflationRewards {
    /// Lamports allocated to validator rewards for the epoch.
    validator_rewards: u64,
    /// Duration of the previous epoch, in (fractional) years.
    prev_epoch_duration_in_years: f64,
    /// Annualized inflation rate directed to validators.
    validator_rate: f64,
    /// Annualized inflation rate directed to the foundation.
    foundation_rate: f64,
}
952
/// Counts accumulated while committing a batch of processed transactions.
#[derive(Debug, Default, PartialEq)]
pub struct ProcessedTransactionCounts {
    /// Total transactions processed.
    pub processed_transactions_count: u64,
    /// Subset of processed transactions that were not votes.
    pub processed_non_vote_transactions_count: u64,
    /// Subset of processed transactions whose execution succeeded.
    pub processed_with_successful_result_count: u64,
    /// Total signatures across the processed transactions.
    pub signature_count: u64,
}
960
/// Account stats for computing the bank hash
/// This struct is serialized and stored in the snapshot.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BankHashStats {
    /// Accounts stored with a non-zero lamport balance (see `update`).
    pub num_updated_accounts: u64,
    /// Accounts stored with zero lamports, i.e. removed.
    pub num_removed_accounts: u64,
    /// Sum of lamports across stored accounts (wrapping arithmetic).
    pub num_lamports_stored: u64,
    /// Sum of account data lengths in bytes (wrapping arithmetic).
    pub total_data_len: u64,
    /// Accounts stored with the executable flag set.
    pub num_executable_accounts: u64,
}
972
973impl BankHashStats {
974    pub fn update<T: ReadableAccount>(&mut self, account: &T) {
975        if account.lamports() == 0 {
976            self.num_removed_accounts += 1;
977        } else {
978            self.num_updated_accounts += 1;
979        }
980        self.total_data_len = self
981            .total_data_len
982            .wrapping_add(account.data().len() as u64);
983        if account.executable() {
984            self.num_executable_accounts += 1;
985        }
986        self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports());
987    }
988    pub fn accumulate(&mut self, other: &BankHashStats) {
989        self.num_updated_accounts += other.num_updated_accounts;
990        self.num_removed_accounts += other.num_removed_accounts;
991        self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
992        self.num_lamports_stored = self
993            .num_lamports_stored
994            .wrapping_add(other.num_lamports_stored);
995        self.num_executable_accounts += other.num_executable_accounts;
996    }
997}
998
/// Thread-safe accumulator form of `BankHashStats`.
///
/// Counters are updated independently with relaxed atomics; use `load` to
/// take a (non-atomic-across-fields) snapshot.
#[derive(Debug, Default)]
pub struct AtomicBankHashStats {
    pub num_updated_accounts: AtomicU64,
    pub num_removed_accounts: AtomicU64,
    pub num_lamports_stored: AtomicU64,
    pub total_data_len: AtomicU64,
    pub num_executable_accounts: AtomicU64,
}
1007
1008impl AtomicBankHashStats {
1009    pub fn new(stat: &BankHashStats) -> Self {
1010        AtomicBankHashStats {
1011            num_updated_accounts: AtomicU64::new(stat.num_updated_accounts),
1012            num_removed_accounts: AtomicU64::new(stat.num_removed_accounts),
1013            num_lamports_stored: AtomicU64::new(stat.num_lamports_stored),
1014            total_data_len: AtomicU64::new(stat.total_data_len),
1015            num_executable_accounts: AtomicU64::new(stat.num_executable_accounts),
1016        }
1017    }
1018
1019    pub fn accumulate(&self, other: &BankHashStats) {
1020        self.num_updated_accounts
1021            .fetch_add(other.num_updated_accounts, Relaxed);
1022        self.num_removed_accounts
1023            .fetch_add(other.num_removed_accounts, Relaxed);
1024        self.total_data_len.fetch_add(other.total_data_len, Relaxed);
1025        self.num_lamports_stored
1026            .fetch_add(other.num_lamports_stored, Relaxed);
1027        self.num_executable_accounts
1028            .fetch_add(other.num_executable_accounts, Relaxed);
1029    }
1030
1031    pub fn load(&self) -> BankHashStats {
1032        BankHashStats {
1033            num_updated_accounts: self.num_updated_accounts.load(Relaxed),
1034            num_removed_accounts: self.num_removed_accounts.load(Relaxed),
1035            num_lamports_stored: self.num_lamports_stored.load(Relaxed),
1036            total_data_len: self.total_data_len.load(Relaxed),
1037            num_executable_accounts: self.num_executable_accounts.load(Relaxed),
1038        }
1039    }
1040}
1041
1042impl Bank {
    /// Build a `Bank` with every field default-initialized except the provided
    /// `accounts` store, plus the two fixups applied after construction.
    ///
    /// This is the common starting point for the public constructors; callers
    /// (e.g. `new_with_paths`) further initialize the result before use.
    fn default_with_accounts(accounts: Accounts) -> Self {
        let mut bank = Self {
            rc: BankRc::new(accounts),
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::<BlockhashQueue>::default(),
            ancestors: Ancestors::default(),
            hash: RwLock::<Hash>::default(),
            parent_hash: Hash::default(),
            parent_slot: Slot::default(),
            hard_forks: Arc::<RwLock<HardForks>>::default(),
            transaction_count: AtomicU64::default(),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::default(),
            signature_count: AtomicU64::default(),
            capitalization: AtomicU64::default(),
            max_tick_height: u64::default(),
            hashes_per_tick: Option::<u64>::default(),
            ticks_per_slot: u64::default(),
            ns_per_slot: u128::default(),
            genesis_creation_time: UnixTimestamp::default(),
            slots_per_year: f64::default(),
            slot: Slot::default(),
            bank_id: BankId::default(),
            epoch: Epoch::default(),
            block_height: u64::default(),
            collector_id: Pubkey::default(),
            collector_fees: AtomicU64::default(),
            fee_rate_governor: FeeRateGovernor::default(),
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Arc::<RwLock<Inflation>>::default(),
            stakes_cache: StakesCache::default(),
            epoch_stakes: HashMap::<Epoch, VersionedEpochStakes>::default(),
            is_delta: AtomicBool::default(),
            rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
            cluster_type: Option::<ClusterType>::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            freeze_started: AtomicBool::default(),
            vote_only_bank: false,
            cost_tracker: RwLock::<CostTracker>::default(),
            accounts_data_size_initial: 0,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: None,
            transaction_account_lock_limit: None,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash::identity())),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::default(),
            epoch_rewards_calculation_cache: Arc::new(Mutex::new(HashMap::default())),
        };

        // Fixup 1: the default processor doesn't know this bank's slot/epoch;
        // replace it with one keyed to them (otherwise still uninitialized).
        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        // Fixup 2: record the starting accounts data size so later on-/off-chain
        // deltas are measured relative to it.
        bank.accounts_data_size_initial = bank.calculate_accounts_data_size().unwrap();

        bank
    }
1120
    /// Create a root (slot 0) bank from `genesis_config`, backed by a fresh
    /// accounts db stored at `paths`.
    ///
    /// - `runtime_config` supplies optional compute-budget and
    ///   account-lock-limit overrides copied onto the bank.
    /// - `debug_keys` becomes the bank's `transaction_debug_keys`.
    /// - `additional_builtins` / `debug_do_not_add_builtins` are forwarded to
    ///   `finish_init`.
    /// - `accounts_db_config`, `accounts_update_notifier`, and `exit` are
    ///   handed to the accounts db.
    /// - `collector_id_for_tests`, `genesis_hash`, and `feature_set` are only
    ///   honored when the "dev-context-only-utils" feature is enabled.
    ///
    /// After genesis processing this also seeds epoch stakes for every epoch
    /// up to the leader-schedule epoch of slot 0 and populates the sysvars and
    /// the sysvar cache.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_paths(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        paths: Vec<PathBuf>,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_db_config: Option<AccountsDbConfig>,
        accounts_update_notifier: Option<AccountsUpdateNotifier>,
        #[allow(unused)] collector_id_for_tests: Option<Pubkey>,
        exit: Arc<AtomicBool>,
        #[allow(unused)] genesis_hash: Option<Hash>,
        #[allow(unused)] feature_set: Option<FeatureSet>,
    ) -> Self {
        let accounts_db =
            AccountsDb::new_with_config(paths, accounts_db_config, accounts_update_notifier, exit);
        let accounts = Accounts::new(Arc::new(accounts_db));
        let mut bank = Self::default_with_accounts(accounts);
        bank.ancestors = Ancestors::from(vec![bank.slot()]);
        bank.compute_budget = runtime_config.compute_budget;
        bank.transaction_account_lock_limit = runtime_config.transaction_account_lock_limit;
        bank.transaction_debug_keys = debug_keys;
        bank.cluster_type = Some(genesis_config.cluster_type);

        // Dev/test builds may override the feature set wholesale.
        #[cfg(feature = "dev-context-only-utils")]
        {
            bank.feature_set = Arc::new(feature_set.unwrap_or_default());
        }

        // The dev/test variant of process_genesis_config accepts extra
        // overrides (collector id and genesis hash).
        #[cfg(not(feature = "dev-context-only-utils"))]
        bank.process_genesis_config(genesis_config);
        #[cfg(feature = "dev-context-only-utils")]
        bank.process_genesis_config(genesis_config, collector_id_for_tests, genesis_hash);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );

        // genesis needs stakes for all epochs up to the epoch implied by
        //  slot = 0 and genesis configuration
        {
            let stakes = bank.stakes_cache.stakes().clone();
            let stakes = SerdeStakesToStakeFormat::from(stakes);
            for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) {
                bank.epoch_stakes
                    .insert(epoch, VersionedEpochStakes::new(stakes.clone(), epoch));
            }
            bank.update_stake_history(None);
        }
        // Populate the sysvar accounts, then mirror them into the transaction
        // processor's sysvar cache.
        bank.update_clock(None);
        bank.update_rent();
        bank.update_epoch_schedule();
        bank.update_recent_blockhashes();
        bank.update_last_restart_slot();
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);
        bank
    }
1182
1183    /// Create a new bank that points to an immutable checkpoint of another bank.
1184    pub fn new_from_parent(parent: Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
1185        Self::_new_from_parent(
1186            parent,
1187            collector_id,
1188            slot,
1189            null_tracer(),
1190            NewBankOptions::default(),
1191        )
1192    }
1193
1194    pub fn new_from_parent_with_options(
1195        parent: Arc<Bank>,
1196        collector_id: &Pubkey,
1197        slot: Slot,
1198        new_bank_options: NewBankOptions,
1199    ) -> Self {
1200        Self::_new_from_parent(parent, collector_id, slot, null_tracer(), new_bank_options)
1201    }
1202
1203    pub fn new_from_parent_with_tracer(
1204        parent: Arc<Bank>,
1205        collector_id: &Pubkey,
1206        slot: Slot,
1207        reward_calc_tracer: impl RewardCalcTracer,
1208    ) -> Self {
1209        Self::_new_from_parent(
1210            parent,
1211            collector_id,
1212            slot,
1213            Some(reward_calc_tracer),
1214            NewBankOptions::default(),
1215        )
1216    }
1217
    /// Return a copy of `rent_collector` re-keyed to `epoch`.
    fn get_rent_collector_from(rent_collector: &RentCollector, epoch: Epoch) -> RentCollector {
        rent_collector.clone_with_epoch(epoch)
    }
1221
    /// Core child-bank constructor backing all `new_from_parent*` variants.
    ///
    /// Freezes `parent`, clones/derives all inherited state, then — in order,
    /// since later steps depend on earlier ones — rebuilds the ancestors set,
    /// handles an epoch crossing (rewards, epoch stakes), prepares the program
    /// cache, updates sysvars, and pre-populates the accounts-lt-hash cache.
    /// Timings of each phase are reported via `report_new_bank_metrics`.
    fn _new_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        new_bank_options: NewBankOptions,
    ) -> Self {
        let mut time = Measure::start("bank::new_from_parent");
        let NewBankOptions { vote_only_bank } = new_bank_options;

        // A child may only be built on an immutable parent; a child must also
        // live at a different slot.
        parent.freeze();
        assert_ne!(slot, parent.slot());

        let epoch_schedule = parent.epoch_schedule().clone();
        let epoch = epoch_schedule.get_epoch(slot);

        // Share the accounts db with the parent and keep a handle to the
        // parent bank itself.
        let (rc, bank_rc_creation_time_us) = measure_us!({
            let accounts_db = Arc::clone(&parent.rc.accounts.accounts_db);
            BankRc {
                accounts: Arc::new(Accounts::new(accounts_db)),
                parent: RwLock::new(Some(Arc::clone(&parent))),
                bank_id_generator: Arc::clone(&parent.rc.bank_id_generator),
            }
        });

        // The status cache is shared (Arc) across the whole fork tree.
        let (status_cache, status_cache_time_us) = measure_us!(Arc::clone(&parent.status_cache));

        let (fee_rate_governor, fee_components_time_us) = measure_us!(
            FeeRateGovernor::new_derived(&parent.fee_rate_governor, parent.signature_count())
        );

        let bank_id = rc.bank_id_generator.fetch_add(1, Relaxed) + 1;
        let (blockhash_queue, blockhash_queue_time_us) =
            measure_us!(RwLock::new(parent.blockhash_queue.read().unwrap().clone()));

        let (stakes_cache, stakes_cache_time_us) =
            measure_us!(StakesCache::new(parent.stakes_cache.stakes().clone()));

        let (epoch_stakes, epoch_stakes_time_us) = measure_us!(parent.epoch_stakes.clone());

        let (transaction_processor, builtin_program_ids_time_us) = measure_us!(
            TransactionBatchProcessor::new_from(&parent.transaction_processor, slot, epoch)
        );

        let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
            measure_us!(parent.rewards_pool_pubkeys.clone());

        let (transaction_debug_keys, transaction_debug_keys_time_us) =
            measure_us!(parent.transaction_debug_keys.clone());

        let (transaction_log_collector_config, transaction_log_collector_config_time_us) =
            measure_us!(parent.transaction_log_collector_config.clone());

        let (feature_set, feature_set_time_us) = measure_us!(parent.feature_set.clone());

        let accounts_data_size_initial = parent.load_accounts_data_size();
        // Assemble the child: per-slot counters reset to zero, cumulative
        // state (transaction counts, capitalization, etc.) carried over.
        let mut new = Self {
            rc,
            status_cache,
            slot,
            bank_id,
            epoch,
            blockhash_queue,

            // TODO: clean this up, so much special-case copying...
            hashes_per_tick: parent.hashes_per_tick,
            ticks_per_slot: parent.ticks_per_slot,
            ns_per_slot: parent.ns_per_slot,
            genesis_creation_time: parent.genesis_creation_time,
            slots_per_year: parent.slots_per_year,
            epoch_schedule,
            rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch),
            max_tick_height: slot
                .checked_add(1)
                .expect("max tick height addition overflowed")
                .checked_mul(parent.ticks_per_slot)
                .expect("max tick height multiplication overflowed"),
            block_height: parent
                .block_height
                .checked_add(1)
                .expect("block height addition overflowed"),
            fee_rate_governor,
            capitalization: AtomicU64::new(parent.capitalization()),
            vote_only_bank,
            inflation: parent.inflation.clone(),
            transaction_count: AtomicU64::new(parent.transaction_count()),
            non_vote_transaction_count_since_restart: AtomicU64::new(
                parent.non_vote_transaction_count_since_restart(),
            ),
            transaction_error_count: AtomicU64::new(0),
            transaction_entries_count: AtomicU64::new(0),
            transactions_per_entry_max: AtomicU64::new(0),
            // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now
            stakes_cache,
            epoch_stakes,
            parent_hash: parent.hash(),
            parent_slot: parent.slot(),
            collector_id: *collector_id,
            collector_fees: AtomicU64::new(0),
            // Placeholder; the real ancestors set is built right below.
            ancestors: Ancestors::default(),
            hash: RwLock::new(Hash::default()),
            is_delta: AtomicBool::new(false),
            tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
            signature_count: AtomicU64::new(0),
            hard_forks: parent.hard_forks.clone(),
            rewards: RwLock::new(vec![]),
            cluster_type: parent.cluster_type,
            rewards_pool_pubkeys,
            transaction_debug_keys,
            transaction_log_collector_config,
            transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
            feature_set: Arc::clone(&feature_set),
            reserved_account_keys: parent.reserved_account_keys.clone(),
            drop_callback: RwLock::new(OptionalDropCallback(
                parent
                    .drop_callback
                    .read()
                    .unwrap()
                    .0
                    .as_ref()
                    .map(|drop_callback| drop_callback.clone_box()),
            )),
            freeze_started: AtomicBool::new(false),
            cost_tracker: RwLock::new(parent.read_cost_tracker().unwrap().new_from_parent_limits()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: parent.epoch_reward_status.clone(),
            transaction_processor,
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: parent.compute_budget,
            transaction_account_lock_limit: parent.transaction_account_lock_limit,
            fee_structure: parent.fee_structure.clone(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: parent.hash_overrides.clone(),
            accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::default(),
            epoch_rewards_calculation_cache: parent.epoch_rewards_calculation_cache.clone(),
        };

        // Ancestors = this slot plus every parent slot up the chain.
        let (_, ancestors_time_us) = measure_us!({
            let mut ancestors = Vec::with_capacity(1 + new.parents().len());
            ancestors.push(new.slot());
            new.parents().iter().for_each(|p| {
                ancestors.push(p.slot());
            });
            new.ancestors = Ancestors::from(ancestors);
        });

        // Following code may touch AccountsDb, requiring proper ancestors
        let (_, update_epoch_time_us) = measure_us!({
            if parent.epoch() < new.epoch() {
                new.process_new_epoch(
                    parent.epoch(),
                    parent.slot(),
                    parent.block_height(),
                    reward_calc_tracer,
                );
            } else {
                // Save a snapshot of stakes for use in consensus and stake weighted networking
                let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot);
                new.update_epoch_stakes(leader_schedule_epoch);
            }
            new.distribute_partitioned_epoch_rewards();
        });

        let (_, cache_preparation_time_us) =
            measure_us!(new.prepare_program_cache_for_upcoming_feature_set());

        // Update sysvars before processing transactions
        let (_, update_sysvars_time_us) = measure_us!({
            new.update_slot_hashes();
            new.update_stake_history(Some(parent.epoch()));
            new.update_clock(Some(parent.epoch()));
            new.update_last_restart_slot()
        });

        let (_, fill_sysvar_cache_time_us) = measure_us!(new
            .transaction_processor
            .fill_missing_sysvar_cache_entries(&new));

        let (num_accounts_modified_this_slot, populate_cache_for_accounts_lt_hash_us) =
            measure_us!({
                // The cache for accounts lt hash needs to be made aware of accounts modified
                // before transaction processing begins.  Otherwise we may calculate the wrong
                // accounts lt hash due to having the wrong initial state of the account.  The
                // lt hash cache's initial state must always be from an ancestor, and cannot be
                // an intermediate state within this Bank's slot.  If the lt hash cache has the
                // wrong initial account state, we'll mix out the wrong lt hash value, and thus
                // have the wrong overall accounts lt hash, and diverge.
                let accounts_modified_this_slot =
                    new.rc.accounts.accounts_db.get_pubkeys_for_slot(slot);
                let num_accounts_modified_this_slot = accounts_modified_this_slot.len();
                for pubkey in accounts_modified_this_slot {
                    new.cache_for_accounts_lt_hash
                        .entry(pubkey)
                        .or_insert(AccountsLtHashCacheValue::BankNew);
                }
                num_accounts_modified_this_slot
            });

        time.stop();
        report_new_bank_metrics(
            slot,
            parent.slot(),
            new.block_height,
            num_accounts_modified_this_slot,
            NewBankTimings {
                bank_rc_creation_time_us,
                total_elapsed_time_us: time.as_us(),
                status_cache_time_us,
                fee_components_time_us,
                blockhash_queue_time_us,
                stakes_cache_time_us,
                epoch_stakes_time_us,
                builtin_program_ids_time_us,
                rewards_pool_pubkeys_time_us,
                executor_cache_time_us: 0,
                transaction_debug_keys_time_us,
                transaction_log_collector_config_time_us,
                feature_set_time_us,
                ancestors_time_us,
                update_epoch_time_us,
                cache_preparation_time_us,
                update_sysvars_time_us,
                fill_sysvar_cache_time_us,
                populate_cache_for_accounts_lt_hash_us,
            },
        );

        // Report the parent's program-cache stats for its (now complete) slot,
        // then reset the shared stats for this new slot.
        report_loaded_programs_stats(
            &parent
                .transaction_processor
                .global_program_cache
                .read()
                .unwrap()
                .stats,
            parent.slot(),
        );

        new.transaction_processor
            .global_program_cache
            .write()
            .unwrap()
            .stats
            .reset();

        new
    }
1475
1476    pub fn set_fork_graph_in_program_cache(&self, fork_graph: Weak<RwLock<BankForks>>) {
1477        self.transaction_processor
1478            .global_program_cache
1479            .write()
1480            .unwrap()
1481            .set_fork_graph(fork_graph);
1482    }
1483
    /// Prepares the global program cache for the next epoch's program runtime
    /// environments: once close enough to the epoch boundary, installs the
    /// upcoming environments and queues loaded programs for recompilation;
    /// thereafter recompiles one queued program per invocation.
    fn prepare_program_cache_for_upcoming_feature_set(&self) {
        let (_epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(self.slot);
        let slots_in_epoch = self.epoch_schedule.get_slots_in_epoch(self.epoch);
        // Feature set as it will look with pending activations applied.
        let (upcoming_feature_set, _newly_activated) = self.compute_active_feature_set(true);
        let compute_budget = self
            .compute_budget
            .unwrap_or(ComputeBudget::new_with_defaults(
                upcoming_feature_set.is_active(&raise_cpi_nesting_limit_to_8::id()),
            ))
            .to_budget();

        // Recompile loaded programs one at a time before the next epoch hits
        let slots_in_recompilation_phase =
            (solana_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT as u64)
                .min(slots_in_epoch)
                .checked_div(2)
                .unwrap();

        let mut program_cache = self
            .transaction_processor
            .global_program_cache
            .write()
            .unwrap();

        if program_cache.upcoming_environments.is_some() {
            // Recompilation phase is already under way: recompile one queued
            // program per call against the next epoch's environments.
            if let Some((key, program_to_recompile)) = program_cache.programs_to_recompile.pop() {
                let effective_epoch = program_cache.latest_root_epoch.saturating_add(1);
                // Release the write lock before loading/compiling so other
                // cache users are not blocked in the meantime.
                drop(program_cache);
                let environments_for_epoch = self
                    .transaction_processor
                    .global_program_cache
                    .read()
                    .unwrap()
                    .get_environments_for_epoch(effective_epoch);
                if let Some(recompiled) = load_program_with_pubkey(
                    self,
                    &environments_for_epoch,
                    &key,
                    self.slot,
                    &mut ExecuteTimings::default(),
                    false,
                ) {
                    // Carry usage statistics over to the recompiled entry.
                    recompiled.tx_usage_counter.fetch_add(
                        program_to_recompile
                            .tx_usage_counter
                            .load(Ordering::Relaxed),
                        Ordering::Relaxed,
                    );
                    let mut program_cache = self
                        .transaction_processor
                        .global_program_cache
                        .write()
                        .unwrap();
                    program_cache.assign_program(key, recompiled);
                }
            }
        } else if self.epoch != program_cache.latest_root_epoch
            || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch
        {
            // Anticipate the upcoming program runtime environment for the next epoch,
            // so we can try to recompile loaded programs before the feature transition hits.
            drop(program_cache);
            let mut program_cache = self
                .transaction_processor
                .global_program_cache
                .write()
                .unwrap();
            let program_runtime_environment_v1 = create_program_runtime_environment_v1(
                &upcoming_feature_set.runtime_features(),
                &compute_budget,
                false, /* deployment */
                false, /* debugging_features */
            )
            .unwrap();
            let program_runtime_environment_v2 = create_program_runtime_environment_v2(
                &compute_budget,
                false, /* debugging_features */
            );
            // Only swap in an environment that actually differs, so programs
            // are not queued for recompilation needlessly.
            let mut upcoming_environments = program_cache.environments.clone();
            let changed_program_runtime_v1 =
                *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1;
            let changed_program_runtime_v2 =
                *upcoming_environments.program_runtime_v2 != program_runtime_environment_v2;
            if changed_program_runtime_v1 {
                upcoming_environments.program_runtime_v1 = Arc::new(program_runtime_environment_v1);
            }
            if changed_program_runtime_v2 {
                upcoming_environments.program_runtime_v2 = Arc::new(program_runtime_environment_v2);
            }
            program_cache.upcoming_environments = Some(upcoming_environments);
            program_cache.programs_to_recompile = program_cache
                .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2);
            // Sorted ascending by decayed usage so `pop()` above recompiles the
            // most-used programs first.
            program_cache
                .programs_to_recompile
                .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(self.slot));
        }
    }
1581
1582    pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) {
1583        self.transaction_processor
1584            .global_program_cache
1585            .write()
1586            .unwrap()
1587            .prune(new_root_slot, new_root_epoch);
1588    }
1589
1590    pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) {
1591        self.transaction_processor
1592            .global_program_cache
1593            .write()
1594            .unwrap()
1595            .prune_by_deployment_slot(deployment_slot);
1596    }
1597
    /// Epoch in which the new cooldown warmup rate for stake was activated
    ///
    /// NOTE(review): presumably `None` while the corresponding feature is not
    /// yet active — confirm against `FeatureSet::new_warmup_cooldown_rate_epoch`.
    pub fn new_warmup_cooldown_rate_epoch(&self) -> Option<Epoch> {
        self.feature_set
            .new_warmup_cooldown_rate_epoch(&self.epoch_schedule)
    }
1603
    /// process for the start of a new epoch
    ///
    /// Phase order matters: feature activations are applied first, the stakes
    /// cache is then advanced into the new epoch, a snapshot of stakes is saved
    /// for the leader-schedule epoch, and only after that snapshot are
    /// partitioned epoch rewards begun. Per-phase timing is reported via
    /// `report_new_epoch_metrics`.
    fn process_new_epoch(
        &mut self,
        parent_epoch: Epoch,
        parent_slot: Slot,
        parent_height: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) {
        let epoch = self.epoch();
        let slot = self.slot();
        // Dedicated thread pool for the epoch-boundary work below.
        let (thread_pool, thread_pool_time_us) = measure_us!(ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewEpch{i:02}"))
            .build()
            .expect("new rayon threadpool"));

        let (_, apply_feature_activations_time_us) = measure_us!(thread_pool.install(|| {
            self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false)
        }));

        // Add new entry to stakes.stake_history, set appropriate epoch and
        // update vote accounts with warmed up stakes before saving a
        // snapshot of stakes in epoch stakes
        let (_, activate_epoch_time_us) = measure_us!(self.stakes_cache.activate_epoch(
            epoch,
            &thread_pool,
            self.new_warmup_cooldown_rate_epoch()
        ));

        // Save a snapshot of stakes for use in consensus and stake weighted networking
        let leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot);
        let (_, update_epoch_stakes_time_us) =
            measure_us!(self.update_epoch_stakes(leader_schedule_epoch));

        let mut rewards_metrics = RewardsMetrics::default();
        // After saving a snapshot of stakes, apply stake rewards and commission
        let (_, update_rewards_with_thread_pool_time_us) = measure_us!(self
            .begin_partitioned_rewards(
                reward_calc_tracer,
                &thread_pool,
                parent_epoch,
                parent_slot,
                parent_height,
                &mut rewards_metrics,
            ));

        report_new_epoch_metrics(
            epoch,
            slot,
            parent_slot,
            NewEpochTimings {
                thread_pool_time_us,
                apply_feature_activations_time_us,
                activate_epoch_time_us,
                update_epoch_stakes_time_us,
                update_rewards_with_thread_pool_time_us,
            },
            rewards_metrics,
        );
    }
1663
    /// Optional byte cap on results accumulated by accounts-index scans, as
    /// configured on the underlying accounts-db.
    pub fn byte_limit_for_scans(&self) -> Option<usize> {
        self.rc
            .accounts
            .accounts_db
            .accounts_index
            .scan_results_limit_bytes
    }
1671
1672    pub fn proper_ancestors_set(&self) -> HashSet<Slot> {
1673        HashSet::from_iter(self.proper_ancestors())
1674    }
1675
1676    /// Returns all ancestors excluding self.slot.
1677    pub(crate) fn proper_ancestors(&self) -> impl Iterator<Item = Slot> + '_ {
1678        self.ancestors
1679            .keys()
1680            .into_iter()
1681            .filter(move |slot| *slot != self.slot)
1682    }
1683
1684    pub fn set_callback(&self, callback: Option<Box<dyn DropCallback + Send + Sync>>) {
1685        *self.drop_callback.write().unwrap() = OptionalDropCallback(callback);
1686    }
1687
    /// Returns the bank's `vote_only_bank` flag, set at construction.
    pub fn vote_only_bank(&self) -> bool {
        self.vote_only_bank
    }
1691
    /// Like `new_from_parent` but additionally:
    /// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots
    ///   in the past
    /// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots
    /// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank
    pub fn warp_from_parent(parent: Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
        // Freeze the parent before deriving a child from it.
        parent.freeze();
        let parent_timestamp = parent.clock().unix_timestamp;
        let mut new = Bank::new_from_parent(parent, collector_id, slot);
        new.apply_feature_activations(ApplyFeatureActivationsCaller::WarpFromParent, false);
        new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot));
        // Skip PoH entirely: jump the tick height straight to the slot's max.
        new.tick_height.store(new.max_tick_height(), Relaxed);

        // Reset the clock so the warped bank carries the parent's timestamp
        // rather than one extrapolated across the (possibly huge) slot gap.
        let mut clock = new.clock();
        clock.epoch_start_timestamp = parent_timestamp;
        clock.unix_timestamp = parent_timestamp;
        new.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                new.inherit_specially_retained_account_fields(account),
            )
        });
        new.transaction_processor
            .fill_missing_sysvar_cache_entries(&new);
        new.freeze();
        new
    }
1719
    /// Create a bank from explicit arguments and deserialized fields from snapshot
    ///
    /// Rebuilds the stakes cache from accounts-db, constructs the bank from the
    /// deserialized fields, re-runs any in-progress partitioned-rewards
    /// calculation, runs `finish_init`, and asserts the snapshot fields agree
    /// with `genesis_config`.
    pub(crate) fn new_from_fields(
        bank_rc: BankRc,
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        fields: BankFieldsToDeserialize,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_data_size_initial: u64,
    ) -> Self {
        let now = Instant::now();
        let ancestors = Ancestors::from(&fields.ancestors);
        // For backward compatibility, we can only serialize and deserialize
        // Stakes<Delegation> in BankFieldsTo{Serialize,Deserialize}. But Bank
        // caches Stakes<StakeAccount>. Below Stakes<StakeAccount> is obtained
        // from Stakes<Delegation> by reading the full account state from
        // accounts-db. Note that it is crucial that these accounts are loaded
        // at the right slot and match precisely with serialized Delegations.
        //
        // Note that we are disabling the read cache while we populate the stakes cache.
        // The stakes accounts will not be expected to be loaded again.
        // If we populate the read cache with these loads, then we'll just soon have to evict these.
        let (stakes, stakes_time) = measure_time!(Stakes::new(&fields.stakes, |pubkey| {
            let (account, _slot) = bank_rc
                .accounts
                .load_with_fixed_root_do_not_populate_read_cache(&ancestors, pubkey)?;
            Some(account)
        })
        .expect(
            "Stakes cache is inconsistent with accounts-db. This can indicate a corrupted \
             snapshot or bugs in cached accounts or accounts-db.",
        ));
        info!("Loading Stakes took: {stakes_time}");
        let stakes_accounts_load_duration = now.elapsed();
        let mut bank = Self {
            rc: bank_rc,
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::new(fields.blockhash_queue),
            ancestors,
            hash: RwLock::new(fields.hash),
            parent_hash: fields.parent_hash,
            parent_slot: fields.parent_slot,
            hard_forks: Arc::new(RwLock::new(fields.hard_forks)),
            transaction_count: AtomicU64::new(fields.transaction_count),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::new(fields.tick_height),
            signature_count: AtomicU64::new(fields.signature_count),
            capitalization: AtomicU64::new(fields.capitalization),
            max_tick_height: fields.max_tick_height,
            hashes_per_tick: fields.hashes_per_tick,
            ticks_per_slot: fields.ticks_per_slot,
            ns_per_slot: fields.ns_per_slot,
            genesis_creation_time: fields.genesis_creation_time,
            slots_per_year: fields.slots_per_year,
            slot: fields.slot,
            bank_id: 0,
            epoch: fields.epoch,
            block_height: fields.block_height,
            collector_id: fields.collector_id,
            collector_fees: AtomicU64::new(fields.collector_fees),
            fee_rate_governor: fields.fee_rate_governor,
            // clone()-ing is needed to consider a gated behavior in rent_collector
            rent_collector: Self::get_rent_collector_from(&fields.rent_collector, fields.epoch),
            epoch_schedule: fields.epoch_schedule,
            inflation: Arc::new(RwLock::new(fields.inflation)),
            stakes_cache: StakesCache::new(stakes),
            epoch_stakes: fields.versioned_epoch_stakes,
            is_delta: AtomicBool::new(fields.is_delta),
            rewards: RwLock::new(vec![]),
            cluster_type: Some(genesis_config.cluster_type),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: debug_keys,
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            // A non-default hash in the snapshot means the bank was frozen.
            freeze_started: AtomicBool::new(fields.hash != Hash::default()),
            vote_only_bank: false,
            cost_tracker: RwLock::new(CostTracker::default()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            // collector_fee_details is not serialized to snapshot
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: runtime_config.compute_budget,
            transaction_account_lock_limit: runtime_config.transaction_account_lock_limit,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            accounts_lt_hash: Mutex::new(fields.accounts_lt_hash),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::new(&fields.bank_hash_stats),
            epoch_rewards_calculation_cache: Arc::new(Mutex::new(HashMap::default())),
        };

        // Replace the placeholder processor now that slot/epoch are known.
        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        // TODO: Only create the thread pool if we need to recalculate rewards,
        // i.e. epoch_reward_status is active. Currently, this thread pool is
        // always created and used for recalculate_partitioned_rewards and
        // lt_hash calculation. Once lt_hash feature is active, lt_hash won't
        // need the thread pool. Thereby, after lt_hash feature activation, we
        // can change to create the thread pool only when we need to recalculate
        // rewards.
        let thread_pool = ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewFlds{i:02}"))
            .build()
            .expect("new rayon threadpool");
        bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);

        // Sanity assertions between bank snapshot and genesis config
        // Consider removing from serializable bank state
        // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing
        // from the passed in genesis_config instead (as new()/new_with_paths() already do)
        assert_eq!(
            bank.genesis_creation_time, genesis_config.creation_time,
            "Bank snapshot genesis creation time does not match genesis.bin creation time. The \
             snapshot and genesis.bin might pertain to different clusters"
        );
        assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
        assert_eq!(
            bank.ns_per_slot,
            genesis_config.poh_config.target_tick_duration.as_nanos()
                * genesis_config.ticks_per_slot as u128
        );
        assert_eq!(bank.max_tick_height, (bank.slot + 1) * bank.ticks_per_slot);
        assert_eq!(
            bank.slots_per_year,
            years_as_slots(
                1.0,
                &genesis_config.poh_config.target_tick_duration,
                bank.ticks_per_slot,
            )
        );
        assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule);
        assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot));

        datapoint_info!(
            "bank-new-from-fields",
            (
                "accounts_data_len-from-snapshot",
                fields.accounts_data_len as i64,
                i64
            ),
            (
                "accounts_data_len-from-generate_index",
                accounts_data_size_initial as i64,
                i64
            ),
            (
                "stakes_accounts_load_duration_us",
                stakes_accounts_load_duration.as_micros(),
                i64
            ),
        );
        bank
    }
1897
    /// Return subset of bank fields representing serializable state
    ///
    /// Counterpart of `new_from_fields`; clones the current stakes cache and
    /// reads all atomic counters with relaxed ordering.
    pub(crate) fn get_fields_to_serialize(&self) -> BankFieldsToSerialize {
        BankFieldsToSerialize {
            blockhash_queue: self.blockhash_queue.read().unwrap().clone(),
            ancestors: AncestorsForSerialization::from(&self.ancestors),
            hash: *self.hash.read().unwrap(),
            parent_hash: self.parent_hash,
            parent_slot: self.parent_slot,
            hard_forks: self.hard_forks.read().unwrap().clone(),
            transaction_count: self.transaction_count.load(Relaxed),
            tick_height: self.tick_height.load(Relaxed),
            signature_count: self.signature_count.load(Relaxed),
            capitalization: self.capitalization.load(Relaxed),
            max_tick_height: self.max_tick_height,
            hashes_per_tick: self.hashes_per_tick,
            ticks_per_slot: self.ticks_per_slot,
            ns_per_slot: self.ns_per_slot,
            genesis_creation_time: self.genesis_creation_time,
            slots_per_year: self.slots_per_year,
            slot: self.slot,
            epoch: self.epoch,
            block_height: self.block_height,
            collector_id: self.collector_id,
            collector_fees: self.collector_fees.load(Relaxed),
            fee_rate_governor: self.fee_rate_governor.clone(),
            rent_collector: self.rent_collector.clone(),
            epoch_schedule: self.epoch_schedule.clone(),
            inflation: *self.inflation.read().unwrap(),
            stakes: self.stakes_cache.stakes().clone(),
            is_delta: self.is_delta.load(Relaxed),
            accounts_data_len: self.load_accounts_data_size(),
            versioned_epoch_stakes: self.epoch_stakes.clone(),
            accounts_lt_hash: self.accounts_lt_hash.lock().unwrap().clone(),
        }
    }
1933
    /// Returns the bank's configured collector id.
    pub fn collector_id(&self) -> &Pubkey {
        &self.collector_id
    }
1937
    /// Returns the cluster's genesis creation time, as recorded in the genesis config.
    pub fn genesis_creation_time(&self) -> UnixTimestamp {
        self.genesis_creation_time
    }
1941
    /// Returns the slot this bank applies to.
    pub fn slot(&self) -> Slot {
        self.slot
    }
1945
    /// Returns this bank instance's id.
    pub fn bank_id(&self) -> BankId {
        self.bank_id
    }
1949
    /// Returns the epoch containing this bank's slot.
    pub fn epoch(&self) -> Epoch {
        self.epoch
    }
1953
    /// Returns the first normal-length epoch, per this bank's epoch schedule.
    pub fn first_normal_epoch(&self) -> Epoch {
        self.epoch_schedule().first_normal_epoch
    }
1957
    /// Acquires a read guard on the bank hash.
    ///
    /// NOTE(review): presumably callers hold this guard to keep the hash from
    /// being written (i.e. to delay freezing) while they finish work — confirm
    /// against the freeze path.
    pub fn freeze_lock(&self) -> RwLockReadGuard<Hash> {
        self.hash.read().unwrap()
    }
1961
    /// Returns the current bank hash; `Hash::default()` until the bank is frozen.
    pub fn hash(&self) -> Hash {
        *self.hash.read().unwrap()
    }
1965
    /// True once the bank hash has been set to a non-default value, which
    /// marks the bank as frozen.
    pub fn is_frozen(&self) -> bool {
        *self.hash.read().unwrap() != Hash::default()
    }
1969
    /// Returns whether freezing of this bank has started.
    pub fn freeze_started(&self) -> bool {
        self.freeze_started.load(Relaxed)
    }
1973
1974    pub fn status_cache_ancestors(&self) -> Vec<u64> {
1975        let mut roots = self.status_cache.read().unwrap().roots().clone();
1976        let min = roots.iter().min().cloned().unwrap_or(0);
1977        for ancestor in self.ancestors.keys() {
1978            if ancestor >= min {
1979                roots.insert(ancestor);
1980            }
1981        }
1982
1983        let mut ancestors: Vec<_> = roots.into_iter().collect();
1984        #[allow(clippy::stable_sort_primitive)]
1985        ancestors.sort();
1986        ancestors
1987    }
1988
1989    /// computed unix_timestamp at this slot height
1990    pub fn unix_timestamp_from_genesis(&self) -> i64 {
1991        self.genesis_creation_time.saturating_add(
1992            (self.slot as u128)
1993                .saturating_mul(self.ns_per_slot)
1994                .saturating_div(1_000_000_000) as i64,
1995        )
1996    }
1997
1998    fn update_sysvar_account<F>(&self, pubkey: &Pubkey, updater: F)
1999    where
2000        F: Fn(&Option<AccountSharedData>) -> AccountSharedData,
2001    {
2002        let old_account = self.get_account_with_fixed_root(pubkey);
2003        let mut new_account = updater(&old_account);
2004
2005        // When new sysvar comes into existence (with RENT_UNADJUSTED_INITIAL_BALANCE lamports),
2006        // this code ensures that the sysvar's balance is adjusted to be rent-exempt.
2007        //
2008        // More generally, this code always re-calculates for possible sysvar data size change,
2009        // although there is no such sysvars currently.
2010        self.adjust_sysvar_balance_for_rent(&mut new_account);
2011        self.store_account_and_update_capitalization(pubkey, &new_account);
2012    }
2013
2014    fn inherit_specially_retained_account_fields(
2015        &self,
2016        old_account: &Option<AccountSharedData>,
2017    ) -> InheritableAccountFields {
2018        const RENT_UNADJUSTED_INITIAL_BALANCE: u64 = 1;
2019
2020        (
2021            old_account
2022                .as_ref()
2023                .map(|a| a.lamports())
2024                .unwrap_or(RENT_UNADJUSTED_INITIAL_BALANCE),
2025            old_account
2026                .as_ref()
2027                .map(|a| a.rent_epoch())
2028                .unwrap_or(INITIAL_RENT_EPOCH),
2029        )
2030    }
2031
2032    pub fn clock(&self) -> sysvar::clock::Clock {
2033        from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
2034            .unwrap_or_default()
2035    }
2036
2037    fn update_clock(&self, parent_epoch: Option<Epoch>) {
2038        let mut unix_timestamp = self.clock().unix_timestamp;
2039        // set epoch_start_timestamp to None to warp timestamp
2040        let epoch_start_timestamp = {
2041            let epoch = if let Some(epoch) = parent_epoch {
2042                epoch
2043            } else {
2044                self.epoch()
2045            };
2046            let first_slot_in_epoch = self.epoch_schedule().get_first_slot_in_epoch(epoch);
2047            Some((first_slot_in_epoch, self.clock().epoch_start_timestamp))
2048        };
2049        let max_allowable_drift = MaxAllowableDrift {
2050            fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST,
2051            slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
2052        };
2053
2054        let ancestor_timestamp = self.clock().unix_timestamp;
2055        if let Some(timestamp_estimate) =
2056            self.get_timestamp_estimate(max_allowable_drift, epoch_start_timestamp)
2057        {
2058            unix_timestamp = timestamp_estimate;
2059            if timestamp_estimate < ancestor_timestamp {
2060                unix_timestamp = ancestor_timestamp;
2061            }
2062        }
2063        datapoint_info!(
2064            "bank-timestamp-correction",
2065            ("slot", self.slot(), i64),
2066            ("from_genesis", self.unix_timestamp_from_genesis(), i64),
2067            ("corrected", unix_timestamp, i64),
2068            ("ancestor_timestamp", ancestor_timestamp, i64),
2069        );
2070        let mut epoch_start_timestamp =
2071            // On epoch boundaries, update epoch_start_timestamp
2072            if parent_epoch.is_some() && parent_epoch.unwrap() != self.epoch() {
2073                unix_timestamp
2074            } else {
2075                self.clock().epoch_start_timestamp
2076            };
2077        if self.slot == 0 {
2078            unix_timestamp = self.unix_timestamp_from_genesis();
2079            epoch_start_timestamp = self.unix_timestamp_from_genesis();
2080        }
2081        let clock = sysvar::clock::Clock {
2082            slot: self.slot,
2083            epoch_start_timestamp,
2084            epoch: self.epoch_schedule().get_epoch(self.slot),
2085            leader_schedule_epoch: self.epoch_schedule().get_leader_schedule_epoch(self.slot),
2086            unix_timestamp,
2087        };
2088        self.update_sysvar_account(&sysvar::clock::id(), |account| {
2089            create_account(
2090                &clock,
2091                self.inherit_specially_retained_account_fields(account),
2092            )
2093        });
2094    }
2095
2096    pub fn update_last_restart_slot(&self) {
2097        let feature_flag = self
2098            .feature_set
2099            .is_active(&feature_set::last_restart_slot_sysvar::id());
2100
2101        if feature_flag {
2102            // First, see what the currently stored last restart slot is. This
2103            // account may not exist yet if the feature was just activated.
2104            let current_last_restart_slot = self
2105                .get_account(&sysvar::last_restart_slot::id())
2106                .and_then(|account| {
2107                    let lrs: Option<LastRestartSlot> = from_account(&account);
2108                    lrs
2109                })
2110                .map(|account| account.last_restart_slot);
2111
2112            let last_restart_slot = {
2113                let slot = self.slot;
2114                let hard_forks_r = self.hard_forks.read().unwrap();
2115
2116                // Only consider hard forks <= this bank's slot to avoid prematurely applying
2117                // a hard fork that is set to occur in the future.
2118                hard_forks_r
2119                    .iter()
2120                    .rev()
2121                    .find(|(hard_fork, _)| *hard_fork <= slot)
2122                    .map(|(slot, _)| *slot)
2123                    .unwrap_or(0)
2124            };
2125
2126            // Only need to write if the last restart has changed
2127            if current_last_restart_slot != Some(last_restart_slot) {
2128                self.update_sysvar_account(&sysvar::last_restart_slot::id(), |account| {
2129                    create_account(
2130                        &LastRestartSlot { last_restart_slot },
2131                        self.inherit_specially_retained_account_fields(account),
2132                    )
2133                });
2134            }
2135        }
2136    }
2137
    /// Test-only helper: overwrites the account for sysvar `T` with `sysvar`'s
    /// serialized state, then rebuilds the transaction processor's sysvar cache.
    pub fn set_sysvar_for_tests<T>(&self, sysvar: &T)
    where
        T: SysvarSerialize + SysvarId,
    {
        self.update_sysvar_account(&T::id(), |account| {
            create_account(
                sysvar,
                self.inherit_specially_retained_account_fields(account),
            )
        });
        // Simply force fill sysvar cache rather than checking which sysvar was
        // actually updated since tests don't need to be optimized for performance.
        self.transaction_processor.reset_sysvar_cache();
        self.transaction_processor
            .fill_missing_sysvar_cache_entries(self);
    }
2154
2155    fn update_slot_history(&self) {
2156        self.update_sysvar_account(&sysvar::slot_history::id(), |account| {
2157            let mut slot_history = account
2158                .as_ref()
2159                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
2160                .unwrap_or_default();
2161            slot_history.add(self.slot());
2162            create_account(
2163                &slot_history,
2164                self.inherit_specially_retained_account_fields(account),
2165            )
2166        });
2167    }
2168
2169    fn update_slot_hashes(&self) {
2170        self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| {
2171            let mut slot_hashes = account
2172                .as_ref()
2173                .map(|account| from_account::<SlotHashes, _>(account).unwrap())
2174                .unwrap_or_default();
2175            slot_hashes.add(self.parent_slot, self.parent_hash);
2176            create_account(
2177                &slot_hashes,
2178                self.inherit_specially_retained_account_fields(account),
2179            )
2180        });
2181    }
2182
2183    pub fn get_slot_history(&self) -> SlotHistory {
2184        from_account(&self.get_account(&sysvar::slot_history::id()).unwrap()).unwrap()
2185    }
2186
    /// Caches the epoch stakes for `leader_schedule_epoch` if not already present.
    ///
    /// Prunes entries more than `MAX_LEADER_SCHEDULE_STAKES` epochs behind the
    /// new epoch, then snapshots the current stakes cache under the new epoch key.
    fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
        // update epoch_stakes cache
        //  if my parent didn't populate for this staker's epoch, we've
        //  crossed a boundary
        if !self.epoch_stakes.contains_key(&leader_schedule_epoch) {
            // Drop stale entries so the map stays bounded.
            self.epoch_stakes.retain(|&epoch, _| {
                epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
            });
            // Snapshot the live stakes cache for this epoch's leader schedule.
            let stakes = self.stakes_cache.stakes().clone();
            let stakes = SerdeStakesToStakeFormat::from(stakes);
            let new_epoch_stakes = VersionedEpochStakes::new(stakes, leader_schedule_epoch);
            info!(
                "new epoch stakes, epoch: {}, total_stake: {}",
                leader_schedule_epoch,
                new_epoch_stakes.total_stake(),
            );

            // It is expensive to log the details of epoch stakes. Only log them at "trace"
            // level for debugging purpose.
            if log::log_enabled!(log::Level::Trace) {
                let vote_stakes: HashMap<_, _> = self
                    .stakes_cache
                    .stakes()
                    .vote_accounts()
                    .delegated_stakes()
                    .map(|(pubkey, stake)| (*pubkey, stake))
                    .collect();
                trace!("new epoch stakes, stakes: {vote_stakes:#?}");
            }
            self.epoch_stakes
                .insert(leader_schedule_epoch, new_epoch_stakes);
        }
    }
2220
    /// Test-only: directly installs `stakes` as the epoch stakes for `epoch`,
    /// bypassing the normal epoch-boundary snapshot in `update_epoch_stakes`.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn set_epoch_stakes_for_test(&mut self, epoch: Epoch, stakes: VersionedEpochStakes) {
        self.epoch_stakes.insert(epoch, stakes);
    }
2225
2226    fn update_rent(&self) {
2227        self.update_sysvar_account(&sysvar::rent::id(), |account| {
2228            create_account(
2229                &self.rent_collector.rent,
2230                self.inherit_specially_retained_account_fields(account),
2231            )
2232        });
2233    }
2234
2235    fn update_epoch_schedule(&self) {
2236        self.update_sysvar_account(&sysvar::epoch_schedule::id(), |account| {
2237            create_account(
2238                self.epoch_schedule(),
2239                self.inherit_specially_retained_account_fields(account),
2240            )
2241        });
2242    }
2243
2244    fn update_stake_history(&self, epoch: Option<Epoch>) {
2245        if epoch == Some(self.epoch()) {
2246            return;
2247        }
2248        // if I'm the first Bank in an epoch, ensure stake_history is updated
2249        self.update_sysvar_account(&stake_history::id(), |account| {
2250            create_account::<StakeHistory>(
2251                self.stakes_cache.stakes().history(),
2252                self.inherit_specially_retained_account_fields(account),
2253            )
2254        });
2255    }
2256
2257    pub fn epoch_duration_in_years(&self, prev_epoch: Epoch) -> f64 {
2258        // period: time that has passed as a fraction of a year, basically the length of
2259        //  an epoch as a fraction of a year
2260        //  calculated as: slots_elapsed / (slots / year)
2261        self.epoch_schedule().get_slots_in_epoch(prev_epoch) as f64 / self.slots_per_year
2262    }
2263
2264    // Calculates the starting-slot for inflation from the activation slot.
2265    // This method assumes that `pico_inflation` will be enabled before `full_inflation`, giving
2266    // precedence to the latter. However, since `pico_inflation` is fixed-rate Inflation, should
2267    // `pico_inflation` be enabled 2nd, the incorrect start slot provided here should have no
2268    // effect on the inflation calculation.
2269    fn get_inflation_start_slot(&self) -> Slot {
2270        let mut slots = self
2271            .feature_set
2272            .full_inflation_features_enabled()
2273            .iter()
2274            .filter_map(|id| self.feature_set.activated_slot(id))
2275            .collect::<Vec<_>>();
2276        slots.sort_unstable();
2277        slots.first().cloned().unwrap_or_else(|| {
2278            self.feature_set
2279                .activated_slot(&feature_set::pico_inflation::id())
2280                .unwrap_or(0)
2281        })
2282    }
2283
    /// Returns the number of slots between the (normalized) inflation start and
    /// the first slot of the current epoch.
    ///
    /// NOTE(review): the final subtraction assumes the inflation start slot never
    /// exceeds the current epoch's first slot — appears guaranteed by the
    /// `saturating_sub(1)` normalization below, but confirm against callers.
    fn get_inflation_num_slots(&self) -> u64 {
        let inflation_activation_slot = self.get_inflation_start_slot();
        // Normalize inflation_start to align with the start of rewards accrual.
        // The start is pulled back to the first slot of the epoch *before* the
        // activation epoch.
        let inflation_start_slot = self.epoch_schedule().get_first_slot_in_epoch(
            self.epoch_schedule()
                .get_epoch(inflation_activation_slot)
                .saturating_sub(1),
        );
        self.epoch_schedule().get_first_slot_in_epoch(self.epoch()) - inflation_start_slot
    }
2294
2295    pub fn slot_in_year_for_inflation(&self) -> f64 {
2296        let num_slots = self.get_inflation_num_slots();
2297
2298        // calculated as: num_slots / (slots / year)
2299        num_slots as f64 / self.slots_per_year
2300    }
2301
2302    fn calculate_previous_epoch_inflation_rewards(
2303        &self,
2304        prev_epoch_capitalization: u64,
2305        prev_epoch: Epoch,
2306    ) -> PrevEpochInflationRewards {
2307        let slot_in_year = self.slot_in_year_for_inflation();
2308        let (validator_rate, foundation_rate) = {
2309            let inflation = self.inflation.read().unwrap();
2310            (
2311                (*inflation).validator(slot_in_year),
2312                (*inflation).foundation(slot_in_year),
2313            )
2314        };
2315
2316        let prev_epoch_duration_in_years = self.epoch_duration_in_years(prev_epoch);
2317        let validator_rewards = (validator_rate
2318            * prev_epoch_capitalization as f64
2319            * prev_epoch_duration_in_years) as u64;
2320
2321        PrevEpochInflationRewards {
2322            validator_rewards,
2323            prev_epoch_duration_in_years,
2324            validator_rate,
2325            foundation_rate,
2326        }
2327    }
2328
    /// Returns the stake delegations eligible for rewards.
    ///
    /// When the `stake_minimum_delegation_for_rewards` feature is active,
    /// delegations below the minimum (at least 1 SOL) are filtered out and the
    /// filtering cost is reported via a datapoint; otherwise all delegations
    /// are returned unfiltered.
    fn filter_stake_delegations<'a>(
        &self,
        stakes: &'a Stakes<StakeAccount<Delegation>>,
    ) -> Vec<(&'a Pubkey, &'a StakeAccount<Delegation>)> {
        if self
            .feature_set
            .is_active(&feature_set::stake_minimum_delegation_for_rewards::id())
        {
            let num_stake_delegations = stakes.stake_delegations().len();
            // Program minimum, clamped up to at least 1 SOL.
            let min_stake_delegation = solana_stake_program::get_minimum_delegation(
                self.feature_set
                    .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()),
            )
            .max(LAMPORTS_PER_SOL);

            let (stake_delegations, filter_time_us) = measure_us!(stakes
                .stake_delegations()
                .iter()
                .filter(|(_stake_pubkey, cached_stake_account)| {
                    cached_stake_account.delegation().stake >= min_stake_delegation
                })
                .collect::<Vec<_>>());

            datapoint_info!(
                "stake_account_filter_time",
                ("filter_time_us", filter_time_us, i64),
                ("num_stake_delegations_before", num_stake_delegations, i64),
                ("num_stake_delegations_after", stake_delegations.len(), i64)
            );
            stake_delegations
        } else {
            stakes.stake_delegations().iter().collect()
        }
    }
2363
2364    /// Convert computed VoteRewards to VoteRewardsAccounts for storing.
2365    ///
2366    /// This function processes vote rewards and consolidates them into a single
2367    /// structure containing the pubkey, reward info, and updated account data
2368    /// for each vote account. The resulting structure is optimized for storage
2369    /// by combining previously separate rewards and accounts vectors into a
2370    /// single accounts_with_rewards vector.
2371    fn calc_vote_accounts_to_store(vote_account_rewards: VoteRewards) -> VoteRewardsAccounts {
2372        let len = vote_account_rewards.len();
2373        let mut result = VoteRewardsAccounts {
2374            accounts_with_rewards: Vec::with_capacity(len),
2375            total_vote_rewards_lamports: 0,
2376        };
2377        vote_account_rewards.into_iter().for_each(
2378            |(
2379                vote_pubkey,
2380                VoteReward {
2381                    mut vote_account,
2382                    commission,
2383                    vote_rewards,
2384                },
2385            )| {
2386                if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
2387                    debug!("reward redemption failed for {vote_pubkey}: {err:?}");
2388                    return;
2389                }
2390
2391                result.accounts_with_rewards.push((
2392                    vote_pubkey,
2393                    RewardInfo {
2394                        reward_type: RewardType::Voting,
2395                        lamports: vote_rewards as i64,
2396                        post_balance: vote_account.lamports(),
2397                        commission: Some(commission),
2398                    },
2399                    vote_account,
2400                ));
2401                result.total_vote_rewards_lamports += vote_rewards;
2402            },
2403        );
2404        result
2405    }
2406
2407    fn update_vote_rewards(&self, vote_rewards: &VoteRewardsAccounts) {
2408        let mut rewards = self.rewards.write().unwrap();
2409        rewards.reserve(vote_rewards.accounts_with_rewards.len());
2410        vote_rewards
2411            .accounts_with_rewards
2412            .iter()
2413            .for_each(|(vote_pubkey, vote_reward, _)| {
2414                rewards.push((*vote_pubkey, *vote_reward));
2415            });
2416    }
2417
    /// Rewrites the (deprecated) `RecentBlockhashes` sysvar from an
    /// already-locked blockhash queue, so callers can update it while holding
    /// the queue's write lock.
    fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
        // The sysvar itself is deprecated, but it must still be maintained.
        #[allow(deprecated)]
        self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {
            let recent_blockhash_iter = locked_blockhash_queue.get_recent_blockhashes();
            recent_blockhashes_account::create_account_with_data_and_fields(
                recent_blockhash_iter,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2428
2429    pub fn update_recent_blockhashes(&self) {
2430        let blockhash_queue = self.blockhash_queue.read().unwrap();
2431        self.update_recent_blockhashes_locked(&blockhash_queue);
2432    }
2433
    /// Estimates this slot's wall-clock time from recent vote-account
    /// timestamps, weighted by epoch stake.
    ///
    /// Only vote timestamps from within the last epoch's worth of slots are
    /// considered. Returns `None` when epoch vote accounts are unavailable or
    /// no weighted estimate can be produced.
    fn get_timestamp_estimate(
        &self,
        max_allowable_drift: MaxAllowableDrift,
        epoch_start_timestamp: Option<(Slot, UnixTimestamp)>,
    ) -> Option<UnixTimestamp> {
        let mut get_timestamp_estimate_time = Measure::start("get_timestamp_estimate");
        let slots_per_epoch = self.epoch_schedule().slots_per_epoch;
        let vote_accounts = self.vote_accounts();
        // Collect (pubkey -> (slot, timestamp)) for votes no older than one epoch.
        let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| {
            let vote_state = account.vote_state_view();
            let last_timestamp = vote_state.last_timestamp();
            // `checked_sub` drops votes claiming a slot ahead of this bank.
            let slot_delta = self.slot().checked_sub(last_timestamp.slot)?;
            (slot_delta <= slots_per_epoch)
                .then_some((*pubkey, (last_timestamp.slot, last_timestamp.timestamp)))
        });
        let slot_duration = Duration::from_nanos(self.ns_per_slot as u64);
        let epoch = self.epoch_schedule().get_epoch(self.slot());
        let stakes = self.epoch_vote_accounts(epoch)?;
        let stake_weighted_timestamp = calculate_stake_weighted_timestamp(
            recent_timestamps,
            stakes,
            self.slot(),
            slot_duration,
            epoch_start_timestamp,
            max_allowable_drift,
            self.feature_set
                .is_active(&feature_set::warp_timestamp_again::id()),
        );
        get_timestamp_estimate_time.stop();
        datapoint_info!(
            "bank-timestamp",
            (
                "get_timestamp_estimate_us",
                get_timestamp_estimate_time.as_us(),
                i64
            ),
        );
        stake_weighted_timestamp
    }
2473
2474    /// Recalculates the bank hash
2475    ///
2476    /// This is used by ledger-tool when creating a snapshot, which
2477    /// recalcuates the bank hash.
2478    ///
2479    /// Note that the account state is *not* allowed to change by rehashing.
2480    /// If modifying accounts in ledger-tool is needed, create a new bank.
2481    pub fn rehash(&self) {
2482        let mut hash = self.hash.write().unwrap();
2483        let new = self.hash_internal_state();
2484        if new != *hash {
2485            warn!("Updating bank hash to {new}");
2486            *hash = new;
2487        }
2488    }
2489
    /// Finalizes the bank: applies deferred state changes (fee distribution,
    /// slot history, incinerator), computes the bank hash, and marks the slot
    /// frozen. Idempotent — only the first call (while `self.hash` is still
    /// `Hash::default()`) has any effect.
    pub fn freeze(&self) {
        // This lock prevents any new commits from BankingStage
        // `Consumer::execute_and_commit_transactions_locked()` from
        // coming in after the last tick is observed. This is because in
        // BankingStage, any transaction successfully recorded in
        // `record_transactions()` is recorded after this `hash` lock
        // is grabbed. At the time of the successful record,
        // this means the PoH has not yet reached the last tick,
        // so this means freeze() hasn't been called yet. And because
        // BankingStage doesn't release this hash lock until both
        // record and commit are finished, those transactions will be
        // committed before this write lock can be obtained here.
        let mut hash = self.hash.write().unwrap();
        if *hash == Hash::default() {
            // finish up any deferred changes to account state
            self.distribute_transaction_fee_details();
            self.update_slot_history();
            self.run_incinerator();

            // freeze is a one-way trip, idempotent
            self.freeze_started.store(true, Relaxed);
            // updating the accounts lt hash must happen *outside* of hash_internal_state() so
            // that rehash() can be called and *not* modify self.accounts_lt_hash.
            self.update_accounts_lt_hash();
            *hash = self.hash_internal_state();
            self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
        }
    }
2518
    // dangerous; don't use this; this is only needed for ledger-tool's special command
    /// Clears the `freeze_started` flag so a frozen bank accepts changes again.
    /// Note this does NOT reset `self.hash`, so `freeze()` remains a no-op.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn unfreeze_for_ledger_tool(&self) {
        self.freeze_started.store(false, Relaxed);
    }
2524
    /// Returns this bank's epoch schedule (set from the genesis config).
    pub fn epoch_schedule(&self) -> &EpochSchedule {
        &self.epoch_schedule
    }
2528
2529    /// squash the parent's state up into this Bank,
2530    ///   this Bank becomes a root
2531    /// Note that this function is not thread-safe. If it is called concurrently on the same bank
2532    /// by multiple threads, the end result could be inconsistent.
2533    /// Calling code does not currently call this concurrently.
2534    pub fn squash(&self) -> SquashTiming {
2535        self.freeze();
2536
2537        //this bank and all its parents are now on the rooted path
2538        let mut roots = vec![self.slot()];
2539        roots.append(&mut self.parents().iter().map(|p| p.slot()).collect());
2540
2541        let mut total_index_us = 0;
2542        let mut total_cache_us = 0;
2543        let mut total_store_us = 0;
2544
2545        let mut squash_accounts_time = Measure::start("squash_accounts_time");
2546        for slot in roots.iter().rev() {
2547            // root forks cannot be purged
2548            let add_root_timing = self.rc.accounts.add_root(*slot);
2549            total_index_us += add_root_timing.index_us;
2550            total_cache_us += add_root_timing.cache_us;
2551            total_store_us += add_root_timing.store_us;
2552        }
2553        squash_accounts_time.stop();
2554
2555        *self.rc.parent.write().unwrap() = None;
2556
2557        let mut squash_cache_time = Measure::start("squash_cache_time");
2558        roots
2559            .iter()
2560            .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot));
2561        squash_cache_time.stop();
2562
2563        SquashTiming {
2564            squash_accounts_ms: squash_accounts_time.as_ms(),
2565            squash_accounts_index_ms: total_index_us / 1000,
2566            squash_accounts_cache_ms: total_cache_us / 1000,
2567            squash_accounts_store_ms: total_store_us / 1000,
2568
2569            squash_cache_ms: squash_cache_time.as_ms(),
2570        }
2571    }
2572
    /// Return the more recent checkpoint of this bank instance.
    /// `None` once this bank has been squashed into a root.
    pub fn parent(&self) -> Option<Arc<Bank>> {
        self.rc.parent.read().unwrap().clone()
    }
2577
    /// Returns the slot of this bank's parent.
    pub fn parent_slot(&self) -> Slot {
        self.parent_slot
    }
2581
    /// Returns the bank hash of this bank's parent.
    pub fn parent_hash(&self) -> Hash {
        self.parent_hash
    }
2585
    /// Initializes this bank's state from the genesis config: stores genesis
    /// accounts and rewards pools, selects the collector id, seeds the
    /// blockhash queue, and copies over timing/epoch/rent/inflation settings.
    ///
    /// Panics if any pubkey appears twice in the genesis accounts, or if no
    /// staked node exists to act as collector (outside of tests).
    fn process_genesis_config(
        &mut self,
        genesis_config: &GenesisConfig,
        #[cfg(feature = "dev-context-only-utils")] collector_id_for_tests: Option<Pubkey>,
        #[cfg(feature = "dev-context-only-utils")] genesis_hash: Option<Hash>,
    ) {
        // Bootstrap validator collects fees until `new_from_parent` is called.
        self.fee_rate_governor = genesis_config.fee_rate_governor.clone();

        // Genesis accounts count toward capitalization.
        for (pubkey, account) in genesis_config.accounts.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.capitalization.fetch_add(account.lamports(), Relaxed);
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // Rewards pools are stored but intentionally excluded from capitalization.
        for (pubkey, account) in genesis_config.rewards_pools.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // After storing genesis accounts, the bank stakes cache will be warmed
        // up and can be used to set the collector id to the highest staked
        // node. If no staked nodes exist, allow fallback to an unstaked test
        // collector id during tests.
        let collector_id = self.stakes_cache.stakes().highest_staked_node().copied();
        #[cfg(feature = "dev-context-only-utils")]
        let collector_id = collector_id.or(collector_id_for_tests);
        self.collector_id =
            collector_id.expect("genesis processing failed because no staked nodes exist");

        // Tests may override the genesis hash; production always derives it
        // from the genesis config.
        #[cfg(not(feature = "dev-context-only-utils"))]
        let genesis_hash = genesis_config.hash();
        #[cfg(feature = "dev-context-only-utils")]
        let genesis_hash = genesis_hash.unwrap_or(genesis_config.hash());

        self.blockhash_queue.write().unwrap().genesis_hash(
            &genesis_hash,
            genesis_config.fee_rate_governor.lamports_per_signature,
        );

        self.hashes_per_tick = genesis_config.hashes_per_tick();
        self.ticks_per_slot = genesis_config.ticks_per_slot();
        self.ns_per_slot = genesis_config.ns_per_slot();
        self.genesis_creation_time = genesis_config.creation_time;
        self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
        self.slots_per_year = genesis_config.slots_per_year();

        self.epoch_schedule = genesis_config.epoch_schedule.clone();

        self.inflation = Arc::new(RwLock::new(genesis_config.inflation));

        self.rent_collector = RentCollector::new(
            self.epoch,
            self.epoch_schedule().clone(),
            self.slots_per_year,
            genesis_config.rent.clone(),
        );

        // Add additional builtin programs specified in the genesis config
        for (name, program_id) in &genesis_config.native_instruction_processors {
            self.add_builtin_account(name, program_id);
        }
    }
2657
    /// Burns `account`'s lamports (removing them from capitalization), zeroes
    /// its data, and stores the emptied account at `program_id` so AccountsDb
    /// and the Stakes cache fully purge it.
    fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) {
        let old_data_size = account.data().len();
        // Capitalization must shrink by the burned lamports.
        self.capitalization.fetch_sub(account.lamports(), Relaxed);
        // Both resetting account balance to 0 and zeroing the account data
        // is needed to really purge from AccountsDb and flush the Stakes cache
        account.set_lamports(0);
        account.data_as_mut_slice().fill(0);
        self.store_account(program_id, &account);
        // Data shrank from old_data_size to 0; record the delta.
        self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, 0);
    }
2668
    /// Add a precompiled program account
    /// (owned by the native loader; see `add_precompiled_account_with_owner`).
    pub fn add_precompiled_account(&self, program_id: &Pubkey) {
        self.add_precompiled_account_with_owner(program_id, native_loader::id())
    }
2673
    // Used by tests to simulate clusters with precompiles that aren't owned by the native loader
    /// Installs a bogus executable account at `program_id` (the precompile is
    /// dispatched natively; the account only needs to exist and be executable).
    /// Purges any non-executable account squatting at that address first.
    /// Panics if the bank has already started freezing.
    fn add_precompiled_account_with_owner(&self, program_id: &Pubkey, owner: Pubkey) {
        if let Some(account) = self.get_account_with_fixed_root(program_id) {
            if account.executable() {
                // Already installed — nothing to do.
                return;
            } else {
                // malicious account is pre-occupying at program_id
                self.burn_and_purge_account(program_id, account);
            }
        };

        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new precompiled program \
             ({program_id}). Maybe, inconsistent program activation is detected on snapshot \
             restore?"
        );

        // Add a bogus executable account, which will be loaded and ignored.
        let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None);

        let account = AccountSharedData::from(Account {
            lamports,
            owner,
            data: vec![],
            executable: true,
            rent_epoch,
        });
        self.store_account_and_update_capitalization(program_id, &account);
    }
2704
    /// Overrides the percentage of collected rent that is burned.
    pub fn set_rent_burn_percentage(&mut self, burn_percent: u8) {
        self.rent_collector.rent.burn_percent = burn_percent;
    }
2708
    /// Overrides the PoH hashes-per-tick setting (`None` disables hashing).
    pub fn set_hashes_per_tick(&mut self, hashes_per_tick: Option<u64>) {
        self.hashes_per_tick = hashes_per_tick;
    }
2712
    /// Return the last block hash registered.
    /// Takes the blockhash queue's read lock for the duration of the call.
    pub fn last_blockhash(&self) -> Hash {
        self.blockhash_queue.read().unwrap().last_hash()
    }
2717
2718    pub fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
2719        let blockhash_queue = self.blockhash_queue.read().unwrap();
2720        let last_hash = blockhash_queue.last_hash();
2721        let last_lamports_per_signature = blockhash_queue
2722            .get_lamports_per_signature(&last_hash)
2723            .unwrap(); // safe so long as the BlockhashQueue is consistent
2724        (last_hash, last_lamports_per_signature)
2725    }
2726
2727    pub fn is_blockhash_valid(&self, hash: &Hash) -> bool {
2728        let blockhash_queue = self.blockhash_queue.read().unwrap();
2729        blockhash_queue.is_hash_valid_for_age(hash, MAX_PROCESSING_AGE)
2730    }
2731
    /// Returns the minimum balance for an account of `data_len` bytes to be
    /// rent exempt; clamped to at least 1 lamport.
    pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 {
        self.rent_collector.rent.minimum_balance(data_len).max(1)
    }
2735
    /// Returns the current fee-rate governor's lamports-per-signature.
    pub fn get_lamports_per_signature(&self) -> u64 {
        self.fee_rate_governor.lamports_per_signature
    }
2739
    /// Returns the lamports-per-signature fee that was in effect when `hash`
    /// was registered, or `None` if `hash` is not in the blockhash queue.
    pub fn get_lamports_per_signature_for_blockhash(&self, hash: &Hash) -> Option<u64> {
        let blockhash_queue = self.blockhash_queue.read().unwrap();
        blockhash_queue.get_lamports_per_signature(hash)
    }
2744
2745    pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option<u64> {
2746        let lamports_per_signature = {
2747            let blockhash_queue = self.blockhash_queue.read().unwrap();
2748            blockhash_queue.get_lamports_per_signature(message.recent_blockhash())
2749        }
2750        .or_else(|| {
2751            self.load_message_nonce_account(message).map(
2752                |(_nonce_address, _nonce_account, nonce_data)| {
2753                    nonce_data.get_lamports_per_signature()
2754                },
2755            )
2756        })?;
2757        Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature))
2758    }
2759
    /// Returns true when startup accounts hash verification has completed or never had to run in background.
    /// (Returned as a shared flag so callers can poll it without holding the bank.)
    pub fn get_startup_verification_complete(&self) -> &Arc<AtomicBool> {
        &self
            .rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verified
    }
2769
2770    pub fn get_fee_for_message_with_lamports_per_signature(
2771        &self,
2772        message: &impl SVMMessage,
2773        lamports_per_signature: u64,
2774    ) -> u64 {
2775        let fee_budget_limits = FeeBudgetLimits::from(
2776            process_compute_budget_instructions(
2777                message.program_instructions_iter(),
2778                &self.feature_set,
2779            )
2780            .unwrap_or_default(),
2781        );
2782        solana_fee::calculate_fee(
2783            message,
2784            lamports_per_signature == 0,
2785            self.fee_structure().lamports_per_signature,
2786            fee_budget_limits.prioritization_fee,
2787            FeeFeatures::from(self.feature_set.as_ref()),
2788        )
2789    }
2790
    /// Returns the last block height at which `blockhash` will still be
    /// accepted, or `None` if it is no longer in the blockhash queue.
    pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option<Slot> {
        let blockhash_queue = self.blockhash_queue.read().unwrap();
        // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
        // length is made variable by epoch
        blockhash_queue
            .get_hash_age(blockhash)
            .map(|age| self.block_height + MAX_PROCESSING_AGE as u64 - age)
    }
2799
2800    pub fn confirmed_last_blockhash(&self) -> Hash {
2801        const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3;
2802
2803        let parents = self.parents();
2804        if parents.is_empty() {
2805            self.last_blockhash()
2806        } else {
2807            let index = NUM_BLOCKHASH_CONFIRMATIONS.min(parents.len() - 1);
2808            parents[index].last_blockhash()
2809        }
2810    }
2811
    /// Forget all signatures. Useful for benchmarking.
    /// Clears the entire status cache shared across this bank's fork.
    pub fn clear_signatures(&self) {
        self.status_cache.write().unwrap().clear();
    }
2816
    /// Removes all status-cache entries recorded for `slot`.
    pub fn clear_slot_signatures(&self, slot: Slot) {
        self.status_cache.write().unwrap().clear_slot_entries(slot);
    }
2820
2821    fn update_transaction_statuses(
2822        &self,
2823        sanitized_txs: &[impl TransactionWithMeta],
2824        processing_results: &[TransactionProcessingResult],
2825    ) {
2826        let mut status_cache = self.status_cache.write().unwrap();
2827        assert_eq!(sanitized_txs.len(), processing_results.len());
2828        for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) {
2829            if let Ok(processed_tx) = &processing_result {
2830                // Add the message hash to the status cache to ensure that this message
2831                // won't be processed again with a different signature.
2832                status_cache.insert(
2833                    tx.recent_blockhash(),
2834                    tx.message_hash(),
2835                    self.slot(),
2836                    processed_tx.status(),
2837                );
2838                // Add the transaction signature to the status cache so that transaction status
2839                // can be queried by transaction signature over RPC. In the future, this should
2840                // only be added for API nodes because voting validators don't need to do this.
2841                status_cache.insert(
2842                    tx.recent_blockhash(),
2843                    tx.signature(),
2844                    self.slot(),
2845                    processed_tx.status(),
2846                );
2847            }
2848        }
2849    }
2850
    /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank
    /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction
    /// processing without advancing to a new bank slot.
    fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        // This is needed because recent_blockhash updates necessitate synchronizations for
        // consistent tx check_age handling.
        BankWithScheduler::wait_for_paused_scheduler(self, scheduler);

        // Only acquire the write lock for the blockhash queue on block boundaries because
        // readers can starve this write lock acquisition and ticks would be slowed down too
        // much if the write lock is acquired for each tick.
        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();

        // Dev-only hook: a per-slot override may replace the blockhash being
        // registered (used for deterministic replay in tests/tools).
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_blockhash_override(self.slot())
            .copied()
            .inspect(|blockhash_override| {
                if blockhash_override != blockhash {
                    info!(
                        "bank: slot: {}: overrode blockhash: {} with {}",
                        self.slot(),
                        blockhash,
                        blockhash_override
                    );
                }
            });
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash = blockhash_override.as_ref().unwrap_or(blockhash);

        w_blockhash_queue.register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
        // Keep the (deprecated) RecentBlockhashes sysvar in sync while still
        // holding the queue's write lock.
        self.update_recent_blockhashes_locked(&w_blockhash_queue);
    }
2887
    // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to
    // solana-program-test's usage...
    /// Registers a freshly generated unique blockhash; test helper.
    pub fn register_unique_recent_blockhash_for_test(&self) {
        self.register_recent_blockhash(
            &Hash::new_unique(),
            &BankWithScheduler::no_scheduler_available(),
        )
    }
2896
2897    #[cfg(feature = "dev-context-only-utils")]
2898    pub fn register_recent_blockhash_for_test(
2899        &self,
2900        blockhash: &Hash,
2901        lamports_per_signature: Option<u64>,
2902    ) {
2903        // Only acquire the write lock for the blockhash queue on block boundaries because
2904        // readers can starve this write lock acquisition and ticks would be slowed down too
2905        // much if the write lock is acquired for each tick.
2906        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
2907        if let Some(lamports_per_signature) = lamports_per_signature {
2908            w_blockhash_queue.register_hash(blockhash, lamports_per_signature);
2909        } else {
2910            w_blockhash_queue
2911                .register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
2912        }
2913    }
2914
2915    /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls
2916    /// correspond to later entries, and will boot the oldest ones once its internal cache is full.
2917    /// Once boot, the bank will reject transactions using that `hash`.
2918    ///
2919    /// This is NOT thread safe because if tick height is updated by two different threads, the
2920    /// block boundary condition could be missed.
2921    pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) {
2922        assert!(
2923            !self.freeze_started(),
2924            "register_tick() working on a bank that is already frozen or is undergoing freezing!"
2925        );
2926
2927        if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
2928            self.register_recent_blockhash(hash, scheduler);
2929        }
2930
2931        // ReplayStage will start computing the accounts delta hash when it
2932        // detects the tick height has reached the boundary, so the system
2933        // needs to guarantee all account updates for the slot have been
2934        // committed before this tick height is incremented (like the blockhash
2935        // sysvar above)
2936        self.tick_height.fetch_add(1, Relaxed);
2937    }
2938
    #[cfg(feature = "dev-context-only-utils")]
    // Test helper: registers a tick with no scheduler attached.
    pub fn register_tick_for_test(&self, hash: &Hash) {
        self.register_tick(hash, &BankWithScheduler::no_scheduler_available())
    }
2943
    #[cfg(feature = "dev-context-only-utils")]
    // Test helper: registers a tick using the all-zeros default hash.
    pub fn register_default_tick_for_test(&self) {
        self.register_tick_for_test(&Hash::default())
    }
2948
    /// Returns true once the bank's tick height has reached its max tick
    /// height, i.e. all ticks for the slot have been registered.
    pub fn is_complete(&self) -> bool {
        self.tick_height() == self.max_tick_height()
    }
2952
    /// Returns true if `tick_height` equals this bank's max tick height,
    /// i.e. it is the final tick of the slot.
    pub fn is_block_boundary(&self, tick_height: u64) -> bool {
        tick_height == self.max_tick_height
    }
2956
2957    /// Get the max number of accounts that a transaction may lock in this block
2958    pub fn get_transaction_account_lock_limit(&self) -> usize {
2959        if let Some(transaction_account_lock_limit) = self.transaction_account_lock_limit {
2960            transaction_account_lock_limit
2961        } else if self
2962            .feature_set
2963            .is_active(&feature_set::increase_tx_account_lock_limit::id())
2964        {
2965            MAX_TX_ACCOUNT_LOCKS
2966        } else {
2967            64
2968        }
2969    }
2970
2971    /// Prepare a transaction batch from a list of versioned transactions from
2972    /// an entry. Used for tests only.
2973    pub fn prepare_entry_batch(
2974        &self,
2975        txs: Vec<VersionedTransaction>,
2976    ) -> Result<TransactionBatch<RuntimeTransaction<SanitizedTransaction>>> {
2977        let sanitized_txs = txs
2978            .into_iter()
2979            .map(|tx| {
2980                RuntimeTransaction::try_create(
2981                    tx,
2982                    MessageHash::Compute,
2983                    None,
2984                    self,
2985                    self.get_reserved_account_keys(),
2986                )
2987            })
2988            .collect::<Result<Vec<_>>>()?;
2989        Ok(TransactionBatch::new(
2990            self.try_lock_accounts(&sanitized_txs),
2991            self,
2992            OwnedOrBorrowed::Owned(sanitized_txs),
2993        ))
2994    }
2995
    /// Attempt to take locks on the accounts in a transaction batch
    ///
    /// Equivalent to `try_lock_accounts_with_results` with an all-`Ok`
    /// per-transaction result iterator.
    pub fn try_lock_accounts(&self, txs: &[impl TransactionWithMeta]) -> Vec<Result<()>> {
        self.try_lock_accounts_with_results(txs, txs.iter().map(|_| Ok(())))
    }
3000
3001    /// Attempt to take locks on the accounts in a transaction batch, and their cost
3002    /// limited packing status and duplicate transaction conflict status
3003    pub fn try_lock_accounts_with_results(
3004        &self,
3005        txs: &[impl TransactionWithMeta],
3006        tx_results: impl Iterator<Item = Result<()>>,
3007    ) -> Vec<Result<()>> {
3008        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3009        let relax_intrabatch_account_locks = self
3010            .feature_set
3011            .is_active(&feature_set::relax_intrabatch_account_locks::id());
3012
3013        // with simd83 enabled, we must fail transactions that duplicate a prior message hash
3014        // previously, conflicting account locks would fail such transactions as a side effect
3015        let mut batch_message_hashes = AHashSet::with_capacity(txs.len());
3016        let tx_results = tx_results
3017            .enumerate()
3018            .map(|(i, tx_result)| match tx_result {
3019                Ok(()) if relax_intrabatch_account_locks => {
3020                    // `HashSet::insert()` returns `true` when the value does *not* already exist
3021                    if batch_message_hashes.insert(txs[i].message_hash()) {
3022                        Ok(())
3023                    } else {
3024                        Err(TransactionError::AlreadyProcessed)
3025                    }
3026                }
3027                Ok(()) => Ok(()),
3028                Err(e) => Err(e),
3029            });
3030
3031        self.rc.accounts.lock_accounts(
3032            txs.iter(),
3033            tx_results,
3034            tx_account_lock_limit,
3035            relax_intrabatch_account_locks,
3036        )
3037    }
3038
    /// Prepare a locked transaction batch from a list of sanitized transactions.
    ///
    /// All transactions start with an `Ok` pre-lock result; see
    /// `prepare_sanitized_batch_with_results` to supply per-transaction
    /// results.
    pub fn prepare_sanitized_batch<'a, 'b, Tx: TransactionWithMeta>(
        &'a self,
        txs: &'b [Tx],
    ) -> TransactionBatch<'a, 'b, Tx> {
        self.prepare_sanitized_batch_with_results(txs, txs.iter().map(|_| Ok(())))
    }
3046
    /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost
    /// limited packing status
    pub fn prepare_sanitized_batch_with_results<'a, 'b, Tx: TransactionWithMeta>(
        &'a self,
        transactions: &'b [Tx],
        transaction_results: impl Iterator<Item = Result<()>>,
    ) -> TransactionBatch<'a, 'b, Tx> {
        // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit
        // (AlreadyProcessed is also possible for intra-batch duplicates when
        // the relax_intrabatch_account_locks feature is active)
        TransactionBatch::new(
            self.try_lock_accounts_with_results(transactions, transaction_results),
            self,
            OwnedOrBorrowed::Borrowed(transactions),
        )
    }
3061
3062    /// Prepare a transaction batch from a single transaction without locking accounts
3063    pub fn prepare_unlocked_batch_from_single_tx<'a, Tx: SVMMessage>(
3064        &'a self,
3065        transaction: &'a Tx,
3066    ) -> TransactionBatch<'a, 'a, Tx> {
3067        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3068        let lock_result = validate_account_locks(transaction.account_keys(), tx_account_lock_limit);
3069        let mut batch = TransactionBatch::new(
3070            vec![lock_result],
3071            self,
3072            OwnedOrBorrowed::Borrowed(slice::from_ref(transaction)),
3073        );
3074        batch.set_needs_unlock(false);
3075        batch
3076    }
3077
    /// Prepare a transaction batch from a single transaction after locking accounts
    ///
    /// Equivalent to `prepare_sanitized_batch` over a one-element slice.
    pub fn prepare_locked_batch_from_single_tx<'a, Tx: TransactionWithMeta>(
        &'a self,
        transaction: &'a Tx,
    ) -> TransactionBatch<'a, 'a, Tx> {
        self.prepare_sanitized_batch(slice::from_ref(transaction))
    }
3085
    /// Run transactions against a frozen bank without committing the results
    ///
    /// # Panics
    /// Panics if this bank is not frozen; use
    /// `simulate_transaction_unchecked` for non-frozen banks (e.g. in
    /// single-Bank test frameworks).
    pub fn simulate_transaction(
        &self,
        transaction: &impl TransactionWithMeta,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        assert!(self.is_frozen(), "simulation bank must be frozen");

        self.simulate_transaction_unchecked(transaction, enable_cpi_recording)
    }
3096
    /// Run transactions against a bank without committing the results; does not check if the bank
    /// is frozen, enabling use in single-Bank test frameworks
    pub fn simulate_transaction_unchecked(
        &self,
        transaction: &impl TransactionWithMeta,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        let account_keys = transaction.account_keys();
        let number_of_accounts = account_keys.len();
        // Possibly substitute the slot-history sysvar so simulation does not
        // observe the current in-progress slot as already present.
        let account_overrides = self.get_account_overrides_for_simulation(&account_keys);
        // Single-transaction batch; no account locks are taken.
        let batch = self.prepare_unlocked_batch_from_single_tx(transaction);
        let mut timings = ExecuteTimings::default();

        let LoadAndExecuteTransactionsOutput {
            mut processing_results,
            balance_collector,
            ..
        } = self.load_and_execute_transactions(
            &batch,
            // After simulation, transactions will need to be forwarded to the leader
            // for processing. During forwarding, the transaction could expire if the
            // delay is not accounted for.
            MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY,
            &mut timings,
            &mut TransactionErrorMetrics::default(),
            TransactionProcessingConfig {
                account_overrides: Some(&account_overrides),
                check_program_modification_slot: self.check_program_modification_slot,
                log_messages_bytes_limit: None,
                limit_to_load_programs: true,
                // Record everything so the simulation result is maximally
                // informative to the caller (RPC, tests).
                recording_config: ExecutionRecordingConfig {
                    enable_cpi_recording,
                    enable_log_recording: true,
                    enable_return_data_recording: true,
                    enable_transaction_balance_recording: true,
                },
            },
        );

        debug!("simulate_transaction: {timings:?}");

        // Exactly one transaction was submitted, so exactly one result is
        // expected; a missing result is mapped to an execution error.
        let processing_result = processing_results
            .pop()
            .unwrap_or(Err(TransactionError::InvalidProgramForExecution));
        let (
            post_simulation_accounts,
            result,
            fee,
            logs,
            return_data,
            inner_instructions,
            units_consumed,
            loaded_accounts_data_size,
        ) = match processing_result {
            Ok(processed_tx) => {
                let executed_units = processed_tx.executed_units();
                let loaded_accounts_data_size = processed_tx.loaded_accounts_data_size();

                match processed_tx {
                    ProcessedTransaction::Executed(executed_tx) => {
                        let details = executed_tx.execution_details;
                        // Return only as many accounts as the transaction's
                        // own account keys; any additional loaded accounts
                        // beyond that count are dropped.
                        let post_simulation_accounts = executed_tx
                            .loaded_transaction
                            .accounts
                            .into_iter()
                            .take(number_of_accounts)
                            .collect::<Vec<_>>();
                        (
                            post_simulation_accounts,
                            details.status,
                            Some(executed_tx.loaded_transaction.fee_details.total_fee()),
                            details.log_messages,
                            details.return_data,
                            details.inner_instructions,
                            executed_units,
                            loaded_accounts_data_size,
                        )
                    }
                    // A fees-only transaction did not execute; report its
                    // load error plus the fees that would be charged.
                    ProcessedTransaction::FeesOnly(fees_only_tx) => (
                        vec![],
                        Err(fees_only_tx.load_error),
                        Some(fees_only_tx.fee_details.total_fee()),
                        None,
                        None,
                        None,
                        executed_units,
                        loaded_accounts_data_size,
                    ),
                }
            }
            Err(error) => (vec![], Err(error), None, None, None, None, 0, 0),
        };
        let logs = logs.unwrap_or_default();

        // With a single transaction each balance vec holds at most one
        // entry, so `pop()` extracts this transaction's pre/post balances.
        let (pre_balances, post_balances, pre_token_balances, post_token_balances) =
            match balance_collector {
                Some(balance_collector) => {
                    let (mut native_pre, mut native_post, mut token_pre, mut token_post) =
                        balance_collector.into_vecs();

                    (
                        native_pre.pop(),
                        native_post.pop(),
                        token_pre.pop(),
                        token_post.pop(),
                    )
                }
                None => (None, None, None, None),
            };

        TransactionSimulationResult {
            result,
            logs,
            post_simulation_accounts,
            units_consumed,
            loaded_accounts_data_size,
            return_data,
            inner_instructions,
            fee,
            pre_balances,
            post_balances,
            pre_token_balances,
            post_token_balances,
        }
    }
3222
3223    fn get_account_overrides_for_simulation(&self, account_keys: &AccountKeys) -> AccountOverrides {
3224        let mut account_overrides = AccountOverrides::default();
3225        let slot_history_id = sysvar::slot_history::id();
3226        if account_keys.iter().any(|pubkey| *pubkey == slot_history_id) {
3227            let current_account = self.get_account_with_fixed_root(&slot_history_id);
3228            let slot_history = current_account
3229                .as_ref()
3230                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
3231                .unwrap_or_default();
3232            if slot_history.check(self.slot()) == Check::Found {
3233                let ancestors = Ancestors::from(self.proper_ancestors().collect::<Vec<_>>());
3234                if let Some((account, _)) =
3235                    self.load_slow_with_fixed_root(&ancestors, &slot_history_id)
3236                {
3237                    account_overrides.set_slot_history(Some(account));
3238                }
3239            }
3240        }
3241        account_overrides
3242    }
3243
    /// Release the account locks held for the given transactions; delegates
    /// to the accounts store.
    pub fn unlock_accounts<'a, Tx: SVMMessage + 'a>(
        &self,
        txs_and_results: impl Iterator<Item = (&'a Tx, &'a Result<()>)> + Clone,
    ) {
        self.rc.accounts.unlock_accounts(txs_and_results)
    }
3250
    /// Remove the given unrooted slots from the accounts database.
    pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) {
        self.rc.accounts.accounts_db.remove_unrooted_slots(slots)
    }
3254
    /// Returns the age of `hash` in the blockhash queue, or `None` if the
    /// hash is not present.
    pub fn get_hash_age(&self, hash: &Hash) -> Option<u64> {
        self.blockhash_queue.read().unwrap().get_hash_age(hash)
    }
3258
    /// Returns true if `hash` is in the blockhash queue and no older than
    /// `max_age`.
    pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool {
        self.blockhash_queue
            .read()
            .unwrap()
            .is_hash_valid_for_age(hash, max_age)
    }
3265
3266    pub fn collect_balances(
3267        &self,
3268        batch: &TransactionBatch<impl SVMMessage>,
3269    ) -> TransactionBalances {
3270        let mut balances: TransactionBalances = vec![];
3271        for transaction in batch.sanitized_transactions() {
3272            let mut transaction_balances: Vec<u64> = vec![];
3273            for account_key in transaction.account_keys().iter() {
3274                transaction_balances.push(self.get_balance(account_key));
3275            }
3276            balances.push(transaction_balances);
3277        }
3278        balances
3279    }
3280
    /// Load and execute the batch's transactions without committing results.
    ///
    /// Checks transaction age/status against the batch's lock results,
    /// executes via the transaction processor, accumulates error metrics and
    /// timings, collects logs, and tallies processed-transaction counts.
    /// Results are returned to the caller (e.g. for `commit_transactions`).
    pub fn load_and_execute_transactions(
        &self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        timings: &mut ExecuteTimings,
        error_counters: &mut TransactionErrorMetrics,
        processing_config: TransactionProcessingConfig,
    ) -> LoadAndExecuteTransactionsOutput {
        let sanitized_txs = batch.sanitized_transactions();

        // Pre-execution checks, seeded with the batch's lock results.
        let (check_results, check_us) = measure_us!(self.check_transactions(
            sanitized_txs,
            batch.lock_results(),
            max_age,
            error_counters,
        ));
        timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us);

        // Environment snapshot handed to the SVM transaction processor.
        let (blockhash, blockhash_lamports_per_signature) =
            self.last_blockhash_and_lamports_per_signature();
        let processing_environment = TransactionProcessingEnvironment {
            blockhash,
            blockhash_lamports_per_signature,
            epoch_total_stake: self.get_current_epoch_total_stake(),
            feature_set: self.feature_set.runtime_features(),
            rent: self.rent_collector.rent.clone(),
        };

        let sanitized_output = self
            .transaction_processor
            .load_and_execute_sanitized_transactions(
                self,
                sanitized_txs,
                check_results,
                &processing_environment,
                &processing_config,
            );

        // Accumulate the errors returned by the batch processor.
        error_counters.accumulate(&sanitized_output.error_metrics);

        // Accumulate the transaction batch execution timings.
        timings.accumulate(&sanitized_output.execute_timings);

        let ((), collect_logs_us) =
            measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.processing_results));
        timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us);

        let mut processed_counts = ProcessedTransactionCounts::default();
        let err_count = &mut error_counters.total;

        for (processing_result, tx) in sanitized_output
            .processing_results
            .iter()
            .zip(sanitized_txs)
        {
            // Log transactions touching any configured debug key.
            if let Some(debug_keys) = &self.transaction_debug_keys {
                for key in tx.account_keys().iter() {
                    if debug_keys.contains(key) {
                        let result = processing_result.flattened_result();
                        info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx);
                        break;
                    }
                }
            }

            if processing_result.was_processed() {
                // Signature count must be accumulated only if the transaction
                // is processed, otherwise a mismatched count between banking
                // and replay could occur
                processed_counts.signature_count +=
                    tx.signature_details().num_transaction_signatures();
                processed_counts.processed_transactions_count += 1;

                if !tx.is_simple_vote_transaction() {
                    processed_counts.processed_non_vote_transactions_count += 1;
                }
            }

            match processing_result.flattened_result() {
                Ok(()) => {
                    processed_counts.processed_with_successful_result_count += 1;
                }
                Err(err) => {
                    // Only log the first error to avoid flooding the logs.
                    if err_count.0 == 0 {
                        debug!("tx error: {err:?} {tx:?}");
                    }
                    *err_count += 1;
                }
            }
        }

        LoadAndExecuteTransactionsOutput {
            processing_results: sanitized_output.processing_results,
            processed_counts,
            balance_collector: sanitized_output.balance_collector,
        }
    }
3379
3380    fn collect_logs(
3381        &self,
3382        transactions: &[impl TransactionWithMeta],
3383        processing_results: &[TransactionProcessingResult],
3384    ) {
3385        let transaction_log_collector_config =
3386            self.transaction_log_collector_config.read().unwrap();
3387        if transaction_log_collector_config.filter == TransactionLogCollectorFilter::None {
3388            return;
3389        }
3390
3391        let collected_logs: Vec<_> = processing_results
3392            .iter()
3393            .zip(transactions)
3394            .filter_map(|(processing_result, transaction)| {
3395                // Skip log collection for unprocessed transactions
3396                let processed_tx = processing_result.processed_transaction()?;
3397                // Skip log collection for unexecuted transactions
3398                let execution_details = processed_tx.execution_details()?;
3399                Self::collect_transaction_logs(
3400                    &transaction_log_collector_config,
3401                    transaction,
3402                    execution_details,
3403                )
3404            })
3405            .collect();
3406
3407        if !collected_logs.is_empty() {
3408            let mut transaction_log_collector = self.transaction_log_collector.write().unwrap();
3409            for (log, filtered_mentioned_addresses) in collected_logs {
3410                let transaction_log_index = transaction_log_collector.logs.len();
3411                transaction_log_collector.logs.push(log);
3412                for key in filtered_mentioned_addresses.into_iter() {
3413                    transaction_log_collector
3414                        .mentioned_address_map
3415                        .entry(key)
3416                        .or_default()
3417                        .push(transaction_log_index);
3418                }
3419            }
3420        }
3421    }
3422
3423    fn collect_transaction_logs(
3424        transaction_log_collector_config: &TransactionLogCollectorConfig,
3425        transaction: &impl TransactionWithMeta,
3426        execution_details: &TransactionExecutionDetails,
3427    ) -> Option<(TransactionLogInfo, Vec<Pubkey>)> {
3428        // Skip log collection if no log messages were recorded
3429        let log_messages = execution_details.log_messages.as_ref()?;
3430
3431        let mut filtered_mentioned_addresses = Vec::new();
3432        if !transaction_log_collector_config
3433            .mentioned_addresses
3434            .is_empty()
3435        {
3436            for key in transaction.account_keys().iter() {
3437                if transaction_log_collector_config
3438                    .mentioned_addresses
3439                    .contains(key)
3440                {
3441                    filtered_mentioned_addresses.push(*key);
3442                }
3443            }
3444        }
3445
3446        let is_vote = transaction.is_simple_vote_transaction();
3447        let store = match transaction_log_collector_config.filter {
3448            TransactionLogCollectorFilter::All => {
3449                !is_vote || !filtered_mentioned_addresses.is_empty()
3450            }
3451            TransactionLogCollectorFilter::AllWithVotes => true,
3452            TransactionLogCollectorFilter::None => false,
3453            TransactionLogCollectorFilter::OnlyMentionedAddresses => {
3454                !filtered_mentioned_addresses.is_empty()
3455            }
3456        };
3457
3458        if store {
3459            Some((
3460                TransactionLogInfo {
3461                    signature: *transaction.signature(),
3462                    result: execution_details.status.clone(),
3463                    is_vote,
3464                    log_messages: log_messages.clone(),
3465                },
3466                filtered_mentioned_addresses,
3467            ))
3468        } else {
3469            None
3470        }
3471    }
3472
    /// Load the accounts data size, in bytes
    ///
    /// Computed as the initial size plus the accumulated signed delta,
    /// saturating rather than wrapping.
    pub fn load_accounts_data_size(&self) -> u64 {
        self.accounts_data_size_initial
            .saturating_add_signed(self.load_accounts_data_size_delta())
    }
3478
    /// Load the change in accounts data size in this Bank, in bytes
    ///
    /// Saturating sum of the on-chain (transaction) and off-chain (e.g. rent
    /// collection) deltas.
    pub fn load_accounts_data_size_delta(&self) -> i64 {
        let delta_on_chain = self.load_accounts_data_size_delta_on_chain();
        let delta_off_chain = self.load_accounts_data_size_delta_off_chain();
        delta_on_chain.saturating_add(delta_off_chain)
    }
3485
    /// Load the change in accounts data size in this Bank, in bytes, from on-chain events
    /// i.e. transactions
    pub fn load_accounts_data_size_delta_on_chain(&self) -> i64 {
        self.accounts_data_size_delta_on_chain.load(Acquire)
    }
3491
    /// Load the change in accounts data size in this Bank, in bytes, from off-chain events
    /// i.e. rent collection
    pub fn load_accounts_data_size_delta_off_chain(&self) -> i64 {
        self.accounts_data_size_delta_off_chain.load(Acquire)
    }
3497
3498    /// Update the accounts data size delta from on-chain events by adding `amount`.
3499    /// The arithmetic saturates.
3500    fn update_accounts_data_size_delta_on_chain(&self, amount: i64) {
3501        if amount == 0 {
3502            return;
3503        }
3504
3505        self.accounts_data_size_delta_on_chain
3506            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_on_chain| {
3507                Some(accounts_data_size_delta_on_chain.saturating_add(amount))
3508            })
3509            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3510            .unwrap();
3511    }
3512
3513    /// Update the accounts data size delta from off-chain events by adding `amount`.
3514    /// The arithmetic saturates.
3515    fn update_accounts_data_size_delta_off_chain(&self, amount: i64) {
3516        if amount == 0 {
3517            return;
3518        }
3519
3520        self.accounts_data_size_delta_off_chain
3521            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_off_chain| {
3522                Some(accounts_data_size_delta_off_chain.saturating_add(amount))
3523            })
3524            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3525            .unwrap();
3526    }
3527
    /// Calculate the data size delta and update the off-chain accounts data size delta
    fn calculate_and_update_accounts_data_size_delta_off_chain(
        &self,
        old_data_size: usize,
        new_data_size: usize,
    ) {
        // NOTE(review): presumably the signed difference new - old; confirm
        // at `calculate_data_size_delta`'s definition.
        let data_size_delta = calculate_data_size_delta(old_data_size, new_data_size);
        self.update_accounts_data_size_delta_off_chain(data_size_delta);
    }
3537
3538    fn filter_program_errors_and_collect_fee_details(
3539        &self,
3540        processing_results: &[TransactionProcessingResult],
3541    ) {
3542        let mut accumulated_fee_details = FeeDetails::default();
3543
3544        processing_results.iter().for_each(|processing_result| {
3545            if let Ok(processed_tx) = processing_result {
3546                accumulated_fee_details.accumulate(&processed_tx.fee_details());
3547            }
3548        });
3549
3550        self.collector_fee_details
3551            .write()
3552            .unwrap()
3553            .accumulate(&accumulated_fee_details);
3554    }
3555
3556    fn update_bank_hash_stats<'a>(&self, accounts: &impl StorableAccounts<'a>) {
3557        let mut stats = BankHashStats::default();
3558        (0..accounts.len()).for_each(|i| {
3559            accounts.account(i, |account| {
3560                stats.update(&account);
3561            })
3562        });
3563        self.bank_hash_stats.accumulate(&stats);
3564    }
3565
3566    pub fn commit_transactions(
3567        &self,
3568        sanitized_txs: &[impl TransactionWithMeta],
3569        processing_results: Vec<TransactionProcessingResult>,
3570        processed_counts: &ProcessedTransactionCounts,
3571        timings: &mut ExecuteTimings,
3572    ) -> Vec<TransactionCommitResult> {
3573        assert!(
3574            !self.freeze_started(),
3575            "commit_transactions() working on a bank that is already frozen or is undergoing \
3576             freezing!"
3577        );
3578
3579        let ProcessedTransactionCounts {
3580            processed_transactions_count,
3581            processed_non_vote_transactions_count,
3582            processed_with_successful_result_count,
3583            signature_count,
3584        } = *processed_counts;
3585
3586        self.increment_transaction_count(processed_transactions_count);
3587        self.increment_non_vote_transaction_count_since_restart(
3588            processed_non_vote_transactions_count,
3589        );
3590        self.increment_signature_count(signature_count);
3591
3592        let processed_with_failure_result_count =
3593            processed_transactions_count.saturating_sub(processed_with_successful_result_count);
3594        self.transaction_error_count
3595            .fetch_add(processed_with_failure_result_count, Relaxed);
3596
3597        if processed_transactions_count > 0 {
3598            self.is_delta.store(true, Relaxed);
3599            self.transaction_entries_count.fetch_add(1, Relaxed);
3600            self.transactions_per_entry_max
3601                .fetch_max(processed_transactions_count, Relaxed);
3602        }
3603
3604        let ((), store_accounts_us) = measure_us!({
3605            // If geyser is present, we must collect `SanitizedTransaction`
3606            // references in order to comply with that interface - until it
3607            // is changed.
3608            let maybe_transaction_refs = self
3609                .accounts()
3610                .accounts_db
3611                .has_accounts_update_notifier()
3612                .then(|| {
3613                    sanitized_txs
3614                        .iter()
3615                        .map(|tx| tx.as_sanitized_transaction())
3616                        .collect::<Vec<_>>()
3617                });
3618
3619            let (accounts_to_store, transactions) = collect_accounts_to_store(
3620                sanitized_txs,
3621                &maybe_transaction_refs,
3622                &processing_results,
3623            );
3624
3625            let to_store = (self.slot(), accounts_to_store.as_slice());
3626            self.update_bank_hash_stats(&to_store);
3627            // See https://github.com/solana-labs/solana/pull/31455 for discussion
3628            // on *not* updating the index within a threadpool.
3629            self.rc
3630                .accounts
3631                .store_accounts_seq(to_store, transactions.as_deref());
3632        });
3633
3634        // Cached vote and stake accounts are synchronized with accounts-db
3635        // after each transaction.
3636        let ((), update_stakes_cache_us) =
3637            measure_us!(self.update_stakes_cache(sanitized_txs, &processing_results));
3638
3639        let ((), update_executors_us) = measure_us!({
3640            let mut cache = None;
3641            for processing_result in &processing_results {
3642                if let Some(ProcessedTransaction::Executed(executed_tx)) =
3643                    processing_result.processed_transaction()
3644                {
3645                    let programs_modified_by_tx = &executed_tx.programs_modified_by_tx;
3646                    if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() {
3647                        cache
3648                            .get_or_insert_with(|| {
3649                                self.transaction_processor
3650                                    .global_program_cache
3651                                    .write()
3652                                    .unwrap()
3653                            })
3654                            .merge(programs_modified_by_tx);
3655                    }
3656                }
3657            }
3658        });
3659
3660        let accounts_data_len_delta = processing_results
3661            .iter()
3662            .filter_map(|processing_result| processing_result.processed_transaction())
3663            .filter_map(|processed_tx| processed_tx.execution_details())
3664            .filter_map(|details| {
3665                details
3666                    .status
3667                    .is_ok()
3668                    .then_some(details.accounts_data_len_delta)
3669            })
3670            .sum();
3671        self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta);
3672
3673        let ((), update_transaction_statuses_us) =
3674            measure_us!(self.update_transaction_statuses(sanitized_txs, &processing_results));
3675
3676        self.filter_program_errors_and_collect_fee_details(&processing_results);
3677
3678        timings.saturating_add_in_place(ExecuteTimingType::StoreUs, store_accounts_us);
3679        timings.saturating_add_in_place(
3680            ExecuteTimingType::UpdateStakesCacheUs,
3681            update_stakes_cache_us,
3682        );
3683        timings.saturating_add_in_place(ExecuteTimingType::UpdateExecutorsUs, update_executors_us);
3684        timings.saturating_add_in_place(
3685            ExecuteTimingType::UpdateTransactionStatuses,
3686            update_transaction_statuses_us,
3687        );
3688
3689        Self::create_commit_results(processing_results)
3690    }
3691
3692    fn create_commit_results(
3693        processing_results: Vec<TransactionProcessingResult>,
3694    ) -> Vec<TransactionCommitResult> {
3695        processing_results
3696            .into_iter()
3697            .map(|processing_result| {
3698                let processing_result = processing_result?;
3699                let executed_units = processing_result.executed_units();
3700                let loaded_accounts_data_size = processing_result.loaded_accounts_data_size();
3701
3702                match processing_result {
3703                    ProcessedTransaction::Executed(executed_tx) => {
3704                        let execution_details = executed_tx.execution_details;
3705                        let LoadedTransaction {
3706                            accounts: loaded_accounts,
3707                            fee_details,
3708                            ..
3709                        } = executed_tx.loaded_transaction;
3710
3711                        Ok(CommittedTransaction {
3712                            status: execution_details.status,
3713                            log_messages: execution_details.log_messages,
3714                            inner_instructions: execution_details.inner_instructions,
3715                            return_data: execution_details.return_data,
3716                            executed_units,
3717                            fee_details,
3718                            loaded_account_stats: TransactionLoadedAccountsStats {
3719                                loaded_accounts_count: loaded_accounts.len(),
3720                                loaded_accounts_data_size,
3721                            },
3722                        })
3723                    }
3724                    ProcessedTransaction::FeesOnly(fees_only_tx) => Ok(CommittedTransaction {
3725                        status: Err(fees_only_tx.load_error),
3726                        log_messages: None,
3727                        inner_instructions: None,
3728                        return_data: None,
3729                        executed_units,
3730                        fee_details: fees_only_tx.fee_details,
3731                        loaded_account_stats: TransactionLoadedAccountsStats {
3732                            loaded_accounts_count: fees_only_tx.rollback_accounts.count(),
3733                            loaded_accounts_data_size,
3734                        },
3735                    }),
3736                }
3737            })
3738            .collect()
3739    }
3740
3741    fn run_incinerator(&self) {
3742        if let Some((account, _)) =
3743            self.get_account_modified_since_parent_with_fixed_root(&incinerator::id())
3744        {
3745            self.capitalization.fetch_sub(account.lamports(), Relaxed);
3746            self.store_account(&incinerator::id(), &AccountSharedData::default());
3747        }
3748    }
3749
3750    /// Returns the accounts, sorted by pubkey, that were part of accounts lt hash calculation
3751    /// This is used when writing a bank hash details file.
3752    pub(crate) fn get_accounts_for_bank_hash_details(&self) -> Vec<(Pubkey, AccountSharedData)> {
3753        let mut accounts = self
3754            .rc
3755            .accounts
3756            .accounts_db
3757            .get_pubkey_account_for_slot(self.slot());
3758        // Sort the accounts by pubkey to make diff deterministic.
3759        accounts.sort_unstable_by(|a, b| a.0.cmp(&b.0));
3760        accounts
3761    }
3762
    /// Returns which cluster (e.g. mainnet-beta, testnet, devnet, development)
    /// this bank was created for.
    pub fn cluster_type(&self) -> ClusterType {
        // unwrap is safe; self.cluster_type is ensured to be Some() always...
        // we are only using Option here for ABI compatibility...
        self.cluster_type.unwrap()
    }
3768
    /// Process a batch of transactions.
    ///
    /// Loads, executes, and commits the batch without a pre-commit callback.
    /// The inner call can only fail via the callback, so with `None` the
    /// result is infallible and the `unwrap()` cannot panic.
    #[must_use]
    pub fn load_execute_and_commit_transactions(
        &self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        recording_config: ExecutionRecordingConfig,
        timings: &mut ExecuteTimings,
        log_messages_bytes_limit: Option<usize>,
    ) -> (Vec<TransactionCommitResult>, Option<BalanceCollector>) {
        self.do_load_execute_and_commit_transactions_with_pre_commit_callback(
            batch,
            max_age,
            recording_config,
            timings,
            log_messages_bytes_limit,
            // No callback; the turbofish pins the otherwise-unconstrained
            // closure type parameter.
            None::<fn(&mut _, &_) -> _>,
        )
        .unwrap()
    }
3789
    /// Like `load_execute_and_commit_transactions`, but invokes
    /// `pre_commit_callback` between execution and commit. If the callback
    /// returns an `Err`, the batch is not committed and the error is
    /// propagated to the caller.
    pub fn load_execute_and_commit_transactions_with_pre_commit_callback<'a>(
        &'a self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        recording_config: ExecutionRecordingConfig,
        timings: &mut ExecuteTimings,
        log_messages_bytes_limit: Option<usize>,
        pre_commit_callback: impl FnOnce(
            &mut ExecuteTimings,
            &[TransactionProcessingResult],
        ) -> PreCommitResult<'a>,
    ) -> Result<(Vec<TransactionCommitResult>, Option<BalanceCollector>)> {
        self.do_load_execute_and_commit_transactions_with_pre_commit_callback(
            batch,
            max_age,
            recording_config,
            timings,
            log_messages_bytes_limit,
            Some(pre_commit_callback),
        )
    }
3811
    /// Shared implementation behind the public load/execute/commit entry
    /// points: runs the batch through `load_and_execute_transactions`,
    /// optionally invokes a pre-commit callback, then commits the results.
    fn do_load_execute_and_commit_transactions_with_pre_commit_callback<'a>(
        &'a self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        recording_config: ExecutionRecordingConfig,
        timings: &mut ExecuteTimings,
        log_messages_bytes_limit: Option<usize>,
        pre_commit_callback: Option<
            impl FnOnce(&mut ExecuteTimings, &[TransactionProcessingResult]) -> PreCommitResult<'a>,
        >,
    ) -> Result<(Vec<TransactionCommitResult>, Option<BalanceCollector>)> {
        let LoadAndExecuteTransactionsOutput {
            processing_results,
            processed_counts,
            balance_collector,
        } = self.load_and_execute_transactions(
            batch,
            max_age,
            timings,
            &mut TransactionErrorMetrics::default(),
            TransactionProcessingConfig {
                account_overrides: None,
                check_program_modification_slot: self.check_program_modification_slot,
                log_messages_bytes_limit,
                limit_to_load_programs: false,
                recording_config,
            },
        );

        // pre_commit_callback could initiate an atomic operation (i.e. poh recording with block
        // producing unified scheduler). in that case, it returns Some(freeze_lock), which should
        // be unlocked only after calling commit_transactions() immediately after calling the
        // callback. An Err from the callback aborts the whole commit.
        let freeze_lock = if let Some(pre_commit_callback) = pre_commit_callback {
            pre_commit_callback(timings, &processing_results)?
        } else {
            None
        };
        let commit_results = self.commit_transactions(
            batch.sanitized_transactions(),
            processing_results,
            &processed_counts,
            timings,
        );
        // Drop only after the commit so the bank cannot freeze mid-commit.
        drop(freeze_lock);
        Ok((commit_results, balance_collector))
    }
3859
3860    /// Process a Transaction. This is used for unit tests and simply calls the vector
3861    /// Bank::process_transactions method.
3862    pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
3863        self.try_process_transactions(std::iter::once(tx))?[0].clone()
3864    }
3865
3866    /// Process a Transaction and store metadata. This is used for tests and the banks services. It
3867    /// replicates the vector Bank::process_transaction method with metadata recording enabled.
3868    pub fn process_transaction_with_metadata(
3869        &self,
3870        tx: impl Into<VersionedTransaction>,
3871    ) -> Result<CommittedTransaction> {
3872        let txs = vec![tx.into()];
3873        let batch = self.prepare_entry_batch(txs)?;
3874
3875        let (mut commit_results, ..) = self.load_execute_and_commit_transactions(
3876            &batch,
3877            MAX_PROCESSING_AGE,
3878            ExecutionRecordingConfig {
3879                enable_cpi_recording: false,
3880                enable_log_recording: true,
3881                enable_return_data_recording: true,
3882                enable_transaction_balance_recording: false,
3883            },
3884            &mut ExecuteTimings::default(),
3885            Some(1000 * 1000),
3886        );
3887
3888        commit_results.remove(0)
3889    }
3890
3891    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
3892    /// Short circuits if any of the transactions do not pass sanitization checks.
3893    pub fn try_process_transactions<'a>(
3894        &self,
3895        txs: impl Iterator<Item = &'a Transaction>,
3896    ) -> Result<Vec<Result<()>>> {
3897        let txs = txs
3898            .map(|tx| VersionedTransaction::from(tx.clone()))
3899            .collect();
3900        self.try_process_entry_transactions(txs)
3901    }
3902
3903    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
3904    /// Short circuits if any of the transactions do not pass sanitization checks.
3905    pub fn try_process_entry_transactions(
3906        &self,
3907        txs: Vec<VersionedTransaction>,
3908    ) -> Result<Vec<Result<()>>> {
3909        let batch = self.prepare_entry_batch(txs)?;
3910        Ok(self.process_transaction_batch(&batch))
3911    }
3912
3913    #[must_use]
3914    fn process_transaction_batch(
3915        &self,
3916        batch: &TransactionBatch<impl TransactionWithMeta>,
3917    ) -> Vec<Result<()>> {
3918        self.load_execute_and_commit_transactions(
3919            batch,
3920            MAX_PROCESSING_AGE,
3921            ExecutionRecordingConfig::new_single_setting(false),
3922            &mut ExecuteTimings::default(),
3923            None,
3924        )
3925        .0
3926        .into_iter()
3927        .map(|commit_result| commit_result.and_then(|committed_tx| committed_tx.status))
3928        .collect()
3929    }
3930
3931    /// Create, sign, and process a Transaction from `keypair` to `to` of
3932    /// `n` lamports where `blockhash` is the last Entry ID observed by the client.
3933    pub fn transfer(&self, n: u64, keypair: &Keypair, to: &Pubkey) -> Result<Signature> {
3934        let blockhash = self.last_blockhash();
3935        let tx = system_transaction::transfer(keypair, to, n, blockhash);
3936        let signature = tx.signatures[0];
3937        self.process_transaction(&tx).map(|_| signature)
3938    }
3939
    /// Returns the lamport balance of `account`.
    pub fn read_balance(account: &AccountSharedData) -> u64 {
        account.lamports()
    }
3943    /// Each program would need to be able to introspect its own state
3944    /// this is hard-coded to the Budget language
3945    pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
3946        self.get_account(pubkey)
3947            .map(|x| Self::read_balance(&x))
3948            .unwrap_or(0)
3949    }
3950
3951    /// Compute all the parents of the bank in order
3952    pub fn parents(&self) -> Vec<Arc<Bank>> {
3953        let mut parents = vec![];
3954        let mut bank = self.parent();
3955        while let Some(parent) = bank {
3956            parents.push(parent.clone());
3957            bank = parent.parent();
3958        }
3959        parents
3960    }
3961
3962    /// Compute all the parents of the bank including this bank itself
3963    pub fn parents_inclusive(self: Arc<Self>) -> Vec<Arc<Bank>> {
3964        let mut parents = self.parents();
3965        parents.insert(0, self);
3966        parents
3967    }
3968
    /// Store the single `account` with `pubkey`.
    /// Uses `store_accounts`, which works on a vector of accounts.
    pub fn store_account(&self, pubkey: &Pubkey, account: &AccountSharedData) {
        self.store_accounts((self.slot(), &[(pubkey, account)][..]))
    }
3974
3975    pub fn store_accounts<'a>(&self, accounts: impl StorableAccounts<'a>) {
3976        assert!(!self.freeze_started());
3977        let mut m = Measure::start("stakes_cache.check_and_store");
3978        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
3979
3980        (0..accounts.len()).for_each(|i| {
3981            accounts.account(i, |account| {
3982                self.stakes_cache.check_and_store(
3983                    account.pubkey(),
3984                    &account,
3985                    new_warmup_cooldown_rate_epoch,
3986                )
3987            })
3988        });
3989        self.update_bank_hash_stats(&accounts);
3990        self.rc.accounts.store_accounts_par(accounts, None);
3991        m.stop();
3992        self.rc
3993            .accounts
3994            .accounts_db
3995            .stats
3996            .stakes_cache_check_and_store_us
3997            .fetch_add(m.as_us(), Relaxed);
3998    }
3999
    /// Unconditionally flush the accounts write cache up to this bank's slot.
    pub fn force_flush_accounts_cache(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache(true, Some(self.slot()))
    }
4006
    /// Flush the accounts write cache up to this bank's slot, but only if
    /// accounts-db decides a flush is needed (`force == false`).
    pub fn flush_accounts_cache_if_needed(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache(false, Some(self.slot()))
    }
4013
4014    /// Technically this issues (or even burns!) new lamports,
4015    /// so be extra careful for its usage
4016    fn store_account_and_update_capitalization(
4017        &self,
4018        pubkey: &Pubkey,
4019        new_account: &AccountSharedData,
4020    ) {
4021        let old_account_data_size = if let Some(old_account) =
4022            self.get_account_with_fixed_root_no_cache(pubkey)
4023        {
4024            match new_account.lamports().cmp(&old_account.lamports()) {
4025                std::cmp::Ordering::Greater => {
4026                    let diff = new_account.lamports() - old_account.lamports();
4027                    trace!("store_account_and_update_capitalization: increased: {pubkey} {diff}");
4028                    self.capitalization.fetch_add(diff, Relaxed);
4029                }
4030                std::cmp::Ordering::Less => {
4031                    let diff = old_account.lamports() - new_account.lamports();
4032                    trace!("store_account_and_update_capitalization: decreased: {pubkey} {diff}");
4033                    self.capitalization.fetch_sub(diff, Relaxed);
4034                }
4035                std::cmp::Ordering::Equal => {}
4036            }
4037            old_account.data().len()
4038        } else {
4039            trace!(
4040                "store_account_and_update_capitalization: created: {pubkey} {}",
4041                new_account.lamports()
4042            );
4043            self.capitalization
4044                .fetch_add(new_account.lamports(), Relaxed);
4045            0
4046        };
4047
4048        self.store_account(pubkey, new_account);
4049        self.calculate_and_update_accounts_data_size_delta_off_chain(
4050            old_account_data_size,
4051            new_account.data().len(),
4052        );
4053    }
4054
    /// Returns a shared handle to this bank's accounts store.
    pub fn accounts(&self) -> Arc<Accounts> {
        self.rc.accounts.clone()
    }
4058
4059    fn apply_simd_0306_cost_tracker_changes(&mut self) {
4060        let mut cost_tracker = self.write_cost_tracker().unwrap();
4061        let block_cost_limit = cost_tracker.get_block_limit();
4062        let vote_cost_limit = cost_tracker.get_vote_limit();
4063        // SIMD-0306 makes account cost limit 40% of the block cost limit.
4064        let account_cost_limit = block_cost_limit.saturating_mul(40).saturating_div(100);
4065        cost_tracker.set_limits(account_cost_limit, block_cost_limit, vote_cost_limit);
4066    }
4067
    /// Complete bank initialization after construction: wires the configured
    /// compute budget into the transaction processor, applies feature
    /// activations, seeds cost-tracker limits from active features, and
    /// installs builtin programs and precompiles.
    fn finish_init(
        &mut self,
        genesis_config: &GenesisConfig,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
    ) {
        // Propagate an explicitly configured compute budget into the
        // transaction processor's execution cost model.
        if let Some(compute_budget) = self.compute_budget {
            self.transaction_processor
                .set_execution_cost(compute_budget.to_cost());
        }

        self.rewards_pool_pubkeys =
            Arc::new(genesis_config.rewards_pools.keys().cloned().collect());

        self.apply_feature_activations(
            ApplyFeatureActivationsCaller::FinishInit,
            debug_do_not_add_builtins,
        );

        // Cost-Tracker is not serialized in snapshot or any configs.
        // We must apply previously activated features related to limits here
        // so that the initial bank state is consistent with the feature set.
        // Cost-tracker limits are propagated through children banks.
        if self
            .feature_set
            .is_active(&feature_set::raise_block_limits_to_100m::id())
        {
            // SIMD-0286: raise only the block cost limit; keep the existing
            // per-account and vote limits.
            let block_cost_limit = simd_0286_block_limits();
            let mut cost_tracker = self.write_cost_tracker().unwrap();
            let account_cost_limit = cost_tracker.get_account_limit();
            let vote_cost_limit = cost_tracker.get_vote_limit();
            cost_tracker.set_limits(account_cost_limit, block_cost_limit, vote_cost_limit);
        }

        if self
            .feature_set
            .is_active(&feature_set::raise_account_cu_limit::id())
        {
            self.apply_simd_0306_cost_tracker_changes();
        }

        if !debug_do_not_add_builtins {
            for builtin in BUILTINS
                .iter()
                .chain(additional_builtins.unwrap_or(&[]).iter())
            {
                // The builtin should be added if it has no enable feature ID
                // and it has not been migrated to Core BPF.
                //
                // If a program was previously migrated to Core BPF, accountsDB
                // from snapshot should contain the BPF program accounts.
                let builtin_is_bpf = |program_id: &Pubkey| {
                    self.get_account(program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false)
                };
                if builtin.enable_feature_id.is_none() && !builtin_is_bpf(&builtin.program_id) {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(0, builtin.name.len(), builtin.entrypoint),
                    );
                }
            }
            // Feature-gated precompiles are installed on activation; the
            // rest are installed here.
            for precompile in get_precompiles() {
                if precompile.feature.is_none() {
                    self.add_precompile(&precompile.program_id);
                }
            }
        }

        let simd_0296_active = self
            .feature_set
            .is_active(&raise_cpi_nesting_limit_to_8::id());

        // Build both program runtime environments (v1 and v2) from the
        // current feature set and compute budget.
        self.transaction_processor
            .configure_program_runtime_environments(
                Some(Arc::new(
                    create_program_runtime_environment_v1(
                        &self.feature_set.runtime_features(),
                        &self
                            .compute_budget()
                            .unwrap_or(ComputeBudget::new_with_defaults(simd_0296_active))
                            .to_budget(),
                        false, /* deployment */
                        false, /* debugging_features */
                    )
                    .unwrap(),
                )),
                Some(Arc::new(create_program_runtime_environment_v2(
                    &self
                        .compute_budget()
                        .unwrap_or(ComputeBudget::new_with_defaults(simd_0296_active))
                        .to_budget(),
                    false, /* debugging_features */
                ))),
            );
    }
4167
    /// Overwrite the bank's tick height counter.
    pub fn set_tick_height(&self, tick_height: u64) {
        self.tick_height.store(tick_height, Relaxed)
    }
4171
    /// Replace the bank's inflation parameters.
    pub fn set_inflation(&self, inflation: Inflation) {
        *self.inflation.write().unwrap() = inflation;
    }
4175
    /// Get a snapshot (clone) of the current set of hard forks.
    pub fn hard_forks(&self) -> HardForks {
        self.hard_forks.read().unwrap().clone()
    }
4180
4181    pub fn register_hard_fork(&self, new_hard_fork_slot: Slot) {
4182        let bank_slot = self.slot();
4183
4184        let lock = self.freeze_lock();
4185        let bank_frozen = *lock != Hash::default();
4186        if new_hard_fork_slot < bank_slot {
4187            warn!(
4188                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older than the \
4189                 bank at slot {bank_slot} that attempted to register it."
4190            );
4191        } else if (new_hard_fork_slot == bank_slot) && bank_frozen {
4192            warn!(
4193                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same slot \
4194                 as the bank at slot {bank_slot} that attempted to register it, but that bank is \
4195                 already frozen."
4196            );
4197        } else {
4198            self.hard_forks
4199                .write()
4200                .unwrap()
4201                .register(new_hard_fork_slot);
4202        }
4203    }
4204
4205    pub fn get_account_with_fixed_root_no_cache(
4206        &self,
4207        pubkey: &Pubkey,
4208    ) -> Option<AccountSharedData> {
4209        self.load_account_with(pubkey, false)
4210            .map(|(acc, _slot)| acc)
4211    }
4212
    /// Load an account (and the slot it was stored in) via accounts-db using
    /// this bank's ancestors; `should_put_in_read_cache` controls whether the
    /// loaded account is inserted into the read cache.
    fn load_account_with(
        &self,
        pubkey: &Pubkey,
        should_put_in_read_cache: bool,
    ) -> Option<(AccountSharedData, Slot)> {
        self.rc.accounts.accounts_db.load_account_with(
            &self.ancestors,
            pubkey,
            should_put_in_read_cache,
        )
    }
4224
    // Hi! leaky abstraction here....
    // try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
    // processing. That alternative fn provides more safety.
    /// Load an account by pubkey, discarding the slot it was found in.
    pub fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
        self.get_account_modified_slot(pubkey)
            .map(|(acc, _slot)| acc)
    }
4232
    // Hi! leaky abstraction here....
    // use this over get_account() if it's called ONLY from on-chain runtime account
    // processing (i.e. from in-band replay/banking stage; that ensures root is *fixed* while
    // running).
    // pro: safer assertion can be enabled inside AccountsDb
    // con: panics!() if called from off-chain processing
    /// Load an account by pubkey under the fixed-root assumption.
    pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
        self.get_account_modified_slot_with_fixed_root(pubkey)
            .map(|(acc, _slot)| acc)
    }
4243
    // See note above get_account_with_fixed_root() about when to prefer this function
    /// Load an account and the slot it was last modified in, under the
    /// fixed-root assumption.
    pub fn get_account_modified_slot_with_fixed_root(
        &self,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.load_slow_with_fixed_root(&self.ancestors, pubkey)
    }
4251
    /// Load an account and the slot it was last modified in.
    pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
        self.load_slow(&self.ancestors, pubkey)
    }
4255
    /// Load an account from accounts-db without the fixed-root assumption.
    fn load_slow(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        // get_account (= primary this fn caller) may be called from on-chain Bank code even if we
        // try hard to use get_account_with_fixed_root for that purpose...
        // so pass safer LoadHint::Unspecified here as a fallback
        self.rc.accounts.load_without_fixed_root(ancestors, pubkey)
    }
4266
    /// Load an account from accounts-db under the fixed-root assumption
    /// (see the note above `get_account_with_fixed_root`).
    fn load_slow_with_fixed_root(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.rc.accounts.load_with_fixed_root(ancestors, pubkey)
    }
4274
    /// Scan for all accounts owned by `program_id`.
    pub fn get_program_accounts(
        &self,
        program_id: &Pubkey,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc
            .accounts
            .load_by_program(&self.ancestors, self.bank_id, program_id, config)
    }
4284
    /// Scan for accounts owned by `program_id` that also satisfy `filter`.
    pub fn get_filtered_program_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        program_id: &Pubkey,
        filter: F,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_program_with_filter(
            &self.ancestors,
            self.bank_id,
            program_id,
            filter,
            config,
        )
    }
4299
    /// Scan accounts via a secondary index key, applying `filter`; the scan
    /// can be bounded in total account-data bytes by `byte_limit_for_scan`.
    pub fn get_filtered_indexed_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        index_key: &IndexKey,
        filter: F,
        config: &ScanConfig,
        byte_limit_for_scan: Option<usize>,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_index_key_with_filter(
            &self.ancestors,
            self.bank_id,
            index_key,
            filter,
            config,
            byte_limit_for_scan,
        )
    }
4316
    /// Returns true if the configured account secondary indexes include `key`.
    pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
        self.rc.accounts.account_indexes_include_key(key)
    }
4320
    /// Returns all the accounts this bank can load
    pub fn get_all_accounts(&self, sort_results: bool) -> ScanResult<Vec<PubkeyAccountSlot>> {
        self.rc
            .accounts
            .load_all(&self.ancestors, self.bank_id, sort_results)
    }
4327
    // Scans all the accounts this bank can load, applying `scan_func`
    pub fn scan_all_accounts<F>(&self, scan_func: F, sort_results: bool) -> ScanResult<()>
    where
        F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),
    {
        self.rc
            .accounts
            .scan_all(&self.ancestors, self.bank_id, scan_func, sort_results)
    }
4337
    /// Returns the accounts owned by `program_id` that were stored in this
    /// bank's slot (i.e. modified since the parent bank).
    pub fn get_program_accounts_modified_since_parent(
        &self,
        program_id: &Pubkey,
    ) -> Vec<TransactionAccount> {
        self.rc
            .accounts
            .load_by_program_slot(self.slot(), Some(program_id))
    }
4346
    /// Returns collected transaction logs, optionally restricted to those
    /// mentioning `address`.
    pub fn get_transaction_logs(
        &self,
        address: Option<&Pubkey>,
    ) -> Option<Vec<TransactionLogInfo>> {
        self.transaction_log_collector
            .read()
            .unwrap()
            .get_logs_for_address(address)
    }
4356
    /// Returns all the accounts stored in this slot
    pub fn get_all_accounts_modified_since_parent(&self) -> Vec<TransactionAccount> {
        self.rc.accounts.load_by_program_slot(self.slot(), None)
    }
4361
4362    // if you want get_account_modified_since_parent without fixed_root, please define so...
4363    fn get_account_modified_since_parent_with_fixed_root(
4364        &self,
4365        pubkey: &Pubkey,
4366    ) -> Option<(AccountSharedData, Slot)> {
4367        let just_self: Ancestors = Ancestors::from(vec![self.slot()]);
4368        if let Some((account, slot)) = self.load_slow_with_fixed_root(&just_self, pubkey) {
4369            if slot == self.slot() {
4370                return Some((account, slot));
4371            }
4372        }
4373        None
4374    }
4375
    /// Returns up to `num` of the largest accounts by lamport balance,
    /// including or excluding the addresses in `filter_by_address` depending
    /// on `filter`.
    pub fn get_largest_accounts(
        &self,
        num: usize,
        filter_by_address: &HashSet<Pubkey>,
        filter: AccountAddressFilter,
        sort_results: bool,
    ) -> ScanResult<Vec<(Pubkey, u64)>> {
        self.rc.accounts.load_largest_accounts(
            &self.ancestors,
            self.bank_id,
            num,
            filter_by_address,
            filter,
            sort_results,
        )
    }
4392
    /// Return the accumulated executed transaction count
    pub fn transaction_count(&self) -> u64 {
        self.transaction_count.load(Relaxed)
    }
4397
    /// Returns the number of non-vote transactions processed without error
    /// since the most recent boot from snapshot or genesis.
    /// This value is not shared through the network, nor retained
    /// within snapshots, but is preserved in `Bank::new_from_parent`.
    pub fn non_vote_transaction_count_since_restart(&self) -> u64 {
        self.non_vote_transaction_count_since_restart.load(Relaxed)
    }
4405
4406    /// Return the transaction count executed only in this bank
4407    pub fn executed_transaction_count(&self) -> u64 {
4408        self.transaction_count()
4409            .saturating_sub(self.parent().map_or(0, |parent| parent.transaction_count()))
4410    }
4411
    /// Return the count of transactions that failed in this bank's lineage.
    pub fn transaction_error_count(&self) -> u64 {
        self.transaction_error_count.load(Relaxed)
    }
4415
    /// Return the count of entries that contained transactions.
    pub fn transaction_entries_count(&self) -> u64 {
        self.transaction_entries_count.load(Relaxed)
    }
4419
    /// Return the maximum number of transactions observed in a single entry.
    pub fn transactions_per_entry_max(&self) -> u64 {
        self.transactions_per_entry_max.load(Relaxed)
    }
4423
    /// Add `tx_count` to the cumulative executed transaction count.
    fn increment_transaction_count(&self, tx_count: u64) {
        self.transaction_count.fetch_add(tx_count, Relaxed);
    }
4427
    /// Add `tx_count` to the restart-local non-vote transaction counter.
    fn increment_non_vote_transaction_count_since_restart(&self, tx_count: u64) {
        self.non_vote_transaction_count_since_restart
            .fetch_add(tx_count, Relaxed);
    }
4432
    /// Return the accumulated signature count.
    /// This value is folded into the bank hash (see `hash_internal_state`).
    pub fn signature_count(&self) -> u64 {
        self.signature_count.load(Relaxed)
    }
4436
    /// Add `signature_count` to the accumulated signature count.
    fn increment_signature_count(&self, signature_count: u64) {
        self.signature_count.fetch_add(signature_count, Relaxed);
    }
4440
4441    pub fn get_signature_status_processed_since_parent(
4442        &self,
4443        signature: &Signature,
4444    ) -> Option<Result<()>> {
4445        if let Some((slot, status)) = self.get_signature_status_slot(signature) {
4446            if slot <= self.slot() {
4447                return Some(status);
4448            }
4449        }
4450        None
4451    }
4452
4453    pub fn get_signature_status_with_blockhash(
4454        &self,
4455        signature: &Signature,
4456        blockhash: &Hash,
4457    ) -> Option<Result<()>> {
4458        let rcache = self.status_cache.read().unwrap();
4459        rcache
4460            .get_status(signature, blockhash, &self.ancestors)
4461            .map(|v| v.1)
4462    }
4463
4464    pub fn get_committed_transaction_status_and_slot(
4465        &self,
4466        message_hash: &Hash,
4467        transaction_blockhash: &Hash,
4468    ) -> Option<(Slot, bool)> {
4469        let rcache = self.status_cache.read().unwrap();
4470        rcache
4471            .get_status(message_hash, transaction_blockhash, &self.ancestors)
4472            .map(|(slot, status)| (slot, status.is_ok()))
4473    }
4474
    /// Look up the signature under any blockhash, returning the slot it was
    /// recorded in along with its status.
    pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
        let rcache = self.status_cache.read().unwrap();
        rcache.get_status_any_blockhash(signature, &self.ancestors)
    }
4479
4480    pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
4481        self.get_signature_status_slot(signature).map(|v| v.1)
4482    }
4483
    /// Return whether the signature is present in the status cache for this
    /// bank's lineage (under any blockhash).
    pub fn has_signature(&self, signature: &Signature) -> bool {
        self.get_signature_status_slot(signature).is_some()
    }
4487
    /// Hash the `accounts` HashMap. This represents a validator's interpretation
    ///  of the delta of the ledger since the last vote and up to now
    // NOTE(review): the hashing order below is consensus-critical; do not
    // reorder any of the hashv() steps.
    fn hash_internal_state(&self) -> Hash {
        let measure_total = Measure::start("");
        let slot = self.slot();

        // Base hash: chain from the parent bank's hash and commit to this
        // bank's signature count and last blockhash.
        let mut hash = hashv(&[
            self.parent_hash.as_ref(),
            &self.signature_count().to_le_bytes(),
            self.last_blockhash().as_ref(),
        ]);

        // Fold in the accounts lattice hash; keep its checksum for the
        // "bank frozen" log line below.
        let accounts_lt_hash_checksum = {
            let accounts_lt_hash = &*self.accounts_lt_hash.lock().unwrap();
            let lt_hash_bytes = bytemuck::must_cast_slice(&accounts_lt_hash.0 .0);
            hash = hashv(&[hash.as_ref(), lt_hash_bytes]);
            accounts_lt_hash.0.checksum()
        };

        // If a hard fork is registered for this slot, mix its data into the
        // hash so the forked chain diverges from the original.
        let buf = self
            .hard_forks
            .read()
            .unwrap()
            .get_hash_data(slot, self.parent_slot());
        if let Some(buf) = buf {
            let hard_forked_hash = hashv(&[hash.as_ref(), &buf]);
            warn!("hard fork at slot {slot} by hashing {buf:?}: {hash} => {hard_forked_hash}");
            hash = hard_forked_hash;
        }

        // dev-context-only: tests/ledger-tool may override the computed bank
        // hash for this slot; log when the override differs.
        #[cfg(feature = "dev-context-only-utils")]
        let hash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_bank_hash_override(slot)
            .copied()
            .inspect(|&hash_override| {
                if hash_override != hash {
                    info!(
                        "bank: slot: {}: overrode bank hash: {} with {}",
                        self.slot(),
                        hash,
                        hash_override
                    );
                }
            });
        // Avoid to optimize out `hash` along with the whole computation by super smart rustc.
        // hash_override is used by ledger-tool's simulate-block-production, which prefers
        // the actual bank freezing processing for accurate simulation.
        #[cfg(feature = "dev-context-only-utils")]
        let hash = hash_override.unwrap_or(std::hint::black_box(hash));

        let bank_hash_stats = self.bank_hash_stats.load();

        let total_us = measure_total.end_as_us();

        datapoint_info!(
            "bank-hash_internal_state",
            ("slot", slot, i64),
            ("total_us", total_us, i64),
        );
        info!(
            "bank frozen: {slot} hash: {hash} signature_count: {} last_blockhash: {} \
             capitalization: {}, accounts_lt_hash checksum: {accounts_lt_hash_checksum}, stats: \
             {bank_hash_stats:?}",
            self.signature_count(),
            self.last_blockhash(),
            self.capitalization(),
        );
        hash
    }
4560
    /// Return the fees accumulated for this bank's collector.
    pub fn collector_fees(&self) -> u64 {
        self.collector_fees.load(Relaxed)
    }
4564
    /// Used by ledger tool to run a final hash calculation once all ledger replay has completed.
    /// This should not be called by validator code.
    pub fn run_final_hash_calc(&self) {
        self.force_flush_accounts_cache();
        // note that this slot may not be a root
        // The verification result is intentionally discarded: this is a
        // final best-effort calculation for tooling, run in the foreground.
        _ = self.verify_accounts(
            VerifyAccountsHashConfig {
                require_rooted_bank: false,
                run_in_background: false,
            },
            None,
        );
    }
4578
    /// Verify the account state as part of startup, typically from a snapshot.
    ///
    /// This fn calculates the accounts lt hash and compares it against the stored value in the
    /// bank.  Normal validator operation will do this calculation in the background, and use the
    /// account storage files for input. Tests/ledger-tool may opt to either do the calculation in
    /// the foreground, or use the accounts index for input.
    ///
    /// Returns true if all is good.
    /// Note, if calculation is running in the background, this fn will return as soon as the
    /// calculation *begins*, not when it has completed.
    ///
    /// Only intended to be called at startup, or from tests/ledger-tool.
    #[must_use]
    fn verify_accounts(
        &self,
        mut config: VerifyAccountsHashConfig,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        let accounts_db = &self.rc.accounts.accounts_db;
        // Wait until initial hash calc is complete before starting a new hash calc.
        // This should only occur when we halt at a slot in ledger-tool.
        accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread();

        let slot = self.slot();

        if duplicates_lt_hash.is_none() {
            // Calculating the accounts lt hash from storages *requires* a duplicates_lt_hash.
            // If it is None here, then we must use the index instead, which also means we
            // cannot run in the background.
            config.run_in_background = false;
        }

        if config.require_rooted_bank && !accounts_db.accounts_index.is_alive_root(slot) {
            if let Some(parent) = self.parent() {
                info!(
                    "slot {slot} is not a root, so verify accounts hash on parent bank at slot {}",
                    parent.slot(),
                );
                // The duplicates_lt_hash is only valid for the current slot, so we must fall
                // back to verifying the accounts lt hash with the index (which also means we
                // cannot run in the background).
                config.run_in_background = false;
                return parent.verify_accounts(config, None);
            } else {
                // this will result in mismatch errors
                // accounts hash calc doesn't include unrooted slots
                panic!("cannot verify accounts hash because slot {slot} is not a root");
            }
        }

        // Shared comparison helper for both the background and foreground
        // paths; logs checksums on mismatch.
        fn check_lt_hash(
            expected_accounts_lt_hash: &AccountsLtHash,
            calculated_accounts_lt_hash: &AccountsLtHash,
        ) -> bool {
            let is_ok = calculated_accounts_lt_hash == expected_accounts_lt_hash;
            if !is_ok {
                let expected = expected_accounts_lt_hash.0.checksum();
                let calculated = calculated_accounts_lt_hash.0.checksum();
                error!(
                    "Verifying accounts failed: accounts lattice hashes do not match, expected: \
                     {expected}, calculated: {calculated}",
                );
            }
            is_ok
        }

        // The snapshot storages must be captured *before* starting the background verification.
        // Otherwise, it is possible that a delayed call to `get_snapshot_storages()` will *not*
        // get the correct storages required to calculate and verify the accounts hashes.
        let snapshot_storages = accounts_db.get_storages(RangeFull);
        let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone();
        if config.run_in_background {
            // Background path: spawn a thread that calculates from storages.
            // `duplicates_lt_hash` is guaranteed Some here (see the check above
            // that forces foreground when it is None).
            let accounts_db_ = Arc::clone(accounts_db);
            accounts_db.verify_accounts_hash_in_bg.start(|| {
                Builder::new()
                    .name("solBgVerfyAccts".into())
                    .spawn(move || {
                        info!("Verifying accounts in background...");
                        let start = Instant::now();
                        let num_threads = accounts_db_
                            .num_hash_threads
                            .unwrap_or_else(accounts_db::default_num_hash_threads);
                        let (calculated_accounts_lt_hash, lattice_verify_time) =
                            meas_dur!(accounts_db_
                                .calculate_accounts_lt_hash_at_startup_from_storages(
                                    snapshot_storages.0.as_slice(),
                                    &duplicates_lt_hash.unwrap(),
                                    slot,
                                    num_threads
                                ));
                        let is_ok =
                            check_lt_hash(&expected_accounts_lt_hash, &calculated_accounts_lt_hash);
                        accounts_db_
                            .verify_accounts_hash_in_bg
                            .background_finished();
                        let total_time = start.elapsed();
                        datapoint_info!(
                            "startup_verify_accounts",
                            ("total_us", total_time.as_micros(), i64),
                            (
                                "calculate_accounts_lt_hash_us",
                                lattice_verify_time.as_micros(),
                                i64
                            ),
                        );
                        info!("Verifying accounts in background... Done in {total_time:?}");
                        is_ok
                    })
                    .unwrap()
            });
            true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread.
        } else {
            // Foreground path: calculate from storages if we have a
            // duplicates_lt_hash, otherwise fall back to the accounts index.
            info!("Verifying accounts in foreground...");
            let start = Instant::now();
            let num_threads = NonZeroUsize::new(num_cpus::get()).unwrap();
            let calculated_accounts_lt_hash = if let Some(duplicates_lt_hash) = duplicates_lt_hash {
                accounts_db.calculate_accounts_lt_hash_at_startup_from_storages(
                    snapshot_storages.0.as_slice(),
                    &duplicates_lt_hash,
                    slot,
                    num_threads,
                )
            } else {
                accounts_db.calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot)
            };
            let is_ok = check_lt_hash(&expected_accounts_lt_hash, &calculated_accounts_lt_hash);
            self.set_initial_accounts_hash_verification_completed();
            info!(
                "Verifying accounts in foreground... Done in {:?}",
                start.elapsed(),
            );
            is_ok
        }
    }
4715
    /// Specify that initial verification has completed.
    /// Called internally when verification runs in the foreground thread.
    /// Also has to be called by some tests which don't do verification on startup.
    pub fn set_initial_accounts_hash_verification_completed(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verification_complete();
    }
4726
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    pub fn has_initial_accounts_hash_verification_completed(&self) -> bool {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .check_complete()
    }
4737
4738    /// Get this bank's storages to use for snapshots.
4739    ///
4740    /// If a base slot is provided, return only the storages that are *higher* than this slot.
4741    pub fn get_snapshot_storages(&self, base_slot: Option<Slot>) -> Vec<Arc<AccountStorageEntry>> {
4742        // if a base slot is provided, request storages starting at the slot *after*
4743        let start_slot = base_slot.map_or(0, |slot| slot.saturating_add(1));
4744        // we want to *include* the storage at our slot
4745        let requested_slots = start_slot..=self.slot();
4746
4747        self.rc.accounts.accounts_db.get_storages(requested_slots).0
4748    }
4749
4750    #[must_use]
4751    fn verify_hash(&self) -> bool {
4752        assert!(self.is_frozen());
4753        let calculated_hash = self.hash_internal_state();
4754        let expected_hash = self.hash();
4755
4756        if calculated_hash == expected_hash {
4757            true
4758        } else {
4759            warn!(
4760                "verify failed: slot: {}, {} (calculated) != {} (expected)",
4761                self.slot(),
4762                calculated_hash,
4763                expected_hash
4764            );
4765            false
4766        }
4767    }
4768
4769    pub fn verify_transaction(
4770        &self,
4771        tx: VersionedTransaction,
4772        verification_mode: TransactionVerificationMode,
4773    ) -> Result<RuntimeTransaction<SanitizedTransaction>> {
4774        let sanitized_tx = {
4775            let size =
4776                bincode::serialized_size(&tx).map_err(|_| TransactionError::SanitizeFailure)?;
4777            if size > PACKET_DATA_SIZE as u64 {
4778                return Err(TransactionError::SanitizeFailure);
4779            }
4780            let message_hash = if verification_mode == TransactionVerificationMode::FullVerification
4781            {
4782                tx.verify_and_hash_message()?
4783            } else {
4784                tx.message.hash()
4785            };
4786
4787            RuntimeTransaction::try_create(
4788                tx,
4789                MessageHash::Precomputed(message_hash),
4790                None,
4791                self,
4792                self.get_reserved_account_keys(),
4793            )
4794        }?;
4795
4796        Ok(sanitized_tx)
4797    }
4798
    /// Convenience wrapper: sanitize `tx` with full signature verification.
    pub fn fully_verify_transaction(
        &self,
        tx: VersionedTransaction,
    ) -> Result<RuntimeTransaction<SanitizedTransaction>> {
        self.verify_transaction(tx, TransactionVerificationMode::FullVerification)
    }
4805
4806    /// Checks if the transaction violates the bank's reserved keys.
4807    /// This needs to be checked upon epoch boundary crosses because the
4808    /// reserved key set may have changed since the initial sanitization.
4809    pub fn check_reserved_keys(&self, tx: &impl SVMMessage) -> Result<()> {
4810        // Check keys against the reserved set - these failures simply require us
4811        // to re-sanitize the transaction. We do not need to drop the transaction.
4812        let reserved_keys = self.get_reserved_account_keys();
4813        for (index, key) in tx.account_keys().iter().enumerate() {
4814            if tx.is_writable(index) && reserved_keys.contains(key) {
4815                return Err(TransactionError::ResanitizationNeeded);
4816            }
4817        }
4818
4819        Ok(())
4820    }
4821
    /// Calculates and returns the capitalization.
    ///
    /// Panics if capitalization overflows a u64.
    ///
    /// Note, this is *very* expensive!  It walks the whole accounts index,
    /// account-by-account, summing each account's balance.
    ///
    /// Only intended to be called at startup by ledger-tool or tests.
    /// (cannot be made DCOU due to solana-program-test)
    pub fn calculate_capitalization_for_tests(&self) -> u64 {
        // Wait for any in-flight background hash verification first so the
        // index walk below sees a settled state.
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread();
        self.rc
            .accounts
            .accounts_db
            .calculate_capitalization_at_startup_from_index(&self.ancestors, self.slot())
    }
4842
    /// Sets the capitalization.
    ///
    /// Only intended to be called by ledger-tool or tests.
    /// (cannot be made DCOU due to solana-program-test)
    pub fn set_capitalization_for_tests(&self, capitalization: u64) {
        self.capitalization.store(capitalization, Relaxed);
    }
4850
    /// Returns the `SnapshotHash` for this bank's slot
    ///
    /// This fn is used at startup to verify the bank was rebuilt correctly.
    pub fn get_snapshot_hash(&self) -> SnapshotHash {
        // The snapshot hash is derived from the accounts lattice hash checksum.
        SnapshotHash::new(self.accounts_lt_hash.lock().unwrap().0.checksum())
    }
4857
    /// Warm the accounts-db read cache with the account for `key`, loaded
    /// through this bank's ancestors.
    pub fn load_account_into_read_cache(&self, key: &Pubkey) {
        self.rc
            .accounts
            .accounts_db
            .load_account_into_read_cache(&self.ancestors, key);
    }
4864
    /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
    /// calculation and could shield other real accounts.
    ///
    /// Verifies the accounts (possibly in the background), then cleans and
    /// shrinks storages, then verifies the bank hash. Returns true only when
    /// both the accounts verification and the bank hash verification pass.
    pub fn verify_snapshot_bank(
        &self,
        skip_shrink: bool,
        force_clean: bool,
        latest_full_snapshot_slot: Slot,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        // If we verify the accounts using the lattice-based hash *and* with storages (as opposed
        // to the index), then we rely on the DuplicatesLtHash as given by generate_index().  Since
        // the duplicates are based on a specific set of storages, we must use the exact same
        // storages to do the lattice-based accounts verification.  This means we must wait to
        // clean/shrink until *after* we've gotten Arcs to the storages (this prevents their
        // untimely removal).  Simply, we call `verify_accounts_hash()` before we call `clean` or
        // `shrink`.
        let (verified_accounts, verify_accounts_time_us) = measure_us!({
            let should_verify_accounts = !self.rc.accounts.accounts_db.skip_initial_hash_calc;
            if should_verify_accounts {
                self.verify_accounts(
                    VerifyAccountsHashConfig {
                        require_rooted_bank: false,
                        run_in_background: true,
                    },
                    duplicates_lt_hash,
                )
            } else {
                info!("Verifying accounts... Skipped.");
                // Still mark verification complete so waiters don't block.
                self.set_initial_accounts_hash_verification_completed();
                true
            }
        });

        let (_, clean_time_us) = measure_us!({
            let should_clean = force_clean || (!skip_shrink && self.slot() > 0);
            if should_clean {
                info!("Cleaning...");
                // We cannot clean past the latest full snapshot's slot because we are about to
                // perform an accounts hash calculation *up to that slot*.  If we cleaned *past*
                // that slot, then accounts could be removed from older storages, which would
                // change the accounts hash.
                self.rc.accounts.accounts_db.clean_accounts(
                    Some(latest_full_snapshot_slot),
                    true,
                    self.epoch_schedule(),
                );
                info!("Cleaning... Done.");
            } else {
                info!("Cleaning... Skipped.");
            }
        });

        let (_, shrink_time_us) = measure_us!({
            let should_shrink = !skip_shrink && self.slot() > 0;
            if should_shrink {
                info!("Shrinking...");
                self.rc.accounts.accounts_db.shrink_all_slots(
                    true,
                    self.epoch_schedule(),
                    // we cannot allow the snapshot slot to be shrunk
                    Some(self.slot()),
                );
                info!("Shrinking... Done.");
            } else {
                info!("Shrinking... Skipped.");
            }
        });

        info!("Verifying bank...");
        let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash());
        info!("Verifying bank... Done.");

        datapoint_info!(
            "verify_snapshot_bank",
            ("clean_us", clean_time_us, i64),
            ("shrink_us", shrink_time_us, i64),
            ("verify_accounts_us", verify_accounts_time_us, i64),
            ("verify_bank_us", verify_bank_time_us, i64),
        );

        verified_accounts && verified_bank
    }
4947
    /// Return the number of hashes per tick
    // NOTE(review): returns `&Option<u64>` rather than `Option<u64>`; kept
    // as-is for caller compatibility.
    pub fn hashes_per_tick(&self) -> &Option<u64> {
        &self.hashes_per_tick
    }
4952
    /// Return the number of ticks per slot
    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
4957
    /// Return the number of slots per year
    pub fn slots_per_year(&self) -> f64 {
        self.slots_per_year
    }
4962
    /// Return the number of ticks since genesis.
    pub fn tick_height(&self) -> u64 {
        self.tick_height.load(Relaxed)
    }
4967
    /// Return the inflation parameters of the Bank
    pub fn inflation(&self) -> Inflation {
        // Copy out under the read lock.
        *self.inflation.read().unwrap()
    }
4972
    /// Return the rent collector for this Bank
    pub fn rent_collector(&self) -> &RentCollector {
        &self.rent_collector
    }
4977
    /// Return the total capitalization of the Bank
    pub fn capitalization(&self) -> u64 {
        self.capitalization.load(Relaxed)
    }
4982
    /// Return this bank's max_tick_height
    pub fn max_tick_height(&self) -> u64 {
        self.max_tick_height
    }
4987
    /// Return the block_height of this bank
    pub fn block_height(&self) -> u64 {
        self.block_height
    }
4992
    /// Return the number of slots per epoch for the given epoch
    pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
        self.epoch_schedule().get_slots_in_epoch(epoch)
    }
4997
    /// returns the epoch for which this bank's leader_schedule_slot_offset and slot would
    ///  need to cache leader_schedule
    pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch {
        self.epoch_schedule().get_leader_schedule_epoch(slot)
    }
5003
5004    /// Returns whether the specified epoch should use the new vote account
5005    /// keyed leader schedule
5006    pub fn should_use_vote_keyed_leader_schedule(&self, epoch: Epoch) -> Option<bool> {
5007        let effective_epoch = self
5008            .feature_set
5009            .activated_slot(&agave_feature_set::enable_vote_address_leader_schedule::id())
5010            .map(|activation_slot| {
5011                // If the feature was activated at genesis, then the new leader
5012                // schedule should be effective immediately in the first epoch
5013                if activation_slot == 0 {
5014                    return 0;
5015                }
5016
5017                // Calculate the epoch that the feature became activated in
5018                let activation_epoch = self.epoch_schedule.get_epoch(activation_slot);
5019
5020                // The effective epoch is the epoch immediately after the
5021                // activation epoch
5022                activation_epoch.wrapping_add(1)
5023            });
5024
5025        // Starting from the effective epoch, always use the new leader schedule
5026        if let Some(effective_epoch) = effective_epoch {
5027            return Some(epoch >= effective_epoch);
5028        }
5029
5030        // Calculate the max epoch we can cache a leader schedule for
5031        let max_cached_leader_schedule = self.get_leader_schedule_epoch(self.slot());
5032        if epoch <= max_cached_leader_schedule {
5033            // The feature cannot be effective by the specified epoch
5034            Some(false)
5035        } else {
5036            // Cannot determine if an epoch should use the new leader schedule if the
5037            // the epoch is too far in the future because we won't know if the feature
5038            // will have been activated by then or not.
5039            None
5040        }
5041    }
5042
5043    /// a bank-level cache of vote accounts and stake delegation info
5044    fn update_stakes_cache(
5045        &self,
5046        txs: &[impl SVMMessage],
5047        processing_results: &[TransactionProcessingResult],
5048    ) {
5049        debug_assert_eq!(txs.len(), processing_results.len());
5050        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
5051        txs.iter()
5052            .zip(processing_results)
5053            .filter_map(|(tx, processing_result)| {
5054                processing_result
5055                    .processed_transaction()
5056                    .map(|processed_tx| (tx, processed_tx))
5057            })
5058            .filter_map(|(tx, processed_tx)| {
5059                processed_tx
5060                    .executed_transaction()
5061                    .map(|executed_tx| (tx, executed_tx))
5062            })
5063            .filter(|(_, executed_tx)| executed_tx.was_successful())
5064            .flat_map(|(tx, executed_tx)| {
5065                let num_account_keys = tx.account_keys().len();
5066                let loaded_tx = &executed_tx.loaded_transaction;
5067                loaded_tx.accounts.iter().take(num_account_keys)
5068            })
5069            .for_each(|(pubkey, account)| {
5070                // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us,
5071                //  but this code path is captured separately in ExecuteTimingType::UpdateStakesCacheUs
5072                self.stakes_cache
5073                    .check_and_store(pubkey, account, new_warmup_cooldown_rate_epoch);
5074            });
5075    }
5076
    /// current vote accounts for this bank along with the stake
    ///   attributed to each account
    pub fn vote_accounts(&self) -> Arc<VoteAccountsHashMap> {
        let stakes = self.stakes_cache.stakes();
        Arc::from(stakes.vote_accounts())
    }
5083
5084    /// Vote account for the given vote account pubkey.
5085    pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<VoteAccount> {
5086        let stakes = self.stakes_cache.stakes();
5087        let vote_account = stakes.vote_accounts().get(vote_account)?;
5088        Some(vote_account.clone())
5089    }
5090
    /// Get the EpochStakes for the current Bank::epoch
    pub fn current_epoch_stakes(&self) -> &VersionedEpochStakes {
        // The stakes for a given epoch (E) in self.epoch_stakes are keyed by leader schedule epoch
        // (E + 1) so the stakes for the current epoch are stored at self.epoch_stakes[E + 1]
        self.epoch_stakes
            .get(&self.epoch.saturating_add(1))
            .expect("Current epoch stakes must exist")
    }
5099
    /// Get the EpochStakes for a given epoch
    pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&VersionedEpochStakes> {
        self.epoch_stakes.get(&epoch)
    }
5104
    /// Get the full epoch-to-stakes map held by this bank.
    pub fn epoch_stakes_map(&self) -> &HashMap<Epoch, VersionedEpochStakes> {
        &self.epoch_stakes
    }
5108
    /// Get the staked nodes map for the current Bank::epoch
    pub fn current_epoch_staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
        self.current_epoch_stakes().stakes().staked_nodes()
    }
5113
5114    pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
5115        Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
5116    }
5117
5118    /// Get the total epoch stake for the given epoch.
5119    pub fn epoch_total_stake(&self, epoch: Epoch) -> Option<u64> {
5120        self.epoch_stakes
5121            .get(&epoch)
5122            .map(|epoch_stakes| epoch_stakes.total_stake())
5123    }
5124
    /// Get the total epoch stake for the current Bank::epoch
    pub fn get_current_epoch_total_stake(&self) -> u64 {
        self.current_epoch_stakes().total_stake()
    }
5129
5130    /// vote accounts for the specific epoch along with the stake
5131    ///   attributed to each account
5132    pub fn epoch_vote_accounts(&self, epoch: Epoch) -> Option<&VoteAccountsHashMap> {
5133        let epoch_stakes = self.epoch_stakes.get(&epoch)?.stakes();
5134        Some(epoch_stakes.vote_accounts().as_ref())
5135    }
5136
    /// Get the vote accounts along with the stake attributed to each account
    /// for the current Bank::epoch
    pub fn get_current_epoch_vote_accounts(&self) -> &VoteAccountsHashMap {
        self.current_epoch_stakes()
            .stakes()
            .vote_accounts()
            .as_ref()
    }
5145
5146    /// Get the fixed authorized voter for the given vote account for the
5147    /// current epoch
5148    pub fn epoch_authorized_voter(&self, vote_account: &Pubkey) -> Option<&Pubkey> {
5149        self.epoch_stakes
5150            .get(&self.epoch)
5151            .expect("Epoch stakes for bank's own epoch must exist")
5152            .epoch_authorized_voters()
5153            .get(vote_account)
5154    }
5155
5156    /// Get the fixed set of vote accounts for the given node id for the
5157    /// current epoch
5158    pub fn epoch_vote_accounts_for_node_id(&self, node_id: &Pubkey) -> Option<&NodeVoteAccounts> {
5159        self.epoch_stakes
5160            .get(&self.epoch)
5161            .expect("Epoch stakes for bank's own epoch must exist")
5162            .node_id_to_vote_accounts()
5163            .get(node_id)
5164    }
5165
5166    /// Get the total stake belonging to vote accounts associated with the given node id for the
5167    /// given epoch.
5168    pub fn epoch_node_id_to_stake(&self, epoch: Epoch, node_id: &Pubkey) -> Option<u64> {
5169        self.epoch_stakes(epoch)
5170            .and_then(|epoch_stakes| epoch_stakes.node_id_to_stake(node_id))
5171    }
5172
5173    /// Get the fixed total stake of all vote accounts for current epoch
5174    pub fn total_epoch_stake(&self) -> u64 {
5175        self.epoch_stakes
5176            .get(&self.epoch)
5177            .expect("Epoch stakes for bank's own epoch must exist")
5178            .total_stake()
5179    }
5180
5181    /// Get the fixed stake of the given vote account for the current epoch
5182    pub fn epoch_vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
5183        *self
5184            .epoch_vote_accounts(self.epoch())
5185            .expect("Bank epoch vote accounts must contain entry for the bank's own epoch")
5186            .get(vote_account)
5187            .map(|(stake, _)| stake)
5188            .unwrap_or(&0)
5189    }
5190
5191    /// given a slot, return the epoch and offset into the epoch this slot falls
5192    /// e.g. with a fixed number for slots_per_epoch, the calculation is simply:
5193    ///
5194    ///  ( slot/slots_per_epoch, slot % slots_per_epoch )
5195    ///
5196    pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) {
5197        self.epoch_schedule().get_epoch_and_slot_index(slot)
5198    }
5199
5200    pub fn get_epoch_info(&self) -> EpochInfo {
5201        let absolute_slot = self.slot();
5202        let block_height = self.block_height();
5203        let (epoch, slot_index) = self.get_epoch_and_slot_index(absolute_slot);
5204        let slots_in_epoch = self.get_slots_in_epoch(epoch);
5205        let transaction_count = Some(self.transaction_count());
5206        EpochInfo {
5207            epoch,
5208            slot_index,
5209            slots_in_epoch,
5210            absolute_slot,
5211            block_height,
5212            transaction_count,
5213        }
5214    }
5215
5216    pub fn is_empty(&self) -> bool {
5217        !self.is_delta.load(Relaxed)
5218    }
5219
5220    pub fn add_mockup_builtin(
5221        &mut self,
5222        program_id: Pubkey,
5223        builtin_function: BuiltinFunctionWithContext,
5224    ) {
5225        self.transaction_processor.add_builtin(
5226            self,
5227            program_id,
5228            "mockup",
5229            ProgramCacheEntry::new_builtin(self.slot, 0, builtin_function),
5230        );
5231    }
5232
5233    pub fn add_precompile(&mut self, program_id: &Pubkey) {
5234        debug!("Adding precompiled program {program_id}");
5235        self.add_precompiled_account(program_id);
5236        debug!("Added precompiled program {program_id:?}");
5237    }
5238
5239    // Call AccountsDb::clean_accounts()
5240    //
5241    // This fn is meant to be called by the snapshot handler in Accounts Background Service.  If
5242    // calling from elsewhere, ensure the same invariants hold/expectations are met.
5243    pub(crate) fn clean_accounts(&self) {
5244        // Don't clean the slot we're snapshotting because it may have zero-lamport
5245        // accounts that were included in the bank delta hash when the bank was frozen,
5246        // and if we clean them here, any newly created snapshot's hash for this bank
5247        // may not match the frozen hash.
5248        //
5249        // So when we're snapshotting, the highest slot to clean is lowered by one.
5250        let highest_slot_to_clean = self.slot().saturating_sub(1);
5251
5252        self.rc.accounts.accounts_db.clean_accounts(
5253            Some(highest_slot_to_clean),
5254            false,
5255            self.epoch_schedule(),
5256        );
5257    }
5258
5259    pub fn print_accounts_stats(&self) {
5260        self.rc.accounts.accounts_db.print_accounts_stats("");
5261    }
5262
5263    pub fn shrink_candidate_slots(&self) -> usize {
5264        self.rc
5265            .accounts
5266            .accounts_db
5267            .shrink_candidate_slots(self.epoch_schedule())
5268    }
5269
5270    pub(crate) fn shrink_ancient_slots(&self) {
5271        self.rc
5272            .accounts
5273            .accounts_db
5274            .shrink_ancient_slots(self.epoch_schedule())
5275    }
5276
5277    pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
5278        self.cost_tracker.read()
5279    }
5280
5281    pub fn write_cost_tracker(&self) -> LockResult<RwLockWriteGuard<CostTracker>> {
5282        self.cost_tracker.write()
5283    }
5284
5285    // Check if the wallclock time from bank creation to now has exceeded the allotted
5286    // time for transaction processing
5287    pub fn should_bank_still_be_processing_txs(
5288        bank_creation_time: &Instant,
5289        max_tx_ingestion_nanos: u128,
5290    ) -> bool {
5291        // Do this check outside of the PoH lock, hence not a method on PohRecorder
5292        bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos
5293    }
5294
5295    pub fn deactivate_feature(&mut self, id: &Pubkey) {
5296        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
5297        feature_set.active_mut().remove(id);
5298        feature_set.inactive_mut().insert(*id);
5299        self.feature_set = Arc::new(feature_set);
5300    }
5301
5302    pub fn activate_feature(&mut self, id: &Pubkey) {
5303        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
5304        feature_set.inactive_mut().remove(id);
5305        feature_set.active_mut().insert(*id, 0);
5306        self.feature_set = Arc::new(feature_set);
5307    }
5308
5309    pub fn fill_bank_with_ticks_for_tests(&self) {
5310        self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available())
5311    }
5312
5313    pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) {
5314        if self.tick_height.load(Relaxed) < self.max_tick_height {
5315            let last_blockhash = self.last_blockhash();
5316            while self.last_blockhash() == last_blockhash {
5317                self.register_tick(&Hash::new_unique(), scheduler)
5318            }
5319        } else {
5320            warn!("Bank already reached max tick height, cannot fill it with more ticks");
5321        }
5322    }
5323
5324    /// Get a set of all actively reserved account keys that are not allowed to
5325    /// be write-locked during transaction processing.
5326    pub fn get_reserved_account_keys(&self) -> &HashSet<Pubkey> {
5327        &self.reserved_account_keys.active
5328    }
5329
    // This is called from snapshot restore AND for each epoch boundary
    // The entire code path herein must be idempotent
    fn apply_feature_activations(
        &mut self,
        caller: ApplyFeatureActivationsCaller,
        debug_do_not_add_builtins: bool,
    ) {
        use ApplyFeatureActivationsCaller as Caller;
        // Only a regular child bank (NewFromParent) may activate *new*
        // features; snapshot restore (FinishInit) and warping recompute the
        // already-active set only, keeping this path idempotent.
        let allow_new_activations = match caller {
            Caller::FinishInit => false,
            Caller::NewFromParent => true,
            Caller::WarpFromParent => false,
        };
        let (feature_set, new_feature_activations) =
            self.compute_active_feature_set(allow_new_activations);
        self.feature_set = Arc::new(feature_set);

        // Update activation slot of features in `new_feature_activations`
        for feature_id in new_feature_activations.iter() {
            if let Some(mut account) = self.get_account_with_fixed_root(feature_id) {
                if let Some(mut feature) = feature::state::from_account(&account) {
                    // Persist this bank's slot as the on-chain activation slot.
                    feature.activated_at = Some(self.slot());
                    if feature::state::to_account(&feature, &mut account).is_some() {
                        self.store_account(feature_id, &account);
                    }
                    info!("Feature {} activated at slot {}", feature_id, self.slot());
                }
            }
        }

        // Update active set of reserved account keys which are not allowed to be write locked
        self.reserved_account_keys = {
            let mut reserved_keys = ReservedAccountKeys::clone(&self.reserved_account_keys);
            reserved_keys.update_active_set(&self.feature_set);
            Arc::new(reserved_keys)
        };

        if new_feature_activations.contains(&feature_set::pico_inflation::id()) {
            *self.inflation.write().unwrap() = Inflation::pico();
            self.fee_rate_governor.burn_percent = solana_fee_calculator::DEFAULT_BURN_PERCENT; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !new_feature_activations.is_disjoint(&self.feature_set.full_inflation_features_enabled())
        {
            *self.inflation.write().unwrap() = Inflation::full();
            self.fee_rate_governor.burn_percent = solana_fee_calculator::DEFAULT_BURN_PERCENT; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !debug_do_not_add_builtins {
            self.apply_builtin_program_feature_transitions(
                allow_new_activations,
                &new_feature_activations,
            );
        }

        // SIMD-0286: raise the block compute limit while preserving the
        // existing per-account and vote limits.
        if new_feature_activations.contains(&feature_set::raise_block_limits_to_100m::id()) {
            let block_cost_limit = simd_0286_block_limits();
            let mut cost_tracker = self.write_cost_tracker().unwrap();
            let account_cost_limit = cost_tracker.get_account_limit();
            let vote_cost_limit = cost_tracker.get_vote_limit();
            cost_tracker.set_limits(account_cost_limit, block_cost_limit, vote_cost_limit);
            // Release the write lock before re-applying SIMD-0306 below.
            drop(cost_tracker);

            // If the account CU limit feature is already active, re-apply the
            // SIMD-0306 changes on top of the new block limits.
            if self
                .feature_set
                .is_active(&feature_set::raise_account_cu_limit::id())
            {
                self.apply_simd_0306_cost_tracker_changes();
            }
        }

        if new_feature_activations.contains(&feature_set::raise_account_cu_limit::id()) {
            self.apply_simd_0306_cost_tracker_changes();
        }
    }
5407
5408    fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
5409        account.set_lamports(
5410            self.get_minimum_balance_for_rent_exemption(account.data().len())
5411                .max(account.lamports()),
5412        );
5413    }
5414
    /// Compute the active feature set based on the current bank state,
    /// and return it together with the set of newly activated features.
    ///
    /// When `include_pending` is true, a feature whose on-chain account exists
    /// with `activated_at == None` is treated as activating at this bank's
    /// slot and is reported in the returned pending set.
    fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, AHashSet<Pubkey>) {
        let mut active = self.feature_set.active().clone();
        let mut inactive = AHashSet::new();
        // Features newly activated by this call (non-empty only when
        // `include_pending` is true).
        let mut pending = AHashSet::new();
        let slot = self.slot();

        // Re-examine every currently-inactive feature against its on-chain
        // account; already-active features stay active (idempotent).
        for feature_id in self.feature_set.inactive() {
            let mut activated = None;
            if let Some(account) = self.get_account_with_fixed_root(feature_id) {
                if let Some(feature) = feature::state::from_account(&account) {
                    match feature.activated_at {
                        None if include_pending => {
                            // Feature activation is pending
                            pending.insert(*feature_id);
                            activated = Some(slot);
                        }
                        Some(activation_slot) if slot >= activation_slot => {
                            // Feature has been activated already
                            activated = Some(activation_slot);
                        }
                        // Either the activation slot is in the future, or the
                        // feature is pending but pending is not included.
                        _ => {}
                    }
                }
            }
            // NOTE: the `slot` binding below shadows the bank slot with the
            // feature's activation slot.
            if let Some(slot) = activated {
                active.insert(*feature_id, slot);
            } else {
                inactive.insert(*feature_id);
            }
        }

        (FeatureSet::new(active, inactive), pending)
    }
5450
    /// Apply builtin-program enablement and Core BPF migrations driven by
    /// feature activations, then register any feature-gated precompiles
    /// whose gates are active.
    ///
    /// When `only_apply_transitions_for_new_features` is true, only features
    /// in `new_feature_activations` trigger transitions; otherwise any
    /// feature active in `self.feature_set` does.
    fn apply_builtin_program_feature_transitions(
        &mut self,
        only_apply_transitions_for_new_features: bool,
        new_feature_activations: &AHashSet<Pubkey>,
    ) {
        for builtin in BUILTINS.iter() {
            // The `builtin_is_bpf` flag is used to handle the case where a
            // builtin is scheduled to be enabled by one feature gate and
            // later migrated to Core BPF by another.
            //
            // There should never be a case where a builtin is set to be
            // migrated to Core BPF and is also set to be enabled on feature
            // activation on the same feature gate. However, the
            // `builtin_is_bpf` flag will handle this case as well, electing
            // to first attempt the migration to Core BPF.
            //
            // The migration to Core BPF will fail gracefully because the
            // program account will not exist. The builtin will subsequently
            // be enabled, but it will never be migrated to Core BPF.
            //
            // Using the same feature gate for both enabling and migrating a
            // builtin to Core BPF should be strictly avoided.
            let mut builtin_is_bpf = false;
            if let Some(core_bpf_migration_config) = &builtin.core_bpf_migration_config {
                // If the builtin is set to be migrated to Core BPF on feature
                // activation, perform the migration and do not add the program
                // to the bank's builtins. The migration will remove it from
                // the builtins list and the cache.
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self
                        .migrate_builtin_to_core_bpf(&builtin.program_id, core_bpf_migration_config)
                    {
                        warn!(
                            "Failed to migrate builtin {} to Core BPF: {}",
                            builtin.name, e
                        );
                    } else {
                        builtin_is_bpf = true;
                    }
                } else {
                    // If the builtin has already been migrated to Core BPF, do not
                    // add it to the bank's builtins.
                    // (Ownership by the upgradeable loader marks a completed
                    // migration.)
                    builtin_is_bpf = self
                        .get_account(&builtin.program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false);
                }
            };

            if let Some(feature_id) = builtin.enable_feature_id {
                // Enable the builtin only if it was not just migrated (and is
                // not already) Core BPF, and its gating feature applies under
                // the current transition mode.
                let should_enable_builtin_on_feature_transition = !builtin_is_bpf
                    && if only_apply_transitions_for_new_features {
                        new_feature_activations.contains(&feature_id)
                    } else {
                        self.feature_set.is_active(&feature_id)
                    };

                if should_enable_builtin_on_feature_transition {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(
                            self.feature_set.activated_slot(&feature_id).unwrap_or(0),
                            builtin.name.len(),
                            builtin.entrypoint,
                        ),
                    );
                }
            }
        }

        // Migrate any necessary stateless builtins to core BPF.
        // Stateless builtins do not have an `enable_feature_id` since they
        // do not exist on-chain.
        for stateless_builtin in STATELESS_BUILTINS.iter() {
            if let Some(core_bpf_migration_config) = &stateless_builtin.core_bpf_migration_config {
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self.migrate_builtin_to_core_bpf(
                        &stateless_builtin.program_id,
                        core_bpf_migration_config,
                    ) {
                        warn!(
                            "Failed to migrate stateless builtin {} to Core BPF: {}",
                            stateless_builtin.name, e
                        );
                    }
                }
            }
        }

        // Register every precompile whose gating feature is active; an
        // ungated precompile (feature == None) is not added here.
        for precompile in get_precompiles() {
            let should_add_precompile = precompile
                .feature
                .as_ref()
                .map(|feature_id| self.feature_set.is_active(feature_id))
                .unwrap_or(false);
            if should_add_precompile {
                self.add_precompile(&precompile.program_id);
            }
        }
    }
5553
    /// Use to replace programs by feature activation
    ///
    /// Overwrites the account at `old_address` with the account currently at
    /// `new_address`, then resets `new_address` to the default (empty)
    /// account. A no-op unless BOTH accounts currently exist.
    #[allow(dead_code)]
    fn replace_program_account(
        &mut self,
        old_address: &Pubkey,
        new_address: &Pubkey,
        datapoint_name: &'static str,
    ) {
        if let Some(old_account) = self.get_account_with_fixed_root(old_address) {
            if let Some(new_account) = self.get_account_with_fixed_root(new_address) {
                datapoint_info!(datapoint_name, ("slot", self.slot, i64));

                // Burn lamports in the old account
                self.capitalization
                    .fetch_sub(old_account.lamports(), Relaxed);

                // Transfer new account to old account
                self.store_account(old_address, &new_account);

                // Clear new account
                self.store_account(new_address, &AccountSharedData::default());

                // Unload a program from the bank's cache
                self.transaction_processor
                    .global_program_cache
                    .write()
                    .unwrap()
                    .remove_programs([*old_address].into_iter());

                // Keep accounts-data-size bookkeeping in sync with the size
                // change from old data to new data.
                self.calculate_and_update_accounts_data_size_delta_off_chain(
                    old_account.data().len(),
                    new_account.data().len(),
                );
            }
        }
    }
5590
5591    /// Calculates the accounts data size of all accounts
5592    ///
5593    /// Panics if total overflows a u64.
5594    ///
5595    /// Note, this may be *very* expensive, as *all* accounts are collected
5596    /// into a Vec before summing each account's data size.
5597    ///
5598    /// Only intended to be called by tests or when the number of accounts is small.
5599    pub fn calculate_accounts_data_size(&self) -> ScanResult<u64> {
5600        let accounts = self.get_all_accounts(false)?;
5601        let accounts_data_size = accounts
5602            .into_iter()
5603            .map(|(_pubkey, account, _slot)| account.data().len() as u64)
5604            .try_fold(0, u64::checked_add)
5605            .expect("accounts data size cannot overflow");
5606        Ok(accounts_data_size)
5607    }
5608
5609    pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool {
5610        if slot < &self.slot {
5611            if let Ok(slot_hashes) = self.transaction_processor.sysvar_cache().get_slot_hashes() {
5612                return slot_hashes.get(slot).is_some();
5613            }
5614        }
5615        false
5616    }
5617
5618    pub fn check_program_modification_slot(&self) -> bool {
5619        self.check_program_modification_slot
5620    }
5621
5622    pub fn set_check_program_modification_slot(&mut self, check: bool) {
5623        self.check_program_modification_slot = check;
5624    }
5625
5626    pub fn fee_structure(&self) -> &FeeStructure {
5627        &self.fee_structure
5628    }
5629
5630    pub fn block_id(&self) -> Option<Hash> {
5631        *self.block_id.read().unwrap()
5632    }
5633
5634    pub fn set_block_id(&self, block_id: Option<Hash>) {
5635        *self.block_id.write().unwrap() = block_id;
5636    }
5637
5638    pub fn compute_budget(&self) -> Option<ComputeBudget> {
5639        self.compute_budget
5640    }
5641
5642    pub fn add_builtin(&self, program_id: Pubkey, name: &str, builtin: ProgramCacheEntry) {
5643        self.transaction_processor
5644            .add_builtin(self, program_id, name, builtin)
5645    }
5646
5647    pub fn get_bank_hash_stats(&self) -> BankHashStats {
5648        self.bank_hash_stats.load()
5649    }
5650
5651    pub fn clear_epoch_rewards_cache(&self) {
5652        self.epoch_rewards_calculation_cache.lock().unwrap().clear();
5653    }
5654
5655    /// Sets the accounts lt hash, only to be used by SnapshotMinimizer
5656    pub fn set_accounts_lt_hash_for_snapshot_minimizer(&self, accounts_lt_hash: AccountsLtHash) {
5657        *self.accounts_lt_hash.lock().unwrap() = accounts_lt_hash;
5658    }
5659
5660    /// Return total transaction fee collected
5661    pub fn get_collector_fee_details(&self) -> CollectorFeeDetails {
5662        self.collector_fee_details.read().unwrap().clone()
5663    }
5664}
5665
5666impl InvokeContextCallback for Bank {
5667    fn get_epoch_stake(&self) -> u64 {
5668        self.get_current_epoch_total_stake()
5669    }
5670
5671    fn get_epoch_stake_for_vote_account(&self, vote_address: &Pubkey) -> u64 {
5672        self.get_current_epoch_vote_accounts()
5673            .get(vote_address)
5674            .map(|(stake, _)| (*stake))
5675            .unwrap_or(0)
5676    }
5677
5678    fn is_precompile(&self, program_id: &Pubkey) -> bool {
5679        is_precompile(program_id, |feature_id: &Pubkey| {
5680            self.feature_set.is_active(feature_id)
5681        })
5682    }
5683
5684    fn process_precompile(
5685        &self,
5686        program_id: &Pubkey,
5687        data: &[u8],
5688        instruction_datas: Vec<&[u8]>,
5689    ) -> std::result::Result<(), PrecompileError> {
5690        if let Some(precompile) = get_precompile(program_id, |feature_id: &Pubkey| {
5691            self.feature_set.is_active(feature_id)
5692        }) {
5693            precompile.verify(data, &instruction_datas, &self.feature_set)
5694        } else {
5695            Err(PrecompileError::InvalidPublicKey)
5696        }
5697    }
5698}
5699
impl TransactionProcessingCallback for Bank {
    /// Load `pubkey` from the accounts db restricted to this bank's
    /// ancestors (fixed-root view).
    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
        self.rc
            .accounts
            .accounts_db
            .load_with_fixed_root(&self.ancestors, pubkey)
    }

    // NOTE: must hold idempotent for the same set of arguments
    /// Add a builtin program account
    fn add_builtin_account(&self, name: &str, program_id: &Pubkey) {
        let existing_genuine_program =
            self.get_account_with_fixed_root(program_id)
                .and_then(|account| {
                    // it's very unlikely to be squatted at program_id as non-system account because of burden to
                    // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's
                    // safe to assume it's a genuine program.
                    if native_loader::check_id(account.owner()) {
                        Some(account)
                    } else {
                        // malicious account is pre-occupying at program_id
                        self.burn_and_purge_account(program_id, account);
                        None
                    }
                });

        // introducing builtin program
        if existing_genuine_program.is_some() {
            // The existing account is sufficient
            return;
        }

        // A missing builtin account may only be created while the bank is
        // still open for changes.
        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new builtin program ({name}, \
             {program_id}). Maybe, inconsistent program activation is detected on snapshot \
             restore?"
        );

        // Add a bogus executable builtin account, which will be loaded and ignored.
        let (lamports, rent_epoch) =
            self.inherit_specially_retained_account_fields(&existing_genuine_program);
        let account: AccountSharedData = AccountSharedData::from(Account {
            lamports,
            data: name.as_bytes().to_vec(),
            owner: solana_sdk_ids::native_loader::id(),
            executable: true,
            rent_epoch,
        });
        self.store_account_and_update_capitalization(program_id, &account);
    }

    /// Forward account inspection to the accounts lt-hash bookkeeping.
    fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) {
        self.inspect_account_for_accounts_lt_hash(address, &account_state, is_writable);
    }
}
5756
5757impl fmt::Debug for Bank {
5758    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
5759        f.debug_struct("Bank")
5760            .field("slot", &self.slot)
5761            .field("bank_id", &self.bank_id)
5762            .field("block_height", &self.block_height)
5763            .field("parent_slot", &self.parent_slot)
5764            .field("capitalization", &self.capitalization())
5765            .finish_non_exhaustive()
5766    }
5767}
5768
5769#[cfg(feature = "dev-context-only-utils")]
5770impl Bank {
5771    pub fn wrap_with_bank_forks_for_tests(self) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
5772        let bank_forks = BankForks::new_rw_arc(self);
5773        let bank = bank_forks.read().unwrap().root_bank();
5774        (bank, bank_forks)
5775    }
5776
5777    pub fn default_for_tests() -> Self {
5778        let accounts_db = AccountsDb::default_for_tests();
5779        let accounts = Accounts::new(Arc::new(accounts_db));
5780        Self::default_with_accounts(accounts)
5781    }
5782
5783    pub fn new_with_bank_forks_for_tests(
5784        genesis_config: &GenesisConfig,
5785    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
5786        let bank = Self::new_for_tests(genesis_config);
5787        bank.wrap_with_bank_forks_for_tests()
5788    }
5789
5790    pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
5791        Self::new_with_config_for_tests(genesis_config, BankTestConfig::default())
5792    }
5793
5794    pub fn new_with_mockup_builtin_for_tests(
5795        genesis_config: &GenesisConfig,
5796        program_id: Pubkey,
5797        builtin_function: BuiltinFunctionWithContext,
5798    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
5799        let mut bank = Self::new_for_tests(genesis_config);
5800        bank.add_mockup_builtin(program_id, builtin_function);
5801        bank.wrap_with_bank_forks_for_tests()
5802    }
5803
5804    pub fn new_no_wallclock_throttle_for_tests(
5805        genesis_config: &GenesisConfig,
5806    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
5807        let mut bank = Self::new_for_tests(genesis_config);
5808
5809        bank.ns_per_slot = u128::MAX;
5810        bank.wrap_with_bank_forks_for_tests()
5811    }
5812
5813    pub fn new_with_config_for_tests(
5814        genesis_config: &GenesisConfig,
5815        test_config: BankTestConfig,
5816    ) -> Self {
5817        Self::new_with_paths_for_tests(
5818            genesis_config,
5819            Arc::new(RuntimeConfig::default()),
5820            test_config,
5821            Vec::new(),
5822        )
5823    }
5824
    /// Test-only constructor: build a bank from `genesis_config` with an
    /// explicit runtime config, test config, and account paths.
    pub fn new_with_paths_for_tests(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        test_config: BankTestConfig,
        paths: Vec<PathBuf>,
    ) -> Self {
        // NOTE(review): the positional `None`/`false`/default arguments below
        // mirror `Bank::new_with_paths`; confirm against its signature before
        // changing their order.
        Self::new_with_paths(
            genesis_config,
            runtime_config,
            paths,
            None,
            None,
            false,
            Some(test_config.accounts_db_config),
            None,
            // A unique collector id for the test bank.
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }
5846
5847    pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
5848        Self::new_with_paths_for_benches(genesis_config, Vec::new())
5849    }
5850
    /// Intended for use by benches only.
    /// create new bank with the given config and paths.
    pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec<PathBuf>) -> Self {
        // NOTE(review): the positional `None`/`false`/default arguments below
        // mirror `Bank::new_with_paths`; confirm against its signature before
        // changing their order.
        Self::new_with_paths(
            genesis_config,
            Arc::<RuntimeConfig>::default(),
            paths,
            None,
            None,
            false,
            // Benchmark-tuned accounts-db configuration.
            Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
            None,
            // A unique collector id for the bench bank.
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }
5869
5870    /// Prepare a transaction batch from a list of legacy transactions. Used for tests only.
5871    #[cfg(feature = "dev-context-only-utils")]
5872    pub fn prepare_batch_for_tests(
5873        &self,
5874        txs: Vec<Transaction>,
5875    ) -> TransactionBatch<RuntimeTransaction<SanitizedTransaction>> {
5876        let sanitized_txs = txs
5877            .into_iter()
5878            .map(RuntimeTransaction::from_transaction_for_tests)
5879            .collect::<Vec<_>>();
5880        TransactionBatch::new(
5881            self.try_lock_accounts(&sanitized_txs),
5882            self,
5883            OwnedOrBorrowed::Owned(sanitized_txs),
5884        )
5885    }
5886
5887    /// Set the initial accounts data size
5888    /// NOTE: This fn is *ONLY FOR TESTS*
5889    pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) {
5890        self.accounts_data_size_initial = amount;
5891    }
5892
    /// Update the accounts data size off-chain delta
    /// NOTE: This fn is *ONLY FOR TESTS*
    ///
    /// Thin test-only wrapper that delegates to the private
    /// `update_accounts_data_size_delta_off_chain`.
    pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) {
        self.update_accounts_data_size_delta_off_chain(amount)
    }
5898
    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_transactions<'a>(
        &self,
        txs: impl Iterator<Item = &'a Transaction>,
    ) -> Vec<Result<()>> {
        // Delegates to the fallible variant and unwraps; panicking on
        // sanitization failure is acceptable in tests/benches.
        self.try_process_transactions(txs).unwrap()
    }
5911
    /// Process entry transactions in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_entry_transactions(&self, txs: Vec<VersionedTransaction>) -> Vec<Result<()>> {
        // Delegates to the fallible variant and unwraps; panicking on
        // sanitization failure is acceptable in tests/benches.
        self.try_process_entry_transactions(txs).unwrap()
    }
5921
    /// Flush this bank's slot out of the accounts-db write cache.
    /// Only compiled (and callable) in test builds.
    #[cfg(test)]
    pub fn flush_accounts_cache_slot_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache_slot_for_tests(self.slot())
    }
5929
    /// This is only valid to call from tests.
    /// block until initial accounts hash verification has completed
    ///
    /// Joins the accounts-db background verification thread, so the call
    /// returns only once that thread has finished.
    pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread()
    }
5939
    /// Returns the transaction processor's sysvar cache (by value).
    /// Test-only accessor, delegating to the processor's test helper.
    pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache {
        self.transaction_processor.get_sysvar_cache_for_tests()
    }
5943
    /// Recalculate the accounts lattice hash from the account index, the same
    /// way it is computed at startup, using this bank's ancestors and slot.
    /// Test-only helper.
    pub fn calculate_accounts_lt_hash_for_tests(&self) -> AccountsLtHash {
        self.rc
            .accounts
            .accounts_db
            .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, self.slot)
    }
5950
5951    pub fn new_program_cache_for_tx_batch_for_slot(&self, slot: Slot) -> ProgramCacheForTxBatch {
5952        ProgramCacheForTxBatch::new_from_cache(
5953            slot,
5954            self.epoch_schedule.get_epoch(slot),
5955            &self
5956                .transaction_processor
5957                .global_program_cache
5958                .read()
5959                .unwrap(),
5960        )
5961    }
5962
    /// Returns a reference to this bank's transaction batch processor.
    pub fn get_transaction_processor(&self) -> &TransactionBatchProcessor<BankForks> {
        &self.transaction_processor
    }
5966
    /// Replace this bank's fee structure with a clone of `fee_structure`.
    pub fn set_fee_structure(&mut self, fee_structure: &FeeStructure) {
        self.fee_structure = fee_structure.clone();
    }
5970
5971    pub fn load_program(
5972        &self,
5973        pubkey: &Pubkey,
5974        reload: bool,
5975        effective_epoch: Epoch,
5976    ) -> Option<Arc<ProgramCacheEntry>> {
5977        let environments = self
5978            .transaction_processor
5979            .get_environments_for_epoch(effective_epoch)?;
5980        load_program_with_pubkey(
5981            self,
5982            &environments,
5983            pubkey,
5984            self.slot(),
5985            &mut ExecuteTimings::default(), // Called by ledger-tool, metrics not accumulated.
5986            reload,
5987        )
5988    }
5989
5990    pub fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> {
5991        match self.get_account_with_fixed_root(pubkey) {
5992            Some(mut account) => {
5993                let min_balance = match get_system_account_kind(&account) {
5994                    Some(SystemAccountKind::Nonce) => self
5995                        .rent_collector
5996                        .rent
5997                        .minimum_balance(nonce::state::State::size()),
5998                    _ => 0,
5999                };
6000
6001                lamports
6002                    .checked_add(min_balance)
6003                    .filter(|required_balance| *required_balance <= account.lamports())
6004                    .ok_or(TransactionError::InsufficientFundsForFee)?;
6005                account
6006                    .checked_sub_lamports(lamports)
6007                    .map_err(|_| TransactionError::InsufficientFundsForFee)?;
6008                self.store_account(pubkey, &account);
6009
6010                Ok(())
6011            }
6012            None => Err(TransactionError::AccountNotFound),
6013        }
6014    }
6015
    /// Replace the bank's `HashOverrides` wholesale; the mutex is held only
    /// for the assignment. (Presumably used to force specific hash values,
    /// e.g. in tests/simulation — confirm against `HashOverrides` usage.)
    pub fn set_hash_overrides(&self, hash_overrides: HashOverrides) {
        *self.hash_overrides.lock().unwrap() = hash_overrides;
    }
6019
    /// Get stake and stake node accounts
    ///
    /// Inserts into `minimized_account_set` the pubkey of every stake
    /// delegation and every staked node tracked by the stakes cache.
    pub(crate) fn get_stake_accounts(&self, minimized_account_set: &DashSet<Pubkey>) {
        // All stake delegation pubkeys (sequential iteration).
        self.stakes_cache
            .stakes()
            .stake_delegations()
            .iter()
            .for_each(|(pubkey, _)| {
                minimized_account_set.insert(*pubkey);
            });

        // All staked node pubkeys; DashSet supports concurrent insertion from
        // rayon worker threads, so this half runs in parallel.
        self.stakes_cache
            .stakes()
            .staked_nodes()
            .par_iter()
            .for_each(|(pubkey, _)| {
                minimized_account_set.insert(*pubkey);
            });
    }
6038}
6039
/// Compute how much an account has changed size.  This function is useful when the data size delta
/// needs to be computed and passed to an `update_accounts_data_size_delta` function.
///
/// Returns `new_data_size - old_data_size` as an `i64`, saturating on
/// overflow of the subtraction.
///
/// # Panics
/// Panics if either size exceeds `i64::MAX`.
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
    // Both sizes must be representable as i64 for the signed subtraction below.
    assert!(old_data_size <= i64::MAX as usize);
    assert!(new_data_size <= i64::MAX as usize);
    (new_data_size as i64).saturating_sub(old_data_size as i64)
}
6050
/// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate
/// those callers explicitly.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum ApplyFeatureActivationsCaller {
    // Variant names mirror the call sites of `apply_feature_activations`
    // (not visible in this chunk); see those call sites for exact semantics.
    FinishInit,
    NewFromParent,
    WarpFromParent,
}
6059
impl Drop for Bank {
    fn drop(&mut self) {
        // If a drop callback was installed, delegate cleanup to it; otherwise
        // purge this bank's slot from the accounts-db directly.
        if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() {
            drop_callback.callback(self);
        } else {
            // Default case for tests
            self.rc
                .accounts
                .accounts_db
                .purge_slot(self.slot(), self.bank_id(), false);
        }
    }
}
6073
6074/// utility function used for testing and benchmarking.
6075pub mod test_utils {
6076    use {
6077        super::Bank,
6078        crate::installed_scheduler_pool::BankWithScheduler,
6079        solana_account::{ReadableAccount, WritableAccount},
6080        solana_instruction::error::LamportsError,
6081        solana_pubkey::Pubkey,
6082        solana_sha256_hasher::hashv,
6083        solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
6084        std::sync::Arc,
6085    };
6086    pub fn goto_end_of_slot(bank: Arc<Bank>) {
6087        goto_end_of_slot_with_scheduler(&BankWithScheduler::new_without_scheduler(bank))
6088    }
6089
6090    pub fn goto_end_of_slot_with_scheduler(bank: &BankWithScheduler) {
6091        let mut tick_hash = bank.last_blockhash();
6092        loop {
6093            tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
6094            bank.register_tick(&tick_hash);
6095            if tick_hash == bank.last_blockhash() {
6096                bank.freeze();
6097                return;
6098            }
6099        }
6100    }
6101
6102    pub fn update_vote_account_timestamp(
6103        timestamp: BlockTimestamp,
6104        bank: &Bank,
6105        vote_pubkey: &Pubkey,
6106    ) {
6107        let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default();
6108        let mut vote_state = vote_state::from(&vote_account).unwrap_or_default();
6109        vote_state.last_timestamp = timestamp;
6110        let versioned = VoteStateVersions::new_v3(vote_state);
6111        vote_state::to(&versioned, &mut vote_account).unwrap();
6112        bank.store_account(vote_pubkey, &vote_account);
6113    }
6114
6115    pub fn deposit(
6116        bank: &Bank,
6117        pubkey: &Pubkey,
6118        lamports: u64,
6119    ) -> std::result::Result<u64, LamportsError> {
6120        // This doesn't collect rents intentionally.
6121        // Rents should only be applied to actual TXes
6122        let mut account = bank
6123            .get_account_with_fixed_root_no_cache(pubkey)
6124            .unwrap_or_default();
6125        account.checked_add_lamports(lamports)?;
6126        bank.store_account(pubkey, &account);
6127        Ok(account.lamports())
6128    }
6129}