clone_solana_runtime/
bank.rs

1//! The `bank` module tracks client accounts and the progress of on-chain
2//! programs.
3//!
4//! A single bank relates to a block produced by a single leader and each bank
5//! except for the genesis bank points back to a parent bank.
6//!
7//! The bank is the main entrypoint for processing verified transactions with the function
8//! `Bank::process_transactions`
9//!
10//! It does this by loading the accounts using the reference it holds on the account store,
11//! and then passing those to an InvokeContext which handles loading the programs specified
12//! by the Transaction and executing it.
13//!
14//! The bank then stores the results to the accounts store.
15//!
16//! It then has APIs for retrieving whether a transaction has been processed and its status.
17//! See `get_signature_status` et al.
18//!
19//! Bank lifecycle:
20//!
21//! A bank is newly created and open to transactions. Transactions are applied
22//! until either the bank reaches the tick count when the node is the leader for that slot, or the
23//! node has applied all transactions present in all `Entry`s in the slot.
24//!
25//! Once it is complete, the bank can then be frozen. After frozen, no more transactions can
26//! be applied or state changes made. At the frozen step, rent will be applied and various
27//! sysvar special accounts are updated to the new state of the system.
28//!
29//! After frozen, and the bank has had the appropriate number of votes on it, then it can become
30//! rooted. At this point, it will not be able to be removed from the chain and the
31//! state is finalized.
32//!
33//! It offers a high-level API that signs transactions
34//! on behalf of the caller, and a low-level API for when they have
35//! already been signed and verified.
36use {
37    crate::{
38        account_saver::collect_accounts_to_store,
39        bank::{
40            metrics::*,
41            partitioned_epoch_rewards::{EpochRewardStatus, StakeRewards, VoteRewardsAccounts},
42        },
43        bank_forks::BankForks,
44        epoch_stakes::{split_epoch_stakes, EpochStakes, NodeVoteAccounts, VersionedEpochStakes},
45        installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock},
46        rent_collector::RentCollectorWithMetrics,
47        runtime_config::RuntimeConfig,
48        serde_snapshot::BankIncrementalSnapshotPersistence,
49        snapshot_hash::SnapshotHash,
50        stake_account::StakeAccount,
51        stake_weighted_timestamp::{
52            calculate_stake_weighted_timestamp, MaxAllowableDrift,
53            MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
54        },
55        stakes::{Stakes, StakesCache, StakesEnum},
56        status_cache::{SlotDelta, StatusCache},
57        transaction_batch::{OwnedOrBorrowed, TransactionBatch},
58        verify_precompiles::verify_precompiles,
59    },
60    accounts_lt_hash::{CacheValue as AccountsLtHashCacheValue, Stats as AccountsLtHashStats},
61    ahash::AHashSet,
62    clone_agave_feature_set::{self as feature_set, reward_full_priority_fee, FeatureSet},
63    clone_agave_precompiles::get_precompiles,
64    clone_agave_reserved_account_keys::ReservedAccountKeys,
65    clone_solana_accounts_db::{
66        account_locks::validate_account_locks,
67        accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot},
68        accounts_db::{
69            AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource,
70            DuplicatesLtHash, OldStoragesPolicy, PubkeyHashAccount,
71            VerifyAccountsHashAndLamportsConfig,
72        },
73        accounts_hash::{
74            AccountHash, AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats,
75            IncrementalAccountsHash, MerkleOrLatticeAccountsHash,
76        },
77        accounts_index::{IndexKey, ScanConfig, ScanResult},
78        accounts_partition::{self, Partition, PartitionIndex},
79        accounts_update_notifier_interface::AccountsUpdateNotifier,
80        ancestors::{Ancestors, AncestorsForSerialization},
81        blockhash_queue::BlockhashQueue,
82        epoch_accounts_hash::EpochAccountsHash,
83        sorted_storages::SortedStorages,
84        storable_accounts::StorableAccounts,
85    },
86    clone_solana_bpf_loader_program::syscalls::{
87        create_program_runtime_environment_v1, create_program_runtime_environment_v2,
88    },
89    clone_solana_builtins::{prototype::BuiltinPrototype, BUILTINS, STATELESS_BUILTINS},
90    clone_solana_compute_budget::compute_budget::ComputeBudget,
91    clone_solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions,
92    clone_solana_cost_model::{
93        block_cost_limits::{simd_0207_block_limits, simd_0256_block_limits},
94        cost_tracker::CostTracker,
95    },
96    clone_solana_fee::FeeFeatures,
97    clone_solana_lattice_hash::lt_hash::LtHash,
98    clone_solana_measure::{meas_dur, measure::Measure, measure_time, measure_us},
99    clone_solana_program_runtime::{
100        invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry,
101    },
102    clone_solana_runtime_transaction::{
103        runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta,
104    },
105    clone_solana_sdk::{
106        account::{
107            create_account_shared_data_with_fields as create_account, from_account, Account,
108            AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount,
109        },
110        bpf_loader_upgradeable,
111        clock::{
112            BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK,
113            DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE,
114            MAX_TRANSACTION_FORWARDING_DELAY, SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2,
115            UPDATED_HASHES_PER_TICK3, UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5,
116            UPDATED_HASHES_PER_TICK6,
117        },
118        epoch_info::EpochInfo,
119        epoch_schedule::EpochSchedule,
120        feature,
121        fee::{FeeBudgetLimits, FeeDetails, FeeStructure},
122        fee_calculator::FeeRateGovernor,
123        genesis_config::{ClusterType, GenesisConfig},
124        hard_forks::HardForks,
125        hash::{extend_and_hash, hashv, Hash},
126        incinerator,
127        inflation::Inflation,
128        inner_instruction::InnerInstructions,
129        message::{AccountKeys, SanitizedMessage},
130        native_loader,
131        native_token::LAMPORTS_PER_SOL,
132        packet::PACKET_DATA_SIZE,
133        pubkey::Pubkey,
134        rent_collector::{CollectedInfo, RentCollector},
135        rent_debits::RentDebits,
136        reward_info::RewardInfo,
137        signature::{Keypair, Signature},
138        slot_hashes::SlotHashes,
139        slot_history::{Check, SlotHistory},
140        stake::state::Delegation,
141        system_transaction,
142        sysvar::{self, last_restart_slot::LastRestartSlot, Sysvar, SysvarId},
143        timing::years_as_slots,
144        transaction::{
145            MessageHash, Result, SanitizedTransaction, Transaction, TransactionError,
146            TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS,
147        },
148    },
149    clone_solana_stake_program::points::InflationPointCalculationEvent,
150    clone_solana_svm::{
151        account_loader::{collect_rent_from_account, LoadedTransaction},
152        account_overrides::AccountOverrides,
153        transaction_commit_result::{CommittedTransaction, TransactionCommitResult},
154        transaction_error_metrics::TransactionErrorMetrics,
155        transaction_execution_result::{
156            TransactionExecutionDetails, TransactionLoadedAccountsStats,
157        },
158        transaction_processing_callback::{AccountState, TransactionProcessingCallback},
159        transaction_processing_result::{
160            ProcessedTransaction, TransactionProcessingResult,
161            TransactionProcessingResultExtensions,
162        },
163        transaction_processor::{
164            ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages,
165            TransactionProcessingConfig, TransactionProcessingEnvironment,
166        },
167    },
168    clone_solana_svm_transaction::svm_message::SVMMessage,
169    clone_solana_timings::{ExecuteTimingType, ExecuteTimings},
170    clone_solana_transaction_context::{TransactionAccount, TransactionReturnData},
171    clone_solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap},
172    dashmap::{DashMap, DashSet},
173    log::*,
174    rayon::{
175        iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
176        ThreadPoolBuilder,
177    },
178    serde::Serialize,
179    std::{
180        collections::{HashMap, HashSet},
181        convert::TryFrom,
182        fmt,
183        ops::{AddAssign, RangeFull, RangeInclusive},
184        path::PathBuf,
185        slice,
186        sync::{
187            atomic::{
188                AtomicBool, AtomicI64, AtomicU64, AtomicUsize,
189                Ordering::{AcqRel, Acquire, Relaxed},
190            },
191            Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak,
192        },
193        thread::Builder,
194        time::{Duration, Instant},
195    },
196};
197#[cfg(feature = "dev-context-only-utils")]
198use {
199    clone_solana_accounts_db::accounts_db::{
200        ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
201    },
202    clone_solana_nonce_account::{get_system_account_kind, SystemAccountKind},
203    clone_solana_program_runtime::{
204        loaded_programs::ProgramCacheForTxBatch, sysvar_cache::SysvarCache,
205    },
206    clone_solana_sdk::nonce,
207    clone_solana_svm::program_loader::load_program_with_pubkey,
208};
209pub use {
210    clone_solana_sdk::reward_type::RewardType,
211    partitioned_epoch_rewards::KeyedRewardsAndNumPartitions,
212};
213
/// params to `verify_accounts_hash`
///
/// (Field meanings below are inferred from the names — confirm against the
/// `verify_accounts_hash` implementation.)
struct VerifyAccountsHashConfig {
    /// exercise the slower, self-checking hash-calculation path
    test_hash_calculation: bool,
    /// tolerate a hash mismatch instead of treating it as fatal
    ignore_mismatch: bool,
    /// require the bank being verified to already be rooted
    require_rooted_bank: bool,
    /// perform the verification on a background thread rather than blocking
    run_in_background: bool,
    /// persist raw hash-calculation input data for debugging
    store_hash_raw_data_for_debug: bool,
}
222
223mod accounts_lt_hash;
224mod address_lookup_table;
225pub mod bank_hash_details;
226mod builtin_programs;
227pub mod builtins;
228mod check_transactions;
229pub mod epoch_accounts_hash_utils;
230mod fee_distribution;
231mod metrics;
232pub(crate) mod partitioned_epoch_rewards;
233mod recent_blockhashes_account;
234mod serde_snapshot;
235mod sysvar_cache;
236pub(crate) mod tests;
237
/// Seconds in an average (Julian) year of 365.25 days.
pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;

/// Maximum number of epochs of stakes retained for leader-schedule purposes
/// (NOTE(review): inferred from the name — confirm at use sites).
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
241
/// Timing and volume metrics accumulated during rent collection; the `_us`
/// fields are cumulative microsecond counters and `count` a tally of items
/// processed. (Exact semantics of each counter are set at the collection call
/// sites — confirm there.)
#[derive(Default)]
struct RentMetrics {
    hold_range_us: AtomicU64,
    load_us: AtomicU64,
    collect_us: AtomicU64,
    hash_us: AtomicU64,
    store_us: AtomicU64,
    count: AtomicUsize,
}
251
/// The bank's cache of transaction statuses (`Result<()>` per transaction);
/// see `get_signature_status` et al. in the module docs.
pub type BankStatusCache = StatusCache<Result<()>>;
/// Per-slot delta of the status cache. Under the `frozen-abi` feature its ABI
/// digest is pinned so accidental layout changes are caught at build time.
#[cfg_attr(
    feature = "frozen-abi",
    frozen_abi(digest = "4e7a7AAsQrM5Lp5bhREdVZ5QGZfyETbBthhWjYMYb6zS")
)]
pub type BankSlotDelta = SlotDelta<Result<()>>;
258
/// Wall-clock timings (milliseconds) gathered while squashing a bank into its
/// parent, split by the phases of the accounts work plus the cache squash.
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]
pub struct SquashTiming {
    pub squash_accounts_ms: u64,
    pub squash_accounts_cache_ms: u64,
    pub squash_accounts_index_ms: u64,
    pub squash_accounts_store_ms: u64,

    pub squash_cache_ms: u64,
}

impl AddAssign for SquashTiming {
    /// Accumulates another measurement into this one, field by field.
    fn add_assign(&mut self, rhs: Self) {
        let Self {
            squash_accounts_ms,
            squash_accounts_cache_ms,
            squash_accounts_index_ms,
            squash_accounts_store_ms,
            squash_cache_ms,
        } = rhs;
        self.squash_accounts_ms += squash_accounts_ms;
        self.squash_accounts_cache_ms += squash_accounts_cache_ms;
        self.squash_accounts_index_ms += squash_accounts_index_ms;
        self.squash_accounts_store_ms += squash_accounts_store_ms;
        self.squash_cache_ms += squash_cache_ms;
    }
}
278
/// Running totals of the fees this bank has collected, split into base
/// transaction fees and prioritization fees (see `accumulate`).
#[derive(Debug, Default, PartialEq)]
pub(crate) struct CollectorFeeDetails {
    /// sum of base transaction fees (saturating)
    transaction_fee: u64,
    /// sum of prioritization fees (saturating)
    priority_fee: u64,
}
284
285impl CollectorFeeDetails {
286    pub(crate) fn accumulate(&mut self, fee_details: &FeeDetails) {
287        self.transaction_fee = self
288            .transaction_fee
289            .saturating_add(fee_details.transaction_fee());
290        self.priority_fee = self
291            .priority_fee
292            .saturating_add(fee_details.prioritization_fee());
293    }
294
295    pub(crate) fn total(&self) -> u64 {
296        self.transaction_fee.saturating_add(self.priority_fee)
297    }
298}
299
300impl From<FeeDetails> for CollectorFeeDetails {
301    fn from(fee_details: FeeDetails) -> Self {
302        CollectorFeeDetails {
303            transaction_fee: fee_details.transaction_fee(),
304            priority_fee: fee_details.prioritization_fee(),
305        }
306    }
307}
308
/// Reference-counted state shared between a bank and related banks.
#[derive(Debug)]
pub struct BankRc {
    /// where all the Accounts are stored
    pub accounts: Arc<Accounts>,

    /// Previous checkpoint of this bank
    pub(crate) parent: RwLock<Option<Arc<Bank>>>,

    /// Monotonic source of unique `BankId`s; shared via `Arc` so related banks
    /// draw ids from the same counter.
    pub(crate) bank_id_generator: Arc<AtomicU64>,
}
319
320impl BankRc {
321    pub(crate) fn new(accounts: Accounts) -> Self {
322        Self {
323            accounts: Arc::new(accounts),
324            parent: RwLock::new(None),
325            bank_id_generator: Arc::new(AtomicU64::new(0)),
326        }
327    }
328}
329
/// Output bundle produced by loading and executing a batch of transactions.
pub struct LoadAndExecuteTransactionsOutput {
    /// Vector of results indicating whether a transaction was processed or could not
    /// be processed. Note processed transactions can still have failed!
    pub processing_results: Vec<TransactionProcessingResult>,
    /// Processed transaction counts used to update bank transaction counts and
    /// for metrics reporting.
    pub processed_counts: ProcessedTransactionCounts,
}
338
/// Outcome of simulating a single transaction.
#[derive(Debug, PartialEq)]
pub struct TransactionSimulationResult {
    /// Overall success/failure of the simulated execution.
    pub result: Result<()>,
    /// Log messages emitted during simulation.
    pub logs: TransactionLogMessages,
    /// Account states after the simulation (NOTE(review): which accounts are
    /// included is decided by the simulation path — confirm there).
    pub post_simulation_accounts: Vec<TransactionAccount>,
    /// Compute units consumed by the simulated execution.
    pub units_consumed: u64,
    /// Return data set by the transaction, if any.
    pub return_data: Option<TransactionReturnData>,
    /// Inner (CPI) instructions, when recording was enabled.
    pub inner_instructions: Option<Vec<InnerInstructions>>,
}
348
/// Per-transaction lamport balances: outer index = transaction, inner = account.
pub type TransactionBalances = Vec<Vec<u64>>;

/// Account balances captured immediately before and after executing a batch of
/// transactions.
#[derive(Clone, Debug)]
pub struct TransactionBalancesSet {
    pub pre_balances: TransactionBalances,
    pub post_balances: TransactionBalances,
}

impl TransactionBalancesSet {
    /// Pairs up pre- and post-execution balances for a batch.
    ///
    /// Panics if the two vectors do not cover the same number of transactions.
    pub fn new(pre_balances: TransactionBalances, post_balances: TransactionBalances) -> Self {
        assert_eq!(pre_balances.len(), post_balances.len());
        Self {
            pre_balances,
            post_balances,
        }
    }
}
365
/// Result of pre-commit work; on success it may carry a read guard on a bank's
/// `hash` lock (NOTE(review): presumably held to keep the bank from freezing
/// mid-commit — confirm at use sites).
pub type PreCommitResult<'a> = Result<Option<RwLockReadGuard<'a, Hash>>>;
367
368#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
369pub enum TransactionLogCollectorFilter {
370    All,
371    AllWithVotes,
372    None,
373    OnlyMentionedAddresses,
374}
375
376impl Default for TransactionLogCollectorFilter {
377    fn default() -> Self {
378        Self::None
379    }
380}
381
/// Configuration for which transaction logs the bank should collect.
#[derive(Debug, Default)]
pub struct TransactionLogCollectorConfig {
    /// Addresses of interest when `filter` is `OnlyMentionedAddresses`.
    pub mentioned_addresses: HashSet<Pubkey>,
    /// Which transactions' logs to keep.
    pub filter: TransactionLogCollectorFilter,
}
387
/// One transaction's collected log output plus identifying metadata.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TransactionLogInfo {
    /// Signature of the transaction the logs belong to.
    pub signature: Signature,
    /// The transaction's execution result.
    pub result: Result<()>,
    /// Whether the transaction was a vote.
    pub is_vote: bool,
    /// The log messages themselves.
    pub log_messages: TransactionLogMessages,
}
395
/// Accumulator for transaction logs collected while a bank processes
/// transactions; queried via `get_logs_for_address`.
#[derive(Default, Debug)]
pub struct TransactionLogCollector {
    // All the logs collected from this Bank.  Exact contents depend on the
    // active `TransactionLogCollectorFilter`
    pub logs: Vec<TransactionLogInfo>,

    // For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
    // locate the logs from transactions that included the mentioned addresses.
    pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
}
406
407impl TransactionLogCollector {
408    pub fn get_logs_for_address(
409        &self,
410        address: Option<&Pubkey>,
411    ) -> Option<Vec<TransactionLogInfo>> {
412        match address {
413            None => Some(self.logs.clone()),
414            Some(address) => self.mentioned_address_map.get(address).map(|log_indices| {
415                log_indices
416                    .iter()
417                    .filter_map(|i| self.logs.get(*i).cloned())
418                    .collect()
419            }),
420        }
421    }
422}
423
/// Bank's common fields shared by all supported snapshot versions for deserialization.
/// Sync fields with BankFieldsToSerialize! This is paired with it.
/// All members are made public so that `Bank`'s own members can stay private and so a versioned deserializer can work on this struct.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
pub struct BankFieldsToDeserialize {
    pub(crate) blockhash_queue: BlockhashQueue,
    pub(crate) ancestors: AncestorsForSerialization,
    pub(crate) hash: Hash,
    pub(crate) parent_hash: Hash,
    pub(crate) parent_slot: Slot,
    pub(crate) hard_forks: HardForks,
    pub(crate) transaction_count: u64,
    pub(crate) tick_height: u64,
    pub(crate) signature_count: u64,
    pub(crate) capitalization: u64,
    pub(crate) max_tick_height: u64,
    pub(crate) hashes_per_tick: Option<u64>,
    pub(crate) ticks_per_slot: u64,
    pub(crate) ns_per_slot: u128,
    pub(crate) genesis_creation_time: UnixTimestamp,
    pub(crate) slots_per_year: f64,
    pub(crate) slot: Slot,
    pub(crate) epoch: Epoch,
    pub(crate) block_height: u64,
    pub(crate) collector_id: Pubkey,
    pub(crate) collector_fees: u64,
    pub(crate) fee_rate_governor: FeeRateGovernor,
    pub(crate) collected_rent: u64,
    pub(crate) rent_collector: RentCollector,
    pub(crate) epoch_schedule: EpochSchedule,
    pub(crate) inflation: Inflation,
    pub(crate) stakes: Stakes<Delegation>,
    pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
    pub(crate) is_delta: bool,
    pub(crate) accounts_data_len: u64,
    pub(crate) incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    pub(crate) epoch_accounts_hash: Option<Hash>,
    // When removing the accounts lt hash featurization code, also remove this Option wrapper
    pub(crate) accounts_lt_hash: Option<AccountsLtHash>,
    pub(crate) bank_hash_stats: BankHashStats,
}
470
/// Bank's common fields shared by all supported snapshot versions for serialization.
/// This was separated from BankFieldsToDeserialize to avoid cloning by using refs.
/// So, sync fields with BankFieldsToDeserialize!
/// All members are made public so that `Bank`'s own members can stay private and so a versioned serializer can work on this struct.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Debug)]
pub struct BankFieldsToSerialize {
    pub blockhash_queue: BlockhashQueue,
    pub ancestors: AncestorsForSerialization,
    pub hash: Hash,
    pub parent_hash: Hash,
    pub parent_slot: Slot,
    pub hard_forks: HardForks,
    pub transaction_count: u64,
    pub tick_height: u64,
    pub signature_count: u64,
    pub capitalization: u64,
    pub max_tick_height: u64,
    pub hashes_per_tick: Option<u64>,
    pub ticks_per_slot: u64,
    pub ns_per_slot: u128,
    pub genesis_creation_time: UnixTimestamp,
    pub slots_per_year: f64,
    pub slot: Slot,
    pub epoch: Epoch,
    pub block_height: u64,
    pub collector_id: Pubkey,
    pub collector_fees: u64,
    pub fee_rate_governor: FeeRateGovernor,
    pub collected_rent: u64,
    pub rent_collector: RentCollector,
    pub epoch_schedule: EpochSchedule,
    pub inflation: Inflation,
    pub stakes: StakesEnum,
    pub epoch_stakes: HashMap<Epoch, EpochStakes>,
    pub is_delta: bool,
    pub accounts_data_len: u64,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    // When removing the accounts lt hash featurization code, also remove this Option wrapper
    pub accounts_lt_hash: Option<AccountsLtHash>,
}
515
// Can't derive PartialEq because RwLock doesn't implement PartialEq
#[cfg(feature = "dev-context-only-utils")]
impl PartialEq for Bank {
    fn eq(&self, other: &Self) -> bool {
        if std::ptr::eq(self, other) {
            return true;
        }
        // Suppress rustfmt until https://github.com/rust-lang/rustfmt/issues/5920 is fixed ...
        #[rustfmt::skip]
        let Self {
            skipped_rewrites: _,
            rc: _,
            status_cache: _,
            blockhash_queue,
            ancestors,
            hash,
            parent_hash,
            parent_slot,
            hard_forks,
            transaction_count,
            non_vote_transaction_count_since_restart: _,
            transaction_error_count: _,
            transaction_entries_count: _,
            transactions_per_entry_max: _,
            tick_height,
            signature_count,
            capitalization,
            max_tick_height,
            hashes_per_tick,
            ticks_per_slot,
            ns_per_slot,
            genesis_creation_time,
            slots_per_year,
            slot,
            bank_id: _,
            epoch,
            block_height,
            collector_id,
            collector_fees,
            fee_rate_governor,
            collected_rent,
            rent_collector,
            epoch_schedule,
            inflation,
            stakes_cache,
            epoch_stakes,
            is_delta,
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides,
            accounts_lt_hash,
            // TODO: Confirm if all these fields are intentionally ignored!
            rewards: _,
            cluster_type: _,
            lazy_rent_collection: _,
            rewards_pool_pubkeys: _,
            transaction_debug_keys: _,
            transaction_log_collector_config: _,
            transaction_log_collector: _,
            feature_set: _,
            reserved_account_keys: _,
            drop_callback: _,
            freeze_started: _,
            vote_only_bank: _,
            cost_tracker: _,
            accounts_data_size_initial: _,
            accounts_data_size_delta_on_chain: _,
            accounts_data_size_delta_off_chain: _,
            epoch_reward_status: _,
            transaction_processor: _,
            check_program_modification_slot: _,
            collector_fee_details: _,
            compute_budget: _,
            transaction_account_lock_limit: _,
            fee_structure: _,
            cache_for_accounts_lt_hash: _,
            stats_for_accounts_lt_hash: _,
            block_id,
            bank_hash_stats: _,
            // Ignore new fields explicitly if they do not impact PartialEq.
            // Adding ".." will remove compile-time checks that if a new field
            // is added to the struct, this PartialEq is accordingly updated.
        } = self;
        // Interior-mutable fields are compared through their lock guards;
        // atomic counters are compared via Relaxed loads.
        *blockhash_queue.read().unwrap() == *other.blockhash_queue.read().unwrap()
            && ancestors == &other.ancestors
            && *hash.read().unwrap() == *other.hash.read().unwrap()
            && parent_hash == &other.parent_hash
            && parent_slot == &other.parent_slot
            && *hard_forks.read().unwrap() == *other.hard_forks.read().unwrap()
            && transaction_count.load(Relaxed) == other.transaction_count.load(Relaxed)
            && tick_height.load(Relaxed) == other.tick_height.load(Relaxed)
            && signature_count.load(Relaxed) == other.signature_count.load(Relaxed)
            && capitalization.load(Relaxed) == other.capitalization.load(Relaxed)
            && max_tick_height == &other.max_tick_height
            && hashes_per_tick == &other.hashes_per_tick
            && ticks_per_slot == &other.ticks_per_slot
            && ns_per_slot == &other.ns_per_slot
            && genesis_creation_time == &other.genesis_creation_time
            && slots_per_year == &other.slots_per_year
            && slot == &other.slot
            && epoch == &other.epoch
            && block_height == &other.block_height
            && collector_id == &other.collector_id
            && collector_fees.load(Relaxed) == other.collector_fees.load(Relaxed)
            && fee_rate_governor == &other.fee_rate_governor
            && collected_rent.load(Relaxed) == other.collected_rent.load(Relaxed)
            && rent_collector == &other.rent_collector
            && epoch_schedule == &other.epoch_schedule
            && *inflation.read().unwrap() == *other.inflation.read().unwrap()
            && *stakes_cache.stakes() == *other.stakes_cache.stakes()
            && epoch_stakes == &other.epoch_stakes
            && is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
            // No deadlock is possible, when Arc::ptr_eq() returns false, because of being
            // different Mutexes.
            && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) ||
                *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap())
            // accounts_lt_hash only participates when both banks have the feature enabled.
            && !(self.is_accounts_lt_hash_enabled() && other.is_accounts_lt_hash_enabled()
                && *accounts_lt_hash.lock().unwrap() != *other.accounts_lt_hash.lock().unwrap())
            && *block_id.read().unwrap() == *other.block_id.read().unwrap()
    }
}
636
#[cfg(feature = "dev-context-only-utils")]
impl BankFieldsToSerialize {
    /// Create a new BankFieldsToSerialize where basically every field is defaulted.
    /// Only use for tests; many of the fields are invalid!
    pub fn default_for_tests() -> Self {
        Self {
            blockhash_queue: BlockhashQueue::default(),
            ancestors: AncestorsForSerialization::default(),
            hash: Hash::default(),
            parent_hash: Hash::default(),
            parent_slot: Slot::default(),
            hard_forks: HardForks::default(),
            transaction_count: 0,
            tick_height: 0,
            signature_count: 0,
            capitalization: 0,
            max_tick_height: 0,
            hashes_per_tick: None,
            ticks_per_slot: 0,
            ns_per_slot: 0,
            genesis_creation_time: UnixTimestamp::default(),
            slots_per_year: 0.0,
            slot: Slot::default(),
            epoch: Epoch::default(),
            block_height: 0,
            collector_id: Pubkey::default(),
            collector_fees: 0,
            fee_rate_governor: FeeRateGovernor::default(),
            collected_rent: 0,
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Inflation::default(),
            stakes: Stakes::<Delegation>::default().into(),
            epoch_stakes: HashMap::default(),
            is_delta: false,
            accounts_data_len: 0,
            versioned_epoch_stakes: HashMap::default(),
            // Recognizable sentinel (0x7E57 reads as "TEST") rather than a real hash.
            accounts_lt_hash: Some(AccountsLtHash(LtHash([0x7E57; LtHash::NUM_ELEMENTS]))),
        }
    }
}
678
/// Events surfaced to a `RewardCalcTracer` while rewards are being calculated.
#[derive(Debug)]
pub enum RewardCalculationEvent<'a, 'b> {
    /// An inflation-point calculation event for the given staking pubkey.
    Staking(&'a Pubkey, &'b InflationPointCalculationEvent),
}
683
/// type alias is not supported for trait in rust yet. As a workaround, we define the
/// `RewardCalcTracer` trait explicitly and implement it on any type that implements
/// `Fn(&RewardCalculationEvent) + Send + Sync`.
pub trait RewardCalcTracer: Fn(&RewardCalculationEvent) + Send + Sync {}

// Blanket impl: any matching closure/function is automatically a `RewardCalcTracer`.
impl<T: Fn(&RewardCalculationEvent) + Send + Sync> RewardCalcTracer for T {}
690
/// A typed `None` for call sites taking `Option<impl RewardCalcTracer>` that
/// do not want tracing; the turbofish pins a concrete fn type for `impl Trait`.
fn null_tracer() -> Option<impl RewardCalcTracer> {
    None::<fn(&RewardCalculationEvent)>
}
694
/// Object-safe callback carried by a bank (see `OptionalDropCallback`);
/// `clone_box` allows cloning the boxed trait object.
/// (NOTE(review): name suggests it fires when a bank is dropped — confirm in
/// the `Drop` impl, which is outside this view.)
pub trait DropCallback: fmt::Debug {
    fn callback(&self, b: &Bank);
    fn clone_box(&self) -> Box<dyn DropCallback + Send + Sync>;
}
699
/// Newtype around an optional boxed `DropCallback`, so it can carry
/// `Debug`/`Default` impls.
#[derive(Debug, Default)]
pub struct OptionalDropCallback(Option<Box<dyn DropCallback + Send + Sync>>);
702
/// Dev/test-only registry of per-slot hash overrides (blockhash + bank hash).
#[derive(Default, Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
pub struct HashOverrides {
    // one override entry per slot; at most one per slot (see `add_override`)
    hashes: HashMap<Slot, HashOverride>,
}
708
#[cfg(feature = "dev-context-only-utils")]
impl HashOverrides {
    /// Full override entry recorded for `slot`, if any.
    fn get_hash_override(&self, slot: Slot) -> Option<&HashOverride> {
        self.hashes.get(&slot)
    }

    /// Blockhash override for `slot`, if one was registered.
    fn get_blockhash_override(&self, slot: Slot) -> Option<&Hash> {
        let hash_override = self.get_hash_override(slot)?;
        Some(&hash_override.blockhash)
    }

    /// Bank-hash override for `slot`, if one was registered.
    fn get_bank_hash_override(&self, slot: Slot) -> Option<&Hash> {
        let hash_override = self.get_hash_override(slot)?;
        Some(&hash_override.bank_hash)
    }

    /// Registers the override pair for `slot`; panics if `slot` already has one.
    pub fn add_override(&mut self, slot: Slot, blockhash: Hash, bank_hash: Hash) {
        let previous = self.hashes.insert(
            slot,
            HashOverride {
                blockhash,
                bank_hash,
            },
        );
        assert!(previous.is_none());
    }
}
739
/// The pair of hashes that override the computed values for a single slot
/// (dev-context-only; see `HashOverrides`).
#[derive(Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
struct HashOverride {
    blockhash: Hash,
    bank_hash: Hash,
}
746
/// Manager for the state of all accounts and programs after processing its entries.
#[derive(Debug)]
pub struct Bank {
    /// References to accounts, parent and signature status
    pub rc: BankRc,

    /// A cache of signature statuses
    pub status_cache: Arc<RwLock<BankStatusCache>>,

    /// FIFO queue of `recent_blockhash` items
    blockhash_queue: RwLock<BlockhashQueue>,

    /// The set of parents including this bank
    pub ancestors: Ancestors,

    /// Hash of this Bank's state. Only meaningful after freezing.
    hash: RwLock<Hash>,

    /// Hash of this Bank's parent's state
    parent_hash: Hash,

    /// parent's slot
    parent_slot: Slot,

    /// slots to hard fork at
    hard_forks: Arc<RwLock<HardForks>>,

    /// The number of committed transactions since genesis.
    transaction_count: AtomicU64,

    /// The number of non-vote transactions committed since the most
    /// recent boot from snapshot or genesis. This value is only stored in
    /// blockstore for the RPC method "getPerformanceSamples". It is not
    /// retained within snapshots, but is preserved in `Bank::new_from_parent`.
    non_vote_transaction_count_since_restart: AtomicU64,

    /// The number of transaction errors in this slot
    transaction_error_count: AtomicU64,

    /// The number of transaction entries in this slot
    transaction_entries_count: AtomicU64,

    /// The max number of transaction in an entry in this slot
    transactions_per_entry_max: AtomicU64,

    /// Bank tick height
    tick_height: AtomicU64,

    /// The number of signatures from valid transactions in this slot
    signature_count: AtomicU64,

    /// Total capitalization, used to calculate inflation
    capitalization: AtomicU64,

    // Bank max_tick_height
    max_tick_height: u64,

    /// The number of hashes in each tick. None value means hashing is disabled.
    hashes_per_tick: Option<u64>,

    /// The number of ticks in each slot.
    ticks_per_slot: u64,

    /// length of a slot in ns
    pub ns_per_slot: u128,

    /// genesis time, used for computed clock
    genesis_creation_time: UnixTimestamp,

    /// The number of slots per year, used for inflation
    slots_per_year: f64,

    /// Bank slot (i.e. block)
    slot: Slot,

    /// Unique id for this bank instance; assigned from
    /// `BankRc::bank_id_generator` when the bank is created.
    bank_id: BankId,

    /// Bank epoch
    epoch: Epoch,

    /// Bank block_height
    block_height: u64,

    /// The pubkey to send transactions fees to.
    collector_id: Pubkey,

    /// Fees that have been collected
    collector_fees: AtomicU64,

    /// Track cluster signature throughput and adjust fee rate
    pub(crate) fee_rate_governor: FeeRateGovernor,

    /// Rent that has been collected
    collected_rent: AtomicU64,

    /// latest rent collector, knows the epoch
    rent_collector: RentCollector,

    /// initialized from genesis
    pub(crate) epoch_schedule: EpochSchedule,

    /// inflation specs
    inflation: Arc<RwLock<Inflation>>,

    /// cache of vote_account and stake_account state for this fork
    stakes_cache: StakesCache,

    /// staked nodes on epoch boundaries, saved off when a bank.slot() is at
    ///   a leader schedule calculation boundary
    epoch_stakes: HashMap<Epoch, EpochStakes>,

    /// A boolean reflecting whether any entries were recorded into the PoH
    /// stream for the slot == self.slot
    is_delta: AtomicBool,

    /// Protocol-level rewards that were distributed by this bank
    pub rewards: RwLock<Vec<(Pubkey, RewardInfo)>>,

    /// Cluster type taken from the genesis config; `None` only for a
    /// default-constructed bank (see `default_with_accounts`).
    pub cluster_type: Option<ClusterType>,

    // NOTE(review): flag is inherited from the parent bank; the rent-collection
    // semantics it toggles are not visible in this part of the file — confirm
    // at the use sites.
    pub lazy_rent_collection: AtomicBool,

    // this is temporary field only to remove rewards_pool entirely
    pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,

    /// Optional set of keys used for transaction debugging, supplied via
    /// `new_with_paths`'s `debug_keys` argument.
    transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,

    // Global configuration for how transaction logs should be collected across all banks
    pub transaction_log_collector_config: Arc<RwLock<TransactionLogCollectorConfig>>,

    // Logs from transactions that this Bank executed collected according to the criteria in
    // `transaction_log_collector_config`
    pub transaction_log_collector: Arc<RwLock<TransactionLogCollector>>,

    /// Runtime feature set for this bank; a child bank starts with a clone of
    /// its parent's feature set.
    pub feature_set: Arc<FeatureSet>,

    /// Set of reserved account keys that cannot be write locked
    reserved_account_keys: Arc<ReservedAccountKeys>,

    /// callback function only to be called when dropping and should only be called once
    pub drop_callback: RwLock<OptionalDropCallback>,

    // NOTE(review): appears to be set once freezing of this bank begins — the
    // `freeze()` implementation is not visible here; confirm.
    pub freeze_started: AtomicBool,

    /// True if this bank was created with `NewBankOptions::vote_only_bank`.
    vote_only_bank: bool,

    /// Block cost tracker; a child bank starts from the parent's limits
    /// (see `new_from_parent_limits` in `_new_from_parent`).
    cost_tracker: RwLock<CostTracker>,

    /// The initial accounts data size at the start of this Bank, before processing any transactions/etc
    accounts_data_size_initial: u64,
    /// The change to accounts data size in this Bank, due on-chain events (i.e. transactions)
    accounts_data_size_delta_on_chain: AtomicI64,
    /// The change to accounts data size in this Bank, due to off-chain events (i.e. rent collection)
    accounts_data_size_delta_off_chain: AtomicI64,

    /// until the skipped rewrites feature is activated, it is possible to skip rewrites and still include
    /// the account hash of the accounts that would have been rewritten as bank hash expects.
    skipped_rewrites: Mutex<HashMap<Pubkey, AccountHash>>,

    /// Status of partitioned epoch-reward distribution, cloned from the parent
    /// in `_new_from_parent`.
    epoch_reward_status: EpochRewardStatus,

    /// Processor used to execute transaction batches; recreated per
    /// (slot, epoch) when a child bank is built (`TransactionBatchProcessor::new_from`).
    transaction_processor: TransactionBatchProcessor<BankForks>,

    // NOTE(review): always initialized to false by the constructors in this
    // chunk; the behavior it gates is defined elsewhere — confirm at use sites.
    check_program_modification_slot: bool,

    /// Collected fee details
    collector_fee_details: RwLock<CollectorFeeDetails>,

    /// The compute budget to use for transaction execution.
    compute_budget: Option<ComputeBudget>,

    /// The max number of accounts that a transaction may lock.
    transaction_account_lock_limit: Option<usize>,

    /// Fee structure to use for assessing transaction fees.
    fee_structure: FeeStructure,

    /// blockhash and bank_hash overrides keyed by slot for simulated block production.
    /// This _field_ was needed to be DCOU-ed to avoid 2 locks per bank freezing...
    #[cfg(feature = "dev-context-only-utils")]
    hash_overrides: Arc<Mutex<HashOverrides>>,

    /// The lattice hash of all accounts
    ///
    /// The value is only meaningful after freezing.
    accounts_lt_hash: Mutex<AccountsLtHash>,

    /// A cache of *the initial state* of accounts modified in this slot
    ///
    /// The accounts lt hash needs both the initial and final state of each
    /// account that was modified in this slot.  Cache the initial state here.
    ///
    /// Note: The initial state must be strictly from an ancestor,
    /// and not an intermediate state within this slot.
    cache_for_accounts_lt_hash: DashMap<Pubkey, AccountsLtHashCacheValue, ahash::RandomState>,

    /// Stats related to the accounts lt hash
    stats_for_accounts_lt_hash: AccountsLtHashStats,

    /// The unique identifier for the corresponding block for this bank.
    /// None for banks that have not yet completed replay or for leader banks as we cannot populate block_id
    /// until bankless leader. Can be computed directly from shreds without needing to execute transactions.
    block_id: RwLock<Option<Hash>>,

    /// Accounts stats for computing the bank hash
    bank_hash_stats: AtomicBankHashStats,
}
954
/// Per-vote-account reward entry staged during reward calculation.
#[derive(Debug)]
struct VoteReward {
    /// State of the rewarded vote account.
    vote_account: AccountSharedData,
    /// Validator commission at reward time — NOTE(review): presumably a
    /// percentage; confirm at the calculation site.
    commission: u8,
    /// Reward amount credited to the vote account — NOTE(review): presumably
    /// lamports; confirm at the distribution site.
    vote_rewards: u64,
    /// NOTE(review): appears to gate whether `vote_account` still needs to be
    /// written back to the accounts store; confirm in reward-storing code.
    vote_needs_store: bool,
}
962
/// Map from vote-account pubkey to its calculated `VoteReward`.
type VoteRewards = DashMap<Pubkey, VoteReward>;
964
/// Options for creating a child bank (see `Bank::new_from_parent_with_options`).
#[derive(Debug, Default)]
pub struct NewBankOptions {
    /// When true, the new bank is marked vote-only (stored in `Bank::vote_only_bank`).
    pub vote_only_bank: bool,
}
969
/// Test-only configuration for constructing a `Bank`
/// (available only with the `dev-context-only-utils` feature).
#[cfg(feature = "dev-context-only-utils")]
#[derive(Debug)]
pub struct BankTestConfig {
    /// Accounts-db configuration to use for the test bank.
    pub accounts_db_config: AccountsDbConfig,
}
975
#[cfg(feature = "dev-context-only-utils")]
impl Default for BankTestConfig {
    /// Defaults to the accounts-db configuration intended for testing.
    fn default() -> Self {
        let accounts_db_config = ACCOUNTS_DB_CONFIG_FOR_TESTING;
        Self { accounts_db_config }
    }
}
984
/// Inflation reward figures computed for the previous epoch.
#[derive(Debug)]
struct PrevEpochInflationRewards {
    /// Total validator rewards for the previous epoch — NOTE(review):
    /// presumably lamports; confirm at the calculation site.
    validator_rewards: u64,
    /// Duration of the previous epoch, expressed in years.
    prev_epoch_duration_in_years: f64,
    /// Validator portion of the inflation rate — NOTE(review): presumably an
    /// annualized rate; confirm against the `Inflation` model.
    validator_rate: f64,
    /// Foundation portion of the inflation rate — NOTE(review): presumably an
    /// annualized rate; confirm against the `Inflation` model.
    foundation_rate: f64,
}
992
/// Aggregate counts produced while processing a batch of transactions.
#[derive(Debug, Default, PartialEq)]
pub struct ProcessedTransactionCounts {
    /// Number of transactions processed.
    pub processed_transactions_count: u64,
    /// Number of processed transactions that were not votes.
    pub processed_non_vote_transactions_count: u64,
    /// Number of processed transactions whose result was successful.
    pub processed_with_successful_result_count: u64,
    /// Number of signatures across the processed transactions.
    pub signature_count: u64,
}
1000
/// Account stats for computing the bank hash
/// This struct is serialized and stored in the snapshot.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BankHashStats {
    /// Count of stored accounts with non-zero lamports (see `update`).
    pub num_updated_accounts: u64,
    /// Count of stored accounts with zero lamports, i.e. removals (see `update`).
    pub num_removed_accounts: u64,
    /// Sum of lamports across stored accounts (wrapping on overflow).
    pub num_lamports_stored: u64,
    /// Sum of account data lengths in bytes (wrapping on overflow).
    pub total_data_len: u64,
    /// Count of stored accounts marked executable.
    pub num_executable_accounts: u64,
}
1012
1013impl BankHashStats {
1014    pub fn update<T: ReadableAccount>(&mut self, account: &T) {
1015        if account.lamports() == 0 {
1016            self.num_removed_accounts += 1;
1017        } else {
1018            self.num_updated_accounts += 1;
1019        }
1020        self.total_data_len = self
1021            .total_data_len
1022            .wrapping_add(account.data().len() as u64);
1023        if account.executable() {
1024            self.num_executable_accounts += 1;
1025        }
1026        self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports());
1027    }
1028    pub fn accumulate(&mut self, other: &BankHashStats) {
1029        self.num_updated_accounts += other.num_updated_accounts;
1030        self.num_removed_accounts += other.num_removed_accounts;
1031        self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
1032        self.num_lamports_stored = self
1033            .num_lamports_stored
1034            .wrapping_add(other.num_lamports_stored);
1035        self.num_executable_accounts += other.num_executable_accounts;
1036    }
1037}
1038
/// Atomic counterpart of `BankHashStats`, allowing lock-free accumulation
/// (see `accumulate`) and snapshotting back to the plain form (see `load`).
#[derive(Debug, Default)]
pub struct AtomicBankHashStats {
    /// Atomic mirror of `BankHashStats::num_updated_accounts`.
    pub num_updated_accounts: AtomicU64,
    /// Atomic mirror of `BankHashStats::num_removed_accounts`.
    pub num_removed_accounts: AtomicU64,
    /// Atomic mirror of `BankHashStats::num_lamports_stored`.
    pub num_lamports_stored: AtomicU64,
    /// Atomic mirror of `BankHashStats::total_data_len`.
    pub total_data_len: AtomicU64,
    /// Atomic mirror of `BankHashStats::num_executable_accounts`.
    pub num_executable_accounts: AtomicU64,
}
1047
1048impl AtomicBankHashStats {
1049    pub fn new(stat: &BankHashStats) -> Self {
1050        AtomicBankHashStats {
1051            num_updated_accounts: AtomicU64::new(stat.num_updated_accounts),
1052            num_removed_accounts: AtomicU64::new(stat.num_removed_accounts),
1053            num_lamports_stored: AtomicU64::new(stat.num_lamports_stored),
1054            total_data_len: AtomicU64::new(stat.total_data_len),
1055            num_executable_accounts: AtomicU64::new(stat.num_executable_accounts),
1056        }
1057    }
1058
1059    pub fn accumulate(&self, other: &BankHashStats) {
1060        self.num_updated_accounts
1061            .fetch_add(other.num_updated_accounts, Relaxed);
1062        self.num_removed_accounts
1063            .fetch_add(other.num_removed_accounts, Relaxed);
1064        self.total_data_len.fetch_add(other.total_data_len, Relaxed);
1065        self.num_lamports_stored
1066            .fetch_add(other.num_lamports_stored, Relaxed);
1067        self.num_executable_accounts
1068            .fetch_add(other.num_executable_accounts, Relaxed);
1069    }
1070
1071    pub fn load(&self) -> BankHashStats {
1072        BankHashStats {
1073            num_updated_accounts: self.num_updated_accounts.load(Relaxed),
1074            num_removed_accounts: self.num_removed_accounts.load(Relaxed),
1075            num_lamports_stored: self.num_lamports_stored.load(Relaxed),
1076            total_data_len: self.total_data_len.load(Relaxed),
1077            num_executable_accounts: self.num_executable_accounts.load(Relaxed),
1078        }
1079    }
1080}
1081
1082impl Bank {
    /// Builds a `Bank` with every field defaulted except the given account
    /// store, then re-keys the transaction processor to the bank's (default)
    /// slot/epoch and records the initial accounts data size.
    ///
    /// Used as the common starting point for `new_with_paths`.
    fn default_with_accounts(accounts: Accounts) -> Self {
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            rc: BankRc::new(accounts),
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::<BlockhashQueue>::default(),
            ancestors: Ancestors::default(),
            hash: RwLock::<Hash>::default(),
            parent_hash: Hash::default(),
            parent_slot: Slot::default(),
            hard_forks: Arc::<RwLock<HardForks>>::default(),
            transaction_count: AtomicU64::default(),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::default(),
            signature_count: AtomicU64::default(),
            capitalization: AtomicU64::default(),
            max_tick_height: u64::default(),
            hashes_per_tick: Option::<u64>::default(),
            ticks_per_slot: u64::default(),
            ns_per_slot: u128::default(),
            genesis_creation_time: UnixTimestamp::default(),
            slots_per_year: f64::default(),
            slot: Slot::default(),
            bank_id: BankId::default(),
            epoch: Epoch::default(),
            block_height: u64::default(),
            collector_id: Pubkey::default(),
            collector_fees: AtomicU64::default(),
            fee_rate_governor: FeeRateGovernor::default(),
            collected_rent: AtomicU64::default(),
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Arc::<RwLock<Inflation>>::default(),
            stakes_cache: StakesCache::default(),
            epoch_stakes: HashMap::<Epoch, EpochStakes>::default(),
            is_delta: AtomicBool::default(),
            rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
            cluster_type: Option::<ClusterType>::default(),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            freeze_started: AtomicBool::default(),
            vote_only_bank: false,
            cost_tracker: RwLock::<CostTracker>::default(),
            accounts_data_size_initial: 0,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: None,
            transaction_account_lock_limit: None,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash::identity())),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::default(),
        };

        // Replace the defaulted processor with one keyed to this bank's
        // slot and epoch.
        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        // Record the accounts data size as of now, before any transactions
        // have been processed (see `accounts_data_size_initial`'s field doc).
        let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64;
        bank.accounts_data_size_initial = accounts_data_size_initial;

        bank
    }
1163
    /// Creates a bank from a genesis config, backed by a freshly constructed
    /// accounts database over the given `paths`.
    ///
    /// After processing the genesis config it populates epoch stakes for all
    /// epochs reachable from slot 0, initializes the sysvars, and fills the
    /// transaction processor's sysvar cache. The `collector_id_for_tests`,
    /// `genesis_hash`, and `feature_set` arguments are only used when the
    /// `dev-context-only-utils` feature is enabled.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_paths(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        paths: Vec<PathBuf>,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_db_config: Option<AccountsDbConfig>,
        accounts_update_notifier: Option<AccountsUpdateNotifier>,
        #[allow(unused)] collector_id_for_tests: Option<Pubkey>,
        exit: Arc<AtomicBool>,
        #[allow(unused)] genesis_hash: Option<Hash>,
        #[allow(unused)] feature_set: Option<FeatureSet>,
    ) -> Self {
        let accounts_db =
            AccountsDb::new_with_config(paths, accounts_db_config, accounts_update_notifier, exit);
        let accounts = Accounts::new(Arc::new(accounts_db));
        let mut bank = Self::default_with_accounts(accounts);
        bank.ancestors = Ancestors::from(vec![bank.slot()]);
        bank.compute_budget = runtime_config.compute_budget;
        bank.transaction_account_lock_limit = runtime_config.transaction_account_lock_limit;
        bank.transaction_debug_keys = debug_keys;
        bank.cluster_type = Some(genesis_config.cluster_type);

        #[cfg(feature = "dev-context-only-utils")]
        {
            bank.feature_set = Arc::new(feature_set.unwrap_or_default());
        }

        #[cfg(not(feature = "dev-context-only-utils"))]
        bank.process_genesis_config(genesis_config);
        #[cfg(feature = "dev-context-only-utils")]
        bank.process_genesis_config(genesis_config, collector_id_for_tests, genesis_hash);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );

        // genesis needs stakes for all epochs up to the epoch implied by
        //  slot = 0 and genesis configuration
        {
            let stakes = bank.stakes_cache.stakes().clone();
            let stakes = Arc::new(StakesEnum::from(stakes));
            for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) {
                bank.epoch_stakes
                    .insert(epoch, EpochStakes::new(stakes.clone(), epoch));
            }
            bank.update_stake_history(None);
        }
        // Initialize the sysvars from the freshly processed genesis state.
        bank.update_clock(None);
        bank.update_rent();
        bank.update_epoch_schedule();
        bank.update_recent_blockhashes();
        bank.update_last_restart_slot();
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);
        bank
    }
1225
1226    /// Create a new bank that points to an immutable checkpoint of another bank.
1227    pub fn new_from_parent(parent: Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
1228        Self::_new_from_parent(
1229            parent,
1230            collector_id,
1231            slot,
1232            null_tracer(),
1233            NewBankOptions::default(),
1234        )
1235    }
1236
1237    pub fn new_from_parent_with_options(
1238        parent: Arc<Bank>,
1239        collector_id: &Pubkey,
1240        slot: Slot,
1241        new_bank_options: NewBankOptions,
1242    ) -> Self {
1243        Self::_new_from_parent(parent, collector_id, slot, null_tracer(), new_bank_options)
1244    }
1245
1246    pub fn new_from_parent_with_tracer(
1247        parent: Arc<Bank>,
1248        collector_id: &Pubkey,
1249        slot: Slot,
1250        reward_calc_tracer: impl RewardCalcTracer,
1251    ) -> Self {
1252        Self::_new_from_parent(
1253            parent,
1254            collector_id,
1255            slot,
1256            Some(reward_calc_tracer),
1257            NewBankOptions::default(),
1258        )
1259    }
1260
1261    fn get_rent_collector_from(rent_collector: &RentCollector, epoch: Epoch) -> RentCollector {
1262        rent_collector.clone_with_epoch(epoch)
1263    }
1264
    /// Shared implementation behind the `new_from_parent*` constructors.
    ///
    /// Freezes `parent`, builds the child bank for `slot` by inheriting or
    /// recomputing state field by field, sets up ancestors, handles a possible
    /// epoch crossing, refreshes sysvars and the program cache, and reports
    /// per-phase creation-time metrics.
    fn _new_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        new_bank_options: NewBankOptions,
    ) -> Self {
        let mut time = Measure::start("bank::new_from_parent");
        let NewBankOptions { vote_only_bank } = new_bank_options;

        // A child may only be built on a parent whose state is final
        // (see the bank lifecycle description in the module docs).
        parent.freeze();
        assert_ne!(slot, parent.slot());

        let epoch_schedule = parent.epoch_schedule().clone();
        let epoch = epoch_schedule.get_epoch(slot);

        let (rc, bank_rc_creation_time_us) = measure_us!({
            let accounts_db = Arc::clone(&parent.rc.accounts.accounts_db);
            BankRc {
                accounts: Arc::new(Accounts::new(accounts_db)),
                parent: RwLock::new(Some(Arc::clone(&parent))),
                bank_id_generator: Arc::clone(&parent.rc.bank_id_generator),
            }
        });

        let (status_cache, status_cache_time_us) = measure_us!(Arc::clone(&parent.status_cache));

        let (fee_rate_governor, fee_components_time_us) = measure_us!(
            FeeRateGovernor::new_derived(&parent.fee_rate_governor, parent.signature_count())
        );

        let bank_id = rc.bank_id_generator.fetch_add(1, Relaxed) + 1;
        let (blockhash_queue, blockhash_queue_time_us) =
            measure_us!(RwLock::new(parent.blockhash_queue.read().unwrap().clone()));

        let (stakes_cache, stakes_cache_time_us) =
            measure_us!(StakesCache::new(parent.stakes_cache.stakes().clone()));

        let (epoch_stakes, epoch_stakes_time_us) = measure_us!(parent.epoch_stakes.clone());

        let (transaction_processor, builtin_program_ids_time_us) = measure_us!(
            TransactionBatchProcessor::new_from(&parent.transaction_processor, slot, epoch)
        );

        let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
            measure_us!(parent.rewards_pool_pubkeys.clone());

        let (transaction_debug_keys, transaction_debug_keys_time_us) =
            measure_us!(parent.transaction_debug_keys.clone());

        let (transaction_log_collector_config, transaction_log_collector_config_time_us) =
            measure_us!(parent.transaction_log_collector_config.clone());

        let (feature_set, feature_set_time_us) = measure_us!(parent.feature_set.clone());

        let accounts_data_size_initial = parent.load_accounts_data_size();
        let mut new = Self {
            skipped_rewrites: Mutex::default(),
            rc,
            status_cache,
            slot,
            bank_id,
            epoch,
            blockhash_queue,

            // TODO: clean this up, so much special-case copying...
            hashes_per_tick: parent.hashes_per_tick,
            ticks_per_slot: parent.ticks_per_slot,
            ns_per_slot: parent.ns_per_slot,
            genesis_creation_time: parent.genesis_creation_time,
            slots_per_year: parent.slots_per_year,
            epoch_schedule,
            collected_rent: AtomicU64::new(0),
            rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch),
            max_tick_height: slot
                .checked_add(1)
                .expect("max tick height addition overflowed")
                .checked_mul(parent.ticks_per_slot)
                .expect("max tick height multiplication overflowed"),
            block_height: parent
                .block_height
                .checked_add(1)
                .expect("block height addition overflowed"),
            fee_rate_governor,
            capitalization: AtomicU64::new(parent.capitalization()),
            vote_only_bank,
            inflation: parent.inflation.clone(),
            transaction_count: AtomicU64::new(parent.transaction_count()),
            non_vote_transaction_count_since_restart: AtomicU64::new(
                parent.non_vote_transaction_count_since_restart(),
            ),
            transaction_error_count: AtomicU64::new(0),
            transaction_entries_count: AtomicU64::new(0),
            transactions_per_entry_max: AtomicU64::new(0),
            // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now
            stakes_cache,
            epoch_stakes,
            parent_hash: parent.hash(),
            parent_slot: parent.slot(),
            collector_id: *collector_id,
            collector_fees: AtomicU64::new(0),
            ancestors: Ancestors::default(),
            hash: RwLock::new(Hash::default()),
            is_delta: AtomicBool::new(false),
            tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
            signature_count: AtomicU64::new(0),
            hard_forks: parent.hard_forks.clone(),
            rewards: RwLock::new(vec![]),
            cluster_type: parent.cluster_type,
            lazy_rent_collection: AtomicBool::new(parent.lazy_rent_collection.load(Relaxed)),
            rewards_pool_pubkeys,
            transaction_debug_keys,
            transaction_log_collector_config,
            transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
            feature_set: Arc::clone(&feature_set),
            reserved_account_keys: parent.reserved_account_keys.clone(),
            drop_callback: RwLock::new(OptionalDropCallback(
                parent
                    .drop_callback
                    .read()
                    .unwrap()
                    .0
                    .as_ref()
                    .map(|drop_callback| drop_callback.clone_box()),
            )),
            freeze_started: AtomicBool::new(false),
            cost_tracker: RwLock::new(parent.read_cost_tracker().unwrap().new_from_parent_limits()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: parent.epoch_reward_status.clone(),
            transaction_processor,
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: parent.compute_budget,
            transaction_account_lock_limit: parent.transaction_account_lock_limit,
            fee_structure: parent.fee_structure.clone(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: parent.hash_overrides.clone(),
            accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::default(),
        };

        // Build the ancestor set: this slot plus every parent slot.
        let (_, ancestors_time_us) = measure_us!({
            let mut ancestors = Vec::with_capacity(1 + new.parents().len());
            ancestors.push(new.slot());
            new.parents().iter().for_each(|p| {
                ancestors.push(p.slot());
            });
            new.ancestors = Ancestors::from(ancestors);
        });

        // Following code may touch AccountsDb, requiring proper ancestors
        let (_, update_epoch_time_us) = measure_us!({
            if parent.epoch() < new.epoch() {
                new.process_new_epoch(
                    parent.epoch(),
                    parent.slot(),
                    parent.block_height(),
                    reward_calc_tracer,
                );
            } else {
                // Save a snapshot of stakes for use in consensus and stake weighted networking
                let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot);
                new.update_epoch_stakes(leader_schedule_epoch);
            }
            new.distribute_partitioned_epoch_rewards();
        });

        let (_epoch, slot_index) = new.epoch_schedule.get_epoch_and_slot_index(new.slot);
        let slots_in_epoch = new.epoch_schedule.get_slots_in_epoch(new.epoch);

        let (_, cache_preparation_time_us) = measure_us!(new
            .transaction_processor
            .prepare_program_cache_for_upcoming_feature_set(
                &new,
                &new.compute_active_feature_set(true).0,
                &new.compute_budget.unwrap_or_default(),
                slot_index,
                slots_in_epoch,
            ));

        // Update sysvars before processing transactions
        let (_, update_sysvars_time_us) = measure_us!({
            new.update_slot_hashes();
            new.update_stake_history(Some(parent.epoch()));
            new.update_clock(Some(parent.epoch()));
            new.update_last_restart_slot()
        });

        let (_, fill_sysvar_cache_time_us) = measure_us!(new
            .transaction_processor
            .fill_missing_sysvar_cache_entries(&new));

        let (num_accounts_modified_this_slot, populate_cache_for_accounts_lt_hash_us) = new
            .is_accounts_lt_hash_enabled()
            .then(|| {
                measure_us!({
                    // The cache for accounts lt hash needs to be made aware of accounts modified
                    // before transaction processing begins.  Otherwise we may calculate the wrong
                    // accounts lt hash due to having the wrong initial state of the account.  The
                    // lt hash cache's initial state must always be from an ancestor, and cannot be
                    // an intermediate state within this Bank's slot.  If the lt hash cache has the
                    // wrong initial account state, we'll mix out the wrong lt hash value, and thus
                    // have the wrong overall accounts lt hash, and diverge.
                    let accounts_modified_this_slot =
                        new.rc.accounts.accounts_db.get_pubkeys_for_slot(slot);
                    let num_accounts_modified_this_slot = accounts_modified_this_slot.len();
                    for pubkey in accounts_modified_this_slot {
                        new.cache_for_accounts_lt_hash
                            .entry(pubkey)
                            .or_insert(AccountsLtHashCacheValue::BankNew);
                    }
                    num_accounts_modified_this_slot
                })
            })
            .unzip();

        time.stop();
        report_new_bank_metrics(
            slot,
            parent.slot(),
            new.block_height,
            num_accounts_modified_this_slot,
            NewBankTimings {
                bank_rc_creation_time_us,
                total_elapsed_time_us: time.as_us(),
                status_cache_time_us,
                fee_components_time_us,
                blockhash_queue_time_us,
                stakes_cache_time_us,
                epoch_stakes_time_us,
                builtin_program_ids_time_us,
                rewards_pool_pubkeys_time_us,
                executor_cache_time_us: 0,
                transaction_debug_keys_time_us,
                transaction_log_collector_config_time_us,
                feature_set_time_us,
                ancestors_time_us,
                update_epoch_time_us,
                cache_preparation_time_us,
                update_sysvars_time_us,
                fill_sysvar_cache_time_us,
                populate_cache_for_accounts_lt_hash_us,
            },
        );

        // Report the parent's program cache stats, then reset the child's so
        // its counters start fresh for this slot.
        report_loaded_programs_stats(
            &parent
                .transaction_processor
                .program_cache
                .read()
                .unwrap()
                .stats,
            parent.slot(),
        );

        new.transaction_processor
            .program_cache
            .write()
            .unwrap()
            .stats
            .reset();

        new
    }
1534
1535    pub fn set_fork_graph_in_program_cache(&self, fork_graph: Weak<RwLock<BankForks>>) {
1536        self.transaction_processor
1537            .program_cache
1538            .write()
1539            .unwrap()
1540            .set_fork_graph(fork_graph);
1541    }
1542
1543    pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) {
1544        self.transaction_processor
1545            .program_cache
1546            .write()
1547            .unwrap()
1548            .prune(new_root_slot, new_root_epoch);
1549    }
1550
1551    pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) {
1552        self.transaction_processor
1553            .program_cache
1554            .write()
1555            .unwrap()
1556            .prune_by_deployment_slot(deployment_slot);
1557    }
1558
    /// Epoch in which the new cooldown warmup rate for stake was activated
    ///
    /// `None` if the corresponding feature has not been activated; the lookup
    /// is delegated to the feature set using this bank's epoch schedule.
    pub fn new_warmup_cooldown_rate_epoch(&self) -> Option<Epoch> {
        self.feature_set
            .new_warmup_cooldown_rate_epoch(&self.epoch_schedule)
    }
1564
    /// process for the start of a new epoch
    ///
    /// Runs on the first bank of an epoch: applies pending feature
    /// activations, advances the stake history and warms up stakes, snapshots
    /// epoch stakes for the leader schedule, and begins partitioned stake
    /// rewards.  Each phase is timed with `measure_us!` and reported via
    /// `report_new_epoch_metrics`.  The phase ordering matters: stakes must be
    /// activated before the epoch-stakes snapshot, and the snapshot must be
    /// taken before rewards are applied (see inline comments below).
    fn process_new_epoch(
        &mut self,
        parent_epoch: Epoch,
        parent_slot: Slot,
        parent_height: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) {
        let epoch = self.epoch();
        let slot = self.slot();
        // Dedicated rayon pool so epoch-boundary work gets its own threads.
        let (thread_pool, thread_pool_time_us) = measure_us!(ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewEpch{i:02}"))
            .build()
            .expect("new rayon threadpool"));

        let (_, apply_feature_activations_time_us) = measure_us!(thread_pool.install(|| {
            self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false)
        }));

        // Add new entry to stakes.stake_history, set appropriate epoch and
        // update vote accounts with warmed up stakes before saving a
        // snapshot of stakes in epoch stakes
        let (_, activate_epoch_time_us) = measure_us!(self.stakes_cache.activate_epoch(
            epoch,
            &thread_pool,
            self.new_warmup_cooldown_rate_epoch()
        ));

        // Save a snapshot of stakes for use in consensus and stake weighted networking
        let leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot);
        let (_, update_epoch_stakes_time_us) =
            measure_us!(self.update_epoch_stakes(leader_schedule_epoch));

        let mut rewards_metrics = RewardsMetrics::default();
        // After saving a snapshot of stakes, apply stake rewards and commission
        let (_, update_rewards_with_thread_pool_time_us) = measure_us!(self
            .begin_partitioned_rewards(
                reward_calc_tracer,
                &thread_pool,
                parent_epoch,
                parent_slot,
                parent_height,
                &mut rewards_metrics,
            ));

        report_new_epoch_metrics(
            epoch,
            slot,
            parent_slot,
            NewEpochTimings {
                thread_pool_time_us,
                apply_feature_activations_time_us,
                activate_epoch_time_us,
                update_epoch_stakes_time_us,
                update_rewards_with_thread_pool_time_us,
            },
            rewards_metrics,
        );
    }
1624
    /// Byte cap configured for accounts-index scan results, if any.
    ///
    /// `None` means scan results are not size-limited.
    pub fn byte_limit_for_scans(&self) -> Option<usize> {
        self.rc
            .accounts
            .accounts_db
            .accounts_index
            .scan_results_limit_bytes
    }
1632
1633    pub fn proper_ancestors_set(&self) -> HashSet<Slot> {
1634        HashSet::from_iter(self.proper_ancestors())
1635    }
1636
1637    /// Returns all ancestors excluding self.slot.
1638    pub(crate) fn proper_ancestors(&self) -> impl Iterator<Item = Slot> + '_ {
1639        self.ancestors
1640            .keys()
1641            .into_iter()
1642            .filter(move |slot| *slot != self.slot)
1643    }
1644
    /// Registers (or clears, when `None`) the callback invoked when this bank
    /// is dropped.
    pub fn set_callback(&self, callback: Option<Box<dyn DropCallback + Send + Sync>>) {
        *self.drop_callback.write().unwrap() = OptionalDropCallback(callback);
    }
1648
    /// Whether this bank was created to process only vote transactions.
    pub fn vote_only_bank(&self) -> bool {
        self.vote_only_bank
    }
1652
    /// Like `new_from_parent` but additionally:
    /// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots
    ///   in the past
    /// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots
    /// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank
    /// * Calculates and sets the epoch accounts hash from the parent
    pub fn warp_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        data_source: CalcAccountsHashDataSource,
    ) -> Self {
        // Freeze the parent and compute its epoch accounts hash now, since the
        // warped child will not replay the intervening slots.
        parent.freeze();
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_in_flight(parent.slot());
        let accounts_hash = parent.update_accounts_hash(data_source, false, true);
        let epoch_accounts_hash = accounts_hash.into();
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_valid(epoch_accounts_hash, parent.slot());

        let parent_timestamp = parent.clock().unix_timestamp;
        let mut new = Bank::new_from_parent(parent, collector_id, slot);
        new.apply_feature_activations(ApplyFeatureActivationsCaller::WarpFromParent, false);
        new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot));
        // Skip PoH entirely: jump the tick height straight to the end of the slot.
        new.tick_height.store(new.max_tick_height(), Relaxed);

        // Carry the parent's timestamp forward so the clock sysvar does not
        // jump by the (potentially huge) number of warped slots.
        let mut clock = new.clock();
        clock.epoch_start_timestamp = parent_timestamp;
        clock.unix_timestamp = parent_timestamp;
        new.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                new.inherit_specially_retained_account_fields(account),
            )
        });
        new.transaction_processor
            .fill_missing_sysvar_cache_entries(&new);
        new.freeze();
        new
    }
1701
    /// Create a bank from explicit arguments and deserialized fields from snapshot
    ///
    /// Rehydrates the full in-memory `Bank`: rebuilds the stakes cache from
    /// accounts-db, recalculates partitioned-rewards state, restores (or
    /// recomputes) the accounts lt hash, and finally sanity-checks that the
    /// snapshot fields and `genesis_config` describe the same cluster.
    ///
    /// * `accounts_data_size_initial` - accounts data size measured while
    ///   generating the index; compared against the snapshot's value in the
    ///   emitted datapoint.
    /// * `debug_do_not_add_builtins` - when true, skips installing builtin
    ///   programs (used by tests).
    pub(crate) fn new_from_fields(
        bank_rc: BankRc,
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        fields: BankFieldsToDeserialize,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_data_size_initial: u64,
    ) -> Self {
        let now = Instant::now();
        let ancestors = Ancestors::from(&fields.ancestors);
        // For backward compatibility, we can only serialize and deserialize
        // Stakes<Delegation> in BankFieldsTo{Serialize,Deserialize}. But Bank
        // caches Stakes<StakeAccount>. Below Stakes<StakeAccount> is obtained
        // from Stakes<Delegation> by reading the full account state from
        // accounts-db. Note that it is crucial that these accounts are loaded
        // at the right slot and match precisely with serialized Delegations.
        //
        // Note that we are disabling the read cache while we populate the stakes cache.
        // The stakes accounts will not be expected to be loaded again.
        // If we populate the read cache with these loads, then we'll just soon have to evict these.
        let (stakes, stakes_time) = measure_time!(Stakes::new(&fields.stakes, |pubkey| {
            let (account, _slot) = bank_rc
                .accounts
                .load_with_fixed_root_do_not_populate_read_cache(&ancestors, pubkey)?;
            Some(account)
        })
        .expect(
            "Stakes cache is inconsistent with accounts-db. This can indicate \
            a corrupted snapshot or bugs in cached accounts or accounts-db.",
        ));
        info!("Loading Stakes took: {stakes_time}");
        let stakes_accounts_load_duration = now.elapsed();
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            rc: bank_rc,
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::new(fields.blockhash_queue),
            ancestors,
            hash: RwLock::new(fields.hash),
            parent_hash: fields.parent_hash,
            parent_slot: fields.parent_slot,
            hard_forks: Arc::new(RwLock::new(fields.hard_forks)),
            transaction_count: AtomicU64::new(fields.transaction_count),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::new(fields.tick_height),
            signature_count: AtomicU64::new(fields.signature_count),
            capitalization: AtomicU64::new(fields.capitalization),
            max_tick_height: fields.max_tick_height,
            hashes_per_tick: fields.hashes_per_tick,
            ticks_per_slot: fields.ticks_per_slot,
            ns_per_slot: fields.ns_per_slot,
            genesis_creation_time: fields.genesis_creation_time,
            slots_per_year: fields.slots_per_year,
            slot: fields.slot,
            bank_id: 0,
            epoch: fields.epoch,
            block_height: fields.block_height,
            collector_id: fields.collector_id,
            collector_fees: AtomicU64::new(fields.collector_fees),
            fee_rate_governor: fields.fee_rate_governor,
            collected_rent: AtomicU64::new(fields.collected_rent),
            // clone()-ing is needed to consider a gated behavior in rent_collector
            rent_collector: Self::get_rent_collector_from(&fields.rent_collector, fields.epoch),
            epoch_schedule: fields.epoch_schedule,
            inflation: Arc::new(RwLock::new(fields.inflation)),
            stakes_cache: StakesCache::new(stakes),
            epoch_stakes: fields.epoch_stakes,
            is_delta: AtomicBool::new(fields.is_delta),
            rewards: RwLock::new(vec![]),
            cluster_type: Some(genesis_config.cluster_type),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: debug_keys,
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            // a non-default hash means the bank was frozen before snapshotting
            freeze_started: AtomicBool::new(fields.hash != Hash::default()),
            vote_only_bank: false,
            cost_tracker: RwLock::new(CostTracker::default()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            // collector_fee_details is not serialized to snapshot
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: runtime_config.compute_budget,
            transaction_account_lock_limit: runtime_config.transaction_account_lock_limit,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            // sentinel value; replaced below from the snapshot or by
            // recalculation when the accounts lt hash is in use
            accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash([0xBAD1; LtHash::NUM_ELEMENTS]))),
            cache_for_accounts_lt_hash: DashMap::default(),
            stats_for_accounts_lt_hash: AccountsLtHashStats::default(),
            block_id: RwLock::new(None),
            bank_hash_stats: AtomicBankHashStats::new(&fields.bank_hash_stats),
        };

        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        let thread_pool = ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewFlds{i:02}"))
            .build()
            .expect("new rayon threadpool");
        bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);
        bank.rebuild_skipped_rewrites();

        let mut calculate_accounts_lt_hash_duration = None;
        if let Some(accounts_lt_hash) = fields.accounts_lt_hash {
            *bank.accounts_lt_hash.get_mut().unwrap() = accounts_lt_hash;
        } else {
            // Use the accounts lt hash from the snapshot, if present, otherwise calculate it.
            // When the feature gate is enabled, the snapshot *must* contain an accounts lt hash.
            assert!(
                !bank
                    .feature_set
                    .is_active(&feature_set::accounts_lt_hash::id()),
                "snapshot must have an accounts lt hash if the feature is enabled",
            );
            if bank.is_accounts_lt_hash_enabled() {
                info!(
                    "Calculating the accounts lt hash for slot {}...",
                    bank.slot(),
                );
                let (ancestors, slot) = if bank.is_frozen() {
                    // Loading from a snapshot necessarily means this slot was rooted, and thus
                    // the bank has been frozen.  So when calculating the accounts lt hash,
                    // do it based on *this slot*, not our parent, since
                    // update_accounts_lt_hash() will not be called on us again.
                    (bank.ancestors.clone(), bank.slot())
                } else {
                    // If the bank is not frozen (e.g. if called from tests), then when this bank
                    // is frozen later it will call `update_accounts_lt_hash()`.  Therefore, we
                    // must calculate the accounts lt hash *here* based on *our parent*, so that
                    // the accounts lt hash is correct after freezing.
                    let parent_ancestors = {
                        let mut ancestors = bank.ancestors.clone();
                        ancestors.remove(&bank.slot());
                        ancestors
                    };
                    (parent_ancestors, bank.parent_slot)
                };
                let (accounts_lt_hash, duration) = meas_dur!({
                    thread_pool.install(|| {
                        bank.rc
                            .accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_index(&ancestors, slot)
                    })
                });
                calculate_accounts_lt_hash_duration = Some(duration);
                *bank.accounts_lt_hash.get_mut().unwrap() = accounts_lt_hash;
                info!(
                    "Calculating the accounts lt hash for slot {}... \
                     Done in {duration:?}, accounts_lt_hash checksum: {}",
                    bank.slot(),
                    bank.accounts_lt_hash.get_mut().unwrap().0.checksum(),
                );
            }
        }

        // Sanity assertions between bank snapshot and genesis config
        // Consider removing from serializable bank state
        // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing
        // from the passed in genesis_config instead (as new()/new_with_paths() already do)
        assert_eq!(
            bank.genesis_creation_time, genesis_config.creation_time,
            "Bank snapshot genesis creation time does not match genesis.bin creation time. \
             The snapshot and genesis.bin might pertain to different clusters"
        );
        assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
        assert_eq!(
            bank.ns_per_slot,
            genesis_config.poh_config.target_tick_duration.as_nanos()
                * genesis_config.ticks_per_slot as u128
        );
        assert_eq!(bank.max_tick_height, (bank.slot + 1) * bank.ticks_per_slot);
        assert_eq!(
            bank.slots_per_year,
            years_as_slots(
                1.0,
                &genesis_config.poh_config.target_tick_duration,
                bank.ticks_per_slot,
            )
        );
        assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule);
        assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot));

        datapoint_info!(
            "bank-new-from-fields",
            (
                "accounts_data_len-from-snapshot",
                fields.accounts_data_len as i64,
                i64
            ),
            (
                "accounts_data_len-from-generate_index",
                accounts_data_size_initial as i64,
                i64
            ),
            (
                "stakes_accounts_load_duration_us",
                stakes_accounts_load_duration.as_micros(),
                i64
            ),
            (
                "calculate_accounts_lt_hash_us",
                calculate_accounts_lt_hash_duration.as_ref().map(Duration::as_micros),
                Option<i64>
            ),
        );
        bank
    }
1934
    /// Return subset of bank fields representing serializable state
    ///
    /// Atomic counters are sampled with `Relaxed` loads; lock-protected state
    /// is read-locked just long enough to clone.  The accounts lt hash is
    /// included only when that feature is enabled on this bank.
    pub(crate) fn get_fields_to_serialize(&self) -> BankFieldsToSerialize {
        // Epoch stakes are split into the legacy and versioned representations
        // expected by the serialized snapshot format.
        let (epoch_stakes, versioned_epoch_stakes) = split_epoch_stakes(self.epoch_stakes.clone());
        BankFieldsToSerialize {
            blockhash_queue: self.blockhash_queue.read().unwrap().clone(),
            ancestors: AncestorsForSerialization::from(&self.ancestors),
            hash: *self.hash.read().unwrap(),
            parent_hash: self.parent_hash,
            parent_slot: self.parent_slot,
            hard_forks: self.hard_forks.read().unwrap().clone(),
            transaction_count: self.transaction_count.load(Relaxed),
            tick_height: self.tick_height.load(Relaxed),
            signature_count: self.signature_count.load(Relaxed),
            capitalization: self.capitalization.load(Relaxed),
            max_tick_height: self.max_tick_height,
            hashes_per_tick: self.hashes_per_tick,
            ticks_per_slot: self.ticks_per_slot,
            ns_per_slot: self.ns_per_slot,
            genesis_creation_time: self.genesis_creation_time,
            slots_per_year: self.slots_per_year,
            slot: self.slot,
            epoch: self.epoch,
            block_height: self.block_height,
            collector_id: self.collector_id,
            collector_fees: self.collector_fees.load(Relaxed),
            fee_rate_governor: self.fee_rate_governor.clone(),
            collected_rent: self.collected_rent.load(Relaxed),
            rent_collector: self.rent_collector.clone(),
            epoch_schedule: self.epoch_schedule.clone(),
            inflation: *self.inflation.read().unwrap(),
            stakes: StakesEnum::from(self.stakes_cache.stakes().clone()),
            epoch_stakes,
            is_delta: self.is_delta.load(Relaxed),
            accounts_data_len: self.load_accounts_data_size(),
            versioned_epoch_stakes,
            accounts_lt_hash: self
                .is_accounts_lt_hash_enabled()
                .then(|| self.accounts_lt_hash.lock().unwrap().clone()),
        }
    }
1975
    /// The pubkey credited with this bank's collected fees.
    pub fn collector_id(&self) -> &Pubkey {
        &self.collector_id
    }
1979
    /// Unix timestamp at which the genesis config was created.
    pub fn genesis_creation_time(&self) -> UnixTimestamp {
        self.genesis_creation_time
    }
1983
    /// This bank's slot.
    pub fn slot(&self) -> Slot {
        self.slot
    }
1987
    /// This bank's unique id (distinct from slot: multiple banks may share a
    /// slot across forks; NOTE(review): uniqueness is assigned elsewhere —
    /// `new_from_fields` initializes it to 0).
    pub fn bank_id(&self) -> BankId {
        self.bank_id
    }
1991
    /// The epoch this bank's slot belongs to.
    pub fn epoch(&self) -> Epoch {
        self.epoch
    }
1995
    /// First epoch with the normal (non-warmup) slot count, per the epoch
    /// schedule.
    pub fn first_normal_epoch(&self) -> Epoch {
        self.epoch_schedule().first_normal_epoch
    }
1999
    /// Takes a read lock on the bank hash; while the returned guard is held,
    /// writers of the hash (i.e. freezing) are blocked by the `RwLock`.
    pub fn freeze_lock(&self) -> RwLockReadGuard<Hash> {
        self.hash.read().unwrap()
    }
2003
    /// The bank hash; `Hash::default()` until the bank is frozen (see
    /// `is_frozen`).
    pub fn hash(&self) -> Hash {
        *self.hash.read().unwrap()
    }
2007
2008    pub fn is_frozen(&self) -> bool {
2009        *self.hash.read().unwrap() != Hash::default()
2010    }
2011
    /// Whether freezing has begun (the flag is set before the hash is
    /// finalized, so this can be true while `is_frozen()` is still false).
    pub fn freeze_started(&self) -> bool {
        self.freeze_started.load(Relaxed)
    }
2015
2016    pub fn status_cache_ancestors(&self) -> Vec<u64> {
2017        let mut roots = self.status_cache.read().unwrap().roots().clone();
2018        let min = roots.iter().min().cloned().unwrap_or(0);
2019        for ancestor in self.ancestors.keys() {
2020            if ancestor >= min {
2021                roots.insert(ancestor);
2022            }
2023        }
2024
2025        let mut ancestors: Vec<_> = roots.into_iter().collect();
2026        #[allow(clippy::stable_sort_primitive)]
2027        ancestors.sort();
2028        ancestors
2029    }
2030
2031    /// computed unix_timestamp at this slot height
2032    pub fn unix_timestamp_from_genesis(&self) -> i64 {
2033        self.genesis_creation_time.saturating_add(
2034            (self.slot as u128)
2035                .saturating_mul(self.ns_per_slot)
2036                .saturating_div(1_000_000_000) as i64,
2037        )
2038    }
2039
2040    fn update_sysvar_account<F>(&self, pubkey: &Pubkey, updater: F)
2041    where
2042        F: Fn(&Option<AccountSharedData>) -> AccountSharedData,
2043    {
2044        let old_account = self.get_account_with_fixed_root(pubkey);
2045        let mut new_account = updater(&old_account);
2046
2047        // When new sysvar comes into existence (with RENT_UNADJUSTED_INITIAL_BALANCE lamports),
2048        // this code ensures that the sysvar's balance is adjusted to be rent-exempt.
2049        //
2050        // More generally, this code always re-calculates for possible sysvar data size change,
2051        // although there is no such sysvars currently.
2052        self.adjust_sysvar_balance_for_rent(&mut new_account);
2053        self.store_account_and_update_capitalization(pubkey, &new_account);
2054    }
2055
2056    fn inherit_specially_retained_account_fields(
2057        &self,
2058        old_account: &Option<AccountSharedData>,
2059    ) -> InheritableAccountFields {
2060        const RENT_UNADJUSTED_INITIAL_BALANCE: u64 = 1;
2061
2062        (
2063            old_account
2064                .as_ref()
2065                .map(|a| a.lamports())
2066                .unwrap_or(RENT_UNADJUSTED_INITIAL_BALANCE),
2067            old_account
2068                .as_ref()
2069                .map(|a| a.rent_epoch())
2070                .unwrap_or(INITIAL_RENT_EPOCH),
2071        )
2072    }
2073
    /// Reads the clock sysvar account, falling back to `Clock::default()` if
    /// the account is missing or fails to deserialize.
    pub fn clock(&self) -> sysvar::clock::Clock {
        from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
            .unwrap_or_default()
    }
2078
2079    fn update_clock(&self, parent_epoch: Option<Epoch>) {
2080        let mut unix_timestamp = self.clock().unix_timestamp;
2081        // set epoch_start_timestamp to None to warp timestamp
2082        let epoch_start_timestamp = {
2083            let epoch = if let Some(epoch) = parent_epoch {
2084                epoch
2085            } else {
2086                self.epoch()
2087            };
2088            let first_slot_in_epoch = self.epoch_schedule().get_first_slot_in_epoch(epoch);
2089            Some((first_slot_in_epoch, self.clock().epoch_start_timestamp))
2090        };
2091        let max_allowable_drift = MaxAllowableDrift {
2092            fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST,
2093            slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
2094        };
2095
2096        let ancestor_timestamp = self.clock().unix_timestamp;
2097        if let Some(timestamp_estimate) =
2098            self.get_timestamp_estimate(max_allowable_drift, epoch_start_timestamp)
2099        {
2100            unix_timestamp = timestamp_estimate;
2101            if timestamp_estimate < ancestor_timestamp {
2102                unix_timestamp = ancestor_timestamp;
2103            }
2104        }
2105        datapoint_info!(
2106            "bank-timestamp-correction",
2107            ("slot", self.slot(), i64),
2108            ("from_genesis", self.unix_timestamp_from_genesis(), i64),
2109            ("corrected", unix_timestamp, i64),
2110            ("ancestor_timestamp", ancestor_timestamp, i64),
2111        );
2112        let mut epoch_start_timestamp =
2113            // On epoch boundaries, update epoch_start_timestamp
2114            if parent_epoch.is_some() && parent_epoch.unwrap() != self.epoch() {
2115                unix_timestamp
2116            } else {
2117                self.clock().epoch_start_timestamp
2118            };
2119        if self.slot == 0 {
2120            unix_timestamp = self.unix_timestamp_from_genesis();
2121            epoch_start_timestamp = self.unix_timestamp_from_genesis();
2122        }
2123        let clock = sysvar::clock::Clock {
2124            slot: self.slot,
2125            epoch_start_timestamp,
2126            epoch: self.epoch_schedule().get_epoch(self.slot),
2127            leader_schedule_epoch: self.epoch_schedule().get_leader_schedule_epoch(self.slot),
2128            unix_timestamp,
2129        };
2130        self.update_sysvar_account(&sysvar::clock::id(), |account| {
2131            create_account(
2132                &clock,
2133                self.inherit_specially_retained_account_fields(account),
2134            )
2135        });
2136    }
2137
2138    pub fn update_last_restart_slot(&self) {
2139        let feature_flag = self
2140            .feature_set
2141            .is_active(&feature_set::last_restart_slot_sysvar::id());
2142
2143        if feature_flag {
2144            // First, see what the currently stored last restart slot is. This
2145            // account may not exist yet if the feature was just activated.
2146            let current_last_restart_slot = self
2147                .get_account(&sysvar::last_restart_slot::id())
2148                .and_then(|account| {
2149                    let lrs: Option<LastRestartSlot> = from_account(&account);
2150                    lrs
2151                })
2152                .map(|account| account.last_restart_slot);
2153
2154            let last_restart_slot = {
2155                let slot = self.slot;
2156                let hard_forks_r = self.hard_forks.read().unwrap();
2157
2158                // Only consider hard forks <= this bank's slot to avoid prematurely applying
2159                // a hard fork that is set to occur in the future.
2160                hard_forks_r
2161                    .iter()
2162                    .rev()
2163                    .find(|(hard_fork, _)| *hard_fork <= slot)
2164                    .map(|(slot, _)| *slot)
2165                    .unwrap_or(0)
2166            };
2167
2168            // Only need to write if the last restart has changed
2169            if current_last_restart_slot != Some(last_restart_slot) {
2170                self.update_sysvar_account(&sysvar::last_restart_slot::id(), |account| {
2171                    create_account(
2172                        &LastRestartSlot { last_restart_slot },
2173                        self.inherit_specially_retained_account_fields(account),
2174                    )
2175                });
2176            }
2177        }
2178    }
2179
    /// Overwrites sysvar `T`'s account with the given value, then rebuilds
    /// the whole sysvar cache.  Test-only convenience.
    pub fn set_sysvar_for_tests<T>(&self, sysvar: &T)
    where
        T: Sysvar + SysvarId,
    {
        self.update_sysvar_account(&T::id(), |account| {
            create_account(
                sysvar,
                self.inherit_specially_retained_account_fields(account),
            )
        });
        // Simply force fill sysvar cache rather than checking which sysvar was
        // actually updated since tests don't need to be optimized for performance.
        self.transaction_processor.reset_sysvar_cache();
        self.transaction_processor
            .fill_missing_sysvar_cache_entries(self);
    }
2196
2197    fn update_slot_history(&self) {
2198        self.update_sysvar_account(&sysvar::slot_history::id(), |account| {
2199            let mut slot_history = account
2200                .as_ref()
2201                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
2202                .unwrap_or_default();
2203            slot_history.add(self.slot());
2204            create_account(
2205                &slot_history,
2206                self.inherit_specially_retained_account_fields(account),
2207            )
2208        });
2209    }
2210
2211    fn update_slot_hashes(&self) {
2212        self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| {
2213            let mut slot_hashes = account
2214                .as_ref()
2215                .map(|account| from_account::<SlotHashes, _>(account).unwrap())
2216                .unwrap_or_default();
2217            slot_hashes.add(self.parent_slot, self.parent_hash);
2218            create_account(
2219                &slot_hashes,
2220                self.inherit_specially_retained_account_fields(account),
2221            )
2222        });
2223    }
2224
2225    pub fn get_slot_history(&self) -> SlotHistory {
2226        from_account(&self.get_account(&sysvar::slot_history::id()).unwrap()).unwrap()
2227    }
2228
    /// Ensures the epoch-stakes cache has an entry for `leader_schedule_epoch`.
    ///
    /// No-op when the parent bank already populated this epoch. Otherwise
    /// (an epoch boundary was crossed), evicts entries older than
    /// `MAX_LEADER_SCHEDULE_STAKES` epochs and records a snapshot of the
    /// current stakes for the new epoch.
    fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
        // update epoch_stakes cache
        //  if my parent didn't populate for this staker's epoch, we've
        //  crossed a boundary
        if !self.epoch_stakes.contains_key(&leader_schedule_epoch) {
            // Evict stale entries so the cache only retains the most recent
            // MAX_LEADER_SCHEDULE_STAKES epochs' worth of stakes.
            self.epoch_stakes.retain(|&epoch, _| {
                epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
            });
            // Snapshot the current stakes for the new epoch's leader schedule.
            let stakes = self.stakes_cache.stakes().clone();
            let stakes = Arc::new(StakesEnum::from(stakes));
            let new_epoch_stakes = EpochStakes::new(stakes, leader_schedule_epoch);
            info!(
                "new epoch stakes, epoch: {}, total_stake: {}",
                leader_schedule_epoch,
                new_epoch_stakes.total_stake(),
            );

            // It is expensive to log the details of epoch stakes. Only log them at "trace"
            // level for debugging purpose.
            if log::log_enabled!(log::Level::Trace) {
                let vote_stakes: HashMap<_, _> = self
                    .stakes_cache
                    .stakes()
                    .vote_accounts()
                    .delegated_stakes()
                    .map(|(pubkey, stake)| (*pubkey, stake))
                    .collect();
                trace!("new epoch stakes, stakes: {vote_stakes:#?}");
            }
            self.epoch_stakes
                .insert(leader_schedule_epoch, new_epoch_stakes);
        }
    }
2262
    /// Test-only: installs `stakes` directly as the epoch stakes for `epoch`,
    /// bypassing the normal epoch-boundary snapshot in `update_epoch_stakes`.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn set_epoch_stakes_for_test(&mut self, epoch: Epoch, stakes: EpochStakes) {
        self.epoch_stakes.insert(epoch, stakes);
    }
2267
2268    fn update_rent(&self) {
2269        self.update_sysvar_account(&sysvar::rent::id(), |account| {
2270            create_account(
2271                &self.rent_collector.rent,
2272                self.inherit_specially_retained_account_fields(account),
2273            )
2274        });
2275    }
2276
2277    fn update_epoch_schedule(&self) {
2278        self.update_sysvar_account(&sysvar::epoch_schedule::id(), |account| {
2279            create_account(
2280                self.epoch_schedule(),
2281                self.inherit_specially_retained_account_fields(account),
2282            )
2283        });
2284    }
2285
2286    fn update_stake_history(&self, epoch: Option<Epoch>) {
2287        if epoch == Some(self.epoch()) {
2288            return;
2289        }
2290        // if I'm the first Bank in an epoch, ensure stake_history is updated
2291        self.update_sysvar_account(&sysvar::stake_history::id(), |account| {
2292            create_account::<sysvar::stake_history::StakeHistory>(
2293                self.stakes_cache.stakes().history(),
2294                self.inherit_specially_retained_account_fields(account),
2295            )
2296        });
2297    }
2298
2299    pub fn epoch_duration_in_years(&self, prev_epoch: Epoch) -> f64 {
2300        // period: time that has passed as a fraction of a year, basically the length of
2301        //  an epoch as a fraction of a year
2302        //  calculated as: slots_elapsed / (slots / year)
2303        self.epoch_schedule().get_slots_in_epoch(prev_epoch) as f64 / self.slots_per_year
2304    }
2305
2306    // Calculates the starting-slot for inflation from the activation slot.
2307    // This method assumes that `pico_inflation` will be enabled before `full_inflation`, giving
2308    // precedence to the latter. However, since `pico_inflation` is fixed-rate Inflation, should
2309    // `pico_inflation` be enabled 2nd, the incorrect start slot provided here should have no
2310    // effect on the inflation calculation.
2311    fn get_inflation_start_slot(&self) -> Slot {
2312        let mut slots = self
2313            .feature_set
2314            .full_inflation_features_enabled()
2315            .iter()
2316            .filter_map(|id| self.feature_set.activated_slot(id))
2317            .collect::<Vec<_>>();
2318        slots.sort_unstable();
2319        slots.first().cloned().unwrap_or_else(|| {
2320            self.feature_set
2321                .activated_slot(&feature_set::pico_inflation::id())
2322                .unwrap_or(0)
2323        })
2324    }
2325
    /// Number of slots between the normalized inflation start and the first
    /// slot of the current epoch.
    ///
    /// The activation slot is normalized back to the first slot of the epoch
    /// *before* the activation epoch, aligning rewards accrual with epoch
    /// boundaries.
    fn get_inflation_num_slots(&self) -> u64 {
        let inflation_activation_slot = self.get_inflation_start_slot();
        // Normalize inflation_start to align with the start of rewards accrual.
        let inflation_start_slot = self.epoch_schedule().get_first_slot_in_epoch(
            self.epoch_schedule()
                .get_epoch(inflation_activation_slot)
                .saturating_sub(1),
        );
        // NOTE(review): the unchecked subtraction assumes the current epoch's
        // first slot is never earlier than inflation_start_slot, i.e. the
        // activation slot is not in a future epoch — confirm this invariant.
        self.epoch_schedule().get_first_slot_in_epoch(self.epoch()) - inflation_start_slot
    }
2336
2337    pub fn slot_in_year_for_inflation(&self) -> f64 {
2338        let num_slots = self.get_inflation_num_slots();
2339
2340        // calculated as: num_slots / (slots / year)
2341        num_slots as f64 / self.slots_per_year
2342    }
2343
2344    fn calculate_previous_epoch_inflation_rewards(
2345        &self,
2346        prev_epoch_capitalization: u64,
2347        prev_epoch: Epoch,
2348    ) -> PrevEpochInflationRewards {
2349        let slot_in_year = self.slot_in_year_for_inflation();
2350        let (validator_rate, foundation_rate) = {
2351            let inflation = self.inflation.read().unwrap();
2352            (
2353                (*inflation).validator(slot_in_year),
2354                (*inflation).foundation(slot_in_year),
2355            )
2356        };
2357
2358        let prev_epoch_duration_in_years = self.epoch_duration_in_years(prev_epoch);
2359        let validator_rewards = (validator_rate
2360            * prev_epoch_capitalization as f64
2361            * prev_epoch_duration_in_years) as u64;
2362
2363        PrevEpochInflationRewards {
2364            validator_rewards,
2365            prev_epoch_duration_in_years,
2366            validator_rate,
2367            foundation_rate,
2368        }
2369    }
2370
2371    fn assert_validator_rewards_paid(&self, validator_rewards_paid: u64) {
2372        assert_eq!(
2373            validator_rewards_paid,
2374            u64::try_from(
2375                self.rewards
2376                    .read()
2377                    .unwrap()
2378                    .par_iter()
2379                    .map(|(_address, reward_info)| {
2380                        match reward_info.reward_type {
2381                            RewardType::Voting | RewardType::Staking => reward_info.lamports,
2382                            _ => 0,
2383                        }
2384                    })
2385                    .sum::<i64>()
2386            )
2387            .unwrap()
2388        );
2389    }
2390
    /// Returns the stake delegations eligible for rewards.
    ///
    /// When the `stake_minimum_delegation_for_rewards` feature is active,
    /// delegations below the minimum (clamped to at least 1 SOL) are filtered
    /// out and a "stake_account_filter_time" datapoint is emitted; otherwise
    /// every delegation is returned unfiltered.
    fn filter_stake_delegations<'a>(
        &self,
        stakes: &'a Stakes<StakeAccount<Delegation>>,
    ) -> Vec<(&'a Pubkey, &'a StakeAccount<Delegation>)> {
        if self
            .feature_set
            .is_active(&feature_set::stake_minimum_delegation_for_rewards::id())
        {
            let num_stake_delegations = stakes.stake_delegations().len();
            // The reward-eligibility floor is the program's minimum delegation,
            // but never less than 1 SOL.
            let min_stake_delegation =
                clone_solana_stake_program::get_minimum_delegation(&self.feature_set)
                    .max(LAMPORTS_PER_SOL);

            let (stake_delegations, filter_time_us) = measure_us!(stakes
                .stake_delegations()
                .iter()
                .filter(|(_stake_pubkey, cached_stake_account)| {
                    cached_stake_account.delegation().stake >= min_stake_delegation
                })
                .collect::<Vec<_>>());

            datapoint_info!(
                "stake_account_filter_time",
                ("filter_time_us", filter_time_us, i64),
                ("num_stake_delegations_before", num_stake_delegations, i64),
                ("num_stake_delegations_after", stake_delegations.len(), i64)
            );
            stake_delegations
        } else {
            stakes.stake_delegations().iter().collect()
        }
    }
2423
2424    /// return reward info for each vote account
2425    /// return account data for each vote account that needs to be stored
2426    /// This return value is a little awkward at the moment so that downstream existing code in the non-partitioned rewards code path can be re-used without duplication or modification.
2427    /// This function is copied from the existing code path's `store_vote_accounts`.
2428    /// The primary differences:
2429    /// - we want this fn to have no side effects (such as actually storing vote accounts) so that we
2430    ///   can compare the expected results with the current code path
2431    /// - we want to be able to batch store the vote accounts later for improved performance/cache updating
2432    fn calc_vote_accounts_to_store(
2433        vote_account_rewards: DashMap<Pubkey, VoteReward>,
2434    ) -> VoteRewardsAccounts {
2435        let len = vote_account_rewards.len();
2436        let mut result = VoteRewardsAccounts {
2437            rewards: Vec::with_capacity(len),
2438            accounts_to_store: Vec::with_capacity(len),
2439        };
2440        vote_account_rewards.into_iter().for_each(
2441            |(
2442                vote_pubkey,
2443                VoteReward {
2444                    mut vote_account,
2445                    commission,
2446                    vote_rewards,
2447                    vote_needs_store,
2448                },
2449            )| {
2450                if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
2451                    debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
2452                    return;
2453                }
2454
2455                result.rewards.push((
2456                    vote_pubkey,
2457                    RewardInfo {
2458                        reward_type: RewardType::Voting,
2459                        lamports: vote_rewards as i64,
2460                        post_balance: vote_account.lamports(),
2461                        commission: Some(commission),
2462                    },
2463                ));
2464                result
2465                    .accounts_to_store
2466                    .push(vote_needs_store.then_some(vote_account));
2467            },
2468        );
2469        result
2470    }
2471
2472    fn update_reward_history(
2473        &self,
2474        stake_rewards: StakeRewards,
2475        mut vote_rewards: Vec<(Pubkey, RewardInfo)>,
2476    ) {
2477        let additional_reserve = stake_rewards.len() + vote_rewards.len();
2478        let mut rewards = self.rewards.write().unwrap();
2479        rewards.reserve(additional_reserve);
2480        rewards.append(&mut vote_rewards);
2481        stake_rewards
2482            .into_iter()
2483            .filter(|x| x.get_stake_reward() > 0)
2484            .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
2485    }
2486
2487    fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
2488        #[allow(deprecated)]
2489        self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {
2490            let recent_blockhash_iter = locked_blockhash_queue.get_recent_blockhashes();
2491            recent_blockhashes_account::create_account_with_data_and_fields(
2492                recent_blockhash_iter,
2493                self.inherit_specially_retained_account_fields(account),
2494            )
2495        });
2496    }
2497
2498    pub fn update_recent_blockhashes(&self) {
2499        let blockhash_queue = self.blockhash_queue.read().unwrap();
2500        self.update_recent_blockhashes_locked(&blockhash_queue);
2501    }
2502
    /// Estimates a wall-clock timestamp for this bank's slot by
    /// stake-weighting recent vote timestamps.
    ///
    /// Only vote timestamps from within the last `slots_per_epoch` slots are
    /// considered; the result is bounded by `max_allowable_drift` (and
    /// optionally anchored to `epoch_start_timestamp`). Returns `None` when no
    /// epoch vote accounts exist for this epoch or the stake-weighted
    /// calculation produces nothing. Emits a "bank-timestamp" datapoint with
    /// the elapsed time.
    fn get_timestamp_estimate(
        &self,
        max_allowable_drift: MaxAllowableDrift,
        epoch_start_timestamp: Option<(Slot, UnixTimestamp)>,
    ) -> Option<UnixTimestamp> {
        let mut get_timestamp_estimate_time = Measure::start("get_timestamp_estimate");
        let slots_per_epoch = self.epoch_schedule().slots_per_epoch;
        let vote_accounts = self.vote_accounts();
        // Collect (pubkey, (slot, timestamp)) for votes whose last timestamp
        // is at most one epoch old; `checked_sub` also drops votes from the
        // future (slot greater than ours).
        let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| {
            let vote_state = account.vote_state();
            let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?;
            (slot_delta <= slots_per_epoch).then_some({
                (
                    *pubkey,
                    (
                        vote_state.last_timestamp.slot,
                        vote_state.last_timestamp.timestamp,
                    ),
                )
            })
        });
        let slot_duration = Duration::from_nanos(self.ns_per_slot as u64);
        let epoch = self.epoch_schedule().get_epoch(self.slot());
        let stakes = self.epoch_vote_accounts(epoch)?;
        let stake_weighted_timestamp = calculate_stake_weighted_timestamp(
            recent_timestamps,
            stakes,
            self.slot(),
            slot_duration,
            epoch_start_timestamp,
            max_allowable_drift,
            self.feature_set
                .is_active(&feature_set::warp_timestamp_again::id()),
        );
        get_timestamp_estimate_time.stop();
        datapoint_info!(
            "bank-timestamp",
            (
                "get_timestamp_estimate_us",
                get_timestamp_estimate_time.as_us(),
                i64
            ),
        );
        stake_weighted_timestamp
    }
2548
    /// Recalculates the bank hash
    ///
    /// This is used by ledger-tool when creating a snapshot, which
    /// recalculates the bank hash.
    ///
    /// Note that the account state is *not* allowed to change by rehashing.
    /// If modifying accounts in ledger-tool is needed, create a new bank.
    pub fn rehash(&self) {
        // Reads the current accounts delta hash, but only while the
        // remove_accounts_delta_hash feature is inactive; used below to verify
        // that rehashing did not alter account state.
        let get_delta_hash = || {
            (!self
                .feature_set
                .is_active(&feature_set::remove_accounts_delta_hash::id()))
            .then(|| {
                self.rc
                    .accounts
                    .accounts_db
                    .get_accounts_delta_hash(self.slot())
            })
            .flatten()
        };

        let mut hash = self.hash.write().unwrap();
        let curr_accounts_delta_hash = get_delta_hash();
        let new = self.hash_internal_state();
        if let Some(curr_accounts_delta_hash) = curr_accounts_delta_hash {
            // The delta hash is derived from account state: if it changed
            // across hash_internal_state(), rehashing illegally mutated
            // accounts.
            let new_accounts_delta_hash = get_delta_hash().unwrap();
            assert_eq!(
                new_accounts_delta_hash, curr_accounts_delta_hash,
                "rehashing is not allowed to change the account state",
            );
        }
        if new != *hash {
            warn!("Updating bank hash to {new}");
            *hash = new;
        }
    }
2585
    /// Freezes the bank: applies deferred state changes (rent collection, fee
    /// and rent distribution, slot history, incinerator), computes the final
    /// bank hash, and marks the slot frozen. Idempotent — a second call sees
    /// a non-default hash and does nothing.
    pub fn freeze(&self) {
        // This lock prevents any new commits from BankingStage
        // `Consumer::execute_and_commit_transactions_locked()` from
        // coming in after the last tick is observed. This is because in
        // BankingStage, any transaction successfully recorded in
        // `record_transactions()` is recorded after this `hash` lock
        // is grabbed. At the time of the successful record,
        // this means the PoH has not yet reached the last tick,
        // so this means freeze() hasn't been called yet. And because
        // BankingStage doesn't release this hash lock until both
        // record and commit are finished, those transactions will be
        // committed before this write lock can be obtained here.
        let mut hash = self.hash.write().unwrap();
        if *hash == Hash::default() {
            // finish up any deferred changes to account state
            self.collect_rent_eagerly();
            if self.feature_set.is_active(&reward_full_priority_fee::id()) {
                self.distribute_transaction_fee_details();
            } else {
                self.distribute_transaction_fees();
            }
            self.distribute_rent_fees();
            self.update_slot_history();
            self.run_incinerator();

            // freeze is a one-way trip, idempotent
            self.freeze_started.store(true, Relaxed);
            if self.is_accounts_lt_hash_enabled() {
                // updating the accounts lt hash must happen *outside* of hash_internal_state() so
                // that rehash() can be called and *not* modify self.accounts_lt_hash.
                self.update_accounts_lt_hash();

                // For lattice-hash R&D, we have a CLI arg to do extra verification.  If set, we'll
                // re-calculate the accounts lt hash every slot and compare it against the value
                // already stored in the bank.
                if self
                    .rc
                    .accounts
                    .accounts_db
                    .verify_experimental_accumulator_hash
                {
                    let slot = self.slot();
                    info!("Verifying the accounts lt hash for slot {slot}...");
                    let (calculated_accounts_lt_hash, duration) = meas_dur!({
                        self.rc
                            .accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot)
                    });
                    let actual_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap();
                    assert_eq!(
                        calculated_accounts_lt_hash,
                        *actual_accounts_lt_hash,
                        "Verifying the accounts lt hash for slot {slot} failed! calculated checksum: {}, actual checksum: {}",
                        calculated_accounts_lt_hash.0.checksum(),
                        actual_accounts_lt_hash.0.checksum(),
                    );
                    info!("Verifying the accounts lt hash for slot {slot}... Done successfully in {duration:?}");
                }
            }
            *hash = self.hash_internal_state();
            self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
        }
    }
2650
    /// Clears the `freeze_started` flag so the bank can be mutated again.
    ///
    /// Dangerous; don't use this. Only needed for ledger-tool's special
    /// command — it violates the "freeze is a one-way trip" invariant.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn unfreeze_for_ledger_tool(&self) {
        self.freeze_started.store(false, Relaxed);
    }
2656
    /// Returns this bank's epoch schedule.
    pub fn epoch_schedule(&self) -> &EpochSchedule {
        &self.epoch_schedule
    }
2660
    /// squash the parent's state up into this Bank,
    ///   this Bank becomes a root
    /// Note that this function is not thread-safe. If it is called concurrently on the same bank
    /// by multiple threads, the end result could be inconsistent.
    /// Calling code does not currently call this concurrently.
    pub fn squash(&self) -> SquashTiming {
        self.freeze();

        //this bank and all its parents are now on the rooted path
        let mut roots = vec![self.slot()];
        roots.append(&mut self.parents().iter().map(|p| p.slot()).collect());

        let mut total_index_us = 0;
        let mut total_cache_us = 0;
        let mut total_store_us = 0;

        let mut squash_accounts_time = Measure::start("squash_accounts_time");
        // Add roots oldest-first (roots is newest-first, hence .rev()).
        for slot in roots.iter().rev() {
            // root forks cannot be purged
            let add_root_timing = self.rc.accounts.add_root(*slot);
            total_index_us += add_root_timing.index_us;
            total_cache_us += add_root_timing.cache_us;
            total_store_us += add_root_timing.store_us;
        }
        squash_accounts_time.stop();

        // Drop the parent reference; this bank is now a root.
        *self.rc.parent.write().unwrap() = None;

        let mut squash_cache_time = Measure::start("squash_cache_time");
        roots
            .iter()
            .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot));
        squash_cache_time.stop();

        SquashTiming {
            squash_accounts_ms: squash_accounts_time.as_ms(),
            squash_accounts_index_ms: total_index_us / 1000,
            squash_accounts_cache_ms: total_cache_us / 1000,
            squash_accounts_store_ms: total_store_us / 1000,

            squash_cache_ms: squash_cache_time.as_ms(),
        }
    }
2704
    /// Return the more recent checkpoint of this bank instance, i.e. its
    /// parent bank, if any (`None` after `squash()` or for the genesis bank).
    pub fn parent(&self) -> Option<Arc<Bank>> {
        self.rc.parent.read().unwrap().clone()
    }
2709
    /// Returns the slot of this bank's parent.
    pub fn parent_slot(&self) -> Slot {
        self.parent_slot
    }
2713
    /// Returns the bank hash of this bank's parent.
    pub fn parent_hash(&self) -> Hash {
        self.parent_hash
    }
2717
    /// Initializes this bank's state from the genesis config: stores the
    /// genesis accounts and rewards pools (panicking on duplicates), seeds
    /// capitalization and initial accounts-data size, selects the collector id
    /// from the highest staked node, registers the genesis hash with the
    /// blockhash queue, and copies chain parameters (tick/slot timing, epoch
    /// schedule, inflation, rent, builtin programs).
    ///
    /// With the dev-context-only-utils feature, tests may override the
    /// collector id (used when no staked node exists) and the genesis hash.
    fn process_genesis_config(
        &mut self,
        genesis_config: &GenesisConfig,
        #[cfg(feature = "dev-context-only-utils")] collector_id_for_tests: Option<Pubkey>,
        #[cfg(feature = "dev-context-only-utils")] genesis_hash: Option<Hash>,
    ) {
        // Bootstrap validator collects fees until `new_from_parent` is called.
        self.fee_rate_governor = genesis_config.fee_rate_governor.clone();

        // Genesis accounts count toward capitalization and data size.
        for (pubkey, account) in genesis_config.accounts.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.capitalization.fetch_add(account.lamports(), Relaxed);
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // Rewards pools count toward data size but not capitalization.
        for (pubkey, account) in genesis_config.rewards_pools.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // After storing genesis accounts, the bank stakes cache will be warmed
        // up and can be used to set the collector id to the highest staked
        // node. If no staked nodes exist, allow fallback to an unstaked test
        // collector id during tests.
        let collector_id = self.stakes_cache.stakes().highest_staked_node().copied();
        #[cfg(feature = "dev-context-only-utils")]
        let collector_id = collector_id.or(collector_id_for_tests);
        self.collector_id =
            collector_id.expect("genesis processing failed because no staked nodes exist");

        #[cfg(not(feature = "dev-context-only-utils"))]
        let genesis_hash = genesis_config.hash();
        #[cfg(feature = "dev-context-only-utils")]
        let genesis_hash = genesis_hash.unwrap_or(genesis_config.hash());

        self.blockhash_queue
            .write()
            .unwrap()
            .genesis_hash(&genesis_hash, self.fee_rate_governor.lamports_per_signature);

        self.hashes_per_tick = genesis_config.hashes_per_tick();
        self.ticks_per_slot = genesis_config.ticks_per_slot();
        self.ns_per_slot = genesis_config.ns_per_slot();
        self.genesis_creation_time = genesis_config.creation_time;
        self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
        self.slots_per_year = genesis_config.slots_per_year();

        self.epoch_schedule = genesis_config.epoch_schedule.clone();

        self.inflation = Arc::new(RwLock::new(genesis_config.inflation));

        self.rent_collector = RentCollector::new(
            self.epoch,
            self.epoch_schedule().clone(),
            self.slots_per_year,
            genesis_config.rent.clone(),
        );

        // Add additional builtin programs specified in the genesis config
        for (name, program_id) in &genesis_config.native_instruction_processors {
            self.add_builtin_account(name, program_id);
        }
    }
2789
2790    fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) {
2791        let old_data_size = account.data().len();
2792        self.capitalization.fetch_sub(account.lamports(), Relaxed);
2793        // Both resetting account balance to 0 and zeroing the account data
2794        // is needed to really purge from AccountsDb and flush the Stakes cache
2795        account.set_lamports(0);
2796        account.data_as_mut_slice().fill(0);
2797        self.store_account(program_id, &account);
2798        self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, 0);
2799    }
2800
    /// Add a precompiled program account, owned by the native loader.
    pub fn add_precompiled_account(&self, program_id: &Pubkey) {
        self.add_precompiled_account_with_owner(program_id, native_loader::id())
    }
2805
2806    // Used by tests to simulate clusters with precompiles that aren't owned by the native loader
2807    fn add_precompiled_account_with_owner(&self, program_id: &Pubkey, owner: Pubkey) {
2808        if let Some(account) = self.get_account_with_fixed_root(program_id) {
2809            if account.executable() {
2810                return;
2811            } else {
2812                // malicious account is pre-occupying at program_id
2813                self.burn_and_purge_account(program_id, account);
2814            }
2815        };
2816
2817        assert!(
2818            !self.freeze_started(),
2819            "Can't change frozen bank by adding not-existing new precompiled program ({program_id}). \
2820                Maybe, inconsistent program activation is detected on snapshot restore?"
2821        );
2822
2823        // Add a bogus executable account, which will be loaded and ignored.
2824        let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None);
2825
2826        let account = AccountSharedData::from(Account {
2827            lamports,
2828            owner,
2829            data: vec![],
2830            executable: true,
2831            rent_epoch,
2832        });
2833        self.store_account_and_update_capitalization(program_id, &account);
2834    }
2835
    /// Sets the percentage of collected rent that is burned.
    pub fn set_rent_burn_percentage(&mut self, burn_percent: u8) {
        self.rent_collector.rent.burn_percent = burn_percent;
    }
2839
    /// Overrides the number of PoH hashes per tick (`None` disables hashing).
    pub fn set_hashes_per_tick(&mut self, hashes_per_tick: Option<u64>) {
        self.hashes_per_tick = hashes_per_tick;
    }
2843
2844    /// Return the last block hash registered.
2845    pub fn last_blockhash(&self) -> Hash {
2846        self.blockhash_queue.read().unwrap().last_hash()
2847    }
2848
2849    pub fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
2850        let blockhash_queue = self.blockhash_queue.read().unwrap();
2851        let last_hash = blockhash_queue.last_hash();
2852        let last_lamports_per_signature = blockhash_queue
2853            .get_lamports_per_signature(&last_hash)
2854            .unwrap(); // safe so long as the BlockhashQueue is consistent
2855        (last_hash, last_lamports_per_signature)
2856    }
2857
2858    pub fn is_blockhash_valid(&self, hash: &Hash) -> bool {
2859        let blockhash_queue = self.blockhash_queue.read().unwrap();
2860        blockhash_queue.is_hash_valid_for_age(hash, MAX_PROCESSING_AGE)
2861    }
2862
2863    pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 {
2864        self.rent_collector.rent.minimum_balance(data_len).max(1)
2865    }
2866
    /// Returns the fee rate governor's current lamports-per-signature value.
    pub fn get_lamports_per_signature(&self) -> u64 {
        self.fee_rate_governor.lamports_per_signature
    }
2870
2871    pub fn get_lamports_per_signature_for_blockhash(&self, hash: &Hash) -> Option<u64> {
2872        let blockhash_queue = self.blockhash_queue.read().unwrap();
2873        blockhash_queue.get_lamports_per_signature(hash)
2874    }
2875
2876    pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option<u64> {
2877        let lamports_per_signature = {
2878            let blockhash_queue = self.blockhash_queue.read().unwrap();
2879            blockhash_queue.get_lamports_per_signature(message.recent_blockhash())
2880        }
2881        .or_else(|| {
2882            self.load_message_nonce_account(message).map(
2883                |(_nonce_address, _nonce_account, nonce_data)| {
2884                    nonce_data.get_lamports_per_signature()
2885                },
2886            )
2887        })?;
2888        Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature))
2889    }
2890
    /// Returns true when startup accounts hash verification has completed or never had to run in background.
    pub fn get_startup_verification_complete(&self) -> &Arc<AtomicBool> {
        &self
            .rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verified
    }
2900
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    pub fn is_startup_verification_complete(&self) -> bool {
        self.has_initial_accounts_hash_verification_completed()
    }
2907
    /// Marks startup accounts hash verification as complete.
    /// This can occur because it completed in the background
    /// or if the verification was run in the foreground.
    pub fn set_startup_verification_complete(&self) {
        self.set_initial_accounts_hash_verification_completed();
    }
2913
2914    pub fn get_fee_for_message_with_lamports_per_signature(
2915        &self,
2916        message: &impl SVMMessage,
2917        lamports_per_signature: u64,
2918    ) -> u64 {
2919        let fee_budget_limits = FeeBudgetLimits::from(
2920            process_compute_budget_instructions(
2921                message.program_instructions_iter(),
2922                &self.feature_set,
2923            )
2924            .unwrap_or_default(),
2925        );
2926        clone_solana_fee::calculate_fee(
2927            message,
2928            lamports_per_signature == 0,
2929            self.fee_structure().lamports_per_signature,
2930            fee_budget_limits.prioritization_fee,
2931            FeeFeatures::from(self.feature_set.as_ref()),
2932        )
2933    }
2934
2935    pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option<Slot> {
2936        let blockhash_queue = self.blockhash_queue.read().unwrap();
2937        // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
2938        // length is made variable by epoch
2939        blockhash_queue
2940            .get_hash_age(blockhash)
2941            .map(|age| self.block_height + MAX_PROCESSING_AGE as u64 - age)
2942    }
2943
2944    pub fn confirmed_last_blockhash(&self) -> Hash {
2945        const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3;
2946
2947        let parents = self.parents();
2948        if parents.is_empty() {
2949            self.last_blockhash()
2950        } else {
2951            let index = NUM_BLOCKHASH_CONFIRMATIONS.min(parents.len() - 1);
2952            parents[index].last_blockhash()
2953        }
2954    }
2955
2956    /// Forget all signatures. Useful for benchmarking.
2957    pub fn clear_signatures(&self) {
2958        self.status_cache.write().unwrap().clear();
2959    }
2960
2961    pub fn clear_slot_signatures(&self, slot: Slot) {
2962        self.status_cache.write().unwrap().clear_slot_entries(slot);
2963    }
2964
2965    fn update_transaction_statuses(
2966        &self,
2967        sanitized_txs: &[impl TransactionWithMeta],
2968        processing_results: &[TransactionProcessingResult],
2969    ) {
2970        let mut status_cache = self.status_cache.write().unwrap();
2971        assert_eq!(sanitized_txs.len(), processing_results.len());
2972        for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) {
2973            if let Ok(processed_tx) = &processing_result {
2974                // Add the message hash to the status cache to ensure that this message
2975                // won't be processed again with a different signature.
2976                status_cache.insert(
2977                    tx.recent_blockhash(),
2978                    tx.message_hash(),
2979                    self.slot(),
2980                    processed_tx.status(),
2981                );
2982                // Add the transaction signature to the status cache so that transaction status
2983                // can be queried by transaction signature over RPC. In the future, this should
2984                // only be added for API nodes because voting validators don't need to do this.
2985                status_cache.insert(
2986                    tx.recent_blockhash(),
2987                    tx.signature(),
2988                    self.slot(),
2989                    processed_tx.status(),
2990                );
2991            }
2992        }
2993    }
2994
    /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank
    /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction
    /// processing without advancing to a new bank slot.
    fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        // This is needed because recent_blockhash updates necessitate synchronizations for
        // consistent tx check_age handling.
        BankWithScheduler::wait_for_paused_scheduler(self, scheduler);

        // Only acquire the write lock for the blockhash queue on block boundaries because
        // readers can starve this write lock acquisition and ticks would be slowed down too
        // much if the write lock is acquired for each tick.
        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();

        // Dev-context-only: a per-slot blockhash override configured in
        // `hash_overrides` replaces the computed blockhash; log when the
        // override actually differs.
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_blockhash_override(self.slot())
            .copied()
            .inspect(|blockhash_override| {
                if blockhash_override != blockhash {
                    info!(
                        "bank: slot: {}: overrode blockhash: {} with {}",
                        self.slot(),
                        blockhash,
                        blockhash_override
                    );
                }
            });
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash = blockhash_override.as_ref().unwrap_or(blockhash);

        // Register the (possibly overridden) blockhash with the current fee rate,
        // then update the recent-blockhashes data while still holding the queue's
        // write lock.
        w_blockhash_queue.register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
        self.update_recent_blockhashes_locked(&w_blockhash_queue);
    }
3031
    // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to
    // solana-program-test's usage...
    /// Test helper: register a freshly generated unique blockhash, with no
    /// scheduler installed.
    pub fn register_unique_recent_blockhash_for_test(&self) {
        self.register_recent_blockhash(
            &Hash::new_unique(),
            &BankWithScheduler::no_scheduler_available(),
        )
    }
3040
3041    #[cfg(feature = "dev-context-only-utils")]
3042    pub fn register_recent_blockhash_for_test(
3043        &self,
3044        blockhash: &Hash,
3045        lamports_per_signature: Option<u64>,
3046    ) {
3047        // Only acquire the write lock for the blockhash queue on block boundaries because
3048        // readers can starve this write lock acquisition and ticks would be slowed down too
3049        // much if the write lock is acquired for each tick.
3050        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
3051        if let Some(lamports_per_signature) = lamports_per_signature {
3052            w_blockhash_queue.register_hash(blockhash, lamports_per_signature);
3053        } else {
3054            w_blockhash_queue
3055                .register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
3056        }
3057    }
3058
    /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls
    /// correspond to later entries, and will boot the oldest ones once its internal cache is full.
    /// Once boot, the bank will reject transactions using that `hash`.
    ///
    /// This is NOT thread safe because if tick height is updated by two different threads, the
    /// block boundary condition could be missed.
    pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        assert!(
            !self.freeze_started(),
            "register_tick() working on a bank that is already frozen or is undergoing freezing!"
        );

        // Only the tick that completes the block registers its hash as a recent
        // blockhash.
        if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
            self.register_recent_blockhash(hash, scheduler);
        }

        // ReplayStage will start computing the accounts delta hash when it
        // detects the tick height has reached the boundary, so the system
        // needs to guarantee all account updates for the slot have been
        // committed before this tick height is incremented (like the blockhash
        // sysvar above)
        self.tick_height.fetch_add(1, Relaxed);
    }
3082
    /// Test helper: register a tick with no scheduler installed.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_tick_for_test(&self, hash: &Hash) {
        self.register_tick(hash, &BankWithScheduler::no_scheduler_available())
    }
3087
    /// Test helper: register a tick with the default (all-zero) hash.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_default_tick_for_test(&self) {
        self.register_tick_for_test(&Hash::default())
    }
3092
    /// Test helper: register a tick with a freshly generated unique hash.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_unique_tick(&self) {
        self.register_tick_for_test(&Hash::new_unique())
    }
3097
3098    pub fn is_complete(&self) -> bool {
3099        self.tick_height() == self.max_tick_height()
3100    }
3101
3102    pub fn is_block_boundary(&self, tick_height: u64) -> bool {
3103        tick_height == self.max_tick_height
3104    }
3105
3106    /// Get the max number of accounts that a transaction may lock in this block
3107    pub fn get_transaction_account_lock_limit(&self) -> usize {
3108        if let Some(transaction_account_lock_limit) = self.transaction_account_lock_limit {
3109            transaction_account_lock_limit
3110        } else if self
3111            .feature_set
3112            .is_active(&feature_set::increase_tx_account_lock_limit::id())
3113        {
3114            MAX_TX_ACCOUNT_LOCKS
3115        } else {
3116            64
3117        }
3118    }
3119
3120    /// Prepare a transaction batch from a list of versioned transactions from
3121    /// an entry. Used for tests only.
3122    pub fn prepare_entry_batch(
3123        &self,
3124        txs: Vec<VersionedTransaction>,
3125    ) -> Result<TransactionBatch<RuntimeTransaction<SanitizedTransaction>>> {
3126        let sanitized_txs = txs
3127            .into_iter()
3128            .map(|tx| {
3129                RuntimeTransaction::try_create(
3130                    tx,
3131                    MessageHash::Compute,
3132                    None,
3133                    self,
3134                    self.get_reserved_account_keys(),
3135                )
3136            })
3137            .collect::<Result<Vec<_>>>()?;
3138        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3139        let lock_results = self
3140            .rc
3141            .accounts
3142            .lock_accounts(sanitized_txs.iter(), tx_account_lock_limit);
3143        Ok(TransactionBatch::new(
3144            lock_results,
3145            self,
3146            OwnedOrBorrowed::Owned(sanitized_txs),
3147        ))
3148    }
3149
3150    /// Attempt to take locks on the accounts in a transaction batch
3151    pub fn try_lock_accounts(&self, txs: &[impl SVMMessage]) -> Vec<Result<()>> {
3152        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3153        self.rc
3154            .accounts
3155            .lock_accounts(txs.iter(), tx_account_lock_limit)
3156    }
3157
3158    /// Prepare a locked transaction batch from a list of sanitized transactions.
3159    pub fn prepare_sanitized_batch<'a, 'b, Tx: SVMMessage>(
3160        &'a self,
3161        txs: &'b [Tx],
3162    ) -> TransactionBatch<'a, 'b, Tx> {
3163        TransactionBatch::new(
3164            self.try_lock_accounts(txs),
3165            self,
3166            OwnedOrBorrowed::Borrowed(txs),
3167        )
3168    }
3169
3170    /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost
3171    /// limited packing status
3172    pub fn prepare_sanitized_batch_with_results<'a, 'b, Tx: SVMMessage>(
3173        &'a self,
3174        transactions: &'b [Tx],
3175        transaction_results: impl Iterator<Item = Result<()>>,
3176    ) -> TransactionBatch<'a, 'b, Tx> {
3177        // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit
3178        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3179        let lock_results = self.rc.accounts.lock_accounts_with_results(
3180            transactions.iter(),
3181            transaction_results,
3182            tx_account_lock_limit,
3183        );
3184        TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Borrowed(transactions))
3185    }
3186
3187    /// Prepare a transaction batch from a single transaction without locking accounts
3188    pub fn prepare_unlocked_batch_from_single_tx<'a, Tx: SVMMessage>(
3189        &'a self,
3190        transaction: &'a Tx,
3191    ) -> TransactionBatch<'a, 'a, Tx> {
3192        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3193        let lock_result = validate_account_locks(transaction.account_keys(), tx_account_lock_limit);
3194        let mut batch = TransactionBatch::new(
3195            vec![lock_result],
3196            self,
3197            OwnedOrBorrowed::Borrowed(slice::from_ref(transaction)),
3198        );
3199        batch.set_needs_unlock(false);
3200        batch
3201    }
3202
3203    /// Run transactions against a frozen bank without committing the results
3204    pub fn simulate_transaction(
3205        &self,
3206        transaction: &impl TransactionWithMeta,
3207        enable_cpi_recording: bool,
3208    ) -> TransactionSimulationResult {
3209        assert!(self.is_frozen(), "simulation bank must be frozen");
3210
3211        self.simulate_transaction_unchecked(transaction, enable_cpi_recording)
3212    }
3213
    /// Run transactions against a bank without committing the results; does not check if the bank
    /// is frozen, enabling use in single-Bank test frameworks
    pub fn simulate_transaction_unchecked(
        &self,
        transaction: &impl TransactionWithMeta,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        let account_keys = transaction.account_keys();
        let number_of_accounts = account_keys.len();
        let account_overrides = self.get_account_overrides_for_simulation(&account_keys);
        // Single-transaction batch that takes no account locks.
        let batch = self.prepare_unlocked_batch_from_single_tx(transaction);
        let mut timings = ExecuteTimings::default();

        let LoadAndExecuteTransactionsOutput {
            mut processing_results,
            ..
        } = self.load_and_execute_transactions(
            &batch,
            // After simulation, transactions will need to be forwarded to the leader
            // for processing. During forwarding, the transaction could expire if the
            // delay is not accounted for.
            MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY,
            &mut timings,
            &mut TransactionErrorMetrics::default(),
            TransactionProcessingConfig {
                account_overrides: Some(&account_overrides),
                check_program_modification_slot: self.check_program_modification_slot,
                compute_budget: self.compute_budget(),
                log_messages_bytes_limit: None,
                limit_to_load_programs: true,
                recording_config: ExecutionRecordingConfig {
                    enable_cpi_recording,
                    enable_log_recording: true,
                    enable_return_data_recording: true,
                },
                transaction_account_lock_limit: Some(self.get_transaction_account_lock_limit()),
            },
        );

        // Total compute units consumed: saturating sum of executed and errored
        // units across all program timings.
        let units_consumed =
            timings
                .details
                .per_program_timings
                .iter()
                .fold(0, |acc: u64, (_, program_timing)| {
                    (std::num::Saturating(acc)
                        + program_timing.accumulated_units
                        + program_timing.total_errored_units)
                        .0
                });

        debug!("simulate_transaction: {:?}", timings);

        // Exactly one transaction was submitted; an absent result is mapped to
        // InvalidProgramForExecution as a fallback error.
        let processing_result = processing_results
            .pop()
            .unwrap_or(Err(TransactionError::InvalidProgramForExecution));
        let (post_simulation_accounts, result, logs, return_data, inner_instructions) =
            match processing_result {
                Ok(processed_tx) => match processed_tx {
                    ProcessedTransaction::Executed(executed_tx) => {
                        let details = executed_tx.execution_details;
                        // Only surface the accounts the transaction itself named
                        // (loaded accounts may include extras beyond the keys).
                        let post_simulation_accounts = executed_tx
                            .loaded_transaction
                            .accounts
                            .into_iter()
                            .take(number_of_accounts)
                            .collect::<Vec<_>>();
                        (
                            post_simulation_accounts,
                            details.status,
                            details.log_messages,
                            details.return_data,
                            details.inner_instructions,
                        )
                    }
                    // Transaction could not be executed: report its load error
                    // with no post-simulation accounts, logs, or return data.
                    ProcessedTransaction::FeesOnly(fees_only_tx) => {
                        (vec![], Err(fees_only_tx.load_error), None, None, None)
                    }
                },
                Err(error) => (vec![], Err(error), None, None, None),
            };
        let logs = logs.unwrap_or_default();

        TransactionSimulationResult {
            result,
            logs,
            post_simulation_accounts,
            units_consumed,
            return_data,
            inner_instructions,
        }
    }
3306
3307    fn get_account_overrides_for_simulation(&self, account_keys: &AccountKeys) -> AccountOverrides {
3308        let mut account_overrides = AccountOverrides::default();
3309        let slot_history_id = sysvar::slot_history::id();
3310        if account_keys.iter().any(|pubkey| *pubkey == slot_history_id) {
3311            let current_account = self.get_account_with_fixed_root(&slot_history_id);
3312            let slot_history = current_account
3313                .as_ref()
3314                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
3315                .unwrap_or_default();
3316            if slot_history.check(self.slot()) == Check::Found {
3317                let ancestors = Ancestors::from(self.proper_ancestors().collect::<Vec<_>>());
3318                if let Some((account, _)) =
3319                    self.load_slow_with_fixed_root(&ancestors, &slot_history_id)
3320                {
3321                    account_overrides.set_slot_history(Some(account));
3322                }
3323            }
3324        }
3325        account_overrides
3326    }
3327
    /// Release the account locks for the given transactions, pairing each
    /// transaction with its original lock result (delegates to the accounts store).
    pub fn unlock_accounts<'a, Tx: SVMMessage + 'a>(
        &self,
        txs_and_results: impl Iterator<Item = (&'a Tx, &'a Result<()>)> + Clone,
    ) {
        self.rc.accounts.unlock_accounts(txs_and_results)
    }
3334
    /// Remove the given unrooted slots' state from the accounts db.
    pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) {
        self.rc.accounts.accounts_db.remove_unrooted_slots(slots)
    }
3338
3339    pub fn get_hash_age(&self, hash: &Hash) -> Option<u64> {
3340        self.blockhash_queue.read().unwrap().get_hash_age(hash)
3341    }
3342
3343    pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool {
3344        self.blockhash_queue
3345            .read()
3346            .unwrap()
3347            .is_hash_valid_for_age(hash, max_age)
3348    }
3349
3350    pub fn collect_balances(
3351        &self,
3352        batch: &TransactionBatch<impl SVMMessage>,
3353    ) -> TransactionBalances {
3354        let mut balances: TransactionBalances = vec![];
3355        for transaction in batch.sanitized_transactions() {
3356            let mut transaction_balances: Vec<u64> = vec![];
3357            for account_key in transaction.account_keys().iter() {
3358                transaction_balances.push(self.get_balance(account_key));
3359            }
3360            balances.push(transaction_balances);
3361        }
3362        balances
3363    }
3364
    /// Load accounts for and execute a batch of sanitized transactions without
    /// committing any results to the bank.
    ///
    /// * `batch` - locked batch of transactions to process
    /// * `max_age` - maximum blockhash age accepted by the pre-execution checks
    /// * `timings` - accumulator for execution timing metrics
    /// * `error_counters` - accumulator for per-error-kind counts
    /// * `processing_config` - per-call processing options (overrides, recording, limits)
    pub fn load_and_execute_transactions(
        &self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        timings: &mut ExecuteTimings,
        error_counters: &mut TransactionErrorMetrics,
        processing_config: TransactionProcessingConfig,
    ) -> LoadAndExecuteTransactionsOutput {
        let sanitized_txs = batch.sanitized_transactions();

        // Pre-execution checks (see `check_transactions`, which uses `max_age`
        // and the batch's lock results) run and are timed before execution.
        let (check_results, check_us) = measure_us!(self.check_transactions(
            sanitized_txs,
            batch.lock_results(),
            max_age,
            error_counters,
        ));
        timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us);

        let (blockhash, blockhash_lamports_per_signature) =
            self.last_blockhash_and_lamports_per_signature();
        let rent_collector_with_metrics =
            RentCollectorWithMetrics::new(self.rent_collector.clone());
        // Snapshot of bank state handed to the SVM transaction processor.
        let processing_environment = TransactionProcessingEnvironment {
            blockhash,
            blockhash_lamports_per_signature,
            epoch_total_stake: self.get_current_epoch_total_stake(),
            feature_set: Arc::clone(&self.feature_set),
            fee_lamports_per_signature: self.fee_structure.lamports_per_signature,
            rent_collector: Some(&rent_collector_with_metrics),
        };

        let sanitized_output = self
            .transaction_processor
            .load_and_execute_sanitized_transactions(
                self,
                sanitized_txs,
                check_results,
                &processing_environment,
                &processing_config,
            );

        // Accumulate the errors returned by the batch processor.
        error_counters.accumulate(&sanitized_output.error_metrics);

        // Accumulate the transaction batch execution timings.
        timings.accumulate(&sanitized_output.execute_timings);

        let ((), collect_logs_us) =
            measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.processing_results));
        timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us);

        let mut processed_counts = ProcessedTransactionCounts::default();
        let err_count = &mut error_counters.total;

        for (processing_result, tx) in sanitized_output
            .processing_results
            .iter()
            .zip(sanitized_txs)
        {
            // Emit an info log for any transaction that touches a configured
            // debug key.
            if let Some(debug_keys) = &self.transaction_debug_keys {
                for key in tx.account_keys().iter() {
                    if debug_keys.contains(key) {
                        let result = processing_result.flattened_result();
                        info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx);
                        break;
                    }
                }
            }

            if processing_result.was_processed() {
                // Signature count must be accumulated only if the transaction
                // is processed, otherwise a mismatched count between banking
                // and replay could occur
                processed_counts.signature_count +=
                    tx.signature_details().num_transaction_signatures();
                processed_counts.processed_transactions_count += 1;

                if !tx.is_simple_vote_transaction() {
                    processed_counts.processed_non_vote_transactions_count += 1;
                }
            }

            match processing_result.flattened_result() {
                Ok(()) => {
                    processed_counts.processed_with_successful_result_count += 1;
                }
                Err(err) => {
                    // Only the first error of the batch is logged, to bound log volume.
                    if err_count.0 == 0 {
                        debug!("tx error: {:?} {:?}", err, tx);
                    }
                    *err_count += 1;
                }
            }
        }

        LoadAndExecuteTransactionsOutput {
            processing_results: sanitized_output.processing_results,
            processed_counts,
        }
    }
3465
    /// Collect the log messages of executed transactions into the bank's
    /// transaction log collector, subject to the configured collection filter.
    fn collect_logs(
        &self,
        transactions: &[impl TransactionWithMeta],
        processing_results: &[TransactionProcessingResult],
    ) {
        let transaction_log_collector_config =
            self.transaction_log_collector_config.read().unwrap();
        // Fast path: log collection is disabled entirely.
        if transaction_log_collector_config.filter == TransactionLogCollectorFilter::None {
            return;
        }

        let collected_logs: Vec<_> = processing_results
            .iter()
            .zip(transactions)
            .filter_map(|(processing_result, transaction)| {
                // Skip log collection for unprocessed transactions
                let processed_tx = processing_result.processed_transaction()?;
                // Skip log collection for unexecuted transactions
                let execution_details = processed_tx.execution_details()?;
                Self::collect_transaction_logs(
                    &transaction_log_collector_config,
                    transaction,
                    execution_details,
                )
            })
            .collect();

        // Only take the collector's write lock when there is something to record.
        if !collected_logs.is_empty() {
            let mut transaction_log_collector = self.transaction_log_collector.write().unwrap();
            for (log, filtered_mentioned_addresses) in collected_logs {
                let transaction_log_index = transaction_log_collector.logs.len();
                transaction_log_collector.logs.push(log);
                // Index this log entry under every watched address it mentions.
                for key in filtered_mentioned_addresses.into_iter() {
                    transaction_log_collector
                        .mentioned_address_map
                        .entry(key)
                        .or_default()
                        .push(transaction_log_index);
                }
            }
        }
    }
3508
3509    fn collect_transaction_logs(
3510        transaction_log_collector_config: &TransactionLogCollectorConfig,
3511        transaction: &impl TransactionWithMeta,
3512        execution_details: &TransactionExecutionDetails,
3513    ) -> Option<(TransactionLogInfo, Vec<Pubkey>)> {
3514        // Skip log collection if no log messages were recorded
3515        let log_messages = execution_details.log_messages.as_ref()?;
3516
3517        let mut filtered_mentioned_addresses = Vec::new();
3518        if !transaction_log_collector_config
3519            .mentioned_addresses
3520            .is_empty()
3521        {
3522            for key in transaction.account_keys().iter() {
3523                if transaction_log_collector_config
3524                    .mentioned_addresses
3525                    .contains(key)
3526                {
3527                    filtered_mentioned_addresses.push(*key);
3528                }
3529            }
3530        }
3531
3532        let is_vote = transaction.is_simple_vote_transaction();
3533        let store = match transaction_log_collector_config.filter {
3534            TransactionLogCollectorFilter::All => {
3535                !is_vote || !filtered_mentioned_addresses.is_empty()
3536            }
3537            TransactionLogCollectorFilter::AllWithVotes => true,
3538            TransactionLogCollectorFilter::None => false,
3539            TransactionLogCollectorFilter::OnlyMentionedAddresses => {
3540                !filtered_mentioned_addresses.is_empty()
3541            }
3542        };
3543
3544        if store {
3545            Some((
3546                TransactionLogInfo {
3547                    signature: *transaction.signature(),
3548                    result: execution_details.status.clone(),
3549                    is_vote,
3550                    log_messages: log_messages.clone(),
3551                },
3552                filtered_mentioned_addresses,
3553            ))
3554        } else {
3555            None
3556        }
3557    }
3558
    /// Load the accounts data size, in bytes
    ///
    /// Computed as `accounts_data_size_initial` plus the signed delta
    /// accumulated since then, saturating at the `u64` bounds.
    pub fn load_accounts_data_size(&self) -> u64 {
        self.accounts_data_size_initial
            .saturating_add_signed(self.load_accounts_data_size_delta())
    }
3564
3565    /// Load the change in accounts data size in this Bank, in bytes
3566    pub fn load_accounts_data_size_delta(&self) -> i64 {
3567        let delta_on_chain = self.load_accounts_data_size_delta_on_chain();
3568        let delta_off_chain = self.load_accounts_data_size_delta_off_chain();
3569        delta_on_chain.saturating_add(delta_off_chain)
3570    }
3571
    /// Load the change in accounts data size in this Bank, in bytes, from on-chain events
    /// i.e. transactions
    ///
    /// The `Acquire` load pairs with the `AcqRel` updates in
    /// `update_accounts_data_size_delta_on_chain`.
    pub fn load_accounts_data_size_delta_on_chain(&self) -> i64 {
        self.accounts_data_size_delta_on_chain.load(Acquire)
    }
3577
    /// Load the change in accounts data size in this Bank, in bytes, from off-chain events
    /// i.e. rent collection
    ///
    /// The `Acquire` load pairs with the `AcqRel` updates in
    /// `update_accounts_data_size_delta_off_chain`.
    pub fn load_accounts_data_size_delta_off_chain(&self) -> i64 {
        self.accounts_data_size_delta_off_chain.load(Acquire)
    }
3583
3584    /// Update the accounts data size delta from on-chain events by adding `amount`.
3585    /// The arithmetic saturates.
3586    fn update_accounts_data_size_delta_on_chain(&self, amount: i64) {
3587        if amount == 0 {
3588            return;
3589        }
3590
3591        self.accounts_data_size_delta_on_chain
3592            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_on_chain| {
3593                Some(accounts_data_size_delta_on_chain.saturating_add(amount))
3594            })
3595            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3596            .unwrap();
3597    }
3598
3599    /// Update the accounts data size delta from off-chain events by adding `amount`.
3600    /// The arithmetic saturates.
3601    fn update_accounts_data_size_delta_off_chain(&self, amount: i64) {
3602        if amount == 0 {
3603            return;
3604        }
3605
3606        self.accounts_data_size_delta_off_chain
3607            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_off_chain| {
3608                Some(accounts_data_size_delta_off_chain.saturating_add(amount))
3609            })
3610            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3611            .unwrap();
3612    }
3613
3614    /// Calculate the data size delta and update the off-chain accounts data size delta
3615    fn calculate_and_update_accounts_data_size_delta_off_chain(
3616        &self,
3617        old_data_size: usize,
3618        new_data_size: usize,
3619    ) {
3620        let data_size_delta = calculate_data_size_delta(old_data_size, new_data_size);
3621        self.update_accounts_data_size_delta_off_chain(data_size_delta);
3622    }
3623
3624    fn filter_program_errors_and_collect_fee(
3625        &self,
3626        processing_results: &[TransactionProcessingResult],
3627    ) {
3628        let mut fees = 0;
3629
3630        processing_results.iter().for_each(|processing_result| {
3631            if let Ok(processed_tx) = processing_result {
3632                fees += processed_tx.fee_details().total_fee();
3633            }
3634        });
3635
3636        self.collector_fees.fetch_add(fees, Relaxed);
3637    }
3638
3639    // Note: this function is not yet used; next PR will call it behind a feature gate
3640    fn filter_program_errors_and_collect_fee_details(
3641        &self,
3642        processing_results: &[TransactionProcessingResult],
3643    ) {
3644        let mut accumulated_fee_details = FeeDetails::default();
3645
3646        processing_results.iter().for_each(|processing_result| {
3647            if let Ok(processed_tx) = processing_result {
3648                accumulated_fee_details.accumulate(&processed_tx.fee_details());
3649            }
3650        });
3651
3652        self.collector_fee_details
3653            .write()
3654            .unwrap()
3655            .accumulate(&accumulated_fee_details);
3656    }
3657
3658    fn update_bank_hash_stats<'a>(&self, accounts: &impl StorableAccounts<'a>) {
3659        let mut stats = BankHashStats::default();
3660        (0..accounts.len()).for_each(|i| {
3661            accounts.account(i, |account| {
3662                stats.update(&account);
3663            })
3664        });
3665        self.bank_hash_stats.accumulate(&stats);
3666    }
3667
    /// Commit the results of previously-processed transactions into this bank.
    ///
    /// In order: updates transaction/signature counters, stores the modified
    /// accounts (notifying geyser if present), collects rent and fees,
    /// synchronizes the stakes cache, merges programs modified by successful
    /// transactions into the program cache, applies the on-chain accounts data
    /// size delta, records transaction statuses, and accumulates per-phase
    /// timings into `timings`.
    ///
    /// Returns one `TransactionCommitResult` per entry of `processing_results`.
    ///
    /// # Panics
    /// Panics if the bank is frozen or has started freezing.
    pub fn commit_transactions(
        &self,
        sanitized_txs: &[impl TransactionWithMeta],
        processing_results: Vec<TransactionProcessingResult>,
        processed_counts: &ProcessedTransactionCounts,
        timings: &mut ExecuteTimings,
    ) -> Vec<TransactionCommitResult> {
        assert!(
            !self.freeze_started(),
            "commit_transactions() working on a bank that is already frozen or is undergoing freezing!"
        );

        let ProcessedTransactionCounts {
            processed_transactions_count,
            processed_non_vote_transactions_count,
            processed_with_successful_result_count,
            signature_count,
        } = *processed_counts;

        self.increment_transaction_count(processed_transactions_count);
        self.increment_non_vote_transaction_count_since_restart(
            processed_non_vote_transactions_count,
        );
        self.increment_signature_count(signature_count);

        // Failure count = processed minus successful; saturating guards
        // against inconsistent caller-supplied counts.
        let processed_with_failure_result_count =
            processed_transactions_count.saturating_sub(processed_with_successful_result_count);
        self.transaction_error_count
            .fetch_add(processed_with_failure_result_count, Relaxed);

        if processed_transactions_count > 0 {
            // At least one transaction was processed, so this bank now
            // differs from its parent.
            self.is_delta.store(true, Relaxed);
            self.transaction_entries_count.fetch_add(1, Relaxed);
            self.transactions_per_entry_max
                .fetch_max(processed_transactions_count, Relaxed);
        }

        let ((), store_accounts_us) = measure_us!({
            // If geyser is present, we must collect `SanitizedTransaction`
            // references in order to comply with that interface - until it
            // is changed.
            let maybe_transaction_refs = self
                .accounts()
                .accounts_db
                .has_accounts_update_notifier()
                .then(|| {
                    sanitized_txs
                        .iter()
                        .map(|tx| tx.as_sanitized_transaction())
                        .collect::<Vec<_>>()
                });

            let (accounts_to_store, transactions) = collect_accounts_to_store(
                sanitized_txs,
                &maybe_transaction_refs,
                &processing_results,
            );

            let to_store = (self.slot(), accounts_to_store.as_slice());
            self.update_bank_hash_stats(&to_store);
            self.rc
                .accounts
                .store_cached(to_store, transactions.as_deref());
        });

        self.collect_rent(&processing_results);

        // Cached vote and stake accounts are synchronized with accounts-db
        // after each transaction.
        let ((), update_stakes_cache_us) =
            measure_us!(self.update_stakes_cache(sanitized_txs, &processing_results));

        let ((), update_executors_us) = measure_us!({
            // Lazily acquire the program-cache write lock only if at least one
            // successful transaction actually modified a program.
            let mut cache = None;
            for processing_result in &processing_results {
                if let Some(ProcessedTransaction::Executed(executed_tx)) =
                    processing_result.processed_transaction()
                {
                    let programs_modified_by_tx = &executed_tx.programs_modified_by_tx;
                    if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() {
                        cache
                            .get_or_insert_with(|| {
                                self.transaction_processor.program_cache.write().unwrap()
                            })
                            .merge(programs_modified_by_tx);
                    }
                }
            }
        });

        // Only transactions that executed with an Ok status contribute to the
        // on-chain accounts data size delta.
        let accounts_data_len_delta = processing_results
            .iter()
            .filter_map(|processing_result| processing_result.processed_transaction())
            .filter_map(|processed_tx| processed_tx.execution_details())
            .filter_map(|details| {
                details
                    .status
                    .is_ok()
                    .then_some(details.accounts_data_len_delta)
            })
            .sum();
        self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta);

        let ((), update_transaction_statuses_us) =
            measure_us!(self.update_transaction_statuses(sanitized_txs, &processing_results));

        // Fee collection path is feature-gated: detailed fee accounting vs.
        // the legacy total-fee accumulation.
        if self.feature_set.is_active(&reward_full_priority_fee::id()) {
            self.filter_program_errors_and_collect_fee_details(&processing_results)
        } else {
            self.filter_program_errors_and_collect_fee(&processing_results)
        };

        timings.saturating_add_in_place(ExecuteTimingType::StoreUs, store_accounts_us);
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateStakesCacheUs,
            update_stakes_cache_us,
        );
        timings.saturating_add_in_place(ExecuteTimingType::UpdateExecutorsUs, update_executors_us);
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateTransactionStatuses,
            update_transaction_statuses_us,
        );

        Self::create_commit_results(processing_results)
    }
3793
3794    fn create_commit_results(
3795        processing_results: Vec<TransactionProcessingResult>,
3796    ) -> Vec<TransactionCommitResult> {
3797        processing_results
3798            .into_iter()
3799            .map(|processing_result| {
3800                let processing_result = processing_result?;
3801                let executed_units = processing_result.executed_units();
3802                let loaded_accounts_data_size = processing_result.loaded_accounts_data_size();
3803
3804                match processing_result {
3805                    ProcessedTransaction::Executed(executed_tx) => {
3806                        let execution_details = executed_tx.execution_details;
3807                        let LoadedTransaction {
3808                            rent_debits,
3809                            accounts: loaded_accounts,
3810                            fee_details,
3811                            ..
3812                        } = executed_tx.loaded_transaction;
3813
3814                        // Rent is only collected for successfully executed transactions
3815                        let rent_debits = if execution_details.was_successful() {
3816                            rent_debits
3817                        } else {
3818                            RentDebits::default()
3819                        };
3820
3821                        Ok(CommittedTransaction {
3822                            status: execution_details.status,
3823                            log_messages: execution_details.log_messages,
3824                            inner_instructions: execution_details.inner_instructions,
3825                            return_data: execution_details.return_data,
3826                            executed_units,
3827                            fee_details,
3828                            rent_debits,
3829                            loaded_account_stats: TransactionLoadedAccountsStats {
3830                                loaded_accounts_count: loaded_accounts.len(),
3831                                loaded_accounts_data_size,
3832                            },
3833                        })
3834                    }
3835                    ProcessedTransaction::FeesOnly(fees_only_tx) => Ok(CommittedTransaction {
3836                        status: Err(fees_only_tx.load_error),
3837                        log_messages: None,
3838                        inner_instructions: None,
3839                        return_data: None,
3840                        executed_units,
3841                        rent_debits: RentDebits::default(),
3842                        fee_details: fees_only_tx.fee_details,
3843                        loaded_account_stats: TransactionLoadedAccountsStats {
3844                            loaded_accounts_count: fees_only_tx.rollback_accounts.count(),
3845                            loaded_accounts_data_size,
3846                        },
3847                    }),
3848                }
3849            })
3850            .collect()
3851    }
3852
3853    fn collect_rent(&self, processing_results: &[TransactionProcessingResult]) {
3854        let collected_rent = processing_results
3855            .iter()
3856            .filter_map(|processing_result| processing_result.processed_transaction())
3857            .filter_map(|processed_tx| processed_tx.executed_transaction())
3858            .filter(|executed_tx| executed_tx.was_successful())
3859            .map(|executed_tx| executed_tx.loaded_transaction.rent)
3860            .sum();
3861        self.collected_rent.fetch_add(collected_rent, Relaxed);
3862    }
3863
3864    fn run_incinerator(&self) {
3865        if let Some((account, _)) =
3866            self.get_account_modified_since_parent_with_fixed_root(&incinerator::id())
3867        {
3868            self.capitalization.fetch_sub(account.lamports(), Relaxed);
3869            self.store_account(&incinerator::id(), &AccountSharedData::default());
3870        }
3871    }
3872
3873    /// Get stake and stake node accounts
3874    pub(crate) fn get_stake_accounts(&self, minimized_account_set: &DashSet<Pubkey>) {
3875        self.stakes_cache
3876            .stakes()
3877            .stake_delegations()
3878            .iter()
3879            .for_each(|(pubkey, _)| {
3880                minimized_account_set.insert(*pubkey);
3881            });
3882
3883        self.stakes_cache
3884            .stakes()
3885            .staked_nodes()
3886            .par_iter()
3887            .for_each(|(pubkey, _)| {
3888                minimized_account_set.insert(*pubkey);
3889            });
3890    }
3891
    /// After deserialize, populate skipped rewrites with accounts that would normally
    /// have had their data rewritten in this slot due to rent collection (but didn't).
    ///
    /// This is required when starting up from a snapshot to verify the bank hash.
    ///
    /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions.  These fns call
    /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires
    /// calculating the bank hash again.  Since calculating the bank hash *takes* the skipped
    /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be
    /// updated to the wrong value.  So, rebuild the skipped rewrites before rehashing.
    fn rebuild_skipped_rewrites(&self) {
        // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled,
        // then do *not* add anything to our skipped_rewrites.
        if self.bank_hash_skips_rent_rewrites() {
            return;
        }

        let (skipped_rewrites, measure_skipped_rewrites) =
            measure_time!(self.calculate_skipped_rewrites());
        info!(
            "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}",
            skipped_rewrites.len()
        );

        // Replace (not merge) the bank's skipped rewrites with the freshly
        // calculated set.
        *self.skipped_rewrites.lock().unwrap() = skipped_rewrites;
    }
3918
3919    /// Calculates (and returns) skipped rewrites for this bank
3920    ///
3921    /// Refer to `rebuild_skipped_rewrites()` for more documentation.
3922    /// This implementation is purposely separate to facilitate testing.
3923    ///
3924    /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the
3925    /// specific account is *not* already in the accounts delta hash.  If an account is not in
3926    /// the accounts delta hash, then it means the account was not modified.  Since (basically)
3927    /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent
3928    /// collection.  So we just need to load the accounts that would've been checked for rent
3929    /// collection, hash them, and add them to Bank::skipped_rewrites.
3930    ///
3931    /// As of this writing, there are ~350 million acounts on mainnet-beta.
3932    /// Rent collection almost always collects a single slot at a time.
3933    /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot.
3934    /// Since we haven't started processing anything yet, it should be fast enough to simply
3935    /// load the accounts directly.
3936    /// Empirically, this takes about 3-4 milliseconds.
3937    fn calculate_skipped_rewrites(&self) -> HashMap<Pubkey, AccountHash> {
3938        // The returned skipped rewrites may include accounts that were actually *not* skipped!
3939        // (This is safe, as per the fn's documentation above.)
3940        self.get_accounts_for_skipped_rewrites()
3941            .map(|(pubkey, account_hash, _account)| (pubkey, account_hash))
3942            .collect()
3943    }
3944
3945    /// Loads accounts that were selected for rent collection this slot.
3946    /// After loading the accounts, also calculate and return the account hashes.
3947    /// This is used when dealing with skipped rewrites.
3948    fn get_accounts_for_skipped_rewrites(
3949        &self,
3950    ) -> impl Iterator<Item = (Pubkey, AccountHash, AccountSharedData)> + '_ {
3951        self.rent_collection_partitions()
3952            .into_iter()
3953            .map(accounts_partition::pubkey_range_from_partition)
3954            .flat_map(|pubkey_range| {
3955                self.rc
3956                    .accounts
3957                    .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range)
3958            })
3959            .map(|(pubkey, account, _slot)| {
3960                let account_hash = AccountsDb::hash_account(&account, &pubkey);
3961                (pubkey, account_hash, account)
3962            })
3963    }
3964
3965    /// Returns the accounts, sorted by pubkey, that were part of accounts delta hash calculation
3966    /// This is used when writing a bank hash details file.
3967    pub(crate) fn get_accounts_for_bank_hash_details(&self) -> Vec<PubkeyHashAccount> {
3968        let accounts_db = &self.rc.accounts.accounts_db;
3969
3970        let mut accounts_written_this_slot =
3971            accounts_db.get_pubkey_hash_account_for_slot(self.slot());
3972
3973        // If we are skipping rewrites but also include them in the accounts delta hash, then we
3974        // need to go load those accounts and add them to the list of accounts written this slot.
3975        if !self.bank_hash_skips_rent_rewrites()
3976            && accounts_db.test_skip_rewrites_but_include_in_bank_hash
3977        {
3978            let pubkeys_written_this_slot: HashSet<_> = accounts_written_this_slot
3979                .iter()
3980                .map(|pubkey_hash_account| pubkey_hash_account.pubkey)
3981                .collect();
3982
3983            let rent_collection_accounts = self.get_accounts_for_skipped_rewrites();
3984            for (pubkey, hash, account) in rent_collection_accounts {
3985                if !pubkeys_written_this_slot.contains(&pubkey) {
3986                    accounts_written_this_slot.push(PubkeyHashAccount {
3987                        pubkey,
3988                        hash,
3989                        account,
3990                    });
3991                }
3992            }
3993        }
3994
3995        // Sort the accounts by pubkey to match the order of the accounts delta hash.
3996        // This also makes comparison of files from different nodes deterministic.
3997        accounts_written_this_slot.sort_unstable_by_key(|account| account.pubkey);
3998        accounts_written_this_slot
3999    }
4000
    /// Collect rent from the accounts in this slot's rent-collection partition(s).
    ///
    /// No-op when lazy rent collection is set or when the
    /// `disable_partitioned_rent_collection` feature is active.  When multiple
    /// partitions exist (i.e. slots were skipped), collection runs in parallel
    /// only after verifying the partitions' pubkey ranges do not overlap;
    /// otherwise it falls back to serial collection.  Emits a datapoint with
    /// rent-collection metrics at the end.
    fn collect_rent_eagerly(&self) {
        if self.lazy_rent_collection.load(Relaxed) {
            return;
        }

        if self
            .feature_set
            .is_active(&feature_set::disable_partitioned_rent_collection::id())
        {
            return;
        }

        let mut measure = Measure::start("collect_rent_eagerly-ms");
        let partitions = self.rent_collection_partitions();
        let count = partitions.len();
        let rent_metrics = RentMetrics::default();
        // partitions will usually be 1, but could be more if we skip slots
        let mut parallel = count > 1;
        if parallel {
            let ranges = partitions
                .iter()
                .map(|partition| {
                    (
                        *partition,
                        accounts_partition::pubkey_range_from_partition(*partition),
                    )
                })
                .collect::<Vec<_>>();
            // test every range to make sure ranges are not overlapping
            // some tests collect rent from overlapping ranges
            // example: [(0, 31, 32), (0, 0, 128), (0, 27, 128)]
            // read-modify-write of an account for rent collection cannot be done in parallel
            'outer: for i in 0..ranges.len() {
                for j in 0..ranges.len() {
                    if i == j {
                        continue;
                    }

                    let i = &ranges[i].1;
                    let j = &ranges[j].1;
                    // make sure i doesn't contain j
                    if i.contains(j.start()) || i.contains(j.end()) {
                        // Overlap found: must collect serially to avoid
                        // concurrent read-modify-write of the same account.
                        parallel = false;
                        break 'outer;
                    }
                }
            }

            if parallel {
                let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
                thread_pool.install(|| {
                    ranges.into_par_iter().for_each(|range| {
                        self.collect_rent_in_range(range.0, range.1, &rent_metrics)
                    });
                });
            }
        }
        if !parallel {
            // collect serially
            partitions
                .into_iter()
                .for_each(|partition| self.collect_rent_in_partition(partition, &rent_metrics));
        }
        measure.stop();
        datapoint_info!(
            "collect_rent_eagerly",
            ("accounts", rent_metrics.count.load(Relaxed), i64),
            ("partitions", count, i64),
            ("total_time_us", measure.as_us(), i64),
            (
                "hold_range_us",
                rent_metrics.hold_range_us.load(Relaxed),
                i64
            ),
            ("load_us", rent_metrics.load_us.load(Relaxed), i64),
            ("collect_us", rent_metrics.collect_us.load(Relaxed), i64),
            ("hash_us", rent_metrics.hash_us.load(Relaxed), i64),
            ("store_us", rent_metrics.store_us.load(Relaxed), i64),
        );
    }
4081
4082    fn rent_collection_partitions(&self) -> Vec<Partition> {
4083        if !self.use_fixed_collection_cycle() {
4084            // This mode is for production/development/testing.
4085            // In this mode, we iterate over the whole pubkey value range for each epochs
4086            // including warm-up epochs.
4087            // The only exception is the situation where normal epochs are relatively short
4088            // (currently less than 2 day). In that case, we arrange a single collection
4089            // cycle to be multiple of epochs so that a cycle could be greater than the 2 day.
4090            self.variable_cycle_partitions()
4091        } else {
4092            // This mode is mainly for benchmarking only.
4093            // In this mode, we always iterate over the whole pubkey value range with
4094            // <slot_count_in_two_day> slots as a collection cycle, regardless warm-up or
4095            // alignment between collection cycles and epochs.
4096            // Thus, we can simulate stable processing load of eager rent collection,
4097            // strictly proportional to the number of pubkeys since genesis.
4098            self.fixed_cycle_partitions()
4099        }
4100    }
4101
4102    /// true if rent collection does NOT rewrite accounts whose pubkey indicates
4103    ///  it is time for rent collection, but the account is rent exempt.
4104    /// false if rent collection DOES rewrite accounts if the account is rent exempt
4105    /// This is the default behavior historically.
4106    fn bank_hash_skips_rent_rewrites(&self) -> bool {
4107        self.feature_set
4108            .is_active(&feature_set::skip_rent_rewrites::id())
4109    }
4110
4111    /// true if rent fees should be collected (i.e. disable_rent_fees_collection is NOT enabled)
4112    fn should_collect_rent(&self) -> bool {
4113        !self
4114            .feature_set
4115            .is_active(&feature_set::disable_rent_fees_collection::id())
4116    }
4117
    /// Collect rent from `accounts`
    ///
    /// This fn is called inside a parallel loop from `collect_rent_in_partition()`.  Avoid adding
    /// any code that causes contention on shared memory/data (i.e. do not update atomic metrics).
    ///
    /// The return value is a struct of computed values that `collect_rent_in_partition()` will
    /// reduce at the end of its parallel loop.  If possible, place data/computation that cause
    /// contention/take locks in the return struct and process them in
    /// `collect_rent_from_partition()` after reducing the parallel loop.
    ///
    /// `rent_paying_pubkeys` is the expected set of rent-paying pubkeys for this partition
    /// (None when not initialized at load time, which should only happen in tests);
    /// `partition_index` is used only for diagnostics when an unexpected rent-paying
    /// pubkey is encountered.
    fn collect_rent_from_accounts(
        &self,
        mut accounts: Vec<(Pubkey, AccountSharedData, Slot)>,
        rent_paying_pubkeys: Option<&HashSet<Pubkey>>,
        partition_index: PartitionIndex,
    ) -> CollectRentFromAccountsInfo {
        let mut rent_debits = RentDebits::default();
        let mut total_rent_collected_info = CollectedInfo::default();
        let mut accounts_to_store =
            Vec::<(&Pubkey, &AccountSharedData)>::with_capacity(accounts.len());
        let mut time_collecting_rent_us = 0;
        let mut time_storing_accounts_us = 0;
        let can_skip_rewrites = self.bank_hash_skips_rent_rewrites();
        let test_skip_rewrites_but_include_in_bank_hash = self
            .rc
            .accounts
            .accounts_db
            .test_skip_rewrites_but_include_in_bank_hash;
        let mut skipped_rewrites = Vec::default();
        for (pubkey, account, _loaded_slot) in accounts.iter_mut() {
            let rent_epoch_pre = account.rent_epoch();
            // `collect_rent_from_account` may mutate `account` (debit rent
            // and/or bump its rent epoch) in place.
            let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account(
                &self.feature_set,
                &self.rent_collector,
                pubkey,
                account
            ));
            time_collecting_rent_us += collect_rent_us;
            let rent_epoch_post = account.rent_epoch();

            // did the account change in any way due to rent collection?
            let rent_epoch_changed = rent_epoch_post != rent_epoch_pre;
            let account_changed = rent_collected_info.rent_amount != 0 || rent_epoch_changed;

            // always store the account, regardless if it changed or not
            let always_store_accounts =
                !can_skip_rewrites && !test_skip_rewrites_but_include_in_bank_hash;

            // only store accounts where we collected rent
            // but get the hash for all these accounts even if collected rent is 0 (= not updated).
            // Also, there's another subtle side-effect from rewrites: this
            // ensures we verify the whole on-chain state (= all accounts)
            // via the bank delta hash slowly once per an epoch.
            if account_changed || always_store_accounts {
                if rent_collected_info.rent_amount > 0 {
                    if let Some(rent_paying_pubkeys) = rent_paying_pubkeys {
                        if !rent_paying_pubkeys.contains(pubkey) {
                            let partition_from_pubkey = accounts_partition::partition_from_pubkey(
                                pubkey,
                                self.epoch_schedule.slots_per_epoch,
                            );
                            // Submit datapoint instead of assert while we verify this is correct
                            datapoint_warn!(
                                "bank-unexpected_rent_paying_pubkey",
                                ("slot", self.slot(), i64),
                                ("pubkey", pubkey.to_string(), String),
                                ("partition_index", partition_index, i64),
                                ("partition_from_pubkey", partition_from_pubkey, i64)
                            );
                            warn!(
                                "Collecting rent from unexpected pubkey: {}, slot: {}, parent_slot: {:?}, \
                                partition_index: {}, partition_from_pubkey: {}",
                                pubkey,
                                self.slot(),
                                self.parent().map(|bank| bank.slot()),
                                partition_index,
                                partition_from_pubkey,
                            );
                        }
                    }
                } else {
                    debug_assert_eq!(rent_collected_info.rent_amount, 0);
                    if rent_epoch_changed {
                        datapoint_info!(
                            "bank-rent_collection_updated_only_rent_epoch",
                            ("slot", self.slot(), i64),
                            ("pubkey", pubkey.to_string(), String),
                            ("rent_epoch_pre", rent_epoch_pre, i64),
                            ("rent_epoch_post", rent_epoch_post, i64),
                        );
                    }
                }
                total_rent_collected_info += rent_collected_info;
                accounts_to_store.push((pubkey, account));
            } else if !account_changed
                && !can_skip_rewrites
                && test_skip_rewrites_but_include_in_bank_hash
            {
                // include rewrites that we skipped in the accounts delta hash.
                // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites.
                // This code path exists to allow us to test the long term effects on validators when the skipped rewrites
                // feature is enabled.
                let hash = AccountsDb::hash_account(account, pubkey);
                skipped_rewrites.push((*pubkey, hash));
            }
            rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports());
        }

        if !accounts_to_store.is_empty() {
            // TODO: Maybe do not call `store_accounts()` here.  Instead return `accounts_to_store`
            // and have `collect_rent_in_partition()` perform all the stores.
            let (_, store_accounts_us) =
                measure_us!(self.store_accounts((self.slot(), &accounts_to_store[..])));
            time_storing_accounts_us += store_accounts_us;
        }

        CollectRentFromAccountsInfo {
            skipped_rewrites,
            rent_collected_info: total_rent_collected_info,
            rent_rewards: rent_debits.into_unordered_rewards_iter().collect(),
            time_collecting_rent_us,
            time_storing_accounts_us,
            num_accounts: accounts.len(),
        }
    }
4242
4243    /// convert 'partition' to a pubkey range and 'collect_rent_in_range'
4244    fn collect_rent_in_partition(&self, partition: Partition, metrics: &RentMetrics) {
4245        let subrange_full = accounts_partition::pubkey_range_from_partition(partition);
4246        self.collect_rent_in_range(partition, subrange_full, metrics)
4247    }
4248
4249    /// get all pubkeys that we expect to be rent-paying or None, if this was not initialized at load time (that should only exist in test cases)
4250    fn get_rent_paying_pubkeys(&self, partition: &Partition) -> Option<HashSet<Pubkey>> {
4251        self.rc
4252            .accounts
4253            .accounts_db
4254            .accounts_index
4255            .rent_paying_accounts_by_partition
4256            .get()
4257            .and_then(|rent_paying_accounts| {
4258                rent_paying_accounts.is_initialized().then(|| {
4259                    accounts_partition::get_partition_end_indexes(partition)
4260                        .into_iter()
4261                        .flat_map(|end_index| {
4262                            rent_paying_accounts.get_pubkeys_in_partition_index(end_index)
4263                        })
4264                        .cloned()
4265                        .collect::<HashSet<_>>()
4266                })
4267            })
4268    }
4269
    /// Load accounts with pubkeys in 'subrange_full'.
    /// Collect rent and update 'account.rent_epoch' as necessary.
    /// Store accounts, whether rent was collected or not (depending on whether skipping rewrites is enabled).
    /// Update the bank's skipped-rewrites set for all rewrites that were skipped.
    fn collect_rent_in_range(
        &self,
        partition: Partition,
        subrange_full: RangeInclusive<Pubkey>,
        metrics: &RentMetrics,
    ) {
        let mut hold_range = Measure::start("hold_range");
        let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
        thread_pool.install(|| {
            // Pin the whole pubkey range in memory for the duration of
            // collection; released with a matching `false` call below.
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, true, thread_pool);
            hold_range.stop();
            metrics.hold_range_us.fetch_add(hold_range.as_us(), Relaxed);

            // Expected rent-paying pubkeys for this partition; `None` when the
            // rent-paying tracking structure is not initialized.
            let rent_paying_pubkeys_ = self.get_rent_paying_pubkeys(&partition);
            let rent_paying_pubkeys = rent_paying_pubkeys_.as_ref();

            // divide the range into num_threads smaller ranges and process in parallel
            // Note that 'pubkey_range_from_partition' cannot easily be re-used here to break the range smaller.
            // It has special handling of 0..0 and partition_count changes affect all ranges unevenly.
            let num_threads = clone_solana_accounts_db::accounts_db::quarter_thread_count() as u64;
            let sz = std::mem::size_of::<u64>();
            let start_prefix = accounts_partition::prefix_from_pubkey(subrange_full.start());
            let end_prefix_inclusive = accounts_partition::prefix_from_pubkey(subrange_full.end());
            let range = end_prefix_inclusive - start_prefix;
            let increment = range / num_threads;
            let mut results = (0..num_threads)
                .into_par_iter()
                .map(|chunk| {
                    // Each chunk covers a contiguous span of the u64 prefix
                    // space; the prefix is spliced into the first 8 bytes of
                    // the pubkey bounds via `merge_prefix`.
                    let offset = |chunk| start_prefix + chunk * increment;
                    let start = offset(chunk);
                    let last = chunk == num_threads - 1;
                    let merge_prefix = |prefix: u64, mut bound: Pubkey| {
                        bound.as_mut()[0..sz].copy_from_slice(&prefix.to_be_bytes());
                        bound
                    };
                    let start = merge_prefix(start, *subrange_full.start());
                    let (accounts, measure_load_accounts) = measure_time!(if last {
                        // Final chunk ends exactly at the inclusive end of the
                        // full range so rounding in `increment` loses no keys.
                        let end = *subrange_full.end();
                        let subrange = start..=end; // IN-clusive
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    } else {
                        let end = merge_prefix(offset(chunk + 1), *subrange_full.start());
                        let subrange = start..end; // EX-clusive, the next 'start' will be this same value
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    });
                    CollectRentInPartitionInfo::new(
                        self.collect_rent_from_accounts(accounts, rent_paying_pubkeys, partition.1),
                        Duration::from_nanos(measure_load_accounts.as_ns()),
                    )
                })
                .reduce(
                    CollectRentInPartitionInfo::default,
                    CollectRentInPartitionInfo::reduce,
                );

            self.skipped_rewrites
                .lock()
                .unwrap()
                .extend(results.skipped_rewrites);

            // We cannot assert here that we collected from all expected keys.
            // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports.

            // Release the in-memory hold taken at the top of this closure.
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, false, thread_pool);

            self.collected_rent
                .fetch_add(results.rent_collected, Relaxed);
            self.update_accounts_data_size_delta_off_chain(
                -(results.accounts_data_size_reclaimed as i64),
            );
            self.rewards
                .write()
                .unwrap()
                .append(&mut results.rent_rewards);

            metrics
                .load_us
                .fetch_add(results.time_loading_accounts_us, Relaxed);
            metrics
                .collect_us
                .fetch_add(results.time_collecting_rent_us, Relaxed);
            metrics
                .store_us
                .fetch_add(results.time_storing_accounts_us, Relaxed);
            metrics.count.fetch_add(results.num_accounts, Relaxed);
        });
    }
4369
4370    pub(crate) fn fixed_cycle_partitions_between_slots(
4371        &self,
4372        starting_slot: Slot,
4373        ending_slot: Slot,
4374    ) -> Vec<Partition> {
4375        let slot_count_in_two_day = self.slot_count_in_two_day();
4376        accounts_partition::get_partitions(ending_slot, starting_slot, slot_count_in_two_day)
4377    }
4378
4379    fn fixed_cycle_partitions(&self) -> Vec<Partition> {
4380        self.fixed_cycle_partitions_between_slots(self.parent_slot(), self.slot())
4381    }
4382
    /// Compute the rent-collection partitions between the two slots using the
    /// variable (per-epoch) collection cycle, emitting extra "gapped" partitions
    /// when skipped slots straddle an epoch boundary.
    pub(crate) fn variable_cycle_partitions_between_slots(
        &self,
        starting_slot: Slot,
        ending_slot: Slot,
    ) -> Vec<Partition> {
        let (starting_epoch, mut starting_slot_index) =
            self.get_epoch_and_slot_index(starting_slot);
        let (ending_epoch, ending_slot_index) = self.get_epoch_and_slot_index(ending_slot);

        let mut partitions = vec![];
        if starting_epoch < ending_epoch {
            // A gap of more than one slot means at least one slot was skipped.
            let slot_skipped = (ending_slot - starting_slot) > 1;
            if slot_skipped {
                // Generate special partitions because there are skipped slots
                // exactly at the epoch transition.

                let parent_last_slot_index = self.get_slots_in_epoch(starting_epoch) - 1;

                // ... for parent epoch
                partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
                    starting_slot_index,
                    parent_last_slot_index,
                    starting_epoch,
                ));

                if ending_slot_index > 0 {
                    // ... for current epoch
                    partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
                        0,
                        0,
                        ending_epoch,
                    ));
                }
            }
            // The normal partition below starts at the beginning of the ending epoch.
            starting_slot_index = 0;
        }

        partitions.push(self.partition_from_normal_slot_indexes(
            starting_slot_index,
            ending_slot_index,
            ending_epoch,
        ));

        partitions
    }
4428
4429    fn variable_cycle_partitions(&self) -> Vec<Partition> {
4430        self.variable_cycle_partitions_between_slots(self.parent_slot(), self.slot())
4431    }
4432
4433    fn do_partition_from_slot_indexes(
4434        &self,
4435        start_slot_index: SlotIndex,
4436        end_slot_index: SlotIndex,
4437        epoch: Epoch,
4438        generated_for_gapped_epochs: bool,
4439    ) -> Partition {
4440        let slot_count_per_epoch = self.get_slots_in_epoch(epoch);
4441
4442        let cycle_params = if !self.use_multi_epoch_collection_cycle(epoch) {
4443            // mnb should always go through this code path
4444            accounts_partition::rent_single_epoch_collection_cycle_params(
4445                epoch,
4446                slot_count_per_epoch,
4447            )
4448        } else {
4449            accounts_partition::rent_multi_epoch_collection_cycle_params(
4450                epoch,
4451                slot_count_per_epoch,
4452                self.first_normal_epoch(),
4453                self.slot_count_in_two_day() / slot_count_per_epoch,
4454            )
4455        };
4456        accounts_partition::get_partition_from_slot_indexes(
4457            cycle_params,
4458            start_slot_index,
4459            end_slot_index,
4460            generated_for_gapped_epochs,
4461        )
4462    }
4463
4464    fn partition_from_normal_slot_indexes(
4465        &self,
4466        start_slot_index: SlotIndex,
4467        end_slot_index: SlotIndex,
4468        epoch: Epoch,
4469    ) -> Partition {
4470        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, false)
4471    }
4472
4473    fn partition_from_slot_indexes_with_gapped_epochs(
4474        &self,
4475        start_slot_index: SlotIndex,
4476        end_slot_index: SlotIndex,
4477        epoch: Epoch,
4478    ) -> Partition {
4479        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, true)
4480    }
4481
    // Given short epochs, it's too costly to collect rent eagerly
    // within an epoch, so lower the frequency of it.
    // This logic isn't strictly eager anymore and should only be used
    // for development/performance purposes.
    // Absolutely not under ClusterType::MainnetBeta!!!!
    fn use_multi_epoch_collection_cycle(&self, epoch: Epoch) -> bool {
        // Force normal behavior, disabling multi epoch collection cycle for manual local testing
        #[cfg(not(test))]
        if self.slot_count_per_normal_epoch()
            == clone_solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
        {
            return false;
        }

        // Only kick in once warmup epochs are over and epochs are shorter than
        // the two-day collection window.
        epoch >= self.first_normal_epoch()
            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
    }
4499
    /// Whether the fixed (two-day) rent-collection cycle should be used.
    /// Never true on MainnetBeta; intended for development clusters with
    /// epochs shorter than the two-day collection window.
    pub(crate) fn use_fixed_collection_cycle(&self) -> bool {
        // Force normal behavior, disabling fixed collection cycle for manual local testing
        #[cfg(not(test))]
        if self.slot_count_per_normal_epoch()
            == clone_solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
        {
            return false;
        }

        self.cluster_type() != ClusterType::MainnetBeta
            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
    }
4512
4513    fn slot_count_in_two_day(&self) -> SlotCount {
4514        Self::slot_count_in_two_day_helper(self.ticks_per_slot)
4515    }
4516
4517    // This value is specially chosen to align with slots per epoch in mainnet-beta and testnet
4518    // Also, assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect
4519    // rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally.
4520    pub fn slot_count_in_two_day_helper(ticks_per_slot: SlotCount) -> SlotCount {
4521        2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot
4522    }
4523
4524    fn slot_count_per_normal_epoch(&self) -> SlotCount {
4525        self.get_slots_in_epoch(self.first_normal_epoch())
4526    }
4527
    /// The cluster type this bank belongs to (e.g. MainnetBeta, Testnet).
    pub fn cluster_type(&self) -> ClusterType {
        // unwrap is safe; self.cluster_type is ensured to be Some() always...
        // we are only using Option here for ABI compatibility...
        self.cluster_type.unwrap()
    }
4533
4534    /// Process a batch of transactions.
4535    #[must_use]
4536    pub fn load_execute_and_commit_transactions(
4537        &self,
4538        batch: &TransactionBatch<impl TransactionWithMeta>,
4539        max_age: usize,
4540        collect_balances: bool,
4541        recording_config: ExecutionRecordingConfig,
4542        timings: &mut ExecuteTimings,
4543        log_messages_bytes_limit: Option<usize>,
4544    ) -> (Vec<TransactionCommitResult>, TransactionBalancesSet) {
4545        self.do_load_execute_and_commit_transactions_with_pre_commit_callback(
4546            batch,
4547            max_age,
4548            collect_balances,
4549            recording_config,
4550            timings,
4551            log_messages_bytes_limit,
4552            None::<fn(&mut _, &_) -> _>,
4553        )
4554        .unwrap()
4555    }
4556
4557    pub fn load_execute_and_commit_transactions_with_pre_commit_callback<'a>(
4558        &'a self,
4559        batch: &TransactionBatch<impl TransactionWithMeta>,
4560        max_age: usize,
4561        collect_balances: bool,
4562        recording_config: ExecutionRecordingConfig,
4563        timings: &mut ExecuteTimings,
4564        log_messages_bytes_limit: Option<usize>,
4565        pre_commit_callback: impl FnOnce(
4566            &mut ExecuteTimings,
4567            &[TransactionProcessingResult],
4568        ) -> PreCommitResult<'a>,
4569    ) -> Result<(Vec<TransactionCommitResult>, TransactionBalancesSet)> {
4570        self.do_load_execute_and_commit_transactions_with_pre_commit_callback(
4571            batch,
4572            max_age,
4573            collect_balances,
4574            recording_config,
4575            timings,
4576            log_messages_bytes_limit,
4577            Some(pre_commit_callback),
4578        )
4579    }
4580
    /// Shared implementation for the `load_execute_and_commit_transactions*`
    /// entry points: optionally collects pre-balances, loads and executes the
    /// batch, runs the optional pre-commit callback, commits, then optionally
    /// collects post-balances.
    fn do_load_execute_and_commit_transactions_with_pre_commit_callback<'a>(
        &'a self,
        batch: &TransactionBatch<impl TransactionWithMeta>,
        max_age: usize,
        collect_balances: bool,
        recording_config: ExecutionRecordingConfig,
        timings: &mut ExecuteTimings,
        log_messages_bytes_limit: Option<usize>,
        pre_commit_callback: Option<
            impl FnOnce(&mut ExecuteTimings, &[TransactionProcessingResult]) -> PreCommitResult<'a>,
        >,
    ) -> Result<(Vec<TransactionCommitResult>, TransactionBalancesSet)> {
        let pre_balances = if collect_balances {
            self.collect_balances(batch)
        } else {
            vec![]
        };

        let LoadAndExecuteTransactionsOutput {
            processing_results,
            processed_counts,
        } = self.load_and_execute_transactions(
            batch,
            max_age,
            timings,
            &mut TransactionErrorMetrics::default(),
            TransactionProcessingConfig {
                account_overrides: None,
                check_program_modification_slot: self.check_program_modification_slot,
                compute_budget: self.compute_budget(),
                log_messages_bytes_limit,
                limit_to_load_programs: false,
                recording_config,
                transaction_account_lock_limit: Some(self.get_transaction_account_lock_limit()),
            },
        );

        // pre_commit_callback could initiate an atomic operation (i.e. poh recording with block
        // producing unified scheduler). in that case, it returns Some(freeze_lock), which should
        // unlocked only after calling commit_transactions() immediately after calling the
        // callback.
        let freeze_lock = if let Some(pre_commit_callback) = pre_commit_callback {
            pre_commit_callback(timings, &processing_results)?
        } else {
            None
        };
        let commit_results = self.commit_transactions(
            batch.sanitized_transactions(),
            processing_results,
            &processed_counts,
            timings,
        );
        // Release the freeze lock (if any) only after the commit above.
        drop(freeze_lock);
        let post_balances = if collect_balances {
            self.collect_balances(batch)
        } else {
            vec![]
        };
        Ok((
            commit_results,
            TransactionBalancesSet::new(pre_balances, post_balances),
        ))
    }
4644
4645    /// Process a Transaction. This is used for unit tests and simply calls the vector
4646    /// Bank::process_transactions method.
4647    pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
4648        self.try_process_transactions(std::iter::once(tx))?[0].clone()?;
4649        tx.signatures
4650            .first()
4651            .map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap())
4652    }
4653
4654    /// Process a Transaction and store metadata. This is used for tests and the banks services. It
4655    /// replicates the vector Bank::process_transaction method with metadata recording enabled.
4656    pub fn process_transaction_with_metadata(
4657        &self,
4658        tx: impl Into<VersionedTransaction>,
4659    ) -> Result<CommittedTransaction> {
4660        let txs = vec![tx.into()];
4661        let batch = self.prepare_entry_batch(txs)?;
4662
4663        let (mut commit_results, ..) = self.load_execute_and_commit_transactions(
4664            &batch,
4665            MAX_PROCESSING_AGE,
4666            false, // collect_balances
4667            ExecutionRecordingConfig {
4668                enable_cpi_recording: false,
4669                enable_log_recording: true,
4670                enable_return_data_recording: true,
4671            },
4672            &mut ExecuteTimings::default(),
4673            Some(1000 * 1000),
4674        );
4675
4676        commit_results.remove(0)
4677    }
4678
4679    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
4680    /// Short circuits if any of the transactions do not pass sanitization checks.
4681    pub fn try_process_transactions<'a>(
4682        &self,
4683        txs: impl Iterator<Item = &'a Transaction>,
4684    ) -> Result<Vec<Result<()>>> {
4685        let txs = txs
4686            .map(|tx| VersionedTransaction::from(tx.clone()))
4687            .collect();
4688        self.try_process_entry_transactions(txs)
4689    }
4690
4691    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
4692    /// Short circuits if any of the transactions do not pass sanitization checks.
4693    pub fn try_process_entry_transactions(
4694        &self,
4695        txs: Vec<VersionedTransaction>,
4696    ) -> Result<Vec<Result<()>>> {
4697        let batch = self.prepare_entry_batch(txs)?;
4698        Ok(self.process_transaction_batch(&batch))
4699    }
4700
4701    #[must_use]
4702    fn process_transaction_batch(
4703        &self,
4704        batch: &TransactionBatch<impl TransactionWithMeta>,
4705    ) -> Vec<Result<()>> {
4706        self.load_execute_and_commit_transactions(
4707            batch,
4708            MAX_PROCESSING_AGE,
4709            false,
4710            ExecutionRecordingConfig::new_single_setting(false),
4711            &mut ExecuteTimings::default(),
4712            None,
4713        )
4714        .0
4715        .into_iter()
4716        .map(|commit_result| commit_result.map(|_| ()))
4717        .collect()
4718    }
4719
4720    /// Create, sign, and process a Transaction from `keypair` to `to` of
4721    /// `n` lamports where `blockhash` is the last Entry ID observed by the client.
4722    pub fn transfer(&self, n: u64, keypair: &Keypair, to: &Pubkey) -> Result<Signature> {
4723        let blockhash = self.last_blockhash();
4724        let tx = system_transaction::transfer(keypair, to, n, blockhash);
4725        let signature = tx.signatures[0];
4726        self.process_transaction(&tx).map(|_| signature)
4727    }
4728
    /// Lamport balance of the given account.
    pub fn read_balance(account: &AccountSharedData) -> u64 {
        account.lamports()
    }
4732    /// Each program would need to be able to introspect its own state
4733    /// this is hard-coded to the Budget language
4734    pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
4735        self.get_account(pubkey)
4736            .map(|x| Self::read_balance(&x))
4737            .unwrap_or(0)
4738    }
4739
4740    /// Compute all the parents of the bank in order
4741    pub fn parents(&self) -> Vec<Arc<Bank>> {
4742        let mut parents = vec![];
4743        let mut bank = self.parent();
4744        while let Some(parent) = bank {
4745            parents.push(parent.clone());
4746            bank = parent.parent();
4747        }
4748        parents
4749    }
4750
4751    /// Compute all the parents of the bank including this bank itself
4752    pub fn parents_inclusive(self: Arc<Self>) -> Vec<Arc<Bank>> {
4753        let mut parents = self.parents();
4754        parents.insert(0, self);
4755        parents
4756    }
4757
4758    /// fn store the single `account` with `pubkey`.
4759    /// Uses `store_accounts`, which works on a vector of accounts.
4760    pub fn store_account(&self, pubkey: &Pubkey, account: &AccountSharedData) {
4761        self.store_accounts((self.slot(), &[(pubkey, account)][..]))
4762    }
4763
    /// Store a set of accounts: updates the stakes cache for each account,
    /// updates bank hash stats, then writes the accounts to the cache.
    /// Panics if the bank has already started freezing.
    pub fn store_accounts<'a>(&self, accounts: impl StorableAccounts<'a>) {
        assert!(!self.freeze_started());
        let mut m = Measure::start("stakes_cache.check_and_store");
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();

        // Keep the stakes cache in sync with any stake/vote accounts being stored.
        (0..accounts.len()).for_each(|i| {
            accounts.account(i, |account| {
                self.stakes_cache.check_and_store(
                    account.pubkey(),
                    &account,
                    new_warmup_cooldown_rate_epoch,
                )
            })
        });
        self.update_bank_hash_stats(&accounts);
        self.rc.accounts.store_accounts_cached(accounts);
        m.stop();
        self.rc
            .accounts
            .accounts_db
            .stats
            .stakes_cache_check_and_store_us
            .fetch_add(m.as_us(), Relaxed);
    }
4788
4789    pub fn force_flush_accounts_cache(&self) {
4790        self.rc
4791            .accounts
4792            .accounts_db
4793            .flush_accounts_cache(true, Some(self.slot()))
4794    }
4795
4796    pub fn flush_accounts_cache_if_needed(&self) {
4797        self.rc
4798            .accounts
4799            .accounts_db
4800            .flush_accounts_cache(false, Some(self.slot()))
4801    }
4802
    /// Technically this issues (or even burns!) new lamports,
    /// so be extra careful for its usage.
    /// Stores `new_account` and adjusts the bank's capitalization by the
    /// lamport delta against the previously stored account (if any), then
    /// updates the off-chain accounts-data-size delta.
    fn store_account_and_update_capitalization(
        &self,
        pubkey: &Pubkey,
        new_account: &AccountSharedData,
    ) {
        let old_account_data_size =
            if let Some(old_account) = self.get_account_with_fixed_root_no_cache(pubkey) {
                match new_account.lamports().cmp(&old_account.lamports()) {
                    std::cmp::Ordering::Greater => {
                        // Net lamports created: capitalization grows.
                        let increased = new_account.lamports() - old_account.lamports();
                        trace!(
                            "store_account_and_update_capitalization: increased: {} {}",
                            pubkey,
                            increased
                        );
                        self.capitalization.fetch_add(increased, Relaxed);
                    }
                    std::cmp::Ordering::Less => {
                        // Net lamports burned: capitalization shrinks.
                        let decreased = old_account.lamports() - new_account.lamports();
                        trace!(
                            "store_account_and_update_capitalization: decreased: {} {}",
                            pubkey,
                            decreased
                        );
                        self.capitalization.fetch_sub(decreased, Relaxed);
                    }
                    std::cmp::Ordering::Equal => {}
                }
                old_account.data().len()
            } else {
                // Brand-new account: all of its lamports are newly issued.
                trace!(
                    "store_account_and_update_capitalization: created: {} {}",
                    pubkey,
                    new_account.lamports()
                );
                self.capitalization
                    .fetch_add(new_account.lamports(), Relaxed);
                0
            };

        self.store_account(pubkey, new_account);
        self.calculate_and_update_accounts_data_size_delta_off_chain(
            old_account_data_size,
            new_account.data().len(),
        );
    }
4851
4852    pub fn accounts(&self) -> Arc<Accounts> {
4853        self.rc.accounts.clone()
4854    }
4855
    /// Finish bank construction: apply feature activations, restore cost-tracker
    /// limits implied by already-active features, register builtins and
    /// precompiles (unless suppressed), and configure the program runtime
    /// environments.
    fn finish_init(
        &mut self,
        genesis_config: &GenesisConfig,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
    ) {
        self.rewards_pool_pubkeys =
            Arc::new(genesis_config.rewards_pools.keys().cloned().collect());

        self.apply_feature_activations(
            ApplyFeatureActivationsCaller::FinishInit,
            debug_do_not_add_builtins,
        );

        // Cost-Tracker is not serialized in snapshot or any configs.
        // We must apply previously activated features related to limits here
        // so that the initial bank state is consistent with the feature set.
        // Cost-tracker limits are propagated through children banks.
        if self
            .feature_set
            .is_active(&feature_set::raise_block_limits_to_50m::id())
        {
            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits();
            self.write_cost_tracker().unwrap().set_limits(
                account_cost_limit,
                block_cost_limit,
                vote_cost_limit,
            );
        }

        // Checked after the 50m feature so the higher 60m limits win if both are active.
        if self
            .feature_set
            .is_active(&feature_set::raise_block_limits_to_60m::id())
        {
            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0256_block_limits();
            self.write_cost_tracker().unwrap().set_limits(
                account_cost_limit,
                block_cost_limit,
                vote_cost_limit,
            );
        }

        // If the accounts delta hash is still in use, start the background account hasher
        if !self
            .feature_set
            .is_active(&feature_set::remove_accounts_delta_hash::id())
        {
            self.rc.accounts.accounts_db.start_background_hasher();
        }

        if !debug_do_not_add_builtins {
            for builtin in BUILTINS
                .iter()
                .chain(additional_builtins.unwrap_or(&[]).iter())
            {
                // The builtin should be added if it has no enable feature ID
                // and it has not been migrated to Core BPF.
                //
                // If a program was previously migrated to Core BPF, accountsDB
                // from snapshot should contain the BPF program accounts.
                let builtin_is_bpf = |program_id: &Pubkey| {
                    self.get_account(program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false)
                };
                if builtin.enable_feature_id.is_none() && !builtin_is_bpf(&builtin.program_id) {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(0, builtin.name.len(), builtin.entrypoint),
                    );
                }
            }
            // Precompiles with no gating feature are always available.
            for precompile in get_precompiles() {
                if precompile.feature.is_none() {
                    self.add_precompile(&precompile.program_id);
                }
            }
        }

        self.transaction_processor
            .configure_program_runtime_environments(
                Some(Arc::new(
                    create_program_runtime_environment_v1(
                        &self.feature_set,
                        &self.compute_budget().unwrap_or_default(),
                        false, /* deployment */
                        false, /* debugging_features */
                    )
                    .unwrap(),
                )),
                Some(Arc::new(create_program_runtime_environment_v2(
                    &self.compute_budget().unwrap_or_default(),
                    false, /* debugging_features */
                ))),
            );
    }
4954
4955    pub fn set_inflation(&self, inflation: Inflation) {
4956        *self.inflation.write().unwrap() = inflation;
4957    }
4958
4959    /// Get a snapshot of the current set of hard forks
4960    pub fn hard_forks(&self) -> HardForks {
4961        self.hard_forks.read().unwrap().clone()
4962    }
4963
4964    pub fn register_hard_fork(&self, new_hard_fork_slot: Slot) {
4965        let bank_slot = self.slot();
4966
4967        let lock = self.freeze_lock();
4968        let bank_frozen = *lock != Hash::default();
4969        if new_hard_fork_slot < bank_slot {
4970            warn!(
4971                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older \
4972                than the bank at slot {bank_slot} that attempted to register it."
4973            );
4974        } else if (new_hard_fork_slot == bank_slot) && bank_frozen {
4975            warn!(
4976                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same \
4977                slot as the bank at slot {bank_slot} that attempted to register it, but that \
4978                bank is already frozen."
4979            );
4980        } else {
4981            self.hard_forks
4982                .write()
4983                .unwrap()
4984                .register(new_hard_fork_slot);
4985        }
4986    }
4987
4988    pub fn get_account_with_fixed_root_no_cache(
4989        &self,
4990        pubkey: &Pubkey,
4991    ) -> Option<AccountSharedData> {
4992        self.load_account_with(pubkey, |_| false)
4993            .map(|(acc, _slot)| acc)
4994    }
4995
4996    fn load_account_with(
4997        &self,
4998        pubkey: &Pubkey,
4999        callback: impl for<'local> Fn(&'local AccountSharedData) -> bool,
5000    ) -> Option<(AccountSharedData, Slot)> {
5001        self.rc
5002            .accounts
5003            .accounts_db
5004            .load_account_with(&self.ancestors, pubkey, callback)
5005    }
5006
5007    // Hi! leaky abstraction here....
5008    // try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
5009    // processing. That alternative fn provides more safety.
5010    pub fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
5011        self.get_account_modified_slot(pubkey)
5012            .map(|(acc, _slot)| acc)
5013    }
5014
5015    // Hi! leaky abstraction here....
5016    // use this over get_account() if it's called ONLY from on-chain runtime account
5017    // processing (i.e. from in-band replay/banking stage; that ensures root is *fixed* while
5018    // running).
5019    // pro: safer assertion can be enabled inside AccountsDb
5020    // con: panics!() if called from off-chain processing
5021    pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
5022        self.get_account_modified_slot_with_fixed_root(pubkey)
5023            .map(|(acc, _slot)| acc)
5024    }
5025
5026    // See note above get_account_with_fixed_root() about when to prefer this function
5027    pub fn get_account_modified_slot_with_fixed_root(
5028        &self,
5029        pubkey: &Pubkey,
5030    ) -> Option<(AccountSharedData, Slot)> {
5031        self.load_slow_with_fixed_root(&self.ancestors, pubkey)
5032    }
5033
5034    pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
5035        self.load_slow(&self.ancestors, pubkey)
5036    }
5037
5038    fn load_slow(
5039        &self,
5040        ancestors: &Ancestors,
5041        pubkey: &Pubkey,
5042    ) -> Option<(AccountSharedData, Slot)> {
5043        // get_account (= primary this fn caller) may be called from on-chain Bank code even if we
5044        // try hard to use get_account_with_fixed_root for that purpose...
5045        // so pass safer LoadHint:Unspecified here as a fallback
5046        self.rc.accounts.load_without_fixed_root(ancestors, pubkey)
5047    }
5048
    /// Loads `pubkey` from the accounts store via the fixed-root path.
    /// Only safe when the root cannot move during the call; see the note on
    /// `get_account_with_fixed_root()`.
    fn load_slow_with_fixed_root(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.rc.accounts.load_with_fixed_root(ancestors, pubkey)
    }
5056
    /// Scans the accounts store for all accounts owned by `program_id` on this
    /// bank's fork, honoring the `config` scan options.
    pub fn get_program_accounts(
        &self,
        program_id: &Pubkey,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc
            .accounts
            .load_by_program(&self.ancestors, self.bank_id, program_id, config)
    }
5066
    /// Like `get_program_accounts`, but only returns accounts for which
    /// `filter` returns true.
    pub fn get_filtered_program_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        program_id: &Pubkey,
        filter: F,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_program_with_filter(
            &self.ancestors,
            self.bank_id,
            program_id,
            filter,
            config,
        )
    }
5081
    /// Scans accounts via a secondary index (`index_key`), returning those for
    /// which `filter` returns true.
    ///
    /// `byte_limit_for_scan` optionally bounds the amount of account data the
    /// scan may accumulate (enforced by accounts-db).
    pub fn get_filtered_indexed_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        index_key: &IndexKey,
        filter: F,
        config: &ScanConfig,
        byte_limit_for_scan: Option<usize>,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_index_key_with_filter(
            &self.ancestors,
            self.bank_id,
            index_key,
            filter,
            config,
            byte_limit_for_scan,
        )
    }
5098
    /// Returns whether `key` is covered by the configured secondary account
    /// indexes (delegates to accounts-db).
    pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
        self.rc.accounts.account_indexes_include_key(key)
    }
5102
    /// Returns all the accounts this bank can load
    ///
    /// `sort_results` is forwarded to accounts-db to request deterministic
    /// result ordering.
    pub fn get_all_accounts(&self, sort_results: bool) -> ScanResult<Vec<PubkeyAccountSlot>> {
        self.rc
            .accounts
            .load_all(&self.ancestors, self.bank_id, sort_results)
    }
5109
    // Scans all the accounts this bank can load, applying `scan_func`
    /// `scan_func` is invoked per item; `sort_results` is forwarded to
    /// accounts-db to request deterministic ordering.
    pub fn scan_all_accounts<F>(&self, scan_func: F, sort_results: bool) -> ScanResult<()>
    where
        F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),
    {
        self.rc
            .accounts
            .scan_all(&self.ancestors, self.bank_id, scan_func, sort_results)
    }
5119
    /// Returns the accounts owned by `program_id` that were stored in this
    /// bank's slot (i.e. modified since the parent bank).
    pub fn get_program_accounts_modified_since_parent(
        &self,
        program_id: &Pubkey,
    ) -> Vec<TransactionAccount> {
        self.rc
            .accounts
            .load_by_program_slot(self.slot(), Some(program_id))
    }
5128
5129    pub fn get_transaction_logs(
5130        &self,
5131        address: Option<&Pubkey>,
5132    ) -> Option<Vec<TransactionLogInfo>> {
5133        self.transaction_log_collector
5134            .read()
5135            .unwrap()
5136            .get_logs_for_address(address)
5137    }
5138
    /// Returns all the accounts stored in this slot
    /// (i.e. every account modified since the parent bank, any owner).
    pub fn get_all_accounts_modified_since_parent(&self) -> Vec<TransactionAccount> {
        self.rc.accounts.load_by_program_slot(self.slot(), None)
    }
5143
5144    // if you want get_account_modified_since_parent without fixed_root, please define so...
5145    fn get_account_modified_since_parent_with_fixed_root(
5146        &self,
5147        pubkey: &Pubkey,
5148    ) -> Option<(AccountSharedData, Slot)> {
5149        let just_self: Ancestors = Ancestors::from(vec![self.slot()]);
5150        if let Some((account, slot)) = self.load_slow_with_fixed_root(&just_self, pubkey) {
5151            if slot == self.slot() {
5152                return Some((account, slot));
5153            }
5154        }
5155        None
5156    }
5157
    /// Returns up to `num` of the largest accounts (by lamports) on this fork,
    /// with `filter_by_address`/`filter` controlling address inclusion or
    /// exclusion; `sort_results` is forwarded to accounts-db.
    pub fn get_largest_accounts(
        &self,
        num: usize,
        filter_by_address: &HashSet<Pubkey>,
        filter: AccountAddressFilter,
        sort_results: bool,
    ) -> ScanResult<Vec<(Pubkey, u64)>> {
        self.rc.accounts.load_largest_accounts(
            &self.ancestors,
            self.bank_id,
            num,
            filter_by_address,
            filter,
            sort_results,
        )
    }
5174
    /// Return the accumulated executed transaction count
    /// (relaxed atomic load; incremented via `increment_transaction_count`).
    pub fn transaction_count(&self) -> u64 {
        self.transaction_count.load(Relaxed)
    }
5179
    /// Returns the number of non-vote transactions processed without error
    /// since the most recent boot from snapshot or genesis.
    /// This value is not shared though the network, nor retained
    /// within snapshots, but is preserved in `Bank::new_from_parent`.
    pub fn non_vote_transaction_count_since_restart(&self) -> u64 {
        self.non_vote_transaction_count_since_restart.load(Relaxed)
    }
5187
5188    /// Return the transaction count executed only in this bank
5189    pub fn executed_transaction_count(&self) -> u64 {
5190        self.transaction_count()
5191            .saturating_sub(self.parent().map_or(0, |parent| parent.transaction_count()))
5192    }
5193
    /// Returns the count of transactions that failed with an error
    /// (relaxed atomic load).
    pub fn transaction_error_count(&self) -> u64 {
        self.transaction_error_count.load(Relaxed)
    }
5197
    /// Returns the count of entries containing transactions
    /// (relaxed atomic load).
    pub fn transaction_entries_count(&self) -> u64 {
        self.transaction_entries_count.load(Relaxed)
    }
5201
    /// Returns the maximum number of transactions observed in a single entry
    /// (relaxed atomic load).
    pub fn transactions_per_entry_max(&self) -> u64 {
        self.transactions_per_entry_max.load(Relaxed)
    }
5205
    /// Adds `tx_count` to the accumulated executed transaction counter
    /// (relaxed atomic add).
    fn increment_transaction_count(&self, tx_count: u64) {
        self.transaction_count.fetch_add(tx_count, Relaxed);
    }
5209
    /// Adds `tx_count` to the since-restart non-vote transaction counter
    /// (relaxed atomic add).
    fn increment_non_vote_transaction_count_since_restart(&self, tx_count: u64) {
        self.non_vote_transaction_count_since_restart
            .fetch_add(tx_count, Relaxed);
    }
5214
    /// Returns the accumulated signature count (relaxed atomic load); this
    /// value is folded into the bank hash in `hash_internal_state`.
    pub fn signature_count(&self) -> u64 {
        self.signature_count.load(Relaxed)
    }
5218
    /// Adds `signature_count` to the accumulated signature counter
    /// (relaxed atomic add).
    fn increment_signature_count(&self, signature_count: u64) {
        self.signature_count.fetch_add(signature_count, Relaxed);
    }
5222
5223    pub fn get_signature_status_processed_since_parent(
5224        &self,
5225        signature: &Signature,
5226    ) -> Option<Result<()>> {
5227        if let Some((slot, status)) = self.get_signature_status_slot(signature) {
5228            if slot <= self.slot() {
5229                return Some(status);
5230            }
5231        }
5232        None
5233    }
5234
5235    pub fn get_signature_status_with_blockhash(
5236        &self,
5237        signature: &Signature,
5238        blockhash: &Hash,
5239    ) -> Option<Result<()>> {
5240        let rcache = self.status_cache.read().unwrap();
5241        rcache
5242            .get_status(signature, blockhash, &self.ancestors)
5243            .map(|v| v.1)
5244    }
5245
5246    pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
5247        let rcache = self.status_cache.read().unwrap();
5248        rcache.get_status_any_blockhash(signature, &self.ancestors)
5249    }
5250
5251    pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
5252        self.get_signature_status_slot(signature).map(|v| v.1)
5253    }
5254
    /// Returns true if `signature` has been processed on this fork.
    pub fn has_signature(&self, signature: &Signature) -> bool {
        self.get_signature_status_slot(signature).is_some()
    }
5258
    /// Hash the `accounts` HashMap. This represents a validator's interpretation
    ///  of the delta of the ledger since the last vote and up to now
    fn hash_internal_state(&self) -> Hash {
        let measure_total = Measure::start("");
        let slot = self.slot();

        // Unless the remove_accounts_delta_hash feature is active, compute the
        // accounts delta hash for this slot (timed via measure_us!).
        let delta_hash_info = (!self
            .feature_set
            .is_active(&feature_set::remove_accounts_delta_hash::id()))
        .then(|| {
            measure_us!({
                self.rc
                    .accounts
                    .accounts_db
                    .calculate_accounts_delta_hash_internal(
                        slot,
                        None,
                        self.skipped_rewrites.lock().unwrap().clone(),
                    )
            })
        });

        // Base bank hash: parent hash + (optional) accounts delta hash +
        // signature count + last blockhash.
        let mut hash = if let Some((accounts_delta_hash, _measure)) = delta_hash_info.as_ref() {
            hashv(&[
                self.parent_hash.as_ref(),
                accounts_delta_hash.0.as_ref(),
                &self.signature_count().to_le_bytes(),
                self.last_blockhash().as_ref(),
            ])
        } else {
            hashv(&[
                self.parent_hash.as_ref(),
                &self.signature_count().to_le_bytes(),
                self.last_blockhash().as_ref(),
            ])
        };

        // Mix in either the accounts lt hash (accounts_lt_hash feature active)
        // or the epoch accounts hash (when this bank should include it), and
        // build the matching log fragment for the "bank frozen" line below.
        let accounts_hash_info = if self
            .feature_set
            .is_active(&feature_set::accounts_lt_hash::id())
        {
            let accounts_lt_hash = &*self.accounts_lt_hash.lock().unwrap();
            let lt_hash_bytes = bytemuck::must_cast_slice(&accounts_lt_hash.0 .0);
            hash = hashv(&[hash.as_ref(), lt_hash_bytes]);
            let checksum = accounts_lt_hash.0.checksum();
            Some(format!(", accounts_lt_hash checksum: {checksum}"))
        } else {
            // NOTE: may block until the epoch accounts hash calculation completes.
            let epoch_accounts_hash = self.wait_get_epoch_accounts_hash();
            epoch_accounts_hash.map(|epoch_accounts_hash| {
                hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]);
                format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref())
            })
        };

        // If a hard fork is registered between the parent slot and this slot,
        // fold its data into the hash.
        let buf = self
            .hard_forks
            .read()
            .unwrap()
            .get_hash_data(slot, self.parent_slot());
        if let Some(buf) = buf {
            let hard_forked_hash = extend_and_hash(&hash, &buf);
            warn!("hard fork at slot {slot} by hashing {buf:?}: {hash} => {hard_forked_hash}");
            hash = hard_forked_hash;
        }

        #[cfg(feature = "dev-context-only-utils")]
        let hash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_bank_hash_override(slot)
            .copied()
            .inspect(|&hash_override| {
                if hash_override != hash {
                    info!(
                        "bank: slot: {}: overrode bank hash: {} with {}",
                        self.slot(),
                        hash,
                        hash_override
                    );
                }
            });
        // Avoid to optimize out `hash` along with the whole computation by super smart rustc.
        // hash_override is used by ledger-tool's simulate-block-production, which prefers
        // the actual bank freezing processing for accurate simulation.
        #[cfg(feature = "dev-context-only-utils")]
        let hash = hash_override.unwrap_or(std::hint::black_box(hash));

        let bank_hash_stats = self.bank_hash_stats.load();

        let total_us = measure_total.end_as_us();

        // Report metrics and log the frozen-bank summary line.
        let (accounts_delta_hash_us, accounts_delta_hash_log) = delta_hash_info
            .map(|(hash, us)| (us, format!(" accounts_delta: {}", hash.0)))
            .unzip();
        datapoint_info!(
            "bank-hash_internal_state",
            ("slot", slot, i64),
            ("total_us", total_us, i64),
            ("accounts_delta_hash_us", accounts_delta_hash_us, Option<i64>),
        );
        info!(
            "bank frozen: {slot} hash: {hash}{} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}",
            accounts_delta_hash_log.unwrap_or_default(),
            self.signature_count(),
            self.last_blockhash(),
            self.capitalization(),
            accounts_hash_info.unwrap_or_default(),
        );
        hash
    }
5370
    /// Returns the fees accumulated for this bank's collector
    /// (relaxed atomic load).
    pub fn collector_fees(&self) -> u64 {
        self.collector_fees.load(Relaxed)
    }
5374
5375    /// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot.
5376    /// Should it be included in *this* bank?
5377    fn should_include_epoch_accounts_hash(&self) -> bool {
5378        if !epoch_accounts_hash_utils::is_enabled_this_epoch(self) {
5379            return false;
5380        }
5381
5382        let stop_slot = epoch_accounts_hash_utils::calculation_stop(self);
5383        self.parent_slot() < stop_slot && self.slot() >= stop_slot
5384    }
5385
    /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH
    /// calculation has not completed yet, this fn will block until it does complete.
    ///
    /// Returns `None` when this bank should not include the EAH; the time
    /// spent blocked is reported via the datapoint below.
    fn wait_get_epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
        if !self.should_include_epoch_accounts_hash() {
            return None;
        }

        // Blocks until the EAH manager has a result; measure how long we waited.
        let (epoch_accounts_hash, waiting_time_us) = measure_us!(self
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .wait_get_epoch_accounts_hash());

        datapoint_info!(
            "bank-wait_get_epoch_accounts_hash",
            ("slot", self.slot(), i64),
            ("waiting-time-us", waiting_time_us, i64),
        );
        Some(epoch_accounts_hash)
    }
5407
    /// Used by ledger tool to run a final hash calculation once all ledger replay has completed.
    /// This should not be called by validator code.
    pub fn run_final_hash_calc(&self, on_halt_store_hash_raw_data_for_debug: bool) {
        self.force_flush_accounts_cache();
        // note that this slot may not be a root
        // Result is intentionally discarded: mismatches are ignored
        // (`ignore_mismatch: true`) for this halt-time calculation.
        _ = self.verify_accounts_hash(
            None,
            VerifyAccountsHashConfig {
                test_hash_calculation: false,
                ignore_mismatch: true,
                require_rooted_bank: false,
                run_in_background: false,
                store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug,
            },
            None,
        );
    }
5425
    /// Recalculate the accounts hash from the account stores. Used to verify a snapshot.
    /// return true if all is good
    /// Only called from startup or test code.
    ///
    /// When `config.run_in_background` is true, verification runs on a spawned
    /// "solBgHashVerify" thread and this fn optimistically returns `true`; a
    /// failure is reported from that thread instead.
    #[must_use]
    fn verify_accounts_hash(
        &self,
        base: Option<(Slot, /*capitalization*/ u64)>,
        mut config: VerifyAccountsHashConfig,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        // Which verification scheme to run: merkle-based accounts hash, or
        // lattice-based accounts lt hash.
        #[derive(Debug, Eq, PartialEq)]
        enum VerifyKind {
            Merkle,
            Lattice,
        }

        let accounts = &self.rc.accounts;
        // Wait until initial hash calc is complete before starting a new hash calc.
        // This should only occur when we halt at a slot in ledger-tool.
        accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread();

        let slot = self.slot();

        let verify_kind = match (
            duplicates_lt_hash.is_some(),
            self.rc
                .accounts
                .accounts_db
                .is_experimental_accumulator_hash_enabled(),
        ) {
            (true, _) => VerifyKind::Lattice,
            (false, false) => VerifyKind::Merkle,
            (false, true) => {
                // Calculating the accounts lt hash from storages *requires* a duplicates_lt_hash.
                // If it is None here, then we must use the index instead, which also means we
                // cannot run in the background.
                config.run_in_background = false;
                VerifyKind::Lattice
            }
        };

        // If this slot is not a root but the config requires one, retry on the
        // parent bank (or panic when there is no parent to fall back to).
        if config.require_rooted_bank && !accounts.accounts_db.accounts_index.is_alive_root(slot) {
            if let Some(parent) = self.parent() {
                info!(
                    "slot {slot} is not a root, so verify accounts hash on parent bank at slot {}",
                    parent.slot(),
                );
                if verify_kind == VerifyKind::Lattice {
                    // The duplicates_lt_hash is only valid for the current slot, so we must fall
                    // back to verifying the accounts lt hash with the index (which also means we
                    // cannot run in the background).
                    config.run_in_background = false;
                }
                return parent.verify_accounts_hash(base, config, None);
            } else {
                // this will result in mismatch errors
                // accounts hash calc doesn't include unrooted slots
                panic!("cannot verify accounts hash because slot {slot} is not a root");
            }
        }

        // The snapshot storages must be captured *before* starting the background verification.
        // Otherwise, it is possible that a delayed call to `get_snapshot_storages()` will *not*
        // get the correct storages required to calculate and verify the accounts hashes.
        let snapshot_storages = self.rc.accounts.accounts_db.get_storages(RangeFull);
        let capitalization = self.capitalization();
        let verify_config = VerifyAccountsHashAndLamportsConfig {
            ancestors: &self.ancestors,
            epoch_schedule: self.epoch_schedule(),
            rent_collector: self.rent_collector(),
            test_hash_calculation: config.test_hash_calculation,
            ignore_mismatch: config.ignore_mismatch,
            store_detailed_debug_info: config.store_hash_raw_data_for_debug,
            use_bg_thread_pool: config.run_in_background,
        };

        info!(
            "Verifying accounts, in background? {}, verify kind: {verify_kind:?}",
            config.run_in_background,
        );
        if config.run_in_background {
            // Clone everything the background thread needs, since it outlives
            // this call frame.
            let accounts = Arc::clone(accounts);
            let accounts_ = Arc::clone(&accounts);
            let ancestors = self.ancestors.clone();
            let epoch_schedule = self.epoch_schedule().clone();
            let rent_collector = self.rent_collector().clone();
            let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone();
            accounts.accounts_db.verify_accounts_hash_in_bg.start(|| {
                Builder::new()
                    .name("solBgHashVerify".into())
                    .spawn(move || {
                        info!("Initial background accounts hash verification has started");
                        let start = Instant::now();
                        let mut lattice_verify_time = None;
                        let mut merkle_verify_time = None;
                        let is_ok = match verify_kind {
                            VerifyKind::Lattice => {
                                // accounts lt hash is *enabled* so use lattice-based verification
                                let accounts_db = &accounts_.accounts_db;
                                let (calculated_accounts_lt_hash, duration) =
                                    meas_dur!(accounts_db.thread_pool_hash.install(|| {
                                        accounts_db
                                            .calculate_accounts_lt_hash_at_startup_from_storages(
                                                snapshot_storages.0.as_slice(),
                                                &duplicates_lt_hash.unwrap(),
                                            )
                                    }));
                                let is_ok =
                                    calculated_accounts_lt_hash == expected_accounts_lt_hash;
                                if !is_ok {
                                    let expected = expected_accounts_lt_hash.0.checksum();
                                    let calculated = calculated_accounts_lt_hash.0.checksum();
                                    error!(
                                        "Verifying accounts failed: accounts lattice hashes do not \
                                         match, expected: {expected}, calculated: {calculated}",
                                    );
                                }
                                lattice_verify_time = Some(duration);
                                is_ok
                            }
                            VerifyKind::Merkle => {
                                // accounts lt hash is *disabled* so use merkle-based verification
                                let snapshot_storages_and_slots = (
                                    snapshot_storages.0.as_slice(),
                                    snapshot_storages.1.as_slice(),
                                );
                                let (is_ok, duration) = meas_dur!(accounts_
                                    .verify_accounts_hash_and_lamports(
                                        snapshot_storages_and_slots,
                                        slot,
                                        capitalization,
                                        base,
                                        VerifyAccountsHashAndLamportsConfig {
                                            ancestors: &ancestors,
                                            epoch_schedule: &epoch_schedule,
                                            rent_collector: &rent_collector,
                                            ..verify_config
                                        },
                                    ));
                                merkle_verify_time = Some(duration);
                                is_ok
                            }
                        };
                        accounts_
                            .accounts_db
                            .verify_accounts_hash_in_bg
                            .background_finished();
                        let total_time = start.elapsed();
                        datapoint_info!(
                            "startup_verify_accounts",
                            ("total_us", total_time.as_micros(), i64),
                            (
                                "verify_accounts_lt_hash_us",
                                lattice_verify_time.as_ref().map(Duration::as_micros),
                                Option<i64>
                            ),
                            ("verify_accounts_hash_us",
                                merkle_verify_time.as_ref().map(Duration::as_micros),
                                Option<i64>
                            ),
                        );
                        info!("Initial background accounts hash verification has stopped");
                        is_ok
                    })
                    .unwrap()
            });
            true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread.
        } else {
            // Foreground verification: compute and compare synchronously.
            match verify_kind {
                VerifyKind::Lattice => {
                    let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone();
                    let calculated_accounts_lt_hash = if let Some(duplicates_lt_hash) =
                        duplicates_lt_hash
                    {
                        accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_storages(
                                snapshot_storages.0.as_slice(),
                                &duplicates_lt_hash,
                            )
                    } else {
                        accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot)
                    };
                    let is_ok = calculated_accounts_lt_hash == expected_accounts_lt_hash;
                    if !is_ok {
                        let expected = expected_accounts_lt_hash.0.checksum();
                        let calculated = calculated_accounts_lt_hash.0.checksum();
                        error!(
                            "Verifying accounts failed: accounts lattice hashes do not \
                             match, expected: {expected}, calculated: {calculated}",
                        );
                    }
                    is_ok
                }
                VerifyKind::Merkle => {
                    let snapshot_storages_and_slots = (
                        snapshot_storages.0.as_slice(),
                        snapshot_storages.1.as_slice(),
                    );
                    let is_ok = accounts.verify_accounts_hash_and_lamports(
                        snapshot_storages_and_slots,
                        slot,
                        capitalization,
                        base,
                        verify_config,
                    );
                    self.set_initial_accounts_hash_verification_completed();
                    is_ok
                }
            }
        }
    }
5643
    /// Specify that initial verification has completed.
    /// Called internally when verification runs in the foreground thread.
    /// Also has to be called by some tests which don't do verification on startup.
    pub fn set_initial_accounts_hash_verification_completed(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verification_complete();
    }
5654
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    pub fn has_initial_accounts_hash_verification_completed(&self) -> bool {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .check_complete()
    }
5665
5666    /// Get this bank's storages to use for snapshots.
5667    ///
5668    /// If a base slot is provided, return only the storages that are *higher* than this slot.
5669    pub fn get_snapshot_storages(&self, base_slot: Option<Slot>) -> Vec<Arc<AccountStorageEntry>> {
5670        // if a base slot is provided, request storages starting at the slot *after*
5671        let start_slot = base_slot.map_or(0, |slot| slot.saturating_add(1));
5672        // we want to *include* the storage at our slot
5673        let requested_slots = start_slot..=self.slot();
5674
5675        self.rc.accounts.accounts_db.get_storages(requested_slots).0
5676    }
5677
5678    #[must_use]
5679    fn verify_hash(&self) -> bool {
5680        assert!(self.is_frozen());
5681        let calculated_hash = self.hash_internal_state();
5682        let expected_hash = self.hash();
5683
5684        if calculated_hash == expected_hash {
5685            true
5686        } else {
5687            warn!(
5688                "verify failed: slot: {}, {} (calculated) != {} (expected)",
5689                self.slot(),
5690                calculated_hash,
5691                expected_hash
5692            );
5693            false
5694        }
5695    }
5696
5697    pub fn verify_transaction(
5698        &self,
5699        tx: VersionedTransaction,
5700        verification_mode: TransactionVerificationMode,
5701    ) -> Result<RuntimeTransaction<SanitizedTransaction>> {
5702        let sanitized_tx = {
5703            let size =
5704                bincode::serialized_size(&tx).map_err(|_| TransactionError::SanitizeFailure)?;
5705            if size > PACKET_DATA_SIZE as u64 {
5706                return Err(TransactionError::SanitizeFailure);
5707            }
5708            let message_hash = if verification_mode == TransactionVerificationMode::FullVerification
5709            {
5710                tx.verify_and_hash_message()?
5711            } else {
5712                tx.message.hash()
5713            };
5714
5715            RuntimeTransaction::try_create(
5716                tx,
5717                MessageHash::Precomputed(message_hash),
5718                None,
5719                self,
5720                self.get_reserved_account_keys(),
5721            )
5722        }?;
5723
5724        let move_precompile_verification_to_svm = self
5725            .feature_set
5726            .is_active(&feature_set::move_precompile_verification_to_svm::id());
5727        if !move_precompile_verification_to_svm && {
5728            verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
5729                || verification_mode == TransactionVerificationMode::FullVerification
5730        } {
5731            verify_precompiles(&sanitized_tx, &self.feature_set)?;
5732        }
5733
5734        Ok(sanitized_tx)
5735    }
5736
    /// Convenience wrapper for `verify_transaction` with
    /// `TransactionVerificationMode::FullVerification`.
    pub fn fully_verify_transaction(
        &self,
        tx: VersionedTransaction,
    ) -> Result<RuntimeTransaction<SanitizedTransaction>> {
        self.verify_transaction(tx, TransactionVerificationMode::FullVerification)
    }
5743
5744    /// Checks if the transaction violates the bank's reserved keys.
5745    /// This needs to be checked upon epoch boundary crosses because the
5746    /// reserved key set may have changed since the initial sanitization.
5747    pub fn check_reserved_keys(&self, tx: &impl SVMMessage) -> Result<()> {
5748        // Check keys against the reserved set - these failures simply require us
5749        // to re-sanitize the transaction. We do not need to drop the transaction.
5750        let reserved_keys = self.get_reserved_account_keys();
5751        for (index, key) in tx.account_keys().iter().enumerate() {
5752            if tx.is_writable(index) && reserved_keys.contains(key) {
5753                return Err(TransactionError::ResanitizationNeeded);
5754            }
5755        }
5756
5757        Ok(())
5758    }
5759
    /// only called from ledger-tool or tests
    ///
    /// Recalculates total capitalization by recomputing the accounts hash from
    /// the index, after first waiting for any in-flight background hash
    /// verification to finish.
    fn calculate_capitalization(&self, debug_verify: bool) -> u64 {
        let is_startup = true;
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread();
        self.rc
            .accounts
            .accounts_db
            .update_accounts_hash_with_verify_from(
                // we have to use the index since the slot could be in the write cache still
                CalcAccountsHashDataSource::IndexForTests,
                debug_verify,
                self.slot(),
                &self.ancestors,
                None,
                self.epoch_schedule(),
                &self.rent_collector,
                is_startup,
            )
            .1 // second tuple element is the computed capitalization
    }
5784
5785    /// only called from tests or ledger tool
5786    pub fn calculate_and_verify_capitalization(&self, debug_verify: bool) -> bool {
5787        let calculated = self.calculate_capitalization(debug_verify);
5788        let expected = self.capitalization();
5789        if calculated == expected {
5790            true
5791        } else {
5792            warn!(
5793                "Capitalization mismatch: calculated: {} != expected: {}",
5794                calculated, expected
5795            );
5796            false
5797        }
5798    }
5799
5800    /// Forcibly overwrites current capitalization by actually recalculating accounts' balances.
5801    /// This should only be used for developing purposes.
5802    pub fn set_capitalization(&self) -> u64 {
5803        let old = self.capitalization();
5804        // We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache.
5805        // debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot
5806        // for example.
5807        let debug_verify = false;
5808        self.capitalization
5809            .store(self.calculate_capitalization(debug_verify), Relaxed);
5810        old
5811    }
5812
5813    /// Returns the `AccountsHash` that was calculated for this bank's slot
5814    ///
5815    /// This fn is used when creating a snapshot with ledger-tool, or when
5816    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
5817    pub fn get_accounts_hash(&self) -> Option<AccountsHash> {
5818        self.rc
5819            .accounts
5820            .accounts_db
5821            .get_accounts_hash(self.slot())
5822            .map(|(accounts_hash, _)| accounts_hash)
5823    }
5824
5825    /// Returns the `IncrementalAccountsHash` that was calculated for this bank's slot
5826    ///
5827    /// This fn is used when creating an incremental snapshot with ledger-tool, or when
5828    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
5829    pub fn get_incremental_accounts_hash(&self) -> Option<IncrementalAccountsHash> {
5830        self.rc
5831            .accounts
5832            .accounts_db
5833            .get_incremental_accounts_hash(self.slot())
5834            .map(|(incremental_accounts_hash, _)| incremental_accounts_hash)
5835    }
5836
5837    /// Returns the `SnapshotHash` for this bank's slot
5838    ///
5839    /// This fn is used at startup to verify the bank was rebuilt correctly.
5840    ///
5841    /// # Panics
5842    ///
5843    /// If the snapshots lt hash feature is not enabled, panics if there is both-or-neither of an
5844    /// `AccountsHash` and an `IncrementalAccountsHash` for this bank's slot.  There may only be
5845    /// one or the other.
5846    pub fn get_snapshot_hash(&self) -> SnapshotHash {
5847        if self.is_snapshots_lt_hash_enabled() {
5848            self.get_lattice_snapshot_hash()
5849        } else {
5850            self.get_merkle_snapshot_hash()
5851        }
5852    }
5853
5854    /// Returns the merkle-based `SnapshotHash` for this bank's slot
5855    ///
5856    /// This fn is used at startup to verify the bank was rebuilt correctly.
5857    ///
5858    /// # Panics
5859    ///
5860    /// If the snapshots lt hash feature is not enabled, panics if there is both-or-neither of an
5861    /// `AccountsHash` and an `IncrementalAccountsHash` for this bank's slot.  There may only be
5862    /// one or the other.
5863    pub fn get_merkle_snapshot_hash(&self) -> SnapshotHash {
5864        let accounts_hash = self.get_accounts_hash();
5865        let incremental_accounts_hash = self.get_incremental_accounts_hash();
5866        let accounts_hash_kind = match (accounts_hash, incremental_accounts_hash) {
5867            (Some(_), Some(_)) => panic!("Both full and incremental accounts hashes are present for slot {}; it is ambiguous which one to use for the snapshot hash!", self.slot()),
5868            (Some(accounts_hash), None) => accounts_hash.into(),
5869            (None, Some(incremental_accounts_hash)) => incremental_accounts_hash.into(),
5870            (None, None) => panic!("accounts hash is required to get snapshot hash"),
5871        };
5872        let epoch_accounts_hash = self.get_epoch_accounts_hash_to_serialize();
5873        SnapshotHash::new(
5874            &MerkleOrLatticeAccountsHash::Merkle(accounts_hash_kind),
5875            epoch_accounts_hash.as_ref(),
5876            None,
5877        )
5878    }
5879
5880    /// Returns the lattice-based `SnapshotHash` for this bank's slot
5881    ///
5882    /// This fn is used at startup to verify the bank was rebuilt correctly.
5883    pub fn get_lattice_snapshot_hash(&self) -> SnapshotHash {
5884        SnapshotHash::new(
5885            &MerkleOrLatticeAccountsHash::Lattice,
5886            None,
5887            Some(self.accounts_lt_hash.lock().unwrap().0.checksum()),
5888        )
5889    }
5890
5891    pub fn load_account_into_read_cache(&self, key: &Pubkey) {
5892        self.rc
5893            .accounts
5894            .accounts_db
5895            .load_account_into_read_cache(&self.ancestors, key);
5896    }
5897
5898    pub fn update_accounts_hash(
5899        &self,
5900        data_source: CalcAccountsHashDataSource,
5901        mut debug_verify: bool,
5902        is_startup: bool,
5903    ) -> AccountsHash {
5904        let (accounts_hash, total_lamports) = self
5905            .rc
5906            .accounts
5907            .accounts_db
5908            .update_accounts_hash_with_verify_from(
5909                data_source,
5910                debug_verify,
5911                self.slot(),
5912                &self.ancestors,
5913                Some(self.capitalization()),
5914                self.epoch_schedule(),
5915                &self.rent_collector,
5916                is_startup,
5917            );
5918        if total_lamports != self.capitalization() {
5919            datapoint_info!(
5920                "capitalization_mismatch",
5921                ("slot", self.slot(), i64),
5922                ("calculated_lamports", total_lamports, i64),
5923                ("capitalization", self.capitalization(), i64),
5924            );
5925
5926            if !debug_verify {
5927                // cap mismatch detected. It has been logged to metrics above.
5928                // Run both versions of the calculation to attempt to get more info.
5929                debug_verify = true;
5930                self.rc
5931                    .accounts
5932                    .accounts_db
5933                    .update_accounts_hash_with_verify_from(
5934                        data_source,
5935                        debug_verify,
5936                        self.slot(),
5937                        &self.ancestors,
5938                        Some(self.capitalization()),
5939                        self.epoch_schedule(),
5940                        &self.rent_collector,
5941                        is_startup,
5942                    );
5943            }
5944
5945            panic!(
5946                "capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}",
5947                self.slot(),
5948                total_lamports,
5949                self.capitalization()
5950            );
5951        }
5952        accounts_hash
5953    }
5954
5955    /// Calculate the incremental accounts hash from `base_slot` to `self`
5956    pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash {
5957        let config = CalcAccountsHashConfig {
5958            use_bg_thread_pool: true,
5959            ancestors: None, // does not matter, will not be used
5960            epoch_schedule: &self.epoch_schedule,
5961            rent_collector: &self.rent_collector,
5962            store_detailed_debug_info_on_failure: false,
5963        };
5964        let storages = self.get_snapshot_storages(Some(base_slot));
5965        let sorted_storages = SortedStorages::new(&storages);
5966        self.rc
5967            .accounts
5968            .accounts_db
5969            .update_incremental_accounts_hash(
5970                &config,
5971                &sorted_storages,
5972                self.slot(),
5973                HashStats::default(),
5974            )
5975            .0
5976    }
5977
    /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
    /// calculation and could shield other real accounts.
    ///
    /// Verifies the accounts (possibly kicked off in the background), then
    /// cleans and shrinks storages unless skipped, and finally verifies the
    /// bank hash. Returns true only if both the accounts verification and the
    /// bank-hash verification succeed.
    pub fn verify_snapshot_bank(
        &self,
        test_hash_calculation: bool,
        skip_shrink: bool,
        force_clean: bool,
        latest_full_snapshot_slot: Slot,
        base: Option<(Slot, /*capitalization*/ u64)>,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        // If we verify the accounts using the lattice-based hash *and* with storages (as opposed
        // to the index), then we rely on the DuplicatesLtHash as given by generate_index().  Since
        // the duplicates are based on a specific set of storages, we must use the exact same
        // storages to do the lattice-based accounts verification.  This means we must wait to
        // clean/shrink until *after* we've gotten Arcs to the storages (this prevents their
        // untimely removal).  Simply, we call `verify_accounts_hash()` before we call `clean` or
        // `shrink`.
        let (verified_accounts, verify_accounts_time_us) = measure_us!({
            let should_verify_accounts = !self.rc.accounts.accounts_db.skip_initial_hash_calc;
            if should_verify_accounts {
                info!("Verifying accounts...");
                let verified = self.verify_accounts_hash(
                    base,
                    VerifyAccountsHashConfig {
                        test_hash_calculation,
                        ignore_mismatch: false,
                        require_rooted_bank: false,
                        run_in_background: true,
                        store_hash_raw_data_for_debug: false,
                    },
                    duplicates_lt_hash,
                );
                info!("Verifying accounts... In background.");
                verified
            } else {
                // Verification is skipped, but the background-verification
                // state must still be marked complete so later waiters are
                // not blocked.
                info!("Verifying accounts... Skipped.");
                self.rc
                    .accounts
                    .accounts_db
                    .verify_accounts_hash_in_bg
                    .verification_complete();
                true
            }
        });

        let (_, clean_time_us) = measure_us!({
            let should_clean = force_clean || (!skip_shrink && self.slot() > 0);
            if should_clean {
                info!("Cleaning...");
                // We cannot clean past the latest full snapshot's slot because we are about to
                // perform an accounts hash calculation *up to that slot*.  If we cleaned *past*
                // that slot, then accounts could be removed from older storages, which would
                // change the accounts hash.
                self.rc.accounts.accounts_db.clean_accounts(
                    Some(latest_full_snapshot_slot),
                    true,
                    self.epoch_schedule(),
                    self.clean_accounts_old_storages_policy(),
                );
                info!("Cleaning... Done.");
            } else {
                info!("Cleaning... Skipped.");
            }
        });

        let (_, shrink_time_us) = measure_us!({
            let should_shrink = !skip_shrink && self.slot() > 0;
            if should_shrink {
                info!("Shrinking...");
                self.rc.accounts.accounts_db.shrink_all_slots(
                    true,
                    self.epoch_schedule(),
                    // we cannot allow the snapshot slot to be shrunk
                    Some(self.slot()),
                );
                info!("Shrinking... Done.");
            } else {
                info!("Shrinking... Skipped.");
            }
        });

        info!("Verifying bank...");
        let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash());
        info!("Verifying bank... Done.");

        datapoint_info!(
            "verify_snapshot_bank",
            ("clean_us", clean_time_us, i64),
            ("shrink_us", shrink_time_us, i64),
            ("verify_accounts_us", verify_accounts_time_us, i64),
            ("verify_bank_us", verify_bank_time_us, i64),
        );

        verified_accounts && verified_bank
    }
6074
    /// Return the number of hashes per tick
    ///
    /// Shared reference to the bank's configured value; `None` presumably
    /// means no hash count is enforced per tick — NOTE(review): confirm
    /// against PoH configuration.
    pub fn hashes_per_tick(&self) -> &Option<u64> {
        &self.hashes_per_tick
    }
6079
    /// Return the number of ticks per slot
    ///
    /// Plain field read; the value is set when the bank is constructed.
    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
6084
    /// Return the number of slots per year
    ///
    /// Plain field read; the value is set when the bank is constructed.
    pub fn slots_per_year(&self) -> f64 {
        self.slots_per_year
    }
6089
    /// Return the number of ticks since genesis.
    ///
    /// Reads the atomic tick counter with `Relaxed` ordering.
    pub fn tick_height(&self) -> u64 {
        self.tick_height.load(Relaxed)
    }
6094
    /// Return the inflation parameters of the Bank
    ///
    /// Copies the value out from under the `RwLock` read guard; panics only if
    /// the lock is poisoned.
    pub fn inflation(&self) -> Inflation {
        *self.inflation.read().unwrap()
    }
6099
    /// Return the rent collector for this Bank
    ///
    /// Borrowed for the lifetime of the bank reference.
    pub fn rent_collector(&self) -> &RentCollector {
        &self.rent_collector
    }
6104
    /// Return the total capitalization of the Bank
    ///
    /// Reads the atomic counter with `Relaxed` ordering.
    pub fn capitalization(&self) -> u64 {
        self.capitalization.load(Relaxed)
    }
6109
    /// Return this bank's max_tick_height
    ///
    /// Plain field read; the value is set when the bank is constructed.
    pub fn max_tick_height(&self) -> u64 {
        self.max_tick_height
    }
6114
    /// Return the block_height of this bank
    ///
    /// Plain field read; the value is set when the bank is constructed.
    pub fn block_height(&self) -> u64 {
        self.block_height
    }
6119
    /// Return the number of slots per epoch for the given epoch
    ///
    /// Delegates to this bank's `EpochSchedule`.
    pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
        self.epoch_schedule().get_slots_in_epoch(epoch)
    }
6124
    /// returns the epoch for which this bank's leader_schedule_slot_offset and slot would
    ///  need to cache leader_schedule
    ///
    /// Delegates to this bank's `EpochSchedule`.
    pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch {
        self.epoch_schedule().get_leader_schedule_epoch(slot)
    }
6130
6131    /// a bank-level cache of vote accounts and stake delegation info
6132    fn update_stakes_cache(
6133        &self,
6134        txs: &[impl SVMMessage],
6135        processing_results: &[TransactionProcessingResult],
6136    ) {
6137        debug_assert_eq!(txs.len(), processing_results.len());
6138        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
6139        txs.iter()
6140            .zip(processing_results)
6141            .filter_map(|(tx, processing_result)| {
6142                processing_result
6143                    .processed_transaction()
6144                    .map(|processed_tx| (tx, processed_tx))
6145            })
6146            .filter_map(|(tx, processed_tx)| {
6147                processed_tx
6148                    .executed_transaction()
6149                    .map(|executed_tx| (tx, executed_tx))
6150            })
6151            .filter(|(_, executed_tx)| executed_tx.was_successful())
6152            .flat_map(|(tx, executed_tx)| {
6153                let num_account_keys = tx.account_keys().len();
6154                let loaded_tx = &executed_tx.loaded_transaction;
6155                loaded_tx.accounts.iter().take(num_account_keys)
6156            })
6157            .for_each(|(pubkey, account)| {
6158                // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us,
6159                //  but this code path is captured separately in ExecuteTimingType::UpdateStakesCacheUs
6160                self.stakes_cache
6161                    .check_and_store(pubkey, account, new_warmup_cooldown_rate_epoch);
6162            });
6163    }
6164
6165    /// current vote accounts for this bank along with the stake
6166    ///   attributed to each account
6167    pub fn vote_accounts(&self) -> Arc<VoteAccountsHashMap> {
6168        let stakes = self.stakes_cache.stakes();
6169        Arc::from(stakes.vote_accounts())
6170    }
6171
6172    /// Vote account for the given vote account pubkey.
6173    pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<VoteAccount> {
6174        let stakes = self.stakes_cache.stakes();
6175        let vote_account = stakes.vote_accounts().get(vote_account)?;
6176        Some(vote_account.clone())
6177    }
6178
    /// Get the EpochStakes for the current Bank::epoch
    ///
    /// # Panics
    ///
    /// Panics if the entry keyed by the leader-schedule epoch (current epoch
    /// + 1) is missing from `self.epoch_stakes`.
    pub fn current_epoch_stakes(&self) -> &EpochStakes {
        // The stakes for a given epoch (E) in self.epoch_stakes are keyed by leader schedule epoch
        // (E + 1) so the stakes for the current epoch are stored at self.epoch_stakes[E + 1]
        self.epoch_stakes
            .get(&self.epoch.saturating_add(1))
            .expect("Current epoch stakes must exist")
    }
6187
    /// Get the EpochStakes for a given epoch
    ///
    /// Returns `None` when this bank retains no stakes entry for `epoch`.
    pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&EpochStakes> {
        self.epoch_stakes.get(&epoch)
    }
6192
    /// The full map of retained `EpochStakes`, keyed by leader-schedule epoch
    /// (see `current_epoch_stakes`).
    pub fn epoch_stakes_map(&self) -> &HashMap<Epoch, EpochStakes> {
        &self.epoch_stakes
    }
6196
    /// Get the staked nodes map for the current Bank::epoch
    ///
    /// # Panics
    ///
    /// Panics if current-epoch stakes are missing (see `current_epoch_stakes`).
    pub fn current_epoch_staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
        self.current_epoch_stakes().stakes().staked_nodes()
    }
6201
6202    pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
6203        Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
6204    }
6205
6206    /// Get the total epoch stake for the given epoch.
6207    pub fn epoch_total_stake(&self, epoch: Epoch) -> Option<u64> {
6208        self.epoch_stakes
6209            .get(&epoch)
6210            .map(|epoch_stakes| epoch_stakes.total_stake())
6211    }
6212
    /// Get the total epoch stake for the current Bank::epoch
    ///
    /// # Panics
    ///
    /// Panics if current-epoch stakes are missing (see `current_epoch_stakes`).
    pub fn get_current_epoch_total_stake(&self) -> u64 {
        self.current_epoch_stakes().total_stake()
    }
6217
6218    /// vote accounts for the specific epoch along with the stake
6219    ///   attributed to each account
6220    pub fn epoch_vote_accounts(&self, epoch: Epoch) -> Option<&VoteAccountsHashMap> {
6221        let epoch_stakes = self.epoch_stakes.get(&epoch)?.stakes();
6222        Some(epoch_stakes.vote_accounts().as_ref())
6223    }
6224
    /// Get the vote accounts along with the stake attributed to each account
    /// for the current Bank::epoch
    ///
    /// # Panics
    ///
    /// Panics if current-epoch stakes are missing (see `current_epoch_stakes`).
    pub fn get_current_epoch_vote_accounts(&self) -> &VoteAccountsHashMap {
        self.current_epoch_stakes()
            .stakes()
            .vote_accounts()
            .as_ref()
    }
6233
6234    /// Get the fixed authorized voter for the given vote account for the
6235    /// current epoch
6236    pub fn epoch_authorized_voter(&self, vote_account: &Pubkey) -> Option<&Pubkey> {
6237        self.epoch_stakes
6238            .get(&self.epoch)
6239            .expect("Epoch stakes for bank's own epoch must exist")
6240            .epoch_authorized_voters()
6241            .get(vote_account)
6242    }
6243
6244    /// Get the fixed set of vote accounts for the given node id for the
6245    /// current epoch
6246    pub fn epoch_vote_accounts_for_node_id(&self, node_id: &Pubkey) -> Option<&NodeVoteAccounts> {
6247        self.epoch_stakes
6248            .get(&self.epoch)
6249            .expect("Epoch stakes for bank's own epoch must exist")
6250            .node_id_to_vote_accounts()
6251            .get(node_id)
6252    }
6253
6254    /// Get the total stake belonging to vote accounts associated with the given node id for the
6255    /// given epoch.
6256    pub fn epoch_node_id_to_stake(&self, epoch: Epoch, node_id: &Pubkey) -> Option<u64> {
6257        self.epoch_stakes(epoch)
6258            .and_then(|epoch_stakes| epoch_stakes.node_id_to_stake(node_id))
6259    }
6260
6261    /// Get the fixed total stake of all vote accounts for current epoch
6262    pub fn total_epoch_stake(&self) -> u64 {
6263        self.epoch_stakes
6264            .get(&self.epoch)
6265            .expect("Epoch stakes for bank's own epoch must exist")
6266            .total_stake()
6267    }
6268
6269    /// Get the fixed stake of the given vote account for the current epoch
6270    pub fn epoch_vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
6271        *self
6272            .epoch_vote_accounts(self.epoch())
6273            .expect("Bank epoch vote accounts must contain entry for the bank's own epoch")
6274            .get(vote_account)
6275            .map(|(stake, _)| stake)
6276            .unwrap_or(&0)
6277    }
6278
    /// given a slot, return the epoch and offset into the epoch this slot falls
    /// e.g. with a fixed number for slots_per_epoch, the calculation is simply:
    ///
    ///  ( slot/slots_per_epoch, slot % slots_per_epoch )
    ///
    /// Delegates to this bank's `EpochSchedule`.
    pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) {
        self.epoch_schedule().get_epoch_and_slot_index(slot)
    }
6287
6288    pub fn get_epoch_info(&self) -> EpochInfo {
6289        let absolute_slot = self.slot();
6290        let block_height = self.block_height();
6291        let (epoch, slot_index) = self.get_epoch_and_slot_index(absolute_slot);
6292        let slots_in_epoch = self.get_slots_in_epoch(epoch);
6293        let transaction_count = Some(self.transaction_count());
6294        EpochInfo {
6295            epoch,
6296            slot_index,
6297            slots_in_epoch,
6298            absolute_slot,
6299            block_height,
6300            transaction_count,
6301        }
6302    }
6303
    /// True when the `is_delta` flag is unset (read with `Relaxed` ordering),
    /// i.e. presumably no transactions have been committed to this bank —
    /// NOTE(review): confirm where `is_delta` is set.
    pub fn is_empty(&self) -> bool {
        !self.is_delta.load(Relaxed)
    }
6307
    /// Registers `builtin_function` as a builtin program at `program_id`,
    /// cached under the name "mockup" at this bank's slot (test/mock usage).
    pub fn add_mockup_builtin(
        &mut self,
        program_id: Pubkey,
        builtin_function: BuiltinFunctionWithContext,
    ) {
        self.transaction_processor.add_builtin(
            self,
            program_id,
            "mockup",
            ProgramCacheEntry::new_builtin(self.slot, 0, builtin_function),
        );
    }
6320
    /// Creates the account for the precompiled program at `program_id`.
    pub fn add_precompile(&mut self, program_id: &Pubkey) {
        debug!("Adding precompiled program {}", program_id);
        self.add_precompiled_account(program_id);
        debug!("Added precompiled program {:?}", program_id);
    }
6326
    // Call AccountsDb::clean_accounts()
    //
    // This fn is meant to be called by the snapshot handler in Accounts Background Service.  If
    // calling from elsewhere, ensure the same invariants hold/expectations are met.
    pub(crate) fn clean_accounts(&self) {
        // Don't clean the slot we're snapshotting because it may have zero-lamport
        // accounts that were included in the bank delta hash when the bank was frozen,
        // and if we clean them here, any newly created snapshot's hash for this bank
        // may not match the frozen hash.
        //
        // So when we're snapshotting, the highest slot to clean is lowered by one.
        // saturating_sub: slot 0 (genesis) clamps to 0 instead of underflowing.
        let highest_slot_to_clean = self.slot().saturating_sub(1);

        self.rc.accounts.accounts_db.clean_accounts(
            Some(highest_slot_to_clean),
            // second arg is `true` on the snapshot-verification path
            // (verify_snapshot_bank) — NOTE(review): confirm the parameter's
            // meaning against AccountsDb::clean_accounts.
            false,
            self.epoch_schedule(),
            self.clean_accounts_old_storages_policy(),
        );
    }
6347
    /// Debug helper: prints accounts-db statistics with an empty prefix label.
    pub fn print_accounts_stats(&self) {
        self.rc.accounts.accounts_db.print_accounts_stats("");
    }
6351
    /// Runs accounts-db shrink on its current candidate slots; returns the
    /// value reported by `AccountsDb::shrink_candidate_slots` (presumably the
    /// number of slots shrunk — NOTE(review): confirm).
    pub fn shrink_candidate_slots(&self) -> usize {
        self.rc
            .accounts
            .accounts_db
            .shrink_candidate_slots(self.epoch_schedule())
    }
6358
6359    pub(crate) fn shrink_ancient_slots(&self) {
6360        // Invoke ancient slot shrinking only when the validator is
6361        // explicitly configured to do so. This condition may be
6362        // removed when the skip rewrites feature is enabled.
6363        if self.are_ancient_storages_enabled() {
6364            self.rc
6365                .accounts
6366                .accounts_db
6367                .shrink_ancient_slots(self.epoch_schedule())
6368        }
6369    }
6370
6371    /// Returns if ancient storages are enabled or not
6372    pub fn are_ancient_storages_enabled(&self) -> bool {
6373        let can_skip_rewrites = self.bank_hash_skips_rent_rewrites();
6374        let test_skip_rewrites_but_include_in_bank_hash = self
6375            .rc
6376            .accounts
6377            .accounts_db
6378            .test_skip_rewrites_but_include_in_bank_hash;
6379        can_skip_rewrites || test_skip_rewrites_but_include_in_bank_hash
6380    }
6381
6382    /// Returns how clean_accounts() should handle old storages
6383    fn clean_accounts_old_storages_policy(&self) -> OldStoragesPolicy {
6384        if self.are_ancient_storages_enabled() {
6385            OldStoragesPolicy::Leave
6386        } else {
6387            OldStoragesPolicy::Clean
6388        }
6389    }
6390
    /// Acquires a read lock on this bank's cost tracker.
    pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
        self.cost_tracker.read()
    }
6394
    /// Acquires a write lock on this bank's cost tracker.
    pub fn write_cost_tracker(&self) -> LockResult<RwLockWriteGuard<CostTracker>> {
        self.cost_tracker.write()
    }
6398
6399    // Check if the wallclock time from bank creation to now has exceeded the allotted
6400    // time for transaction processing
6401    pub fn should_bank_still_be_processing_txs(
6402        bank_creation_time: &Instant,
6403        max_tx_ingestion_nanos: u128,
6404    ) -> bool {
6405        // Do this check outside of the PoH lock, hence not a method on PohRecorder
6406        bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos
6407    }
6408
6409    pub fn deactivate_feature(&mut self, id: &Pubkey) {
6410        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
6411        feature_set.active_mut().remove(id);
6412        feature_set.inactive_mut().insert(*id);
6413        self.feature_set = Arc::new(feature_set);
6414    }
6415
6416    pub fn activate_feature(&mut self, id: &Pubkey) {
6417        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
6418        feature_set.inactive_mut().remove(id);
6419        feature_set.active_mut().insert(*id, 0);
6420        self.feature_set = Arc::new(feature_set);
6421    }
6422
    /// Test helper: registers ticks (with no scheduler installed) until the
    /// bank's blockhash advances; see `do_fill_bank_with_ticks_for_tests`.
    pub fn fill_bank_with_ticks_for_tests(&self) {
        self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available())
    }
6426
6427    pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) {
6428        if self.tick_height.load(Relaxed) < self.max_tick_height {
6429            let last_blockhash = self.last_blockhash();
6430            while self.last_blockhash() == last_blockhash {
6431                self.register_tick(&Hash::new_unique(), scheduler)
6432            }
6433        } else {
6434            warn!("Bank already reached max tick height, cannot fill it with more ticks");
6435        }
6436    }
6437
    /// Get a set of all actively reserved account keys that are not allowed to
    /// be write-locked during transaction processing.
    ///
    /// Returns only the *active* subset of the reserved keys.
    pub fn get_reserved_account_keys(&self) -> &HashSet<Pubkey> {
        &self.reserved_account_keys.active
    }
6443
6444    // This is called from snapshot restore AND for each epoch boundary
6445    // The entire code path herein must be idempotent
6446    fn apply_feature_activations(
6447        &mut self,
6448        caller: ApplyFeatureActivationsCaller,
6449        debug_do_not_add_builtins: bool,
6450    ) {
6451        use ApplyFeatureActivationsCaller as Caller;
6452        let allow_new_activations = match caller {
6453            Caller::FinishInit => false,
6454            Caller::NewFromParent => true,
6455            Caller::WarpFromParent => false,
6456        };
6457        let (feature_set, new_feature_activations) =
6458            self.compute_active_feature_set(allow_new_activations);
6459        self.feature_set = Arc::new(feature_set);
6460
6461        // Update activation slot of features in `new_feature_activations`
6462        for feature_id in new_feature_activations.iter() {
6463            if let Some(mut account) = self.get_account_with_fixed_root(feature_id) {
6464                if let Some(mut feature) = feature::from_account(&account) {
6465                    feature.activated_at = Some(self.slot());
6466                    if feature::to_account(&feature, &mut account).is_some() {
6467                        self.store_account(feature_id, &account);
6468                    }
6469                    info!("Feature {} activated at slot {}", feature_id, self.slot());
6470                }
6471            }
6472        }
6473
6474        // Update active set of reserved account keys which are not allowed to be write locked
6475        self.reserved_account_keys = {
6476            let mut reserved_keys = ReservedAccountKeys::clone(&self.reserved_account_keys);
6477            reserved_keys.update_active_set(&self.feature_set);
6478            Arc::new(reserved_keys)
6479        };
6480
6481        if new_feature_activations.contains(&feature_set::pico_inflation::id()) {
6482            *self.inflation.write().unwrap() = Inflation::pico();
6483            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
6484            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
6485        }
6486
6487        if !new_feature_activations.is_disjoint(&self.feature_set.full_inflation_features_enabled())
6488        {
6489            *self.inflation.write().unwrap() = Inflation::full();
6490            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
6491            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
6492        }
6493
6494        if !debug_do_not_add_builtins {
6495            self.apply_builtin_program_feature_transitions(
6496                allow_new_activations,
6497                &new_feature_activations,
6498            );
6499        }
6500
6501        if new_feature_activations.contains(&feature_set::update_hashes_per_tick::id()) {
6502            self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK);
6503        }
6504
6505        if new_feature_activations.contains(&feature_set::update_hashes_per_tick2::id()) {
6506            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK2);
6507        }
6508
6509        if new_feature_activations.contains(&feature_set::update_hashes_per_tick3::id()) {
6510            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK3);
6511        }
6512
6513        if new_feature_activations.contains(&feature_set::update_hashes_per_tick4::id()) {
6514            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK4);
6515        }
6516
6517        if new_feature_activations.contains(&feature_set::update_hashes_per_tick5::id()) {
6518            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK5);
6519        }
6520
6521        if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) {
6522            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6);
6523        }
6524
6525        if new_feature_activations.contains(&feature_set::accounts_lt_hash::id()) {
6526            // Activating the accounts lt hash feature means we need to have an accounts lt hash
6527            // value at the end of this if-block.  If the cli arg has been used, that means we
6528            // already have an accounts lt hash and do not need to recalculate it.
6529            if self
6530                .rc
6531                .accounts
6532                .accounts_db
6533                .is_experimental_accumulator_hash_enabled()
6534            {
6535                // We already have an accounts lt hash value, so no need to recalculate it.
6536                // Nothing else to do here.
6537            } else {
6538                let parent_slot = self.parent_slot;
6539                info!(
6540                    "Calculating the accounts lt hash for slot {parent_slot} \
6541                     as part of feature activation; this may take some time...",
6542                );
6543                // We must calculate the accounts lt hash now as part of feature activation.
6544                // Note, this bank is *not* frozen yet, which means it will later call
6545                // `update_accounts_lt_hash()`.  Therefore, we calculate the accounts lt hash based
6546                // on *our parent*, not us!
6547                let parent_ancestors = {
6548                    let mut ancestors = self.ancestors.clone();
6549                    ancestors.remove(&self.slot());
6550                    ancestors
6551                };
6552                let (parent_accounts_lt_hash, duration) = meas_dur!({
6553                    self.rc
6554                        .accounts
6555                        .accounts_db
6556                        .calculate_accounts_lt_hash_at_startup_from_index(
6557                            &parent_ancestors,
6558                            parent_slot,
6559                        )
6560                });
6561                *self.accounts_lt_hash.get_mut().unwrap() = parent_accounts_lt_hash;
6562                info!(
6563                    "Calculating the accounts lt hash for slot {parent_slot} \
6564                     completed in {duration:?}, accounts_lt_hash checksum: {}",
6565                    self.accounts_lt_hash.get_mut().unwrap().0.checksum(),
6566                );
6567            }
6568        }
6569
6570        if new_feature_activations.contains(&feature_set::raise_block_limits_to_50m::id())
6571            && !self
6572                .feature_set
6573                .is_active(&feature_set::raise_block_limits_to_60m::id())
6574        {
6575            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits();
6576            self.write_cost_tracker().unwrap().set_limits(
6577                account_cost_limit,
6578                block_cost_limit,
6579                vote_cost_limit,
6580            );
6581        }
6582
6583        if new_feature_activations.contains(&feature_set::raise_block_limits_to_60m::id()) {
6584            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0256_block_limits();
6585            self.write_cost_tracker().unwrap().set_limits(
6586                account_cost_limit,
6587                block_cost_limit,
6588                vote_cost_limit,
6589            );
6590        }
6591
6592        if new_feature_activations.contains(&feature_set::remove_accounts_delta_hash::id()) {
6593            // If the accounts delta hash has been removed, then we no longer need to compute the
6594            // AccountHash for modified accounts, and can stop the background account hasher.
6595            self.rc.accounts.accounts_db.stop_background_hasher();
6596        }
6597    }
6598
    /// Record a new `hashes_per_tick` value on this bank.
    ///
    /// Called from the `update_hashes_per_tick*` feature-activation branches in
    /// `apply_feature_activations`; logs the transition before storing it.
    fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) {
        info!(
            "Activating update_hashes_per_tick {} at slot {}",
            hashes_per_tick,
            self.slot(),
        );
        self.hashes_per_tick = Some(hashes_per_tick);
    }
6607
6608    fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
6609        account.set_lamports(
6610            self.get_minimum_balance_for_rent_exemption(account.data().len())
6611                .max(account.lamports()),
6612        );
6613    }
6614
6615    /// Compute the active feature set based on the current bank state,
6616    /// and return it together with the set of newly activated features.
6617    fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, AHashSet<Pubkey>) {
6618        let mut active = self.feature_set.active().clone();
6619        let mut inactive = AHashSet::new();
6620        let mut pending = AHashSet::new();
6621        let slot = self.slot();
6622
6623        for feature_id in self.feature_set.inactive() {
6624            let mut activated = None;
6625            if let Some(account) = self.get_account_with_fixed_root(feature_id) {
6626                if let Some(feature) = feature::from_account(&account) {
6627                    match feature.activated_at {
6628                        None if include_pending => {
6629                            // Feature activation is pending
6630                            pending.insert(*feature_id);
6631                            activated = Some(slot);
6632                        }
6633                        Some(activation_slot) if slot >= activation_slot => {
6634                            // Feature has been activated already
6635                            activated = Some(activation_slot);
6636                        }
6637                        _ => {}
6638                    }
6639                }
6640            }
6641            if let Some(slot) = activated {
6642                active.insert(*feature_id, slot);
6643            } else {
6644                inactive.insert(*feature_id);
6645            }
6646        }
6647
6648        (FeatureSet::new(active, inactive), pending)
6649    }
6650
    /// Apply builtin-program enablement and Core BPF migrations driven by
    /// feature transitions.
    ///
    /// When `only_apply_transitions_for_new_features` is true, builtins are
    /// enabled only if their enable feature is in `new_feature_activations`;
    /// otherwise any builtin whose enable feature is active in the bank's
    /// feature set is (re-)added.  Also migrates stateless builtins and adds
    /// precompiles whose gating feature is active.
    fn apply_builtin_program_feature_transitions(
        &mut self,
        only_apply_transitions_for_new_features: bool,
        new_feature_activations: &AHashSet<Pubkey>,
    ) {
        for builtin in BUILTINS.iter() {
            // The `builtin_is_bpf` flag is used to handle the case where a
            // builtin is scheduled to be enabled by one feature gate and
            // later migrated to Core BPF by another.
            //
            // There should never be a case where a builtin is set to be
            // migrated to Core BPF and is also set to be enabled on feature
            // activation on the same feature gate. However, the
            // `builtin_is_bpf` flag will handle this case as well, electing
            // to first attempt the migration to Core BPF.
            //
            // The migration to Core BPF will fail gracefully because the
            // program account will not exist. The builtin will subsequently
            // be enabled, but it will never be migrated to Core BPF.
            //
            // Using the same feature gate for both enabling and migrating a
            // builtin to Core BPF should be strictly avoided.
            let mut builtin_is_bpf = false;
            if let Some(core_bpf_migration_config) = &builtin.core_bpf_migration_config {
                // If the builtin is set to be migrated to Core BPF on feature
                // activation, perform the migration and do not add the program
                // to the bank's builtins. The migration will remove it from
                // the builtins list and the cache.
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self
                        .migrate_builtin_to_core_bpf(&builtin.program_id, core_bpf_migration_config)
                    {
                        warn!(
                            "Failed to migrate builtin {} to Core BPF: {}",
                            builtin.name, e
                        );
                    } else {
                        builtin_is_bpf = true;
                    }
                } else {
                    // If the builtin has already been migrated to Core BPF, do not
                    // add it to the bank's builtins.
                    // (Detected by the program account being owned by the
                    // upgradeable BPF loader.)
                    builtin_is_bpf = self
                        .get_account(&builtin.program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false);
                }
            };

            if let Some(feature_id) = builtin.enable_feature_id {
                let should_enable_builtin_on_feature_transition = !builtin_is_bpf
                    && if only_apply_transitions_for_new_features {
                        new_feature_activations.contains(&feature_id)
                    } else {
                        self.feature_set.is_active(&feature_id)
                    };

                if should_enable_builtin_on_feature_transition {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(
                            self.feature_set.activated_slot(&feature_id).unwrap_or(0),
                            builtin.name.len(),
                            builtin.entrypoint,
                        ),
                    );
                }
            }
        }

        // Migrate any necessary stateless builtins to core BPF.
        // Stateless builtins do not have an `enable_feature_id` since they
        // do not exist on-chain.
        for stateless_builtin in STATELESS_BUILTINS.iter() {
            if let Some(core_bpf_migration_config) = &stateless_builtin.core_bpf_migration_config {
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self.migrate_builtin_to_core_bpf(
                        &stateless_builtin.program_id,
                        core_bpf_migration_config,
                    ) {
                        warn!(
                            "Failed to migrate stateless builtin {} to Core BPF: {}",
                            stateless_builtin.name, e
                        );
                    }
                }
            }
        }

        // Note: precompiles are added whenever their gating feature is active,
        // regardless of `only_apply_transitions_for_new_features`; this relies
        // on `add_precompile` being safe to call repeatedly (presumably
        // idempotent — confirm against its implementation).
        for precompile in get_precompiles() {
            let should_add_precompile = precompile
                .feature
                .as_ref()
                .map(|feature_id| self.feature_set.is_active(feature_id))
                .unwrap_or(false);
            if should_add_precompile {
                self.add_precompile(&precompile.program_id);
            }
        }
    }
6753
6754    /// Use to replace programs by feature activation
6755    #[allow(dead_code)]
6756    fn replace_program_account(
6757        &mut self,
6758        old_address: &Pubkey,
6759        new_address: &Pubkey,
6760        datapoint_name: &'static str,
6761    ) {
6762        if let Some(old_account) = self.get_account_with_fixed_root(old_address) {
6763            if let Some(new_account) = self.get_account_with_fixed_root(new_address) {
6764                datapoint_info!(datapoint_name, ("slot", self.slot, i64));
6765
6766                // Burn lamports in the old account
6767                self.capitalization
6768                    .fetch_sub(old_account.lamports(), Relaxed);
6769
6770                // Transfer new account to old account
6771                self.store_account(old_address, &new_account);
6772
6773                // Clear new account
6774                self.store_account(new_address, &AccountSharedData::default());
6775
6776                // Unload a program from the bank's cache
6777                self.transaction_processor
6778                    .program_cache
6779                    .write()
6780                    .unwrap()
6781                    .remove_programs([*old_address].into_iter());
6782
6783                self.calculate_and_update_accounts_data_size_delta_off_chain(
6784                    old_account.data().len(),
6785                    new_account.data().len(),
6786                );
6787            }
6788        }
6789    }
6790
    /// Get all the accounts for this bank and calculate stats
    ///
    /// Scans every account via `get_all_accounts` (the `false` argument is
    /// forwarded there; see that fn for its meaning) and accumulates
    /// per-account statistics, propagating any scan error.
    pub fn get_total_accounts_stats(&self) -> ScanResult<TotalAccountsStats> {
        let accounts = self.get_all_accounts(false)?;
        Ok(self.calculate_total_accounts_stats(
            accounts
                .iter()
                .map(|(pubkey, account, _slot)| (pubkey, account)),
        ))
    }
6800
6801    /// Given all the accounts for a bank, calculate stats
6802    pub fn calculate_total_accounts_stats<'a>(
6803        &self,
6804        accounts: impl Iterator<Item = (&'a Pubkey, &'a AccountSharedData)>,
6805    ) -> TotalAccountsStats {
6806        let rent_collector = self.rent_collector();
6807        let mut total_accounts_stats = TotalAccountsStats::default();
6808        accounts.for_each(|(pubkey, account)| {
6809            total_accounts_stats.accumulate_account(pubkey, account, rent_collector);
6810        });
6811
6812        total_accounts_stats
6813    }
6814
    /// Must a snapshot of this bank include the EAH?
    ///
    /// True only when the EAH is enabled for this epoch AND this bank falls
    /// inside the EAH calculation window.
    pub fn must_include_epoch_accounts_hash_in_snapshot(&self) -> bool {
        epoch_accounts_hash_utils::is_enabled_this_epoch(self)
            && epoch_accounts_hash_utils::is_in_calculation_window(self)
    }
6820
6821    /// Get the EAH that will be used by snapshots
6822    ///
6823    /// Since snapshots are taken on roots, if the bank is in the EAH calculation window then an
6824    /// EAH *must* be included.  This means if an EAH calculation is currently in-flight we will
6825    /// wait for it to complete.
6826    pub fn get_epoch_accounts_hash_to_serialize(&self) -> Option<EpochAccountsHash> {
6827        if !self.must_include_epoch_accounts_hash_in_snapshot() {
6828            return None;
6829        }
6830
6831        let (epoch_accounts_hash, waiting_time_us) = measure_us!(self
6832            .rc
6833            .accounts
6834            .accounts_db
6835            .epoch_accounts_hash_manager
6836            .wait_get_epoch_accounts_hash());
6837
6838        datapoint_info!(
6839            "bank-get_epoch_accounts_hash_to_serialize",
6840            ("slot", self.slot(), i64),
6841            ("waiting-time-us", waiting_time_us, i64),
6842        );
6843        Some(epoch_accounts_hash)
6844    }
6845
    /// Convenience fn to get the Epoch Accounts Hash
    ///
    /// Uses the manager's `try_get_epoch_accounts_hash`, returning `None` when
    /// no hash is available (contrast with
    /// `get_epoch_accounts_hash_to_serialize`, which calls the waiting
    /// variant).
    pub fn epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
        self.rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .try_get_epoch_accounts_hash()
    }
6854
6855    pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool {
6856        if slot < &self.slot {
6857            if let Ok(slot_hashes) = self.transaction_processor.sysvar_cache().get_slot_hashes() {
6858                return slot_hashes.get(slot).is_some();
6859            }
6860        }
6861        false
6862    }
6863
    /// Returns the `check_program_modification_slot` flag.
    pub fn check_program_modification_slot(&self) -> bool {
        self.check_program_modification_slot
    }
6867
    /// Sets the `check_program_modification_slot` flag.
    pub fn set_check_program_modification_slot(&mut self, check: bool) {
        self.check_program_modification_slot = check;
    }
6871
    /// Returns a reference to this bank's `FeeStructure`.
    pub fn fee_structure(&self) -> &FeeStructure {
        &self.fee_structure
    }
6875
    /// Returns this bank's block id, if one has been set.
    pub fn block_id(&self) -> Option<Hash> {
        *self.block_id.read().unwrap()
    }
6879
    /// Sets (or clears, with `None`) this bank's block id.
    pub fn set_block_id(&self, block_id: Option<Hash>) {
        *self.block_id.write().unwrap() = block_id;
    }
6883
    /// Returns this bank's compute budget, if one is set.
    pub fn compute_budget(&self) -> Option<ComputeBudget> {
        self.compute_budget
    }
6887
    /// Register a builtin program with this bank's transaction processor.
    pub fn add_builtin(&self, program_id: Pubkey, name: &str, builtin: ProgramCacheEntry) {
        self.transaction_processor
            .add_builtin(self, program_id, name, builtin)
    }
6892
    /// Returns this bank's `BankHashStats`.
    pub fn get_bank_hash_stats(&self) -> BankHashStats {
        self.bank_hash_stats.load()
    }
6896}
6897
impl TransactionProcessingCallback for Bank {
    /// Delegate to the accounts db owner-matching lookup for `account`,
    /// flattening any lookup error to `None`.
    fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
        self.rc
            .accounts
            .accounts_db
            .account_matches_owners(&self.ancestors, account, owners)
            .ok()
    }

    /// Load `pubkey`'s account through this bank's ancestors, discarding the
    /// accompanying slot.
    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
        self.rc
            .accounts
            .accounts_db
            .load_with_fixed_root(&self.ancestors, pubkey)
            .map(|(acc, _)| acc)
    }

    // NOTE: must hold idempotent for the same set of arguments
    /// Add a builtin program account
    ///
    /// If a genuine (native-loader-owned) account already exists at
    /// `program_id`, it is left untouched.  A non-native-loader account at
    /// that address is burned and purged before a fresh builtin account is
    /// stored.  Panics if the bank is already freezing and a new account
    /// would have to be created.
    fn add_builtin_account(&self, name: &str, program_id: &Pubkey) {
        let existing_genuine_program =
            self.get_account_with_fixed_root(program_id)
                .and_then(|account| {
                    // it's very unlikely to be squatted at program_id as non-system account because of burden to
                    // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's
                    // safe to assume it's a genuine program.
                    if native_loader::check_id(account.owner()) {
                        Some(account)
                    } else {
                        // malicious account is pre-occupying at program_id
                        self.burn_and_purge_account(program_id, account);
                        None
                    }
                });

        // introducing builtin program
        if existing_genuine_program.is_some() {
            // The existing account is sufficient
            return;
        }

        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new builtin program ({name}, {program_id}). \
            Maybe, inconsistent program activation is detected on snapshot restore?"
        );

        // Add a bogus executable builtin account, which will be loaded and ignored.
        let account = native_loader::create_loadable_account_with_fields(
            name,
            self.inherit_specially_retained_account_fields(&existing_genuine_program),
        );
        self.store_account_and_update_capitalization(program_id, &account);
    }

    /// Feed an inspected account into the accounts-lt-hash bookkeeping, but
    /// only when that feature is enabled on this bank.
    fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) {
        if self.is_accounts_lt_hash_enabled() {
            self.inspect_account_for_accounts_lt_hash(address, &account_state, is_writable);
        }
    }

    /// Return the stake associated with `vote_address` in the current epoch's
    /// vote accounts, or 0 if the address is not present.
    fn get_current_epoch_vote_account_stake(&self, vote_address: &Pubkey) -> u64 {
        self.get_current_epoch_vote_accounts()
            .get(vote_address)
            .map(|(stake, _)| (*stake))
            .unwrap_or(0)
    }

    /// Compute the fee details for `message` via the fee crate, using the
    /// supplied (not the bank's own) `feature_set`.
    fn calculate_fee(
        &self,
        message: &impl SVMMessage,
        lamports_per_signature: u64,
        prioritization_fee: u64,
        feature_set: &FeatureSet,
    ) -> FeeDetails {
        clone_solana_fee::calculate_fee_details(
            message,
            false, /* zero_fees_for_test */
            lamports_per_signature,
            prioritization_fee,
            FeeFeatures::from(feature_set),
        )
    }
}
6982
#[cfg(feature = "dev-context-only-utils")]
impl Bank {
    /// Wrap this bank in a fresh `BankForks`, returning the root bank handle
    /// and the forks structure.
    pub fn wrap_with_bank_forks_for_tests(self) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank_forks = BankForks::new_rw_arc(self);
        let bank = bank_forks.read().unwrap().root_bank();
        (bank, bank_forks)
    }

    /// Create a default bank backed by a default (test-only) accounts db.
    pub fn default_for_tests() -> Self {
        let accounts_db = AccountsDb::default_for_tests();
        let accounts = Accounts::new(Arc::new(accounts_db));
        Self::default_with_accounts(accounts)
    }

    /// Create a bank from `genesis_config` and wrap it in a `BankForks`.
    pub fn new_with_bank_forks_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank = Self::new_for_tests(genesis_config);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a bank from `genesis_config` with the default test configuration.
    pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
        Self::new_with_config_for_tests(genesis_config, BankTestConfig::default())
    }

    /// Create a bank with a mockup builtin registered at `program_id`, wrapped
    /// in a `BankForks`.
    pub fn new_with_mockup_builtin_for_tests(
        genesis_config: &GenesisConfig,
        program_id: Pubkey,
        builtin_function: BuiltinFunctionWithContext,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);
        bank.add_mockup_builtin(program_id, builtin_function);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a bank whose wall-clock throttle is effectively disabled by
    /// setting `ns_per_slot` to `u128::MAX`.
    pub fn new_no_wallclock_throttle_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);

        bank.ns_per_slot = u128::MAX;
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a bank from `genesis_config` with an explicit `BankTestConfig`.
    pub fn new_with_config_for_tests(
        genesis_config: &GenesisConfig,
        test_config: BankTestConfig,
    ) -> Self {
        Self::new_with_paths_for_tests(
            genesis_config,
            Arc::new(RuntimeConfig::default()),
            test_config,
            Vec::new(),
        )
    }

    /// Create a bank with explicit runtime config, test config, and account
    /// storage paths.
    pub fn new_with_paths_for_tests(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        test_config: BankTestConfig,
        paths: Vec<PathBuf>,
    ) -> Self {
        Self::new_with_paths(
            genesis_config,
            runtime_config,
            paths,
            None,
            None,
            false,
            Some(test_config.accounts_db_config),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }

    /// Create a bank for benchmarks, with no extra account storage paths.
    pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
        Self::new_with_paths_for_benches(genesis_config, Vec::new())
    }

    /// Intended for use by benches only.
    /// create new bank with the given config and paths.
    pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec<PathBuf>) -> Self {
        Self::new_with_paths(
            genesis_config,
            Arc::<RuntimeConfig>::default(),
            paths,
            None,
            None,
            false,
            Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }

    /// Prepare a transaction batch from a list of legacy transactions. Used for tests only.
    //
    // A redundant `#[cfg(feature = "dev-context-only-utils")]` attribute was
    // removed here; the enclosing `impl` block is already gated on that
    // feature, so the per-method gate had no effect.
    pub fn prepare_batch_for_tests(
        &self,
        txs: Vec<Transaction>,
    ) -> TransactionBatch<RuntimeTransaction<SanitizedTransaction>> {
        let transaction_account_lock_limit = self.get_transaction_account_lock_limit();
        let sanitized_txs = txs
            .into_iter()
            .map(RuntimeTransaction::from_transaction_for_tests)
            .collect::<Vec<_>>();
        let lock_results = self
            .rc
            .accounts
            .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit);
        TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Owned(sanitized_txs))
    }

    /// Set the initial accounts data size
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) {
        self.accounts_data_size_initial = amount;
    }

    /// Update the accounts data size off-chain delta
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) {
        self.update_accounts_data_size_delta_off_chain(amount)
    }

    // Forces lazy rent collection, restoring the old behavior some fragile
    // tests depend on.
    #[cfg(test)]
    fn restore_old_behavior_for_fragile_tests(&self) {
        self.lazy_rent_collection.store(true, Relaxed);
    }

    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_transactions<'a>(
        &self,
        txs: impl Iterator<Item = &'a Transaction>,
    ) -> Vec<Result<()>> {
        self.try_process_transactions(txs).unwrap()
    }

    /// Process entry transactions in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_entry_transactions(&self, txs: Vec<VersionedTransaction>) -> Vec<Result<()>> {
        self.try_process_entry_transactions(txs).unwrap()
    }

    // Flush this bank's slot out of the accounts write cache.
    #[cfg(test)]
    pub fn flush_accounts_cache_slot_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache_slot_for_tests(self.slot())
    }

    /// This is only valid to call from tests.
    /// block until initial accounts hash verification has completed
    pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .join_background_thread()
    }

    /// Return a copy of the transaction processor's sysvar cache.
    pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache {
        self.transaction_processor.get_sysvar_cache_for_tests()
    }

    /// Recompute and return the accounts hash from the index (tests only).
    pub fn update_accounts_hash_for_tests(&self) -> AccountsHash {
        self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false)
    }

    /// Build a per-batch program cache view for `slot` from this bank's
    /// program cache.
    pub fn new_program_cache_for_tx_batch_for_slot(&self, slot: Slot) -> ProgramCacheForTxBatch {
        ProgramCacheForTxBatch::new_from_cache(
            slot,
            self.epoch_schedule.get_epoch(slot),
            &self.transaction_processor.program_cache.read().unwrap(),
        )
    }

    /// Borrow this bank's transaction batch processor.
    pub fn get_transaction_processor(&self) -> &TransactionBatchProcessor<BankForks> {
        &self.transaction_processor
    }

    /// Replace this bank's fee structure.
    pub fn set_fee_structure(&mut self, fee_structure: &FeeStructure) {
        self.fee_structure = fee_structure.clone();
    }

    /// Load a program using the cache environments for `effective_epoch`.
    ///
    /// Returns `None` when no environments exist for that epoch.
    pub fn load_program(
        &self,
        pubkey: &Pubkey,
        reload: bool,
        effective_epoch: Epoch,
    ) -> Option<Arc<ProgramCacheEntry>> {
        let environments = self
            .transaction_processor
            .get_environments_for_epoch(effective_epoch)?;
        load_program_with_pubkey(
            self,
            &environments,
            pubkey,
            self.slot(),
            &mut ExecuteTimings::default(), // Called by ledger-tool, metrics not accumulated.
            reload,
        )
    }

    /// Withdraw `lamports` from `pubkey`'s account.
    ///
    /// # Errors
    ///
    /// - `TransactionError::AccountNotFound` if the account does not exist
    /// - `TransactionError::InsufficientFundsForFee` if the withdrawal would
    ///   drop the balance below the required minimum (or the math overflows)
    pub fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> {
        match self.get_account_with_fixed_root(pubkey) {
            Some(mut account) => {
                // Nonce accounts must retain the rent-exempt minimum for a
                // nonce state; other accounts have no minimum here.
                let min_balance = match get_system_account_kind(&account) {
                    Some(SystemAccountKind::Nonce) => self
                        .rent_collector
                        .rent
                        .minimum_balance(nonce::State::size()),
                    _ => 0,
                };

                lamports
                    .checked_add(min_balance)
                    .filter(|required_balance| *required_balance <= account.lamports())
                    .ok_or(TransactionError::InsufficientFundsForFee)?;
                account
                    .checked_sub_lamports(lamports)
                    .map_err(|_| TransactionError::InsufficientFundsForFee)?;
                self.store_account(pubkey, &account);

                Ok(())
            }
            None => Err(TransactionError::AccountNotFound),
        }
    }

    /// Replace this bank's `HashOverrides`.
    pub fn set_hash_overrides(&self, hash_overrides: HashOverrides) {
        *self.hash_overrides.lock().unwrap() = hash_overrides;
    }
}
7233
/// Compute how much an account has changed size.  This function is useful when the data size delta
/// needs to be computed and passed to an `update_accounts_data_size_delta` function.
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
    // Both sizes must fit in an i64 so the subtraction below is well-defined.
    assert!(old_data_size <= i64::MAX as usize);
    assert!(new_data_size <= i64::MAX as usize);

    // Positive delta => the account grew; negative => it shrank.
    (new_data_size as i64).saturating_sub(old_data_size as i64)
}
7244
/// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate
/// those callers explicitly.
//
// Variant names mirror the Bank methods that invoke
// `apply_feature_activations()`; see the call sites for the exact behavior
// each selects.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum ApplyFeatureActivationsCaller {
    FinishInit,
    NewFromParent,
    WarpFromParent,
}
7253
/// Return the computed values from `collect_rent_from_accounts()`
///
/// Since `collect_rent_from_accounts()` is running in parallel, instead of updating the
/// atomics/shared data inside this function, return those values in this struct for the caller to
/// process later.
#[derive(Debug, Default)]
struct CollectRentFromAccountsInfo {
    // Account rewrites that were skipped: (pubkey, account hash) pairs
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    // Aggregate rent amounts / reclaimed data size from rent collection
    rent_collected_info: CollectedInfo,
    // Per-account rent reward entries to report
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    // Timings, in microseconds (see the `_us` suffix)
    time_collecting_rent_us: u64,
    time_storing_accounts_us: u64,
    // Number of accounts processed
    num_accounts: usize,
}
7268
/// Return the computed values—of each iteration in the parallel loop inside
/// `collect_rent_in_partition()`—and then perform a reduce on all of them.
#[derive(Debug, Default)]
struct CollectRentInPartitionInfo {
    // Account rewrites that were skipped: (pubkey, account hash) pairs
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    // Total rent collected, in lamports
    rent_collected: u64,
    // Total account data size reclaimed, in bytes
    accounts_data_size_reclaimed: u64,
    // Per-account rent reward entries to report
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    // Timings, in microseconds (see the `_us` suffix)
    time_loading_accounts_us: u64,
    time_collecting_rent_us: u64,
    time_storing_accounts_us: u64,
    // Number of accounts processed
    num_accounts: usize,
}
7282
7283impl CollectRentInPartitionInfo {
7284    /// Create a new `CollectRentInPartitionInfo` from the results of loading accounts and
7285    /// collecting rent on them.
7286    #[must_use]
7287    fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self {
7288        Self {
7289            skipped_rewrites: info.skipped_rewrites,
7290            rent_collected: info.rent_collected_info.rent_amount,
7291            accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed,
7292            rent_rewards: info.rent_rewards,
7293            time_loading_accounts_us: time_loading_accounts.as_micros() as u64,
7294            time_collecting_rent_us: info.time_collecting_rent_us,
7295            time_storing_accounts_us: info.time_storing_accounts_us,
7296            num_accounts: info.num_accounts,
7297        }
7298    }
7299
7300    /// Reduce (i.e. 'combine') two `CollectRentInPartitionInfo`s into one.
7301    ///
7302    /// This fn is used by `collect_rent_in_partition()` as the reduce step (of map-reduce) in its
7303    /// parallel loop of rent collection.
7304    #[must_use]
7305    fn reduce(lhs: Self, rhs: Self) -> Self {
7306        Self {
7307            skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(),
7308            rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected),
7309            accounts_data_size_reclaimed: lhs
7310                .accounts_data_size_reclaimed
7311                .saturating_add(rhs.accounts_data_size_reclaimed),
7312            rent_rewards: [lhs.rent_rewards, rhs.rent_rewards].concat(),
7313            time_loading_accounts_us: lhs
7314                .time_loading_accounts_us
7315                .saturating_add(rhs.time_loading_accounts_us),
7316            time_collecting_rent_us: lhs
7317                .time_collecting_rent_us
7318                .saturating_add(rhs.time_collecting_rent_us),
7319            time_storing_accounts_us: lhs
7320                .time_storing_accounts_us
7321                .saturating_add(rhs.time_storing_accounts_us),
7322            num_accounts: lhs.num_accounts.saturating_add(rhs.num_accounts),
7323        }
7324    }
7325}
7326
/// Struct to collect stats when scanning all accounts in `get_total_accounts_stats()`
///
/// Populated incrementally by calling `accumulate_account()` for each account scanned.
#[derive(Debug, Default, Copy, Clone, Serialize)]
pub struct TotalAccountsStats {
    /// Total number of accounts
    pub num_accounts: usize,
    /// Total data size of all accounts
    pub data_len: usize,

    /// Total number of executable accounts
    pub num_executable_accounts: usize,
    /// Total data size of executable accounts
    pub executable_data_len: usize,

    /// Total number of rent exempt accounts
    pub num_rent_exempt_accounts: usize,
    /// Total number of rent paying accounts
    pub num_rent_paying_accounts: usize,
    /// Total number of rent paying accounts without data
    pub num_rent_paying_accounts_without_data: usize,
    /// Total amount of lamports in rent paying accounts
    pub lamports_in_rent_paying_accounts: u64,
}
7349
7350impl TotalAccountsStats {
7351    pub fn accumulate_account(
7352        &mut self,
7353        address: &Pubkey,
7354        account: &AccountSharedData,
7355        rent_collector: &RentCollector,
7356    ) {
7357        let data_len = account.data().len();
7358        self.num_accounts += 1;
7359        self.data_len += data_len;
7360
7361        if account.executable() {
7362            self.num_executable_accounts += 1;
7363            self.executable_data_len += data_len;
7364        }
7365
7366        if !rent_collector.should_collect_rent(address, account.executable())
7367            || rent_collector
7368                .get_rent_due(
7369                    account.lamports(),
7370                    account.data().len(),
7371                    account.rent_epoch(),
7372                )
7373                .is_exempt()
7374        {
7375            self.num_rent_exempt_accounts += 1;
7376        } else {
7377            self.num_rent_paying_accounts += 1;
7378            self.lamports_in_rent_paying_accounts += account.lamports();
7379            if data_len == 0 {
7380                self.num_rent_paying_accounts_without_data += 1;
7381            }
7382        }
7383    }
7384}
7385
impl Drop for Bank {
    fn drop(&mut self) {
        // If a drop callback has been installed on this bank, delegate cleanup
        // to it; the read-lock guard stays alive for the duration of the call.
        if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() {
            drop_callback.callback(self);
        } else {
            // Default case for tests: no callback installed, so purge this
            // bank's slot from the accounts db directly.
            self.rc
                .accounts
                .accounts_db
                .purge_slot(self.slot(), self.bank_id(), false);
        }
    }
}
7399
7400/// utility function used for testing and benchmarking.
7401pub mod test_utils {
7402    use {
7403        super::Bank,
7404        crate::installed_scheduler_pool::BankWithScheduler,
7405        clone_solana_sdk::{
7406            account::{ReadableAccount, WritableAccount},
7407            hash::hashv,
7408            lamports::LamportsError,
7409            pubkey::Pubkey,
7410        },
7411        clone_solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
7412        std::sync::Arc,
7413    };
7414    pub fn goto_end_of_slot(bank: Arc<Bank>) {
7415        goto_end_of_slot_with_scheduler(&BankWithScheduler::new_without_scheduler(bank))
7416    }
7417
7418    pub fn goto_end_of_slot_with_scheduler(bank: &BankWithScheduler) {
7419        let mut tick_hash = bank.last_blockhash();
7420        loop {
7421            tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
7422            bank.register_tick(&tick_hash);
7423            if tick_hash == bank.last_blockhash() {
7424                bank.freeze();
7425                return;
7426            }
7427        }
7428    }
7429
7430    pub fn update_vote_account_timestamp(
7431        timestamp: BlockTimestamp,
7432        bank: &Bank,
7433        vote_pubkey: &Pubkey,
7434    ) {
7435        let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default();
7436        let mut vote_state = vote_state::from(&vote_account).unwrap_or_default();
7437        vote_state.last_timestamp = timestamp;
7438        let versioned = VoteStateVersions::new_current(vote_state);
7439        vote_state::to(&versioned, &mut vote_account).unwrap();
7440        bank.store_account(vote_pubkey, &vote_account);
7441    }
7442
7443    pub fn deposit(
7444        bank: &Bank,
7445        pubkey: &Pubkey,
7446        lamports: u64,
7447    ) -> std::result::Result<u64, LamportsError> {
7448        // This doesn't collect rents intentionally.
7449        // Rents should only be applied to actual TXes
7450        let mut account = bank
7451            .get_account_with_fixed_root_no_cache(pubkey)
7452            .unwrap_or_default();
7453        account.checked_add_lamports(lamports)?;
7454        bank.store_account(pubkey, &account);
7455        Ok(account.lamports())
7456    }
7457}