// miraland_runtime/bank.rs
//! The `bank` module tracks client accounts and the progress of on-chain
//! programs.
//!
//! A single bank relates to a block produced by a single leader and each bank
//! except for the genesis bank points back to a parent bank.
//!
//! The bank is the main entrypoint for processing verified transactions with the function
//! `Bank::process_transactions`
//!
//! It does this by loading the accounts using the reference it holds on the account store,
//! and then passing those to an InvokeContext which handles loading the programs specified
//! by the Transaction and executing it.
//!
//! The bank then stores the results to the accounts store.
//!
//! It then has APIs for retrieving if a transaction has been processed and its status.
//! See `get_signature_status` et al.
//!
//! Bank lifecycle:
//!
//! A bank is newly created and open to transactions. Transactions are applied
//! until either the bank reaches the tick count when the node is the leader for that slot, or the
//! node has applied all transactions present in all `Entry`s in the slot.
//!
//! Once it is complete, the bank can then be frozen. After freezing, no more transactions can
//! be applied or state changes made. At the freezing step, rent is applied and various
//! sysvar special accounts are updated to the new state of the system.
//!
//! After it is frozen and the bank has had the appropriate number of votes on it, then it can
//! become rooted. At this point, it will not be able to be removed from the chain and the
//! state is finalized.
//!
//! It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
36#[cfg(feature = "dev-context-only-utils")]
37use miraland_accounts_db::accounts_db::{
38    ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
39};
40#[allow(deprecated)]
41use miraland_sdk::recent_blockhashes_account;
42pub use miraland_sdk::reward_type::RewardType;
43use {
44    crate::{
45        bank::metrics::*,
46        bank_forks::BankForks,
47        builtins::{BuiltinPrototype, BUILTINS},
48        epoch_rewards_hasher::hash_rewards_into_partitions,
49        epoch_stakes::{EpochStakes, NodeVoteAccounts},
50        installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock},
51        serde_snapshot::BankIncrementalSnapshotPersistence,
52        snapshot_hash::SnapshotHash,
53        stake_account::StakeAccount,
54        stake_history::StakeHistory,
55        stake_weighted_timestamp::{
56            calculate_stake_weighted_timestamp, MaxAllowableDrift,
57            MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
58        },
59        stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum},
60        status_cache::{SlotDelta, StatusCache},
61        transaction_batch::TransactionBatch,
62    },
63    byteorder::{ByteOrder, LittleEndian},
64    dashmap::{DashMap, DashSet},
65    itertools::izip,
66    log::*,
67    miraland_accounts_db::{
68        accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot},
69        accounts_db::{
70            AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig,
71            CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig,
72        },
73        accounts_hash::{
74            AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash,
75        },
76        accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult, ZeroLamport},
77        accounts_partition::{self, Partition, PartitionIndex},
78        accounts_update_notifier_interface::AccountsUpdateNotifier,
79        ancestors::{Ancestors, AncestorsForSerialization},
80        blockhash_queue::BlockhashQueue,
81        epoch_accounts_hash::EpochAccountsHash,
82        partitioned_rewards::PartitionedEpochRewardsConfig,
83        sorted_storages::SortedStorages,
84        stake_rewards::StakeReward,
85        storable_accounts::StorableAccounts,
86    },
87    miraland_cost_model::cost_tracker::CostTracker,
88    miraland_loader_v4_program::create_program_runtime_environment_v2,
89    miraland_measure::{measure, measure::Measure, measure_us},
90    miraland_perf::perf_libs,
91    miraland_svm::{
92        account_loader::{TransactionCheckResult, TransactionLoadResult},
93        account_overrides::AccountOverrides,
94        runtime_config::RuntimeConfig,
95        transaction_error_metrics::TransactionErrorMetrics,
96        transaction_processor::{
97            TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback,
98        },
99        transaction_results::{
100            TransactionExecutionDetails, TransactionExecutionResult, TransactionResults,
101        },
102    },
103    rayon::{
104        iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
105        slice::ParallelSlice,
106        ThreadPool, ThreadPoolBuilder,
107    },
108    serde::Serialize,
109    miraland_bpf_loader_program::syscalls::create_program_runtime_environment_v1,
110    miraland_program_runtime::{
111        compute_budget_processor::process_compute_budget_instructions,
112        invoke_context::BuiltinFunctionWithContext,
113        loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms},
114        timings::{ExecuteTimingType, ExecuteTimings},
115    },
116    miraland_sdk::{
117        account::{
118            create_account_shared_data_with_fields as create_account, create_executable_meta,
119            from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount,
120            WritableAccount,
121        },
122        clock::{
123            BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK,
124            DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE,
125            MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
126            SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2, UPDATED_HASHES_PER_TICK3,
127            UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6,
128        },
129        epoch_info::EpochInfo,
130        epoch_schedule::EpochSchedule,
131        feature,
132        feature_set::{
133            self, include_loaded_accounts_data_size_in_fee_calculation,
134            remove_rounding_in_fee_calculation, FeatureSet,
135        },
136        fee::FeeStructure,
137        fee_calculator::{FeeCalculator, FeeRateGovernor},
138        genesis_config::{ClusterType, GenesisConfig},
139        hard_forks::HardForks,
140        hash::{extend_and_hash, hashv, Hash},
141        incinerator,
142        inflation::Inflation,
143        inner_instruction::InnerInstructions,
144        message::{AccountKeys, SanitizedMessage},
145        native_loader,
146        native_token::LAMPORTS_PER_MLN,
147        nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX},
148        nonce_account,
149        nonce_info::{NonceInfo, NoncePartial},
150        packet::PACKET_DATA_SIZE,
151        precompiles::get_precompiles,
152        pubkey::Pubkey,
153        rent::RentDue,
154        rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH},
155        rent_debits::RentDebits,
156        reward_info::RewardInfo,
157        saturating_add_assign,
158        signature::{Keypair, Signature},
159        slot_hashes::SlotHashes,
160        slot_history::{Check, SlotHistory},
161        stake::state::Delegation,
162        system_transaction,
163        sysvar::{self, last_restart_slot::LastRestartSlot, Sysvar, SysvarId},
164        timing::years_as_slots,
165        transaction::{
166            self, MessageHash, Result, SanitizedTransaction, Transaction, TransactionError,
167            TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS,
168        },
169        transaction_context::{TransactionAccount, TransactionReturnData},
170    },
171    miraland_stake_program::stake_state::{
172        self, InflationPointCalculationEvent, PointValue, StakeStateV2,
173    },
174    miraland_system_program::{get_system_account_kind, SystemAccountKind},
175    miraland_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap},
176    miraland_vote_program::vote_state::VoteState,
177    std::{
178        borrow::Cow,
179        collections::{HashMap, HashSet},
180        convert::TryFrom,
181        fmt, mem,
182        ops::{AddAssign, RangeInclusive},
183        path::PathBuf,
184        slice,
185        sync::{
186            atomic::{
187                AtomicBool, AtomicI64, AtomicU64, AtomicUsize,
188                Ordering::{AcqRel, Acquire, Relaxed},
189            },
190            Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
191        },
192        thread::Builder,
193        time::{Duration, Instant},
194    },
195};
196
/// params to `verify_accounts_hash`
///
/// NOTE(review): the exact semantics of these flags are not visible in this
/// file; they are forwarded to the accounts-hash verification machinery —
/// confirm behavior at the `verify_accounts_hash` call sites.
struct VerifyAccountsHashConfig {
    // re-run/cross-check the hash calculation — presumed from the name; confirm
    test_hash_calculation: bool,
    ignore_mismatch: bool,
    require_rooted_bank: bool,
    run_in_background: bool,
    store_hash_raw_data_for_debug: bool,
}
205
// Bank submodules. `tests` is compiled only for test builds.
mod address_lookup_table;
pub mod bank_hash_details;
mod builtin_programs;
pub mod epoch_accounts_hash_utils;
mod fee_distribution;
mod metrics;
mod serde_snapshot;
mod sysvar_cache;
#[cfg(test)]
pub(crate) mod tests;
216
/// Seconds in a Julian year (365.25 days).
pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;

/// Number of epochs of stakes kept for leader-schedule computation —
/// presumed from the name; confirm at usage sites.
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
220
/// Timing and count metrics gathered during rent collection. The atomics
/// allow concurrent updates without holding a lock.
#[derive(Default)]
struct RentMetrics {
    // microsecond timings (per the `_us` suffix)
    hold_range_us: AtomicU64,
    load_us: AtomicU64,
    collect_us: AtomicU64,
    hash_us: AtomicU64,
    store_us: AtomicU64,
    // number of items processed — presumed; confirm at the update sites
    count: AtomicUsize,
}
230
/// Status cache specialized to transaction results.
pub type BankStatusCache = StatusCache<Result<()>>;
/// ABI-frozen alias: the digest guards against unintended changes to the
/// serialized layout (enforced by the `#[frozen_abi]` machinery).
#[frozen_abi(digest = "EzAXfE2xG3ZqdAj8KMC8CeqoSxjo5hxrEaP7fta8LT9u")]
pub type BankSlotDelta = SlotDelta<Result<()>>;
234
/// Timings recorded while squashing a bank (milliseconds, per the `_ms` suffix).
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]
pub struct SquashTiming {
    pub squash_accounts_ms: u64,
    pub squash_accounts_cache_ms: u64,
    pub squash_accounts_index_ms: u64,
    pub squash_accounts_store_ms: u64,

    pub squash_cache_ms: u64,
}

impl AddAssign for SquashTiming {
    /// Accumulate another `SquashTiming` into `self`, field by field.
    fn add_assign(&mut self, rhs: Self) {
        // Destructure the right-hand side so that adding a field to the
        // struct makes this impl fail to compile until it is updated too.
        let Self {
            squash_accounts_ms,
            squash_accounts_cache_ms,
            squash_accounts_index_ms,
            squash_accounts_store_ms,
            squash_cache_ms,
        } = rhs;
        self.squash_accounts_ms += squash_accounts_ms;
        self.squash_accounts_cache_ms += squash_accounts_cache_ms;
        self.squash_accounts_index_ms += squash_accounts_index_ms;
        self.squash_accounts_store_ms += squash_accounts_store_ms;
        self.squash_cache_ms += squash_cache_ms;
    }
}
254
/// Reference-counted pieces of bank state shared between banks.
#[derive(Debug)]
pub struct BankRc {
    /// where all the Accounts are stored
    pub accounts: Arc<Accounts>,

    /// Previous checkpoint of this bank
    pub(crate) parent: RwLock<Option<Arc<Bank>>>,

    /// Current slot
    pub(crate) slot: Slot,

    /// Counter from which bank ids are drawn; shared via `Arc` — presumably so
    /// descendant banks use the same sequence; confirm in the bank constructors.
    pub(crate) bank_id_generator: Arc<AtomicU64>,
}
268
269#[cfg(RUSTC_WITH_SPECIALIZATION)]
270use miraland_frozen_abi::abi_example::AbiExample;
271
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for BankRc {
    /// Build a representative `BankRc` for ABI digesting.
    fn example() -> Self {
        Self {
            // Accounts' AbiExample is specially implemented to carry a storage example
            accounts: AbiExample::example(),
            // No parent: stops the example from recursing into another Bank
            parent: RwLock::new(None),
            slot: AbiExample::example(),
            bank_id_generator: Arc::new(AtomicU64::new(0)),
        }
    }
}
285
286impl BankRc {
287    pub(crate) fn new(accounts: Accounts, slot: Slot) -> Self {
288        Self {
289            accounts: Arc::new(accounts),
290            parent: RwLock::new(None),
291            slot,
292            bank_id_generator: Arc::new(AtomicU64::new(0)),
293        }
294    }
295}
296
/// Aggregate results of loading and executing a batch of transactions.
pub struct LoadAndExecuteTransactionsOutput {
    /// Load results, index-aligned with the input batch.
    pub loaded_transactions: Vec<TransactionLoadResult>,
    /// Vector of results indicating whether a transaction was executed or could not
    /// be executed. Note executed transactions can still have failed!
    pub execution_results: Vec<TransactionExecutionResult>,
    /// Indexes (into the batch) of transactions that may be retried.
    pub retryable_transaction_indexes: Vec<usize>,
    /// Total number of transactions that were executed
    pub executed_transactions_count: usize,
    /// Number of non-vote transactions that were executed
    pub executed_non_vote_transactions_count: usize,
    /// Total number of the executed transactions that returned success/not
    /// an error.
    pub executed_with_successful_result_count: usize,
    /// Number of signatures across the executed transactions.
    pub signature_count: u64,
    /// Error counters accumulated while processing the batch.
    pub error_counters: TransactionErrorMetrics,
}
313
/// Outcome of simulating a single transaction against a bank's state.
pub struct TransactionSimulationResult {
    /// Overall success/failure of the simulated execution
    pub result: Result<()>,
    /// Log messages emitted during simulation
    pub logs: TransactionLogMessages,
    /// Account states after the simulation ran
    pub post_simulation_accounts: Vec<TransactionAccount>,
    /// Units consumed by the simulated execution
    pub units_consumed: u64,
    /// Return data set by the transaction, if any
    pub return_data: Option<TransactionReturnData>,
    /// Inner (cross-program) instructions, if recording was enabled
    pub inner_instructions: Option<Vec<InnerInstructions>>,
}
/// Account balances captured immediately before and after executing a batch
/// of transactions. The two vectors are index-aligned per transaction.
pub struct TransactionBalancesSet {
    pub pre_balances: TransactionBalances,
    pub post_balances: TransactionBalances,
}

impl TransactionBalancesSet {
    /// Pair up pre/post balances.
    ///
    /// Panics if the two vectors do not cover the same number of transactions.
    pub fn new(pre_balances: TransactionBalances, post_balances: TransactionBalances) -> Self {
        let set = Self {
            pre_balances,
            post_balances,
        };
        assert_eq!(set.pre_balances.len(), set.post_balances.len());
        set
    }
}
/// Balances per transaction: the inner `Vec<u64>` presumably holds one
/// balance per account in that transaction — confirm with the producers.
pub type TransactionBalances = Vec<Vec<u64>>;
337
338#[derive(Serialize, Deserialize, AbiExample, AbiEnumVisitor, Debug, PartialEq, Eq)]
339pub enum TransactionLogCollectorFilter {
340    All,
341    AllWithVotes,
342    None,
343    OnlyMentionedAddresses,
344}
345
346impl Default for TransactionLogCollectorFilter {
347    fn default() -> Self {
348        Self::None
349    }
350}
351
/// Global configuration for how transaction logs are collected across all banks.
#[derive(AbiExample, Debug, Default)]
pub struct TransactionLogCollectorConfig {
    /// Addresses of interest when `filter` is `OnlyMentionedAddresses`.
    pub mentioned_addresses: HashSet<Pubkey>,
    /// Which transactions to collect logs for.
    pub filter: TransactionLogCollectorFilter,
}
357
/// One transaction's collected log output plus identifying metadata.
#[derive(AbiExample, Clone, Debug, PartialEq, Eq)]
pub struct TransactionLogInfo {
    /// Signature of the logged transaction.
    pub signature: Signature,
    /// Execution result of the transaction.
    pub result: Result<()>,
    /// Whether the transaction was a vote.
    pub is_vote: bool,
    /// The log lines the transaction emitted.
    pub log_messages: TransactionLogMessages,
}
365
#[derive(AbiExample, Default, Debug)]
pub struct TransactionLogCollector {
    // All the logs collected from this Bank.  Exact contents depend on the
    // active `TransactionLogCollectorFilter`
    pub logs: Vec<TransactionLogInfo>,

    // For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
    // locate the logs from transactions that included the mentioned addresses.
    pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
}
376
377impl TransactionLogCollector {
378    pub fn get_logs_for_address(
379        &self,
380        address: Option<&Pubkey>,
381    ) -> Option<Vec<TransactionLogInfo>> {
382        match address {
383            None => Some(self.logs.clone()),
384            Some(address) => self.mentioned_address_map.get(address).map(|log_indices| {
385                log_indices
386                    .iter()
387                    .filter_map(|i| self.logs.get(*i).cloned())
388                    .collect()
389            }),
390        }
391    }
392}
393
/// Bank's common fields shared by all supported snapshot versions for deserialization.
/// Sync fields with BankFieldsToSerialize! This is paired with it.
/// All members are made public to keep Bank's own members private and to make the versioned deserializer workable on this.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct BankFieldsToDeserialize {
    pub(crate) blockhash_queue: BlockhashQueue,
    pub(crate) ancestors: AncestorsForSerialization,
    pub(crate) hash: Hash,
    pub(crate) parent_hash: Hash,
    pub(crate) parent_slot: Slot,
    pub(crate) hard_forks: HardForks,
    pub(crate) transaction_count: u64,
    pub(crate) tick_height: u64,
    pub(crate) signature_count: u64,
    pub(crate) capitalization: u64,
    pub(crate) max_tick_height: u64,
    pub(crate) hashes_per_tick: Option<u64>,
    pub(crate) ticks_per_slot: u64,
    pub(crate) ns_per_slot: u128,
    pub(crate) genesis_creation_time: UnixTimestamp,
    pub(crate) slots_per_year: f64,
    pub(crate) slot: Slot,
    pub(crate) epoch: Epoch,
    pub(crate) block_height: u64,
    pub(crate) collector_id: Pubkey,
    pub(crate) collector_fees: u64,
    pub(crate) fee_calculator: FeeCalculator,
    pub(crate) fee_rate_governor: FeeRateGovernor,
    pub(crate) collected_rent: u64,
    pub(crate) rent_collector: RentCollector,
    pub(crate) epoch_schedule: EpochSchedule,
    pub(crate) inflation: Inflation,
    pub(crate) stakes: Stakes<Delegation>,
    pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
    pub(crate) is_delta: bool,
    // Fields below were added after initial deployment and may be absent in
    // older snapshots, hence the `Option`s / defaults.
    pub(crate) accounts_data_len: u64,
    pub(crate) incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    pub(crate) epoch_accounts_hash: Option<Hash>,
    pub(crate) epoch_reward_status: EpochRewardStatus,
}
438
/// Bank's common fields shared by all supported snapshot versions for serialization.
/// This is separated from BankFieldsToDeserialize to avoid cloning by using refs.
/// So, sync fields with BankFieldsToDeserialize!
/// All members are made public to keep Bank private and to make the versioned serializer workable on this.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Debug)]
pub(crate) struct BankFieldsToSerialize<'a> {
    pub(crate) blockhash_queue: &'a RwLock<BlockhashQueue>,
    pub(crate) ancestors: &'a AncestorsForSerialization,
    pub(crate) hash: Hash,
    pub(crate) parent_hash: Hash,
    pub(crate) parent_slot: Slot,
    pub(crate) hard_forks: &'a RwLock<HardForks>,
    pub(crate) transaction_count: u64,
    pub(crate) tick_height: u64,
    pub(crate) signature_count: u64,
    pub(crate) capitalization: u64,
    pub(crate) max_tick_height: u64,
    pub(crate) hashes_per_tick: Option<u64>,
    pub(crate) ticks_per_slot: u64,
    pub(crate) ns_per_slot: u128,
    pub(crate) genesis_creation_time: UnixTimestamp,
    pub(crate) slots_per_year: f64,
    pub(crate) slot: Slot,
    pub(crate) epoch: Epoch,
    pub(crate) block_height: u64,
    pub(crate) collector_id: Pubkey,
    pub(crate) collector_fees: u64,
    pub(crate) fee_calculator: FeeCalculator,
    pub(crate) fee_rate_governor: FeeRateGovernor,
    pub(crate) collected_rent: u64,
    pub(crate) rent_collector: RentCollector,
    pub(crate) epoch_schedule: EpochSchedule,
    pub(crate) inflation: Inflation,
    pub(crate) stakes: &'a StakesCache,
    pub(crate) epoch_stakes: &'a HashMap<Epoch, EpochStakes>,
    pub(crate) is_delta: bool,
    pub(crate) accounts_data_len: u64,
}
481
// Can't derive PartialEq because RwLock doesn't implement PartialEq
impl PartialEq for Bank {
    fn eq(&self, other: &Self) -> bool {
        if std::ptr::eq(self, other) {
            return true;
        }
        // Exhaustively destructure `self`: fields bound by name participate in
        // the comparison below, fields bound to `_` are deliberately excluded.
        // Because there is no `..`, adding a field to `Bank` forces an explicit
        // decision here at compile time.
        let Self {
            skipped_rewrites: _,
            rc: _,
            status_cache: _,
            blockhash_queue,
            ancestors,
            hash,
            parent_hash,
            parent_slot,
            hard_forks,
            transaction_count,
            non_vote_transaction_count_since_restart: _,
            transaction_error_count: _,
            transaction_entries_count: _,
            transactions_per_entry_max: _,
            tick_height,
            signature_count,
            capitalization,
            max_tick_height,
            hashes_per_tick,
            ticks_per_slot,
            ns_per_slot,
            genesis_creation_time,
            slots_per_year,
            slot,
            bank_id: _,
            epoch,
            block_height,
            collector_id,
            collector_fees,
            fee_rate_governor,
            collected_rent,
            rent_collector,
            epoch_schedule,
            inflation,
            stakes_cache,
            epoch_stakes,
            is_delta,
            // TODO: Confirm if all these fields are intentionally ignored!
            builtin_programs: _,
            runtime_config: _,
            rewards: _,
            cluster_type: _,
            lazy_rent_collection: _,
            rewards_pool_pubkeys: _,
            transaction_debug_keys: _,
            transaction_log_collector_config: _,
            transaction_log_collector: _,
            feature_set: _,
            drop_callback: _,
            freeze_started: _,
            vote_only_bank: _,
            cost_tracker: _,
            accounts_data_size_initial: _,
            accounts_data_size_delta_on_chain: _,
            accounts_data_size_delta_off_chain: _,
            fee_structure: _,
            incremental_snapshot_persistence: _,
            loaded_programs_cache: _,
            epoch_reward_status: _,
            transaction_processor: _,
            // Ignore new fields explicitly if they do not impact PartialEq.
            // Adding ".." will remove compile-time checks that if a new field
            // is added to the struct, this PartialEq is accordingly updated.
        } = self;
        // Locked fields are compared under their read locks; atomics are read
        // with Relaxed ordering (no synchronization implied by equality).
        *blockhash_queue.read().unwrap() == *other.blockhash_queue.read().unwrap()
            && ancestors == &other.ancestors
            && *hash.read().unwrap() == *other.hash.read().unwrap()
            && parent_hash == &other.parent_hash
            && parent_slot == &other.parent_slot
            && *hard_forks.read().unwrap() == *other.hard_forks.read().unwrap()
            && transaction_count.load(Relaxed) == other.transaction_count.load(Relaxed)
            && tick_height.load(Relaxed) == other.tick_height.load(Relaxed)
            && signature_count.load(Relaxed) == other.signature_count.load(Relaxed)
            && capitalization.load(Relaxed) == other.capitalization.load(Relaxed)
            && max_tick_height == &other.max_tick_height
            && hashes_per_tick == &other.hashes_per_tick
            && ticks_per_slot == &other.ticks_per_slot
            && ns_per_slot == &other.ns_per_slot
            && genesis_creation_time == &other.genesis_creation_time
            && slots_per_year == &other.slots_per_year
            && slot == &other.slot
            && epoch == &other.epoch
            && block_height == &other.block_height
            && collector_id == &other.collector_id
            && collector_fees.load(Relaxed) == other.collector_fees.load(Relaxed)
            && fee_rate_governor == &other.fee_rate_governor
            && collected_rent.load(Relaxed) == other.collected_rent.load(Relaxed)
            && rent_collector == &other.rent_collector
            && epoch_schedule == &other.epoch_schedule
            && *inflation.read().unwrap() == *other.inflation.read().unwrap()
            && *stakes_cache.stakes() == *other.stakes_cache.stakes()
            && epoch_stakes == &other.epoch_stakes
            && is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
    }
}
584
/// Event reported during staking-reward point calculation; consumed through a
/// `RewardCalcTracer` callback.
#[derive(Debug)]
pub enum RewardCalculationEvent<'a, 'b> {
    /// (stake pubkey, inflation-point calculation event for that stake)
    Staking(&'a Pubkey, &'b InflationPointCalculationEvent),
}
589
/// A type alias is not supported for a trait in Rust yet. As a workaround, we define the
/// `RewardCalcTracer` trait explicitly and implement it on any type that implements
/// `Fn(&RewardCalculationEvent) + Send + Sync`.
pub trait RewardCalcTracer: Fn(&RewardCalculationEvent) + Send + Sync {}

/// Blanket impl: any suitable closure or function satisfies the trait automatically.
impl<T: Fn(&RewardCalculationEvent) + Send + Sync> RewardCalcTracer for T {}

/// Returns `None` with a concrete, inferable fn-pointer type, for call sites
/// that want "no tracer" without naming an `impl RewardCalcTracer` type.
fn null_tracer() -> Option<impl RewardCalcTracer> {
    None::<fn(&RewardCalculationEvent)>
}
600
/// Hook run when a `Bank` is dropped; see the `drop_callback` field on `Bank`.
pub trait DropCallback: fmt::Debug {
    /// Called with the bank being dropped.
    fn callback(&self, b: &Bank);
    /// Clone into a fresh boxed trait object (object-safe substitute for `Clone`).
    fn clone_box(&self) -> Box<dyn DropCallback + Send + Sync>;
}
605
/// Holder for an optional boxed `DropCallback`; `None` means no callback installed.
#[derive(Debug, Default)]
pub struct OptionalDropCallback(Option<Box<dyn DropCallback + Send + Sync>>);

#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for OptionalDropCallback {
    // `None` serves as the example; a trait object has no constructible sample here.
    fn example() -> Self {
        Self(None)
    }
}
615
/// Progress marker for partitioned epoch-reward distribution.
#[derive(AbiExample, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub(crate) struct StartBlockHeightAndRewards {
    /// the block height of the slot at which rewards distribution began
    pub(crate) start_block_height: u64,
    /// calculated epoch rewards pending distribution, outer Vec is by partition (one partition per block)
    pub(crate) stake_rewards_by_partition: Arc<Vec<StakeRewards>>,
}
623
/// Represents whether the bank is in the reward phase or not.
#[derive(AbiExample, AbiEnumVisitor, Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub(crate) enum EpochRewardStatus {
    /// this bank is in the reward phase.
    /// Contents are the start point for epoch reward calculation,
    /// i.e. parent_slot and parent_block height for the starting
    /// block of the current epoch.
    Active(StartBlockHeightAndRewards),
    /// this bank is outside of the rewarding phase.
    #[default]
    Inactive,
}
636
/// Manager for the state of all accounts and programs after processing its entries.
/// AbiExample is needed even without Serialize/Deserialize; actual (de-)serialization
/// are implemented elsewhere for versioning
#[derive(AbiExample, Debug)]
pub struct Bank {
    /// References to accounts, parent and signature status
    pub rc: BankRc,

    /// A cache of signature statuses
    pub status_cache: Arc<RwLock<BankStatusCache>>,

    /// FIFO queue of `recent_blockhash` items
    blockhash_queue: RwLock<BlockhashQueue>,

    /// The set of parents including this bank
    pub ancestors: Ancestors,

    /// Hash of this Bank's state. Only meaningful after freezing.
    hash: RwLock<Hash>,

    /// Hash of this Bank's parent's state
    parent_hash: Hash,

    /// parent's slot
    parent_slot: Slot,

    /// slots to hard fork at
    hard_forks: Arc<RwLock<HardForks>>,

    /// The number of transactions processed without error
    transaction_count: AtomicU64,

    /// The number of non-vote transactions processed without error since the most recent boot from
    /// snapshot or genesis. This value is not shared through the network, nor retained within
    /// snapshots, but is preserved in `Bank::new_from_parent`.
    non_vote_transaction_count_since_restart: AtomicU64,

    /// The number of transaction errors in this slot
    transaction_error_count: AtomicU64,

    /// The number of transaction entries in this slot
    transaction_entries_count: AtomicU64,

    /// The max number of transactions in an entry in this slot
    transactions_per_entry_max: AtomicU64,

    /// Bank tick height
    tick_height: AtomicU64,

    /// The number of signatures from valid transactions in this slot
    signature_count: AtomicU64,

    /// Total capitalization, used to calculate inflation
    capitalization: AtomicU64,

    /// Bank max_tick_height
    max_tick_height: u64,

    /// The number of hashes in each tick. None value means hashing is disabled.
    hashes_per_tick: Option<u64>,

    /// The number of ticks in each slot.
    ticks_per_slot: u64,

    /// length of a slot in ns
    pub ns_per_slot: u128,

    /// genesis time, used for computed clock
    genesis_creation_time: UnixTimestamp,

    /// The number of slots per year, used for inflation
    slots_per_year: f64,

    /// Bank slot (i.e. block)
    slot: Slot,

    // unique id for this bank instance — presumably drawn from
    // `BankRc::bank_id_generator`; confirm in the constructors
    bank_id: BankId,

    /// Bank epoch
    epoch: Epoch,

    /// Bank block_height
    block_height: u64,

    /// The pubkey to send transactions fees to.
    collector_id: Pubkey,

    /// Fees that have been collected
    collector_fees: AtomicU64,

    /// Track cluster signature throughput and adjust fee rate
    pub(crate) fee_rate_governor: FeeRateGovernor,

    /// Rent that has been collected
    collected_rent: AtomicU64,

    /// latest rent collector, knows the epoch
    rent_collector: RentCollector,

    /// initialized from genesis
    pub(crate) epoch_schedule: EpochSchedule,

    /// inflation specs
    inflation: Arc<RwLock<Inflation>>,

    /// cache of vote_account and stake_account state for this fork
    stakes_cache: StakesCache,

    /// staked nodes on epoch boundaries, saved off when a bank.slot() is at
    ///   a leader schedule calculation boundary
    epoch_stakes: HashMap<Epoch, EpochStakes>,

    /// A boolean reflecting whether any entries were recorded into the PoH
    /// stream for the slot == self.slot
    is_delta: AtomicBool,

    // pubkeys of the built-in programs registered with this bank
    builtin_programs: HashSet<Pubkey>,

    /// Optional config parameters that can override runtime behavior
    pub(crate) runtime_config: Arc<RuntimeConfig>,

    /// Protocol-level rewards that were distributed by this bank
    pub rewards: RwLock<Vec<(Pubkey, RewardInfo)>>,

    pub cluster_type: Option<ClusterType>,

    pub lazy_rent_collection: AtomicBool,

    // this is temporary field only to remove rewards_pool entirely
    pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,

    // pubkeys whose transactions get extra debug output — presumed from the
    // name; confirm at usage sites
    transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,

    // Global configuration for how transaction logs should be collected across all banks
    pub transaction_log_collector_config: Arc<RwLock<TransactionLogCollectorConfig>>,

    // Logs from transactions that this Bank executed collected according to the criteria in
    // `transaction_log_collector_config`
    pub transaction_log_collector: Arc<RwLock<TransactionLogCollector>>,

    pub feature_set: Arc<FeatureSet>,

    /// callback function only to be called when dropping and should only be called once
    pub drop_callback: RwLock<OptionalDropCallback>,

    pub freeze_started: AtomicBool,

    vote_only_bank: bool,

    cost_tracker: RwLock<CostTracker>,

    /// The initial accounts data size at the start of this Bank, before processing any transactions/etc
    accounts_data_size_initial: u64,
    /// The change to accounts data size in this Bank, due to on-chain events (i.e. transactions)
    accounts_data_size_delta_on_chain: AtomicI64,
    /// The change to accounts data size in this Bank, due to off-chain events (i.e. rent collection)
    accounts_data_size_delta_off_chain: AtomicI64,

    /// until the skipped rewrites feature is activated, it is possible to skip rewrites and still include
    /// the account hash of the accounts that would have been rewritten as bank hash expects.
    skipped_rewrites: Mutex<HashMap<Pubkey, AccountHash>>,

    /// Transaction fee structure
    pub fee_structure: FeeStructure,

    pub incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,

    pub loaded_programs_cache: Arc<RwLock<LoadedPrograms<BankForks>>>,

    // whether this bank is inside the partitioned epoch-reward phase
    epoch_reward_status: EpochRewardStatus,

    transaction_processor: TransactionBatchProcessor<BankForks>,
}
810
/// A vote account bundled with the stake delegations that point at it.
struct VoteWithStakeDelegations {
    /// vote state associated with `vote_account`
    vote_state: Arc<VoteState>,
    /// the vote account itself
    vote_account: AccountSharedData,
    /// (stake account pubkey, stake account) pairs delegated to this vote account
    delegations: Vec<(Pubkey, StakeAccount<Delegation>)>,
}
816
/// Map from vote account pubkey to its vote state and delegating stake accounts.
type VoteWithStakeDelegationsMap = DashMap<Pubkey, VoteWithStakeDelegations>;

/// Map from a pubkey that failed validation to the reason its cache entry is invalid.
type InvalidCacheKeyMap = DashMap<Pubkey, InvalidCacheEntryReason>;
820
/// Aggregated outcome of loading vote and stake accounts for reward calculation.
struct LoadVoteAndStakeAccountsResult {
    /// successfully loaded vote accounts keyed by pubkey, with their delegations
    vote_with_stake_delegations_map: VoteWithStakeDelegationsMap,
    /// vote pubkeys that failed validation, with the reason for each
    invalid_vote_keys: InvalidCacheKeyMap,
    /// number of vote accounts that were not found in the cache
    vote_accounts_cache_miss_count: usize,
}
826
/// Reward calculated for a single vote account.
#[derive(Debug)]
struct VoteReward {
    /// the vote account being rewarded
    vote_account: AccountSharedData,
    /// commission taken by the validator (NOTE(review): presumably percentage
    /// points, matching `RewardInfo`'s commission — confirm)
    commission: u8,
    /// reward amount, in lamports
    vote_rewards: u64,
    /// whether the vote account must be written back to the accounts store
    vote_needs_store: bool,
}
834
/// Map from vote account pubkey to its calculated reward.
type VoteRewards = DashMap<Pubkey, VoteReward>;
#[derive(Debug, Default)]
struct VoteRewardsAccounts {
    /// reward info for each vote account pubkey.
    /// This type is used by `update_reward_history()`
    rewards: Vec<(Pubkey, RewardInfo)>,
    /// corresponds to pubkey in `rewards` (parallel vector, same length/order).
    /// Some if account is to be stored.
    /// None if to be skipped.
    accounts_to_store: Vec<Option<AccountSharedData>>,
}
846
/// hold reward calc info to avoid recalculation across functions
struct EpochRewardCalculateParamInfo<'a> {
    /// snapshot of the stake history used for the calculation
    stake_history: StakeHistory,
    /// all stake delegations, borrowed from the stakes cache
    stake_delegations: Vec<(&'a Pubkey, &'a StakeAccount<Delegation>)>,
    /// vote accounts, borrowed from the stakes cache
    cached_vote_accounts: &'a VoteAccounts,
}
853
/// Hold all results from calculating the rewards for partitioned distribution.
/// This struct exists so we can have a function which does all the calculation with no
/// side effects.
struct PartitionedRewardsCalculation {
    /// vote account rewards (and the accounts to store) for this epoch
    vote_account_rewards: VoteRewardsAccounts,
    /// stake rewards, already grouped into distribution partitions
    stake_rewards_by_partition: StakeRewardCalculationPartitioned,
    /// vote balance and staked total before this calculation
    old_vote_balance_and_staked: u64,
    /// total validator rewards, in lamports
    validator_rewards: u64,
    /// inflation rate share going to validators
    validator_rate: f64,
    /// inflation rate share going to the foundation
    foundation_rate: f64,
    /// length of the previous epoch, in years (used for inflation math)
    prev_epoch_duration_in_years: f64,
    /// capitalization at the time of calculation
    capitalization: u64,
}
867
/// result of calculating the stake rewards at beginning of new epoch
/// (compare `StakeRewardCalculation`, which is the ungrouped form)
struct StakeRewardCalculationPartitioned {
    /// each individual stake account to reward, grouped by partition
    stake_rewards_by_partition: Vec<StakeRewards>,
    /// total lamports across all `stake_rewards`
    total_stake_rewards_lamports: u64,
}
875
/// Result of calculating epoch rewards and distributing the vote-account share.
struct CalculateRewardsAndDistributeVoteRewardsResult {
    /// total rewards for the epoch (including both vote rewards and stake rewards)
    total_rewards: u64,
    /// distributed vote rewards
    distributed_rewards: u64,
    /// stake rewards that still need to be distributed, grouped by partition
    stake_rewards_by_partition: Vec<StakeRewards>,
}
884
/// A batch of individual stake-account rewards.
pub(crate) type StakeRewards = Vec<StakeReward>;
886
/// Optional overrides when creating a child bank; see
/// `Bank::new_from_parent_with_options`.
#[derive(Debug, Default)]
pub struct NewBankOptions {
    // NOTE(review): presumably marks a bank used only for vote transactions —
    // confirm against consumers of `Bank::vote_only_bank`
    pub vote_only_bank: bool,
}
891
/// Configuration knobs for constructing banks in tests.
#[derive(Debug, Default)]
pub struct BankTestConfig {
    /// account secondary indexes to enable on the test bank's accounts db
    pub secondary_indexes: AccountSecondaryIndexes,
}
896
/// Inflation reward parameters derived from the previous epoch.
#[derive(Debug)]
struct PrevEpochInflationRewards {
    /// total validator rewards, in lamports
    validator_rewards: u64,
    /// length of the previous epoch, in years
    prev_epoch_duration_in_years: f64,
    /// inflation rate share going to validators
    validator_rate: f64,
    /// inflation rate share going to the foundation
    foundation_rate: f64,
}
904
/// Transaction counters accumulated while committing a batch of transactions.
pub struct CommitTransactionCounts {
    /// number of transactions committed
    pub committed_transactions_count: u64,
    /// subset of committed transactions that were not votes
    pub committed_non_vote_transactions_count: u64,
    /// subset of committed transactions that executed with a failure result
    pub committed_with_failure_result_count: u64,
    /// number of signatures across the committed transactions
    pub signature_count: u64,
}
911
#[derive(Debug, Default)]
/// result of calculating the stake rewards at end of epoch
/// (compare `StakeRewardCalculationPartitioned`, which groups these by
/// distribution partition)
struct StakeRewardCalculation {
    /// each individual stake account to reward
    stake_rewards: StakeRewards,
    /// total lamports across all `stake_rewards`
    total_stake_rewards_lamports: u64,
}
920
/// Whether the current slot falls inside the partitioned epoch-reward
/// distribution interval; see `Bank::get_reward_interval`.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub(super) enum RewardInterval {
    /// the slot within the epoch is INSIDE the reward distribution interval
    InsideInterval,
    /// the slot within the epoch is OUTSIDE the reward distribution interval
    OutsideInterval,
}
928
929impl Bank {
    /// Construct a bank with every field default-initialized except for the
    /// given accounts store (wrapped into a fresh `BankRc` at slot 0).
    ///
    /// This is the common base used by `new_with_paths`; callers are expected
    /// to overwrite the fields they care about afterwards.
    fn default_with_accounts(accounts: Accounts) -> Self {
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            incremental_snapshot_persistence: None,
            rc: BankRc::new(accounts, Slot::default()),
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::<BlockhashQueue>::default(),
            ancestors: Ancestors::default(),
            hash: RwLock::<Hash>::default(),
            parent_hash: Hash::default(),
            parent_slot: Slot::default(),
            hard_forks: Arc::<RwLock<HardForks>>::default(),
            transaction_count: AtomicU64::default(),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::default(),
            signature_count: AtomicU64::default(),
            capitalization: AtomicU64::default(),
            max_tick_height: u64::default(),
            hashes_per_tick: Option::<u64>::default(),
            ticks_per_slot: u64::default(),
            ns_per_slot: u128::default(),
            genesis_creation_time: UnixTimestamp::default(),
            slots_per_year: f64::default(),
            slot: Slot::default(),
            bank_id: BankId::default(),
            epoch: Epoch::default(),
            block_height: u64::default(),
            collector_id: Pubkey::default(),
            collector_fees: AtomicU64::default(),
            fee_rate_governor: FeeRateGovernor::default(),
            collected_rent: AtomicU64::default(),
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Arc::<RwLock<Inflation>>::default(),
            stakes_cache: StakesCache::default(),
            epoch_stakes: HashMap::<Epoch, EpochStakes>::default(),
            is_delta: AtomicBool::default(),
            builtin_programs: HashSet::<Pubkey>::default(),
            runtime_config: Arc::<RuntimeConfig>::default(),
            rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
            cluster_type: Option::<ClusterType>::default(),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            freeze_started: AtomicBool::default(),
            vote_only_bank: false,
            cost_tracker: RwLock::<CostTracker>::default(),
            accounts_data_size_initial: 0,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            fee_structure: FeeStructure::default(),
            loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new(
                Slot::default(),
                Epoch::default(),
            ))),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
        };

        // The transaction processor is built from several bank fields, so it
        // is replaced after the struct literal above is constructed.
        bank.transaction_processor = TransactionBatchProcessor::new(
            bank.slot,
            bank.epoch,
            bank.epoch_schedule.clone(),
            bank.fee_structure.clone(),
            bank.runtime_config.clone(),
            bank.loaded_programs_cache.clone(),
        );

        // Record the starting accounts data size so later on/off-chain deltas
        // are applied against the actual store contents.
        let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64;
        bank.accounts_data_size_initial = accounts_data_size_initial;

        bank
    }
1011
    /// Create the root (slot 0) bank from a genesis config, backed by an
    /// accounts db stored under `paths`.
    ///
    /// After default-initializing via `default_with_accounts`, this processes
    /// the genesis config, installs builtins (unless
    /// `debug_do_not_add_builtins`), seeds epoch stakes for every epoch
    /// implied by slot 0, and populates the sysvar cache.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_paths(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        paths: Vec<PathBuf>,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        account_indexes: AccountSecondaryIndexes,
        shrink_ratio: AccountShrinkThreshold,
        debug_do_not_add_builtins: bool,
        accounts_db_config: Option<AccountsDbConfig>,
        accounts_update_notifier: Option<AccountsUpdateNotifier>,
        #[allow(unused)] collector_id_for_tests: Option<Pubkey>,
        exit: Arc<AtomicBool>,
    ) -> Self {
        let accounts_db = AccountsDb::new_with_config(
            paths,
            &genesis_config.cluster_type,
            account_indexes,
            shrink_ratio,
            accounts_db_config,
            accounts_update_notifier,
            exit,
        );
        let accounts = Accounts::new(Arc::new(accounts_db));
        let mut bank = Self::default_with_accounts(accounts);
        // The genesis bank is its own only ancestor.
        bank.ancestors = Ancestors::from(vec![bank.slot()]);
        bank.transaction_debug_keys = debug_keys;
        bank.runtime_config = runtime_config;
        bank.cluster_type = Some(genesis_config.cluster_type);

        // The dev-only variant additionally accepts a collector id override.
        #[cfg(not(feature = "dev-context-only-utils"))]
        bank.process_genesis_config(genesis_config);
        #[cfg(feature = "dev-context-only-utils")]
        bank.process_genesis_config(genesis_config, collector_id_for_tests);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );

        // genesis needs stakes for all epochs up to the epoch implied by
        //  slot = 0 and genesis configuration
        {
            let stakes = bank.stakes_cache.stakes().clone();
            let stakes = Arc::new(StakesEnum::from(stakes));
            for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) {
                bank.epoch_stakes
                    .insert(epoch, EpochStakes::new(stakes.clone(), epoch));
            }
            bank.update_stake_history(None);
        }
        // Populate the sysvars before handing the bank out.
        bank.update_clock(None);
        bank.update_rent();
        bank.update_epoch_schedule();
        bank.update_recent_blockhashes();
        bank.update_last_restart_slot();
        bank.fill_missing_sysvar_cache_entries();
        bank
    }
1073
1074    /// Create a new bank that points to an immutable checkpoint of another bank.
1075    pub fn new_from_parent(parent: Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
1076        Self::_new_from_parent(
1077            parent,
1078            collector_id,
1079            slot,
1080            null_tracer(),
1081            NewBankOptions::default(),
1082        )
1083    }
1084
1085    pub fn new_from_parent_with_options(
1086        parent: Arc<Bank>,
1087        collector_id: &Pubkey,
1088        slot: Slot,
1089        new_bank_options: NewBankOptions,
1090    ) -> Self {
1091        Self::_new_from_parent(parent, collector_id, slot, null_tracer(), new_bank_options)
1092    }
1093
1094    pub fn new_from_parent_with_tracer(
1095        parent: Arc<Bank>,
1096        collector_id: &Pubkey,
1097        slot: Slot,
1098        reward_calc_tracer: impl RewardCalcTracer,
1099    ) -> Self {
1100        Self::_new_from_parent(
1101            parent,
1102            collector_id,
1103            slot,
1104            Some(reward_calc_tracer),
1105            NewBankOptions::default(),
1106        )
1107    }
1108
    /// Build the rent collector for a child bank: the parent's collector
    /// re-targeted at the child's `epoch`.
    fn get_rent_collector_from(rent_collector: &RentCollector, epoch: Epoch) -> RentCollector {
        rent_collector.clone_with_epoch(epoch)
    }
1112
1113    fn is_partitioned_rewards_feature_enabled(&self) -> bool {
1114        self.feature_set
1115            .is_active(&feature_set::enable_partitioned_epoch_reward::id())
1116    }
1117
1118    pub(crate) fn set_epoch_reward_status_active(
1119        &mut self,
1120        stake_rewards_by_partition: Vec<StakeRewards>,
1121    ) {
1122        self.epoch_reward_status = EpochRewardStatus::Active(StartBlockHeightAndRewards {
1123            start_block_height: self.block_height,
1124            stake_rewards_by_partition: Arc::new(stake_rewards_by_partition),
1125        });
1126    }
1127
1128    fn partitioned_epoch_rewards_config(&self) -> &PartitionedEpochRewardsConfig {
1129        &self
1130            .rc
1131            .accounts
1132            .accounts_db
1133            .partitioned_epoch_rewards_config
1134    }
1135
1136    /// # stake accounts to store in one block during partitioned reward interval
1137    fn partitioned_rewards_stake_account_stores_per_block(&self) -> u64 {
1138        self.partitioned_epoch_rewards_config()
1139            .stake_account_stores_per_block
1140    }
1141
1142    /// reward calculation happens synchronously during the first block of the epoch boundary.
1143    /// So, # blocks for reward calculation is 1.
1144    fn get_reward_calculation_num_blocks(&self) -> Slot {
1145        self.partitioned_epoch_rewards_config()
1146            .reward_calculation_num_blocks
1147    }
1148
1149    /// Calculate the number of blocks required to distribute rewards to all stake accounts.
1150    fn get_reward_distribution_num_blocks(&self, rewards: &StakeRewards) -> u64 {
1151        let total_stake_accounts = rewards.len();
1152        if self.epoch_schedule.warmup && self.epoch < self.first_normal_epoch() {
1153            1
1154        } else {
1155            const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10;
1156            let num_chunks = miraland_accounts_db::accounts_hash::AccountsHasher::div_ceil(
1157                total_stake_accounts,
1158                self.partitioned_rewards_stake_account_stores_per_block() as usize,
1159            ) as u64;
1160
1161            // Limit the reward credit interval to 10% of the total number of slots in a epoch
1162            num_chunks.clamp(
1163                1,
1164                (self.epoch_schedule.slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1),
1165            )
1166        }
1167    }
1168
1169    /// Return `RewardInterval` enum for current bank
1170    fn get_reward_interval(&self) -> RewardInterval {
1171        if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) {
1172            RewardInterval::InsideInterval
1173        } else {
1174            RewardInterval::OutsideInterval
1175        }
1176    }
1177
    /// For testing only
    ///
    /// Forcibly end the partitioned-reward distribution interval by resetting
    /// `epoch_reward_status` to `Inactive`.
    pub fn force_reward_interval_end_for_tests(&mut self) {
        self.epoch_reward_status = EpochRewardStatus::Inactive;
    }
1182
    /// Shared implementation behind the public `new_from_parent*`
    /// constructors: build a child bank on `slot` from an (about to be)
    /// frozen `parent`.
    ///
    /// Phases, each timed for `report_new_bank_metrics`: clone/share parent
    /// state, construct the child, rebuild ancestors, run epoch-boundary
    /// work if the child crosses an epoch, pre-recompile loaded programs for
    /// an upcoming runtime-environment change, then refresh sysvars.
    fn _new_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        new_bank_options: NewBankOptions,
    ) -> Self {
        let mut time = Measure::start("bank::new_from_parent");
        let NewBankOptions { vote_only_bank } = new_bank_options;

        // The parent must be immutable before a child can be built on it,
        // and the child must live in a different slot.
        parent.freeze();
        assert_ne!(slot, parent.slot());

        let epoch_schedule = parent.epoch_schedule().clone();
        let epoch = epoch_schedule.get_epoch(slot);

        let (rc, bank_rc_creation_time_us) = measure_us!({
            let accounts_db = Arc::clone(&parent.rc.accounts.accounts_db);
            accounts_db.insert_default_bank_hash_stats(slot, parent.slot());
            BankRc {
                accounts: Arc::new(Accounts::new(accounts_db)),
                parent: RwLock::new(Some(Arc::clone(&parent))),
                slot,
                bank_id_generator: Arc::clone(&parent.rc.bank_id_generator),
            }
        });

        let (status_cache, status_cache_time_us) = measure_us!(Arc::clone(&parent.status_cache));

        let (fee_rate_governor, fee_components_time_us) = measure_us!(
            FeeRateGovernor::new_derived(&parent.fee_rate_governor, parent.signature_count())
        );

        let bank_id = rc.bank_id_generator.fetch_add(1, Relaxed) + 1;
        let (blockhash_queue, blockhash_queue_time_us) =
            measure_us!(RwLock::new(parent.blockhash_queue.read().unwrap().clone()));

        let (stakes_cache, stakes_cache_time_us) =
            measure_us!(StakesCache::new(parent.stakes_cache.stakes().clone()));

        let (epoch_stakes, epoch_stakes_time_us) = measure_us!(parent.epoch_stakes.clone());

        let (builtin_programs, builtin_programs_time_us) =
            measure_us!(parent.builtin_programs.clone());

        let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
            measure_us!(parent.rewards_pool_pubkeys.clone());

        let (transaction_debug_keys, transaction_debug_keys_time_us) =
            measure_us!(parent.transaction_debug_keys.clone());

        let (transaction_log_collector_config, transaction_log_collector_config_time_us) =
            measure_us!(parent.transaction_log_collector_config.clone());

        let (feature_set, feature_set_time_us) = measure_us!(parent.feature_set.clone());

        let accounts_data_size_initial = parent.load_accounts_data_size();
        let mut new = Self {
            skipped_rewrites: Mutex::default(),
            incremental_snapshot_persistence: None,
            rc,
            status_cache,
            slot,
            bank_id,
            epoch,
            blockhash_queue,

            // TODO: clean this up, so much special-case copying...
            hashes_per_tick: parent.hashes_per_tick,
            ticks_per_slot: parent.ticks_per_slot,
            ns_per_slot: parent.ns_per_slot,
            genesis_creation_time: parent.genesis_creation_time,
            slots_per_year: parent.slots_per_year,
            epoch_schedule,
            collected_rent: AtomicU64::new(0),
            rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch),
            max_tick_height: (slot + 1) * parent.ticks_per_slot,
            block_height: parent.block_height + 1,
            fee_rate_governor,
            capitalization: AtomicU64::new(parent.capitalization()),
            vote_only_bank,
            inflation: parent.inflation.clone(),
            transaction_count: AtomicU64::new(parent.transaction_count()),
            non_vote_transaction_count_since_restart: AtomicU64::new(
                parent.non_vote_transaction_count_since_restart(),
            ),
            transaction_error_count: AtomicU64::new(0),
            transaction_entries_count: AtomicU64::new(0),
            transactions_per_entry_max: AtomicU64::new(0),
            // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now
            stakes_cache,
            epoch_stakes,
            parent_hash: parent.hash(),
            parent_slot: parent.slot(),
            collector_id: *collector_id,
            collector_fees: AtomicU64::new(0),
            // real ancestors are filled in below, after `new` exists
            ancestors: Ancestors::default(),
            hash: RwLock::new(Hash::default()),
            is_delta: AtomicBool::new(false),
            builtin_programs,
            tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
            signature_count: AtomicU64::new(0),
            runtime_config: parent.runtime_config.clone(),
            hard_forks: parent.hard_forks.clone(),
            rewards: RwLock::new(vec![]),
            cluster_type: parent.cluster_type,
            lazy_rent_collection: AtomicBool::new(parent.lazy_rent_collection.load(Relaxed)),
            rewards_pool_pubkeys,
            transaction_debug_keys,
            transaction_log_collector_config,
            transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
            feature_set: Arc::clone(&feature_set),
            drop_callback: RwLock::new(OptionalDropCallback(
                parent
                    .drop_callback
                    .read()
                    .unwrap()
                    .0
                    .as_ref()
                    .map(|drop_callback| drop_callback.clone_box()),
            )),
            freeze_started: AtomicBool::new(false),
            cost_tracker: RwLock::new(CostTracker::default()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            fee_structure: parent.fee_structure.clone(),
            loaded_programs_cache: parent.loaded_programs_cache.clone(),
            epoch_reward_status: parent.epoch_reward_status.clone(),
            transaction_processor: TransactionBatchProcessor::default(),
        };

        // The transaction processor is built from several of `new`'s fields,
        // so it is replaced after the struct literal above is constructed.
        new.transaction_processor = TransactionBatchProcessor::new(
            new.slot,
            new.epoch,
            new.epoch_schedule.clone(),
            new.fee_structure.clone(),
            new.runtime_config.clone(),
            new.loaded_programs_cache.clone(),
        );

        // Ancestors = this slot plus every parent slot up the chain.
        let (_, ancestors_time_us) = measure_us!({
            let mut ancestors = Vec::with_capacity(1 + new.parents().len());
            ancestors.push(new.slot());
            new.parents().iter().for_each(|p| {
                ancestors.push(p.slot());
            });
            new.ancestors = Ancestors::from(ancestors);
        });

        // Following code may touch AccountsDb, requiring proper ancestors
        let (_, update_epoch_time_us) = measure_us!({
            if parent.epoch() < new.epoch() {
                new.process_new_epoch(
                    parent.epoch(),
                    parent.slot(),
                    parent.block_height(),
                    reward_calc_tracer,
                );
            } else {
                // Save a snapshot of stakes for use in consensus and stake weighted networking
                let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot);
                new.update_epoch_stakes(leader_schedule_epoch);
            }
            if new.is_partitioned_rewards_code_enabled() {
                new.distribute_partitioned_epoch_rewards();
            }
        });

        let (_, recompilation_time_us) = measure_us!({
            // Recompile loaded programs one at a time before the next epoch hits
            let (_epoch, slot_index) = new.get_epoch_and_slot_index(new.slot());
            let slots_in_epoch = new.get_slots_in_epoch(new.epoch());
            let slots_in_recompilation_phase =
                (miraland_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT as u64)
                    .min(slots_in_epoch)
                    .checked_div(2)
                    .unwrap();
            let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap();
            if loaded_programs_cache.upcoming_environments.is_some() {
                if let Some((key, program_to_recompile)) =
                    loaded_programs_cache.programs_to_recompile.pop()
                {
                    let effective_epoch = loaded_programs_cache.latest_root_epoch.saturating_add(1);
                    // Release the write lock before load_program, which takes it again.
                    drop(loaded_programs_cache);
                    let recompiled = new.load_program(&key, false, effective_epoch);
                    // Carry the old program's usage counters over to the recompiled entry.
                    recompiled
                        .tx_usage_counter
                        .fetch_add(program_to_recompile.tx_usage_counter.load(Relaxed), Relaxed);
                    recompiled
                        .ix_usage_counter
                        .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed);
                    let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap();
                    loaded_programs_cache.assign_program(key, recompiled);
                }
            } else if new.epoch() != loaded_programs_cache.latest_root_epoch
                || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch
            {
                // Anticipate the upcoming program runtime environment for the next epoch,
                // so we can try to recompile loaded programs before the feature transition hits.
                drop(loaded_programs_cache);
                let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true);
                let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap();
                let program_runtime_environment_v1 = create_program_runtime_environment_v1(
                    &feature_set,
                    &new.runtime_config.compute_budget.unwrap_or_default(),
                    false, /* deployment */
                    false, /* debugging_features */
                )
                .unwrap();
                let program_runtime_environment_v2 = create_program_runtime_environment_v2(
                    &new.runtime_config.compute_budget.unwrap_or_default(),
                    false, /* debugging_features */
                );
                let mut upcoming_environments = loaded_programs_cache.environments.clone();
                let changed_program_runtime_v1 =
                    *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1;
                let changed_program_runtime_v2 =
                    *upcoming_environments.program_runtime_v2 != program_runtime_environment_v2;
                if changed_program_runtime_v1 {
                    upcoming_environments.program_runtime_v1 =
                        Arc::new(program_runtime_environment_v1);
                }
                if changed_program_runtime_v2 {
                    upcoming_environments.program_runtime_v2 =
                        Arc::new(program_runtime_environment_v2);
                }
                loaded_programs_cache.upcoming_environments = Some(upcoming_environments);
                loaded_programs_cache.programs_to_recompile = loaded_programs_cache
                    .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2);
                loaded_programs_cache
                    .programs_to_recompile
                    .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(slot));
            }
        });

        // Update sysvars before processing transactions
        let (_, update_sysvars_time_us) = measure_us!({
            new.update_slot_hashes();
            new.update_stake_history(Some(parent.epoch()));
            new.update_clock(Some(parent.epoch()));
            new.update_fees();
            new.update_last_restart_slot()
        });

        let (_, fill_sysvar_cache_time_us) = measure_us!(new.fill_missing_sysvar_cache_entries());
        time.stop();

        report_new_bank_metrics(
            slot,
            parent.slot(),
            new.block_height,
            NewBankTimings {
                bank_rc_creation_time_us,
                total_elapsed_time_us: time.as_us(),
                status_cache_time_us,
                fee_components_time_us,
                blockhash_queue_time_us,
                stakes_cache_time_us,
                epoch_stakes_time_us,
                builtin_programs_time_us,
                rewards_pool_pubkeys_time_us,
                executor_cache_time_us: 0,
                transaction_debug_keys_time_us,
                transaction_log_collector_config_time_us,
                feature_set_time_us,
                ancestors_time_us,
                update_epoch_time_us,
                recompilation_time_us,
                update_sysvars_time_us,
                fill_sysvar_cache_time_us,
            },
        );

        // Flush the parent's program-cache stats and start the child's fresh.
        parent
            .loaded_programs_cache
            .read()
            .unwrap()
            .stats
            .submit(parent.slot());

        new.loaded_programs_cache.write().unwrap().stats.reset();
        new
    }
1467
    /// Epoch in which the new cooldown warmup rate for stake was activated
    /// (`None` if the corresponding feature is not active).
    pub fn new_warmup_cooldown_rate_epoch(&self) -> Option<Epoch> {
        self.feature_set
            .new_warmup_cooldown_rate_epoch(&self.epoch_schedule)
    }
1473
    /// process for the start of a new epoch
    ///
    /// Runs when this bank is the first bank of a new epoch. In order:
    /// applies pending feature activations, advances the stakes cache into
    /// the new epoch, snapshots epoch stakes for the leader-schedule epoch,
    /// and applies stake rewards/commission (partitioned or legacy path).
    /// Per-phase timings are reported via `report_new_epoch_metrics`.
    fn process_new_epoch(
        &mut self,
        parent_epoch: Epoch,
        parent_slot: Slot,
        parent_height: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) {
        let epoch = self.epoch();
        let slot = self.slot();
        // Dedicated thread pool for the stake activation and reward
        // calculations below.
        let (thread_pool, thread_pool_time) = measure!(
            ThreadPoolBuilder::new().build().unwrap(),
            "thread_pool_creation",
        );

        // Feature activations must happen before stakes/rewards so the new
        // epoch's behavior reflects any just-activated features.
        let (_, apply_feature_activations_time) = measure!(
            self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false),
            "apply_feature_activation",
        );

        // Add new entry to stakes.stake_history, set appropriate epoch and
        // update vote accounts with warmed up stakes before saving a
        // snapshot of stakes in epoch stakes
        let (_, activate_epoch_time) = measure!(
            self.stakes_cache.activate_epoch(
                epoch,
                &thread_pool,
                self.new_warmup_cooldown_rate_epoch()
            ),
            "activate_epoch",
        );

        // Save a snapshot of stakes for use in consensus and stake weighted networking
        let leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot);
        let (_, update_epoch_stakes_time) = measure!(
            self.update_epoch_stakes(leader_schedule_epoch),
            "update_epoch_stakes",
        );

        let mut rewards_metrics = RewardsMetrics::default();
        // After saving a snapshot of stakes, apply stake rewards and commission
        let (_, update_rewards_with_thread_pool_time) = measure!(
            {
                // Partitioned rewards spread distribution over multiple
                // blocks; the legacy path pays everything in this block.
                if self.is_partitioned_rewards_code_enabled() {
                    self.begin_partitioned_rewards(
                        reward_calc_tracer,
                        &thread_pool,
                        parent_epoch,
                        parent_slot,
                        parent_height,
                        &mut rewards_metrics,
                    );
                } else {
                    self.update_rewards_with_thread_pool(
                        parent_epoch,
                        reward_calc_tracer,
                        &thread_pool,
                        &mut rewards_metrics,
                    )
                }
            },
            "update_rewards_with_thread_pool",
        );

        report_new_epoch_metrics(
            epoch,
            slot,
            parent_slot,
            NewEpochTimings {
                thread_pool_time_us: thread_pool_time.as_us(),
                apply_feature_activations_time_us: apply_feature_activations_time.as_us(),
                activate_epoch_time_us: activate_epoch_time.as_us(),
                update_epoch_stakes_time_us: update_epoch_stakes_time.as_us(),
                update_rewards_with_thread_pool_time_us: update_rewards_with_thread_pool_time
                    .as_us(),
            },
            rewards_metrics,
        );
    }
1553
    /// partitioned reward distribution is complete.
    /// So, deactivate the epoch rewards sysvar.
    ///
    /// Panics if the epoch reward status is not currently `Active` — callers
    /// must only invoke this after distribution has actually finished.
    /// Any lamports left in the EpochRewards sysvar account are burned
    /// (account purged) so undistributed remainder does not linger.
    fn deactivate_epoch_reward_status(&mut self) {
        assert!(matches!(
            self.epoch_reward_status,
            EpochRewardStatus::Active(_)
        ));
        self.epoch_reward_status = EpochRewardStatus::Inactive;
        if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) {
            if account.lamports() > 0 {
                info!(
                    "burning {} extra lamports in EpochRewards sysvar account at slot {}",
                    account.lamports(),
                    self.slot()
                );
                self.log_epoch_rewards_sysvar("burn");
                self.burn_and_purge_account(&sysvar::epoch_rewards::id(), account);
            }
        }
    }
1574
1575    fn force_partition_rewards_in_first_block_of_epoch(&self) -> bool {
1576        self.partitioned_epoch_rewards_config()
1577            .test_enable_partitioned_rewards
1578            && self.get_reward_calculation_num_blocks() == 0
1579            && self.partitioned_rewards_stake_account_stores_per_block() == u64::MAX
1580    }
1581
    /// Begin the process of calculating and distributing rewards.
    /// This process can take multiple slots.
    ///
    /// Vote rewards are calculated and distributed immediately; stake rewards
    /// are partitioned and stored on `self.epoch_reward_status` for later
    /// distribution over `[credit_start, credit_end_exclusive)` block heights.
    /// Also creates the EpochRewards sysvar tracking undistributed rewards.
    fn begin_partitioned_rewards(
        &mut self,
        reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
        thread_pool: &ThreadPool,
        parent_epoch: Epoch,
        parent_slot: Slot,
        parent_block_height: u64,
        rewards_metrics: &mut RewardsMetrics,
    ) {
        let CalculateRewardsAndDistributeVoteRewardsResult {
            total_rewards,
            distributed_rewards,
            stake_rewards_by_partition,
        } = self.calculate_rewards_and_distribute_vote_rewards(
            parent_epoch,
            reward_calc_tracer,
            thread_pool,
            rewards_metrics,
        );

        let slot = self.slot();
        // Distribution starts after the reserved calculation blocks; one
        // partition is distributed per block thereafter.
        let credit_start = self.block_height() + self.get_reward_calculation_num_blocks();
        let credit_end_exclusive = credit_start + stake_rewards_by_partition.len() as u64;

        self.set_epoch_reward_status_active(stake_rewards_by_partition);

        // create EpochRewards sysvar that holds the balance of undistributed rewards with
        // (total_rewards, distributed_rewards, credit_end_exclusive), total capital will increase by (total_rewards - distributed_rewards)
        self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_end_exclusive);

        datapoint_info!(
            "epoch-rewards-status-update",
            ("start_slot", slot, i64),
            ("start_block_height", self.block_height(), i64),
            ("active", 1, i64),
            ("parent_slot", parent_slot, i64),
            ("parent_block_height", parent_block_height, i64),
        );
    }
1623
    /// Process reward distribution for the block if it is inside reward interval.
    ///
    /// No-op unless `epoch_reward_status` is `Active`. When this bank's block
    /// height falls inside `[credit_start, credit_end_exclusive)`, distributes
    /// the corresponding stake-reward partition; once the final partition has
    /// been handled, deactivates the reward status (burning sysvar leftovers).
    fn distribute_partitioned_epoch_rewards(&mut self) {
        let EpochRewardStatus::Active(status) = &self.epoch_reward_status else {
            return;
        };

        let height = self.block_height();
        let start_block_height = status.start_block_height;
        let credit_start = start_block_height + self.get_reward_calculation_num_blocks();
        let credit_end_exclusive = credit_start + status.stake_rewards_by_partition.len() as u64;
        // Sanity check: the distribution interval must fit within one epoch.
        assert!(
            self.epoch_schedule.get_slots_in_epoch(self.epoch)
                > credit_end_exclusive.saturating_sub(credit_start)
        );

        if height >= credit_start && height < credit_end_exclusive {
            // One partition per block: the partition index is the offset of
            // this block within the distribution interval.
            let partition_index = height - credit_start;
            self.distribute_epoch_rewards_in_partition(
                &status.stake_rewards_by_partition,
                partition_index,
            );
        }

        // This block consumed the final partition (or we are already past the
        // interval): end the active reward phase.
        if height.saturating_add(1) >= credit_end_exclusive {
            datapoint_info!(
                "epoch-rewards-status-update",
                ("slot", self.slot(), i64),
                ("block_height", height, i64),
                ("active", 0, i64),
                ("start_block_height", start_block_height, i64),
            );

            self.deactivate_epoch_reward_status();
        }
    }
1659
    /// Returns the configured cap (in bytes) on accounts-index scan results,
    /// or `None` if no limit was configured.
    pub fn byte_limit_for_scans(&self) -> Option<usize> {
        self.rc
            .accounts
            .accounts_db
            .accounts_index
            .scan_results_limit_bytes
    }
1667
1668    pub fn proper_ancestors_set(&self) -> HashSet<Slot> {
1669        HashSet::from_iter(self.proper_ancestors())
1670    }
1671
1672    /// Returns all ancestors excluding self.slot.
1673    pub(crate) fn proper_ancestors(&self) -> impl Iterator<Item = Slot> + '_ {
1674        self.ancestors
1675            .keys()
1676            .into_iter()
1677            .filter(move |slot| *slot != self.slot)
1678    }
1679
    /// Installs (or clears, with `None`) the drop callback, replacing any
    /// previously stored one. Presumably invoked when the bank is dropped —
    /// confirm against the `OptionalDropCallback` drop path.
    pub fn set_callback(&self, callback: Option<Box<dyn DropCallback + Send + Sync>>) {
        *self.drop_callback.write().unwrap() = OptionalDropCallback(callback);
    }
1683
    /// Whether this bank was created in vote-only mode.
    pub fn vote_only_bank(&self) -> bool {
        self.vote_only_bank
    }
1687
    /// Like `new_from_parent` but additionally:
    /// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots
    /// in the past
    /// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots
    /// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank
    /// * Calculates and sets the epoch accounts hash from the parent
    pub fn warp_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        data_source: CalcAccountsHashDataSource,
    ) -> Self {
        // Mark the epoch accounts hash as in-flight BEFORE computing it so
        // concurrent readers wait for the valid value set below.
        parent.freeze();
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_in_flight(parent.slot());
        let accounts_hash = parent.update_accounts_hash(data_source, false, true);
        let epoch_accounts_hash = accounts_hash.into();
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_valid(epoch_accounts_hash, parent.slot());

        // Capture the parent's timestamp before `parent` is moved into
        // `new_from_parent`.
        let parent_timestamp = parent.clock().unix_timestamp;
        let mut new = Bank::new_from_parent(parent, collector_id, slot);
        new.apply_feature_activations(ApplyFeatureActivationsCaller::WarpFromParent, false);
        new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot));
        // Skip PoH: jump the tick height straight to the max for this slot.
        new.tick_height.store(new.max_tick_height(), Relaxed);

        // Warp the clock sysvar: both the epoch start and current timestamps
        // inherit the parent's wall-clock time.
        let mut clock = new.clock();
        clock.epoch_start_timestamp = parent_timestamp;
        clock.unix_timestamp = parent_timestamp;
        new.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                new.inherit_specially_retained_account_fields(account),
            )
        });
        new.fill_missing_sysvar_cache_entries();
        new.freeze();
        new
    }
1735
    /// Create a bank from explicit arguments and deserialized fields from snapshot
    ///
    /// Rehydrates a `Bank` from `BankFieldsToDeserialize`: fields that were
    /// serialized are restored verbatim; fields not captured by the snapshot
    /// are re-initialized to defaults. Ends with sanity assertions that the
    /// snapshot is consistent with the provided genesis config.
    #[allow(clippy::float_cmp)]
    pub(crate) fn new_from_fields(
        bank_rc: BankRc,
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        fields: BankFieldsToDeserialize,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_data_size_initial: u64,
    ) -> Self {
        let now = Instant::now();
        let ancestors = Ancestors::from(&fields.ancestors);
        // For backward compatibility, we can only serialize and deserialize
        // Stakes<Delegation> in BankFieldsTo{Serialize,Deserialize}. But Bank
        // caches Stakes<StakeAccount>. Below Stakes<StakeAccount> is obtained
        // from Stakes<Delegation> by reading the full account state from
        // accounts-db. Note that it is crucial that these accounts are loaded
        // at the right slot and match precisely with serialized Delegations.
        let stakes = Stakes::new(&fields.stakes, |pubkey| {
            let (account, _slot) = bank_rc.accounts.load_with_fixed_root(&ancestors, pubkey)?;
            Some(account)
        })
        .expect(
            "Stakes cache is inconsistent with accounts-db. This can indicate \
            a corrupted snapshot or bugs in cached accounts or accounts-db.",
        );
        let stakes_accounts_load_duration = now.elapsed();
        // Assemble the bank; snapshot-backed fields come from `fields`, the
        // rest are defaults to be (re)computed after restart.
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            incremental_snapshot_persistence: fields.incremental_snapshot_persistence,
            rc: bank_rc,
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::new(fields.blockhash_queue),
            ancestors,
            hash: RwLock::new(fields.hash),
            parent_hash: fields.parent_hash,
            parent_slot: fields.parent_slot,
            hard_forks: Arc::new(RwLock::new(fields.hard_forks)),
            transaction_count: AtomicU64::new(fields.transaction_count),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::new(fields.tick_height),
            signature_count: AtomicU64::new(fields.signature_count),
            capitalization: AtomicU64::new(fields.capitalization),
            max_tick_height: fields.max_tick_height,
            hashes_per_tick: fields.hashes_per_tick,
            ticks_per_slot: fields.ticks_per_slot,
            ns_per_slot: fields.ns_per_slot,
            genesis_creation_time: fields.genesis_creation_time,
            slots_per_year: fields.slots_per_year,
            slot: fields.slot,
            bank_id: 0,
            epoch: fields.epoch,
            block_height: fields.block_height,
            collector_id: fields.collector_id,
            collector_fees: AtomicU64::new(fields.collector_fees),
            fee_rate_governor: fields.fee_rate_governor,
            collected_rent: AtomicU64::new(fields.collected_rent),
            // clone()-ing is needed to consider a gated behavior in rent_collector
            rent_collector: Self::get_rent_collector_from(&fields.rent_collector, fields.epoch),
            epoch_schedule: fields.epoch_schedule,
            inflation: Arc::new(RwLock::new(fields.inflation)),
            stakes_cache: StakesCache::new(stakes),
            epoch_stakes: fields.epoch_stakes,
            is_delta: AtomicBool::new(fields.is_delta),
            builtin_programs: HashSet::<Pubkey>::default(),
            runtime_config,
            rewards: RwLock::new(vec![]),
            cluster_type: Some(genesis_config.cluster_type),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: debug_keys,
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            // A non-default hash means the snapshot captured a frozen bank.
            freeze_started: AtomicBool::new(fields.hash != Hash::default()),
            vote_only_bank: false,
            cost_tracker: RwLock::new(CostTracker::default()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            fee_structure: FeeStructure::default(),
            loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new(
                fields.slot,
                fields.epoch,
            ))),
            epoch_reward_status: fields.epoch_reward_status,
            transaction_processor: TransactionBatchProcessor::default(),
        };

        // Rebuild the transaction processor now that the fields and shared
        // caches it needs are available on `bank`.
        bank.transaction_processor = TransactionBatchProcessor::new(
            bank.slot,
            bank.epoch,
            bank.epoch_schedule.clone(),
            bank.fee_structure.clone(),
            bank.runtime_config.clone(),
            bank.loaded_programs_cache.clone(),
        );

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );
        bank.fill_missing_sysvar_cache_entries();
        bank.rebuild_skipped_rewrites();

        // Sanity assertions between bank snapshot and genesis config
        // Consider removing from serializable bank state
        // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing
        // from the passed in genesis_config instead (as new()/new_with_paths() already do)
        assert_eq!(
            bank.genesis_creation_time, genesis_config.creation_time,
            "Bank snapshot genesis creation time does not match genesis.bin creation time.\
             The snapshot and genesis.bin might pertain to different clusters"
        );
        assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
        assert_eq!(
            bank.ns_per_slot,
            genesis_config.poh_config.target_tick_duration.as_nanos()
                * genesis_config.ticks_per_slot as u128
        );
        assert_eq!(bank.max_tick_height, (bank.slot + 1) * bank.ticks_per_slot);
        assert_eq!(
            bank.slots_per_year,
            years_as_slots(
                1.0,
                &genesis_config.poh_config.target_tick_duration,
                bank.ticks_per_slot,
            )
        );
        assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule);
        assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot));

        datapoint_info!(
            "bank-new-from-fields",
            (
                "accounts_data_len-from-snapshot",
                fields.accounts_data_len as i64,
                i64
            ),
            (
                "accounts_data_len-from-generate_index",
                accounts_data_size_initial as i64,
                i64
            ),
            (
                "stakes_accounts_load_duration_us",
                stakes_accounts_load_duration.as_micros(),
                i64
            ),
        );
        bank
    }
1896
    /// Return subset of bank fields representing serializable state
    ///
    /// Atomic counters are sampled with `Relaxed` loads; lock-protected
    /// fields are read under their locks. Borrowed fields tie the returned
    /// struct's lifetime to `self` and `ancestors`.
    pub(crate) fn get_fields_to_serialize<'a>(
        &'a self,
        ancestors: &'a HashMap<Slot, usize>,
    ) -> BankFieldsToSerialize<'a> {
        BankFieldsToSerialize {
            blockhash_queue: &self.blockhash_queue,
            ancestors,
            hash: *self.hash.read().unwrap(),
            parent_hash: self.parent_hash,
            parent_slot: self.parent_slot,
            hard_forks: &self.hard_forks,
            transaction_count: self.transaction_count.load(Relaxed),
            tick_height: self.tick_height.load(Relaxed),
            signature_count: self.signature_count.load(Relaxed),
            capitalization: self.capitalization.load(Relaxed),
            max_tick_height: self.max_tick_height,
            hashes_per_tick: self.hashes_per_tick,
            ticks_per_slot: self.ticks_per_slot,
            ns_per_slot: self.ns_per_slot,
            genesis_creation_time: self.genesis_creation_time,
            slots_per_year: self.slots_per_year,
            slot: self.slot,
            epoch: self.epoch,
            block_height: self.block_height,
            collector_id: self.collector_id,
            collector_fees: self.collector_fees.load(Relaxed),
            // Serialized as a default value; presumably retained only for
            // snapshot ABI compatibility — confirm against the serializer.
            fee_calculator: FeeCalculator::default(),
            fee_rate_governor: self.fee_rate_governor.clone(),
            collected_rent: self.collected_rent.load(Relaxed),
            rent_collector: self.rent_collector.clone(),
            epoch_schedule: self.epoch_schedule.clone(),
            inflation: *self.inflation.read().unwrap(),
            stakes: &self.stakes_cache,
            epoch_stakes: &self.epoch_stakes,
            is_delta: self.is_delta.load(Relaxed),
            accounts_data_len: self.load_accounts_data_size(),
        }
    }
1936
    /// Returns this bank's collector id.
    pub fn collector_id(&self) -> &Pubkey {
        &self.collector_id
    }
1940
    /// Returns the cluster's genesis creation time.
    pub fn genesis_creation_time(&self) -> UnixTimestamp {
        self.genesis_creation_time
    }
1944
    /// Returns the slot this bank corresponds to.
    pub fn slot(&self) -> Slot {
        self.slot
    }
1948
    /// Returns this bank's unique id (distinct from its slot).
    pub fn bank_id(&self) -> BankId {
        self.bank_id
    }
1952
    /// Returns the epoch this bank belongs to.
    pub fn epoch(&self) -> Epoch {
        self.epoch
    }
1956
    /// Returns the first full-length ("normal") epoch per the epoch schedule.
    pub fn first_normal_epoch(&self) -> Epoch {
        self.epoch_schedule().first_normal_epoch
    }
1960
    /// Acquires a read lock on the bank hash; while held, the hash cannot be
    /// written (e.g. by freezing).
    pub fn freeze_lock(&self) -> RwLockReadGuard<Hash> {
        self.hash.read().unwrap()
    }
1964
    /// Returns the bank hash (default `Hash` until the bank is frozen).
    pub fn hash(&self) -> Hash {
        *self.hash.read().unwrap()
    }
1968
    /// A bank is frozen once its hash has been set to a non-default value.
    pub fn is_frozen(&self) -> bool {
        *self.hash.read().unwrap() != Hash::default()
    }
1972
    /// Whether the freeze process has begun (it may not have completed yet).
    pub fn freeze_started(&self) -> bool {
        self.freeze_started.load(Relaxed)
    }
1976
1977    pub fn status_cache_ancestors(&self) -> Vec<u64> {
1978        let mut roots = self.status_cache.read().unwrap().roots().clone();
1979        let min = roots.iter().min().cloned().unwrap_or(0);
1980        for ancestor in self.ancestors.keys() {
1981            if ancestor >= min {
1982                roots.insert(ancestor);
1983            }
1984        }
1985
1986        let mut ancestors: Vec<_> = roots.into_iter().collect();
1987        #[allow(clippy::stable_sort_primitive)]
1988        ancestors.sort();
1989        ancestors
1990    }
1991
1992    /// computed unix_timestamp at this slot height
1993    pub fn unix_timestamp_from_genesis(&self) -> i64 {
1994        self.genesis_creation_time + ((self.slot as u128 * self.ns_per_slot) / 1_000_000_000) as i64
1995    }
1996
    /// Rewrites the sysvar account at `pubkey` with the account produced by
    /// `updater` (which receives the current account, if any), after adjusting
    /// its balance for rent exemption; capitalization is updated accordingly.
    fn update_sysvar_account<F>(&self, pubkey: &Pubkey, updater: F)
    where
        F: Fn(&Option<AccountSharedData>) -> AccountSharedData,
    {
        let old_account = self.get_account_with_fixed_root(pubkey);
        let mut new_account = updater(&old_account);

        // When new sysvar comes into existence (with RENT_UNADJUSTED_INITIAL_BALANCE lamports),
        // this code ensures that the sysvar's balance is adjusted to be rent-exempt.
        //
        // More generally, this code always re-calculates for possible sysvar data size change,
        // although there is no such sysvars currently.
        self.adjust_sysvar_balance_for_rent(&mut new_account);
        self.store_account_and_update_capitalization(pubkey, &new_account);
    }
2012
2013    fn inherit_specially_retained_account_fields(
2014        &self,
2015        old_account: &Option<AccountSharedData>,
2016    ) -> InheritableAccountFields {
2017        const RENT_UNADJUSTED_INITIAL_BALANCE: u64 = 1;
2018
2019        (
2020            old_account
2021                .as_ref()
2022                .map(|a| a.lamports())
2023                .unwrap_or(RENT_UNADJUSTED_INITIAL_BALANCE),
2024            old_account
2025                .as_ref()
2026                .map(|a| a.rent_epoch())
2027                .unwrap_or(INITIAL_RENT_EPOCH),
2028        )
2029    }
2030
    /// Reads the Clock sysvar account; falls back to a default `Clock` when
    /// the account is missing or cannot be deserialized.
    pub fn clock(&self) -> sysvar::clock::Clock {
        from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
            .unwrap_or_default()
    }
2035
    /// Recomputes and stores the Clock sysvar for this bank.
    ///
    /// The unix timestamp is taken from the stake-weighted vote estimate when
    /// available (clamped so it never moves backwards relative to the
    /// ancestor's clock), and `epoch_start_timestamp` is advanced on epoch
    /// boundaries. Slot 0 always uses the genesis-derived timestamp.
    /// `parent_epoch` is `None` when warping.
    fn update_clock(&self, parent_epoch: Option<Epoch>) {
        let mut unix_timestamp = self.clock().unix_timestamp;
        // set epoch_start_timestamp to None to warp timestamp
        let epoch_start_timestamp = {
            let epoch = if let Some(epoch) = parent_epoch {
                epoch
            } else {
                self.epoch()
            };
            let first_slot_in_epoch = self.epoch_schedule().get_first_slot_in_epoch(epoch);
            Some((first_slot_in_epoch, self.clock().epoch_start_timestamp))
        };
        let max_allowable_drift = MaxAllowableDrift {
            fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST,
            slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
        };

        let ancestor_timestamp = self.clock().unix_timestamp;
        if let Some(timestamp_estimate) =
            self.get_timestamp_estimate(max_allowable_drift, epoch_start_timestamp)
        {
            unix_timestamp = timestamp_estimate;
            // Never let the corrected timestamp go backwards.
            if timestamp_estimate < ancestor_timestamp {
                unix_timestamp = ancestor_timestamp;
            }
        }
        datapoint_info!(
            "bank-timestamp-correction",
            ("slot", self.slot(), i64),
            ("from_genesis", self.unix_timestamp_from_genesis(), i64),
            ("corrected", unix_timestamp, i64),
            ("ancestor_timestamp", ancestor_timestamp, i64),
        );
        let mut epoch_start_timestamp =
            // On epoch boundaries, update epoch_start_timestamp
            if parent_epoch.is_some() && parent_epoch.unwrap() != self.epoch() {
                unix_timestamp
            } else {
                self.clock().epoch_start_timestamp
            };
        // Slot 0 has no votes to estimate from; use the genesis-derived time.
        if self.slot == 0 {
            unix_timestamp = self.unix_timestamp_from_genesis();
            epoch_start_timestamp = self.unix_timestamp_from_genesis();
        }
        let clock = sysvar::clock::Clock {
            slot: self.slot,
            epoch_start_timestamp,
            epoch: self.epoch_schedule().get_epoch(self.slot),
            leader_schedule_epoch: self.epoch_schedule().get_leader_schedule_epoch(self.slot),
            unix_timestamp,
        };
        self.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2094
    /// Updates the LastRestartSlot sysvar (feature-gated) to the most recent
    /// hard fork at or before this bank's slot, writing the account only when
    /// the value actually changes.
    pub fn update_last_restart_slot(&self) {
        let feature_flag = self
            .feature_set
            .is_active(&feature_set::last_restart_slot_sysvar::id());

        if feature_flag {
            // First, see what the currently stored last restart slot is. This
            // account may not exist yet if the feature was just activated.
            let current_last_restart_slot = self
                .get_account(&sysvar::last_restart_slot::id())
                .and_then(|account| {
                    let lrs: Option<LastRestartSlot> = from_account(&account);
                    lrs
                })
                .map(|account| account.last_restart_slot);

            let last_restart_slot = {
                let slot = self.slot;
                let hard_forks_r = self.hard_forks.read().unwrap();

                // Only consider hard forks <= this bank's slot to avoid prematurely applying
                // a hard fork that is set to occur in the future.
                hard_forks_r
                    .iter()
                    .rev()
                    .find(|(hard_fork, _)| *hard_fork <= slot)
                    .map(|(slot, _)| *slot)
                    .unwrap_or(0)
            };

            // Only need to write if the last restart has changed
            if current_last_restart_slot != Some(last_restart_slot) {
                self.update_sysvar_account(&sysvar::last_restart_slot::id(), |account| {
                    create_account(
                        &LastRestartSlot { last_restart_slot },
                        self.inherit_specially_retained_account_fields(account),
                    )
                });
            }
        }
    }
2136
    /// Test helper: overwrites the given sysvar's account and rebuilds the
    /// whole sysvar cache so the change is immediately visible.
    pub fn set_sysvar_for_tests<T>(&self, sysvar: &T)
    where
        T: Sysvar + SysvarId,
    {
        self.update_sysvar_account(&T::id(), |account| {
            create_account(
                sysvar,
                self.inherit_specially_retained_account_fields(account),
            )
        });
        // Simply force fill sysvar cache rather than checking which sysvar was
        // actually updated since tests don't need to be optimized for performance.
        self.reset_sysvar_cache();
        self.fill_missing_sysvar_cache_entries();
    }
2152
    /// Adds this bank's slot to the SlotHistory sysvar (creating a default
    /// history if the account does not exist yet) and stores it back.
    fn update_slot_history(&self) {
        self.update_sysvar_account(&sysvar::slot_history::id(), |account| {
            let mut slot_history = account
                .as_ref()
                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
                .unwrap_or_default();
            slot_history.add(self.slot());
            create_account(
                &slot_history,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2166
    /// Records the parent bank's (slot, hash) pair in the SlotHashes sysvar
    /// (creating a default list if the account does not exist yet).
    fn update_slot_hashes(&self) {
        self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| {
            let mut slot_hashes = account
                .as_ref()
                .map(|account| from_account::<SlotHashes, _>(account).unwrap())
                .unwrap_or_default();
            slot_hashes.add(self.parent_slot, self.parent_hash);
            create_account(
                &slot_hashes,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2180
    /// Reads the SlotHistory sysvar. Panics if the account is missing or
    /// cannot be deserialized.
    pub fn get_slot_history(&self) -> SlotHistory {
        from_account(&self.get_account(&sysvar::slot_history::id()).unwrap()).unwrap()
    }
2184
2185    fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
2186        // update epoch_stakes cache
2187        //  if my parent didn't populate for this staker's epoch, we've
2188        //  crossed a boundary
2189        if self.epoch_stakes.get(&leader_schedule_epoch).is_none() {
2190            self.epoch_stakes.retain(|&epoch, _| {
2191                epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
2192            });
2193            let stakes = self.stakes_cache.stakes().clone();
2194            let stakes = Arc::new(StakesEnum::from(stakes));
2195            let new_epoch_stakes = EpochStakes::new(stakes, leader_schedule_epoch);
2196            info!(
2197                "new epoch stakes, epoch: {}, total_stake: {}",
2198                leader_schedule_epoch,
2199                new_epoch_stakes.total_stake(),
2200            );
2201
2202            // It is expensive to log the details of epoch stakes. Only log them at "trace"
2203            // level for debugging purpose.
2204            if log::log_enabled!(log::Level::Trace) {
2205                let vote_stakes: HashMap<_, _> = self
2206                    .stakes_cache
2207                    .stakes()
2208                    .vote_accounts()
2209                    .delegated_stakes()
2210                    .map(|(pubkey, stake)| (*pubkey, stake))
2211                    .collect();
2212                trace!("new epoch stakes, stakes: {vote_stakes:#?}");
2213            }
2214            self.epoch_stakes
2215                .insert(leader_schedule_epoch, new_epoch_stakes);
2216        }
2217    }
2218
    #[allow(deprecated)]
    /// Writes the (deprecated) Fees sysvar from the current fee-rate
    /// governor, unless the `disable_fees_sysvar` feature is active.
    fn update_fees(&self) {
        if !self
            .feature_set
            .is_active(&feature_set::disable_fees_sysvar::id())
        {
            self.update_sysvar_account(&sysvar::fees::id(), |account| {
                create_account(
                    &sysvar::fees::Fees::new(&self.fee_rate_governor.create_fee_calculator()),
                    self.inherit_specially_retained_account_fields(account),
                )
            });
        }
    }
2233
    /// Writes the Rent sysvar from this bank's rent collector settings.
    fn update_rent(&self) {
        self.update_sysvar_account(&sysvar::rent::id(), |account| {
            create_account(
                &self.rent_collector.rent,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2242
2243    fn update_epoch_schedule(&self) {
2244        self.update_sysvar_account(&sysvar::epoch_schedule::id(), |account| {
2245            create_account(
2246                self.epoch_schedule(),
2247                self.inherit_specially_retained_account_fields(account),
2248            )
2249        });
2250    }
2251
2252    fn update_stake_history(&self, epoch: Option<Epoch>) {
2253        if epoch == Some(self.epoch()) {
2254            return;
2255        }
2256        // if I'm the first Bank in an epoch, ensure stake_history is updated
2257        self.update_sysvar_account(&sysvar::stake_history::id(), |account| {
2258            create_account::<sysvar::stake_history::StakeHistory>(
2259                self.stakes_cache.stakes().history(),
2260                self.inherit_specially_retained_account_fields(account),
2261            )
2262        });
2263    }
2264
2265    pub fn epoch_duration_in_years(&self, prev_epoch: Epoch) -> f64 {
2266        // period: time that has passed as a fraction of a year, basically the length of
2267        //  an epoch as a fraction of a year
2268        //  calculated as: slots_elapsed / (slots / year)
2269        self.epoch_schedule().get_slots_in_epoch(prev_epoch) as f64 / self.slots_per_year
2270    }
2271
2272    // Calculates the starting-slot for inflation from the activation slot.
2273    // This method assumes that `pico_inflation` will be enabled before `full_inflation`, giving
2274    // precedence to the latter. However, since `pico_inflation` is fixed-rate Inflation, should
2275    // `pico_inflation` be enabled 2nd, the incorrect start slot provided here should have no
2276    // effect on the inflation calculation.
2277    fn get_inflation_start_slot(&self) -> Slot {
2278        let mut slots = self
2279            .feature_set
2280            .full_inflation_features_enabled()
2281            .iter()
2282            .filter_map(|id| self.feature_set.activated_slot(id))
2283            .collect::<Vec<_>>();
2284        slots.sort_unstable();
2285        slots.first().cloned().unwrap_or_else(|| {
2286            self.feature_set
2287                .activated_slot(&feature_set::pico_inflation::id())
2288                .unwrap_or(0)
2289        })
2290    }
2291
2292    fn get_inflation_num_slots(&self) -> u64 {
2293        let inflation_activation_slot = self.get_inflation_start_slot();
2294        // Normalize inflation_start to align with the start of rewards accrual.
2295        let inflation_start_slot = self.epoch_schedule().get_first_slot_in_epoch(
2296            self.epoch_schedule()
2297                .get_epoch(inflation_activation_slot)
2298                .saturating_sub(1),
2299        );
2300        self.epoch_schedule().get_first_slot_in_epoch(self.epoch()) - inflation_start_slot
2301    }
2302
2303    pub fn slot_in_year_for_inflation(&self) -> f64 {
2304        let num_slots = self.get_inflation_num_slots();
2305
2306        // calculated as: num_slots / (slots / year)
2307        num_slots as f64 / self.slots_per_year
2308    }
2309
2310    fn calculate_previous_epoch_inflation_rewards(
2311        &self,
2312        prev_epoch_capitalization: u64,
2313        prev_epoch: Epoch,
2314    ) -> PrevEpochInflationRewards {
2315        let slot_in_year = self.slot_in_year_for_inflation();
2316        let (validator_rate, foundation_rate) = {
2317            let inflation = self.inflation.read().unwrap();
2318            (
2319                (*inflation).validator(slot_in_year),
2320                (*inflation).foundation(slot_in_year),
2321            )
2322        };
2323
2324        let prev_epoch_duration_in_years = self.epoch_duration_in_years(prev_epoch);
2325        let validator_rewards = (validator_rate
2326            * prev_epoch_capitalization as f64
2327            * prev_epoch_duration_in_years) as u64;
2328
2329        PrevEpochInflationRewards {
2330            validator_rewards,
2331            prev_epoch_duration_in_years,
2332            validator_rate,
2333            foundation_rate,
2334        }
2335    }
2336
    /// Calculate rewards from previous epoch to prepare for partitioned distribution.
    ///
    /// Computes the inflation amount for `prev_epoch`, calculates the vote and
    /// stake rewards, and hashes the stake rewards into distribution
    /// partitions keyed off the parent bank's blockhash.
    fn calculate_rewards_for_partitioning(
        &self,
        prev_epoch: Epoch,
        reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
        thread_pool: &ThreadPool,
        metrics: &mut RewardsMetrics,
    ) -> PartitionedRewardsCalculation {
        let capitalization = self.capitalization();
        let PrevEpochInflationRewards {
            validator_rewards,
            prev_epoch_duration_in_years,
            validator_rate,
            foundation_rate,
        } = self.calculate_previous_epoch_inflation_rewards(capitalization, prev_epoch);

        // Snapshot taken before any rewards are paid, so the caller can later
        // derive how much was actually distributed to vote accounts.
        let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();

        let (vote_account_rewards, mut stake_rewards) = self
            .calculate_validator_rewards(
                prev_epoch,
                validator_rewards,
                reward_calc_tracer,
                thread_pool,
                metrics,
            )
            .unwrap_or_default();

        // Number of blocks the stake rewards will be distributed over.
        let num_partitions = self.get_reward_distribution_num_blocks(&stake_rewards.stake_rewards);
        let parent_blockhash = self
            .parent()
            .expect("Partitioned rewards calculation must still have access to parent Bank.")
            .last_blockhash();
        // `mem::take` moves the rewards Vec out (leaving an empty one), so the
        // remaining fields of `stake_rewards` stay usable below.
        let stake_rewards_by_partition = hash_rewards_into_partitions(
            std::mem::take(&mut stake_rewards.stake_rewards),
            &parent_blockhash,
            num_partitions as usize,
        );

        PartitionedRewardsCalculation {
            vote_account_rewards,
            stake_rewards_by_partition: StakeRewardCalculationPartitioned {
                stake_rewards_by_partition,
                total_stake_rewards_lamports: stake_rewards.total_stake_rewards_lamports,
            },
            old_vote_balance_and_staked,
            validator_rewards,
            validator_rate,
            foundation_rate,
            prev_epoch_duration_in_years,
            capitalization,
        }
    }
2390
    // Calculate rewards from previous epoch and distribute vote rewards
    //
    // Vote rewards are stored (paid out) immediately; stake rewards are only
    // calculated here and returned for later partitioned distribution.
    fn calculate_rewards_and_distribute_vote_rewards(
        &self,
        prev_epoch: Epoch,
        reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
        thread_pool: &ThreadPool,
        metrics: &mut RewardsMetrics,
    ) -> CalculateRewardsAndDistributeVoteRewardsResult {
        let PartitionedRewardsCalculation {
            vote_account_rewards,
            stake_rewards_by_partition,
            old_vote_balance_and_staked,
            validator_rewards,
            validator_rate,
            foundation_rate,
            prev_epoch_duration_in_years,
            capitalization,
        } = self.calculate_rewards_for_partitioning(
            prev_epoch,
            reward_calc_tracer,
            thread_pool,
            metrics,
        );
        // Persist the updated vote accounts now; this mutates account state.
        let vote_rewards = self.store_vote_accounts_partitioned(vote_account_rewards, metrics);

        // update reward history of JUST vote_rewards, stake_rewards is vec![] here
        self.update_reward_history(vec![], vote_rewards);

        let StakeRewardCalculationPartitioned {
            stake_rewards_by_partition,
            total_stake_rewards_lamports,
        } = stake_rewards_by_partition;

        // the remaining code mirrors `update_rewards_with_thread_pool()`

        let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();

        // This is for vote rewards only.
        let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked;
        self.assert_validator_rewards_paid(validator_rewards_paid);

        // verify that we didn't pay any more than we expected to
        assert!(validator_rewards >= validator_rewards_paid + total_stake_rewards_lamports);

        info!(
            "distributed vote rewards: {} out of {}, remaining {}",
            validator_rewards_paid, validator_rewards, total_stake_rewards_lamports
        );

        let (num_stake_accounts, num_vote_accounts) = {
            let stakes = self.stakes_cache.stakes();
            (
                stakes.stake_delegations().len(),
                stakes.vote_accounts().len(),
            )
        };
        // Paid vote rewards are newly-minted lamports; capitalization grows.
        self.capitalization
            .fetch_add(validator_rewards_paid, Relaxed);

        let active_stake = if let Some(stake_history_entry) =
            self.stakes_cache.stakes().history().get(prev_epoch)
        {
            stake_history_entry.effective
        } else {
            0
        };

        datapoint_info!(
            "epoch_rewards",
            ("slot", self.slot, i64),
            ("epoch", prev_epoch, i64),
            ("validator_rate", validator_rate, f64),
            ("foundation_rate", foundation_rate, f64),
            ("epoch_duration_in_years", prev_epoch_duration_in_years, f64),
            ("validator_rewards", validator_rewards_paid, i64),
            ("active_stake", active_stake, i64),
            ("pre_capitalization", capitalization, i64),
            ("post_capitalization", self.capitalization(), i64),
            ("num_stake_accounts", num_stake_accounts, i64),
            ("num_vote_accounts", num_vote_accounts, i64),
        );

        CalculateRewardsAndDistributeVoteRewardsResult {
            total_rewards: validator_rewards_paid + total_stake_rewards_lamports,
            distributed_rewards: validator_rewards_paid,
            stake_rewards_by_partition,
        }
    }
2479
2480    fn assert_validator_rewards_paid(&self, validator_rewards_paid: u64) {
2481        assert_eq!(
2482            validator_rewards_paid,
2483            u64::try_from(
2484                self.rewards
2485                    .read()
2486                    .unwrap()
2487                    .par_iter()
2488                    .map(|(_address, reward_info)| {
2489                        match reward_info.reward_type {
2490                            RewardType::Voting | RewardType::Staking => reward_info.lamports,
2491                            _ => 0,
2492                        }
2493                    })
2494                    .sum::<i64>()
2495            )
2496            .unwrap()
2497        );
2498    }
2499
2500    // update rewards based on the previous epoch
2501    fn update_rewards_with_thread_pool(
2502        &mut self,
2503        prev_epoch: Epoch,
2504        reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
2505        thread_pool: &ThreadPool,
2506        metrics: &mut RewardsMetrics,
2507    ) {
2508        let capitalization = self.capitalization();
2509        let PrevEpochInflationRewards {
2510            validator_rewards,
2511            prev_epoch_duration_in_years,
2512            validator_rate,
2513            foundation_rate,
2514        } = self.calculate_previous_epoch_inflation_rewards(capitalization, prev_epoch);
2515
2516        let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
2517
2518        self.pay_validator_rewards_with_thread_pool(
2519            prev_epoch,
2520            validator_rewards,
2521            reward_calc_tracer,
2522            thread_pool,
2523            metrics,
2524        );
2525
2526        let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
2527        let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked;
2528        assert_eq!(
2529            validator_rewards_paid,
2530            u64::try_from(
2531                self.rewards
2532                    .read()
2533                    .unwrap()
2534                    .iter()
2535                    .map(|(_address, reward_info)| {
2536                        match reward_info.reward_type {
2537                            RewardType::Voting | RewardType::Staking => reward_info.lamports,
2538                            _ => 0,
2539                        }
2540                    })
2541                    .sum::<i64>()
2542            )
2543            .unwrap()
2544        );
2545
2546        // verify that we didn't pay any more than we expected to
2547        assert!(validator_rewards >= validator_rewards_paid);
2548
2549        info!(
2550            "distributed inflation: {} (rounded from: {})",
2551            validator_rewards_paid, validator_rewards
2552        );
2553        let (num_stake_accounts, num_vote_accounts) = {
2554            let stakes = self.stakes_cache.stakes();
2555            (
2556                stakes.stake_delegations().len(),
2557                stakes.vote_accounts().len(),
2558            )
2559        };
2560        self.capitalization
2561            .fetch_add(validator_rewards_paid, Relaxed);
2562
2563        let active_stake = if let Some(stake_history_entry) =
2564            self.stakes_cache.stakes().history().get(prev_epoch)
2565        {
2566            stake_history_entry.effective
2567        } else {
2568            0
2569        };
2570
2571        datapoint_warn!(
2572            "epoch_rewards",
2573            ("slot", self.slot, i64),
2574            ("epoch", prev_epoch, i64),
2575            ("validator_rate", validator_rate, f64),
2576            ("foundation_rate", foundation_rate, f64),
2577            ("epoch_duration_in_years", prev_epoch_duration_in_years, f64),
2578            ("validator_rewards", validator_rewards_paid, i64),
2579            ("active_stake", active_stake, i64),
2580            ("pre_capitalization", capitalization, i64),
2581            ("post_capitalization", self.capitalization(), i64),
2582            ("num_stake_accounts", num_stake_accounts, i64),
2583            ("num_vote_accounts", num_vote_accounts, i64),
2584        );
2585    }
2586
2587    fn filter_stake_delegations<'a>(
2588        &self,
2589        stakes: &'a Stakes<StakeAccount<Delegation>>,
2590    ) -> Vec<(&'a Pubkey, &'a StakeAccount<Delegation>)> {
2591        if self
2592            .feature_set
2593            .is_active(&feature_set::stake_minimum_delegation_for_rewards::id())
2594        {
2595            let num_stake_delegations = stakes.stake_delegations().len();
2596            let min_stake_delegation =
2597                miraland_stake_program::get_minimum_delegation(&self.feature_set)
2598                    .max(LAMPORTS_PER_MLN);
2599
2600            let (stake_delegations, filter_timer) = measure!(stakes
2601                .stake_delegations()
2602                .iter()
2603                .filter(|(_stake_pubkey, cached_stake_account)| {
2604                    cached_stake_account.delegation().stake >= min_stake_delegation
2605                })
2606                .collect::<Vec<_>>());
2607
2608            datapoint_info!(
2609                "stake_account_filter_time",
2610                ("filter_time_us", filter_timer.as_us(), i64),
2611                ("num_stake_delegations_before", num_stake_delegations, i64),
2612                ("num_stake_delegations_after", stake_delegations.len(), i64)
2613            );
2614            stake_delegations
2615        } else {
2616            stakes.stake_delegations().iter().collect()
2617        }
2618    }
2619
    /// Joins stake delegations with their vote accounts for reward
    /// calculation. Returns the voter -> (vote state, delegations) map, any
    /// invalid vote keys encountered (with the reason), and a count of
    /// vote-account cache misses.
    fn _load_vote_and_stake_accounts(
        &self,
        thread_pool: &ThreadPool,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) -> LoadVoteAndStakeAccountsResult {
        let stakes = self.stakes_cache.stakes();
        let stake_delegations = self.filter_stake_delegations(&stakes);

        // Obtain all unique voter pubkeys from stake delegations.
        // Always extends the larger set with the smaller one, minimizing the
        // number of insertions during the parallel reduce.
        fn merge(mut acc: HashSet<Pubkey>, other: HashSet<Pubkey>) -> HashSet<Pubkey> {
            if acc.len() < other.len() {
                return merge(other, acc);
            }
            acc.extend(other);
            acc
        }
        let voter_pubkeys = thread_pool.install(|| {
            stake_delegations
                .par_iter()
                .fold(
                    HashSet::default,
                    |mut voter_pubkeys, (_stake_pubkey, stake_account)| {
                        let delegation = stake_account.delegation();
                        voter_pubkeys.insert(delegation.voter_pubkey);
                        voter_pubkeys
                    },
                )
                .reduce(HashSet::default, merge)
        });
        // Obtain vote-accounts for unique voter pubkeys.
        let cached_vote_accounts = stakes.vote_accounts();
        let miraland_vote_program: Pubkey = miraland_vote_program::id();
        let vote_accounts_cache_miss_count = AtomicUsize::default();
        let get_vote_account = |vote_pubkey: &Pubkey| -> Option<VoteAccount> {
            if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) {
                return Some(vote_account.clone());
            }
            // If accounts-db contains a valid vote account, then it should
            // already have been cached in cached_vote_accounts; so the code
            // below is only for sanity check, and can be removed once
            // vote_accounts_cache_miss_count is shown to be always zero.
            let account = self.get_account_with_fixed_root(vote_pubkey)?;
            if account.owner() == &miraland_vote_program
                && VoteState::deserialize(account.data()).is_ok()
            {
                vote_accounts_cache_miss_count.fetch_add(1, Relaxed);
            }
            VoteAccount::try_from(account).ok()
        };
        // Unusable vote accounts are recorded here with the reason so the
        // caller can purge them from the stakes cache.
        let invalid_vote_keys = DashMap::<Pubkey, InvalidCacheEntryReason>::new();
        let make_vote_delegations_entry = |vote_pubkey| {
            let Some(vote_account) = get_vote_account(&vote_pubkey) else {
                invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::Missing);
                return None;
            };
            if vote_account.owner() != &miraland_vote_program {
                invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::WrongOwner);
                return None;
            }
            let Ok(vote_state) = vote_account.vote_state().cloned() else {
                invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::BadState);
                return None;
            };
            // Delegations start empty; they are filled in by the join below.
            let vote_with_stake_delegations = VoteWithStakeDelegations {
                vote_state: Arc::new(vote_state),
                vote_account: AccountSharedData::from(vote_account),
                delegations: Vec::default(),
            };
            Some((vote_pubkey, vote_with_stake_delegations))
        };
        let vote_with_stake_delegations_map: DashMap<Pubkey, VoteWithStakeDelegations> =
            thread_pool.install(|| {
                voter_pubkeys
                    .into_par_iter()
                    .filter_map(make_vote_delegations_entry)
                    .collect()
            });
        // Join stake accounts with vote-accounts.
        let push_stake_delegation = |(stake_pubkey, stake_account): (&Pubkey, &StakeAccount<_>)| {
            let delegation = stake_account.delegation();
            // Delegations to a voter that failed validation above are dropped.
            let Some(mut vote_delegations) =
                vote_with_stake_delegations_map.get_mut(&delegation.voter_pubkey)
            else {
                return;
            };
            if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() {
                let delegation =
                    InflationPointCalculationEvent::Delegation(delegation, miraland_vote_program);
                let event = RewardCalculationEvent::Staking(stake_pubkey, &delegation);
                reward_calc_tracer(&event);
            }
            let stake_delegation = (*stake_pubkey, stake_account.clone());
            vote_delegations.delegations.push(stake_delegation);
        };
        thread_pool.install(|| {
            stake_delegations
                .into_par_iter()
                .for_each(push_stake_delegation);
        });
        LoadVoteAndStakeAccountsResult {
            vote_with_stake_delegations_map,
            invalid_vote_keys,
            vote_accounts_cache_miss_count: vote_accounts_cache_miss_count.into_inner(),
        }
    }
2725
    /// calculate and return some reward calc info to avoid recalculation across functions
    fn get_epoch_reward_calculate_param_info<'a>(
        &self,
        stakes: &'a Stakes<StakeAccount<Delegation>>,
    ) -> EpochRewardCalculateParamInfo<'a> {
        // NOTE(review): the history is cloned from `self.stakes_cache` rather
        // than from the `stakes` argument. Callers currently pass
        // `self.stakes_cache.stakes()`, so both refer to the same data —
        // confirm before passing any other `Stakes` here.
        let stake_history = self.stakes_cache.stakes().history().clone();

        // Delegations eligible for rewards (a minimum-delegation filter may
        // apply depending on feature activation).
        let stake_delegations = self.filter_stake_delegations(stakes);

        let cached_vote_accounts = stakes.vote_accounts();

        EpochRewardCalculateParamInfo {
            stake_history,
            stake_delegations,
            cached_vote_accounts,
        }
    }
2743
2744    /// Calculate epoch reward and return vote and stake rewards.
2745    fn calculate_validator_rewards(
2746        &self,
2747        rewarded_epoch: Epoch,
2748        rewards: u64,
2749        reward_calc_tracer: Option<impl RewardCalcTracer>,
2750        thread_pool: &ThreadPool,
2751        metrics: &mut RewardsMetrics,
2752    ) -> Option<(VoteRewardsAccounts, StakeRewardCalculation)> {
2753        let stakes = self.stakes_cache.stakes();
2754        let reward_calculate_param = self.get_epoch_reward_calculate_param_info(&stakes);
2755
2756        self.calculate_reward_points_partitioned(
2757            &reward_calculate_param,
2758            rewards,
2759            thread_pool,
2760            metrics,
2761        )
2762        .map(|point_value| {
2763            self.calculate_stake_vote_rewards(
2764                &reward_calculate_param,
2765                rewarded_epoch,
2766                point_value,
2767                thread_pool,
2768                reward_calc_tracer,
2769                metrics,
2770            )
2771        })
2772    }
2773
    /// Load, calculate and payout epoch rewards for stake and vote accounts
    fn pay_validator_rewards_with_thread_pool(
        &mut self,
        rewarded_epoch: Epoch,
        rewards: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        thread_pool: &ThreadPool,
        metrics: &mut RewardsMetrics,
    ) {
        let stake_history = self.stakes_cache.stakes().history().clone();
        let vote_with_stake_delegations_map =
            self.load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref(), metrics);

        // `None` means zero total points were earned, in which case no
        // rewards are paid at all.
        let point_value = self.calculate_reward_points(
            &vote_with_stake_delegations_map,
            rewards,
            &stake_history,
            thread_pool,
            metrics,
        );

        if let Some(point_value) = point_value {
            let (vote_account_rewards, stake_rewards) = self.redeem_rewards(
                vote_with_stake_delegations_map,
                rewarded_epoch,
                point_value,
                &stake_history,
                thread_pool,
                reward_calc_tracer.as_ref(),
                metrics,
            );

            // this checking of an unactivated feature can be enabled in tests or with a validator by passing `--partitioned-epoch-rewards-compare-calculation`
            if self
                .partitioned_epoch_rewards_config()
                .test_compare_partitioned_epoch_rewards
            {
                // immutable `&self` to avoid side effects
                (self as &Bank).compare_with_partitioned_rewards(
                    &stake_rewards,
                    &vote_account_rewards,
                    rewarded_epoch,
                    thread_pool,
                    null_tracer(),
                );
            }

            // Persist the updated accounts, then record both reward sets in
            // the reward history.
            self.store_stake_accounts(thread_pool, &stake_rewards, metrics);
            let vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
            self.update_reward_history(stake_rewards, vote_rewards);
        }
    }
2826
    /// compare the vote and stake accounts between the normal rewards calculation code
    /// and the partitioned rewards calculation code
    /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code
    /// This fn should have NO side effects.
    /// This fn is only called in tests or with a debug cli arg prior to partitioned rewards feature activation.
    ///
    /// Panics (via assert) if the two calculations disagree on any stake or
    /// vote reward, or if either side has a reward the other lacks.
    fn compare_with_partitioned_rewards_results(
        stake_rewards_expected: &[StakeReward],
        vote_rewards_expected: &DashMap<Pubkey, VoteReward>,
        partitioned_rewards: PartitionedRewardsCalculation,
    ) {
        // put partitioned stake rewards in a hashmap
        let mut stake_rewards: HashMap<Pubkey, &StakeReward> = HashMap::default();
        partitioned_rewards
            .stake_rewards_by_partition
            .stake_rewards_by_partition
            .iter()
            .flatten()
            .for_each(|stake_reward| {
                stake_rewards.insert(stake_reward.stake_pubkey, stake_reward);
            });

        // verify stake rewards match expected
        stake_rewards_expected.iter().for_each(|stake_reward| {
            let partitioned = stake_rewards.remove(&stake_reward.stake_pubkey).unwrap();
            assert_eq!(partitioned, stake_reward);
        });
        // every partitioned stake reward must have been matched (and removed)
        assert!(stake_rewards.is_empty(), "{stake_rewards:?}");

        // Index the partitioned vote rewards by pubkey; `accounts_to_store[i]`
        // pairs positionally with `rewards[i]`.
        let mut vote_rewards: HashMap<Pubkey, (RewardInfo, AccountSharedData)> = HashMap::default();
        partitioned_rewards
            .vote_account_rewards
            .accounts_to_store
            .iter()
            .enumerate()
            .for_each(|(i, account)| {
                if let Some(account) = account {
                    let reward = &partitioned_rewards.vote_account_rewards.rewards[i];
                    vote_rewards.insert(reward.0, (reward.1, account.clone()));
                }
            });

        // verify vote rewards match expected
        vote_rewards_expected.iter().for_each(|entry| {
            if entry.value().vote_needs_store {
                let partitioned = vote_rewards.remove(entry.key()).unwrap();
                // Apply the reward lamports to both copies before comparing,
                // so the comparison covers the post-payout account state.
                let mut to_store_partitioned = partitioned.1.clone();
                to_store_partitioned.set_lamports(partitioned.0.post_balance);
                let mut to_store_normal = entry.value().vote_account.clone();
                _ = to_store_normal.checked_add_lamports(entry.value().vote_rewards);
                assert_eq!(to_store_partitioned, to_store_normal, "{:?}", entry.key());
            }
        });
        // every partitioned vote reward must have been matched (and removed)
        assert!(vote_rewards.is_empty(), "{vote_rewards:?}");
        info!(
            "verified partitioned rewards calculation matching: {}, {}",
            partitioned_rewards
                .stake_rewards_by_partition
                .stake_rewards_by_partition
                .iter()
                .map(|rewards| rewards.len())
                .sum::<usize>(),
            partitioned_rewards
                .vote_account_rewards
                .accounts_to_store
                .len()
        );
    }
2894
2895    /// compare the vote and stake accounts between the normal rewards calculation code
2896    /// and the partitioned rewards calculation code
2897    /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code
2898    /// This fn should have NO side effects.
2899    fn compare_with_partitioned_rewards(
2900        &self,
2901        stake_rewards_expected: &[StakeReward],
2902        vote_rewards_expected: &DashMap<Pubkey, VoteReward>,
2903        rewarded_epoch: Epoch,
2904        thread_pool: &ThreadPool,
2905        reward_calc_tracer: Option<impl RewardCalcTracer>,
2906    ) {
2907        let partitioned_rewards = self.calculate_rewards_for_partitioning(
2908            rewarded_epoch,
2909            reward_calc_tracer,
2910            thread_pool,
2911            &mut RewardsMetrics::default(),
2912        );
2913        Self::compare_with_partitioned_rewards_results(
2914            stake_rewards_expected,
2915            vote_rewards_expected,
2916            partitioned_rewards,
2917        );
2918    }
2919
2920    fn load_vote_and_stake_accounts(
2921        &mut self,
2922        thread_pool: &ThreadPool,
2923        reward_calc_tracer: Option<impl RewardCalcTracer>,
2924        metrics: &mut RewardsMetrics,
2925    ) -> VoteWithStakeDelegationsMap {
2926        let (
2927            LoadVoteAndStakeAccountsResult {
2928                vote_with_stake_delegations_map,
2929                invalid_vote_keys,
2930                vote_accounts_cache_miss_count,
2931            },
2932            measure,
2933        ) = measure!({
2934            self._load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref())
2935        });
2936        metrics
2937            .load_vote_and_stake_accounts_us
2938            .fetch_add(measure.as_us(), Relaxed);
2939        metrics.vote_accounts_cache_miss_count += vote_accounts_cache_miss_count;
2940        self.stakes_cache
2941            .handle_invalid_keys(invalid_vote_keys, self.slot());
2942        vote_with_stake_delegations_map
2943    }
2944
    /// Calculates epoch reward points from stake/vote accounts.
    /// Returns reward lamports and points for the epoch or none if points == 0.
    fn calculate_reward_points_partitioned(
        &self,
        reward_calculate_params: &EpochRewardCalculateParamInfo,
        rewards: u64,
        thread_pool: &ThreadPool,
        metrics: &RewardsMetrics,
    ) -> Option<PointValue> {
        let EpochRewardCalculateParamInfo {
            stake_history,
            stake_delegations,
            cached_vote_accounts,
        } = reward_calculate_params;

        let miraland_vote_program: Pubkey = miraland_vote_program::id();

        let get_vote_account = |vote_pubkey: &Pubkey| -> Option<VoteAccount> {
            if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) {
                return Some(vote_account.clone());
            }
            // If accounts-db contains a valid vote account, then it should
            // already have been cached in cached_vote_accounts; so the code
            // below is only for sanity checking, and can be removed once
            // the cache is deemed to be reliable.
            let account = self.get_account_with_fixed_root(vote_pubkey)?;
            VoteAccount::try_from(account).ok()
        };

        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        // Sum points across all delegations in parallel. A delegation whose
        // vote account is missing, wrongly owned, or has undeserializable
        // state contributes 0 points.
        let (points, measure_us) = measure_us!(thread_pool.install(|| {
            stake_delegations
                .par_iter()
                .map(|(_stake_pubkey, stake_account)| {
                    let delegation = stake_account.delegation();
                    let vote_pubkey = delegation.voter_pubkey;

                    let Some(vote_account) = get_vote_account(&vote_pubkey) else {
                        return 0;
                    };
                    if vote_account.owner() != &miraland_vote_program {
                        return 0;
                    }
                    let Ok(vote_state) = vote_account.vote_state() else {
                        return 0;
                    };

                    stake_state::calculate_points(
                        stake_account.stake_state(),
                        vote_state,
                        stake_history,
                        new_warmup_cooldown_rate_epoch,
                    )
                    .unwrap_or(0)
                })
                .sum::<u128>()
        }));
        metrics.calculate_points_us.fetch_add(measure_us, Relaxed);

        // Zero total points means there is nothing to distribute.
        (points > 0).then_some(PointValue { rewards, points })
    }
3006
    /// Sums epoch reward points across every stake delegation in
    /// `vote_with_stake_delegations_map` (non-partitioned rewards path).
    ///
    /// Returns `Some(PointValue)` pairing the epoch reward lamports with the
    /// total points, or `None` when the total is zero (nothing to distribute).
    fn calculate_reward_points(
        &self,
        vote_with_stake_delegations_map: &VoteWithStakeDelegationsMap,
        rewards: u64,
        stake_history: &StakeHistory,
        thread_pool: &ThreadPool,
        metrics: &RewardsMetrics,
    ) -> Option<PointValue> {
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        // Nested parallel iteration: outer over vote accounts, inner over each
        // vote account's delegations. Point-calculation errors count as 0.
        let (points, measure) = measure!(thread_pool.install(|| {
            vote_with_stake_delegations_map
                .par_iter()
                .map(|entry| {
                    let VoteWithStakeDelegations {
                        vote_state,
                        delegations,
                        ..
                    } = entry.value();

                    delegations
                        .par_iter()
                        .map(|(_stake_pubkey, stake_account)| {
                            stake_state::calculate_points(
                                stake_account.stake_state(),
                                vote_state,
                                stake_history,
                                new_warmup_cooldown_rate_epoch,
                            )
                            .unwrap_or(0)
                        })
                        .sum::<u128>()
                })
                .sum()
        }));
        metrics
            .calculate_points_us
            .fetch_add(measure.as_us(), Relaxed);

        (points > 0).then_some(PointValue { rewards, points })
    }
3047
    /// Calculates epoch rewards for stake/vote accounts
    /// Returns vote rewards, stake rewards, and the sum of all stake rewards in lamports
    ///
    /// Partitioned-rewards variant: redeems each delegation against a local
    /// copy of its stake account (no accounts are stored here) and accumulates
    /// the voter-side rewards into a concurrent map keyed by vote pubkey.
    fn calculate_stake_vote_rewards(
        &self,
        reward_calculate_params: &EpochRewardCalculateParamInfo,
        rewarded_epoch: Epoch,
        point_value: PointValue,
        thread_pool: &ThreadPool,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        metrics: &mut RewardsMetrics,
    ) -> (VoteRewardsAccounts, StakeRewardCalculation) {
        let EpochRewardCalculateParamInfo {
            stake_history,
            stake_delegations,
            cached_vote_accounts,
        } = reward_calculate_params;

        let miraland_vote_program: Pubkey = miraland_vote_program::id();

        // Resolve a vote account, preferring the cache; accounts-db is only a
        // sanity-check fallback (see comment inside).
        let get_vote_account = |vote_pubkey: &Pubkey| -> Option<VoteAccount> {
            if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) {
                return Some(vote_account.clone());
            }
            // If accounts-db contains a valid vote account, then it should
            // already have been cached in cached_vote_accounts; so the code
            // below is only for sanity checking, and can be removed once
            // the cache is deemed to be reliable.
            let account = self.get_account_with_fixed_root(vote_pubkey)?;
            VoteAccount::try_from(account).ok()
        };

        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        // Voter rewards are accumulated into this concurrent map from the
        // parallel loop below; stake rewards are collected directly.
        let vote_account_rewards: VoteRewards = DashMap::new();
        let total_stake_rewards = AtomicU64::default();
        let (stake_rewards, measure_stake_rewards_us) = measure_us!(thread_pool.install(|| {
            stake_delegations
                .par_iter()
                .filter_map(|(stake_pubkey, stake_account)| {
                    // curry closure to add the contextual stake_pubkey
                    let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| {
                        // inner
                        move |inner_event: &_| {
                            outer(&RewardCalculationEvent::Staking(stake_pubkey, inner_event))
                        }
                    });

                    let stake_pubkey = **stake_pubkey;
                    let stake_account = (*stake_account).to_owned();

                    let delegation = stake_account.delegation();
                    let (mut stake_account, stake_state) =
                        <(AccountSharedData, StakeStateV2)>::from(stake_account);
                    let vote_pubkey = delegation.voter_pubkey;
                    // Delegations with missing/invalid vote accounts earn nothing.
                    let vote_account = get_vote_account(&vote_pubkey)?;
                    if vote_account.owner() != &miraland_vote_program {
                        return None;
                    }
                    let vote_state = vote_account.vote_state().cloned().ok()?;

                    let pre_lamport = stake_account.lamports();

                    // Credit this delegation's share of the epoch rewards into
                    // the local copy of the stake account.
                    let redeemed = stake_state::redeem_rewards(
                        rewarded_epoch,
                        stake_state,
                        &mut stake_account,
                        &vote_state,
                        &point_value,
                        stake_history,
                        reward_calc_tracer.as_ref(),
                        new_warmup_cooldown_rate_epoch,
                    );

                    let post_lamport = stake_account.lamports();

                    if let Ok((stakers_reward, voters_reward)) = redeemed {
                        debug!(
                            "calculated reward: {} {} {} {}",
                            stake_pubkey, pre_lamport, post_lamport, stakers_reward
                        );

                        // track voter rewards
                        let mut voters_reward_entry = vote_account_rewards
                            .entry(vote_pubkey)
                            .or_insert(VoteReward {
                                vote_account: vote_account.into(),
                                commission: vote_state.commission,
                                vote_rewards: 0,
                                vote_needs_store: false,
                            });

                        voters_reward_entry.vote_needs_store = true;
                        voters_reward_entry.vote_rewards = voters_reward_entry
                            .vote_rewards
                            .saturating_add(voters_reward);

                        let post_balance = stake_account.lamports();
                        total_stake_rewards.fetch_add(stakers_reward, Relaxed);
                        return Some(StakeReward {
                            stake_pubkey,
                            stake_reward_info: RewardInfo {
                                reward_type: RewardType::Staking,
                                lamports: i64::try_from(stakers_reward).unwrap(),
                                post_balance,
                                commission: Some(vote_state.commission),
                            },
                            stake_account,
                        });
                    } else {
                        debug!(
                            "stake_state::redeem_rewards() failed for {}: {:?}",
                            stake_pubkey, redeemed
                        );
                    }
                    None
                })
                .collect()
        }));
        // Convert the voter-reward map into the dense, storable representation.
        let (vote_rewards, measure_vote_rewards_us) =
            measure_us!(Self::calc_vote_accounts_to_store(vote_account_rewards));

        metrics.redeem_rewards_us += measure_stake_rewards_us + measure_vote_rewards_us;

        (
            vote_rewards,
            StakeRewardCalculation {
                stake_rewards,
                total_stake_rewards_lamports: total_stake_rewards.load(Relaxed),
            },
        )
    }
3178
    /// Redeems epoch rewards for every delegation in
    /// `vote_with_stake_delegations_map` (non-partitioned rewards path).
    ///
    /// Consumes the map, flattening it into a parallel stream of
    /// (vote_pubkey, vote_state, delegation) tuples, and returns the
    /// accumulated per-vote-account rewards plus the list of per-stake-account
    /// rewards. Accounts are not stored here; callers persist them separately.
    fn redeem_rewards(
        &self,
        vote_with_stake_delegations_map: DashMap<Pubkey, VoteWithStakeDelegations>,
        rewarded_epoch: Epoch,
        point_value: PointValue,
        stake_history: &StakeHistory,
        thread_pool: &ThreadPool,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        metrics: &mut RewardsMetrics,
    ) -> (VoteRewards, StakeRewards) {
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        let vote_account_rewards: VoteRewards =
            DashMap::with_capacity(vote_with_stake_delegations_map.len());
        // Seed a zero-reward entry per vote account, then fan out its
        // delegations; the redemption loop below adds rewards to these entries.
        let stake_delegation_iterator = vote_with_stake_delegations_map.into_par_iter().flat_map(
            |(
                vote_pubkey,
                VoteWithStakeDelegations {
                    vote_state,
                    vote_account,
                    delegations,
                },
            )| {
                vote_account_rewards.insert(
                    vote_pubkey,
                    VoteReward {
                        vote_account,
                        commission: vote_state.commission,
                        vote_rewards: 0,
                        vote_needs_store: false,
                    },
                );
                delegations
                    .into_par_iter()
                    .map(move |delegation| (vote_pubkey, Arc::clone(&vote_state), delegation))
            },
        );

        let (stake_rewards, measure) = measure!(thread_pool.install(|| {
            stake_delegation_iterator
                .filter_map(|(vote_pubkey, vote_state, (stake_pubkey, stake_account))| {
                    // curry closure to add the contextual stake_pubkey
                    let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| {
                        // inner
                        move |inner_event: &_| {
                            outer(&RewardCalculationEvent::Staking(&stake_pubkey, inner_event))
                        }
                    });
                    let (mut stake_account, stake_state) =
                        <(AccountSharedData, StakeStateV2)>::from(stake_account);
                    // Credit this delegation's share of the epoch rewards into
                    // the stake account.
                    let redeemed = stake_state::redeem_rewards(
                        rewarded_epoch,
                        stake_state,
                        &mut stake_account,
                        &vote_state,
                        &point_value,
                        stake_history,
                        reward_calc_tracer.as_ref(),
                        new_warmup_cooldown_rate_epoch,
                    );
                    if let Ok((stakers_reward, voters_reward)) = redeemed {
                        // track voter rewards
                        if let Some(VoteReward {
                            vote_account: _,
                            commission: _,
                            vote_rewards: vote_rewards_sum,
                            vote_needs_store,
                        }) = vote_account_rewards.get_mut(&vote_pubkey).as_deref_mut()
                        {
                            *vote_needs_store = true;
                            *vote_rewards_sum = vote_rewards_sum.saturating_add(voters_reward);
                        }

                        let post_balance = stake_account.lamports();
                        return Some(StakeReward {
                            stake_pubkey,
                            stake_reward_info: RewardInfo {
                                reward_type: RewardType::Staking,
                                lamports: i64::try_from(stakers_reward).unwrap(),
                                post_balance,
                                commission: Some(vote_state.commission),
                            },
                            stake_account,
                        });
                    } else {
                        debug!(
                            "stake_state::redeem_rewards() failed for {}: {:?}",
                            stake_pubkey, redeemed
                        );
                    }
                    None
                })
                .collect()
        }));
        metrics.redeem_rewards_us += measure.as_us();
        (vote_account_rewards, stake_rewards)
    }
3275
3276    fn store_stake_accounts(
3277        &self,
3278        thread_pool: &ThreadPool,
3279        stake_rewards: &[StakeReward],
3280        metrics: &RewardsMetrics,
3281    ) {
3282        // store stake account even if stake_reward is 0
3283        // because credits observed has changed
3284        let now = Instant::now();
3285        let slot = self.slot();
3286        self.stakes_cache.update_stake_accounts(
3287            thread_pool,
3288            stake_rewards,
3289            self.new_warmup_cooldown_rate_epoch(),
3290        );
3291        assert!(!self.freeze_started());
3292        thread_pool.install(|| {
3293            stake_rewards
3294                .par_chunks(512)
3295                .for_each(|chunk| self.rc.accounts.store_accounts_cached((slot, chunk)))
3296        });
3297        metrics
3298            .store_stake_accounts_us
3299            .fetch_add(now.elapsed().as_micros() as u64, Relaxed);
3300    }
3301
3302    /// store stake rewards in partition
3303    /// return the sum of all the stored rewards
3304    ///
3305    /// Note: even if staker's reward is 0, the stake account still needs to be stored because
3306    /// credits observed has changed
3307    fn store_stake_accounts_in_partition(&self, stake_rewards: &[StakeReward]) -> u64 {
3308        // Verify that stake account `lamports + reward_amount` matches what we have in the
3309        // rewarded account. This code will have a performance hit - an extra load and compare of
3310        // the stake accounts. This is for debugging. Once we are confident, we can disable the
3311        // check.
3312        const VERIFY_REWARD_LAMPORT: bool = true;
3313
3314        if VERIFY_REWARD_LAMPORT {
3315            for r in stake_rewards {
3316                let stake_pubkey = r.stake_pubkey;
3317                let reward_amount = r.get_stake_reward();
3318                let post_stake_account = &r.stake_account;
3319                if let Some(curr_stake_account) = self.get_account_with_fixed_root(&stake_pubkey) {
3320                    let pre_lamport = curr_stake_account.lamports();
3321                    let post_lamport = post_stake_account.lamports();
3322                    assert_eq!(pre_lamport + u64::try_from(reward_amount).unwrap(), post_lamport,
3323                               "stake account balance has changed since the reward calculation! account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}");
3324                }
3325            }
3326        }
3327
3328        self.store_accounts((self.slot(), stake_rewards));
3329        stake_rewards
3330            .iter()
3331            .map(|stake_reward| stake_reward.stake_reward_info.lamports)
3332            .sum::<i64>() as u64
3333    }
3334
3335    fn store_vote_accounts_partitioned(
3336        &self,
3337        vote_account_rewards: VoteRewardsAccounts,
3338        metrics: &RewardsMetrics,
3339    ) -> Vec<(Pubkey, RewardInfo)> {
3340        let (_, measure_us) = measure_us!({
3341            // reformat data to make it not sparse.
3342            // `StorableAccounts` does not efficiently handle sparse data.
3343            // Not all entries in `vote_account_rewards.accounts_to_store` have a Some(account) to store.
3344            let to_store = vote_account_rewards
3345                .accounts_to_store
3346                .iter()
3347                .filter_map(|account| account.as_ref())
3348                .enumerate()
3349                .map(|(i, account)| (&vote_account_rewards.rewards[i].0, account))
3350                .collect::<Vec<_>>();
3351            self.store_accounts((self.slot(), &to_store[..]));
3352        });
3353
3354        metrics
3355            .store_vote_accounts_us
3356            .fetch_add(measure_us, Relaxed);
3357
3358        vote_account_rewards.rewards
3359    }
3360
3361    fn store_vote_accounts(
3362        &self,
3363        vote_account_rewards: VoteRewards,
3364        metrics: &RewardsMetrics,
3365    ) -> Vec<(Pubkey, RewardInfo)> {
3366        let (vote_rewards, measure) = measure!(vote_account_rewards
3367            .into_iter()
3368            .filter_map(
3369                |(
3370                    vote_pubkey,
3371                    VoteReward {
3372                        mut vote_account,
3373                        commission,
3374                        vote_rewards,
3375                        vote_needs_store,
3376                    },
3377                )| {
3378                    if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
3379                        debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
3380                        return None;
3381                    }
3382
3383                    if vote_needs_store {
3384                        self.store_account(&vote_pubkey, &vote_account);
3385                    }
3386
3387                    Some((
3388                        vote_pubkey,
3389                        RewardInfo {
3390                            reward_type: RewardType::Voting,
3391                            lamports: vote_rewards as i64,
3392                            post_balance: vote_account.lamports(),
3393                            commission: Some(commission),
3394                        },
3395                    ))
3396                },
3397            )
3398            .collect::<Vec<_>>());
3399
3400        metrics
3401            .store_vote_accounts_us
3402            .fetch_add(measure.as_us(), Relaxed);
3403        vote_rewards
3404    }
3405
3406    /// return reward info for each vote account
3407    /// return account data for each vote account that needs to be stored
3408    /// This return value is a little awkward at the moment so that downstream existing code in the non-partitioned rewards code path can be re-used without duplication or modification.
3409    /// This function is copied from the existing code path's `store_vote_accounts`.
3410    /// The primary differences:
3411    /// - we want this fn to have no side effects (such as actually storing vote accounts) so that we
3412    ///   can compare the expected results with the current code path
3413    /// - we want to be able to batch store the vote accounts later for improved performance/cache updating
3414    fn calc_vote_accounts_to_store(
3415        vote_account_rewards: DashMap<Pubkey, VoteReward>,
3416    ) -> VoteRewardsAccounts {
3417        let len = vote_account_rewards.len();
3418        let mut result = VoteRewardsAccounts {
3419            rewards: Vec::with_capacity(len),
3420            accounts_to_store: Vec::with_capacity(len),
3421        };
3422        vote_account_rewards.into_iter().for_each(
3423            |(
3424                vote_pubkey,
3425                VoteReward {
3426                    mut vote_account,
3427                    commission,
3428                    vote_rewards,
3429                    vote_needs_store,
3430                },
3431            )| {
3432                if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
3433                    debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
3434                    return;
3435                }
3436
3437                result.rewards.push((
3438                    vote_pubkey,
3439                    RewardInfo {
3440                        reward_type: RewardType::Voting,
3441                        lamports: vote_rewards as i64,
3442                        post_balance: vote_account.lamports(),
3443                        commission: Some(commission),
3444                    },
3445                ));
3446                result
3447                    .accounts_to_store
3448                    .push(vote_needs_store.then_some(vote_account));
3449            },
3450        );
3451        result
3452    }
3453
3454    fn update_reward_history(
3455        &self,
3456        stake_rewards: StakeRewards,
3457        mut vote_rewards: Vec<(Pubkey, RewardInfo)>,
3458    ) {
3459        let additional_reserve = stake_rewards.len() + vote_rewards.len();
3460        let mut rewards = self.rewards.write().unwrap();
3461        rewards.reserve(additional_reserve);
3462        rewards.append(&mut vote_rewards);
3463        stake_rewards
3464            .into_iter()
3465            .filter(|x| x.get_stake_reward() > 0)
3466            .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
3467    }
3468
3469    /// insert non-zero stake rewards to self.rewards
3470    /// Return the number of rewards inserted
3471    fn update_reward_history_in_partition(&self, stake_rewards: &[StakeReward]) -> usize {
3472        let mut rewards = self.rewards.write().unwrap();
3473        rewards.reserve(stake_rewards.len());
3474        let initial_len = rewards.len();
3475        stake_rewards
3476            .iter()
3477            .filter(|x| x.get_stake_reward() > 0)
3478            .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
3479        rewards.len().saturating_sub(initial_len)
3480    }
3481
    /// Process reward credits for a partition of rewards
    /// Store the rewards to AccountsDB, update reward history record and total capitalization.
    ///
    /// The ordering here matters: accounts are stored first, then
    /// capitalization is increased by the distributed amount, then the
    /// EpochRewards sysvar's undistributed balance is decreased by the same
    /// amount, and finally the reward history is appended.
    fn distribute_epoch_rewards_in_partition(
        &self,
        all_stake_rewards: &[Vec<StakeReward>],
        partition_index: u64,
    ) {
        let pre_capitalization = self.capitalization();
        let this_partition_stake_rewards = &all_stake_rewards[partition_index as usize];

        let (total_rewards_in_lamports, store_stake_accounts_us) =
            measure_us!(self.store_stake_accounts_in_partition(this_partition_stake_rewards));

        // increase total capitalization by the distributed rewards
        self.capitalization
            .fetch_add(total_rewards_in_lamports, Relaxed);

        // decrease distributed capital from epoch rewards sysvar
        self.update_epoch_rewards_sysvar(total_rewards_in_lamports);

        // update reward history for this partitioned distribution
        self.update_reward_history_in_partition(this_partition_stake_rewards);

        let metrics = RewardsStoreMetrics {
            pre_capitalization,
            post_capitalization: self.capitalization(),
            // NOTE(review): this is the number of partitions, not the number of
            // stake accounts — confirm whether the metric name is intentional.
            total_stake_accounts_count: all_stake_rewards.len(),
            partition_index,
            store_stake_accounts_us,
            store_stake_accounts_count: this_partition_stake_rewards.len(),
            distributed_rewards: total_rewards_in_lamports,
        };

        report_partitioned_reward_metrics(self, metrics);
    }
3517
3518    /// true if it is ok to run partitioned rewards code.
3519    /// This means the feature is activated or certain testing situations.
3520    fn is_partitioned_rewards_code_enabled(&self) -> bool {
3521        self.is_partitioned_rewards_feature_enabled()
3522            || self
3523                .partitioned_epoch_rewards_config()
3524                .test_enable_partitioned_rewards
3525    }
3526
3527    /// Helper fn to log epoch_rewards sysvar
3528    fn log_epoch_rewards_sysvar(&self, prefix: &str) {
3529        if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) {
3530            let epoch_rewards: sysvar::epoch_rewards::EpochRewards =
3531                from_account(&account).unwrap();
3532            info!(
3533                "{prefix} epoch_rewards sysvar: {:?}",
3534                (account.lamports(), epoch_rewards)
3535            );
3536        } else {
3537            info!("{prefix} epoch_rewards sysvar: none");
3538        }
3539    }
3540
    /// Create EpochRewards sysvar with calculated rewards
    ///
    /// The sysvar account's lamport balance is set to the still-undistributed
    /// amount (`total_rewards - distributed_rewards`); partitioned
    /// distribution later draws it down via `update_epoch_rewards_sysvar`.
    fn create_epoch_rewards_sysvar(
        &self,
        total_rewards: u64,
        distributed_rewards: u64,
        distribution_complete_block_height: u64,
    ) {
        // Only callable on the partitioned-rewards code path.
        assert!(self.is_partitioned_rewards_code_enabled());

        let epoch_rewards = sysvar::epoch_rewards::EpochRewards {
            total_rewards,
            distributed_rewards,
            distribution_complete_block_height,
        };

        self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| {
            let mut inherited_account_fields =
                self.inherit_specially_retained_account_fields(account);

            assert!(total_rewards >= distributed_rewards);
            // set the account lamports to the undistributed rewards
            inherited_account_fields.0 = total_rewards - distributed_rewards;
            create_account(&epoch_rewards, inherited_account_fields)
        });

        self.log_epoch_rewards_sysvar("create");
    }
3568
    /// Update EpochRewards sysvar with distributed rewards
    ///
    /// Adds `distributed` to the sysvar's `distributed_rewards` counter and
    /// debits the same amount from the sysvar account's lamport balance
    /// (which holds the undistributed remainder).
    fn update_epoch_rewards_sysvar(&self, distributed: u64) {
        // Only callable on the partitioned-rewards code path.
        assert!(self.is_partitioned_rewards_code_enabled());

        // The sysvar must already exist here; it is created by
        // `create_epoch_rewards_sysvar` before any distribution starts.
        let mut epoch_rewards: sysvar::epoch_rewards::EpochRewards =
            from_account(&self.get_account(&sysvar::epoch_rewards::id()).unwrap()).unwrap();
        epoch_rewards.distribute(distributed);

        self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| {
            let mut inherited_account_fields =
                self.inherit_specially_retained_account_fields(account);

            // The account balance must cover the amount being distributed.
            let lamports = inherited_account_fields.0;
            assert!(lamports >= distributed);
            inherited_account_fields.0 = lamports - distributed;
            create_account(&epoch_rewards, inherited_account_fields)
        });

        self.log_epoch_rewards_sysvar("update");
    }
3589
    /// Rewrites the (deprecated) `RecentBlockhashes` sysvar from the supplied,
    /// already-locked blockhash queue.
    fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
        #[allow(deprecated)]
        self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {
            let recent_blockhash_iter = locked_blockhash_queue.get_recent_blockhashes();
            recent_blockhashes_account::create_account_with_data_and_fields(
                recent_blockhash_iter,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
3600
3601    pub fn update_recent_blockhashes(&self) {
3602        let blockhash_queue = self.blockhash_queue.read().unwrap();
3603        self.update_recent_blockhashes_locked(&blockhash_queue);
3604    }
3605
    /// Estimates a wall-clock timestamp for this bank's slot from the
    /// timestamps recently reported by vote accounts, weighted by epoch stake.
    ///
    /// Only votes whose last reported timestamp is within one epoch's worth of
    /// slots of this bank are considered. Returns `None` when the epoch's vote
    /// accounts are unavailable or no estimate can be computed.
    fn get_timestamp_estimate(
        &self,
        max_allowable_drift: MaxAllowableDrift,
        epoch_start_timestamp: Option<(Slot, UnixTimestamp)>,
    ) -> Option<UnixTimestamp> {
        let mut get_timestamp_estimate_time = Measure::start("get_timestamp_estimate");
        let slots_per_epoch = self.epoch_schedule().slots_per_epoch;
        let vote_accounts = self.vote_accounts();
        // Lazy iterator of (vote_pubkey, (slot, timestamp)) for votes fresh
        // enough to use; consumed by calculate_stake_weighted_timestamp below.
        let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| {
            let vote_state = account.vote_state();
            let vote_state = vote_state.as_ref().ok()?;
            // Skip votes whose timestamp slot is in this bank's future.
            let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?;
            (slot_delta <= slots_per_epoch).then_some({
                (
                    *pubkey,
                    (
                        vote_state.last_timestamp.slot,
                        vote_state.last_timestamp.timestamp,
                    ),
                )
            })
        });
        let slot_duration = Duration::from_nanos(self.ns_per_slot as u64);
        let epoch = self.epoch_schedule().get_epoch(self.slot());
        let stakes = self.epoch_vote_accounts(epoch)?;
        let stake_weighted_timestamp = calculate_stake_weighted_timestamp(
            recent_timestamps,
            stakes,
            self.slot(),
            slot_duration,
            epoch_start_timestamp,
            max_allowable_drift,
            self.feature_set
                .is_active(&feature_set::warp_timestamp_again::id()),
        );
        get_timestamp_estimate_time.stop();
        datapoint_info!(
            "bank-timestamp",
            (
                "get_timestamp_estimate_us",
                get_timestamp_estimate_time.as_us(),
                i64
            ),
        );
        stake_weighted_timestamp
    }
3652
3653    pub fn rehash(&self) {
3654        let mut hash = self.hash.write().unwrap();
3655        let new = self.hash_internal_state();
3656        if new != *hash {
3657            warn!("Updating bank hash to {}", new);
3658            *hash = new;
3659        }
3660    }
3661
    /// Freezes the bank: applies deferred state changes (eager rent
    /// collection, transaction-fee and rent-fee distribution, slot history,
    /// incinerator), computes the final bank hash, and marks the slot frozen
    /// in accounts-db. Idempotent — only the first call, while the cached
    /// hash is still `Hash::default()`, has any effect.
    pub fn freeze(&self) {
        // This lock prevents any new commits from BankingStage
        // `Consumer::execute_and_commit_transactions_locked()` from
        // coming in after the last tick is observed. This is because in
        // BankingStage, any transaction successfully recorded in
        // `record_transactions()` is recorded after this `hash` lock
        // is grabbed. At the time of the successful record,
        // this means the PoH has not yet reached the last tick,
        // so this means freeze() hasn't been called yet. And because
        // BankingStage doesn't release this hash lock until both
        // record and commit are finished, those transactions will be
        // committed before this write lock can be obtained here.
        let mut hash = self.hash.write().unwrap();
        if *hash == Hash::default() {
            // finish up any deferred changes to account state
            self.collect_rent_eagerly();
            self.distribute_transaction_fees();
            self.distribute_rent_fees();
            self.update_slot_history();
            self.run_incinerator();

            // freeze is a one-way trip, idempotent
            self.freeze_started.store(true, Relaxed);
            *hash = self.hash_internal_state();
            self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
        }
    }
3689
    // dangerous; don't use this; this is only needed for ledger-tool's special command
    /// Clears the `freeze_started` flag so a frozen bank can be mutated again.
    pub fn unfreeze_for_ledger_tool(&self) {
        self.freeze_started.store(false, Relaxed);
    }
3694
    /// Returns the bank's epoch schedule (slot-to-epoch mapping rules).
    pub fn epoch_schedule(&self) -> &EpochSchedule {
        &self.epoch_schedule
    }
3698
3699    /// squash the parent's state up into this Bank,
3700    ///   this Bank becomes a root
3701    /// Note that this function is not thread-safe. If it is called concurrently on the same bank
3702    /// by multiple threads, the end result could be inconsistent.
3703    /// Calling code does not currently call this concurrently.
3704    pub fn squash(&self) -> SquashTiming {
3705        self.freeze();
3706
3707        //this bank and all its parents are now on the rooted path
3708        let mut roots = vec![self.slot()];
3709        roots.append(&mut self.parents().iter().map(|p| p.slot()).collect());
3710
3711        let mut total_index_us = 0;
3712        let mut total_cache_us = 0;
3713        let mut total_store_us = 0;
3714
3715        let mut squash_accounts_time = Measure::start("squash_accounts_time");
3716        for slot in roots.iter().rev() {
3717            // root forks cannot be purged
3718            let add_root_timing = self.rc.accounts.add_root(*slot);
3719            total_index_us += add_root_timing.index_us;
3720            total_cache_us += add_root_timing.cache_us;
3721            total_store_us += add_root_timing.store_us;
3722        }
3723        squash_accounts_time.stop();
3724
3725        *self.rc.parent.write().unwrap() = None;
3726
3727        let mut squash_cache_time = Measure::start("squash_cache_time");
3728        roots
3729            .iter()
3730            .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot));
3731        squash_cache_time.stop();
3732
3733        SquashTiming {
3734            squash_accounts_ms: squash_accounts_time.as_ms(),
3735            squash_accounts_index_ms: total_index_us / 1000,
3736            squash_accounts_cache_ms: total_cache_us / 1000,
3737            squash_accounts_store_ms: total_store_us / 1000,
3738
3739            squash_cache_ms: squash_cache_time.as_ms(),
3740        }
3741    }
3742
    /// Return the more recent checkpoint of this bank instance.
    /// `None` once this bank has been squashed/rooted (see `squash`).
    pub fn parent(&self) -> Option<Arc<Bank>> {
        self.rc.parent.read().unwrap().clone()
    }
3747
    /// Returns the slot of this bank's parent.
    pub fn parent_slot(&self) -> Slot {
        self.parent_slot
    }
3751
    /// Returns the bank hash of this bank's parent.
    pub fn parent_hash(&self) -> Hash {
        self.parent_hash
    }
3755
    /// Initialize this bank's state from `genesis_config`: store genesis
    /// accounts and rewards pools, choose the collector id from the
    /// highest-staked node, register the genesis hash, and set the
    /// fee/rent/tick/epoch parameters.
    ///
    /// Panics if any pubkey is repeated in the genesis config, or if no
    /// staked nodes exist (outside of tests supplying
    /// `collector_id_for_tests`).
    fn process_genesis_config(
        &mut self,
        genesis_config: &GenesisConfig,
        #[cfg(feature = "dev-context-only-utils")] collector_id_for_tests: Option<Pubkey>,
    ) {
        // Bootstrap validator collects fees until `new_from_parent` is called.
        self.fee_rate_governor = genesis_config.fee_rate_governor.clone();

        for (pubkey, account) in genesis_config.accounts.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, account);
            // Genesis accounts mint lamports into existence, so they are
            // added to capitalization (the rewards pools below are not).
            self.capitalization.fetch_add(account.lamports(), Relaxed);
            self.accounts_data_size_initial += account.data().len() as u64;
        }
        // updating sysvars (the fees sysvar in this case) now depends on feature activations in
        // genesis_config.accounts above
        self.update_fees();

        for (pubkey, account) in genesis_config.rewards_pools.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, account);
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // After storing genesis accounts, the bank stakes cache will be warmed
        // up and can be used to set the collector id to the highest staked
        // node. If no staked nodes exist, allow fallback to an unstaked test
        // collector id during tests.
        let collector_id = self.stakes_cache.stakes().highest_staked_node();
        #[cfg(feature = "dev-context-only-utils")]
        let collector_id = collector_id.or(collector_id_for_tests);
        self.collector_id =
            collector_id.expect("genesis processing failed because no staked nodes exist");

        self.blockhash_queue.write().unwrap().genesis_hash(
            &genesis_config.hash(),
            self.fee_rate_governor.lamports_per_signature,
        );

        self.hashes_per_tick = genesis_config.hashes_per_tick();
        self.ticks_per_slot = genesis_config.ticks_per_slot();
        self.ns_per_slot = genesis_config.ns_per_slot();
        self.genesis_creation_time = genesis_config.creation_time;
        self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
        self.slots_per_year = genesis_config.slots_per_year();

        self.epoch_schedule = genesis_config.epoch_schedule.clone();

        self.inflation = Arc::new(RwLock::new(genesis_config.inflation));

        self.rent_collector = RentCollector::new(
            self.epoch,
            self.epoch_schedule().clone(),
            self.slots_per_year,
            genesis_config.rent.clone(),
        );

        // Add additional builtin programs specified in the genesis config
        for (name, program_id) in &genesis_config.native_instruction_processors {
            self.add_builtin_account(name, program_id, false);
        }
    }
3824
    /// Burn the account's lamports (deducting them from capitalization) and
    /// zero its data so it is fully purged from AccountsDb.
    fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) {
        let old_data_size = account.data().len();
        self.capitalization.fetch_sub(account.lamports(), Relaxed);
        // Both resetting account balance to 0 and zeroing the account data
        // is needed to really purge from AccountsDb and flush the Stakes cache
        account.set_lamports(0);
        account.data_as_mut_slice().fill(0);
        self.store_account(program_id, &account);
        // Account shrank from old_data_size to 0 bytes; record the delta.
        self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, 0);
    }
3835
    // NOTE: must hold idempotent for the same set of arguments
    /// Add a builtin program account at `program_id`.
    ///
    /// Any pre-existing account at `program_id` not owned by the native
    /// loader is burned and purged first. With `must_replace`, a genuine
    /// program account must already exist (panics otherwise) and is only
    /// rewritten when its data does not match `name`; without it, an
    /// existing genuine program account is left untouched.
    ///
    /// Panics if a new account would have to be written after freezing has
    /// started.
    pub fn add_builtin_account(&self, name: &str, program_id: &Pubkey, must_replace: bool) {
        let existing_genuine_program =
            self.get_account_with_fixed_root(program_id)
                .and_then(|account| {
                    // it's very unlikely to be squatted at program_id as non-system account because of burden to
                    // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's
                    // safe to assume it's a genuine program.
                    if native_loader::check_id(account.owner()) {
                        Some(account)
                    } else {
                        // malicious account is pre-occupying at program_id
                        self.burn_and_purge_account(program_id, account);
                        None
                    }
                });

        if must_replace {
            // updating builtin program
            match &existing_genuine_program {
                None => panic!(
                    "There is no account to replace with builtin program ({name}, {program_id})."
                ),
                Some(account) => {
                    if *name == String::from_utf8_lossy(account.data()) {
                        // The existing account is well formed
                        return;
                    }
                }
            }
        } else {
            // introducing builtin program
            if existing_genuine_program.is_some() {
                // The existing account is sufficient
                return;
            }
        }

        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new builtin program ({name}, {program_id}). \
            Maybe, inconsistent program activation is detected on snapshot restore?"
        );

        // Add a bogus executable builtin account, which will be loaded and ignored.
        let account = native_loader::create_loadable_account_with_fields(
            name,
            self.inherit_specially_retained_account_fields(&existing_genuine_program),
        );
        self.store_account_and_update_capitalization(program_id, &account);
    }
3888
    /// Add a precompiled program account owned by the native loader.
    pub fn add_precompiled_account(&self, program_id: &Pubkey) {
        self.add_precompiled_account_with_owner(program_id, native_loader::id())
    }
3893
    // Used by tests to simulate clusters with precompiles that aren't owned by the native loader
    /// Add a precompiled program account at `program_id` with the given
    /// `owner`. A pre-existing executable account is kept as-is; a
    /// non-executable squatter is burned and purged before the new account
    /// is written. Panics if a new account would have to be written after
    /// freezing has started.
    fn add_precompiled_account_with_owner(&self, program_id: &Pubkey, owner: Pubkey) {
        if let Some(account) = self.get_account_with_fixed_root(program_id) {
            if account.executable() {
                // Already a well-formed executable; nothing to do.
                return;
            } else {
                // malicious account is pre-occupying at program_id
                self.burn_and_purge_account(program_id, account);
            }
        };

        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new precompiled program ({program_id}). \
                Maybe, inconsistent program activation is detected on snapshot restore?"
        );

        // Add a bogus executable account, which will be loaded and ignored.
        let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None);

        // Mock account_data with executable_meta so that the account is executable.
        let account_data = create_executable_meta(&owner);
        let account = AccountSharedData::from(Account {
            lamports,
            owner,
            data: account_data.to_vec(),
            executable: true,
            rent_epoch,
        });
        self.store_account_and_update_capitalization(program_id, &account);
    }
3925
    /// Overrides the percentage of collected rent that is burned.
    pub fn set_rent_burn_percentage(&mut self, burn_percent: u8) {
        self.rent_collector.rent.burn_percent = burn_percent;
    }
3929
    /// Overrides the number of PoH hashes per tick (`None` disables hashing).
    pub fn set_hashes_per_tick(&mut self, hashes_per_tick: Option<u64>) {
        self.hashes_per_tick = hashes_per_tick;
    }
3933
    /// Return the last block hash registered.
    pub fn last_blockhash(&self) -> Hash {
        self.blockhash_queue.read().unwrap().last_hash()
    }
3938
3939    pub fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
3940        let blockhash_queue = self.blockhash_queue.read().unwrap();
3941        let last_hash = blockhash_queue.last_hash();
3942        let last_lamports_per_signature = blockhash_queue
3943            .get_lamports_per_signature(&last_hash)
3944            .unwrap(); // safe so long as the BlockhashQueue is consistent
3945        (last_hash, last_lamports_per_signature)
3946    }
3947
3948    pub fn is_blockhash_valid(&self, hash: &Hash) -> bool {
3949        let blockhash_queue = self.blockhash_queue.read().unwrap();
3950        blockhash_queue.is_hash_valid(hash)
3951    }
3952
3953    pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 {
3954        self.rent_collector.rent.minimum_balance(data_len).max(1)
3955    }
3956
    /// Returns the current lamports-per-signature fee rate.
    pub fn get_lamports_per_signature(&self) -> u64 {
        self.fee_rate_governor.lamports_per_signature
    }
3960
3961    pub fn get_lamports_per_signature_for_blockhash(&self, hash: &Hash) -> Option<u64> {
3962        let blockhash_queue = self.blockhash_queue.read().unwrap();
3963        blockhash_queue.get_lamports_per_signature(hash)
3964    }
3965
    #[deprecated(since = "1.9.0", note = "Please use `get_fee_for_message` instead")]
    /// Returns the bank's fee rate governor. Deprecated; prefer
    /// `get_fee_for_message`.
    pub fn get_fee_rate_governor(&self) -> &FeeRateGovernor {
        &self.fee_rate_governor
    }
3970
3971    pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option<u64> {
3972        let lamports_per_signature = {
3973            let blockhash_queue = self.blockhash_queue.read().unwrap();
3974            blockhash_queue.get_lamports_per_signature(message.recent_blockhash())
3975        }
3976        .or_else(|| {
3977            self.check_message_for_nonce(message)
3978                .and_then(|(address, account)| {
3979                    NoncePartial::new(address, account).lamports_per_signature()
3980                })
3981        })?;
3982        Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature))
3983    }
3984
    /// Returns true when startup accounts hash verification has completed or never had to run in background.
    pub fn get_startup_verification_complete(&self) -> &Arc<AtomicBool> {
        &self
            .rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verified
    }
3994
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    pub fn is_startup_verification_complete(&self) -> bool {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .check_complete()
    }
4005
    /// Mark startup accounts hash verification as complete.
    /// This can occur because it completed in the background
    /// or if the verification was run in the foreground.
    pub fn set_startup_verification_complete(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verification_complete()
    }
4015
    /// Computes the fee for `message` using an explicit `lamports_per_signature`
    /// rate, honoring any compute-budget instructions in the message and the
    /// fee-calculation feature gates currently active on this bank.
    pub fn get_fee_for_message_with_lamports_per_signature(
        &self,
        message: &SanitizedMessage,
        lamports_per_signature: u64,
    ) -> u64 {
        self.fee_structure.calculate_fee(
            message,
            lamports_per_signature,
            // Malformed compute-budget instructions fall back to defaults here;
            // actual execution would reject such a transaction elsewhere.
            &process_compute_budget_instructions(message.program_instructions_iter())
                .unwrap_or_default()
                .into(),
            self.feature_set
                .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()),
            self.feature_set
                .is_active(&remove_rounding_in_fee_calculation::id()),
        )
    }
4033
4034    #[deprecated(
4035        since = "1.6.11",
4036        note = "Please use `get_blockhash_last_valid_block_height`"
4037    )]
4038    pub fn get_blockhash_last_valid_slot(&self, blockhash: &Hash) -> Option<Slot> {
4039        let blockhash_queue = self.blockhash_queue.read().unwrap();
4040        // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
4041        // length is made variable by epoch
4042        blockhash_queue
4043            .get_hash_age(blockhash)
4044            .map(|age| self.slot + blockhash_queue.get_max_age() as u64 - age)
4045    }
4046
4047    pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option<Slot> {
4048        let blockhash_queue = self.blockhash_queue.read().unwrap();
4049        // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
4050        // length is made variable by epoch
4051        blockhash_queue
4052            .get_hash_age(blockhash)
4053            .map(|age| self.block_height + blockhash_queue.get_max_age() as u64 - age)
4054    }
4055
4056    pub fn confirmed_last_blockhash(&self) -> Hash {
4057        const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3;
4058
4059        let parents = self.parents();
4060        if parents.is_empty() {
4061            self.last_blockhash()
4062        } else {
4063            let index = NUM_BLOCKHASH_CONFIRMATIONS.min(parents.len() - 1);
4064            parents[index].last_blockhash()
4065        }
4066    }
4067
    /// Forget all signatures. Useful for benchmarking.
    pub fn clear_signatures(&self) {
        self.status_cache.write().unwrap().clear();
    }
4072
    /// Forget the status-cache entries recorded for `slot` only.
    pub fn clear_slot_signatures(&self, slot: Slot) {
        self.status_cache.write().unwrap().clear_slot_entries(slot);
    }
4076
    /// Record the execution status of each executed transaction in the
    /// status cache, keyed both by message hash (replay protection) and by
    /// signature (RPC lookup). Panics if the two slices differ in length.
    fn update_transaction_statuses(
        &self,
        sanitized_txs: &[SanitizedTransaction],
        execution_results: &[TransactionExecutionResult],
    ) {
        let mut status_cache = self.status_cache.write().unwrap();
        assert_eq!(sanitized_txs.len(), execution_results.len());
        for (tx, execution_result) in sanitized_txs.iter().zip(execution_results) {
            // Only transactions that were actually executed have details to record.
            if let Some(details) = execution_result.details() {
                // Add the message hash to the status cache to ensure that this message
                // won't be processed again with a different signature.
                status_cache.insert(
                    tx.message().recent_blockhash(),
                    tx.message_hash(),
                    self.slot(),
                    details.status.clone(),
                );
                // Add the transaction signature to the status cache so that transaction status
                // can be queried by transaction signature over RPC. In the future, this should
                // only be added for API nodes because voting validators don't need to do this.
                status_cache.insert(
                    tx.message().recent_blockhash(),
                    tx.signature(),
                    self.slot(),
                    details.status.clone(),
                );
            }
        }
    }
4106
    /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank
    /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction
    /// processing without advancing to a new bank slot.
    fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        // This is needed because recent_blockhash updates necessitate synchronizations for
        // consistent tx check_age handling.
        BankWithScheduler::wait_for_paused_scheduler(self, scheduler);

        // Only acquire the write lock for the blockhash queue on block boundaries because
        // readers can starve this write lock acquisition and ticks would be slowed down too
        // much if the write lock is acquired for each tick.
        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
        w_blockhash_queue.register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
        // Keep the RecentBlockhashes sysvar in sync while still holding the queue lock.
        self.update_recent_blockhashes_locked(&w_blockhash_queue);
    }
4122
    // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to
    // miraland-program-test's usage...
    /// Test helper: register a fresh unique blockhash without a scheduler.
    pub fn register_unique_recent_blockhash_for_test(&self) {
        self.register_recent_blockhash(
            &Hash::new_unique(),
            &BankWithScheduler::no_scheduler_available(),
        )
    }
4131
    /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls
    /// correspond to later entries, and will boot the oldest ones once its internal cache is full.
    /// Once boot, the bank will reject transactions using that `hash`.
    ///
    /// This is NOT thread safe because if tick height is updated by two different threads, the
    /// block boundary condition could be missed.
    ///
    /// Panics if called after freezing has started.
    pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        assert!(
            !self.freeze_started(),
            "register_tick() working on a bank that is already frozen or is undergoing freezing!"
        );

        // Register the blockhash only on the tick that completes the block.
        if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
            self.register_recent_blockhash(hash, scheduler);
        }

        // ReplayStage will start computing the accounts delta hash when it
        // detects the tick height has reached the boundary, so the system
        // needs to guarantee all account updates for the slot have been
        // committed before this tick height is incremented (like the blockhash
        // sysvar above)
        self.tick_height.fetch_add(1, Relaxed);
    }
4155
    #[cfg(feature = "dev-context-only-utils")]
    /// Test helper: register a tick without a scheduler.
    pub fn register_tick_for_test(&self, hash: &Hash) {
        self.register_tick(hash, &BankWithScheduler::no_scheduler_available())
    }
4160
    #[cfg(feature = "dev-context-only-utils")]
    /// Test helper: register a tick with the default (all-zero) hash.
    pub fn register_default_tick_for_test(&self) {
        self.register_tick_for_test(&Hash::default())
    }
4165
    #[cfg(feature = "dev-context-only-utils")]
    /// Test helper: register a tick with a freshly generated unique hash.
    pub fn register_unique_tick(&self) {
        self.register_tick_for_test(&Hash::new_unique())
    }
4170
    /// Returns true once this bank has observed its maximum tick height.
    pub fn is_complete(&self) -> bool {
        self.tick_height() == self.max_tick_height()
    }
4174
    /// Returns true if `tick_height` is the final tick of this bank's block.
    pub fn is_block_boundary(&self, tick_height: u64) -> bool {
        tick_height == self.max_tick_height
    }
4178
4179    /// Get the max number of accounts that a transaction may lock in this block
4180    pub fn get_transaction_account_lock_limit(&self) -> usize {
4181        if let Some(transaction_account_lock_limit) =
4182            self.runtime_config.transaction_account_lock_limit
4183        {
4184            transaction_account_lock_limit
4185        } else if self
4186            .feature_set
4187            .is_active(&feature_set::increase_tx_account_lock_limit::id())
4188        {
4189            MAX_TX_ACCOUNT_LOCKS
4190        } else {
4191            64
4192        }
4193    }
4194
4195    /// Prepare a transaction batch from a list of versioned transactions from
4196    /// an entry. Used for tests only.
4197    pub fn prepare_entry_batch(&self, txs: Vec<VersionedTransaction>) -> Result<TransactionBatch> {
4198        let sanitized_txs = txs
4199            .into_iter()
4200            .map(|tx| SanitizedTransaction::try_create(tx, MessageHash::Compute, None, self))
4201            .collect::<Result<Vec<_>>>()?;
4202        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
4203        let lock_results = self
4204            .rc
4205            .accounts
4206            .lock_accounts(sanitized_txs.iter(), tx_account_lock_limit);
4207        Ok(TransactionBatch::new(
4208            lock_results,
4209            self,
4210            Cow::Owned(sanitized_txs),
4211        ))
4212    }
4213
4214    /// Prepare a locked transaction batch from a list of sanitized transactions.
4215    pub fn prepare_sanitized_batch<'a, 'b>(
4216        &'a self,
4217        txs: &'b [SanitizedTransaction],
4218    ) -> TransactionBatch<'a, 'b> {
4219        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
4220        let lock_results = self
4221            .rc
4222            .accounts
4223            .lock_accounts(txs.iter(), tx_account_lock_limit);
4224        TransactionBatch::new(lock_results, self, Cow::Borrowed(txs))
4225    }
4226
4227    /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost
4228    /// limited packing status
4229    pub fn prepare_sanitized_batch_with_results<'a, 'b>(
4230        &'a self,
4231        transactions: &'b [SanitizedTransaction],
4232        transaction_results: impl Iterator<Item = Result<()>>,
4233    ) -> TransactionBatch<'a, 'b> {
4234        // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit
4235        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
4236        let lock_results = self.rc.accounts.lock_accounts_with_results(
4237            transactions.iter(),
4238            transaction_results,
4239            tx_account_lock_limit,
4240        );
4241        TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions))
4242    }
4243
4244    /// Prepare a transaction batch from a single transaction without locking accounts
4245    pub fn prepare_unlocked_batch_from_single_tx<'a>(
4246        &'a self,
4247        transaction: &'a SanitizedTransaction,
4248    ) -> TransactionBatch<'_, '_> {
4249        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
4250        let lock_result = transaction
4251            .get_account_locks(tx_account_lock_limit)
4252            .map(|_| ());
4253        let mut batch = TransactionBatch::new(
4254            vec![lock_result],
4255            self,
4256            Cow::Borrowed(slice::from_ref(transaction)),
4257        );
4258        batch.set_needs_unlock(false);
4259        batch
4260    }
4261
    /// Run transactions against a frozen bank without committing the results
    ///
    /// Panics unless the bank is frozen; unfrozen banks (e.g. single-Bank
    /// test frameworks) should use `simulate_transaction_unchecked`.
    pub fn simulate_transaction(
        &self,
        transaction: &SanitizedTransaction,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        assert!(self.is_frozen(), "simulation bank must be frozen");

        self.simulate_transaction_unchecked(transaction, enable_cpi_recording)
    }
4272
    /// Run transactions against a bank without committing the results; does not check if the bank
    /// is frozen, enabling use in single-Bank test frameworks
    ///
    /// Returns the execution result, logs, post-simulation account states for
    /// the transaction's account keys, total compute units consumed, return
    /// data, and recorded inner instructions.
    pub fn simulate_transaction_unchecked(
        &self,
        transaction: &SanitizedTransaction,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        let account_keys = transaction.message().account_keys();
        let number_of_accounts = account_keys.len();
        let account_overrides = self.get_account_overrides_for_simulation(&account_keys);
        let batch = self.prepare_unlocked_batch_from_single_tx(transaction);
        let mut timings = ExecuteTimings::default();

        let LoadAndExecuteTransactionsOutput {
            loaded_transactions,
            mut execution_results,
            ..
        } = self.load_and_execute_transactions(
            &batch,
            // After simulation, transactions will need to be forwarded to the leader
            // for processing. During forwarding, the transaction could expire if the
            // delay is not accounted for.
            MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY,
            enable_cpi_recording,
            true,
            true,
            &mut timings,
            Some(&account_overrides),
            None,
        );

        // The batch holds exactly one transaction, so take its (sole) load result.
        let post_simulation_accounts = loaded_transactions
            .into_iter()
            .next()
            .unwrap()
            .0
            .ok()
            .map(|loaded_transaction| {
                loaded_transaction
                    .accounts
                    .into_iter()
                    .take(number_of_accounts)
                    .collect::<Vec<_>>()
            })
            .unwrap_or_default();

        // Sum compute units across all program timings, including units spent
        // by instructions that errored.
        let units_consumed =
            timings
                .details
                .per_program_timings
                .iter()
                .fold(0, |acc: u64, (_, program_timing)| {
                    acc.saturating_add(program_timing.accumulated_units)
                        .saturating_add(program_timing.total_errored_units)
                });

        debug!("simulate_transaction: {:?}", timings);

        let execution_result = execution_results.pop().unwrap();
        let flattened_result = execution_result.flattened_result();
        let (logs, return_data, inner_instructions) = match execution_result {
            TransactionExecutionResult::Executed { details, .. } => (
                details.log_messages,
                details.return_data,
                details.inner_instructions,
            ),
            TransactionExecutionResult::NotExecuted(_) => (None, None, None),
        };
        let logs = logs.unwrap_or_default();

        TransactionSimulationResult {
            result: flattened_result,
            logs,
            post_simulation_accounts,
            units_consumed,
            return_data,
            inner_instructions,
        }
    }
4352
    /// Build account overrides for simulation. If the transaction reads the
    /// slot-history sysvar and that sysvar already contains this bank's slot,
    /// override it with the version loaded from proper ancestors so the
    /// simulated transaction observes a consistent view.
    fn get_account_overrides_for_simulation(&self, account_keys: &AccountKeys) -> AccountOverrides {
        let mut account_overrides = AccountOverrides::default();
        let slot_history_id = sysvar::slot_history::id();
        if account_keys.iter().any(|pubkey| *pubkey == slot_history_id) {
            let current_account = self.get_account_with_fixed_root(&slot_history_id);
            let slot_history = current_account
                .as_ref()
                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
                .unwrap_or_default();
            if slot_history.check(self.slot()) == Check::Found {
                let ancestors = Ancestors::from(self.proper_ancestors().collect::<Vec<_>>());
                if let Some((account, _)) =
                    self.load_slow_with_fixed_root(&ancestors, &slot_history_id)
                {
                    account_overrides.set_slot_history(Some(account));
                }
            }
        }
        account_overrides
    }
4373
4374    pub fn unlock_accounts(&self, batch: &mut TransactionBatch) {
4375        if batch.needs_unlock() {
4376            batch.set_needs_unlock(false);
4377            self.rc
4378                .accounts
4379                .unlock_accounts(batch.sanitized_transactions().iter(), batch.lock_results())
4380        }
4381    }
4382
    /// Remove the given unrooted slots from the accounts database.
    pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) {
        self.rc.accounts.accounts_db.remove_unrooted_slots(slots)
    }
4386
4387    fn check_age(
4388        &self,
4389        sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
4390        lock_results: &[Result<()>],
4391        max_age: usize,
4392        error_counters: &mut TransactionErrorMetrics,
4393    ) -> Vec<TransactionCheckResult> {
4394        let hash_queue = self.blockhash_queue.read().unwrap();
4395        let last_blockhash = hash_queue.last_hash();
4396        let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash);
4397
4398        sanitized_txs
4399            .iter()
4400            .zip(lock_results)
4401            .map(|(tx, lock_res)| match lock_res {
4402                Ok(()) => self.check_transaction_age(
4403                    tx.borrow(),
4404                    max_age,
4405                    &next_durable_nonce,
4406                    &hash_queue,
4407                    error_counters,
4408                ),
4409                Err(e) => (Err(e.clone()), None, None),
4410            })
4411            .collect()
4412    }
4413
4414    fn check_transaction_age(
4415        &self,
4416        tx: &SanitizedTransaction,
4417        max_age: usize,
4418        next_durable_nonce: &DurableNonce,
4419        hash_queue: &BlockhashQueue,
4420        error_counters: &mut TransactionErrorMetrics,
4421    ) -> TransactionCheckResult {
4422        let recent_blockhash = tx.message().recent_blockhash();
4423        if hash_queue.is_hash_valid_for_age(recent_blockhash, max_age) {
4424            (
4425                Ok(()),
4426                None,
4427                hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()),
4428            )
4429        } else if let Some((address, account)) =
4430            self.check_transaction_for_nonce(tx, next_durable_nonce)
4431        {
4432            let nonce = NoncePartial::new(address, account);
4433            let lamports_per_signature = nonce.lamports_per_signature();
4434            (Ok(()), Some(nonce), lamports_per_signature)
4435        } else {
4436            error_counters.blockhash_not_found += 1;
4437            (Err(TransactionError::BlockhashNotFound), None, None)
4438        }
4439    }
4440
4441    fn is_transaction_already_processed(
4442        &self,
4443        sanitized_tx: &SanitizedTransaction,
4444        status_cache: &BankStatusCache,
4445    ) -> bool {
4446        let key = sanitized_tx.message_hash();
4447        let transaction_blockhash = sanitized_tx.message().recent_blockhash();
4448        status_cache
4449            .get_status(key, transaction_blockhash, &self.ancestors)
4450            .is_some()
4451    }
4452
4453    fn check_status_cache(
4454        &self,
4455        sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
4456        lock_results: Vec<TransactionCheckResult>,
4457        error_counters: &mut TransactionErrorMetrics,
4458    ) -> Vec<TransactionCheckResult> {
4459        let rcache = self.status_cache.read().unwrap();
4460        sanitized_txs
4461            .iter()
4462            .zip(lock_results)
4463            .map(|(sanitized_tx, (lock_result, nonce, lamports))| {
4464                let sanitized_tx = sanitized_tx.borrow();
4465                if lock_result.is_ok()
4466                    && self.is_transaction_already_processed(sanitized_tx, &rcache)
4467                {
4468                    error_counters.already_processed += 1;
4469                    return (Err(TransactionError::AlreadyProcessed), None, None);
4470                }
4471
4472                (lock_result, nonce, lamports)
4473            })
4474            .collect()
4475    }
4476
    /// Return the age of `hash` in the blockhash queue, or `None` if the hash
    /// is not present in the queue.
    pub fn get_hash_age(&self, hash: &Hash) -> Option<u64> {
        self.blockhash_queue.read().unwrap().get_hash_age(hash)
    }
4480
    /// Return true if `hash` is present in the blockhash queue and no older
    /// than `max_age`.
    pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool {
        self.blockhash_queue
            .read()
            .unwrap()
            .is_hash_valid_for_age(hash, max_age)
    }
4487
4488    fn check_message_for_nonce(&self, message: &SanitizedMessage) -> Option<TransactionAccount> {
4489        let nonce_address = message.get_durable_nonce()?;
4490        let nonce_account = self.get_account_with_fixed_root(nonce_address)?;
4491        let nonce_data =
4492            nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?;
4493
4494        let nonce_is_authorized = message
4495            .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize)
4496            .any(|signer| signer == &nonce_data.authority);
4497        if !nonce_is_authorized {
4498            return None;
4499        }
4500
4501        Some((*nonce_address, nonce_account))
4502    }
4503
4504    fn check_transaction_for_nonce(
4505        &self,
4506        tx: &SanitizedTransaction,
4507        next_durable_nonce: &DurableNonce,
4508    ) -> Option<TransactionAccount> {
4509        let nonce_is_advanceable = tx.message().recent_blockhash() != next_durable_nonce.as_hash();
4510        if nonce_is_advanceable {
4511            self.check_message_for_nonce(tx.message())
4512        } else {
4513            None
4514        }
4515    }
4516
    /// Run the pre-execution transaction checks: blockhash/nonce age via
    /// `check_age`, then duplicate detection via `check_status_cache`.
    ///
    /// `lock_results` carries the per-transaction account-lock outcomes;
    /// failed locks are propagated through unchanged.
    pub fn check_transactions(
        &self,
        sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
        lock_results: &[Result<()>],
        max_age: usize,
        error_counters: &mut TransactionErrorMetrics,
    ) -> Vec<TransactionCheckResult> {
        let lock_results = self.check_age(sanitized_txs, lock_results, max_age, error_counters);
        self.check_status_cache(sanitized_txs, lock_results, error_counters)
    }
4527
4528    pub fn collect_balances(&self, batch: &TransactionBatch) -> TransactionBalances {
4529        let mut balances: TransactionBalances = vec![];
4530        for transaction in batch.sanitized_transactions() {
4531            let mut transaction_balances: Vec<u64> = vec![];
4532            for account_key in transaction.message().account_keys().iter() {
4533                transaction_balances.push(self.get_balance(account_key));
4534            }
4535            balances.push(transaction_balances);
4536        }
4537        balances
4538    }
4539
    /// Load accounts for and execute the sanitized transactions in `batch`,
    /// without committing any results to the accounts store.
    ///
    /// Runs pre-execution checks (blockhash/nonce age and duplicate
    /// detection), delegates execution to the transaction processor, then
    /// collects transaction logs (per this bank's log collector config) and
    /// execution metrics.
    ///
    /// Returns the loaded accounts, per-transaction execution results, the
    /// indexes of transactions that failed with retryable lock errors, and
    /// executed/succeeded/signature counts.
    #[allow(clippy::type_complexity)]
    pub fn load_and_execute_transactions(
        &self,
        batch: &TransactionBatch,
        max_age: usize,
        enable_cpi_recording: bool,
        enable_log_recording: bool,
        enable_return_data_recording: bool,
        timings: &mut ExecuteTimings,
        account_overrides: Option<&AccountOverrides>,
        log_messages_bytes_limit: Option<usize>,
    ) -> LoadAndExecuteTransactionsOutput {
        let sanitized_txs = batch.sanitized_transactions();
        debug!("processing transactions: {}", sanitized_txs.len());
        let mut error_counters = TransactionErrorMetrics::default();

        // Classify lock failures: retryable errors yield their index (so the
        // caller can retry the transaction later); everything else is only
        // counted.
        let retryable_transaction_indexes: Vec<_> = batch
            .lock_results()
            .iter()
            .enumerate()
            .filter_map(|(index, res)| match res {
                // following are retryable errors
                Err(TransactionError::AccountInUse) => {
                    error_counters.account_in_use += 1;
                    Some(index)
                }
                Err(TransactionError::WouldExceedMaxBlockCostLimit) => {
                    error_counters.would_exceed_max_block_cost_limit += 1;
                    Some(index)
                }
                Err(TransactionError::WouldExceedMaxVoteCostLimit) => {
                    error_counters.would_exceed_max_vote_cost_limit += 1;
                    Some(index)
                }
                Err(TransactionError::WouldExceedMaxAccountCostLimit) => {
                    error_counters.would_exceed_max_account_cost_limit += 1;
                    Some(index)
                }
                Err(TransactionError::WouldExceedAccountDataBlockLimit) => {
                    error_counters.would_exceed_account_data_block_limit += 1;
                    Some(index)
                }
                // following are non-retryable errors
                Err(TransactionError::TooManyAccountLocks) => {
                    error_counters.too_many_account_locks += 1;
                    None
                }
                Err(_) => None,
                Ok(_) => None,
            })
            .collect();

        let mut check_time = Measure::start("check_transactions");
        let mut check_results = self.check_transactions(
            sanitized_txs,
            batch.lock_results(),
            max_age,
            &mut error_counters,
        );
        check_time.stop();
        debug!("check: {}us", check_time.as_us());
        timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us());

        // Execution proper is delegated to the transaction processor.
        let sanitized_output = self
            .transaction_processor
            .load_and_execute_sanitized_transactions(
                self,
                sanitized_txs,
                &mut check_results,
                &mut error_counters,
                enable_cpi_recording,
                enable_log_recording,
                enable_return_data_recording,
                timings,
                account_overrides,
                self.builtin_programs.iter(),
                log_messages_bytes_limit,
            );

        let mut signature_count = 0;

        let mut executed_transactions_count: usize = 0;
        let mut executed_non_vote_transactions_count: usize = 0;
        let mut executed_with_successful_result_count: usize = 0;
        let err_count = &mut error_counters.total;
        let transaction_log_collector_config =
            self.transaction_log_collector_config.read().unwrap();

        let mut collect_logs_time = Measure::start("collect_logs_time");
        for (execution_result, tx) in sanitized_output.execution_results.iter().zip(sanitized_txs) {
            // Emit a trace line for any transaction touching a configured
            // debug key.
            if let Some(debug_keys) = &self.transaction_debug_keys {
                for key in tx.message().account_keys().iter() {
                    if debug_keys.contains(key) {
                        let result = execution_result.flattened_result();
                        info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx);
                        break;
                    }
                }
            }

            let is_vote = tx.is_simple_vote_transaction();

            if execution_result.was_executed() // Skip log collection for unprocessed transactions
                && transaction_log_collector_config.filter != TransactionLogCollectorFilter::None
            {
                // Collect the subset of this transaction's keys that appear
                // in the collector's mentioned-address set.
                let mut filtered_mentioned_addresses = Vec::new();
                if !transaction_log_collector_config
                    .mentioned_addresses
                    .is_empty()
                {
                    for key in tx.message().account_keys().iter() {
                        if transaction_log_collector_config
                            .mentioned_addresses
                            .contains(key)
                        {
                            filtered_mentioned_addresses.push(*key);
                        }
                    }
                }

                // Apply the configured filter to decide whether this
                // transaction's logs are stored.
                let store = match transaction_log_collector_config.filter {
                    TransactionLogCollectorFilter::All => {
                        !is_vote || !filtered_mentioned_addresses.is_empty()
                    }
                    TransactionLogCollectorFilter::AllWithVotes => true,
                    TransactionLogCollectorFilter::None => false,
                    TransactionLogCollectorFilter::OnlyMentionedAddresses => {
                        !filtered_mentioned_addresses.is_empty()
                    }
                };

                if store {
                    if let Some(TransactionExecutionDetails {
                        status,
                        log_messages: Some(log_messages),
                        ..
                    }) = execution_result.details()
                    {
                        let mut transaction_log_collector =
                            self.transaction_log_collector.write().unwrap();
                        let transaction_log_index = transaction_log_collector.logs.len();

                        transaction_log_collector.logs.push(TransactionLogInfo {
                            signature: *tx.signature(),
                            result: status.clone(),
                            is_vote,
                            log_messages: log_messages.clone(),
                        });
                        // Index this log entry under each mentioned address.
                        for key in filtered_mentioned_addresses.into_iter() {
                            transaction_log_collector
                                .mentioned_address_map
                                .entry(key)
                                .or_default()
                                .push(transaction_log_index);
                        }
                    }
                }
            }

            if execution_result.was_executed() {
                // Signature count must be accumulated only if the transaction
                // is executed, otherwise a mismatched count between banking and
                // replay could occur
                signature_count += u64::from(tx.message().header().num_required_signatures);
                executed_transactions_count += 1;
            }

            match execution_result.flattened_result() {
                Ok(()) => {
                    if !is_vote {
                        executed_non_vote_transactions_count += 1;
                    }
                    executed_with_successful_result_count += 1;
                }
                Err(err) => {
                    // Only log the first error to avoid flooding the logs.
                    if *err_count == 0 {
                        debug!("tx error: {:?} {:?}", err, tx);
                    }
                    *err_count += 1;
                }
            }
        }
        collect_logs_time.stop();
        timings
            .saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_time.as_us());

        if *err_count > 0 {
            debug!(
                "{} errors of {} txs",
                *err_count,
                *err_count + executed_with_successful_result_count
            );
        }

        LoadAndExecuteTransactionsOutput {
            loaded_transactions: sanitized_output.loaded_transactions,
            execution_results: sanitized_output.execution_results,
            retryable_transaction_indexes,
            executed_transactions_count,
            executed_non_vote_transactions_count,
            executed_with_successful_result_count,
            signature_count,
            error_counters,
        }
    }
4745
    /// Load the accounts data size, in bytes
    ///
    /// Computed as the initial size recorded for this bank plus the net
    /// on-chain/off-chain delta accumulated since (saturating).
    pub fn load_accounts_data_size(&self) -> u64 {
        self.accounts_data_size_initial
            .saturating_add_signed(self.load_accounts_data_size_delta())
    }
4751
    /// Load the change in accounts data size in this Bank, in bytes
    ///
    /// Sum (saturating) of the on-chain delta (transactions) and the
    /// off-chain delta (e.g. rent collection).
    pub fn load_accounts_data_size_delta(&self) -> i64 {
        let delta_on_chain = self.load_accounts_data_size_delta_on_chain();
        let delta_off_chain = self.load_accounts_data_size_delta_off_chain();
        delta_on_chain.saturating_add(delta_off_chain)
    }
4758
    /// Load the change in accounts data size in this Bank, in bytes, from on-chain events
    /// i.e. transactions
    pub fn load_accounts_data_size_delta_on_chain(&self) -> i64 {
        // Acquire pairs with the AcqRel fetch_update in the updater.
        self.accounts_data_size_delta_on_chain.load(Acquire)
    }
4764
    /// Load the change in accounts data size in this Bank, in bytes, from off-chain events
    /// i.e. rent collection
    pub fn load_accounts_data_size_delta_off_chain(&self) -> i64 {
        // Acquire pairs with the AcqRel fetch_update in the updater.
        self.accounts_data_size_delta_off_chain.load(Acquire)
    }
4770
4771    /// Update the accounts data size delta from on-chain events by adding `amount`.
4772    /// The arithmetic saturates.
4773    fn update_accounts_data_size_delta_on_chain(&self, amount: i64) {
4774        if amount == 0 {
4775            return;
4776        }
4777
4778        self.accounts_data_size_delta_on_chain
4779            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_on_chain| {
4780                Some(accounts_data_size_delta_on_chain.saturating_add(amount))
4781            })
4782            // SAFETY: unwrap() is safe since our update fn always returns `Some`
4783            .unwrap();
4784    }
4785
4786    /// Update the accounts data size delta from off-chain events by adding `amount`.
4787    /// The arithmetic saturates.
4788    fn update_accounts_data_size_delta_off_chain(&self, amount: i64) {
4789        if amount == 0 {
4790            return;
4791        }
4792
4793        self.accounts_data_size_delta_off_chain
4794            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_off_chain| {
4795                Some(accounts_data_size_delta_off_chain.saturating_add(amount))
4796            })
4797            // SAFETY: unwrap() is safe since our update fn always returns `Some`
4798            .unwrap();
4799    }
4800
    /// Calculate the data size delta and update the off-chain accounts data size delta
    ///
    /// `old_data_size` and `new_data_size` are an account's data lengths
    /// before and after an off-chain event (e.g. rent collection).
    fn calculate_and_update_accounts_data_size_delta_off_chain(
        &self,
        old_data_size: usize,
        new_data_size: usize,
    ) {
        let data_size_delta = calculate_data_size_delta(old_data_size, new_data_size);
        self.update_accounts_data_size_delta_off_chain(data_size_delta);
    }
4810
    /// For each transaction, flatten its execution result to a `Result<()>`
    /// and collect its fee.
    ///
    /// The fee rate comes from the durable-nonce fee when present, otherwise
    /// from the blockhash queue for the transaction's recent blockhash. Fees
    /// for failed non-nonce transactions are charged here via `withdraw`;
    /// all collected fees are accumulated into `collector_fees`.
    fn filter_program_errors_and_collect_fee(
        &self,
        txs: &[SanitizedTransaction],
        execution_results: &[TransactionExecutionResult],
    ) -> Vec<Result<()>> {
        let hash_queue = self.blockhash_queue.read().unwrap();
        let mut fees = 0;

        let results = txs
            .iter()
            .zip(execution_results)
            .map(|(tx, execution_result)| {
                // Unexecuted transactions short-circuit with their error via `?`.
                let (execution_status, durable_nonce_fee) = match &execution_result {
                    TransactionExecutionResult::Executed { details, .. } => {
                        Ok((&details.status, details.durable_nonce_fee.as_ref()))
                    }
                    TransactionExecutionResult::NotExecuted(err) => Err(err.clone()),
                }?;

                // Prefer the nonce's captured fee rate; fall back to the
                // blockhash queue's rate for the transaction's blockhash.
                let (lamports_per_signature, is_nonce) = durable_nonce_fee
                    .map(|durable_nonce_fee| durable_nonce_fee.lamports_per_signature())
                    .map(|maybe_lamports_per_signature| (maybe_lamports_per_signature, true))
                    .unwrap_or_else(|| {
                        (
                            hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()),
                            false,
                        )
                    });

                let lamports_per_signature =
                    lamports_per_signature.ok_or(TransactionError::BlockhashNotFound)?;
                let fee = self.get_fee_for_message_with_lamports_per_signature(
                    tx.message(),
                    lamports_per_signature,
                );

                // In case of instruction error, even though no accounts
                // were stored we still need to charge the payer the
                // fee.
                //
                //...except nonce accounts, which already have their
                // post-load, fee deducted, pre-execute account state
                // stored
                if execution_status.is_err() && !is_nonce {
                    self.withdraw(tx.message().fee_payer(), fee)?;
                }

                fees += fee;
                Ok(())
            })
            .collect();

        self.collector_fees.fetch_add(fees, Relaxed);
        results
    }
4866
    /// Commit the results of executed transactions to this bank.
    ///
    /// Stores the resulting account state, collects rent and fees, updates
    /// the stakes cache, program cache, and transaction statuses, and
    /// accumulates the various transaction counters.
    ///
    /// `committed_transactions_count` is the number of transactions out of `sanitized_txs`
    /// that were executed. Of those, `committed_transactions_count`,
    /// `committed_with_failure_result_count` is the number of executed transactions that returned
    /// a failure result.
    ///
    /// # Panics
    ///
    /// Panics if the bank is already frozen or undergoing freezing.
    pub fn commit_transactions(
        &self,
        sanitized_txs: &[SanitizedTransaction],
        loaded_txs: &mut [TransactionLoadResult],
        execution_results: Vec<TransactionExecutionResult>,
        last_blockhash: Hash,
        lamports_per_signature: u64,
        counts: CommitTransactionCounts,
        timings: &mut ExecuteTimings,
    ) -> TransactionResults {
        assert!(
            !self.freeze_started(),
            "commit_transactions() working on a bank that is already frozen or is undergoing freezing!"
        );

        let CommitTransactionCounts {
            committed_transactions_count,
            committed_non_vote_transactions_count,
            committed_with_failure_result_count,
            signature_count,
        } = counts;

        self.increment_transaction_count(committed_transactions_count);
        self.increment_non_vote_transaction_count_since_restart(
            committed_non_vote_transactions_count,
        );
        self.increment_signature_count(signature_count);

        if committed_with_failure_result_count > 0 {
            self.transaction_error_count
                .fetch_add(committed_with_failure_result_count, Relaxed);
        }

        // Should be equivalent to checking `committed_transactions_count > 0`
        if execution_results.iter().any(|result| result.was_executed()) {
            self.is_delta.store(true, Relaxed);
            self.transaction_entries_count.fetch_add(1, Relaxed);
            self.transactions_per_entry_max
                .fetch_max(committed_transactions_count, Relaxed);
        }

        // Persist the post-execution account state to the accounts store.
        let mut write_time = Measure::start("write_time");
        let durable_nonce = DurableNonce::from_blockhash(&last_blockhash);
        self.rc.accounts.store_cached(
            self.slot(),
            sanitized_txs,
            &execution_results,
            loaded_txs,
            &durable_nonce,
            lamports_per_signature,
        );
        let rent_debits = self.collect_rent(&execution_results, loaded_txs);

        // Cached vote and stake accounts are synchronized with accounts-db
        // after each transaction.
        let mut update_stakes_cache_time = Measure::start("update_stakes_cache_time");
        self.update_stakes_cache(sanitized_txs, &execution_results, loaded_txs);
        update_stakes_cache_time.stop();

        // once committed there is no way to unroll
        write_time.stop();
        debug!(
            "store: {}us txs_len={}",
            write_time.as_us(),
            sanitized_txs.len()
        );

        // Merge programs deployed/modified by successful transactions into
        // the loaded-programs cache.
        let mut store_executors_which_were_deployed_time =
            Measure::start("store_executors_which_were_deployed_time");
        for execution_result in &execution_results {
            if let TransactionExecutionResult::Executed {
                details,
                programs_modified_by_tx,
            } = execution_result
            {
                if details.status.is_ok() {
                    let mut cache = self.loaded_programs_cache.write().unwrap();
                    cache.merge(programs_modified_by_tx);
                }
            }
        }
        store_executors_which_were_deployed_time.stop();
        saturating_add_assign!(
            timings.execute_accessories.update_executors_us,
            store_executors_which_were_deployed_time.as_us()
        );

        // Only successful transactions contribute to the accounts data size
        // delta.
        let accounts_data_len_delta = execution_results
            .iter()
            .filter_map(TransactionExecutionResult::details)
            .filter_map(|details| {
                details
                    .status
                    .is_ok()
                    .then_some(details.accounts_data_len_delta)
            })
            .sum();
        self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta);

        timings.saturating_add_in_place(ExecuteTimingType::StoreUs, write_time.as_us());
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateStakesCacheUs,
            update_stakes_cache_time.as_us(),
        );

        let mut update_transaction_statuses_time = Measure::start("update_transaction_statuses");
        self.update_transaction_statuses(sanitized_txs, &execution_results);
        let fee_collection_results =
            self.filter_program_errors_and_collect_fee(sanitized_txs, &execution_results);
        update_transaction_statuses_time.stop();
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateTransactionStatuses,
            update_transaction_statuses_time.as_us(),
        );

        TransactionResults {
            fee_collection_results,
            execution_results,
            rent_debits,
        }
    }
4992
4993    fn collect_rent(
4994        &self,
4995        execution_results: &[TransactionExecutionResult],
4996        loaded_txs: &mut [TransactionLoadResult],
4997    ) -> Vec<RentDebits> {
4998        let mut collected_rent: u64 = 0;
4999        let rent_debits: Vec<_> = loaded_txs
5000            .iter_mut()
5001            .zip(execution_results)
5002            .map(|((load_result, _nonce), execution_result)| {
5003                if let (Ok(loaded_transaction), true) =
5004                    (load_result, execution_result.was_executed_successfully())
5005                {
5006                    collected_rent += loaded_transaction.rent;
5007                    mem::take(&mut loaded_transaction.rent_debits)
5008                } else {
5009                    RentDebits::default()
5010                }
5011            })
5012            .collect();
5013        self.collected_rent.fetch_add(collected_rent, Relaxed);
5014        rent_debits
5015    }
5016
5017    fn run_incinerator(&self) {
5018        if let Some((account, _)) =
5019            self.get_account_modified_since_parent_with_fixed_root(&incinerator::id())
5020        {
5021            self.capitalization.fetch_sub(account.lamports(), Relaxed);
5022            self.store_account(&incinerator::id(), &AccountSharedData::default());
5023        }
5024    }
5025
5026    /// Get stake and stake node accounts
5027    pub(crate) fn get_stake_accounts(&self, minimized_account_set: &DashSet<Pubkey>) {
5028        self.stakes_cache
5029            .stakes()
5030            .stake_delegations()
5031            .iter()
5032            .for_each(|(pubkey, _)| {
5033                minimized_account_set.insert(*pubkey);
5034            });
5035
5036        self.stakes_cache
5037            .stakes()
5038            .staked_nodes()
5039            .par_iter()
5040            .for_each(|(pubkey, _)| {
5041                minimized_account_set.insert(*pubkey);
5042            });
5043    }
5044
    /// After deserialize, populate skipped rewrites with accounts that would normally
    /// have had their data rewritten in this slot due to rent collection (but didn't).
    ///
    /// This is required when starting up from a snapshot to verify the bank hash.
    ///
    /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions.  These fns call
    /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires
    /// calculating the bank hash again.  Since calculating the bank hash *takes* the skipped
    /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be
    /// updated to the wrong value.  So, rebuild the skipped rewrites before rehashing.
    fn rebuild_skipped_rewrites(&self) {
        // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled,
        // then do *not* add anything to our skipped_rewrites.
        if self.bank_hash_skips_rent_rewrites() {
            return;
        }

        // Measure and log how long the rebuild takes; see
        // `calculate_skipped_rewrites()` for why this is expected to be fast.
        let (skipped_rewrites, measure_skipped_rewrites) =
            measure!(self.calculate_skipped_rewrites());
        info!(
            "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}",
            skipped_rewrites.len()
        );

        *self.skipped_rewrites.lock().unwrap() = skipped_rewrites;
    }
5071
    /// Calculates (and returns) skipped rewrites for this bank
    ///
    /// Refer to `rebuild_skipped_rewrites()` for more documentation.
    /// This implementation is purposely separate to facilitate testing.
    ///
    /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the
    /// specific account is *not* already in the accounts delta hash.  If an account is not in
    /// the accounts delta hash, then it means the account was not modified.  Since (basically)
    /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent
    /// collection.  So we just need to load the accounts that would've been checked for rent
    /// collection, hash them, and add them to Bank::skipped_rewrites.
    ///
    /// As of this writing, there are ~350 million accounts on mainnet-beta.
    /// Rent collection almost always collects a single slot at a time.
    /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot.
    /// Since we haven't started processing anything yet, it should be fast enough to simply
    /// load the accounts directly.
    /// Empirically, this takes about 3-4 milliseconds.
    fn calculate_skipped_rewrites(&self) -> HashMap<Pubkey, AccountHash> {
        // The returned skipped rewrites may include accounts that were actually *not* skipped!
        // (This is safe, as per the fn's documentation above.)
        HashMap::from_iter(
            self.rent_collection_partitions()
                .into_iter()
                .map(accounts_partition::pubkey_range_from_partition)
                .flat_map(|pubkey_range| {
                    self.rc
                        .accounts
                        .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range)
                })
                .map(|(pubkey, account, _slot)| {
                    let account_hash = AccountsDb::hash_account(&account, &pubkey);
                    (pubkey, account_hash)
                }),
        )
    }
5108
    /// Eagerly collect rent for the partitions covered by this bank.
    ///
    /// No-op when `lazy_rent_collection` is set (used by some tests).
    /// Partitions are processed in parallel only when there is more than one
    /// partition AND their pubkey ranges are provably non-overlapping;
    /// otherwise collection falls back to a serial loop.  Timing and account
    /// counts are reported via the `collect_rent_eagerly` datapoint.
    fn collect_rent_eagerly(&self) {
        if self.lazy_rent_collection.load(Relaxed) {
            return;
        }

        let mut measure = Measure::start("collect_rent_eagerly-ms");
        let partitions = self.rent_collection_partitions();
        let count = partitions.len();
        let rent_metrics = RentMetrics::default();
        // partitions will usually be 1, but could be more if we skip slots
        let mut parallel = count > 1;
        if parallel {
            let ranges = partitions
                .iter()
                .map(|partition| {
                    (
                        *partition,
                        accounts_partition::pubkey_range_from_partition(*partition),
                    )
                })
                .collect::<Vec<_>>();
            // test every range to make sure ranges are not overlapping
            // some tests collect rent from overlapping ranges
            // example: [(0, 31, 32), (0, 0, 128), (0, 27, 128)]
            // read-modify-write of an account for rent collection cannot be done in parallel
            'outer: for i in 0..ranges.len() {
                for j in 0..ranges.len() {
                    if i == j {
                        continue;
                    }

                    let i = &ranges[i].1;
                    let j = &ranges[j].1;
                    // make sure i doesn't contain j
                    if i.contains(j.start()) || i.contains(j.end()) {
                        // overlap detected: demote to the serial path below
                        parallel = false;
                        break 'outer;
                    }
                }
            }

            if parallel {
                let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
                thread_pool.install(|| {
                    ranges.into_par_iter().for_each(|range| {
                        self.collect_rent_in_range(range.0, range.1, &rent_metrics)
                    });
                });
            }
        }
        if !parallel {
            // collect serially
            partitions
                .into_iter()
                .for_each(|partition| self.collect_rent_in_partition(partition, &rent_metrics));
        }
        measure.stop();
        datapoint_info!(
            "collect_rent_eagerly",
            ("accounts", rent_metrics.count.load(Relaxed), i64),
            ("partitions", count, i64),
            ("total_time_us", measure.as_us(), i64),
            (
                "hold_range_us",
                rent_metrics.hold_range_us.load(Relaxed),
                i64
            ),
            ("load_us", rent_metrics.load_us.load(Relaxed), i64),
            ("collect_us", rent_metrics.collect_us.load(Relaxed), i64),
            ("hash_us", rent_metrics.hash_us.load(Relaxed), i64),
            ("store_us", rent_metrics.store_us.load(Relaxed), i64),
        );
    }
5182
5183    fn rent_collection_partitions(&self) -> Vec<Partition> {
5184        if !self.use_fixed_collection_cycle() {
5185            // This mode is for production/development/testing.
5186            // In this mode, we iterate over the whole pubkey value range for each epochs
5187            // including warm-up epochs.
5188            // The only exception is the situation where normal epochs are relatively short
5189            // (currently less than 2 day). In that case, we arrange a single collection
5190            // cycle to be multiple of epochs so that a cycle could be greater than the 2 day.
5191            self.variable_cycle_partitions()
5192        } else {
5193            // This mode is mainly for benchmarking only.
5194            // In this mode, we always iterate over the whole pubkey value range with
5195            // <slot_count_in_two_day> slots as a collection cycle, regardless warm-up or
5196            // alignment between collection cycles and epochs.
5197            // Thus, we can simulate stable processing load of eager rent collection,
5198            // strictly proportional to the number of pubkeys since genesis.
5199            self.fixed_cycle_partitions()
5200        }
5201    }
5202
5203    /// true if rent collection does NOT rewrite accounts whose pubkey indicates
5204    ///  it is time for rent collection, but the account is rent exempt.
5205    /// false if rent collection DOES rewrite accounts if the account is rent exempt
5206    /// This is the default behavior historically.
5207    fn bank_hash_skips_rent_rewrites(&self) -> bool {
5208        self.feature_set
5209            .is_active(&feature_set::skip_rent_rewrites::id())
5210    }
5211
5212    /// true if rent fees should be collected (i.e. disable_rent_fees_collection is NOT enabled)
5213    fn should_collect_rent(&self) -> bool {
5214        !self
5215            .feature_set
5216            .is_active(&feature_set::disable_rent_fees_collection::id())
5217    }
5218
    /// Collect rent from `accounts`
    ///
    /// This fn is called inside a parallel loop from `collect_rent_in_partition()`.  Avoid adding
    /// any code that causes contention on shared memory/data (i.e. do not update atomic metrics).
    ///
    /// The return value is a struct of computed values that `collect_rent_in_partition()` will
    /// reduce at the end of its parallel loop.  If possible, place data/computation that cause
    /// contention/take locks in the return struct and process them in
    /// `collect_rent_in_partition()` after reducing the parallel loop.
    fn collect_rent_from_accounts(
        &self,
        mut accounts: Vec<(Pubkey, AccountSharedData, Slot)>,
        rent_paying_pubkeys: Option<&HashSet<Pubkey>>,
        partition_index: PartitionIndex,
    ) -> CollectRentFromAccountsInfo {
        let mut rent_debits = RentDebits::default();
        let mut total_rent_collected_info = CollectedInfo::default();
        let mut accounts_to_store =
            Vec::<(&Pubkey, &AccountSharedData)>::with_capacity(accounts.len());
        let mut time_collecting_rent_us = 0;
        let mut time_storing_accounts_us = 0;
        let can_skip_rewrites = self.bank_hash_skips_rent_rewrites();
        // Test-only knob: behave as if rewrites are skipped for storage purposes,
        // but still include the skipped accounts' hashes in the bank hash.
        let test_skip_rewrites_but_include_hash_in_bank_hash = !can_skip_rewrites
            && self
                .rc
                .accounts
                .accounts_db
                .test_skip_rewrites_but_include_in_bank_hash;
        let mut skipped_rewrites = Vec::default();
        for (pubkey, account, _loaded_slot) in accounts.iter_mut() {
            let rent_collected_info = if self.should_collect_rent() {
                let (rent_collected_info, measure) = measure!(self
                    .rent_collector
                    .collect_from_existing_account(pubkey, account));
                time_collecting_rent_us += measure.as_us();
                rent_collected_info
            } else {
                // When rent fee collection is disabled, we won't collect rent for any account. If there
                // are any rent paying accounts, their `rent_epoch` won't change either. However, if the
                // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its
                // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before.
                if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH
                    && self.rent_collector.get_rent_due(account) == RentDue::Exempt
                {
                    account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH);
                }
                CollectedInfo::default()
            };
            // only store accounts where we collected rent
            // but get the hash for all these accounts even if collected rent is 0 (= not updated).
            // Also, there's another subtle side-effect from rewrites: this
            // ensures we verify the whole on-chain state (= all accounts)
            // via the bank delta hash slowly once per an epoch.
            if (!can_skip_rewrites && !test_skip_rewrites_but_include_hash_in_bank_hash)
                || !Self::skip_rewrite(rent_collected_info.rent_amount, account)
            {
                if rent_collected_info.rent_amount > 0 {
                    // Sanity check: a pubkey we actually collected rent from should
                    // have been in the precomputed rent-paying set for this partition.
                    if let Some(rent_paying_pubkeys) = rent_paying_pubkeys {
                        if !rent_paying_pubkeys.contains(pubkey) {
                            let partition_from_pubkey = accounts_partition::partition_from_pubkey(
                                pubkey,
                                self.epoch_schedule.slots_per_epoch,
                            );
                            // Submit datapoint instead of assert while we verify this is correct
                            datapoint_warn!(
                                "bank-unexpected_rent_paying_pubkey",
                                ("slot", self.slot(), i64),
                                ("pubkey", pubkey.to_string(), String),
                                ("partition_index", partition_index, i64),
                                ("partition_from_pubkey", partition_from_pubkey, i64)
                            );
                            warn!(
                                "Collecting rent from unexpected pubkey: {}, slot: {}, parent_slot: {:?}, \
                                partition_index: {}, partition_from_pubkey: {}",
                                pubkey,
                                self.slot(),
                                self.parent().map(|bank| bank.slot()),
                                partition_index,
                                partition_from_pubkey,
                            );
                        }
                    }
                }
                total_rent_collected_info += rent_collected_info;
                accounts_to_store.push((pubkey, account));
            } else if test_skip_rewrites_but_include_hash_in_bank_hash {
                // include rewrites that we skipped in the accounts delta hash.
                // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites.
                // This code path exists to allow us to test the long term effects on validators when the skipped rewrites
                // feature is enabled.
                let hash = AccountsDb::hash_account(account, pubkey);
                skipped_rewrites.push((*pubkey, hash));
            }
            rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports());
        }

        if !accounts_to_store.is_empty() {
            // TODO: Maybe do not call `store_accounts()` here.  Instead return `accounts_to_store`
            // and have `collect_rent_in_partition()` perform all the stores.
            let (_, measure) =
                measure!(self.store_accounts((self.slot(), &accounts_to_store[..],)));
            time_storing_accounts_us += measure.as_us();
        }

        CollectRentFromAccountsInfo {
            skipped_rewrites,
            rent_collected_info: total_rent_collected_info,
            rent_rewards: rent_debits.into_unordered_rewards_iter().collect(),
            time_collecting_rent_us,
            time_storing_accounts_us,
            num_accounts: accounts.len(),
        }
    }
5332
5333    /// convert 'partition' to a pubkey range and 'collect_rent_in_range'
5334    fn collect_rent_in_partition(&self, partition: Partition, metrics: &RentMetrics) {
5335        let subrange_full = accounts_partition::pubkey_range_from_partition(partition);
5336        self.collect_rent_in_range(partition, subrange_full, metrics)
5337    }
5338
5339    /// get all pubkeys that we expect to be rent-paying or None, if this was not initialized at load time (that should only exist in test cases)
5340    fn get_rent_paying_pubkeys(&self, partition: &Partition) -> Option<HashSet<Pubkey>> {
5341        self.rc
5342            .accounts
5343            .accounts_db
5344            .accounts_index
5345            .rent_paying_accounts_by_partition
5346            .get()
5347            .and_then(|rent_paying_accounts| {
5348                rent_paying_accounts.is_initialized().then(|| {
5349                    accounts_partition::get_partition_end_indexes(partition)
5350                        .into_iter()
5351                        .flat_map(|end_index| {
5352                            rent_paying_accounts.get_pubkeys_in_partition_index(end_index)
5353                        })
5354                        .cloned()
5355                        .collect::<HashSet<_>>()
5356                })
5357            })
5358    }
5359
    /// load accounts with pubkeys in 'subrange_full'
    /// collect rent and update 'account.rent_epoch' as necessary
    /// store accounts, whether rent was collected or not (depending on whether skipping rewrites is enabled)
    /// update bank's rewrites set for all rewrites that were skipped
    fn collect_rent_in_range(
        &self,
        partition: Partition,
        subrange_full: RangeInclusive<Pubkey>,
        metrics: &RentMetrics,
    ) {
        let mut hold_range = Measure::start("hold_range");
        let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
        thread_pool.install(|| {
            // Pin this pubkey range in the accounts index while we operate on it;
            // released below with the `false` call.
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, true, thread_pool);
            hold_range.stop();
            metrics.hold_range_us.fetch_add(hold_range.as_us(), Relaxed);

            let rent_paying_pubkeys_ = self.get_rent_paying_pubkeys(&partition);
            let rent_paying_pubkeys = rent_paying_pubkeys_.as_ref();

            // divide the range into num_threads smaller ranges and process in parallel
            // Note that 'pubkey_range_from_partition' cannot easily be re-used here to break the range smaller.
            // It has special handling of 0..0 and partition_count changes affect all ranges unevenly.
            let num_threads = miraland_accounts_db::accounts_db::quarter_thread_count() as u64;
            let sz = std::mem::size_of::<u64>();
            // The split is computed on the first 8 bytes (u64 prefix) of the pubkeys.
            let start_prefix = accounts_partition::prefix_from_pubkey(subrange_full.start());
            let end_prefix_inclusive = accounts_partition::prefix_from_pubkey(subrange_full.end());
            let range = end_prefix_inclusive - start_prefix;
            let increment = range / num_threads;
            let mut results = (0..num_threads)
                .into_par_iter()
                .map(|chunk| {
                    let offset = |chunk| start_prefix + chunk * increment;
                    let start = offset(chunk);
                    let last = chunk == num_threads - 1;
                    // Overwrite the first 8 bytes of `bound` with `prefix` (big-endian),
                    // keeping the remaining bytes from the original bound.
                    let merge_prefix = |prefix: u64, mut bound: Pubkey| {
                        bound.as_mut()[0..sz].copy_from_slice(&prefix.to_be_bytes());
                        bound
                    };
                    let start = merge_prefix(start, *subrange_full.start());
                    let (accounts, measure_load_accounts) = measure!(if last {
                        // last chunk absorbs the division remainder up to the true end
                        let end = *subrange_full.end();
                        let subrange = start..=end; // IN-clusive
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    } else {
                        let end = merge_prefix(offset(chunk + 1), *subrange_full.start());
                        let subrange = start..end; // EX-clusive, the next 'start' will be this same value
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    });
                    CollectRentInPartitionInfo::new(
                        self.collect_rent_from_accounts(accounts, rent_paying_pubkeys, partition.1),
                        Duration::from_nanos(measure_load_accounts.as_ns()),
                    )
                })
                .reduce(
                    CollectRentInPartitionInfo::default,
                    CollectRentInPartitionInfo::reduce,
                );

            self.skipped_rewrites
                .lock()
                .unwrap()
                .extend(results.skipped_rewrites);

            // We cannot assert here that we collected from all expected keys.
            // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports.

            // Release the pinned range taken above.
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, false, thread_pool);

            self.collected_rent
                .fetch_add(results.rent_collected, Relaxed);
            self.update_accounts_data_size_delta_off_chain(
                -(results.accounts_data_size_reclaimed as i64),
            );
            self.rewards
                .write()
                .unwrap()
                .append(&mut results.rent_rewards);

            metrics
                .load_us
                .fetch_add(results.time_loading_accounts_us, Relaxed);
            metrics
                .collect_us
                .fetch_add(results.time_collecting_rent_us, Relaxed);
            metrics
                .store_us
                .fetch_add(results.time_storing_accounts_us, Relaxed);
            metrics.count.fetch_add(results.num_accounts, Relaxed);
        });
    }
5459
5460    /// return true iff storing this account is just a rewrite and can be skipped
5461    fn skip_rewrite(rent_amount: u64, account: &AccountSharedData) -> bool {
5462        // if rent was != 0
5463        // or special case for default rent value
5464        // these cannot be skipped and must be written
5465        rent_amount == 0 && account.rent_epoch() != 0
5466    }
5467
5468    pub(crate) fn fixed_cycle_partitions_between_slots(
5469        &self,
5470        starting_slot: Slot,
5471        ending_slot: Slot,
5472    ) -> Vec<Partition> {
5473        let slot_count_in_two_day = self.slot_count_in_two_day();
5474        accounts_partition::get_partitions(ending_slot, starting_slot, slot_count_in_two_day)
5475    }
5476
    /// Fixed-cycle partitions covering this bank's (parent_slot, slot] range.
    fn fixed_cycle_partitions(&self) -> Vec<Partition> {
        self.fixed_cycle_partitions_between_slots(self.parent_slot(), self.slot())
    }
5480
    /// Compute variable-cycle rent collection partitions for the slot range
    /// from `starting_slot` to `ending_slot`.
    ///
    /// Usually this yields one partition within `ending_epoch`.  When the
    /// range crosses an epoch boundary AND slots were skipped exactly at the
    /// transition, additional "gapped" partitions are generated so the skipped
    /// slot indexes of both epochs are still covered by rent collection.
    pub(crate) fn variable_cycle_partitions_between_slots(
        &self,
        starting_slot: Slot,
        ending_slot: Slot,
    ) -> Vec<Partition> {
        let (starting_epoch, mut starting_slot_index) =
            self.get_epoch_and_slot_index(starting_slot);
        let (ending_epoch, ending_slot_index) = self.get_epoch_and_slot_index(ending_slot);

        let mut partitions = vec![];
        if starting_epoch < ending_epoch {
            // slot gap > 1 means at least one slot between start and end was skipped
            let slot_skipped = (ending_slot - starting_slot) > 1;
            if slot_skipped {
                // Generate special partitions because there are skipped slots
                // exactly at the epoch transition.

                let parent_last_slot_index = self.get_slots_in_epoch(starting_epoch) - 1;

                // ... for parent epoch
                partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
                    starting_slot_index,
                    parent_last_slot_index,
                    starting_epoch,
                ));

                if ending_slot_index > 0 {
                    // ... for current epoch
                    partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
                        0,
                        0,
                        ending_epoch,
                    ));
                }
            }
            // the normal partition below starts from the beginning of the ending epoch
            starting_slot_index = 0;
        }

        partitions.push(self.partition_from_normal_slot_indexes(
            starting_slot_index,
            ending_slot_index,
            ending_epoch,
        ));

        partitions
    }
5526
    /// Variable-cycle partitions covering this bank's (parent_slot, slot] range.
    fn variable_cycle_partitions(&self) -> Vec<Partition> {
        self.variable_cycle_partitions_between_slots(self.parent_slot(), self.slot())
    }
5530
5531    fn do_partition_from_slot_indexes(
5532        &self,
5533        start_slot_index: SlotIndex,
5534        end_slot_index: SlotIndex,
5535        epoch: Epoch,
5536        generated_for_gapped_epochs: bool,
5537    ) -> Partition {
5538        let slot_count_per_epoch = self.get_slots_in_epoch(epoch);
5539
5540        let cycle_params = if !self.use_multi_epoch_collection_cycle(epoch) {
5541            // mnb should always go through this code path
5542            accounts_partition::rent_single_epoch_collection_cycle_params(
5543                epoch,
5544                slot_count_per_epoch,
5545            )
5546        } else {
5547            accounts_partition::rent_multi_epoch_collection_cycle_params(
5548                epoch,
5549                slot_count_per_epoch,
5550                self.first_normal_epoch(),
5551                self.slot_count_in_two_day() / slot_count_per_epoch,
5552            )
5553        };
5554        accounts_partition::get_partition_from_slot_indexes(
5555            cycle_params,
5556            start_slot_index,
5557            end_slot_index,
5558            generated_for_gapped_epochs,
5559        )
5560    }
5561
    /// Partition for a normal (non-gapped) slot-index range within `epoch`.
    fn partition_from_normal_slot_indexes(
        &self,
        start_slot_index: SlotIndex,
        end_slot_index: SlotIndex,
        epoch: Epoch,
    ) -> Partition {
        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, false)
    }
5570
    /// Partition for a slot-index range generated because slots were skipped
    /// across an epoch boundary (see `variable_cycle_partitions_between_slots`).
    fn partition_from_slot_indexes_with_gapped_epochs(
        &self,
        start_slot_index: SlotIndex,
        end_slot_index: SlotIndex,
        epoch: Epoch,
    ) -> Partition {
        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, true)
    }
5579
    // Given short epochs, it's too costly to collect rent eagerly
    // within an epoch, so lower the frequency of it.
    // This logic isn't strictly eager anymore and should only be used
    // for development/performance purposes.
    // Absolutely not under ClusterType::Mainnet!!!!
    fn use_multi_epoch_collection_cycle(&self, epoch: Epoch) -> bool {
        // Force normal behavior, disabling multi epoch collection cycle for manual local testing
        // (the cfg means this short-circuit is compiled out in `cargo test` builds)
        #[cfg(not(test))]
        if self.slot_count_per_normal_epoch() == miraland_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
        {
            return false;
        }

        // multi-epoch cycles only kick in after warm-up epochs, and only when a
        // normal epoch is shorter than the ~2-day collection cycle
        epoch >= self.first_normal_epoch()
            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
    }
5596
    /// true if rent collection should use the fixed (benchmarking-only)
    /// collection cycle instead of the normal variable cycle.
    /// Never true on Mainnet.
    pub(crate) fn use_fixed_collection_cycle(&self) -> bool {
        // Force normal behavior, disabling fixed collection cycle for manual local testing
        // (the cfg means this short-circuit is compiled out in `cargo test` builds)
        #[cfg(not(test))]
        if self.slot_count_per_normal_epoch() == miraland_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
        {
            return false;
        }

        self.cluster_type() != ClusterType::Mainnet
            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
    }
5608
    /// Number of slots spanning approximately two days at this bank's tick rate.
    fn slot_count_in_two_day(&self) -> SlotCount {
        Self::slot_count_in_two_day_helper(self.ticks_per_slot)
    }
5612
5613    // This value is specially chosen to align with slots per epoch in mainnet and testnet
5614    // Also, assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect
5615    // rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally.
5616    pub fn slot_count_in_two_day_helper(ticks_per_slot: SlotCount) -> SlotCount {
5617        2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot
5618    }
5619
    /// Slot count of a normal (post-warm-up) epoch.
    fn slot_count_per_normal_epoch(&self) -> SlotCount {
        self.get_slots_in_epoch(self.first_normal_epoch())
    }
5623
5624    pub fn cluster_type(&self) -> ClusterType {
5625        // unwrap is safe; self.cluster_type is ensured to be Some() always...
5626        // we only using Option here for ABI compatibility...
5627        self.cluster_type.unwrap()
5628    }
5629
    /// Process a batch of transactions.
    ///
    /// Loads and executes the transactions in `batch` (with `max_age` bounding
    /// blockhash validity), then commits the results to the bank.
    /// When `collect_balances` is true, pre- and post-execution lamport
    /// balances are captured and returned in the `TransactionBalancesSet`.
    /// The `enable_*_recording` flags control whether CPI, log, and return-data
    /// details are captured during execution.
    #[must_use]
    pub fn load_execute_and_commit_transactions(
        &self,
        batch: &TransactionBatch,
        max_age: usize,
        collect_balances: bool,
        enable_cpi_recording: bool,
        enable_log_recording: bool,
        enable_return_data_recording: bool,
        timings: &mut ExecuteTimings,
        log_messages_bytes_limit: Option<usize>,
    ) -> (TransactionResults, TransactionBalancesSet) {
        let pre_balances = if collect_balances {
            self.collect_balances(batch)
        } else {
            vec![]
        };

        let LoadAndExecuteTransactionsOutput {
            mut loaded_transactions,
            execution_results,
            executed_transactions_count,
            executed_non_vote_transactions_count,
            executed_with_successful_result_count,
            signature_count,
            ..
        } = self.load_and_execute_transactions(
            batch,
            max_age,
            enable_cpi_recording,
            enable_log_recording,
            enable_return_data_recording,
            timings,
            None,
            log_messages_bytes_limit,
        );

        let (last_blockhash, lamports_per_signature) =
            self.last_blockhash_and_lamports_per_signature();
        let results = self.commit_transactions(
            batch.sanitized_transactions(),
            &mut loaded_transactions,
            execution_results,
            last_blockhash,
            lamports_per_signature,
            CommitTransactionCounts {
                committed_transactions_count: executed_transactions_count as u64,
                committed_non_vote_transactions_count: executed_non_vote_transactions_count as u64,
                // executed-but-failed transactions are still committed
                committed_with_failure_result_count: executed_transactions_count
                    .saturating_sub(executed_with_successful_result_count)
                    as u64,
                signature_count,
            },
            timings,
        );
        let post_balances = if collect_balances {
            self.collect_balances(batch)
        } else {
            vec![]
        };
        (
            results,
            TransactionBalancesSet::new(pre_balances, post_balances),
        )
    }
5696
    /// Process a Transaction. This is used for unit tests and simply calls the vector
    /// Bank::process_transactions method.
    pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
        // Index 0 is safe here: exactly one transaction was submitted.
        self.try_process_transactions(std::iter::once(tx))?[0].clone()?;
        // Report the recorded signature status for signed transactions;
        // a transaction with no signatures reports Ok.
        tx.signatures
            .first()
            .map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap())
    }
5705
    /// Process a Transaction and store metadata. This is used for tests and the banks services. It
    /// replicates the vector Bank::process_transaction method with metadata recording enabled.
    #[must_use]
    pub fn process_transaction_with_metadata(
        &self,
        tx: impl Into<VersionedTransaction>,
    ) -> TransactionExecutionResult {
        let txs = vec![tx.into()];
        let batch = match self.prepare_entry_batch(txs) {
            Ok(batch) => batch,
            // sanitization/lock failure: report without executing
            Err(err) => return TransactionExecutionResult::NotExecuted(err),
        };

        let (
            TransactionResults {
                mut execution_results,
                ..
            },
            ..,
        ) = self.load_execute_and_commit_transactions(
            &batch,
            MAX_PROCESSING_AGE,
            false, // collect_balances
            false, // enable_cpi_recording
            true,  // enable_log_recording
            true,  // enable_return_data_recording
            &mut ExecuteTimings::default(),
            Some(1000 * 1000),
        );

        // exactly one transaction was submitted, so take its result
        execution_results.remove(0)
    }
5738
5739    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
5740    /// Short circuits if any of the transactions do not pass sanitization checks.
5741    pub fn try_process_transactions<'a>(
5742        &self,
5743        txs: impl Iterator<Item = &'a Transaction>,
5744    ) -> Result<Vec<Result<()>>> {
5745        let txs = txs
5746            .map(|tx| VersionedTransaction::from(tx.clone()))
5747            .collect();
5748        self.try_process_entry_transactions(txs)
5749    }
5750
    /// Process multiple transactions in a single batch. This is used for benches and unit tests.
    /// Short circuits if any of the transactions do not pass sanitization checks.
    ///
    /// Returns `Err` if batch preparation fails; otherwise one `Result` per transaction.
    pub fn try_process_entry_transactions(
        &self,
        txs: Vec<VersionedTransaction>,
    ) -> Result<Vec<Result<()>>> {
        let batch = self.prepare_entry_batch(txs)?;
        Ok(self.process_transaction_batch(&batch))
    }
5760
    /// Execute and commit `batch` with default settings, returning the
    /// per-transaction fee collection results.
    #[must_use]
    fn process_transaction_batch(&self, batch: &TransactionBatch) -> Vec<Result<()>> {
        self.load_execute_and_commit_transactions(
            batch,
            MAX_PROCESSING_AGE,
            false, // collect_balances
            false, // enable_cpi_recording
            false, // enable_log_recording
            false, // enable_return_data_recording
            &mut ExecuteTimings::default(),
            None,
        )
        .0
        .fee_collection_results
    }
5776
5777    /// Create, sign, and process a Transaction from `keypair` to `to` of
5778    /// `n` lamports where `blockhash` is the last Entry ID observed by the client.
5779    pub fn transfer(&self, n: u64, keypair: &Keypair, to: &Pubkey) -> Result<Signature> {
5780        let blockhash = self.last_blockhash();
5781        let tx = system_transaction::transfer(keypair, to, n, blockhash);
5782        let signature = tx.signatures[0];
5783        self.process_transaction(&tx).map(|_| signature)
5784    }
5785
    /// Return the lamport balance of `account`.
    pub fn read_balance(account: &AccountSharedData) -> u64 {
        account.lamports()
    }
5789    /// Each program would need to be able to introspect its own state
5790    /// this is hard-coded to the Budget language
5791    pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
5792        self.get_account(pubkey)
5793            .map(|x| Self::read_balance(&x))
5794            .unwrap_or(0)
5795    }
5796
5797    /// Compute all the parents of the bank in order
5798    pub fn parents(&self) -> Vec<Arc<Bank>> {
5799        let mut parents = vec![];
5800        let mut bank = self.parent();
5801        while let Some(parent) = bank {
5802            parents.push(parent.clone());
5803            bank = parent.parent();
5804        }
5805        parents
5806    }
5807
5808    /// Compute all the parents of the bank including this bank itself
5809    pub fn parents_inclusive(self: Arc<Self>) -> Vec<Arc<Bank>> {
5810        let mut parents = self.parents();
5811        parents.insert(0, self);
5812        parents
5813    }
5814
5815    /// fn store the single `account` with `pubkey`.
5816    /// Uses `store_accounts`, which works on a vector of accounts.
5817    pub fn store_account<T: ReadableAccount + Sync + ZeroLamport>(
5818        &self,
5819        pubkey: &Pubkey,
5820        account: &T,
5821    ) {
5822        self.store_accounts((self.slot(), &[(pubkey, account)][..]))
5823    }
5824
5825    pub fn store_accounts<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>(
5826        &self,
5827        accounts: impl StorableAccounts<'a, T>,
5828    ) {
5829        assert!(!self.freeze_started());
5830        let mut m = Measure::start("stakes_cache.check_and_store");
5831        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
5832        (0..accounts.len()).for_each(|i| {
5833            self.stakes_cache.check_and_store(
5834                accounts.pubkey(i),
5835                accounts.account(i),
5836                new_warmup_cooldown_rate_epoch,
5837            )
5838        });
5839        self.rc.accounts.store_accounts_cached(accounts);
5840        m.stop();
5841        self.rc
5842            .accounts
5843            .accounts_db
5844            .stats
5845            .stakes_cache_check_and_store_us
5846            .fetch_add(m.as_us(), Relaxed);
5847    }
5848
5849    pub fn force_flush_accounts_cache(&self) {
5850        self.rc
5851            .accounts
5852            .accounts_db
5853            .flush_accounts_cache(true, Some(self.slot()))
5854    }
5855
5856    pub fn flush_accounts_cache_if_needed(&self) {
5857        self.rc
5858            .accounts
5859            .accounts_db
5860            .flush_accounts_cache(false, Some(self.slot()))
5861    }
5862
    /// Delegate to AccountsDb to expire old recycle stores.
    pub fn expire_old_recycle_stores(&self) {
        self.rc.accounts.accounts_db.expire_old_recycle_stores()
    }
5866
5867    /// Technically this issues (or even burns!) new lamports,
5868    /// so be extra careful for its usage
5869    fn store_account_and_update_capitalization(
5870        &self,
5871        pubkey: &Pubkey,
5872        new_account: &AccountSharedData,
5873    ) {
5874        let old_account_data_size =
5875            if let Some(old_account) = self.get_account_with_fixed_root(pubkey) {
5876                match new_account.lamports().cmp(&old_account.lamports()) {
5877                    std::cmp::Ordering::Greater => {
5878                        let increased = new_account.lamports() - old_account.lamports();
5879                        trace!(
5880                            "store_account_and_update_capitalization: increased: {} {}",
5881                            pubkey,
5882                            increased
5883                        );
5884                        self.capitalization.fetch_add(increased, Relaxed);
5885                    }
5886                    std::cmp::Ordering::Less => {
5887                        let decreased = old_account.lamports() - new_account.lamports();
5888                        trace!(
5889                            "store_account_and_update_capitalization: decreased: {} {}",
5890                            pubkey,
5891                            decreased
5892                        );
5893                        self.capitalization.fetch_sub(decreased, Relaxed);
5894                    }
5895                    std::cmp::Ordering::Equal => {}
5896                }
5897                old_account.data().len()
5898            } else {
5899                trace!(
5900                    "store_account_and_update_capitalization: created: {} {}",
5901                    pubkey,
5902                    new_account.lamports()
5903                );
5904                self.capitalization
5905                    .fetch_add(new_account.lamports(), Relaxed);
5906                0
5907            };
5908
5909        self.store_account(pubkey, new_account);
5910        self.calculate_and_update_accounts_data_size_delta_off_chain(
5911            old_account_data_size,
5912            new_account.data().len(),
5913        );
5914    }
5915
5916    fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> {
5917        match self.get_account_with_fixed_root(pubkey) {
5918            Some(mut account) => {
5919                let min_balance = match get_system_account_kind(&account) {
5920                    Some(SystemAccountKind::Nonce) => self
5921                        .rent_collector
5922                        .rent
5923                        .minimum_balance(nonce::State::size()),
5924                    _ => 0,
5925                };
5926
5927                lamports
5928                    .checked_add(min_balance)
5929                    .filter(|required_balance| *required_balance <= account.lamports())
5930                    .ok_or(TransactionError::InsufficientFundsForFee)?;
5931                account
5932                    .checked_sub_lamports(lamports)
5933                    .map_err(|_| TransactionError::InsufficientFundsForFee)?;
5934                self.store_account(pubkey, &account);
5935
5936                Ok(())
5937            }
5938            None => Err(TransactionError::AccountNotFound),
5939        }
5940    }
5941
5942    pub fn accounts(&self) -> Arc<Accounts> {
5943        self.rc.accounts.clone()
5944    }
5945
    /// Final phase of bank construction: apply feature activations, install
    /// builtins/precompiles, and initialize the loaded-programs cache.
    fn finish_init(
        &mut self,
        genesis_config: &GenesisConfig,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
    ) {
        // Snapshot the rewards-pool pubkeys from genesis for later lookups.
        self.rewards_pool_pubkeys =
            Arc::new(genesis_config.rewards_pools.keys().cloned().collect());

        self.apply_feature_activations(
            ApplyFeatureActivationsCaller::FinishInit,
            debug_do_not_add_builtins,
        );

        if !debug_do_not_add_builtins {
            // Install every builtin that is not gated behind a feature;
            // feature-gated ones are added when their feature activates.
            for builtin in BUILTINS
                .iter()
                .chain(additional_builtins.unwrap_or(&[]).iter())
            {
                if builtin.feature_id.is_none() {
                    self.add_builtin(
                        builtin.program_id,
                        builtin.name.to_string(),
                        LoadedProgram::new_builtin(0, builtin.name.len(), builtin.entrypoint),
                    );
                }
            }
            // Same policy for precompiles: only the feature-less ones here.
            for precompile in get_precompiles() {
                if precompile.feature.is_none() {
                    self.add_precompile(&precompile.program_id);
                }
            }
        }

        // Seed the loaded-programs cache with this bank's slot/epoch and the
        // runtime environments derived from the current feature set.
        let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap();
        loaded_programs_cache.latest_root_slot = self.slot();
        loaded_programs_cache.latest_root_epoch = self.epoch();
        loaded_programs_cache.environments.program_runtime_v1 = Arc::new(
            create_program_runtime_environment_v1(
                &self.feature_set,
                &self.runtime_config.compute_budget.unwrap_or_default(),
                false, /* deployment */
                false, /* debugging_features */
            )
            .unwrap(),
        );
        loaded_programs_cache.environments.program_runtime_v2 =
            Arc::new(create_program_runtime_environment_v2(
                &self.runtime_config.compute_budget.unwrap_or_default(),
                false, /* debugging_features */
            ));
    }
5998
5999    pub fn set_inflation(&self, inflation: Inflation) {
6000        *self.inflation.write().unwrap() = inflation;
6001    }
6002
6003    /// Get a snapshot of the current set of hard forks
6004    pub fn hard_forks(&self) -> HardForks {
6005        self.hard_forks.read().unwrap().clone()
6006    }
6007
    /// Register a hard fork at `new_hard_fork_slot`, unless it targets a slot
    /// older than this bank or this already-frozen bank's own slot.
    pub fn register_hard_fork(&self, new_hard_fork_slot: Slot) {
        let bank_slot = self.slot();

        // Hold the freeze lock for the rest of this function so the bank
        // cannot transition to frozen between the check and the registration.
        let lock = self.freeze_lock();
        // A frozen bank's hash is no longer the default value.
        let bank_frozen = *lock != Hash::default();
        if new_hard_fork_slot < bank_slot {
            warn!(
                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older \
                than the bank at slot {bank_slot} that attempted to register it."
            );
        } else if (new_hard_fork_slot == bank_slot) && bank_frozen {
            warn!(
                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same \
                slot as the bank at slot {bank_slot} that attempted to register it, but that \
                bank is already frozen."
            );
        } else {
            self.hard_forks
                .write()
                .unwrap()
                .register(new_hard_fork_slot);
        }
    }
6031
6032    // Hi! leaky abstraction here....
6033    // try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
6034    // processing. That alternative fn provides more safety.
6035    pub fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
6036        self.get_account_modified_slot(pubkey)
6037            .map(|(acc, _slot)| acc)
6038    }
6039
6040    // Hi! leaky abstraction here....
6041    // use this over get_account() if it's called ONLY from on-chain runtime account
6042    // processing (i.e. from in-band replay/banking stage; that ensures root is *fixed* while
6043    // running).
6044    // pro: safer assertion can be enabled inside AccountsDb
6045    // con: panics!() if called from off-chain processing
6046    pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
6047        self.get_account_modified_slot_with_fixed_root(pubkey)
6048            .map(|(acc, _slot)| acc)
6049    }
6050
    // See note above get_account_with_fixed_root() about when to prefer this function
    // Returns the account together with the slot it was last stored in, if found.
    pub fn get_account_modified_slot_with_fixed_root(
        &self,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.load_slow_with_fixed_root(&self.ancestors, pubkey)
    }
6058
    /// Load `pubkey` from this bank's ancestry, returning the account and the
    /// slot it was last stored in.
    pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
        self.load_slow(&self.ancestors, pubkey)
    }
6062
    /// Load `pubkey` (with its storing slot) without requiring the root to be
    /// fixed for the duration of the call.
    fn load_slow(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        // get_account (= primary this fn caller) may be called from on-chain Bank code even if we
        // try hard to use get_account_with_fixed_root for that purpose...
        // so pass safer LoadHint:Unspecified here as a fallback
        self.rc.accounts.load_without_fixed_root(ancestors, pubkey)
    }
6073
    /// Load `pubkey` (with its storing slot) under the assumption that the
    /// root stays fixed during the call, enabling stricter AccountsDb checks.
    fn load_slow_with_fixed_root(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.rc.accounts.load_with_fixed_root(ancestors, pubkey)
    }
6081
    /// Load all accounts associated with `program_id` that are visible from
    /// this bank's ancestry, honoring the scan `config`.
    pub fn get_program_accounts(
        &self,
        program_id: &Pubkey,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc
            .accounts
            .load_by_program(&self.ancestors, self.bank_id, program_id, config)
    }
6091
    /// Like `get_program_accounts`, but only keeps accounts for which
    /// `filter` returns true.
    pub fn get_filtered_program_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        program_id: &Pubkey,
        filter: F,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_program_with_filter(
            &self.ancestors,
            self.bank_id,
            program_id,
            filter,
            config,
        )
    }
6106
    /// Load accounts via the secondary index entry `index_key`, keeping only
    /// those accepted by `filter`.
    /// NOTE(review): `byte_limit_for_scan` presumably caps the scan size —
    /// confirm its exact semantics in AccountsDb.
    pub fn get_filtered_indexed_accounts<F: Fn(&AccountSharedData) -> bool>(
        &self,
        index_key: &IndexKey,
        filter: F,
        config: &ScanConfig,
        byte_limit_for_scan: Option<usize>,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.rc.accounts.load_by_index_key_with_filter(
            &self.ancestors,
            self.bank_id,
            index_key,
            filter,
            config,
            byte_limit_for_scan,
        )
    }
6123
    /// Returns true if the configured secondary account indexes cover `key`.
    pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
        self.rc.accounts.account_indexes_include_key(key)
    }
6127
    /// Returns all the accounts this bank can load
    /// (as pubkey/account/slot triples).
    pub fn get_all_accounts(&self) -> ScanResult<Vec<PubkeyAccountSlot>> {
        self.rc.accounts.load_all(&self.ancestors, self.bank_id)
    }
6132
    /// Scans all the accounts this bank can load, applying `scan_func`.
    /// NOTE(review): the `Option`-wrapped argument suggests `None` is used as
    /// an end/absent marker — confirm in `Accounts::scan_all`.
    pub fn scan_all_accounts<F>(&self, scan_func: F) -> ScanResult<()>
    where
        F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),
    {
        self.rc
            .accounts
            .scan_all(&self.ancestors, self.bank_id, scan_func)
    }
6142
    /// Return the accounts associated with `program_id` that were stored in
    /// this bank's own slot (i.e. modified since the parent bank).
    pub fn get_program_accounts_modified_since_parent(
        &self,
        program_id: &Pubkey,
    ) -> Vec<TransactionAccount> {
        self.rc
            .accounts
            .load_by_program_slot(self.slot(), Some(program_id))
    }
6151
6152    pub fn get_transaction_logs(
6153        &self,
6154        address: Option<&Pubkey>,
6155    ) -> Option<Vec<TransactionLogInfo>> {
6156        self.transaction_log_collector
6157            .read()
6158            .unwrap()
6159            .get_logs_for_address(address)
6160    }
6161
    /// Returns all the accounts stored in this slot
    /// (i.e. modified since the parent bank).
    pub fn get_all_accounts_modified_since_parent(&self) -> Vec<TransactionAccount> {
        self.rc.accounts.load_by_program_slot(self.slot(), None)
    }
6166
6167    // if you want get_account_modified_since_parent without fixed_root, please define so...
6168    fn get_account_modified_since_parent_with_fixed_root(
6169        &self,
6170        pubkey: &Pubkey,
6171    ) -> Option<(AccountSharedData, Slot)> {
6172        let just_self: Ancestors = Ancestors::from(vec![self.slot()]);
6173        if let Some((account, slot)) = self.load_slow_with_fixed_root(&just_self, pubkey) {
6174            if slot == self.slot() {
6175                return Some((account, slot));
6176            }
6177        }
6178        None
6179    }
6180
    /// Return up to `num` (pubkey, lamports) pairs for the largest accounts,
    /// with `filter_by_address`/`filter` including or excluding those addresses.
    pub fn get_largest_accounts(
        &self,
        num: usize,
        filter_by_address: &HashSet<Pubkey>,
        filter: AccountAddressFilter,
    ) -> ScanResult<Vec<(Pubkey, u64)>> {
        self.rc.accounts.load_largest_accounts(
            &self.ancestors,
            self.bank_id,
            num,
            filter_by_address,
            filter,
        )
    }
6195
    /// Return the accumulated executed transaction count
    /// (cumulative; see `executed_transaction_count` for this bank alone).
    pub fn transaction_count(&self) -> u64 {
        self.transaction_count.load(Relaxed)
    }
6200
    /// Returns the number of non-vote transactions processed without error
    /// since the most recent boot from snapshot or genesis.
    /// This value is not shared through the network, nor retained
    /// within snapshots, but is preserved in `Bank::new_from_parent`.
    pub fn non_vote_transaction_count_since_restart(&self) -> u64 {
        self.non_vote_transaction_count_since_restart.load(Relaxed)
    }
6208
6209    /// Return the transaction count executed only in this bank
6210    pub fn executed_transaction_count(&self) -> u64 {
6211        self.transaction_count()
6212            .saturating_sub(self.parent().map_or(0, |parent| parent.transaction_count()))
6213    }
6214
    /// Return the number of transactions that failed with an error.
    pub fn transaction_error_count(&self) -> u64 {
        self.transaction_error_count.load(Relaxed)
    }
6218
    /// Return the number of transaction-bearing entries processed.
    pub fn transaction_entries_count(&self) -> u64 {
        self.transaction_entries_count.load(Relaxed)
    }
6222
    /// Return the maximum number of transactions seen in a single entry.
    pub fn transactions_per_entry_max(&self) -> u64 {
        self.transactions_per_entry_max.load(Relaxed)
    }
6226
    /// Bump the cumulative executed-transaction counter by `tx_count`.
    fn increment_transaction_count(&self, tx_count: u64) {
        self.transaction_count.fetch_add(tx_count, Relaxed);
    }
6230
    /// Bump the since-restart non-vote transaction counter by `tx_count`.
    fn increment_non_vote_transaction_count_since_restart(&self, tx_count: u64) {
        self.non_vote_transaction_count_since_restart
            .fetch_add(tx_count, Relaxed);
    }
6235
    /// Return the accumulated signature count.
    pub fn signature_count(&self) -> u64 {
        self.signature_count.load(Relaxed)
    }
6239
    /// Bump the accumulated signature counter by `signature_count`.
    fn increment_signature_count(&self, signature_count: u64) {
        self.signature_count.fetch_add(signature_count, Relaxed);
    }
6243
6244    pub fn get_signature_status_processed_since_parent(
6245        &self,
6246        signature: &Signature,
6247    ) -> Option<Result<()>> {
6248        if let Some((slot, status)) = self.get_signature_status_slot(signature) {
6249            if slot <= self.slot() {
6250                return Some(status);
6251            }
6252        }
6253        None
6254    }
6255
6256    pub fn get_signature_status_with_blockhash(
6257        &self,
6258        signature: &Signature,
6259        blockhash: &Hash,
6260    ) -> Option<Result<()>> {
6261        let rcache = self.status_cache.read().unwrap();
6262        rcache
6263            .get_status(signature, blockhash, &self.ancestors)
6264            .map(|v| v.1)
6265    }
6266
6267    pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
6268        let rcache = self.status_cache.read().unwrap();
6269        rcache.get_status_any_blockhash(signature, &self.ancestors)
6270    }
6271
6272    pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
6273        self.get_signature_status_slot(signature).map(|v| v.1)
6274    }
6275
    /// Returns true if `signature` has a recorded status in this bank's
    /// ancestry (regardless of whether the transaction succeeded).
    pub fn has_signature(&self, signature: &Signature) -> bool {
        self.get_signature_status_slot(signature).is_some()
    }
6279
    /// Hash the `accounts` HashMap. This represents a validator's interpretation
    ///  of the delta of the ledger since the last vote and up to now
    fn hash_internal_state(&self) -> Hash {
        let slot = self.slot();
        // Optionally exclude the epoch-rewards sysvar from the delta hash,
        // but only when partitioned rewards are forced on without the feature
        // gate being active.
        let ignore = (!self.is_partitioned_rewards_feature_enabled()
            && self.force_partition_rewards_in_first_block_of_epoch())
        .then_some(sysvar::epoch_rewards::id());
        let accounts_delta_hash = self
            .rc
            .accounts
            .accounts_db
            .calculate_accounts_delta_hash_internal(
                slot,
                ignore,
                self.skipped_rewrites.lock().unwrap().clone(),
            );

        // The signature count is mixed into the hash as little-endian bytes.
        let mut signature_count_buf = [0u8; 8];
        LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count());

        // bank hash = H(parent_hash, accounts_delta_hash, signature_count, last_blockhash)
        let mut hash = hashv(&[
            self.parent_hash.as_ref(),
            accounts_delta_hash.0.as_ref(),
            &signature_count_buf,
            self.last_blockhash().as_ref(),
        ]);

        // Once per epoch the epoch accounts hash is folded in; this blocks
        // until its background calculation has completed.
        let epoch_accounts_hash = self.should_include_epoch_accounts_hash().then(|| {
            let epoch_accounts_hash = self.wait_get_epoch_accounts_hash();
            hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]);
            epoch_accounts_hash
        });

        // A hard fork registered for this slot perturbs the hash so that the
        // forked chain diverges from the original.
        let buf = self
            .hard_forks
            .read()
            .unwrap()
            .get_hash_data(slot, self.parent_slot());
        if let Some(buf) = buf {
            let hard_forked_hash = extend_and_hash(&hash, &buf);
            warn!("hard fork at slot {slot} by hashing {buf:?}: {hash} => {hard_forked_hash}");
            hash = hard_forked_hash;
        }

        let bank_hash_stats = self
            .rc
            .accounts
            .accounts_db
            .get_bank_hash_stats(slot)
            .expect("No bank hash stats were found for this bank, that should not be possible");
        info!(
            "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}",
            accounts_delta_hash.0,
            self.signature_count(),
            self.last_blockhash(),
            self.capitalization(),
            if let Some(epoch_accounts_hash) = epoch_accounts_hash {
                format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref())
            } else {
                "".to_string()
            }
        );
        hash
    }
6344
6345    /// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot.
6346    /// Should it be included in *this* bank?
6347    fn should_include_epoch_accounts_hash(&self) -> bool {
6348        if !epoch_accounts_hash_utils::is_enabled_this_epoch(self) {
6349            return false;
6350        }
6351
6352        let stop_slot = epoch_accounts_hash_utils::calculation_stop(self);
6353        self.parent_slot() < stop_slot && self.slot() >= stop_slot
6354    }
6355
    /// If the epoch accounts hash should be included in this Bank, then fetch it.  If the EAH
    /// calculation has not completed yet, this fn will block until it does complete.
    fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash {
        // Measure how long we block waiting for the background calculation.
        let (epoch_accounts_hash, measure) = measure!(self
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .wait_get_epoch_accounts_hash());

        datapoint_info!(
            "bank-wait_get_epoch_accounts_hash",
            ("slot", self.slot() as i64, i64),
            ("waiting-time-us", measure.as_us() as i64, i64),
        );
        epoch_accounts_hash
    }
6373
    /// Used by ledger tool to run a final hash calculation once all ledger replay has completed.
    /// This should not be called by validator code.
    pub fn run_final_hash_calc(&self, on_halt_store_hash_raw_data_for_debug: bool) {
        self.force_flush_accounts_cache();
        // note that this slot may not be a root
        // The result is deliberately discarded: with ignore_mismatch set, this
        // runs for its side effects (hash calc, optional raw-data dump).
        _ = self.verify_accounts_hash(
            None,
            VerifyAccountsHashConfig {
                test_hash_calculation: false,
                ignore_mismatch: true,
                require_rooted_bank: false,
                run_in_background: false,
                store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug,
            },
        );
    }
6390
    /// Recalculate the hash_internal_state from the account stores. Would be used to verify a
    /// snapshot.
    /// return true if all is good
    /// Only called from startup or test code.
    #[must_use]
    fn verify_accounts_hash(
        &self,
        base: Option<(Slot, /*capitalization*/ u64)>,
        config: VerifyAccountsHashConfig,
    ) -> bool {
        let accounts = &self.rc.accounts;
        // Wait until initial hash calc is complete before starting a new hash calc.
        // This should only occur when we halt at a slot in ledger-tool.
        accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .wait_for_complete();

        // If a rooted bank is required and this one is not rooted, retry on
        // the parent (the accounts hash calc only covers rooted slots).
        if config.require_rooted_bank
            && !accounts
                .accounts_db
                .accounts_index
                .is_alive_root(self.slot())
        {
            if let Some(parent) = self.parent() {
                info!("{} is not a root, so attempting to verify bank hash on parent bank at slot: {}", self.slot(), parent.slot());
                return parent.verify_accounts_hash(base, config);
            } else {
                // this will result in mismatch errors
                // accounts hash calc doesn't include unrooted slots
                panic!("cannot verify bank hash when bank is not a root");
            }
        }
        let slot = self.slot();
        let ancestors = &self.ancestors;
        let cap = self.capitalization();
        let epoch_schedule = self.epoch_schedule();
        let rent_collector = self.rent_collector();
        if config.run_in_background {
            // Clone everything the spawned thread needs; `accounts_` is moved
            // into the closure while `accounts` stays usable for `start()`.
            let ancestors = ancestors.clone();
            let accounts = Arc::clone(accounts);
            let epoch_schedule = epoch_schedule.clone();
            let rent_collector = rent_collector.clone();
            let accounts_ = Arc::clone(&accounts);
            accounts.accounts_db.verify_accounts_hash_in_bg.start(|| {
                Builder::new()
                    .name("mlnBgHashVerify".into())
                    .spawn(move || {
                        info!("Initial background accounts hash verification has started");
                        let result = accounts_.verify_accounts_hash_and_lamports(
                            slot,
                            cap,
                            base,
                            VerifyAccountsHashAndLamportsConfig {
                                ancestors: &ancestors,
                                test_hash_calculation: config.test_hash_calculation,
                                epoch_schedule: &epoch_schedule,
                                rent_collector: &rent_collector,
                                ignore_mismatch: config.ignore_mismatch,
                                store_detailed_debug_info: config.store_hash_raw_data_for_debug,
                                use_bg_thread_pool: true,
                            },
                        );
                        accounts_
                            .accounts_db
                            .verify_accounts_hash_in_bg
                            .background_finished();
                        info!("Initial background accounts hash verification has stopped");
                        result
                    })
                    .unwrap()
            });
            true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread.
        } else {
            // Foreground path: verify synchronously, then mark the initial
            // verification as complete.
            let result = accounts.verify_accounts_hash_and_lamports(
                slot,
                cap,
                base,
                VerifyAccountsHashAndLamportsConfig {
                    ancestors,
                    test_hash_calculation: config.test_hash_calculation,
                    epoch_schedule,
                    rent_collector,
                    ignore_mismatch: config.ignore_mismatch,
                    store_detailed_debug_info: config.store_hash_raw_data_for_debug,
                    use_bg_thread_pool: false, // fg is waiting for this to run, so we can use the fg thread pool
                },
            );
            self.set_initial_accounts_hash_verification_completed();
            result
        }
    }
6483
    /// Specify that initial verification has completed.
    /// Called internally when verification runs in the foreground thread.
    /// Also has to be called by some tests which don't do verification on startup.
    /// Delegates to the accounts-db background verifier's completion flag.
    pub fn set_initial_accounts_hash_verification_completed(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verification_complete();
    }
6494
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    /// (non-blocking counterpart to waiting on the background verifier)
    pub fn has_initial_accounts_hash_verification_completed(&self) -> bool {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .check_complete()
    }
6505
6506    /// Get this bank's storages to use for snapshots.
6507    ///
6508    /// If a base slot is provided, return only the storages that are *higher* than this slot.
6509    pub fn get_snapshot_storages(&self, base_slot: Option<Slot>) -> Vec<Arc<AccountStorageEntry>> {
6510        // if a base slot is provided, request storages starting at the slot *after*
6511        let start_slot = base_slot.map_or(0, |slot| slot.saturating_add(1));
6512        // we want to *include* the storage at our slot
6513        let requested_slots = start_slot..=self.slot();
6514
6515        self.rc
6516            .accounts
6517            .accounts_db
6518            .get_snapshot_storages(requested_slots)
6519            .0
6520    }
6521
6522    #[must_use]
6523    fn verify_hash(&self) -> bool {
6524        assert!(self.is_frozen());
6525        let calculated_hash = self.hash_internal_state();
6526        let expected_hash = self.hash();
6527
6528        if calculated_hash == expected_hash {
6529            true
6530        } else {
6531            warn!(
6532                "verify failed: slot: {}, {} (calculated) != {} (expected)",
6533                self.slot(),
6534                calculated_hash,
6535                expected_hash
6536            );
6537            false
6538        }
6539    }
6540
6541    pub fn verify_transaction(
6542        &self,
6543        tx: VersionedTransaction,
6544        verification_mode: TransactionVerificationMode,
6545    ) -> Result<SanitizedTransaction> {
6546        let sanitized_tx = {
6547            let size =
6548                bincode::serialized_size(&tx).map_err(|_| TransactionError::SanitizeFailure)?;
6549            if size > PACKET_DATA_SIZE as u64 {
6550                return Err(TransactionError::SanitizeFailure);
6551            }
6552            let message_hash = if verification_mode == TransactionVerificationMode::FullVerification
6553            {
6554                tx.verify_and_hash_message()?
6555            } else {
6556                tx.message.hash()
6557            };
6558
6559            SanitizedTransaction::try_create(tx, message_hash, None, self)
6560        }?;
6561
6562        if verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
6563            || verification_mode == TransactionVerificationMode::FullVerification
6564        {
6565            sanitized_tx.verify_precompiles(&self.feature_set)?;
6566        }
6567
6568        Ok(sanitized_tx)
6569    }
6570
    /// Convenience wrapper: `verify_transaction` with full signature
    /// verification.
    pub fn fully_verify_transaction(
        &self,
        tx: VersionedTransaction,
    ) -> Result<SanitizedTransaction> {
        self.verify_transaction(tx, TransactionVerificationMode::FullVerification)
    }
6577
    /// Recalculate the total lamport capitalization by walking accounts via
    /// the index.
    ///
    /// only called from ledger-tool or tests
    fn calculate_capitalization(&self, debug_verify: bool) -> u64 {
        let is_startup = true;
        // Wait for any in-flight background accounts-hash verification so the
        // calculation below does not race with it.
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .wait_for_complete();
        self.rc
            .accounts
            .accounts_db
            .update_accounts_hash_with_verify(
                // we have to use the index since the slot could be in the write cache still
                CalcAccountsHashDataSource::IndexForTests,
                debug_verify,
                self.slot(),
                &self.ancestors,
                None,
                self.epoch_schedule(),
                &self.rent_collector,
                is_startup,
            )
            .1 // second element of the (hash, total_lamports) pair
    }
6602
6603    /// only called from tests or ledger tool
6604    pub fn calculate_and_verify_capitalization(&self, debug_verify: bool) -> bool {
6605        let calculated = self.calculate_capitalization(debug_verify);
6606        let expected = self.capitalization();
6607        if calculated == expected {
6608            true
6609        } else {
6610            warn!(
6611                "Capitalization mismatch: calculated: {} != expected: {}",
6612                calculated, expected
6613            );
6614            false
6615        }
6616    }
6617
    /// Forcibly overwrites current capitalization by actually recalculating accounts' balances.
    /// This should only be used for developing purposes.
    ///
    /// Returns the previous capitalization value.
    pub fn set_capitalization(&self) -> u64 {
        let old = self.capitalization();
        // We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache.
        // debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot
        // for example.
        let debug_verify = false;
        self.capitalization
            .store(self.calculate_capitalization(debug_verify), Relaxed);
        old
    }
6630
6631    /// Returns the `AccountsHash` that was calculated for this bank's slot
6632    ///
6633    /// This fn is used when creating a snapshot with ledger-tool, or when
6634    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
6635    pub fn get_accounts_hash(&self) -> Option<AccountsHash> {
6636        self.rc
6637            .accounts
6638            .accounts_db
6639            .get_accounts_hash(self.slot())
6640            .map(|(accounts_hash, _)| accounts_hash)
6641    }
6642
6643    /// Returns the `IncrementalAccountsHash` that was calculated for this bank's slot
6644    ///
6645    /// This fn is used when creating an incremental snapshot with ledger-tool, or when
6646    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
6647    pub fn get_incremental_accounts_hash(&self) -> Option<IncrementalAccountsHash> {
6648        self.rc
6649            .accounts
6650            .accounts_db
6651            .get_incremental_accounts_hash(self.slot())
6652            .map(|(incremental_accounts_hash, _)| incremental_accounts_hash)
6653    }
6654
    /// Returns the `SnapshotHash` for this bank's slot
    ///
    /// This fn is used at startup to verify the bank was rebuilt correctly.
    ///
    /// # Panics
    ///
    /// Panics if there is both-or-neither of an `AccountsHash` and an `IncrementalAccountsHash`
    /// for this bank's slot.  There may only be one or the other.
    pub fn get_snapshot_hash(&self) -> SnapshotHash {
        let accounts_hash = self.get_accounts_hash();
        let incremental_accounts_hash = self.get_incremental_accounts_hash();

        let accounts_hash = match (accounts_hash, incremental_accounts_hash) {
            (Some(_), Some(_)) => panic!("Both full and incremental accounts hashes are present for slot {}; it is ambiguous which one to use for the snapshot hash!", self.slot()),
            (Some(accounts_hash), None) => accounts_hash.into(),
            (None, Some(incremental_accounts_hash)) => incremental_accounts_hash.into(),
            (None, None) => panic!("accounts hash is required to get snapshot hash"),
        };
        // Mix in the epoch accounts hash, if one is pending serialization.
        let epoch_accounts_hash = self.get_epoch_accounts_hash_to_serialize();
        SnapshotHash::new(&accounts_hash, epoch_accounts_hash.as_ref())
    }
6676
    /// Return the accounts-db clean thread pool.
    pub fn get_thread_pool(&self) -> &ThreadPool {
        &self.rc.accounts.accounts_db.thread_pool_clean
    }
6680
    /// Warm the accounts-db read cache with the account for `key`, resolving
    /// the lookup through this bank's ancestors.
    pub fn load_account_into_read_cache(&self, key: &Pubkey) {
        self.rc
            .accounts
            .accounts_db
            .load_account_into_read_cache(&self.ancestors, key);
    }
6687
    /// Calculate and store the accounts hash for this bank's slot, checking
    /// that the summed lamports match the bank's tracked capitalization.
    ///
    /// # Panics
    ///
    /// Panics on a capitalization mismatch. Before panicking, if `debug_verify`
    /// was off, the calculation is rerun with it enabled to gather extra
    /// diagnostics, and a datapoint is emitted.
    pub fn update_accounts_hash(
        &self,
        data_source: CalcAccountsHashDataSource,
        mut debug_verify: bool,
        is_startup: bool,
    ) -> AccountsHash {
        let (accounts_hash, total_lamports) = self
            .rc
            .accounts
            .accounts_db
            .update_accounts_hash_with_verify(
                data_source,
                debug_verify,
                self.slot(),
                &self.ancestors,
                Some(self.capitalization()),
                self.epoch_schedule(),
                &self.rent_collector,
                is_startup,
            );
        if total_lamports != self.capitalization() {
            datapoint_info!(
                "capitalization_mismatch",
                ("slot", self.slot(), i64),
                ("calculated_lamports", total_lamports, i64),
                ("capitalization", self.capitalization(), i64),
            );

            if !debug_verify {
                // cap mismatch detected. It has been logged to metrics above.
                // Run both versions of the calculation to attempt to get more info.
                debug_verify = true;
                self.rc
                    .accounts
                    .accounts_db
                    .update_accounts_hash_with_verify(
                        data_source,
                        debug_verify,
                        self.slot(),
                        &self.ancestors,
                        Some(self.capitalization()),
                        self.epoch_schedule(),
                        &self.rent_collector,
                        is_startup,
                    );
            }

            panic!(
                "capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}",
                self.slot(),
                total_lamports,
                self.capitalization()
            );
        }
        accounts_hash
    }
6744
    /// Calculate the incremental accounts hash from `base_slot` to `self`
    pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash {
        let config = CalcAccountsHashConfig {
            use_bg_thread_pool: true,
            check_hash: false,
            ancestors: None, // does not matter, will not be used
            epoch_schedule: &self.epoch_schedule,
            rent_collector: &self.rent_collector,
            store_detailed_debug_info_on_failure: false,
        };
        // Gather storages for the requested slots (from `base_slot` onward).
        let storages = self.get_snapshot_storages(Some(base_slot));
        let sorted_storages = SortedStorages::new(&storages);
        self.rc
            .accounts
            .accounts_db
            .update_incremental_accounts_hash(
                &config,
                &sorted_storages,
                self.slot(),
                HashStats::default(),
            )
            .unwrap() // unwrap here will never fail since check_hash = false
            .0
    }
6769
    /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
    /// calculation and could shield other real accounts.
    ///
    /// Optionally cleans and shrinks account storages, runs the accounts-hash
    /// verification (possibly in the background), then verifies this bank's own
    /// hash. Returns the conjunction of the accounts verification result and
    /// the bank hash verification result, and reports timings via a datapoint.
    pub fn verify_snapshot_bank(
        &self,
        test_hash_calculation: bool,
        skip_shrink: bool,
        force_clean: bool,
        last_full_snapshot_slot: Slot,
        base: Option<(Slot, /*capitalization*/ u64)>,
    ) -> bool {
        let (_, clean_time_us) = measure_us!({
            let should_clean = force_clean || (!skip_shrink && self.slot() > 0);
            if should_clean {
                info!("Cleaning...");
                // We cannot clean past the last full snapshot's slot because we are about to
                // perform an accounts hash calculation *up to that slot*.  If we cleaned *past*
                // that slot, then accounts could be removed from older storages, which would
                // change the accounts hash.
                self.rc.accounts.accounts_db.clean_accounts(
                    Some(last_full_snapshot_slot),
                    true,
                    Some(last_full_snapshot_slot),
                    self.epoch_schedule(),
                );
                info!("Cleaning... Done.");
            } else {
                info!("Cleaning... Skipped.");
            }
        });

        let (_, shrink_time_us) = measure_us!({
            let should_shrink = !skip_shrink && self.slot() > 0;
            if should_shrink {
                info!("Shrinking...");
                self.rc.accounts.accounts_db.shrink_all_slots(
                    true,
                    Some(last_full_snapshot_slot),
                    self.epoch_schedule(),
                );
                info!("Shrinking... Done.");
            } else {
                info!("Shrinking... Skipped.");
            }
        });

        let (verified_accounts, verify_accounts_time_us) = measure_us!({
            let should_verify_accounts = !self.rc.accounts.accounts_db.skip_initial_hash_calc;
            if should_verify_accounts {
                info!("Verifying accounts...");
                let verified = self.verify_accounts_hash(
                    base,
                    VerifyAccountsHashConfig {
                        test_hash_calculation,
                        ignore_mismatch: false,
                        require_rooted_bank: false,
                        run_in_background: true,
                        store_hash_raw_data_for_debug: false,
                    },
                );
                info!("Verifying accounts... In background.");
                verified
            } else {
                info!("Verifying accounts... Skipped.");
                // Mark the background verification as complete so later waiters
                // (e.g. `wait_for_complete`) do not block forever.
                self.rc
                    .accounts
                    .accounts_db
                    .verify_accounts_hash_in_bg
                    .verification_complete();
                true
            }
        });

        info!("Verifying bank...");
        let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash());
        info!("Verifying bank... Done.");

        datapoint_info!(
            "verify_snapshot_bank",
            ("clean_us", clean_time_us, i64),
            ("shrink_us", shrink_time_us, i64),
            ("verify_accounts_us", verify_accounts_time_us, i64),
            ("verify_bank_us", verify_bank_time_us, i64),
        );

        verified_accounts && verified_bank
    }
6856
    /// Return the number of hashes per tick, if configured
    /// (returns a reference to the stored `Option`).
    pub fn hashes_per_tick(&self) -> &Option<u64> {
        &self.hashes_per_tick
    }
6861
    /// Return the number of ticks per slot (fixed at bank creation).
    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
6866
    /// Return the (fractional) number of slots per year.
    pub fn slots_per_year(&self) -> f64 {
        self.slots_per_year
    }
6871
    /// Return the number of ticks since genesis.
    /// (Reads the atomic tick counter with relaxed ordering.)
    pub fn tick_height(&self) -> u64 {
        self.tick_height.load(Relaxed)
    }
6876
    /// Return the inflation parameters of the Bank
    /// (copies the value out from under the read lock).
    pub fn inflation(&self) -> Inflation {
        *self.inflation.read().unwrap()
    }
6881
    /// Return a reference to the rent collector for this Bank.
    pub fn rent_collector(&self) -> &RentCollector {
        &self.rent_collector
    }
6886
    /// Return the total capitalization of the Bank, in lamports
    /// (reads the atomic with relaxed ordering).
    pub fn capitalization(&self) -> u64 {
        self.capitalization.load(Relaxed)
    }
6891
    /// Return this bank's max_tick_height (fixed at bank creation).
    pub fn max_tick_height(&self) -> u64 {
        self.max_tick_height
    }
6896
    /// Return the block_height of this bank (fixed at bank creation).
    pub fn block_height(&self) -> u64 {
        self.block_height
    }
6901
    /// Return the number of slots per epoch for the given epoch
    /// (delegates to the bank's `EpochSchedule`).
    pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
        self.epoch_schedule().get_slots_in_epoch(epoch)
    }
6906
    /// returns the epoch for which this bank's leader_schedule_slot_offset and slot would
    ///  need to cache leader_schedule
    /// (delegates to the bank's `EpochSchedule`).
    pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch {
        self.epoch_schedule().get_leader_schedule_epoch(slot)
    }
6912
    /// a bank-level cache of vote accounts and stake delegation info
    ///
    /// For every successfully executed transaction, feeds each loaded account
    /// (limited to the number of keys in the transaction's message) back into
    /// the stakes cache, which decides whether the account affects vote/stake
    /// state. The three input slices are parallel, indexed by transaction.
    fn update_stakes_cache(
        &self,
        txs: &[SanitizedTransaction],
        execution_results: &[TransactionExecutionResult],
        loaded_txs: &[TransactionLoadResult],
    ) {
        debug_assert_eq!(txs.len(), execution_results.len());
        debug_assert_eq!(txs.len(), loaded_txs.len());
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        izip!(txs, execution_results, loaded_txs)
            // Only successfully executed transactions can change stake state.
            .filter(|(_, execution_result, _)| execution_result.was_executed_successfully())
            .flat_map(|(tx, _, (load_result, _))| {
                load_result.iter().flat_map(|loaded_transaction| {
                    let num_account_keys = tx.message().account_keys().len();
                    loaded_transaction.accounts.iter().take(num_account_keys)
                })
            })
            .for_each(|(pubkey, account)| {
                // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us,
                //  but this code path is captured separately in ExecuteTimingType::UpdateStakesCacheUs
                self.stakes_cache
                    .check_and_store(pubkey, account, new_warmup_cooldown_rate_epoch);
            });
    }
6938
    /// Staked nodes (pubkey -> stake) from this bank's stakes cache.
    pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
        self.stakes_cache.stakes().staked_nodes()
    }
6942
    /// current vote accounts for this bank along with the stake
    ///   attributed to each account
    /// (returned as a shared, cheaply clonable `Arc`).
    pub fn vote_accounts(&self) -> Arc<VoteAccountsHashMap> {
        let stakes = self.stakes_cache.stakes();
        Arc::from(stakes.vote_accounts())
    }
6949
6950    /// Vote account for the given vote account pubkey.
6951    pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<VoteAccount> {
6952        let stakes = self.stakes_cache.stakes();
6953        let vote_account = stakes.vote_accounts().get(vote_account)?;
6954        Some(vote_account.clone())
6955    }
6956
    /// Get the EpochStakes for a given epoch
    /// (`None` if no stakes are recorded for that epoch).
    pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&EpochStakes> {
        self.epoch_stakes.get(&epoch)
    }
6961
    /// All recorded epoch stakes, keyed by epoch.
    pub fn epoch_stakes_map(&self) -> &HashMap<Epoch, EpochStakes> {
        &self.epoch_stakes
    }
6965
6966    pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
6967        Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
6968    }
6969
6970    /// vote accounts for the specific epoch along with the stake
6971    ///   attributed to each account
6972    pub fn epoch_vote_accounts(&self, epoch: Epoch) -> Option<&VoteAccountsHashMap> {
6973        let epoch_stakes = self.epoch_stakes.get(&epoch)?.stakes();
6974        Some(epoch_stakes.vote_accounts().as_ref())
6975    }
6976
    /// Get the fixed authorized voter for the given vote account for the
    /// current epoch
    ///
    /// # Panics
    ///
    /// Panics if the bank's own epoch is missing from `epoch_stakes`.
    pub fn epoch_authorized_voter(&self, vote_account: &Pubkey) -> Option<&Pubkey> {
        self.epoch_stakes
            .get(&self.epoch)
            .expect("Epoch stakes for bank's own epoch must exist")
            .epoch_authorized_voters()
            .get(vote_account)
    }
6986
    /// Get the fixed set of vote accounts for the given node id for the
    /// current epoch
    ///
    /// # Panics
    ///
    /// Panics if the bank's own epoch is missing from `epoch_stakes`.
    pub fn epoch_vote_accounts_for_node_id(&self, node_id: &Pubkey) -> Option<&NodeVoteAccounts> {
        self.epoch_stakes
            .get(&self.epoch)
            .expect("Epoch stakes for bank's own epoch must exist")
            .node_id_to_vote_accounts()
            .get(node_id)
    }
6996
    /// Get the fixed total stake of all vote accounts for current epoch
    ///
    /// # Panics
    ///
    /// Panics if the bank's own epoch is missing from `epoch_stakes`.
    pub fn total_epoch_stake(&self) -> u64 {
        self.epoch_stakes
            .get(&self.epoch)
            .expect("Epoch stakes for bank's own epoch must exist")
            .total_stake()
    }
7004
7005    /// Get the fixed stake of the given vote account for the current epoch
7006    pub fn epoch_vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
7007        *self
7008            .epoch_vote_accounts(self.epoch())
7009            .expect("Bank epoch vote accounts must contain entry for the bank's own epoch")
7010            .get(vote_account)
7011            .map(|(stake, _)| stake)
7012            .unwrap_or(&0)
7013    }
7014
    /// given a slot, return the epoch and offset into the epoch this slot falls
    /// e.g. with a fixed number for slots_per_epoch, the calculation is simply:
    ///
    ///  ( slot/slots_per_epoch, slot % slots_per_epoch )
    ///
    /// (delegates to the bank's `EpochSchedule`).
    pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) {
        self.epoch_schedule().get_epoch_and_slot_index(slot)
    }
7023
7024    pub fn get_epoch_info(&self) -> EpochInfo {
7025        let absolute_slot = self.slot();
7026        let block_height = self.block_height();
7027        let (epoch, slot_index) = self.get_epoch_and_slot_index(absolute_slot);
7028        let slots_in_epoch = self.get_slots_in_epoch(epoch);
7029        let transaction_count = Some(self.transaction_count());
7030        EpochInfo {
7031            epoch,
7032            slot_index,
7033            slots_in_epoch,
7034            absolute_slot,
7035            block_height,
7036            transaction_count,
7037        }
7038    }
7039
    /// True while this bank's `is_delta` flag is unset.
    // NOTE(review): `is_delta` appears to be set once the bank records state
    // changes — confirm against the flag's writers elsewhere in this file.
    pub fn is_empty(&self) -> bool {
        !self.is_delta.load(Relaxed)
    }
7043
    /// Test helper: register `builtin_function` as a builtin program named
    /// "mockup", effective from this bank's slot.
    pub fn add_mockup_builtin(
        &mut self,
        program_id: Pubkey,
        builtin_function: BuiltinFunctionWithContext,
    ) {
        self.add_builtin(
            program_id,
            "mockup".to_string(),
            LoadedProgram::new_builtin(self.slot, 0, builtin_function),
        );
    }
7055
    /// Add a built-in program
    ///
    /// Creates the builtin's account, records the program id in
    /// `builtin_programs`, and installs the entry in the loaded-programs cache.
    pub fn add_builtin(&mut self, program_id: Pubkey, name: String, builtin: LoadedProgram) {
        debug!("Adding program {} under {:?}", name, program_id);
        self.add_builtin_account(name.as_str(), &program_id, false);
        self.builtin_programs.insert(program_id);
        self.loaded_programs_cache
            .write()
            .unwrap()
            .assign_program(program_id, Arc::new(builtin));
        debug!("Added program {} under {:?}", name, program_id);
    }
7067
    /// Remove a built-in instruction processor
    ///
    /// The program is replaced in the cache with a `Closed` tombstone; the
    /// on-chain account itself is retained (see comment below).
    pub fn remove_builtin(&mut self, program_id: Pubkey, name: String) {
        debug!("Removing program {}", program_id);
        // Don't remove the account since the bank expects the account state to
        // be idempotent
        self.add_builtin(
            program_id,
            name,
            LoadedProgram::new_tombstone(self.slot, LoadedProgramType::Closed),
        );
        debug!("Removed program {}", program_id);
    }
7080
    /// Create the account for a precompiled program at `program_id`.
    pub fn add_precompile(&mut self, program_id: &Pubkey) {
        debug!("Adding precompiled program {}", program_id);
        self.add_precompiled_account(program_id);
        debug!("Added precompiled program {:?}", program_id);
    }
7086
    // Call AccountsDb::clean_accounts()
    //
    // This fn is meant to be called by the snapshot handler in Accounts Background Service.  If
    // calling from elsewhere, ensure the same invariants hold/expectations are met.
    pub(crate) fn clean_accounts(&self, last_full_snapshot_slot: Option<Slot>) {
        // Don't clean the slot we're snapshotting because it may have zero-lamport
        // accounts that were included in the bank delta hash when the bank was frozen,
        // and if we clean them here, any newly created snapshot's hash for this bank
        // may not match the frozen hash.
        //
        // So when we're snapshotting, the highest slot to clean is lowered by one.
        let highest_slot_to_clean = self.slot().saturating_sub(1);

        // Delegate the actual cleaning to accounts-db.
        self.rc.accounts.accounts_db.clean_accounts(
            Some(highest_slot_to_clean),
            false,
            last_full_snapshot_slot,
            self.epoch_schedule(),
        );
    }
7107
    /// Print accounts-db stats (with an empty label prefix).
    pub fn print_accounts_stats(&self) {
        self.rc.accounts.accounts_db.print_accounts_stats("");
    }
7111
    /// Shrink accounts-db's candidate slots, returning the count it reports.
    pub fn shrink_candidate_slots(&self) -> usize {
        self.rc
            .accounts
            .accounts_db
            .shrink_candidate_slots(self.epoch_schedule())
    }
7118
    /// Shrink accounts-db's ancient slots.
    pub(crate) fn shrink_ancient_slots(&self) {
        self.rc
            .accounts
            .accounts_db
            .shrink_ancient_slots(self.epoch_schedule())
    }
7125
    /// Whether the `validate_fee_collector_account` feature is active for this bank.
    pub fn validate_fee_collector_account(&self) -> bool {
        self.feature_set
            .is_active(&feature_set::validate_fee_collector_account::id())
    }
7130
    /// Acquire a read lock on this bank's cost tracker.
    pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
        self.cost_tracker.read()
    }
7134
    /// Acquire a write lock on this bank's cost tracker.
    pub fn write_cost_tracker(&self) -> LockResult<RwLockWriteGuard<CostTracker>> {
        self.cost_tracker.write()
    }
7138
7139    // Check if the wallclock time from bank creation to now has exceeded the allotted
7140    // time for transaction processing
7141    pub fn should_bank_still_be_processing_txs(
7142        bank_creation_time: &Instant,
7143        max_tx_ingestion_nanos: u128,
7144    ) -> bool {
7145        // Do this check outside of the PoH lock, hence not a method on PohRecorder
7146        bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos
7147    }
7148
7149    pub fn deactivate_feature(&mut self, id: &Pubkey) {
7150        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
7151        feature_set.active.remove(id);
7152        feature_set.inactive.insert(*id);
7153        self.feature_set = Arc::new(feature_set);
7154    }
7155
7156    pub fn activate_feature(&mut self, id: &Pubkey) {
7157        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
7158        feature_set.inactive.remove(id);
7159        feature_set.active.insert(*id, 0);
7160        self.feature_set = Arc::new(feature_set);
7161    }
7162
    /// Test-only convenience wrapper around
    /// `do_fill_bank_with_ticks_for_tests` with no scheduler installed.
    pub fn fill_bank_with_ticks_for_tests(&self) {
        self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available())
    }
7166
    /// Register ticks (each with a unique hash) until the bank's last
    /// blockhash changes, or warn if the bank is already at max tick height.
    pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) {
        if self.tick_height.load(Relaxed) < self.max_tick_height {
            let last_blockhash = self.last_blockhash();
            // Keep ticking until the blockhash queue advances past the
            // blockhash observed before the loop.
            while self.last_blockhash() == last_blockhash {
                self.register_tick(&Hash::new_unique(), scheduler)
            }
        } else {
            warn!("Bank already reached max tick height, cannot fill it with more ticks");
        }
    }
7177
    // This is called from snapshot restore AND for each epoch boundary
    // The entire code path herein must be idempotent
    //
    // Recomputes the active feature set (allowing *new* activations only when
    // called via `NewFromParent`), persists the activation slot into each newly
    // activated feature account, and applies the side effects tied to specific
    // features: inflation/burn changes, builtin program transitions, and
    // hashes-per-tick updates.
    fn apply_feature_activations(
        &mut self,
        caller: ApplyFeatureActivationsCaller,
        debug_do_not_add_builtins: bool,
    ) {
        use ApplyFeatureActivationsCaller as Caller;
        let allow_new_activations = match caller {
            Caller::FinishInit => false,
            Caller::NewFromParent => true,
            Caller::WarpFromParent => false,
        };
        let (feature_set, new_feature_activations) =
            self.compute_active_feature_set(allow_new_activations);
        self.feature_set = Arc::new(feature_set);

        // Update activation slot of features in `new_feature_activations`
        for feature_id in new_feature_activations.iter() {
            if let Some(mut account) = self.get_account_with_fixed_root(feature_id) {
                if let Some(mut feature) = feature::from_account(&account) {
                    feature.activated_at = Some(self.slot());
                    if feature::to_account(&feature, &mut account).is_some() {
                        self.store_account(feature_id, &account);
                    }
                    info!("Feature {} activated at slot {}", feature_id, self.slot());
                }
            }
        }

        if new_feature_activations.contains(&feature_set::pico_inflation::id()) {
            *self.inflation.write().unwrap() = Inflation::pico();
            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !new_feature_activations.is_disjoint(&self.feature_set.full_inflation_features_enabled())
        {
            *self.inflation.write().unwrap() = Inflation::full();
            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !debug_do_not_add_builtins {
            self.apply_builtin_program_feature_transitions(
                allow_new_activations,
                &new_feature_activations,
            );
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick::id()) {
            self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick2::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK2);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick3::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK3);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick4::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK4);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick5::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK5);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6);
        }
    }
7252
    /// Record the newly activated hashes-per-tick value on the bank, logging
    /// the change for visibility.
    fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) {
        info!(
            "Activating update_hashes_per_tick {} at slot {}",
            hashes_per_tick,
            self.slot(),
        );
        self.hashes_per_tick = Some(hashes_per_tick);
    }
7261
7262    fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
7263        account.set_lamports(
7264            self.get_minimum_balance_for_rent_exemption(account.data().len())
7265                .max(account.lamports()),
7266        );
7267    }
7268
    /// Compute the active feature set based on the current bank state,
    /// and return it together with the set of newly activated features.
    ///
    /// When `include_pending` is true, feature accounts that exist but have no
    /// `activated_at` yet are treated as activating at this slot and are
    /// returned in the pending set.
    fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, HashSet<Pubkey>) {
        let mut active = self.feature_set.active.clone();
        let mut inactive = HashSet::new();
        let mut pending = HashSet::new();
        let slot = self.slot();

        // Re-examine every currently-inactive feature against its on-chain
        // feature account.
        for feature_id in &self.feature_set.inactive {
            let mut activated = None;
            if let Some(account) = self.get_account_with_fixed_root(feature_id) {
                if let Some(feature) = feature::from_account(&account) {
                    match feature.activated_at {
                        None if include_pending => {
                            // Feature activation is pending
                            pending.insert(*feature_id);
                            activated = Some(slot);
                        }
                        Some(activation_slot) if slot >= activation_slot => {
                            // Feature has been activated already
                            activated = Some(activation_slot);
                        }
                        _ => {}
                    }
                }
            }
            if let Some(slot) = activated {
                active.insert(*feature_id, slot);
            } else {
                inactive.insert(*feature_id);
            }
        }

        (FeatureSet { active, inactive }, pending)
    }
7304
    /// Add builtin programs and precompiles whose gating features are active.
    ///
    /// When `only_apply_transitions_for_new_features` is true, only builtins
    /// gated by a feature in `new_feature_activations` are added; otherwise any
    /// builtin whose gating feature is active in `self.feature_set` is
    /// (re)added — the latter path keeps this idempotent for snapshot restore.
    fn apply_builtin_program_feature_transitions(
        &mut self,
        only_apply_transitions_for_new_features: bool,
        new_feature_activations: &HashSet<Pubkey>,
    ) {
        for builtin in BUILTINS.iter() {
            if let Some(feature_id) = builtin.feature_id {
                let should_apply_action_for_feature_transition =
                    if only_apply_transitions_for_new_features {
                        new_feature_activations.contains(&feature_id)
                    } else {
                        self.feature_set.is_active(&feature_id)
                    };
                if should_apply_action_for_feature_transition {
                    self.add_builtin(
                        builtin.program_id,
                        builtin.name.to_string(),
                        LoadedProgram::new_builtin(
                            // Deployment slot is the feature's activation slot
                            // (0 when not recorded).
                            self.feature_set.activated_slot(&feature_id).unwrap_or(0),
                            builtin.name.len(),
                            builtin.entrypoint,
                        ),
                    );
                }
            }
        }
        // Precompiles are only ever added, and only once their gating feature
        // (if any) is active.
        for precompile in get_precompiles() {
            let should_add_precompile = precompile
                .feature
                .as_ref()
                .map(|feature_id| self.feature_set.is_active(feature_id))
                .unwrap_or(false);
            if should_add_precompile {
                self.add_precompile(&precompile.program_id);
            }
        }
    }
7342
    /// Use to replace programs by feature activation
    ///
    /// Moves the account contents at `new_address` into the account at `old_address`,
    /// burning the lamports previously held at `old_address` and clearing the account
    /// at `new_address`. No-op unless *both* accounts currently exist.
    ///
    /// `datapoint_name` labels the metrics datapoint emitted when a replacement occurs.
    #[allow(dead_code)]
    fn replace_program_account(
        &mut self,
        old_address: &Pubkey,
        new_address: &Pubkey,
        datapoint_name: &'static str,
    ) {
        if let Some(old_account) = self.get_account_with_fixed_root(old_address) {
            if let Some(new_account) = self.get_account_with_fixed_root(new_address) {
                datapoint_info!(datapoint_name, ("slot", self.slot, i64));

                // Burn lamports in the old account
                // (the old account's lamports are removed from cluster capitalization;
                // the new account's lamports travel with it to the old address)
                self.capitalization
                    .fetch_sub(old_account.lamports(), Relaxed);

                // Transfer new account to old account
                self.store_account(old_address, &new_account);

                // Clear new account
                self.store_account(new_address, &AccountSharedData::default());

                // Unload a program from the bank's cache
                // (the cached entry for `old_address` is now stale since its data changed)
                self.loaded_programs_cache
                    .write()
                    .unwrap()
                    .remove_programs([*old_address].into_iter());

                // Account for the net data-size change at `old_address`
                // (old data replaced by new data; the cleared `new_address` is handled
                // by the default-account store above)
                self.calculate_and_update_accounts_data_size_delta_off_chain(
                    old_account.data().len(),
                    new_account.data().len(),
                );
            }
        }
    }
7378
7379    /// Get all the accounts for this bank and calculate stats
7380    pub fn get_total_accounts_stats(&self) -> ScanResult<TotalAccountsStats> {
7381        let accounts = self.get_all_accounts()?;
7382        Ok(self.calculate_total_accounts_stats(
7383            accounts
7384                .iter()
7385                .map(|(pubkey, account, _slot)| (pubkey, account)),
7386        ))
7387    }
7388
7389    /// Given all the accounts for a bank, calculate stats
7390    pub fn calculate_total_accounts_stats<'a>(
7391        &self,
7392        accounts: impl Iterator<Item = (&'a Pubkey, &'a AccountSharedData)>,
7393    ) -> TotalAccountsStats {
7394        let rent_collector = self.rent_collector();
7395        let mut total_accounts_stats = TotalAccountsStats::default();
7396        accounts.for_each(|(pubkey, account)| {
7397            total_accounts_stats.accumulate_account(pubkey, account, rent_collector);
7398        });
7399
7400        total_accounts_stats
7401    }
7402
    /// Get the EAH that will be used by snapshots
    ///
    /// Since snapshots are taken on roots, if the bank is in the EAH calculation window then an
    /// EAH *must* be included.  This means if an EAH calculation is currently in-flight we will
    /// wait for it to complete.
    pub fn get_epoch_accounts_hash_to_serialize(&self) -> Option<EpochAccountsHash> {
        // Only serialize an EAH when the feature is enabled for this epoch AND this
        // bank falls inside the calculation window.
        let should_get_epoch_accounts_hash = epoch_accounts_hash_utils::is_enabled_this_epoch(self)
            && epoch_accounts_hash_utils::is_in_calculation_window(self);
        if !should_get_epoch_accounts_hash {
            return None;
        }

        // Blocks until the in-flight EAH calculation (if any) completes; the elapsed
        // time is measured so the wait can be reported below.
        let (epoch_accounts_hash, measure) = measure!(self
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .wait_get_epoch_accounts_hash());

        datapoint_info!(
            "bank-get_epoch_accounts_hash_to_serialize",
            ("slot", self.slot(), i64),
            ("waiting-time-us", measure.as_us(), i64),
        );
        Some(epoch_accounts_hash)
    }
7429
7430    /// Return the epoch_reward_status field on the bank to serialize
7431    /// Returns none if we are NOT in the reward interval.
7432    pub(crate) fn get_epoch_reward_status_to_serialize(&self) -> Option<&EpochRewardStatus> {
7433        matches!(self.epoch_reward_status, EpochRewardStatus::Active(_))
7434            .then_some(&self.epoch_reward_status)
7435    }
7436
7437    /// Convenience fn to get the Epoch Accounts Hash
7438    pub fn epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
7439        self.rc
7440            .accounts
7441            .accounts_db
7442            .epoch_accounts_hash_manager
7443            .try_get_epoch_accounts_hash()
7444    }
7445
7446    /// Checks a batch of sanitized transactions again bank for age and status
7447    pub fn check_transactions_with_forwarding_delay(
7448        &self,
7449        transactions: &[SanitizedTransaction],
7450        filter: &[transaction::Result<()>],
7451        forward_transactions_to_leader_at_slot_offset: u64,
7452    ) -> Vec<TransactionCheckResult> {
7453        let mut error_counters = TransactionErrorMetrics::default();
7454        // The following code also checks if the blockhash for a transaction is too old
7455        // The check accounts for
7456        //  1. Transaction forwarding delay
7457        //  2. The slot at which the next leader will actually process the transaction
7458        // Drop the transaction if it will expire by the time the next node receives and processes it
7459        let api = perf_libs::api();
7460        let max_tx_fwd_delay = if api.is_none() {
7461            MAX_TRANSACTION_FORWARDING_DELAY
7462        } else {
7463            MAX_TRANSACTION_FORWARDING_DELAY_GPU
7464        };
7465
7466        self.check_transactions(
7467            transactions,
7468            filter,
7469            (MAX_PROCESSING_AGE)
7470                .saturating_sub(max_tx_fwd_delay)
7471                .saturating_sub(forward_transactions_to_leader_at_slot_offset as usize),
7472            &mut error_counters,
7473        )
7474    }
7475
7476    pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool {
7477        if slot < &self.slot {
7478            if let Ok(sysvar_cache) = self.transaction_processor.sysvar_cache.read() {
7479                if let Ok(slot_hashes) = sysvar_cache.get_slot_hashes() {
7480                    return slot_hashes.get(slot).is_some();
7481                }
7482            }
7483        }
7484        false
7485    }
7486
    /// Enables the `check_program_modification_slot` flag on the transaction processor.
    /// The flag is consulted by the processor when loading programs — see `load_program`.
    pub fn check_program_modification_slot(&mut self) {
        self.transaction_processor.check_program_modification_slot = true;
    }
7490
    /// Loads the program at `pubkey` via the bank's transaction processor.
    ///
    /// `reload` and `effective_epoch` are forwarded unchanged; see the transaction
    /// processor's `load_program` for their exact semantics.
    pub fn load_program(
        &self,
        pubkey: &Pubkey,
        reload: bool,
        effective_epoch: Epoch,
    ) -> Arc<LoadedProgram> {
        self.transaction_processor
            .load_program(self, pubkey, reload, effective_epoch)
    }
7500}
7501
7502impl TransactionProcessingCallback for Bank {
7503    fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
7504        self.rc
7505            .accounts
7506            .accounts_db
7507            .account_matches_owners(&self.ancestors, account, owners)
7508            .ok()
7509    }
7510
7511    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
7512        self.rc
7513            .accounts
7514            .accounts_db
7515            .load_with_fixed_root(&self.ancestors, pubkey)
7516            .map(|(acc, _)| acc)
7517    }
7518
7519    fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
7520        self.last_blockhash_and_lamports_per_signature()
7521    }
7522
7523    fn get_rent_collector(&self) -> &RentCollector {
7524        &self.rent_collector
7525    }
7526
7527    fn get_feature_set(&self) -> Arc<FeatureSet> {
7528        self.feature_set.clone()
7529    }
7530
7531    fn check_account_access(
7532        &self,
7533        tx: &SanitizedTransaction,
7534        account_index: usize,
7535        account: &AccountSharedData,
7536        error_counters: &mut TransactionErrorMetrics,
7537    ) -> Result<()> {
7538        if self.get_reward_interval() == RewardInterval::InsideInterval
7539            && tx.message().is_writable(account_index)
7540            && miraland_stake_program::check_id(account.owner())
7541        {
7542            error_counters.program_execution_temporarily_restricted += 1;
7543            Err(TransactionError::ProgramExecutionTemporarilyRestricted {
7544                account_index: account_index as u8,
7545            })
7546        } else {
7547            Ok(())
7548        }
7549    }
7550}
7551
#[cfg(feature = "dev-context-only-utils")]
impl Bank {
    /// Wraps this bank in a new `BankForks` and returns the root bank handle along
    /// with the forks. For tests only.
    pub fn wrap_with_bank_forks_for_tests(self) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank_forks = BankForks::new_rw_arc(self);
        let bank = bank_forks.read().unwrap().root_bank();
        (bank, bank_forks)
    }

    /// Creates a default bank backed by a test-default `AccountsDb`.
    pub fn default_for_tests() -> Self {
        let accounts_db = AccountsDb::default_for_tests();
        let accounts = Accounts::new(Arc::new(accounts_db));
        Self::default_with_accounts(accounts)
    }

    /// Creates a test bank from `genesis_config` and wraps it in a `BankForks`.
    pub fn new_with_bank_forks_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank = Self::new_for_tests(genesis_config);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Creates a test bank from `genesis_config` with the default test configuration.
    pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
        Self::new_for_tests_with_config(genesis_config, BankTestConfig::default())
    }

    /// Creates a test bank with `builtin_function` registered under `program_id`,
    /// wrapped in a `BankForks`.
    pub fn new_with_mockup_builtin_for_tests(
        genesis_config: &GenesisConfig,
        program_id: Pubkey,
        builtin_function: BuiltinFunctionWithContext,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);
        bank.add_mockup_builtin(program_id, builtin_function);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Creates a test bank using the secondary indexes from `test_config`.
    pub fn new_for_tests_with_config(
        genesis_config: &GenesisConfig,
        test_config: BankTestConfig,
    ) -> Self {
        Self::new_with_config_for_tests(
            genesis_config,
            test_config.secondary_indexes,
            AccountShrinkThreshold::default(),
        )
    }

    /// Creates a test bank with wallclock throttling effectively disabled
    /// (`ns_per_slot` set to the maximum value), wrapped in a `BankForks`.
    pub fn new_no_wallclock_throttle_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);

        bank.ns_per_slot = std::u128::MAX;
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Creates a test bank with the given secondary indexes and shrink ratio,
    /// using the default runtime config and no explicit account paths.
    pub(crate) fn new_with_config_for_tests(
        genesis_config: &GenesisConfig,
        account_indexes: AccountSecondaryIndexes,
        shrink_ratio: AccountShrinkThreshold,
    ) -> Self {
        Self::new_with_paths_for_tests(
            genesis_config,
            Arc::new(RuntimeConfig::default()),
            Vec::new(),
            account_indexes,
            shrink_ratio,
        )
    }

    /// Test-oriented wrapper around `new_with_paths` using the testing accounts-db
    /// config and a unique collector pubkey.
    pub fn new_with_paths_for_tests(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        paths: Vec<PathBuf>,
        account_indexes: AccountSecondaryIndexes,
        shrink_ratio: AccountShrinkThreshold,
    ) -> Self {
        Self::new_with_paths(
            genesis_config,
            runtime_config,
            paths,
            None,
            None,
            account_indexes,
            shrink_ratio,
            false,
            Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
        )
    }

    /// Creates a bench bank with no explicit account paths.
    pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
        Self::new_with_paths_for_benches(genesis_config, Vec::new())
    }

    /// Intended for use by benches only.
    /// create new bank with the given config and paths.
    pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec<PathBuf>) -> Self {
        Self::new_with_paths(
            genesis_config,
            Arc::<RuntimeConfig>::default(),
            paths,
            None,
            None,
            AccountSecondaryIndexes::default(),
            AccountShrinkThreshold::default(),
            false,
            Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
        )
    }

    /// Prepare a transaction batch from a list of legacy transactions. Used for tests only.
    pub fn prepare_batch_for_tests(&self, txs: Vec<Transaction>) -> TransactionBatch {
        let transaction_account_lock_limit = self.get_transaction_account_lock_limit();
        let sanitized_txs = txs
            .into_iter()
            .map(SanitizedTransaction::from_transaction_for_tests)
            .collect::<Vec<_>>();
        // Lock all accounts for the batch up front; the lock results travel with the
        // returned batch.
        let lock_results = self
            .rc
            .accounts
            .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit);
        TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs))
    }

    /// Set the initial accounts data size
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) {
        self.accounts_data_size_initial = amount;
    }

    /// Update the accounts data size off-chain delta
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) {
        self.update_accounts_data_size_delta_off_chain(amount)
    }

    // Forces lazy rent collection back on for tests that depend on the old behavior.
    #[cfg(test)]
    fn restore_old_behavior_for_fragile_tests(&self) {
        self.lazy_rent_collection.store(true, Relaxed);
    }

    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_transactions<'a>(
        &self,
        txs: impl Iterator<Item = &'a Transaction>,
    ) -> Vec<Result<()>> {
        self.try_process_transactions(txs).unwrap()
    }

    /// Process entry transactions in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_entry_transactions(&self, txs: Vec<VersionedTransaction>) -> Vec<Result<()>> {
        self.try_process_entry_transactions(txs).unwrap()
    }

    // Flushes the accounts write cache for this bank's slot. Test-only.
    #[cfg(test)]
    pub fn flush_accounts_cache_slot_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache_slot_for_tests(self.slot())
    }

    /// This is only valid to call from tests.
    /// block until initial accounts hash verification has completed
    pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .wait_for_complete()
    }

    /// Recomputes the accounts hash from the index. Test-only convenience.
    pub fn update_accounts_hash_for_tests(&self) -> AccountsHash {
        self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false)
    }
}
7743
/// Compute how much an account has changed size.  This function is useful when the data size delta
/// needs to be computed and passed to an `update_accounts_data_size_delta` function.
///
/// Returns `new_data_size - old_data_size`, saturating at the `i64` bounds.
///
/// # Panics
///
/// Panics if either size exceeds `i64::MAX`.
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
    assert!(old_data_size <= i64::MAX as usize);
    assert!(new_data_size <= i64::MAX as usize);

    (new_data_size as i64).saturating_sub(old_data_size as i64)
}
7754
/// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate
/// those callers explicitly.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum ApplyFeatureActivationsCaller {
    /// Called while finishing bank initialization
    FinishInit,
    /// Called when creating a child bank from its parent
    NewFromParent,
    /// Called when warping from a parent bank (skipping slots)
    WarpFromParent,
}
7763
/// Return the computed values from `collect_rent_from_accounts()`
///
/// Since `collect_rent_from_accounts()` is running in parallel, instead of updating the
/// atomics/shared data inside this function, return those values in this struct for the caller to
/// process later.
#[derive(Debug, Default)]
struct CollectRentFromAccountsInfo {
    /// Rewrites that were skipped: account pubkey paired with its account hash
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    /// Rent amount collected and account data size reclaimed
    rent_collected_info: CollectedInfo,
    /// Rent-related reward info to record, per account
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    /// Time spent collecting rent, in microseconds
    time_collecting_rent_us: u64,
    /// Time spent storing the updated accounts, in microseconds
    time_storing_accounts_us: u64,
    /// Number of accounts processed
    num_accounts: usize,
}
7778
/// Return the computed values—of each iteration in the parallel loop inside
/// `collect_rent_in_partition()`—and then perform a reduce on all of them.
#[derive(Debug, Default)]
struct CollectRentInPartitionInfo {
    /// Rewrites that were skipped: account pubkey paired with its account hash
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    /// Total rent collected, in lamports
    rent_collected: u64,
    /// Total account data size reclaimed, in bytes
    accounts_data_size_reclaimed: u64,
    /// Rent-related reward info to record, per account
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    /// Time spent loading accounts, in microseconds
    time_loading_accounts_us: u64,
    /// Time spent collecting rent, in microseconds
    time_collecting_rent_us: u64,
    /// Time spent storing the updated accounts, in microseconds
    time_storing_accounts_us: u64,
    /// Number of accounts processed
    num_accounts: usize,
}
7792
7793impl CollectRentInPartitionInfo {
7794    /// Create a new `CollectRentInPartitionInfo` from the results of loading accounts and
7795    /// collecting rent on them.
7796    #[must_use]
7797    fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self {
7798        Self {
7799            skipped_rewrites: info.skipped_rewrites,
7800            rent_collected: info.rent_collected_info.rent_amount,
7801            accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed,
7802            rent_rewards: info.rent_rewards,
7803            time_loading_accounts_us: time_loading_accounts.as_micros() as u64,
7804            time_collecting_rent_us: info.time_collecting_rent_us,
7805            time_storing_accounts_us: info.time_storing_accounts_us,
7806            num_accounts: info.num_accounts,
7807        }
7808    }
7809
7810    /// Reduce (i.e. 'combine') two `CollectRentInPartitionInfo`s into one.
7811    ///
7812    /// This fn is used by `collect_rent_in_partition()` as the reduce step (of map-reduce) in its
7813    /// parallel loop of rent collection.
7814    #[must_use]
7815    fn reduce(lhs: Self, rhs: Self) -> Self {
7816        Self {
7817            skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(),
7818            rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected),
7819            accounts_data_size_reclaimed: lhs
7820                .accounts_data_size_reclaimed
7821                .saturating_add(rhs.accounts_data_size_reclaimed),
7822            rent_rewards: [lhs.rent_rewards, rhs.rent_rewards].concat(),
7823            time_loading_accounts_us: lhs
7824                .time_loading_accounts_us
7825                .saturating_add(rhs.time_loading_accounts_us),
7826            time_collecting_rent_us: lhs
7827                .time_collecting_rent_us
7828                .saturating_add(rhs.time_collecting_rent_us),
7829            time_storing_accounts_us: lhs
7830                .time_storing_accounts_us
7831                .saturating_add(rhs.time_storing_accounts_us),
7832            num_accounts: lhs.num_accounts.saturating_add(rhs.num_accounts),
7833        }
7834    }
7835}
7836
/// Struct to collect stats when scanning all accounts in `get_total_accounts_stats()`
///
/// Populated one account at a time via `TotalAccountsStats::accumulate_account`.
#[derive(Debug, Default, Copy, Clone, Serialize)]
pub struct TotalAccountsStats {
    /// Total number of accounts
    pub num_accounts: usize,
    /// Total data size of all accounts
    pub data_len: usize,

    /// Total number of executable accounts
    pub num_executable_accounts: usize,
    /// Total data size of executable accounts
    pub executable_data_len: usize,

    /// Total number of rent exempt accounts
    pub num_rent_exempt_accounts: usize,
    /// Total number of rent paying accounts
    pub num_rent_paying_accounts: usize,
    /// Total number of rent paying accounts without data
    pub num_rent_paying_accounts_without_data: usize,
    /// Total amount of lamports in rent paying accounts
    pub lamports_in_rent_paying_accounts: u64,
}
7859
7860impl TotalAccountsStats {
7861    pub fn accumulate_account(
7862        &mut self,
7863        address: &Pubkey,
7864        account: &AccountSharedData,
7865        rent_collector: &RentCollector,
7866    ) {
7867        let data_len = account.data().len();
7868        self.num_accounts += 1;
7869        self.data_len += data_len;
7870
7871        if account.executable() {
7872            self.num_executable_accounts += 1;
7873            self.executable_data_len += data_len;
7874        }
7875
7876        if !rent_collector.should_collect_rent(address, account)
7877            || rent_collector.get_rent_due(account).is_exempt()
7878        {
7879            self.num_rent_exempt_accounts += 1;
7880        } else {
7881            self.num_rent_paying_accounts += 1;
7882            self.lamports_in_rent_paying_accounts += account.lamports();
7883            if data_len == 0 {
7884                self.num_rent_paying_accounts_without_data += 1;
7885            }
7886        }
7887    }
7888}
7889
impl Drop for Bank {
    fn drop(&mut self) {
        // If a drop callback was installed, delegate cleanup to it; otherwise purge
        // this bank's slot from the accounts db directly.
        if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() {
            drop_callback.callback(self);
        } else {
            // Default case for tests
            self.rc
                .accounts
                .accounts_db
                .purge_slot(self.slot(), self.bank_id(), false);
        }
    }
}
7903
/// utility functions used for testing and benchmarking.
pub mod test_utils {
    use {
        super::Bank,
        crate::installed_scheduler_pool::BankWithScheduler,
        miraland_sdk::{
            account::{ReadableAccount, WritableAccount},
            hash::hashv,
            lamports::LamportsError,
            pubkey::Pubkey,
        },
        miraland_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
        std::sync::Arc,
    };

    /// Registers ticks until `bank` reaches the end of its slot, then freezes it.
    pub fn goto_end_of_slot(bank: Arc<Bank>) {
        goto_end_of_slot_with_scheduler(&BankWithScheduler::new_without_scheduler(bank))
    }

    /// Same as `goto_end_of_slot`, but for a bank already wrapped with a scheduler.
    pub fn goto_end_of_slot_with_scheduler(bank: &BankWithScheduler) {
        let mut tick_hash = bank.last_blockhash();
        loop {
            // Derive each synthetic tick hash from the previous one.
            tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
            bank.register_tick(&tick_hash);
            // Once the registered tick becomes the bank's last blockhash, the bank has
            // received its full complement of ticks for the slot and can be frozen.
            if tick_hash == bank.last_blockhash() {
                bank.freeze();
                return;
            }
        }
    }

    /// Overwrites `last_timestamp` in the vote state stored at `vote_pubkey`.
    pub fn update_vote_account_timestamp(
        timestamp: BlockTimestamp,
        bank: &Bank,
        vote_pubkey: &Pubkey,
    ) {
        let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default();
        let mut vote_state = vote_state::from(&vote_account).unwrap_or_default();
        vote_state.last_timestamp = timestamp;
        let versioned = VoteStateVersions::new_current(vote_state);
        vote_state::to(&versioned, &mut vote_account).unwrap();
        bank.store_account(vote_pubkey, &vote_account);
    }

    /// Adds `lamports` to `pubkey`'s account (creating a default account if absent)
    /// and returns the resulting balance.
    pub fn deposit(
        bank: &Bank,
        pubkey: &Pubkey,
        lamports: u64,
    ) -> std::result::Result<u64, LamportsError> {
        // This doesn't collect rents intentionally.
        // Rents should only be applied to actual TXes
        let mut account = bank.get_account_with_fixed_root(pubkey).unwrap_or_default();
        account.checked_add_lamports(lamports)?;
        bank.store_account(pubkey, &account);
        Ok(account.lamports())
    }
}