solana_core/consensus.rs

pub mod fork_choice;
pub mod heaviest_subtree_fork_choice;
pub(crate) mod latest_validator_votes_for_frozen_banks;
pub mod progress_map;
mod tower1_14_11;
mod tower1_7_14;
pub mod tower_storage;
pub(crate) mod tower_vote_state;
pub mod tree_diff;
pub mod vote_stake_tracker;

use {
    self::{
        heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
        latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
        progress_map::{LockoutIntervals, ProgressMap},
        tower1_14_11::Tower1_14_11,
        tower1_7_14::Tower1_7_14,
        tower_storage::{SavedTower, SavedTowerVersions, TowerStorage},
        tower_vote_state::TowerVoteState,
    },
    crate::{consensus::progress_map::LockoutInterval, replay_stage::DUPLICATE_THRESHOLD},
    chrono::prelude::*,
    solana_clock::{Slot, UnixTimestamp},
    solana_hash::Hash,
    solana_instruction::Instruction,
    solana_keypair::Keypair,
    solana_ledger::{
        ancestor_iterator::AncestorIterator,
        blockstore::{self, Blockstore},
    },
    solana_pubkey::Pubkey,
    solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE},
    solana_slot_history::{Check, SlotHistory},
    solana_vote::{vote_account::VoteAccountsHashMap, vote_transaction::VoteTransaction},
    solana_vote_program::{
        vote_error::VoteError,
        vote_instruction,
        vote_state::{BlockTimestamp, Lockout, TowerSync, Vote, VoteState1_14_11, VoteStateUpdate},
    },
    std::{
        cmp::Ordering,
        collections::{HashMap, HashSet},
        ops::Deref,
    },
    thiserror::Error,
};

#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub enum ThresholdDecision {
    #[default]
    PassedThreshold,
    FailedThreshold(/* vote depth */ u64, /* Observed stake */ u64),
}

impl ThresholdDecision {
    pub fn passed(&self) -> bool {
        matches!(self, Self::PassedThreshold)
    }
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum SwitchForkDecision {
    SwitchProof(Hash),
    SameFork,
    FailedSwitchThreshold(
        /* Switch proof stake */ u64,
        /* Total stake */ u64,
    ),
    FailedSwitchDuplicateRollback(Slot),
}

impl SwitchForkDecision {
    pub fn to_vote_instruction(
        &self,
        vote: VoteTransaction,
        vote_account_pubkey: &Pubkey,
        authorized_voter_pubkey: &Pubkey,
    ) -> Option<Instruction> {
        match (self, vote) {
            (SwitchForkDecision::FailedSwitchThreshold(_, total_stake), _) => {
                assert_ne!(*total_stake, 0);
                None
            }
            (SwitchForkDecision::FailedSwitchDuplicateRollback(_), _) => None,
            (SwitchForkDecision::SameFork, VoteTransaction::Vote(v)) => Some(
                vote_instruction::vote(vote_account_pubkey, authorized_voter_pubkey, v),
            ),
            (SwitchForkDecision::SameFork, VoteTransaction::VoteStateUpdate(v)) => {
                Some(vote_instruction::update_vote_state(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                ))
            }
            (SwitchForkDecision::SameFork, VoteTransaction::TowerSync(t)) => Some(
                vote_instruction::tower_sync(vote_account_pubkey, authorized_voter_pubkey, t),
            ),
            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::Vote(v)) => {
                Some(vote_instruction::vote_switch(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                    *switch_proof_hash,
                ))
            }
            (
                SwitchForkDecision::SwitchProof(switch_proof_hash),
                VoteTransaction::VoteStateUpdate(v),
            ) => Some(vote_instruction::update_vote_state_switch(
                vote_account_pubkey,
                authorized_voter_pubkey,
                v,
                *switch_proof_hash,
            )),
            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::TowerSync(t)) => {
                Some(vote_instruction::tower_sync_switch(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    t,
                    *switch_proof_hash,
                ))
            }
            (SwitchForkDecision::SameFork, VoteTransaction::CompactVoteStateUpdate(v)) => {
                Some(vote_instruction::compact_update_vote_state(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                ))
            }
            (
                SwitchForkDecision::SwitchProof(switch_proof_hash),
                VoteTransaction::CompactVoteStateUpdate(v),
            ) => Some(vote_instruction::compact_update_vote_state_switch(
                vote_account_pubkey,
                authorized_voter_pubkey,
                v,
                *switch_proof_hash,
            )),
        }
    }

    pub fn can_vote(&self) -> bool {
        match self {
            SwitchForkDecision::FailedSwitchThreshold(_, _) => false,
            SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false,
            SwitchForkDecision::SameFork => true,
            SwitchForkDecision::SwitchProof(_) => true,
        }
    }
}

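// Illustrative sketch of how these thresholds are applied (not normative; the
// authoritative values are the constants below and `VOTE_THRESHOLD_SIZE` imported from
// solana_runtime): with 100 units of total stake, a vote whose simulated tower places
// some slot `s` at depth 8 passes the deep threshold check only if the fork containing
// `s` has observed more than ~66.7 units of voted stake, and a switch to another fork
// requires more than 38 units of stake to be provably locked out of the current fork
// before a `SwitchProof` is produced.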
const VOTE_THRESHOLD_DEPTH_SHALLOW: usize = 4;
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;

pub type Result<T> = std::result::Result<T, TowerError>;

pub type Stake = u64;
pub type VotedStakes = HashMap<Slot, Stake, ahash::RandomState>;
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;

pub(crate) struct ComputedBankState {
    pub voted_stakes: VotedStakes,
    pub total_stake: Stake,
    pub fork_stake: Stake,
    /// Flat list of intervals of lockouts of the form {voter, start, end}
    /// ([`crate::consensus::progress_map::LockoutInterval`]).
    pub lockout_intervals: LockoutIntervals,
    pub my_latest_landed_vote: Option<Slot>,
}

#[derive(Debug, PartialEq, Clone)]
#[allow(clippy::large_enum_variant)]
pub enum TowerVersions {
    V1_7_14(Tower1_7_14),
    V1_14_11(Tower1_14_11),
    Current(Tower),
}

impl TowerVersions {
    pub fn new_current(tower: Tower) -> Self {
        Self::Current(tower)
    }

    pub fn convert_to_current(self) -> Tower {
        match self {
            TowerVersions::V1_7_14(tower) => tower.into(),
            TowerVersions::V1_14_11(tower) => tower.into(),
            TowerVersions::Current(tower) => tower,
        }
    }
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(PartialEq, Eq, Debug, Default, Clone, Copy)]
pub(crate) enum BlockhashStatus {
    /// No vote since restart
    #[default]
    Uninitialized,
    /// Non-voting validator
    NonVoting,
    /// Hot spare validator
    HotSpare,
    /// Successfully generated vote tx with blockhash
    Blockhash(Hash),
}

#[derive(Clone, Debug, PartialEq)]
pub struct Tower {
    pub node_pubkey: Pubkey,
    pub(crate) threshold_depth: usize,
    threshold_size: f64,
    pub(crate) vote_state: TowerVoteState,
    last_vote: VoteTransaction,
    // The blockhash used in the last vote transaction; it may or may not equal the
    // blockhash of the voted block itself, depending on whether the vote slot was refreshed.
    // For instance, a vote for slot 5 may be refreshed/resubmitted for inclusion in
    // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of block 10, not 5.
    // For non-voting validators this is `NonVoting`.
    last_vote_tx_blockhash: BlockhashStatus,
    last_timestamp: BlockTimestamp,
    // Restored last voted slot which cannot be found in SlotHistory at the replayed root
    // (this is a special field for slashing-free validator restart with edge cases).
    // This could be emptied after some time, but it is left intact indefinitely for
    // simplicity of implementation.
    // Further, a stray slot can be stale or not. `Stale` here means whether the given
    // bank_forks (=~ ledger) lacks the slot or not.
    stray_restored_slot: Option<Slot>,
    pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
}

impl Default for Tower {
    fn default() -> Self {
        let mut tower = Self {
            node_pubkey: Pubkey::default(),
            threshold_depth: VOTE_THRESHOLD_DEPTH,
            threshold_size: VOTE_THRESHOLD_SIZE,
            vote_state: TowerVoteState::default(),
            last_vote: VoteTransaction::from(TowerSync::default()),
            last_timestamp: BlockTimestamp::default(),
            last_vote_tx_blockhash: BlockhashStatus::default(),
            stray_restored_slot: Option::default(),
            last_switch_threshold_check: Option::default(),
        };
        // VoteState::root_slot is ensured to be Some in Tower
        tower.vote_state.root_slot = Some(Slot::default());
        tower
    }
}

// Tower1_14_11 is the persisted data format for the Tower,
// decoupling it from VoteState::Current.
impl From<Tower> for Tower1_14_11 {
    fn from(tower: Tower) -> Self {
        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: VoteState1_14_11::from(tower.vote_state),
            last_vote: tower.last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

// Tower1_14_11 is the persisted data format for the Tower,
// decoupling it from VoteState::Current.
impl From<Tower1_14_11> for Tower {
    fn from(tower: Tower1_14_11) -> Self {
        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: TowerVoteState::from(tower.vote_state),
            last_vote: tower.last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

impl From<Tower1_7_14> for Tower {
    fn from(tower: Tower1_7_14) -> Self {
        let box_last_vote = VoteTransaction::from(tower.last_vote.clone());

        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: TowerVoteState::from(tower.vote_state),
            last_vote: box_last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

impl Tower {
    pub fn new(
        node_pubkey: &Pubkey,
        vote_account_pubkey: &Pubkey,
        root: Slot,
        bank: &Bank,
    ) -> Self {
        let mut tower = Tower {
            node_pubkey: *node_pubkey,
            ..Tower::default()
        };
        tower.initialize_lockouts_from_bank(vote_account_pubkey, root, bank);
        tower
    }

    #[cfg(test)]
    pub fn new_for_tests(threshold_depth: usize, threshold_size: f64) -> Self {
        Self {
            threshold_depth,
            threshold_size,
            ..Tower::default()
        }
    }

    #[cfg(test)]
    pub fn new_random(node_pubkey: Pubkey) -> Self {
        use {
            rand::Rng,
            solana_vote_program::vote_state::{LandedVote, VoteStateV4},
        };

        let mut rng = rand::thread_rng();
        let root_slot = rng.gen();
        let votes = (1..32)
            .map(|x| LandedVote {
                latency: 0,
                lockout: Lockout::new_with_confirmation_count(
                    u64::from(x).saturating_add(root_slot),
                    32_u32.saturating_sub(x),
                ),
            })
            .collect();
        let vote_state = VoteStateV4 {
            node_pubkey,
            root_slot: Some(root_slot),
            votes,
            ..VoteStateV4::default()
        };
        let last_vote = TowerSync::from(
            vote_state
                .votes
                .iter()
                .map(|lv| (lv.slot(), lv.confirmation_count()))
                .collect::<Vec<_>>(),
        );
        Self {
            node_pubkey,
            vote_state: TowerVoteState::from(vote_state),
            last_vote: VoteTransaction::from(last_vote),
            ..Tower::default()
        }
    }

    pub fn new_from_bankforks(
        bank_forks: &BankForks,
        node_pubkey: &Pubkey,
        vote_account: &Pubkey,
    ) -> Self {
        let root_bank = bank_forks.root_bank();
        let frozen_banks: Vec<_> = bank_forks
            .frozen_banks()
            .map(|(_slot, bank)| bank)
            .collect();
        let (_progress, heaviest_subtree_fork_choice) =
            crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
                root_bank.deref(),
                frozen_banks,
                node_pubkey,
                vote_account,
                vec![],
            );
        let root = root_bank.slot();

        let (best_slot, best_hash) = heaviest_subtree_fork_choice.best_overall_slot();
        let heaviest_bank = bank_forks
            .get_with_checked_hash((best_slot, best_hash))
            .expect(
                "The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
            );

        Self::new(node_pubkey, vote_account, root, &heaviest_bank)
    }

    pub(crate) fn collect_vote_lockouts(
        vote_account_pubkey: &Pubkey,
        bank_slot: Slot,
        root_slot: Slot,
        vote_accounts: &VoteAccountsHashMap,
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        get_frozen_hash: impl Fn(Slot) -> Option<Hash>,
        latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
        vote_slots: &mut HashSet<Slot, ahash::RandomState>,
    ) -> ComputedBankState {
        let total_slots = bank_slot.saturating_sub(root_slot) as usize;
        vote_slots.reserve(total_slots);
        let mut voted_stakes =
            HashMap::with_capacity_and_hasher(total_slots, ahash::RandomState::default());
        let mut total_stake = 0;

        let total_votes = vote_accounts
            .values()
            .filter(|(voted_stake, _)| *voted_stake != 0)
            .map(|(_, account)| account.vote_state_view().votes_len())
            .sum();
        // Flat list of intervals of lockouts of the form {voter, start, end}.
        let mut lockout_intervals = LockoutIntervals::with_capacity(total_votes);
        let mut my_latest_landed_vote = None;
        for (&key, (voted_stake, account)) in vote_accounts.iter() {
            let voted_stake = *voted_stake;
            if voted_stake == 0 {
                continue;
            }
            trace!("{vote_account_pubkey} {key} with stake {voted_stake}");
            let mut vote_state = TowerVoteState::from(account.vote_state_view());
            lockout_intervals.extend(vote_state.votes.iter().map(|v| LockoutInterval {
                start: v.slot(),
                end: v.last_locked_out_slot(),
                voter: key,
            }));

            if key == *vote_account_pubkey {
                my_latest_landed_vote = vote_state.nth_recent_lockout(0).map(|l| l.slot());
                debug!("vote state {vote_state:?}");
                debug!(
                    "observed slot {}",
                    vote_state
                        .nth_recent_lockout(0)
                        .map(|l| l.slot())
                        .unwrap_or(0) as i64
                );
                debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
                datapoint_info!(
                    "tower-observed",
                    (
                        "slot",
                        vote_state
                            .nth_recent_lockout(0)
                            .map(|l| l.slot())
                            .unwrap_or(0),
                        i64
                    ),
                    ("root", vote_state.root_slot.unwrap_or(0), i64)
                );
            }
            let start_root = vote_state.root_slot;

            // Add the last vote to update the `heaviest_subtree_fork_choice`
            if let Some(last_landed_voted_slot) = vote_state.last_voted_slot() {
                latest_validator_votes_for_frozen_banks.check_add_vote(
                    key,
                    last_landed_voted_slot,
                    get_frozen_hash(last_landed_voted_slot),
                    true,
                );
            }

            vote_state.process_next_vote_slot(bank_slot);

            // Only record vote slots greater than the fork root. Votes earlier
            // than the fork root will not have entries in `ancestors` and are ignored by
            // `populate_ancestor_voted_stakes`, and there can be no landed votes
            // >= `bank_slot`. Bounding here prevents unnecessary range expansion
            // in the dense maps and keeps behavior identical.
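            // For example (illustrative numbers only): with `root_slot = 100`, landed
            // votes on slots 90, 105, and 110 contribute only 105 and 110 to
            // `vote_slots`; the vote on 90 predates this fork's root and would have no
            // entry in `ancestors` anyway.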
            for slot in vote_state.votes.iter().filter_map(|v| {
                let slot = v.slot();
                (slot > root_slot).then_some(slot)
            }) {
                vote_slots.insert(slot);
            }

            if start_root != vote_state.root_slot {
                if let Some(root) = start_root {
                    // The account's prior root can be older than this fork's root; clamp to
                    // the same range for the same reason as above.
                    if root > root_slot {
                        trace!("ROOT: {root}");
                        vote_slots.insert(root);
                    }
                }
            }
            if let Some(root) = vote_state.root_slot {
                // Likewise, only include the (new) root if it lies within the range this
                // bank will ever query in `ancestors`.
                if root > root_slot {
                    vote_slots.insert(root);
                }
            }

            // The last vote in the vote stack is a simulated vote on bank_slot, which
            // we added to the vote stack earlier in this function by calling
            // process_next_vote_slot().
            // We don't want to update the ancestor stakes of this vote b/c it does not
            // represent an actual vote by the validator.

            // Note: It should not be possible for any vote state in this bank to have
            // a vote for a slot >= bank_slot, so we are guaranteed that the last vote in
            // this vote stack is the simulated vote, so this fetch should be sufficient
            // to find the last unsimulated vote.
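            // For example (illustrative slots only): if this account's landed tower ended
            // in [.., 10, 12] and `bank_slot` is 13, the simulated stack is now
            // [.., 10, 12, 13]; `nth_recent_lockout(0)` is the simulated vote (13) and
            // `nth_recent_lockout(1)` (12) is the most recent vote that actually landed.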
            assert_eq!(
                vote_state.nth_recent_lockout(0).map(|l| l.slot()),
                Some(bank_slot)
            );
            if let Some(vote) = vote_state.nth_recent_lockout(1) {
                // Update all the parents of this last vote with the stake of this vote account
                Self::update_ancestor_voted_stakes(
                    &mut voted_stakes,
                    vote.slot(),
                    voted_stake,
                    ancestors,
                );
            }
            total_stake += voted_stake;
        }

        // TODO: populate_ancestor_voted_stakes only adds zeros. Comment why
        // that is necessary (if so).
        Self::populate_ancestor_voted_stakes(
            &mut voted_stakes,
            vote_slots.iter().copied(),
            ancestors,
        );

        // As commented above, since the votes at current bank_slot are
        // simulated votes, the voted_stake for `bank_slot` is not populated.
        // Therefore, we use the voted_stake for the parent of bank_slot as the
        // `fork_stake` instead.
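        // For example (illustrative slots only): if `bank_slot` is 20 and its direct
        // parent is 18, the max ancestor of 20 is 18, so `fork_stake` is
        // `voted_stakes[&18]` (or 0 if 20 has no entry in `ancestors`).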
        let fork_stake = ancestors
            .get(&bank_slot)
            .and_then(|ancestors| {
                ancestors
                    .iter()
                    .max()
                    .and_then(|parent| voted_stakes.get(parent))
                    .copied()
            })
            .unwrap_or(0);

        vote_slots.clear();

        ComputedBankState {
            voted_stakes,
            total_stake,
            fork_stake,
            lockout_intervals,
            my_latest_landed_vote,
        }
    }

    #[cfg(test)]
    fn is_slot_confirmed(
        &self,
        slot: Slot,
        voted_stakes: &VotedStakes,
        total_stake: Stake,
    ) -> bool {
        voted_stakes
            .get(&slot)
            .map(|stake| (*stake as f64 / total_stake as f64) > self.threshold_size)
            .unwrap_or(false)
    }

    pub(crate) fn is_slot_duplicate_confirmed(
        &self,
        slot: Slot,
        voted_stakes: &VotedStakes,
        total_stake: Stake,
    ) -> bool {
        voted_stakes
            .get(&slot)
            .map(|stake| (*stake as f64 / total_stake as f64) > DUPLICATE_THRESHOLD)
            .unwrap_or(false)
    }

    pub fn tower_slots(&self) -> Vec<Slot> {
        self.vote_state.tower()
    }

    pub(crate) fn last_vote_tx_blockhash(&self) -> BlockhashStatus {
        self.last_vote_tx_blockhash
    }

    pub fn refresh_last_vote_timestamp(&mut self, heaviest_slot_on_same_fork: Slot) {
        let timestamp = if let Some(last_vote_timestamp) = self.last_vote.timestamp() {
            // To avoid a refreshed vote tx getting caught in deduplication filters,
            // we need to update the timestamp. Increment by the smallest amount to
            // avoid skewing the Timestamp Oracle.
            last_vote_timestamp.saturating_add(1)
        } else {
            // If the previous vote did not send a timestamp due to clock error,
            // use the last good timestamp + 1
            datapoint_info!(
                "refresh-timestamp-missing",
                ("heaviest-slot", heaviest_slot_on_same_fork, i64),
                ("last-timestamp", self.last_timestamp.timestamp, i64),
                ("last-slot", self.last_timestamp.slot, i64),
            );
            self.last_timestamp.timestamp.saturating_add(1)
        };

        if let Some(last_voted_slot) = self.last_vote.last_voted_slot() {
            if heaviest_slot_on_same_fork <= last_voted_slot {
                warn!(
                    "Trying to refresh timestamp for vote on {last_voted_slot} using smaller \
                     heaviest bank {heaviest_slot_on_same_fork}"
                );
                return;
            }
            self.last_timestamp = BlockTimestamp {
                slot: last_voted_slot,
                timestamp,
            };
            self.last_vote.set_timestamp(Some(timestamp));
        } else {
            warn!(
                "Trying to refresh timestamp for last vote on heaviest bank on same fork \
                 {heaviest_slot_on_same_fork}, but there is no vote to refresh"
            );
        }
    }

    pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) {
        self.last_vote_tx_blockhash = BlockhashStatus::Blockhash(new_vote_tx_blockhash);
    }

    pub(crate) fn mark_last_vote_tx_blockhash_non_voting(&mut self) {
        self.last_vote_tx_blockhash = BlockhashStatus::NonVoting;
    }

    pub(crate) fn mark_last_vote_tx_blockhash_hot_spare(&mut self) {
        self.last_vote_tx_blockhash = BlockhashStatus::HotSpare;
    }

    pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
        let vote_account = bank.get_vote_account(vote_account_pubkey)?;
        vote_account.vote_state_view().last_voted_slot()
    }

    pub fn record_bank_vote(&mut self, bank: &Bank) -> Option<Slot> {
        // Returns the new root if one is made after applying a vote for the given bank to
        // `self.vote_state`
        let block_id = bank.block_id().unwrap_or_else(|| {
            // This should only happen for our leader bank. Note: since the new shred
            // format is yet to be rolled out to all clusters, this can currently also
            // happen for non-leader banks. Once rolled out, we can assert here that
            // this is our leader bank.
            Hash::default()
        });
        self.record_bank_vote_and_update_lockouts(
            bank.slot(),
            bank.hash(),
            bank.feature_set
                .is_active(&agave_feature_set::enable_tower_sync_ix::id()),
            block_id,
        )
    }

    /// If we've recently updated the vote state by applying a new vote
    /// or syncing from a bank, generate the proper last_vote.
    pub(crate) fn update_last_vote_from_vote_state(
        &mut self,
        vote_hash: Hash,
        enable_tower_sync_ix: bool,
        block_id: Hash,
    ) {
        let mut new_vote = if enable_tower_sync_ix {
            VoteTransaction::from(TowerSync::new(
                self.vote_state.votes.clone(),
                self.vote_state.root_slot,
                vote_hash,
                block_id,
            ))
        } else {
            VoteTransaction::from(VoteStateUpdate::new(
                self.vote_state.votes.clone(),
                self.vote_state.root_slot,
                vote_hash,
            ))
        };

        new_vote.set_timestamp(self.maybe_timestamp(self.last_voted_slot().unwrap_or_default()));
        self.last_vote = new_vote;
    }

    fn record_bank_vote_and_update_lockouts(
        &mut self,
        vote_slot: Slot,
        vote_hash: Hash,
        enable_tower_sync_ix: bool,
        block_id: Hash,
    ) -> Option<Slot> {
        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
            if vote_slot <= last_voted_slot {
                panic!(
                    "Error while recording vote {} {} in local tower {:?}",
                    vote_slot,
                    vote_hash,
                    VoteError::VoteTooOld
                );
            }
        }

        trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
        let old_root = self.root();

        self.vote_state.process_next_vote_slot(vote_slot);
        self.update_last_vote_from_vote_state(vote_hash, enable_tower_sync_ix, block_id);

        let new_root = self.root();

        datapoint_info!(
            "tower-vote",
            ("latest", vote_slot, i64),
            ("root", new_root, i64)
        );
        if old_root != new_root {
            Some(new_root)
        } else {
            None
        }
    }

    #[cfg(feature = "dev-context-only-utils")]
    pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
        self.record_bank_vote_and_update_lockouts(slot, hash, true, Hash::default())
    }

    #[cfg(feature = "dev-context-only-utils")]
    pub fn increase_lockout(&mut self, confirmation_count_increase: u32) {
        for vote in self.vote_state.votes.iter_mut() {
            vote.increase_confirmation_count(confirmation_count_increase);
        }
    }

    pub fn last_voted_slot(&self) -> Option<Slot> {
        if self.last_vote.is_empty() {
            None
        } else {
            Some(self.last_vote.slot(self.last_vote.len() - 1))
        }
    }

    pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> {
        Some((self.last_voted_slot()?, self.last_vote.hash()))
    }

    pub fn stray_restored_slot(&self) -> Option<Slot> {
        self.stray_restored_slot
    }

    pub fn last_vote(&self) -> VoteTransaction {
        self.last_vote.clone()
    }

    fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
        if current_slot > self.last_timestamp.slot
            || self.last_timestamp.slot == 0 && current_slot == self.last_timestamp.slot
        {
            let timestamp = Utc::now().timestamp();
            if timestamp >= self.last_timestamp.timestamp {
                self.last_timestamp = BlockTimestamp {
                    slot: current_slot,
                    timestamp,
                };
                return Some(timestamp);
            } else {
                datapoint_info!(
                    "backwards-timestamp",
                    ("slot", current_slot, i64),
                    ("timestamp", timestamp, i64),
                    ("last-timestamp", self.last_timestamp.timestamp, i64),
                )
            }
        }
        None
    }

    // The root may be forcibly set by an arbitrary replay root slot, for example from a
    // root after replaying a snapshot.
    // Also, tower.root() can never be None; initialize_lockouts() ensures that.
    // Conceptually, every tower must have been constructed from a concrete starting point,
    // which establishes the origin of trust (i.e. the root), whether booting from genesis
    // (slot 0) or from a snapshot (slot N). In other words, there is no way a Tower can
    // lack a root, unlike young vote accounts.
    pub fn root(&self) -> Slot {
        self.vote_state.root_slot.unwrap()
    }

    // A slot is recent if it's newer than the last vote we have. If we haven't voted yet
    // but have a root (hard forks situation) then compare it to the root
    pub fn is_recent(&self, slot: Slot) -> bool {
        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
            if slot <= last_voted_slot {
                return false;
            }
        } else if let Some(root) = self.vote_state.root_slot {
            if slot <= root {
                return false;
            }
        }
        true
    }

    pub fn has_voted(&self, slot: Slot) -> bool {
        for vote in &self.vote_state.votes {
            if slot == vote.slot() {
                return true;
            }
        }
        false
    }

    pub fn is_locked_out(&self, slot: Slot, ancestors: &HashSet<Slot>) -> bool {
        if !self.is_recent(slot) {
            return true;
        }

        // Check if a slot is locked out by simulating adding a vote for that
        // slot to the current lockouts to pop any expired votes. If any of the
        // remaining voted slots are on a different fork from the checked slot,
        // it's still locked out.
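        // Illustrative example (assuming the usual lockout rule: a vote on slot `s` with
        // confirmation count `c` stays locked out through slot `s + 2^c`): with tower
        // votes [(slot 10, conf 2), (slot 12, conf 1)], both are locked out through
        // slot 14, so simulating a vote on slot 13 pops nothing and slot 13 is locked
        // out unless slots 10 and 12 are among its ancestors; simulating a vote on
        // slot 15 pops both expired votes, leaving only the root to check.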
        let mut vote_state = self.vote_state.clone();
        vote_state.process_next_vote_slot(slot);
        for vote in &vote_state.votes {
            if slot != vote.slot() && !ancestors.contains(&vote.slot()) {
                return true;
            }
        }

        if let Some(root_slot) = vote_state.root_slot {
            if slot != root_slot {
                // This case should never happen because bank forks purges all
                // non-descendants of the root every time root is set
                assert!(
                    ancestors.contains(&root_slot),
                    "ancestors: {ancestors:?}, slot: {slot} root: {root_slot}"
                );
            }
        }

        false
    }

    /// Checks if a vote for `candidate_slot` is usable in a switching proof
    /// from `last_voted_slot` to `switch_slot`.
    /// We assume `candidate_slot` is not an ancestor of `last_voted_slot`.
    ///
    /// Returns None if `candidate_slot` or `switch_slot` is not present in `ancestors`
    fn is_valid_switching_proof_vote(
        &self,
        candidate_slot: Slot,
        last_voted_slot: Slot,
        switch_slot: Slot,
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        last_vote_ancestors: &HashSet<Slot>,
    ) -> Option<bool> {
        trace!(
            "Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \
             to {switch_slot}"
        );
        // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
        // want to count votes on the same fork.
        if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? {
            return Some(false);
        }

        if last_vote_ancestors.is_empty() {
            // If `last_vote_ancestors` is empty, this means we must have a last vote that is stray. If the `last_voted_slot`
            // is stray, it must descend from some earlier root than the latest root (the anchor at startup).
            // The above check also guarantees that the candidate slot is not a descendant of this stray last vote.
            //
            // This gives us a fork graph:
            //     / ------------- stray `last_voted_slot`
            // old root
            //     \- latest root (anchor) - ... - candidate slot
            //                                \- switch slot
            //
            // Thus the common ancestor of `last_voted_slot` and `candidate_slot` is `old_root`, which the `switch_slot`
            // descends from. Thus it is safe to use `candidate_slot` in the switching proof.
            //
            // Note: the calling function should have already panicked if we do not have ancestors and the last vote is not stray.
            assert!(self.is_stray_last_vote());
            return Some(true);
        }

        // Only consider forks that split at the common_ancestor of `switch_slot` and `last_voted_slot` or earlier.
        // This is to prevent situations like this from being included in the switching proof:
        //
        //         /-- `last_voted_slot`
        //     /--Y
        //    X    \-- `candidate_slot`
        //     \-- `switch_slot`
        //
        // The common ancestor of `last_voted_slot` and `switch_slot` is `X`. Votes for the `candidate_slot`
        // should not count towards the switch proof since `candidate_slot` is "on the same fork" as `last_voted_slot`
        // in relation to `switch_slot`.
        // However, these candidate slots should be allowed:
        //
        //             /-- Y -- `last_voted_slot`
        //    V - W - X
        //        \    \-- `candidate_slot` -- `switch_slot`
        //         \    \-- `candidate_slot`
        //          \-- `candidate_slot`
        //
        // As the `candidate_slot`s forked off from `X` or earlier.
        //
        // To differentiate, we check the common ancestor of `last_voted_slot` and `candidate_slot`.
        // If the `switch_slot` descends from this ancestor, then the vote for `candidate_slot` can be included.
        Self::greatest_common_ancestor(ancestors, candidate_slot, last_voted_slot)
            .and_then(|ancestor| Self::is_descendant_slot(switch_slot, ancestor, ancestors))
    }

    /// Checks if `maybe_descendant` is a descendant of `slot`.
    ///
    /// Returns None if `maybe_descendant` is not present in `ancestors`
    fn is_descendant_slot(
        maybe_descendant: Slot,
        slot: Slot,
        ancestors: &HashMap<Slot, HashSet<u64>>,
    ) -> Option<bool> {
        ancestors
            .get(&maybe_descendant)
            .map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&slot))
    }

    /// Returns `Some(gca)` where `gca` is the greatest (by slot number)
    /// common ancestor of both `slot_a` and `slot_b`.
    ///
    /// Returns `None` if:
    /// * `slot_a` is not in `ancestors`
    /// * `slot_b` is not in `ancestors`
    /// * There is no common ancestor of `slot_a` and `slot_b` in `ancestors`
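    ///
    /// For example (illustrative): with `ancestors` containing `3 => {1, 2}` and
    /// `4 => {1, 2}` (i.e. the chain 1 <- 2 forking into 3 and 4),
    /// `greatest_common_ancestor(&ancestors, 3, 4)` returns `Some(2)`.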
    fn greatest_common_ancestor(
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        slot_a: Slot,
        slot_b: Slot,
    ) -> Option<Slot> {
        (ancestors.get(&slot_a)?)
            .intersection(ancestors.get(&slot_b)?)
            .max()
            .copied()
    }

    #[allow(clippy::too_many_arguments)]
    fn make_check_switch_threshold_decision(
        &self,
        switch_slot: Slot,
        ancestors: &HashMap<Slot, HashSet<u64>>,
        descendants: &HashMap<Slot, HashSet<u64>>,
        progress: &ProgressMap,
        total_stake: u64,
        epoch_vote_accounts: &VoteAccountsHashMap,
        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
    ) -> SwitchForkDecision {
        let Some((last_voted_slot, last_voted_hash)) = self.last_voted_slot_hash() else {
            return SwitchForkDecision::SameFork;
        };
        let root = self.root();
        let empty_ancestors = HashSet::default();
        let empty_ancestors_due_to_minor_unsynced_ledger = || {
            // This condition (stale stray last vote) shouldn't occur under normal validator
            // operation, indicating something unusual happened.
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.

            // However, returning empty ancestors as a fallback here shouldn't result in
            // slashing by itself (Note that we couldn't fully preclude any kind of slashing if
            // the failure was OS or HW level).

            // Firstly, lockout is ensured elsewhere.

            // Also, there is no risk of optimistic conf. violation. Although empty ancestors
            // could result in incorrect (= more than actual) locked_out_stake and a
            // false-positive SwitchProof later in this function, there should be no such
            // heavier fork candidate in the first place if the last vote (or any of its
            // unavailable ancestors) was already optimistically confirmed.
            // The only exception is if another validator is already violating it...
            if self.is_first_switch_check() && switch_slot < last_voted_slot {
                // The `switch < last` check is needed so we don't warn! with this message
                // merely because a newer snapshot was used on validator restart
                let message = format!(
                    "bank_forks doesn't have corresponding data for the stray restored last \
                     vote({last_voted_slot}), meaning some inconsistency between saved tower and \
                     ledger."
                );
                warn!("{message}");
                datapoint_warn!("tower_warn", ("warn", message, String));
            }
            &empty_ancestors
        };

        let suspended_decision_due_to_major_unsynced_ledger = || {
            // This peculiar corner handling is needed mainly for a tower which is newer than
            // the blockstore. (Yeah, we tolerate it for ease of validator maintenance by operators.)
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.

            // When we're in this clause, it basically means the validator is running badly
            // with a future tower while replaying past slots; the especially problematic
            // case is last_voted_slot.
            // So, don't re-vote on it: return a pseudo FailedSwitchThreshold, otherwise
            // there would be slashing because of a double vote on one of last_vote_ancestors.
            // (Well, needless to say, re-creating the duplicate block must be handled properly
            // at the banking stage: https://github.com/solana-labs/solana/issues/8232)
            //
            // To be specific, the replay stage is tricked into a false perception where
            // last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
            // stray slots (which should always be empty_ancestors).
            //
            // This is covered by test_future_tower_* in local_cluster
            SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
        };

        let rollback_due_to_duplicate_ancestor = |latest_duplicate_ancestor| {
            SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
        };

        // `heaviest_subtree_fork_choice` entries are not cleaned by duplicate block purging/rollback logic,
        // so this is safe to check here. We return here if the last voted slot was rolled back/purged due to
        // being a duplicate because `ancestors`/`descendants`/`progress` structures may be missing this slot due
        // to duplicate purging. This would cause many of the `unwrap()` checks below to fail.
        //
        // TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
        // heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
        // None, but the last vote will be persisted in tower.
        let switch_hash = progress
            .get_hash(switch_slot)
            .expect("Slot we're trying to switch to must exist AND be frozen in progress map");
        if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice
            .latest_invalid_ancestor(&(last_voted_slot, last_voted_hash))
        {
            // We're rolling back because one of the ancestors of the last vote was a duplicate. In this
            // case, it's acceptable if the switch candidate is one of the ancestors of the previous vote;
            // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
            // should then have a special case to continue building an alternate fork from this ancestor, NOT
            // the `last_voted_slot`. This is in contrast to the usual SwitchFailure where ReplayStage continues
            // to build blocks on the latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
            if heaviest_subtree_fork_choice.is_strict_ancestor(
                &(switch_slot, switch_hash),
                &(last_voted_slot, last_voted_hash),
            ) {
                return rollback_due_to_duplicate_ancestor(latest_duplicate_ancestor);
            } else if progress
                .get_hash(last_voted_slot)
                .map(|current_slot_hash| current_slot_hash != last_voted_hash)
                .unwrap_or(true)
            {
                // Our last vote slot was purged because it was on a duplicate fork; don't continue below
                // where checks may panic. We allow a freebie vote here that may violate switching
                // thresholds.
                // TODO: Properly handle this case
                info!(
                    "Allowing switch vote on {:?} because last vote {:?} was rolled back",
                    (switch_slot, switch_hash),
                    (last_voted_slot, last_voted_hash)
                );
                return SwitchForkDecision::SwitchProof(Hash::default());
            }
        }

        let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| {
            if self.is_stray_last_vote() {
                // Unless the last vote is stray and stale, ancestors.get(last_voted_slot) must
                // return Some(_), justifying the panic! here.
                // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None
                // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
                // reached in that case either.
                // In other words, except for being stray, all other slots have been voted on while
                // this validator has been running, so we must be able to fetch ancestors for
                // all of them.
                empty_ancestors_due_to_minor_unsynced_ledger()
            } else {
                panic!("no ancestors found with slot: {last_voted_slot}");
            }
        });

        let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();

        if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
            // If the `switch_slot` is a descendant of the last vote,
            // no switching proof is necessary
            return SwitchForkDecision::SameFork;
        }

        if last_vote_ancestors.contains(&switch_slot) {
            if self.is_stray_last_vote() {
                return suspended_decision_due_to_major_unsynced_ledger();
            } else {
                panic!(
                    "Should never consider switching to ancestor ({switch_slot}) of last vote: \
                     {last_voted_slot}, ancestors({last_vote_ancestors:?})",
                );
            }
        }

        // By this point, we know the `switch_slot` is on a different fork
        // (is neither an ancestor nor descendant of `last_vote`), so a
        // switching proof is necessary
        let switch_proof = Hash::default();
        let mut locked_out_stake = 0;
        let mut locked_out_vote_accounts = HashSet::new();
        for (candidate_slot, descendants) in descendants.iter() {
            // 1) Don't consider any banks that haven't been frozen yet
            //    because the needed stats are unavailable
            // 2) Only consider lockouts at the latest `frozen` bank
            //    on each fork, as that bank will contain all the
            //    lockout intervals for ancestors on that fork as well.
            // 3) Don't consider lockouts on the `last_vote` itself
            // 4) Don't consider lockouts on any descendants of
            //    `last_vote`
            // 5) Don't consider any banks before the root because
            //    all lockouts must be ancestors of `last_vote`
            if !progress
                .get_fork_stats(*candidate_slot)
                .map(|stats| stats.computed)
                .unwrap_or(false)
                || {
                    // If any of the descendants have the `computed` flag set, then there must be a more
                    // recent frozen bank on this fork to use, so we can ignore this one. Otherwise,
                    // even if this bank has descendants, if they have not yet been frozen / stats computed,
                    // then use this bank as a representative for the fork.
                    descendants.iter().any(|d| {
                        progress
                            .get_fork_stats(*d)
                            .map(|stats| stats.computed)
                            .unwrap_or(false)
                    })
                }
                || *candidate_slot == last_voted_slot
                || *candidate_slot <= root
                || {
                    !self
                        .is_valid_switching_proof_vote(
                            *candidate_slot,
                            last_voted_slot,
                            switch_slot,
                            ancestors,
                            last_vote_ancestors,
                        )
                        .expect(
                            "candidate_slot and switch_slot exist in descendants map, so they \
                             must exist in ancestors map",
                        )
                }
            {
                continue;
            }

            // By the time we reach here, any ancestors of the `last_vote`
            // should have been filtered out, as they all have a descendant,
            // namely the `last_vote` itself.
            assert!(!last_vote_ancestors.contains(candidate_slot));

            // Evaluate which vote accounts in the bank are locked out
            // in the interval candidate_slot..last_vote, which means
            // finding any lockout intervals in the `lockout_intervals` tree
            // for this bank that contain `last_vote`.
            let lockout_intervals = &progress
                .get(candidate_slot)
                .unwrap()
                .fork_stats
                .lockout_intervals;
            // Find any locked out intervals for vote accounts in this bank with
            // `lockout_interval_end` >= `last_vote`, which implies that the most recent tower slot is locked out
            // at `last_vote` on another fork. We also consider the remaining tower slots; however, these older slots
            // could be from before the fork so we must filter by their ancestry.
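            // For example (illustrative, assuming the usual 2^confirmation_count lockout):
            // a landed vote on slot 8 with confirmation count 3 in this candidate bank
            // yields the interval [8, 16]; if `last_voted_slot` is 12, then 16 >= 12, so
            // that vote account is locked out of our fork at the last vote and its stake
            // can count towards the switching proof (subject to the ancestry checks below).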
1182            for LockoutInterval {
1183                start: lockout_interval_start,
1184                voter: vote_account_pubkey,
1185                ..
1186            } in lockout_intervals
1187                .iter()
1188                .filter(|interval| interval.end >= last_voted_slot)
1189            {
1190                if locked_out_vote_accounts.contains(vote_account_pubkey) {
1191                    continue;
1192                }
1193
1194                // Only count lockouts on slots that are:
1195                // 1) Not ancestors of `last_vote`, meaning being on different fork
1196                // 2) Not from before the current root as we can't determine if
1197                // anything before the root was an ancestor of `last_vote` or not
1198                if !last_vote_ancestors.contains(lockout_interval_start) && {
1199                    // Given a `lockout_interval_start` < root that appears in a
1200                    // bank for a `candidate_slot`, it must be that `lockout_interval_start`
1201                    // is an ancestor of the current root, because `candidate_slot` is a
1202                    // descendant of the current root
1203                    *lockout_interval_start > root
1204                } {
1205                    let stake = epoch_vote_accounts
1206                        .get(vote_account_pubkey)
1207                        .map(|(stake, _)| *stake)
1208                        .unwrap_or(0);
1209                    locked_out_stake += stake;
1210                    if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
1211                        return SwitchForkDecision::SwitchProof(switch_proof);
1212                    }
1213                    locked_out_vote_accounts.insert(vote_account_pubkey);
1214                }
1215            }
1216        }
1217
1218        // Check the latest votes for potentially gossip votes that haven't landed yet
1219        for (
1220            vote_account_pubkey,
1221            (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash),
1222        ) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes()
1223        {
1224            if locked_out_vote_accounts.contains(&vote_account_pubkey) {
1225                continue;
1226            }
1227
1228            if *candidate_latest_frozen_vote > last_voted_slot && {
1229                // Because `candidate_latest_frozen_vote` is the last vote made by some validator
1230                // in the cluster for a frozen bank `B` observed through gossip, we may have cleared
1231                // that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
1232                // like so:
1233                //
1234                //    |----------X ------candidate_latest_frozen_vote (frozen)
1235                // old root
1236                //    |----------new root ----last_voted_slot
1237                //
1238                // In most cases, because `last_voted_slot` must be a descendant of `root`, then
1239                // if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
1240                // directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
1241                // because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
1242                // should be safe to count towards the switching proof:
1243                //
1244                // However, there is also the possibility that `last_voted_slot` is a stray, in which
1245                // case we cannot make this conclusion as we do not know the ancestors/descendants
1246                // of strays. Hence we err on the side of caution here and ignore this vote. This
1247                // is ok because validators voting on different unrooted forks should eventually vote
1248                // on some descendant of the root, at which time they can be included in switching proofs.
1249                self.is_valid_switching_proof_vote(
1250                    *candidate_latest_frozen_vote,
1251                    last_voted_slot,
1252                    switch_slot,
1253                    ancestors,
1254                    last_vote_ancestors,
1255                )
1256                .unwrap_or(false)
1257            } {
1258                let stake = epoch_vote_accounts
1259                    .get(vote_account_pubkey)
1260                    .map(|(stake, _)| *stake)
1261                    .unwrap_or(0);
1262                locked_out_stake += stake;
1263                if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
1264                    return SwitchForkDecision::SwitchProof(switch_proof);
1265                }
1266                locked_out_vote_accounts.insert(vote_account_pubkey);
1267            }
1268        }
1269
1270        // We have not detected sufficient lockout past the last voted slot to generate
1271        // a switching proof
1272        SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
1273    }
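    // Minimal sketch (illustration only, not used by the rest of the module) of the
    // stake accumulation performed above: locked-out stake is summed per vote account,
    // and the switch succeeds as soon as the observed fraction of `total_stake`
    // exceeds SWITCH_FORK_THRESHOLD. The helper name and inputs are hypothetical.
    #[allow(dead_code)]
    fn switch_stake_accumulation_sketch(locked_out_stakes: &[u64], total_stake: u64) -> bool {
        let mut locked_out_stake = 0u64;
        for stake in locked_out_stakes {
            locked_out_stake += stake;
            // Same comparison as above: strictly greater than the threshold fraction.
            if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                return true;
            }
        }
        false
    }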
1274
1275    #[allow(clippy::too_many_arguments)]
1276    pub(crate) fn check_switch_threshold(
1277        &mut self,
1278        switch_slot: Slot,
1279        ancestors: &HashMap<Slot, HashSet<u64>>,
1280        descendants: &HashMap<Slot, HashSet<u64>>,
1281        progress: &ProgressMap,
1282        total_stake: u64,
1283        epoch_vote_accounts: &VoteAccountsHashMap,
1284        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
1285        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
1286    ) -> SwitchForkDecision {
1287        let decision = self.make_check_switch_threshold_decision(
1288            switch_slot,
1289            ancestors,
1290            descendants,
1291            progress,
1292            total_stake,
1293            epoch_vote_accounts,
1294            latest_validator_votes_for_frozen_banks,
1295            heaviest_subtree_fork_choice,
1296        );
1297        let new_check = Some((switch_slot, decision.clone()));
1298        if new_check != self.last_switch_threshold_check {
1299            trace!("new switch threshold check: slot {switch_slot}: {decision:?}",);
1300            self.last_switch_threshold_check = new_check;
1301        }
1302        decision
1303    }
1304
1305    fn is_first_switch_check(&self) -> bool {
1306        self.last_switch_threshold_check.is_none()
1307    }
1308
1309    // Optimistically skip the stake check if casting a vote would not increase
1310    // the lockout at this threshold. This is because if you bounce back to
1311    // voting on the main fork after not voting for a while, your latest vote
1312    // might pop off a lot of the votes in the tower. The stake from these votes
1313    // would have rolled up to earlier votes in the tower, which presumably
1314    // could have helped us pass the threshold check. Worst case, we'll just
1315    // recheck later without having increased lockouts.
1316    fn optimistically_bypass_vote_stake_threshold_check<'a>(
1317        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1318        threshold_vote: &Lockout,
1319    ) -> bool {
1320        for old_vote in tower_before_applying_vote {
1321            if old_vote.slot() == threshold_vote.slot()
1322                && old_vote.confirmation_count() == threshold_vote.confirmation_count()
1323            {
1324                return true;
1325            }
1326        }
1327        false
1328    }
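    // Hedged illustration (not part of the build): the bypass above fires exactly when
    // the tower already holds a lockout identical to `threshold_vote`, i.e. casting the
    // new vote does not deepen lockout at that depth. The slots and confirmation counts
    // below are made up for the example.
    #[allow(dead_code)]
    fn optimistic_bypass_example() -> bool {
        let tower_before = [
            Lockout::new_with_confirmation_count(10, 3),
            Lockout::new_with_confirmation_count(11, 2),
        ];
        // `threshold_vote` matches the existing (10, 3) entry, so the stake check is skipped.
        let threshold_vote = Lockout::new_with_confirmation_count(10, 3);
        Self::optimistically_bypass_vote_stake_threshold_check(tower_before.iter(), &threshold_vote)
    }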
1329
1330    /// Checks a single vote threshold for `slot`
1331    fn check_vote_stake_threshold<'a>(
1332        threshold_vote: Option<&Lockout>,
1333        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1334        threshold_depth: usize,
1335        threshold_size: f64,
1336        slot: Slot,
1337        voted_stakes: &VotedStakes,
1338        total_stake: u64,
1339    ) -> ThresholdDecision {
1340        let Some(threshold_vote) = threshold_vote else {
1341            // Tower isn't that deep.
1342            return ThresholdDecision::PassedThreshold;
1343        };
1344        let Some(fork_stake) = voted_stakes.get(&threshold_vote.slot()) else {
1345            // We haven't seen any votes on this fork yet, so no stake
1346            return ThresholdDecision::FailedThreshold(threshold_depth as u64, 0);
1347        };
1348
1349        let lockout = *fork_stake as f64 / total_stake as f64;
1350        trace!(
1351            "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \
1352             total_stake: {}",
1353            slot,
1354            threshold_vote.slot(),
1355            lockout,
1356            fork_stake,
1357            total_stake
1358        );
1359        if Self::optimistically_bypass_vote_stake_threshold_check(
1360            tower_before_applying_vote,
1361            threshold_vote,
1362        ) || lockout > threshold_size
1363        {
1364            return ThresholdDecision::PassedThreshold;
1365        }
1366        ThresholdDecision::FailedThreshold(threshold_depth as u64, *fork_stake)
1367    }
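    // Worked example (sketch only, hypothetical numbers): with 34 staked lamports voted
    // on the fork out of 100 total and a threshold_size of 0.67, the observed lockout
    // fraction is 34 / 100 = 0.34 < 0.67, so the check fails at that depth unless the
    // bypass above applies.
    #[allow(dead_code)]
    fn lockout_fraction_exceeds_threshold(fork_stake: u64, total_stake: u64, threshold_size: f64) -> bool {
        // Mirrors the comparison above: strictly greater than the threshold.
        (fork_stake as f64 / total_stake as f64) > threshold_size
    }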
1368
1369    /// Performs vote threshold checks for `slot`
1370    pub fn check_vote_stake_thresholds(
1371        &self,
1372        slot: Slot,
1373        voted_stakes: &VotedStakes,
1374        total_stake: Stake,
1375    ) -> Vec<ThresholdDecision> {
1376        let mut threshold_decisions = vec![];
1377        // Generate the vote state assuming this vote is included.
1378        let mut vote_state = self.vote_state.clone();
1379        vote_state.process_next_vote_slot(slot);
1380
1381        // Assemble all the vote thresholds and depths to check.
1382        let vote_thresholds_and_depths = vec![
1383            // The following two checks are log-only and are currently used for experimentation
1384            // purposes. We wish to impose a shallow threshold check to prevent the frequent 8-deep
1385            // lockouts seen multiple times a day. We check both the 4th and 5th depths here to collect
1386            // metrics to determine the right depth and threshold percentage to set in the future.
1387            (VOTE_THRESHOLD_DEPTH_SHALLOW, SWITCH_FORK_THRESHOLD),
1388            (VOTE_THRESHOLD_DEPTH_SHALLOW + 1, SWITCH_FORK_THRESHOLD),
1389            (self.threshold_depth, self.threshold_size),
1390        ];
1391
1392        // Check one by one and add any failures to be returned
1393        for (threshold_depth, threshold_size) in vote_thresholds_and_depths {
1394            if let ThresholdDecision::FailedThreshold(vote_depth, stake) =
1395                Self::check_vote_stake_threshold(
1396                    vote_state.nth_recent_lockout(threshold_depth),
1397                    self.vote_state.votes.iter(),
1398                    threshold_depth,
1399                    threshold_size,
1400                    slot,
1401                    voted_stakes,
1402                    total_stake,
1403                )
1404            {
1405                threshold_decisions.push(ThresholdDecision::FailedThreshold(vote_depth, stake));
1406            }
1407        }
1408        threshold_decisions
1409    }
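    // Usage sketch (illustration only, hypothetical helper name): callers treat an empty
    // result from check_vote_stake_thresholds as "all thresholds passed"; any returned
    // entries are the failed (depth, observed stake) pairs.
    #[allow(dead_code)]
    fn all_vote_thresholds_passed(
        &self,
        slot: Slot,
        voted_stakes: &VotedStakes,
        total_stake: Stake,
    ) -> bool {
        self.check_vote_stake_thresholds(slot, voted_stakes, total_stake)
            .is_empty()
    }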
1410
1411    /// Initialize voted stake entries for the given vote slots and all of their ancestors
1412    pub(crate) fn populate_ancestor_voted_stakes(
1413        voted_stakes: &mut VotedStakes,
1414        vote_slots: impl IntoIterator<Item = Slot>,
1415        ancestors: &HashMap<Slot, HashSet<Slot>>,
1416    ) {
1417        // If there are no ancestors, this slot must be from before the current root,
1418        // in which case the lockouts won't be calculated in bank_weight anyway, so ignore
1419        // this slot
1420        for vote_slot in vote_slots {
1421            if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
1422                voted_stakes.entry(vote_slot).or_default();
1423                for slot in slot_ancestors {
1424                    voted_stakes.entry(*slot).or_default();
1425                }
1426            }
1427        }
1428    }
1429
1430    /// Update stake for all the ancestors.
1431    /// Note that the stake is the same for all the ancestors.
1432    fn update_ancestor_voted_stakes(
1433        voted_stakes: &mut VotedStakes,
1434        voted_slot: Slot,
1435        voted_stake: u64,
1436        ancestors: &HashMap<Slot, HashSet<Slot>>,
1437    ) {
1438        // If there are no ancestors, this slot must be from
1439        // before the current root, so ignore it
1440        if let Some(vote_slot_ancestors) = ancestors.get(&voted_slot) {
1441            *voted_stakes.entry(voted_slot).or_default() += voted_stake;
1442            for slot in vote_slot_ancestors {
1443                *voted_stakes.entry(*slot).or_default() += voted_stake;
1444            }
1445        }
1446    }
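    // Worked example (sketch only, hypothetical slots/stakes): if a validator with 7
    // stake voted on slot 3 whose ancestors are {0, 1, 2}, the two helpers above leave
    // voted_stakes with 7 recorded for each of 0, 1, 2, and 3.
    #[allow(dead_code)]
    fn ancestor_stake_example() -> VotedStakes {
        let ancestors: HashMap<Slot, HashSet<Slot>> =
            HashMap::from([(3, HashSet::from([0, 1, 2]))]);
        let mut voted_stakes = VotedStakes::default();
        // Create zeroed entries for the vote slot and its ancestors...
        Self::populate_ancestor_voted_stakes(&mut voted_stakes, [3], &ancestors);
        // ...then add this validator's stake to the vote slot and every ancestor.
        Self::update_ancestor_voted_stakes(&mut voted_stakes, 3, 7, &ancestors);
        voted_stakes
    }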
1447
1448    fn voted_slots(&self) -> Vec<Slot> {
1449        self.vote_state
1450            .votes
1451            .iter()
1452            .map(|lockout| lockout.slot())
1453            .collect()
1454    }
1455
1456    pub fn is_stray_last_vote(&self) -> bool {
1457        self.stray_restored_slot.is_some() && self.stray_restored_slot == self.last_voted_slot()
1458    }
1459
1460    // The tower root can be older/newer if the validator booted from a newer/older snapshot, so
1461    // tower lockouts may need adjustment
1462    pub fn adjust_lockouts_after_replay(
1463        mut self,
1464        replayed_root: Slot,
1465        slot_history: &SlotHistory,
1466    ) -> Result<Self> {
1467        // sanity assertions for roots
1468        let tower_root = self.root();
1469        info!(
1470            "adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
1471            replayed_root,
1472            self.voted_slots(),
1473            tower_root,
1474            replayed_root,
1475        );
1476        assert_eq!(slot_history.check(replayed_root), Check::Found);
1477
1478        assert!(
1479            self.last_vote == VoteTransaction::from(VoteStateUpdate::default())
1480                && self.vote_state.votes.is_empty()
1481                || self.last_vote == VoteTransaction::from(TowerSync::default())
1482                    && self.vote_state.votes.is_empty()
1483                || !self.vote_state.votes.is_empty(),
1484            "last vote: {:?} vote_state.votes: {:?}",
1485            self.last_vote,
1486            self.vote_state.votes
1487        );
1488
1489        if let Some(last_voted_slot) = self.last_voted_slot() {
1490            if tower_root <= replayed_root {
1491                // Normally, we go into this clause, possibly with the help of
1492                // reconcile_blockstore_roots_with_external_source()
1493                if slot_history.check(last_voted_slot) == Check::TooOld {
1494                    // We could try hard to anchor with other older votes, but opt to simplify the
1495                    // following logic
1496                    return Err(TowerError::TooOldTower(
1497                        last_voted_slot,
1498                        slot_history.oldest(),
1499                    ));
1500                }
1501
1502                self.adjust_lockouts_with_slot_history(slot_history)?;
1503                self.initialize_root(replayed_root);
1504            } else {
1505                // This should never occur under normal operation.
1506                // While this validator's voting is suspended this way,
1507                // suspended_decision_due_to_major_unsynced_ledger() will also be touched.
1508                let message = format!(
1509                    "For some reason, we're REPROCESSING slots which have already been voted on and \
1510                     ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
1511                );
1512                error!("{message}");
1513                datapoint_error!("tower_error", ("error", message, String));
1514
1515                // Let's pass through adjust_lockouts_with_slot_history just for sanitization,
1516                // using a synthesized SlotHistory.
1517
1518                let mut warped_slot_history = (*slot_history).clone();
1519                // Blockstore doesn't have the tower_root slot because of
1520                // (replayed_root < tower_root) in this else clause, meaning the tower is from
1521                // the future from the blockstore's point of view.
1522                // Pretend the blockstore has the future tower_root to anchor exactly with that
1523                // slot by adding tower_root to a slot history. The added slot will be newer
1524                // than all slots in the slot history (remember tower_root > replayed_root),
1525                // satisfying the slot history invariant.
1526                // Thus, the whole process will be safe as well because tower_root exists
1527                // within both tower and slot history, guaranteeing the success of adjustment
1528                // and retaining all future votes correctly while sanitizing.
1529                warped_slot_history.add(tower_root);
1530
1531                self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
1532                // don't update the root; the future tower's root should be kept across validator
1533                // restarts to continue to show the scary messages at restarts until the next
1534                // vote.
1535            }
1536        } else {
1537            // This else clause is for a newly created tower.
1538            // initialize_lockouts_from_bank() should ensure the following invariant,
1539            // otherwise we're screwing something up.
1540            assert_eq!(tower_root, replayed_root);
1541        }
1542
1543        Ok(self)
1544    }
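    // Minimal sketch (illustration only) of the "warped" SlotHistory trick used above
    // when the tower root is newer than the replayed root: cloning the history and
    // adding the future tower_root guarantees an anchor exists for the adjustment.
    // The helper name and inputs are hypothetical.
    #[allow(dead_code)]
    fn warped_slot_history_example(
        slot_history: &SlotHistory,
        future_tower_root: Slot,
    ) -> SlotHistory {
        let mut warped = slot_history.clone();
        // The added slot is newer than everything already in the history,
        // preserving the SlotHistory invariant.
        warped.add(future_tower_root);
        warped
    }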
1545
1546    fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
1547        let tower_root = self.root();
1548        // retained slots will consist only of divergent slots
1549        let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
1550            Vec::with_capacity(self.vote_state.votes.len());
1551
1552        let mut still_in_future = true;
1553        let mut past_outside_history = false;
1554        let mut checked_slot = None;
1555        let mut anchored_slot = None;
1556
1557        let mut slots_in_tower = vec![tower_root];
1558        slots_in_tower.extend(self.voted_slots());
1559
1560        // iterate over votes + root (if any) in the newest => oldest order
1561        // bail out early if a bad condition is found
1562        for slot_in_tower in slots_in_tower.iter().rev() {
1563            let check = slot_history.check(*slot_in_tower);
1564
1565            if anchored_slot.is_none() && check == Check::Found {
1566                anchored_slot = Some(*slot_in_tower);
1567            } else if anchored_slot.is_some() && check == Check::NotFound {
1568                // this can't happen unless we're fed a bogus snapshot
1569                return Err(TowerError::FatallyInconsistent("diverged ancestor?"));
1570            }
1571
1572            if still_in_future && check != Check::Future {
1573                still_in_future = false;
1574            } else if !still_in_future && check == Check::Future {
1575                // really odd case: badly ordered votes?
1576                return Err(TowerError::FatallyInconsistent("time warped?"));
1577            }
1578            if !past_outside_history && check == Check::TooOld {
1579                past_outside_history = true;
1580            } else if past_outside_history && check != Check::TooOld {
1581                // really odd case: badly ordered votes?
1582                return Err(TowerError::FatallyInconsistent(
1583                    "not too old once after got too old?",
1584                ));
1585            }
1586
1587            if let Some(checked_slot) = checked_slot {
1588                // This is really special: only if the tower is initialized and contains
1589                // a vote for the root can the root slot repeat, and only once
1590                let voting_for_root =
1591                    *slot_in_tower == checked_slot && *slot_in_tower == tower_root;
1592
1593                if !voting_for_root {
1594                    // Unless we've been voting since genesis, slots_in_tower must always be older than the last checked_slot,
1595                    // including all vote slots and the root slot.
1596                    assert!(
1597                        *slot_in_tower < checked_slot,
1598                        "slot_in_tower({}) < checked_slot({})",
1599                        *slot_in_tower,
1600                        checked_slot
1601                    );
1602                }
1603            }
1604
1605            checked_slot = Some(*slot_in_tower);
1606
1607            retain_flags_for_each_vote_in_reverse.push(anchored_slot.is_none());
1608        }
1609
1610        // Check for errors if not anchored
1611        info!("adjusted tower's anchored slot: {anchored_slot:?}");
1612        if anchored_slot.is_none() {
1613            // this error really shouldn't happen unless ledger/tower is corrupted
1614            return Err(TowerError::FatallyInconsistent(
1615                "no common slot for rooted tower",
1616            ));
1617        }
1618
1619        assert_eq!(
1620            slots_in_tower.len(),
1621            retain_flags_for_each_vote_in_reverse.len()
1622        );
1623        // pop for the tower root
1624        retain_flags_for_each_vote_in_reverse.pop();
1625        let mut retain_flags_for_each_vote =
1626            retain_flags_for_each_vote_in_reverse.into_iter().rev();
1627
1628        let original_votes_len = self.vote_state.votes.len();
1629        self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
1630
1631        if self.vote_state.votes.is_empty() {
1632            info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
1633            // we might not have banks for those votes so just reset.
1634            // That's because the votes may well be past replayed_root
1635            self.last_vote = VoteTransaction::from(Vote::default());
1636        } else {
1637            info!(
1638                "{} restored votes (out of {}) were on a different fork or are upcoming votes on \
1639                 unrooted slots: {:?}!",
1640                self.voted_slots().len(),
1641                original_votes_len,
1642                self.voted_slots()
1643            );
1644
1645            assert_eq!(self.last_voted_slot(), self.voted_slots().last().copied());
1646            self.stray_restored_slot = self.last_vote.last_voted_slot()
1647        }
1648
1649        Ok(())
1650    }
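    // Hedged sketch (not part of the build) of the anchoring scan above: walking the
    // tower newest => oldest, every slot seen before the first slot Found in the
    // SlotHistory is flagged for retention (it diverged or is still in the future),
    // and everything at or below the anchor is dropped. Inputs are hypothetical.
    #[allow(dead_code)]
    fn retain_flags_example(
        slots_newest_to_oldest: &[Slot],
        slot_history: &SlotHistory,
    ) -> Vec<bool> {
        let mut anchored_slot = None;
        let mut retain_flags_in_reverse = Vec::with_capacity(slots_newest_to_oldest.len());
        for slot in slots_newest_to_oldest {
            if anchored_slot.is_none() && slot_history.check(*slot) == Check::Found {
                anchored_slot = Some(*slot);
            }
            // Retain only the slots seen before the anchor was found.
            retain_flags_in_reverse.push(anchored_slot.is_none());
        }
        retain_flags_in_reverse
    }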
1651
1652    fn initialize_lockouts_from_bank(
1653        &mut self,
1654        vote_account_pubkey: &Pubkey,
1655        root: Slot,
1656        bank: &Bank,
1657    ) {
1658        if let Some(vote_account) = bank.get_vote_account(vote_account_pubkey) {
1659            self.vote_state = TowerVoteState::from(vote_account.vote_state_view());
1660            self.initialize_root(root);
1661            self.initialize_lockouts(|v| v.slot() > root);
1662        } else {
1663            self.initialize_root(root);
1664            info!(
1665                "vote account({}) not found in bank (slot={})",
1666                vote_account_pubkey,
1667                bank.slot()
1668            );
1669        }
1670    }
1671
1672    fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
1673        self.vote_state.votes.retain(should_retain);
1674    }
1675
1676    // Updating the root is needed to correctly restore from a newly-saved tower on the next
1677    // boot
1678    fn initialize_root(&mut self, root: Slot) {
1679        self.vote_state.root_slot = Some(root);
1680    }
1681
1682    pub fn save(&self, tower_storage: &dyn TowerStorage, node_keypair: &Keypair) -> Result<()> {
1683        let saved_tower = SavedTower::new(self, node_keypair)?;
1684        tower_storage.store(&SavedTowerVersions::from(saved_tower))?;
1685        Ok(())
1686    }
1687
1688    pub fn restore(tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey) -> Result<Self> {
1689        tower_storage.load(node_pubkey)
1690    }
1691}
1692
1693#[derive(Error, Debug)]
1694pub enum TowerError {
1695    #[error("IO Error: {0}")]
1696    IoError(#[from] std::io::Error),
1697
1698    #[error("Serialization Error: {0}")]
1699    SerializeError(#[from] bincode::Error),
1700
1701    #[error("The signature on the saved tower is invalid")]
1702    InvalidSignature,
1703
1704    #[error("The tower does not match this validator: {0}")]
1705    WrongTower(String),
1706
1707    #[error(
1708        "The tower is too old: newest slot in tower ({0}) << oldest slot in available history \
1709         ({1})"
1710    )]
1711    TooOldTower(Slot, Slot),
1712
1713    #[error("The tower is fatally inconsistent with blockstore: {0}")]
1714    FatallyInconsistent(&'static str),
1715
1716    #[error("The tower is useless because of new hard fork: {0}")]
1717    HardFork(Slot),
1718}
1719
1720impl TowerError {
1721    pub fn is_file_missing(&self) -> bool {
1722        if let TowerError::IoError(io_err) = &self {
1723            io_err.kind() == std::io::ErrorKind::NotFound
1724        } else {
1725            false
1726        }
1727    }
1728    pub fn is_too_old(&self) -> bool {
1729        matches!(self, TowerError::TooOldTower(_, _))
1730    }
1731}
1732
1733#[derive(Debug)]
1734pub enum ExternalRootSource {
1735    Tower(Slot),
1736    HardFork(Slot),
1737}
1738
1739impl ExternalRootSource {
1740    fn root(&self) -> Slot {
1741        match self {
1742            ExternalRootSource::Tower(slot) => *slot,
1743            ExternalRootSource::HardFork(slot) => *slot,
1744        }
1745    }
1746}
1747
1748// Given an untimely crash, the tower may have roots that are not reflected in the blockstore,
1749// or the reverse.
1750// That's because we don't impose any ordering guarantees or write barriers
1751// between the tower (plain old POSIX fs calls) and the blockstore (through RocksDB), when
1752// `ReplayState::handle_votable_bank()` saves the tower before setting blockstore roots.
1753pub fn reconcile_blockstore_roots_with_external_source(
1754    external_source: ExternalRootSource,
1755    blockstore: &Blockstore,
1756    // blockstore.max_root() might have been updated already,
1757    // so take a &mut param to serve as both input and output (output iff we update the root)
1758    last_blockstore_root: &mut Slot,
1759) -> blockstore::Result<()> {
1760    let external_root = external_source.root();
1761    if *last_blockstore_root < external_root {
1762        // Ensure external_root itself exists and is marked as rooted in the blockstore,
1763        // in addition to its ancestors.
1764        let new_roots: Vec<_> = AncestorIterator::new_inclusive(external_root, blockstore)
1765            .take_while(|current| match current.cmp(last_blockstore_root) {
1766                Ordering::Greater => true,
1767                Ordering::Equal => false,
1768                Ordering::Less => panic!(
1769                    "last_blockstore_root({last_blockstore_root}) is skipped while traversing \
1770                     blockstore (currently at {current}) from external root \
1771                     ({external_source:?})!?",
1772                ),
1773            })
1774            .collect();
1775        if !new_roots.is_empty() {
1776            info!(
1777                "Reconciling slots as root based on external root: {new_roots:?} (external: \
1778                 {external_source:?}, blockstore: {last_blockstore_root})"
1779            );
1780
1781            // Unfortunately, we can't supply duplicate-confirmed hashes,
1782            // because there is no guarantee that these slots can be replayed
1783            // under this code path's limited conditions (i.e. those shreds
1784            // might not be available, etc.), and correctly overcoming this
1785            // limitation is hard...
1786            blockstore.mark_slots_as_if_rooted_normally_at_startup(
1787                new_roots.into_iter().map(|root| (root, None)).collect(),
1788                false,
1789            )?;
1790
1791            // Update the caller-managed state of last root in blockstore.
1792            // Repeated calls of this function should result in a no-op for
1793            // the range of `new_roots`.
1794            *last_blockstore_root = blockstore.max_root();
1795        } else {
1796            // This indicates we're in a bad state, but still don't panic here.
1797            // That's because we might have a chance of recovering properly with a
1798            // newer snapshot.
1799            warn!(
1800                "Couldn't find any ancestor slots from external source ({external_source:?}) \
1801                 towards blockstore root ({last_blockstore_root}); blockstore pruned or only \
1802                 tower moved into new ledger or just hard fork?",
1803            );
1804        }
1805    }
1806    Ok(())
1807}
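// Hedged sketch (illustration only) of the traversal above: ancestors of the external
// root are collected (inclusive) until the walk reaches the current blockstore root,
// and walking past it indicates an inconsistent ledger. The helper name and the
// slot values passed in are hypothetical.
#[allow(dead_code)]
fn new_roots_example(
    ancestors_of_external_root_newest_first: &[Slot],
    last_blockstore_root: Slot,
) -> Vec<Slot> {
    ancestors_of_external_root_newest_first
        .iter()
        .copied()
        .take_while(|current| match current.cmp(&last_blockstore_root) {
            Ordering::Greater => true,
            Ordering::Equal => false,
            Ordering::Less => panic!("skipped the last blockstore root while traversing ancestors"),
        })
        .collect()
}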
1808
1809#[cfg(test)]
1810pub mod test {
1811    use {
1812        super::*,
1813        crate::{
1814            consensus::{
1815                fork_choice::ForkChoice, heaviest_subtree_fork_choice::SlotHashKey,
1816                tower_storage::FileTowerStorage,
1817            },
1818            replay_stage::HeaviestForkFailures,
1819            vote_simulator::VoteSimulator,
1820        },
1821        itertools::Itertools,
1822        solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
1823        solana_clock::Slot,
1824        solana_hash::Hash,
1825        solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path_auto_delete},
1826        solana_pubkey::Pubkey,
1827        solana_runtime::bank::Bank,
1828        solana_signer::Signer,
1829        solana_slot_history::SlotHistory,
1830        solana_vote::vote_account::VoteAccount,
1831        solana_vote_program::vote_state::{
1832            process_slot_vote_unchecked, Vote, VoteStateV4, VoteStateVersions, MAX_LOCKOUT_HISTORY,
1833        },
1834        std::{
1835            collections::{HashMap, VecDeque},
1836            fs::{remove_file, OpenOptions},
1837            io::{Read, Seek, SeekFrom, Write},
1838            path::PathBuf,
1839            sync::Arc,
1840        },
1841        tempfile::TempDir,
1842        trees::tr,
1843    };
1844
1845    fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> VoteAccountsHashMap {
1846        stake_votes
1847            .iter()
1848            .map(|(lamports, votes)| {
1849                let mut account = AccountSharedData::from(Account {
1850                    data: vec![0; VoteStateV4::size_of()],
1851                    lamports: *lamports,
1852                    owner: solana_vote_program::id(),
1853                    ..Account::default()
1854                });
1855                let mut vote_state = VoteStateV4::default();
1856                for slot in *votes {
1857                    process_slot_vote_unchecked(&mut vote_state, *slot);
1858                }
1859                VoteStateV4::serialize(
1860                    &VoteStateVersions::new_v4(vote_state),
1861                    account.data_as_mut_slice(),
1862                )
1863                .expect("serialize state");
1864                (
1865                    solana_pubkey::new_rand(),
1866                    (*lamports, VoteAccount::try_from(account).unwrap()),
1867                )
1868            })
1869            .collect()
1870    }
1871
1872    #[test]
1873    fn test_to_vote_instruction() {
1874        let vote = Vote::default();
1875        let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
1876        assert!(decision
1877            .to_vote_instruction(
1878                VoteTransaction::from(vote.clone()),
1879                &Pubkey::default(),
1880                &Pubkey::default()
1881            )
1882            .is_none());
1883
1884        decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
1885        assert!(decision
1886            .to_vote_instruction(
1887                VoteTransaction::from(vote.clone()),
1888                &Pubkey::default(),
1889                &Pubkey::default()
1890            )
1891            .is_none());
1892
1893        decision = SwitchForkDecision::SameFork;
1894        assert_eq!(
1895            decision.to_vote_instruction(
1896                VoteTransaction::from(vote.clone()),
1897                &Pubkey::default(),
1898                &Pubkey::default()
1899            ),
1900            Some(vote_instruction::vote(
1901                &Pubkey::default(),
1902                &Pubkey::default(),
1903                vote.clone(),
1904            ))
1905        );
1906
1907        decision = SwitchForkDecision::SwitchProof(Hash::default());
1908        assert_eq!(
1909            decision.to_vote_instruction(
1910                VoteTransaction::from(vote.clone()),
1911                &Pubkey::default(),
1912                &Pubkey::default()
1913            ),
1914            Some(vote_instruction::vote_switch(
1915                &Pubkey::default(),
1916                &Pubkey::default(),
1917                vote,
1918                Hash::default()
1919            ))
1920        );
1921    }
1922
1923    #[test]
1924    fn test_simple_votes() {
1925        // Init state
1926        let mut vote_simulator = VoteSimulator::new(1);
1927        let node_pubkey = vote_simulator.node_pubkeys[0];
1928        let mut tower = Tower::default();
1929
1930        // Create the tree of banks
1931        let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
1932
1933        // Set the voting behavior
1934        let mut cluster_votes = HashMap::new();
1935        let votes = vec![1, 2, 3, 4, 5];
1936        cluster_votes.insert(node_pubkey, votes.clone());
1937        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
1938
1939        // Simulate the votes
1940        for vote in votes {
1941            assert!(vote_simulator
1942                .simulate_vote(vote, &node_pubkey, &mut tower,)
1943                .is_empty());
1944        }
1945
1946        for i in 1..5 {
1947            assert_eq!(tower.vote_state.votes[i - 1].slot() as usize, i);
1948            assert_eq!(
1949                tower.vote_state.votes[i - 1].confirmation_count() as usize,
1950                6 - i
1951            );
1952        }
1953    }
1954
1955    #[test]
1956    fn test_switch_threshold_duplicate_rollback() {
1957        run_test_switch_threshold_duplicate_rollback(false);
1958    }
1959
1960    #[test]
1961    #[should_panic]
1962    fn test_switch_threshold_duplicate_rollback_panic() {
1963        run_test_switch_threshold_duplicate_rollback(true);
1964    }
1965
1966    fn setup_switch_test(num_accounts: usize) -> (Arc<Bank>, VoteSimulator, u64) {
1967        // Init state
1968        assert!(num_accounts > 1);
1969        let mut vote_simulator = VoteSimulator::new(num_accounts);
1970        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
1971        let total_stake = bank0.total_epoch_stake();
1972        assert_eq!(
1973            total_stake,
1974            vote_simulator.validator_keypairs.len() as u64 * 10_000
1975        );
1976
1977        // Create the tree of banks
1978        let forks = tr(0)
1979            / (tr(1)
1980                / (tr(2)
1981                    // Minor fork 1
1982                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
1983                    / (tr(43)
1984                        / (tr(44)
1985                            // Minor fork 2
1986                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
1987                            / (tr(110)))
1988                        / tr(112))));
1989
1990        // Fill the BankForks according to the above fork structure
1991        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
1992        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
1993            fork_progress.fork_stats.computed = true;
1994        }
1995
1996        (bank0, vote_simulator, total_stake)
1997    }
1998
1999    fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) {
2000        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2001        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2002        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2003        let mut tower = Tower::default();
2004
2005        // Last vote is 47
2006        tower.record_vote(
2007            47,
2008            vote_simulator
2009                .bank_forks
2010                .read()
2011                .unwrap()
2012                .get(47)
2013                .unwrap()
2014                .hash(),
2015        );
2016
2017        // Trying to switch to an ancestor of the last vote should only avoid panicking
2018        // if the current vote has a duplicate ancestor
2019        let ancestor_of_voted_slot = 43;
2020        let duplicate_ancestor1 = 44;
2021        let duplicate_ancestor2 = 45;
2022        vote_simulator
2023            .tbft_structs
2024            .heaviest_subtree_fork_choice
2025            .mark_fork_invalid_candidate(&(
2026                duplicate_ancestor1,
2027                vote_simulator
2028                    .bank_forks
2029                    .read()
2030                    .unwrap()
2031                    .get(duplicate_ancestor1)
2032                    .unwrap()
2033                    .hash(),
2034            ));
2035        vote_simulator
2036            .tbft_structs
2037            .heaviest_subtree_fork_choice
2038            .mark_fork_invalid_candidate(&(
2039                duplicate_ancestor2,
2040                vote_simulator
2041                    .bank_forks
2042                    .read()
2043                    .unwrap()
2044                    .get(duplicate_ancestor2)
2045                    .unwrap()
2046                    .hash(),
2047            ));
2048        assert_eq!(
2049            tower.check_switch_threshold(
2050                ancestor_of_voted_slot,
2051                &ancestors,
2052                &descendants,
2053                &vote_simulator.progress,
2054                total_stake,
2055                bank0.epoch_vote_accounts(0).unwrap(),
2056                &vote_simulator.latest_validator_votes_for_frozen_banks,
2057                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2058            ),
2059            SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2060        );
2061        let mut confirm_ancestors = vec![duplicate_ancestor1];
2062        if should_panic {
2063            // Adding the last duplicate ancestor will
2064            // 1) Cause the loop below to confirm the last ancestor
2065            // 2) Check the switch threshold on a vote ancestor when there
2066            // are no duplicates on that fork, which will cause a panic
2067            confirm_ancestors.push(duplicate_ancestor2);
2068        }
2069        for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
2070            vote_simulator
2071                .tbft_structs
2072                .heaviest_subtree_fork_choice
2073                .mark_fork_valid_candidate(&(
2074                    duplicate_ancestor,
2075                    vote_simulator
2076                        .bank_forks
2077                        .read()
2078                        .unwrap()
2079                        .get(duplicate_ancestor)
2080                        .unwrap()
2081                        .hash(),
2082                ));
2083            let res = tower.check_switch_threshold(
2084                ancestor_of_voted_slot,
2085                &ancestors,
2086                &descendants,
2087                &vote_simulator.progress,
2088                total_stake,
2089                bank0.epoch_vote_accounts(0).unwrap(),
2090                &vote_simulator.latest_validator_votes_for_frozen_banks,
2091                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2092            );
2093            if i == 0 {
2094                assert_eq!(
2095                    res,
2096                    SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2097                );
2098            }
2099        }
2100    }
2101
2102    #[test]
2103    fn test_switch_threshold() {
2104        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2105        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2106        let mut descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2107        let mut tower = Tower::default();
2108        let other_vote_account = vote_simulator.vote_pubkeys[1];
2109
2110        // Last vote is 47
2111        tower.record_vote(47, Hash::default());
2112
2113        // Trying to switch to a descendant of last vote should always work
2114        assert_eq!(
2115            tower.check_switch_threshold(
2116                48,
2117                &ancestors,
2118                &descendants,
2119                &vote_simulator.progress,
2120                total_stake,
2121                bank0.epoch_vote_accounts(0).unwrap(),
2122                &vote_simulator.latest_validator_votes_for_frozen_banks,
2123                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2124            ),
2125            SwitchForkDecision::SameFork
2126        );
2127
2128        // Trying to switch to another fork at 110 should fail
2129        assert_eq!(
2130            tower.check_switch_threshold(
2131                110,
2132                &ancestors,
2133                &descendants,
2134                &vote_simulator.progress,
2135                total_stake,
2136                bank0.epoch_vote_accounts(0).unwrap(),
2137                &vote_simulator.latest_validator_votes_for_frozen_banks,
2138                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2139            ),
2140            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2141        );
2142
2143        // Adding another validator lockout on a descendant of last vote should
2144        // not count toward the switch threshold
2145        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2146        assert_eq!(
2147            tower.check_switch_threshold(
2148                110,
2149                &ancestors,
2150                &descendants,
2151                &vote_simulator.progress,
2152                total_stake,
2153                bank0.epoch_vote_accounts(0).unwrap(),
2154                &vote_simulator.latest_validator_votes_for_frozen_banks,
2155                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2156            ),
2157            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2158        );
2159
2160        // Adding another validator lockout on an ancestor of last vote should
2161        // not count toward the switch threshold
2162        vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
2163        assert_eq!(
2164            tower.check_switch_threshold(
2165                110,
2166                &ancestors,
2167                &descendants,
2168                &vote_simulator.progress,
2169                total_stake,
2170                bank0.epoch_vote_accounts(0).unwrap(),
2171                &vote_simulator.latest_validator_votes_for_frozen_banks,
2172                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2173            ),
2174            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2175        );
2176
2177        // Adding another validator lockout on a different fork, but the lockout
2178        // doesn't cover the last vote, should not satisfy the switch threshold
2179        vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
2180        assert_eq!(
2181            tower.check_switch_threshold(
2182                110,
2183                &ancestors,
2184                &descendants,
2185                &vote_simulator.progress,
2186                total_stake,
2187                bank0.epoch_vote_accounts(0).unwrap(),
2188                &vote_simulator.latest_validator_votes_for_frozen_banks,
2189                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2190            ),
2191            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2192        );
2193
2194        // Adding another validator lockout on a different fork, where the lockout
2195        // covers the last vote, would normally count towards the switch threshold.
2196        // Here it does not, because the bank is not the most recent frozen bank on its
2197        // fork (14 is a frozen/computed bank > 13 on the same fork in this case)
2198        vote_simulator.simulate_lockout_interval(13, (12, 47), &other_vote_account);
2199        assert_eq!(
2200            tower.check_switch_threshold(
2201                110,
2202                &ancestors,
2203                &descendants,
2204                &vote_simulator.progress,
2205                total_stake,
2206                bank0.epoch_vote_accounts(0).unwrap(),
2207                &vote_simulator.latest_validator_votes_for_frozen_banks,
2208                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2209            ),
2210            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2211        );
2212
2213        // Adding another validator lockout on a different fork, where the lockout
2214        // covers the last vote, should satisfy the switch threshold
2215        vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
2216        assert_eq!(
2217            tower.check_switch_threshold(
2218                110,
2219                &ancestors,
2220                &descendants,
2221                &vote_simulator.progress,
2222                total_stake,
2223                bank0.epoch_vote_accounts(0).unwrap(),
2224                &vote_simulator.latest_validator_votes_for_frozen_banks,
2225                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2226            ),
2227            SwitchForkDecision::SwitchProof(Hash::default())
2228        );
2229
2230        // Adding another unfrozen descendant of the tip of 14 should not remove
2231        // slot 14 from consideration because it is still the most recent frozen
2232        // bank on its fork
2233        descendants.get_mut(&14).unwrap().insert(10000);
2234        assert_eq!(
2235            tower.check_switch_threshold(
2236                110,
2237                &ancestors,
2238                &descendants,
2239                &vote_simulator.progress,
2240                total_stake,
2241                bank0.epoch_vote_accounts(0).unwrap(),
2242                &vote_simulator.latest_validator_votes_for_frozen_banks,
2243                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2244            ),
2245            SwitchForkDecision::SwitchProof(Hash::default())
2246        );
2247
2248        // If we set a root, then any lockout intervals below the root shouldn't
2249        // count toward the switch threshold. This means the other validator's
2250        // vote lockout no longer counts
2251        tower.vote_state.root_slot = Some(43);
2252        // Refresh ancestors and descendants for new root.
2253        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2254        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2255
2256        assert_eq!(
2257            tower.check_switch_threshold(
2258                110,
2259                &ancestors,
2260                &descendants,
2261                &vote_simulator.progress,
2262                total_stake,
2263                bank0.epoch_vote_accounts(0).unwrap(),
2264                &vote_simulator.latest_validator_votes_for_frozen_banks,
2265                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2266            ),
2267            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2268        );
2269    }
2270
2271    #[test]
2272    fn test_switch_threshold_use_gossip_votes() {
2273        let num_validators = 2;
2274        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2275        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2276        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2277        let mut tower = Tower::default();
2278        let other_vote_account = vote_simulator.vote_pubkeys[1];
2279
2280        // Last vote is 47
2281        tower.record_vote(47, Hash::default());
2282
2283        // Trying to switch to another fork at 110 should fail
2284        assert_eq!(
2285            tower.check_switch_threshold(
2286                110,
2287                &ancestors,
2288                &descendants,
2289                &vote_simulator.progress,
2290                total_stake,
2291                bank0.epoch_vote_accounts(0).unwrap(),
2292                &vote_simulator.latest_validator_votes_for_frozen_banks,
2293                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2294            ),
2295            SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
2296        );
2297
2298        // Adding a vote on the descendant shouldn't count toward the switch threshold
2299        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2300        assert_eq!(
2301            tower.check_switch_threshold(
2302                110,
2303                &ancestors,
2304                &descendants,
2305                &vote_simulator.progress,
2306                total_stake,
2307                bank0.epoch_vote_accounts(0).unwrap(),
2308                &vote_simulator.latest_validator_votes_for_frozen_banks,
2309                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2310            ),
2311            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2312        );
2313
2314        // Adding a later vote from gossip that isn't on the same fork should count toward the
2315        // switch threshold
2316        vote_simulator
2317            .latest_validator_votes_for_frozen_banks
2318            .check_add_vote(
2319                other_vote_account,
2320                112,
2321                Some(
2322                    vote_simulator
2323                        .bank_forks
2324                        .read()
2325                        .unwrap()
2326                        .get(112)
2327                        .unwrap()
2328                        .hash(),
2329                ),
2330                false,
2331            );
2332
2333        assert_eq!(
2334            tower.check_switch_threshold(
2335                110,
2336                &ancestors,
2337                &descendants,
2338                &vote_simulator.progress,
2339                total_stake,
2340                bank0.epoch_vote_accounts(0).unwrap(),
2341                &vote_simulator.latest_validator_votes_for_frozen_banks,
2342                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2343            ),
2344            SwitchForkDecision::SwitchProof(Hash::default())
2345        );
2346
2347        // If we now set a root that causes slot 112 to be purged from BankForks, then
2348        // the switch proof will now fail since that validator's vote can no longer be
2349        // included in the switching proof
2350        vote_simulator.set_root(44);
2351        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2352        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2353        assert_eq!(
2354            tower.check_switch_threshold(
2355                110,
2356                &ancestors,
2357                &descendants,
2358                &vote_simulator.progress,
2359                total_stake,
2360                bank0.epoch_vote_accounts(0).unwrap(),
2361                &vote_simulator.latest_validator_votes_for_frozen_banks,
2362                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2363            ),
2364            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2365        );
2366    }
2367
2368    #[test]
2369    fn test_switch_threshold_votes() {
2370        // Init state
2371        let mut vote_simulator = VoteSimulator::new(4);
2372        let node_pubkey = vote_simulator.node_pubkeys[0];
2373        let mut tower = Tower::default();
2374        let forks = tr(0)
2375            / (tr(1)
2376                / (tr(2)
2377                    // Minor fork 1
2378                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2379                    / (tr(43)
2380                        / (tr(44)
2381                            // Minor fork 2
2382                            / (tr(45) / (tr(46))))
2383                        / (tr(110)))));
2384
2385        // Have two validators, each representing 20% of the stake, vote on
2386        // minor fork 2 at slots 46 and 47
2387        let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
2388        cluster_votes.insert(vote_simulator.node_pubkeys[1], vec![46]);
2389        cluster_votes.insert(vote_simulator.node_pubkeys[2], vec![47]);
2390        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2391
2392        // Vote on the first minor fork at slot 14, should succeed
2393        assert!(vote_simulator
2394            .simulate_vote(14, &node_pubkey, &mut tower,)
2395            .is_empty());
2396
2397        // The other two validators voted at slots 46, 47, which
2398        // will only both show up in slot 48, at which point
2399        // 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
2400        // on another fork, so switching should succeed
2401        let votes_to_simulate = (46..=48).collect();
2402        let results = vote_simulator.create_and_vote_new_branch(
2403            45,
2404            48,
2405            &cluster_votes,
2406            &votes_to_simulate,
2407            &node_pubkey,
2408            &mut tower,
2409        );
2410        assert_eq!(
2411            *results.get(&46).unwrap(),
2412            vec![HeaviestForkFailures::FailedSwitchThreshold(46, 0, 40000)]
2413        );
2414        assert_eq!(
2415            *results.get(&47).unwrap(),
2416            vec![HeaviestForkFailures::FailedSwitchThreshold(
2417                47, 10000, 40000
2418            )]
2419        );
2420        assert!(results.get(&48).unwrap().is_empty());
2421    }
2422
2423    #[test]
2424    fn test_double_partition() {
2425        // Init state
2426        let mut vote_simulator = VoteSimulator::new(2);
2427        let node_pubkey = vote_simulator.node_pubkeys[0];
2428        let vote_pubkey = vote_simulator.vote_pubkeys[0];
2429        let mut tower = Tower::default();
2430
2431        let num_slots_to_try = 200;
2432        // Create the tree of banks
2433        let forks = tr(0)
2434            / (tr(1)
2435                / (tr(2)
2436                    / (tr(3)
2437                        / (tr(4)
2438                            / (tr(5)
2439                                / (tr(6)
2440                                    / (tr(7)
2441                                        / (tr(8)
2442                                            / (tr(9)
2443                                                // Minor fork 1
2444                                                / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2445                                                / (tr(43)
2446                                                    / (tr(44)
2447                                                        // Minor fork 2
2448                                                        / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
2449                                                        / (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
2450
2451        // Set the successful voting behavior
2452        let mut cluster_votes = HashMap::new();
2453        let mut my_votes: Vec<Slot> = vec![];
2454        let next_unlocked_slot = 110;
2455        // Vote on the first minor fork
2456        my_votes.extend(1..=14);
2457        // Come back to the main fork
2458        my_votes.extend(43..=44);
2459        // Vote on the second minor fork
2460        my_votes.extend(45..=50);
2461        // Vote to come back to main fork
2462        my_votes.push(next_unlocked_slot);
2463        cluster_votes.insert(node_pubkey, my_votes.clone());
2464        // Make the other validator vote on the same fork to pass the threshold checks
2465        let other_votes = my_votes.clone();
2466        cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
2467        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2468
2469        // Simulate the votes.
2470        for vote in &my_votes {
2471            // All these votes should be ok
2472            assert!(vote_simulator
2473                .simulate_vote(*vote, &node_pubkey, &mut tower,)
2474                .is_empty());
2475        }
2476
2477        info!("local tower: {:#?}", tower.vote_state.votes);
2478        let observed = vote_simulator
2479            .bank_forks
2480            .read()
2481            .unwrap()
2482            .get(next_unlocked_slot)
2483            .unwrap()
2484            .get_vote_account(&vote_pubkey)
2485            .unwrap();
2486        let state = observed.vote_state_view();
2487        info!("observed tower: {:#?}", state.votes_iter().collect_vec());
2488
2489        let num_slots_to_try = 200;
2490        cluster_votes
2491            .get_mut(&vote_simulator.node_pubkeys[1])
2492            .unwrap()
2493            .extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
2494        assert!(vote_simulator.can_progress_on_fork(
2495            &node_pubkey,
2496            &mut tower,
2497            next_unlocked_slot,
2498            num_slots_to_try,
2499            &mut cluster_votes,
2500        ));
2501    }
2502
2503    #[test]
2504    fn test_collect_vote_lockouts_sums() {
2505        // two accounts voting for slot 0 with 1 token staked
2506        let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
2507        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2508            .iter()
2509            .sorted_by_key(|(pk, _)| *pk)
2510            .map(|(pubkey, _)| (*pubkey, (0, Hash::default())))
2511            .collect();
2512
2513        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
2514            .into_iter()
2515            .collect();
2516        let mut latest_validator_votes_for_frozen_banks =
2517            LatestValidatorVotesForFrozenBanks::default();
2518        let ComputedBankState {
2519            voted_stakes,
2520            total_stake,
2521            ..
2522        } = Tower::collect_vote_lockouts(
2523            &Pubkey::default(),
2524            1,
2525            0,
2526            &accounts,
2527            &ancestors,
2528            |_| Some(Hash::default()),
2529            &mut latest_validator_votes_for_frozen_banks,
2530            &mut HashSet::default(),
2531        );
2532        assert_eq!(voted_stakes[&0], 2);
2533        assert_eq!(total_stake, 2);
2534        let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0);
2535        new_votes.sort();
2536        assert_eq!(new_votes, account_latest_votes);
2537    }
2538
2539    #[test]
2540    fn test_collect_vote_lockouts_root() {
2541        let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
2542        // two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
2543        let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
2544        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2545            .iter()
2546            .sorted_by_key(|(pk, _)| *pk)
2547            .map(|(pubkey, _)| {
2548                (
2549                    *pubkey,
2550                    ((MAX_LOCKOUT_HISTORY - 1) as Slot, Hash::default()),
2551                )
2552            })
2553            .collect();
2554        let mut tower = Tower::new_for_tests(0, 0.67);
2555        let mut ancestors = HashMap::new();
2556        for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
2557            tower.record_vote(i as u64, Hash::default());
2558            ancestors.insert(i as u64, (0..i as u64).collect());
2559        }
2560        let root = Lockout::new_with_confirmation_count(0, MAX_LOCKOUT_HISTORY as u32);
2561        let expected_bank_stake = 2;
2562        let expected_total_stake = 2;
2563        assert_eq!(tower.vote_state.root_slot, Some(0));
2564        let mut latest_validator_votes_for_frozen_banks =
2565            LatestValidatorVotesForFrozenBanks::default();
2566        let ComputedBankState {
2567            voted_stakes,
2568            fork_stake,
2569            total_stake,
2570            ..
2571        } = Tower::collect_vote_lockouts(
2572            &Pubkey::default(),
2573            MAX_LOCKOUT_HISTORY as u64,
2574            0,
2575            &accounts,
2576            &ancestors,
2577            |_| Some(Hash::default()),
2578            &mut latest_validator_votes_for_frozen_banks,
2579            &mut HashSet::default(),
2580        );
2581        for i in 0..MAX_LOCKOUT_HISTORY {
2582            assert_eq!(voted_stakes[&(i as u64)], 2);
2583        }
2584
2585        // Should be the sum of all voted stake on the fork
2586        assert_eq!(fork_stake, expected_bank_stake);
2587        assert_eq!(total_stake, expected_total_stake);
2588        let mut new_votes =
2589            latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root.slot());
2590        new_votes.sort();
2591        assert_eq!(new_votes, account_latest_votes);
2592    }
2593
2594    #[test]
2595    fn test_check_vote_threshold_without_votes() {
2596        let tower = Tower::new_for_tests(1, 0.67);
2597        let stakes = vec![(0, 1)].into_iter().collect();
2598        assert!(tower.check_vote_stake_thresholds(0, &stakes, 2).is_empty());
2599    }
2600
2601    #[test]
2602    fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
2603        agave_logger::setup();
2604        let mut tower = Tower::new_for_tests(4, 0.67);
2605        let mut stakes = HashMap::default();
2606        for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
2607            stakes.insert(i, 1);
2608            tower.record_vote(i, Hash::default());
2609        }
2610        assert!(!tower
2611            .check_vote_stake_thresholds(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2)
2612            .is_empty());
2613    }
2614
2615    #[test]
2616    fn test_is_slot_confirmed_not_enough_stake_failure() {
2617        let tower = Tower::new_for_tests(1, 0.67);
2618        let stakes = vec![(0, 1)].into_iter().collect();
2619        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2620    }
2621
2622    #[test]
2623    fn test_is_slot_confirmed_unknown_slot() {
2624        let tower = Tower::new_for_tests(1, 0.67);
2625        let stakes = HashMap::default();
2626        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2627    }
2628
2629    #[test]
2630    fn test_is_slot_confirmed_pass() {
2631        let tower = Tower::new_for_tests(1, 0.67);
2632        let stakes = vec![(0, 2)].into_iter().collect();
2633        assert!(tower.is_slot_confirmed(0, &stakes, 2));
2634    }
2635
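    // Duplicate confirmation requires strictly more than DUPLICATE_THRESHOLD of the total
    // stake (0.52 at the time of writing), which is why 52/100 fails and 53/100 passes in
    // the tests below.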
2636    #[test]
2637    fn test_is_slot_duplicate_confirmed_not_enough_stake_failure() {
2638        let tower = Tower::new_for_tests(1, 0.67);
2639        let stakes = vec![(0, 52)].into_iter().collect();
2640        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2641    }
2642
2643    #[test]
2644    fn test_is_slot_duplicate_confirmed_unknown_slot() {
2645        let tower = Tower::new_for_tests(1, 0.67);
2646        let stakes = HashMap::default();
2647        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2648    }
2649
2650    #[test]
2651    fn test_is_slot_duplicate_confirmed_pass() {
2652        let tower = Tower::new_for_tests(1, 0.67);
2653        let stakes = vec![(0, 53)].into_iter().collect();
2654        assert!(tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2655    }
2656
2657    #[test]
2658    fn test_is_locked_out_empty() {
2659        let tower = Tower::new_for_tests(0, 0.67);
2660        let ancestors = HashSet::from([0]);
2661        assert!(!tower.is_locked_out(1, &ancestors));
2662    }
2663
2664    #[test]
2665    fn test_is_locked_out_root_slot_child_pass() {
2666        let mut tower = Tower::new_for_tests(0, 0.67);
2667        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2668        tower.vote_state.root_slot = Some(0);
2669        assert!(!tower.is_locked_out(1, &ancestors));
2670    }
2671
2672    #[test]
2673    fn test_is_locked_out_root_slot_sibling_fail() {
2674        let mut tower = Tower::new_for_tests(0, 0.67);
2675        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2676        tower.vote_state.root_slot = Some(0);
2677        tower.record_vote(1, Hash::default());
2678        assert!(tower.is_locked_out(2, &ancestors));
2679    }
2680
2681    #[test]
2682    fn test_check_already_voted() {
2683        let mut tower = Tower::new_for_tests(0, 0.67);
2684        tower.record_vote(0, Hash::default());
2685        assert!(tower.has_voted(0));
2686        assert!(!tower.has_voted(1));
2687    }
2688
2689    #[test]
2690    fn test_check_recent_slot() {
2691        let mut tower = Tower::new_for_tests(0, 0.67);
2692        assert!(tower.is_recent(1));
2693        assert!(tower.is_recent(32));
2694        for i in 0..64 {
2695            tower.record_vote(i, Hash::default());
2696        }
2697        assert!(!tower.is_recent(0));
2698        assert!(!tower.is_recent(32));
2699        assert!(!tower.is_recent(63));
2700        assert!(tower.is_recent(65));
2701    }
2702
2703    #[test]
2704    fn test_is_locked_out_double_vote() {
2705        let mut tower = Tower::new_for_tests(0, 0.67);
2706        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2707        tower.record_vote(0, Hash::default());
2708        tower.record_vote(1, Hash::default());
2709        assert!(tower.is_locked_out(0, &ancestors));
2710    }
2711
2712    #[test]
2713    fn test_is_locked_out_child() {
2714        let mut tower = Tower::new_for_tests(0, 0.67);
2715        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2716        tower.record_vote(0, Hash::default());
2717        assert!(!tower.is_locked_out(1, &ancestors));
2718    }
2719
2720    #[test]
2721    fn test_is_locked_out_sibling() {
2722        let mut tower = Tower::new_for_tests(0, 0.67);
2723        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2724        tower.record_vote(0, Hash::default());
2725        tower.record_vote(1, Hash::default());
2726        assert!(tower.is_locked_out(2, &ancestors));
2727    }
2728
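    // Illustrative sketch (not part of the original suite): the doubling-lockout
    // arithmetic that test_is_locked_out_last_vote_expired below relies on. A vote with
    // confirmation_count `c` locks the tower out of non-descendant slots through
    // voted_slot + 2^c.
    #[test]
    fn test_lockout_expiration_arithmetic_sketch() {
        let voted_slot: u64 = 1;
        let confirmation_count: u32 = 1;
        let last_locked_out_slot = voted_slot + 2u64.pow(confirmation_count);
        // The vote on slot 1 covers slots through 3, so slot 4 on a different fork is the
        // first slot that is no longer locked out.
        assert_eq!(last_locked_out_slot, 3);
        assert!(4 > last_locked_out_slot);
    }
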
2729    #[test]
2730    fn test_is_locked_out_last_vote_expired() {
2731        let mut tower = Tower::new_for_tests(0, 0.67);
2732        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2733        tower.record_vote(0, Hash::default());
2734        tower.record_vote(1, Hash::default());
2735        assert!(!tower.is_locked_out(4, &ancestors));
2736        tower.record_vote(4, Hash::default());
2737        assert_eq!(tower.vote_state.votes[0].slot(), 0);
2738        assert_eq!(tower.vote_state.votes[0].confirmation_count(), 2);
2739        assert_eq!(tower.vote_state.votes[1].slot(), 4);
2740        assert_eq!(tower.vote_state.votes[1].confirmation_count(), 1);
2741    }
2742
2743    #[test]
2744    fn test_check_vote_threshold_below_threshold() {
2745        let mut tower = Tower::new_for_tests(1, 0.67);
2746        let stakes = vec![(0, 1)].into_iter().collect();
2747        tower.record_vote(0, Hash::default());
2748        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2749    }
2750    #[test]
2751    fn test_check_vote_threshold_above_threshold() {
2752        let mut tower = Tower::new_for_tests(1, 0.67);
2753        let stakes = vec![(0, 2)].into_iter().collect();
2754        tower.record_vote(0, Hash::default());
2755        assert!(tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2756    }
2757
2758    #[test]
2759    fn test_check_vote_thresholds_above_thresholds() {
2760        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2761        let stakes = vec![
2762            (0, 3),
2763            (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 2),
2764            ((VOTE_THRESHOLD_DEPTH_SHALLOW as u64) - 1, 2),
2765        ]
2766        .into_iter()
2767        .collect();
2768        for slot in 0..VOTE_THRESHOLD_DEPTH {
2769            tower.record_vote(slot as Slot, Hash::default());
2770        }
2771        assert!(tower
2772            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 4)
2773            .is_empty());
2774    }
2775
2776    #[test]
2777    fn test_check_vote_threshold_deep_below_threshold() {
2778        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2779        let stakes = vec![(0, 6), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 4)]
2780            .into_iter()
2781            .collect();
2782        for slot in 0..VOTE_THRESHOLD_DEPTH {
2783            tower.record_vote(slot as Slot, Hash::default());
2784        }
2785        assert!(!tower
2786            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2787            .is_empty());
2788    }
2789
2790    #[test]
2791    fn test_check_vote_threshold_shallow_below_threshold() {
2792        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2793        let stakes = vec![(0, 7), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 1)]
2794            .into_iter()
2795            .collect();
2796        for slot in 0..VOTE_THRESHOLD_DEPTH {
2797            tower.record_vote(slot as Slot, Hash::default());
2798        }
2799        assert!(!tower
2800            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2801            .is_empty());
2802    }
2803
2804    #[test]
2805    fn test_check_vote_threshold_above_threshold_after_pop() {
2806        let mut tower = Tower::new_for_tests(1, 0.67);
2807        let stakes = vec![(0, 2)].into_iter().collect();
2808        tower.record_vote(0, Hash::default());
2809        tower.record_vote(1, Hash::default());
2810        tower.record_vote(2, Hash::default());
2811        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2812    }
2813
2814    #[test]
2815    fn test_check_vote_threshold_above_threshold_no_stake() {
2816        let mut tower = Tower::new_for_tests(1, 0.67);
2817        let stakes = HashMap::default();
2818        tower.record_vote(0, Hash::default());
2819        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2820    }
2821
2822    #[test]
2823    fn test_check_vote_threshold_lockouts_not_updated() {
2824        agave_logger::setup();
2825        let mut tower = Tower::new_for_tests(1, 0.67);
2826        let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
2827        tower.record_vote(0, Hash::default());
2828        tower.record_vote(1, Hash::default());
2829        tower.record_vote(2, Hash::default());
2830        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2831    }
2832
2833    #[test]
2834    fn test_stake_is_updated_for_entire_branch() {
2835        let mut voted_stakes = HashMap::default();
2836        let account = AccountSharedData::from(Account {
2837            lamports: 1,
2838            ..Account::default()
2839        });
2840        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
2841        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
2842        Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports(), &ancestors);
2843        assert_eq!(voted_stakes[&0], 1);
2844        assert_eq!(voted_stakes[&1], 1);
2845        assert_eq!(voted_stakes[&2], 1);
2846    }
2847
2848    #[test]
2849    fn test_check_vote_threshold_forks() {
2850        // Create the ancestor relationships
2851        let ancestors = (0..=(VOTE_THRESHOLD_DEPTH + 1) as u64)
2852            .map(|slot| {
2853                let slot_parents: HashSet<_> = (0..slot).collect();
2854                (slot, slot_parents)
2855            })
2856            .collect();
2857
2858        // Create votes such that
2859        // 1) 3/4 of the stake has voted on slot: VOTE_THRESHOLD_DEPTH - 2, lockout: 2
2860        // 2) 1/4 of the stake has voted on slots 0..VOTE_THRESHOLD_DEPTH, so its oldest vote
2860        //    (slot 0) carries lockout 2^VOTE_THRESHOLD_DEPTH
2861        let total_stake = 4;
2862        let threshold_size = 0.67;
2863        let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
2864        let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
2865        let accounts = gen_stakes(&[
2866            (threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
2867            (total_stake - threshold_stake, &tower_votes[..]),
2868        ]);
2869
2870        // Initialize tower
2871        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, threshold_size);
2872        let mut vote_slots = HashSet::default();
2873        // CASE 1: Record the first VOTE_THRESHOLD_DEPTH tower votes. We want to
2874        // evaluate a vote on slot VOTE_THRESHOLD_DEPTH. The vote at threshold depth should be
2875        // for slot 0, which is common to all account vote states, so we should pass the
2876        // threshold check
2877        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
2878        for vote in &tower_votes {
2879            tower.record_vote(*vote, Hash::default());
2880        }
2881        let ComputedBankState {
2882            voted_stakes,
2883            total_stake,
2884            ..
2885        } = Tower::collect_vote_lockouts(
2886            &Pubkey::default(),
2887            vote_to_evaluate,
2888            0,
2889            &accounts,
2890            &ancestors,
2891            |_| None,
2892            &mut LatestValidatorVotesForFrozenBanks::default(),
2893            &mut vote_slots,
2894        );
2895        assert!(tower
2896            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2897            .is_empty());
2898
2899        // CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
2900        // will expire the vote in one of the vote accounts, so we should have insufficient
2901        // stake to pass the threshold
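        // (That account's single vote, on slot VOTE_THRESHOLD_DEPTH - 2 with
        // confirmation_count 1, is only locked out through slot VOTE_THRESHOLD_DEPTH, so
        // simulating a vote on VOTE_THRESHOLD_DEPTH + 1 pops it and its stake no longer
        // reaches slot 0 at threshold depth.)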
2902        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
2903        let ComputedBankState {
2904            voted_stakes,
2905            total_stake,
2906            ..
2907        } = Tower::collect_vote_lockouts(
2908            &Pubkey::default(),
2909            vote_to_evaluate,
2910            0,
2911            &accounts,
2912            &ancestors,
2913            |_| None,
2914            &mut LatestValidatorVotesForFrozenBanks::default(),
2915            &mut vote_slots,
2916        );
2917        assert!(!tower
2918            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2919            .is_empty());
2920    }
2921
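    // Helper: records `num_votes` consecutive votes and checks that `tower.last_vote`
    // matches a TowerSync whose lockouts carry descending confirmation counts (the oldest
    // vote being the most confirmed).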
2922    fn vote_and_check_recent(num_votes: usize) {
2923        let mut tower = Tower::new_for_tests(1, 0.67);
2924        let slots = if num_votes > 0 {
2925            { 0..num_votes }
2926                .map(|i| {
2927                    Lockout::new_with_confirmation_count(i as Slot, (num_votes as u32) - (i as u32))
2928                })
2929                .collect()
2930        } else {
2931            vec![]
2932        };
2933        let mut expected = TowerSync::new(
2934            VecDeque::from(slots),
2935            if num_votes > 0 { Some(0) } else { None },
2936            Hash::default(),
2937            Hash::default(),
2938        );
2939        for i in 0..num_votes {
2940            tower.record_vote(i as u64, Hash::default());
2941        }
2942
2943        expected.timestamp = tower.last_vote.timestamp();
2944        assert_eq!(VoteTransaction::from(expected), tower.last_vote)
2945    }
2946
2947    #[test]
2948    fn test_recent_votes_full() {
2949        vote_and_check_recent(MAX_LOCKOUT_HISTORY)
2950    }
2951
2952    #[test]
2953    fn test_recent_votes_empty() {
2954        vote_and_check_recent(0)
2955    }
2956
2957    #[test]
2958    fn test_recent_votes_exact() {
2959        vote_and_check_recent(5)
2960    }
2961
2962    #[test]
2963    fn test_maybe_timestamp() {
2964        let mut tower = Tower::default();
2965        assert!(tower.maybe_timestamp(0).is_some());
2966        assert!(tower.maybe_timestamp(1).is_some());
2967        assert!(tower.maybe_timestamp(0).is_none()); // Refuse to timestamp an older slot
2968        assert!(tower.maybe_timestamp(1).is_none()); // Refuse to timestamp the same slot twice
2969
2970        tower.last_timestamp.timestamp -= 1; // Move last_timestamp into the past
2971        assert!(tower.maybe_timestamp(2).is_some()); // slot 2 gets a timestamp
2972
2973        tower.last_timestamp.timestamp += 1_000_000; // Move last_timestamp well into the future
2974        assert!(tower.maybe_timestamp(3).is_none()); // slot 3 gets no timestamp
2975    }
2976
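    // refresh_last_vote_timestamp should only stamp the last vote when its highest slot is
    // not ahead of the given heaviest slot on the same fork, and each refresh advances the
    // timestamp monotonically; the cases below walk through each branch.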
2977    #[test]
2978    fn test_refresh_last_vote_timestamp() {
2979        let mut tower = Tower::default();
2980
2981        // Tower has no vote or timestamp
2982        tower.last_vote.set_timestamp(None);
2983        tower.refresh_last_vote_timestamp(5);
2984        assert_eq!(tower.last_vote.timestamp(), None);
2985        assert_eq!(tower.last_timestamp.slot, 0);
2986        assert_eq!(tower.last_timestamp.timestamp, 0);
2987
2988        // Tower has a vote with no timestamp, but its slot is greater than the heaviest bank's
2989        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (6, 1)]));
2990        assert_eq!(tower.last_vote.timestamp(), None);
2991        tower.refresh_last_vote_timestamp(5);
2992        assert_eq!(tower.last_vote.timestamp(), None);
2993        assert_eq!(tower.last_timestamp.slot, 0);
2994        assert_eq!(tower.last_timestamp.timestamp, 0);
2995
2996        // Tower has a vote with no timestamp
2997        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
2998        assert_eq!(tower.last_vote.timestamp(), None);
2999        tower.refresh_last_vote_timestamp(5);
3000        assert_eq!(tower.last_vote.timestamp(), Some(1));
3001        assert_eq!(tower.last_timestamp.slot, 2);
3002        assert_eq!(tower.last_timestamp.timestamp, 1);
3003
3004        // last_timestamp was set by the previous refresh, so the next refresh stamps the vote
3004        // with the following timestamp
3005        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
3006        tower.refresh_last_vote_timestamp(5);
3007        assert_eq!(tower.last_vote.timestamp(), Some(2));
3008        assert_eq!(tower.last_timestamp.slot, 2);
3009        assert_eq!(tower.last_timestamp.timestamp, 2);
3010    }
3011
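    // Round-trip helper: save a tower via FileTowerStorage, let the caller tamper with
    // either the in-memory tower (`modify_original`) or the serialized file
    // (`modify_serialized`), then attempt to restore it so each test can assert on the
    // exact success or TowerError outcome.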
3012    fn run_test_load_tower_snapshot<F, G>(
3013        modify_original: F,
3014        modify_serialized: G,
3015    ) -> (Tower, Result<Tower>)
3016    where
3017        F: Fn(&mut Tower, &Pubkey),
3018        G: Fn(&PathBuf),
3019    {
3020        let tower_path = TempDir::new().unwrap();
3021        let identity_keypair = Arc::new(Keypair::new());
3022        let node_pubkey = identity_keypair.pubkey();
3023
3024        // Use values that will not match the default derived from BankForks
3025        let mut tower = Tower::new_for_tests(10, 0.9);
3026
3027        let tower_storage = FileTowerStorage::new(tower_path.path().to_path_buf());
3028
3029        modify_original(&mut tower, &node_pubkey);
3030
3031        tower.save(&tower_storage, &identity_keypair).unwrap();
3032        modify_serialized(&tower_storage.filename(&node_pubkey));
3033        let loaded = Tower::restore(&tower_storage, &node_pubkey);
3034
3035        (tower, loaded)
3036    }
3037
3038    #[test]
3039    fn test_switch_threshold_across_tower_reload() {
3040        agave_logger::setup();
3041        // Init state
3042        let mut vote_simulator = VoteSimulator::new(2);
3043        let other_vote_account = vote_simulator.vote_pubkeys[1];
3044        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3045        let total_stake = bank0.total_epoch_stake();
3046        assert_eq!(
3047            total_stake,
3048            vote_simulator.validator_keypairs.len() as u64 * 10_000
3049        );
3050
3051        // Create the tree of banks
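        //            /- 10
        // 0 - 1 - 2 - 43 - 44 - 45 - 46 - 47 - 48 - 49 - 50
        //                    \- 110 - 111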
3052        let forks = tr(0)
3053            / (tr(1)
3054                / (tr(2)
3055                    / tr(10)
3056                    / (tr(43)
3057                        / (tr(44)
3058                            // Minor fork 2
3059                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3060                            / (tr(110) / tr(111))))));
3061
3062        // Fill the BankForks according to the above fork structure
3063        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3064        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3065            fork_progress.fork_stats.computed = true;
3066        }
3067
3068        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3069        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3070        let mut tower = Tower::default();
3071
3072        tower.record_vote(43, Hash::default());
3073        tower.record_vote(44, Hash::default());
3074        tower.record_vote(45, Hash::default());
3075        tower.record_vote(46, Hash::default());
3076        tower.record_vote(47, Hash::default());
3077        tower.record_vote(48, Hash::default());
3078        tower.record_vote(49, Hash::default());
3079
3080        // Trying to switch to a descendant of last vote should always work
3081        assert_eq!(
3082            tower.check_switch_threshold(
3083                50,
3084                &ancestors,
3085                &descendants,
3086                &vote_simulator.progress,
3087                total_stake,
3088                bank0.epoch_vote_accounts(0).unwrap(),
3089                &vote_simulator.latest_validator_votes_for_frozen_banks,
3090                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3091            ),
3092            SwitchForkDecision::SameFork
3093        );
3094
3095        // Trying to switch to another fork at 110 should fail
3096        assert_eq!(
3097            tower.check_switch_threshold(
3098                110,
3099                &ancestors,
3100                &descendants,
3101                &vote_simulator.progress,
3102                total_stake,
3103                bank0.epoch_vote_accounts(0).unwrap(),
3104                &vote_simulator.latest_validator_votes_for_frozen_banks,
3105                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3106            ),
3107            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3108        );
3109
3110        vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
3111
3112        assert_eq!(
3113            tower.check_switch_threshold(
3114                110,
3115                &ancestors,
3116                &descendants,
3117                &vote_simulator.progress,
3118                total_stake,
3119                bank0.epoch_vote_accounts(0).unwrap(),
3120                &vote_simulator.latest_validator_votes_for_frozen_banks,
3121                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3122            ),
3123            SwitchForkDecision::SwitchProof(Hash::default())
3124        );
3125
3126        assert_eq!(tower.voted_slots(), vec![43, 44, 45, 46, 47, 48, 49]);
3127        {
3128            let mut tower = tower.clone();
3129            tower.record_vote(110, Hash::default());
3130            tower.record_vote(111, Hash::default());
3131            assert_eq!(tower.voted_slots(), vec![43, 110, 111]);
3132            assert_eq!(tower.vote_state.root_slot, Some(0));
3133        }
3134
3135        // Prepare simulated validator restart!
3136        let mut vote_simulator = VoteSimulator::new(2);
3137        let other_vote_account = vote_simulator.vote_pubkeys[1];
3138        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3139        let total_stake = bank0.total_epoch_stake();
3140        let forks = tr(0)
3141            / (tr(1)
3142                / (tr(2)
3143                    / tr(10)
3144                    / (tr(43)
3145                        / (tr(44)
3146                            // Minor fork 2
3147                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3148                            / (tr(110) / tr(111))))));
3149        let replayed_root_slot = 44;
3150
3151        // Fill the BankForks according to the above fork structure
3152        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3153        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3154            fork_progress.fork_stats.computed = true;
3155        }
3156
3157        // Prepare the tower restart: rebuild the SlotHistory the restarted validator would replay
3158        let mut slot_history = SlotHistory::default();
3159        vote_simulator.set_root(replayed_root_slot);
3160        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3161        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3162        for slot in &[0, 1, 2, 43, replayed_root_slot] {
3163            slot_history.add(*slot);
3164        }
3165        let mut tower = tower
3166            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3167            .unwrap();
3168
3169        assert_eq!(tower.voted_slots(), vec![45, 46, 47, 48, 49]);
3170
3171        // Trying to switch to another fork at 110 should fail
3172        assert_eq!(
3173            tower.check_switch_threshold(
3174                110,
3175                &ancestors,
3176                &descendants,
3177                &vote_simulator.progress,
3178                total_stake,
3179                bank0.epoch_vote_accounts(0).unwrap(),
3180                &vote_simulator.latest_validator_votes_for_frozen_banks,
3181                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3182            ),
3183            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3184        );
3185
3186        // Add lockout_interval which should be excluded
3187        vote_simulator.simulate_lockout_interval(111, (45, 50), &other_vote_account);
3188        assert_eq!(
3189            tower.check_switch_threshold(
3190                110,
3191                &ancestors,
3192                &descendants,
3193                &vote_simulator.progress,
3194                total_stake,
3195                bank0.epoch_vote_accounts(0).unwrap(),
3196                &vote_simulator.latest_validator_votes_for_frozen_banks,
3197                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3198            ),
3199            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3200        );
3201
3202        // Add lockout_interval which should not be excluded
3203        vote_simulator.simulate_lockout_interval(111, (110, 200), &other_vote_account);
3204        assert_eq!(
3205            tower.check_switch_threshold(
3206                110,
3207                &ancestors,
3208                &descendants,
3209                &vote_simulator.progress,
3210                total_stake,
3211                bank0.epoch_vote_accounts(0).unwrap(),
3212                &vote_simulator.latest_validator_votes_for_frozen_banks,
3213                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3214            ),
3215            SwitchForkDecision::SwitchProof(Hash::default())
3216        );
3217
3218        tower.record_vote(110, Hash::default());
3219        tower.record_vote(111, Hash::default());
3220        assert_eq!(tower.voted_slots(), vec![110, 111]);
3221        assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot));
3222    }
3223
3224    #[test]
3225    fn test_load_tower_ok() {
3226        let (tower, loaded) =
3227            run_test_load_tower_snapshot(|tower, pubkey| tower.node_pubkey = *pubkey, |_| ());
3228        let loaded = loaded.unwrap();
3229        assert_eq!(loaded, tower);
3230        assert_eq!(tower.threshold_depth, 10);
3231        assert!((tower.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3232        assert_eq!(loaded.threshold_depth, 10);
3233        assert!((loaded.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3234    }
3235
3236    #[test]
3237    fn test_load_tower_wrong_identity() {
3238        let identity_keypair = Arc::new(Keypair::new());
3239        let tower = Tower::default();
3240        let tower_storage = FileTowerStorage::default();
3241        assert_matches!(
3242            tower.save(&tower_storage, &identity_keypair),
3243            Err(TowerError::WrongTower(_))
3244        )
3245    }
3246
3247    #[test]
3248    fn test_load_tower_invalid_signature() {
3249        let (_, loaded) = run_test_load_tower_snapshot(
3250            |tower, pubkey| tower.node_pubkey = *pubkey,
3251            |path| {
3252                let mut file = OpenOptions::new()
3253                    .read(true)
3254                    .write(true)
3255                    .open(path)
3256                    .unwrap();
3257                // 4 is the offset into SavedTowerVersions for the signature
3258                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3259                let mut buf = [0u8];
3260                assert_eq!(file.read(&mut buf).unwrap(), 1);
3261                buf[0] = !buf[0];
3262                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3263                assert_eq!(file.write(&buf).unwrap(), 1);
3264            },
3265        );
3266        assert_matches!(loaded, Err(TowerError::InvalidSignature))
3267    }
3268
3269    #[test]
3270    fn test_load_tower_deser_failure() {
3271        let (_, loaded) = run_test_load_tower_snapshot(
3272            |tower, pubkey| tower.node_pubkey = *pubkey,
3273            |path| {
3274                OpenOptions::new()
3275                    .write(true)
3276                    .truncate(true)
3277                    .open(path)
3278                    .unwrap_or_else(|_| panic!("Failed to truncate file: {path:?}"));
3279            },
3280        );
3281        assert_matches!(loaded, Err(TowerError::SerializeError(_)))
3282    }
3283
3284    #[test]
3285    fn test_load_tower_missing() {
3286        let (_, loaded) = run_test_load_tower_snapshot(
3287            |tower, pubkey| tower.node_pubkey = *pubkey,
3288            |path| {
3289                remove_file(path).unwrap();
3290            },
3291        );
3292        assert_matches!(loaded, Err(TowerError::IoError(_)))
3293    }
3294
3295    #[test]
3296    fn test_reconcile_blockstore_roots_with_tower_normal() {
3297        agave_logger::setup();
3298        let ledger_path = get_tmp_ledger_path_auto_delete!();
3299        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3300
3301        let (shreds, _) = make_slot_entries(1, 0, 42);
3302        blockstore.insert_shreds(shreds, None, false).unwrap();
3303        let (shreds, _) = make_slot_entries(3, 1, 42);
3304        blockstore.insert_shreds(shreds, None, false).unwrap();
3305        let (shreds, _) = make_slot_entries(4, 1, 42);
3306        blockstore.insert_shreds(shreds, None, false).unwrap();
3307        assert!(!blockstore.is_root(0));
3308        assert!(!blockstore.is_root(1));
3309        assert!(!blockstore.is_root(3));
3310        assert!(!blockstore.is_root(4));
3311
3312        let mut tower = Tower::default();
3313        tower.vote_state.root_slot = Some(4);
3314        reconcile_blockstore_roots_with_external_source(
3315            ExternalRootSource::Tower(tower.root()),
3316            &blockstore,
3317            &mut blockstore.max_root(),
3318        )
3319        .unwrap();
3320
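        // Reconciling walks the tower root's ancestry (4 -> 1) back toward the existing
        // max root and marks those slots as blockstore roots; slot 0 (the old max root)
        // and slot 3, which is not an ancestor of the tower root, are left untouched.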
3321        assert!(!blockstore.is_root(0));
3322        assert!(blockstore.is_root(1));
3323        assert!(!blockstore.is_root(3));
3324        assert!(blockstore.is_root(4));
3325    }
3326
3327    #[test]
3328    #[should_panic(
3329        expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \
3330                    1) from external root (Tower(4))!?"
3331    )]
3332    fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() {
3333        agave_logger::setup();
3334        let ledger_path = get_tmp_ledger_path_auto_delete!();
3335        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3336
3337        let (shreds, _) = make_slot_entries(1, 0, 42);
3338        blockstore.insert_shreds(shreds, None, false).unwrap();
3339        let (shreds, _) = make_slot_entries(3, 1, 42);
3340        blockstore.insert_shreds(shreds, None, false).unwrap();
3341        let (shreds, _) = make_slot_entries(4, 1, 42);
3342        blockstore.insert_shreds(shreds, None, false).unwrap();
3343        blockstore.set_roots(std::iter::once(&3)).unwrap();
3344        assert!(!blockstore.is_root(0));
3345        assert!(!blockstore.is_root(1));
3346        assert!(blockstore.is_root(3));
3347        assert!(!blockstore.is_root(4));
3348
3349        let mut tower = Tower::default();
3350        tower.vote_state.root_slot = Some(4);
3351        reconcile_blockstore_roots_with_external_source(
3352            ExternalRootSource::Tower(tower.root()),
3353            &blockstore,
3354            &mut blockstore.max_root(),
3355        )
3356        .unwrap();
3357    }
3358
3359    #[test]
3360    fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
3361        agave_logger::setup();
3362        let ledger_path = get_tmp_ledger_path_auto_delete!();
3363        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3364
3365        let (shreds, _) = make_slot_entries(1, 0, 42);
3366        blockstore.insert_shreds(shreds, None, false).unwrap();
3367        let (shreds, _) = make_slot_entries(3, 1, 42);
3368        blockstore.insert_shreds(shreds, None, false).unwrap();
3369        assert!(!blockstore.is_root(0));
3370        assert!(!blockstore.is_root(1));
3371        assert!(!blockstore.is_root(3));
3372
3373        let mut tower = Tower::default();
3374        tower.vote_state.root_slot = Some(4);
3375        assert_eq!(blockstore.max_root(), 0);
3376        reconcile_blockstore_roots_with_external_source(
3377            ExternalRootSource::Tower(tower.root()),
3378            &blockstore,
3379            &mut blockstore.max_root(),
3380        )
3381        .unwrap();
3382        assert_eq!(blockstore.max_root(), 0);
3383    }
3384
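    // The adjust_lockouts_after_replay tests below restore a tower against a SlotHistory
    // rebuilt from the ledger: votes the history proves are already rooted get popped,
    // newer or locally-unknown votes are kept (possibly as stray votes), and towers that
    // cannot be reconciled surface as errors.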
3385    #[test]
3386    fn test_adjust_lockouts_after_replay_future_slots() {
3387        agave_logger::setup();
3388        let mut tower = Tower::new_for_tests(10, 0.9);
3389        tower.record_vote(0, Hash::default());
3390        tower.record_vote(1, Hash::default());
3391        tower.record_vote(2, Hash::default());
3392        tower.record_vote(3, Hash::default());
3393
3394        let mut slot_history = SlotHistory::default();
3395        slot_history.add(0);
3396        slot_history.add(1);
3397
3398        let replayed_root_slot = 1;
3399        tower = tower
3400            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3401            .unwrap();
3402
3403        assert_eq!(tower.voted_slots(), vec![2, 3]);
3404        assert_eq!(tower.root(), replayed_root_slot);
3405
3406        tower = tower
3407            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3408            .unwrap();
3409        assert_eq!(tower.voted_slots(), vec![2, 3]);
3410        assert_eq!(tower.root(), replayed_root_slot);
3411    }
3412
3413    #[test]
3414    fn test_adjust_lockouts_after_replay_not_found_slots() {
3415        let mut tower = Tower::new_for_tests(10, 0.9);
3416        tower.record_vote(0, Hash::default());
3417        tower.record_vote(1, Hash::default());
3418        tower.record_vote(2, Hash::default());
3419        tower.record_vote(3, Hash::default());
3420
3421        let mut slot_history = SlotHistory::default();
3422        slot_history.add(0);
3423        slot_history.add(1);
3424        slot_history.add(4);
3425
3426        let replayed_root_slot = 4;
3427        tower = tower
3428            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3429            .unwrap();
3430
3431        assert_eq!(tower.voted_slots(), vec![2, 3]);
3432        assert_eq!(tower.root(), replayed_root_slot);
3433    }
3434
3435    #[test]
3436    fn test_adjust_lockouts_after_replay_all_rooted_with_no_too_old() {
3437        let mut tower = Tower::new_for_tests(10, 0.9);
3438        tower.record_vote(0, Hash::default());
3439        tower.record_vote(1, Hash::default());
3440        tower.record_vote(2, Hash::default());
3441
3442        let mut slot_history = SlotHistory::default();
3443        slot_history.add(0);
3444        slot_history.add(1);
3445        slot_history.add(2);
3446        slot_history.add(3);
3447        slot_history.add(4);
3448        slot_history.add(5);
3449
3450        let replayed_root_slot = 5;
3451        tower = tower
3452            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3453            .unwrap();
3454
3455        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3456        assert_eq!(tower.root(), replayed_root_slot);
3457        assert_eq!(tower.stray_restored_slot, None);
3458    }
3459
3460    #[test]
3461    fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
3462        use solana_slot_history::MAX_ENTRIES;
3463
3464        let mut tower = Tower::new_for_tests(10, 0.9);
3465        tower.record_vote(0, Hash::default());
3466        tower.record_vote(1, Hash::default());
3467        tower.record_vote(2, Hash::default());
3468
3469        let mut slot_history = SlotHistory::default();
3470        slot_history.add(0);
3471        slot_history.add(1);
3472        slot_history.add(2);
3473        slot_history.add(MAX_ENTRIES);
3474
3475        tower = tower
3476            .adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
3477            .unwrap();
3478        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3479        assert_eq!(tower.root(), MAX_ENTRIES);
3480    }
3481
3482    #[test]
3483    fn test_adjust_lockouts_after_replay_anchored_future_slots() {
3484        let mut tower = Tower::new_for_tests(10, 0.9);
3485        tower.record_vote(0, Hash::default());
3486        tower.record_vote(1, Hash::default());
3487        tower.record_vote(2, Hash::default());
3488        tower.record_vote(3, Hash::default());
3489        tower.record_vote(4, Hash::default());
3490
3491        let mut slot_history = SlotHistory::default();
3492        slot_history.add(0);
3493        slot_history.add(1);
3494        slot_history.add(2);
3495
3496        let replayed_root_slot = 2;
3497        tower = tower
3498            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3499            .unwrap();
3500
3501        assert_eq!(tower.voted_slots(), vec![3, 4]);
3502        assert_eq!(tower.root(), replayed_root_slot);
3503    }
3504
3505    #[test]
3506    fn test_adjust_lockouts_after_replay_all_not_found() {
3507        let mut tower = Tower::new_for_tests(10, 0.9);
3508        tower.record_vote(5, Hash::default());
3509        tower.record_vote(6, Hash::default());
3510
3511        let mut slot_history = SlotHistory::default();
3512        slot_history.add(0);
3513        slot_history.add(1);
3514        slot_history.add(2);
3515        slot_history.add(7);
3516
3517        let replayed_root_slot = 7;
3518        tower = tower
3519            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3520            .unwrap();
3521
3522        assert_eq!(tower.voted_slots(), vec![5, 6]);
3523        assert_eq!(tower.root(), replayed_root_slot);
3524    }
3525
3526    #[test]
3527    fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() {
3528        let mut tower = Tower::new_for_tests(10, 0.9);
3529        tower.vote_state.root_slot = Some(4);
3530        tower.record_vote(5, Hash::default());
3531        tower.record_vote(6, Hash::default());
3532
3533        let mut slot_history = SlotHistory::default();
3534        slot_history.add(0);
3535        slot_history.add(1);
3536        slot_history.add(2);
3537        slot_history.add(7);
3538
3539        let replayed_root_slot = 7;
3540        let result = tower.adjust_lockouts_after_replay(replayed_root_slot, &slot_history);
3541
3542        assert_eq!(
3543            format!("{}", result.unwrap_err()),
3544            "The tower is fatally inconsistent with blockstore: no common slot for rooted tower"
3545        );
3546    }
3547
3548    #[test]
3549    fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() {
3550        let mut tower = Tower::new_for_tests(10, 0.9);
3551        tower.vote_state.root_slot = Some(2);
3552        tower.record_vote(3, Hash::default());
3553        tower.record_vote(4, Hash::default());
3554        tower.record_vote(5, Hash::default());
3555
3556        let mut slot_history = SlotHistory::default();
3557        slot_history.add(0);
3558        slot_history.add(1);
3559        slot_history.add(2);
3560
3561        let replayed_root_slot = 2;
3562        tower = tower
3563            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3564            .unwrap();
3565
3566        assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
3567        assert_eq!(tower.root(), replayed_root_slot);
3568    }
3569
3570    #[test]
3571    fn test_adjust_lockouts_after_replay_empty() {
3572        let mut tower = Tower::new_for_tests(10, 0.9);
3573
3574        let mut slot_history = SlotHistory::default();
3575        slot_history.add(0);
3576
3577        let replayed_root_slot = 0;
3578        tower = tower
3579            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3580            .unwrap();
3581
3582        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3583        assert_eq!(tower.root(), replayed_root_slot);
3584    }
3585
3586    #[test]
3587    fn test_adjust_lockouts_after_replay_too_old_tower() {
3588        use solana_slot_history::MAX_ENTRIES;
3589
3590        let mut tower = Tower::new_for_tests(10, 0.9);
3591        tower.record_vote(0, Hash::default());
3592
3593        let mut slot_history = SlotHistory::default();
3594        slot_history.add(0);
3595        slot_history.add(MAX_ENTRIES);
3596
3597        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3598        assert_eq!(
3599            format!("{}", result.unwrap_err()),
3600            "The tower is too old: newest slot in tower (0) << oldest slot in available history \
3601             (1)"
3602        );
3603    }
3604
3605    #[test]
3606    fn test_adjust_lockouts_after_replay_time_warped() {
3607        let mut tower = Tower::new_for_tests(10, 0.9);
3608        tower.vote_state.votes.push_back(Lockout::new(1));
3609        tower.vote_state.votes.push_back(Lockout::new(0));
3610        let vote = Vote::new(vec![0], Hash::default());
3611        tower.last_vote = VoteTransaction::from(vote);
3612
3613        let mut slot_history = SlotHistory::default();
3614        slot_history.add(0);
3615
3616        let result = tower.adjust_lockouts_after_replay(0, &slot_history);
3617        assert_eq!(
3618            format!("{}", result.unwrap_err()),
3619            "The tower is fatally inconsistent with blockstore: time warped?"
3620        );
3621    }
3622
3623    #[test]
3624    fn test_adjust_lockouts_after_replay_diverged_ancestor() {
3625        let mut tower = Tower::new_for_tests(10, 0.9);
3626        tower.vote_state.votes.push_back(Lockout::new(1));
3627        tower.vote_state.votes.push_back(Lockout::new(2));
3628        let vote = Vote::new(vec![2], Hash::default());
3629        tower.last_vote = VoteTransaction::from(vote);
3630
3631        let mut slot_history = SlotHistory::default();
3632        slot_history.add(0);
3633        slot_history.add(2);
3634
3635        let result = tower.adjust_lockouts_after_replay(2, &slot_history);
3636        assert_eq!(
3637            format!("{}", result.unwrap_err()),
3638            "The tower is fatally inconsistent with blockstore: diverged ancestor?"
3639        );
3640    }
3641
3642    #[test]
3643    fn test_adjust_lockouts_after_replay_out_of_order() {
3644        use solana_slot_history::MAX_ENTRIES;
3645
3646        let mut tower = Tower::new_for_tests(10, 0.9);
3647        tower
3648            .vote_state
3649            .votes
3650            .push_back(Lockout::new(MAX_ENTRIES - 1));
3651        tower.vote_state.votes.push_back(Lockout::new(0));
3652        tower.vote_state.votes.push_back(Lockout::new(1));
3653        let vote = Vote::new(vec![1], Hash::default());
3654        tower.last_vote = VoteTransaction::from(vote);
3655
3656        let mut slot_history = SlotHistory::default();
3657        slot_history.add(MAX_ENTRIES);
3658
3659        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3660        assert_eq!(
3661            format!("{}", result.unwrap_err()),
3662            "The tower is fatally inconsistent with blockstore: not too old once after got too \
3663             old?"
3664        );
3665    }
3666
3667    #[test]
3668    #[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
3669    fn test_adjust_lockouts_after_replay_reversed_votes() {
3670        let mut tower = Tower::new_for_tests(10, 0.9);
3671        tower.vote_state.votes.push_back(Lockout::new(2));
3672        tower.vote_state.votes.push_back(Lockout::new(1));
3673        let vote = Vote::new(vec![1], Hash::default());
3674        tower.last_vote = VoteTransaction::from(vote);
3675
3676        let mut slot_history = SlotHistory::default();
3677        slot_history.add(0);
3678        slot_history.add(2);
3679
3680        tower
3681            .adjust_lockouts_after_replay(2, &slot_history)
3682            .unwrap();
3683    }
3684
3685    #[test]
3686    #[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
3687    fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
3688        let mut tower = Tower::new_for_tests(10, 0.9);
3689        tower.vote_state.votes.push_back(Lockout::new(2));
3690        tower.vote_state.votes.push_back(Lockout::new(3));
3691        tower.vote_state.votes.push_back(Lockout::new(3));
3692        let vote = Vote::new(vec![3], Hash::default());
3693        tower.last_vote = VoteTransaction::from(vote);
3694
3695        let mut slot_history = SlotHistory::default();
3696        slot_history.add(0);
3697        slot_history.add(2);
3698
3699        tower
3700            .adjust_lockouts_after_replay(2, &slot_history)
3701            .unwrap();
3702    }
3703
3704    #[test]
3705    fn test_adjust_lockouts_after_replay_vote_on_root() {
3706        let mut tower = Tower::new_for_tests(10, 0.9);
3707        tower.vote_state.root_slot = Some(42);
3708        tower.vote_state.votes.push_back(Lockout::new(42));
3709        tower.vote_state.votes.push_back(Lockout::new(43));
3710        tower.vote_state.votes.push_back(Lockout::new(44));
3711        let vote = Vote::new(vec![44], Hash::default());
3712        tower.last_vote = VoteTransaction::from(vote);
3713
3714        let mut slot_history = SlotHistory::default();
3715        slot_history.add(42);
3716
3717        let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
3718        assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
3719    }
3720
3721    #[test]
3722    fn test_adjust_lockouts_after_replay_vote_on_genesis() {
3723        let mut tower = Tower::new_for_tests(10, 0.9);
3724        tower.vote_state.votes.push_back(Lockout::new(0));
3725        let vote = Vote::new(vec![0], Hash::default());
3726        tower.last_vote = VoteTransaction::from(vote);
3727
3728        let mut slot_history = SlotHistory::default();
3729        slot_history.add(0);
3730
3731        assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
3732    }
3733
3734    #[test]
3735    fn test_adjust_lockouts_after_replay_future_tower() {
3736        let mut tower = Tower::new_for_tests(10, 0.9);
3737        tower.vote_state.votes.push_back(Lockout::new(13));
3738        tower.vote_state.votes.push_back(Lockout::new(14));
3739        let vote = Vote::new(vec![14], Hash::default());
3740        tower.last_vote = VoteTransaction::from(vote);
3741        tower.initialize_root(12);
3742
3743        let mut slot_history = SlotHistory::default();
3744        slot_history.add(0);
3745        slot_history.add(2);
3746
3747        let tower = tower
3748            .adjust_lockouts_after_replay(2, &slot_history)
3749            .unwrap();
3750        assert_eq!(tower.root(), 12);
3751        assert_eq!(tower.voted_slots(), vec![13, 14]);
3752        assert_eq!(tower.stray_restored_slot, Some(14));
3753    }
3754
3755    #[test]
3756    fn test_default_tower_has_no_stray_last_vote() {
3757        let tower = Tower::default();
3758        assert!(!tower.is_stray_last_vote());
3759    }
3760
3761    #[test]
3762    fn test_switch_threshold_common_ancestor() {
3763        let mut vote_simulator = VoteSimulator::new(2);
3764        let other_vote_account = vote_simulator.vote_pubkeys[1];
3765        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3766        let total_stake = bank0.total_epoch_stake();
3767        assert_eq!(
3768            total_stake,
3769            vote_simulator.validator_keypairs.len() as u64 * 10_000
3770        );
3771
3772        // Create the tree of banks
3773        //                                       /- 50
3774        //          /- 51    /- 45 - 46 - 47 - 48 - 49
3775        // 0 - 1 - 2 - 43 - 44 - 113
3776        //                    \- 110 - 111
3777        //                           \- 112
3778        let forks = tr(0)
3779            / (tr(1)
3780                / (tr(2)
3781                    / tr(51)
3782                    / (tr(43)
3783                        / (tr(44)
3784                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / tr(49) / tr(50)))))
3785                            / tr(113)
3786                            / (tr(110) / tr(111) / tr(112))))));
3787        let switch_slot = 111;
3788
3789        // Fill the BankForks according to the above fork structure
3790        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3791        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3792            fork_progress.fork_stats.computed = true;
3793        }
3794
3795        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3796        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3797        let mut tower = Tower::default();
3798
3799        tower.record_vote(43, Hash::default());
3800        tower.record_vote(44, Hash::default());
3801        tower.record_vote(45, Hash::default());
3802        tower.record_vote(46, Hash::default());
3803        tower.record_vote(47, Hash::default());
3804        tower.record_vote(48, Hash::default());
3805        tower.record_vote(49, Hash::default());
3806
3807        // Candidate slot 50 should *not* work
3808        vote_simulator.simulate_lockout_interval(50, (10, 49), &other_vote_account);
3809        assert_eq!(
3810            tower.check_switch_threshold(
3811                switch_slot,
3812                &ancestors,
3813                &descendants,
3814                &vote_simulator.progress,
3815                total_stake,
3816                bank0.epoch_vote_accounts(0).unwrap(),
3817                &vote_simulator.latest_validator_votes_for_frozen_banks,
3818                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3819            ),
3820            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3821        );
3822        vote_simulator.clear_lockout_intervals(50);
3823
3824        // Lockout intervals on 51, 111, and 113 each count toward a valid switch proof
3825        for candidate_slot in [51, 111, 113] {
3826            vote_simulator.simulate_lockout_interval(candidate_slot, (10, 49), &other_vote_account);
3827            assert_eq!(
3828                tower.check_switch_threshold(
3829                    switch_slot,
3830                    &ancestors,
3831                    &descendants,
3832                    &vote_simulator.progress,
3833                    total_stake,
3834                    bank0.epoch_vote_accounts(0).unwrap(),
3835                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3836                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3837                ),
3838                SwitchForkDecision::SwitchProof(Hash::default())
3839            );
3840            vote_simulator.clear_lockout_intervals(candidate_slot);
3841        }
3842
3843        // Same checks for gossip votes
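        // Votes observed via gossip are tracked in LatestValidatorVotesForFrozenBanks and
        // count toward the switch threshold just like the replayed lockout intervals above.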
3844        let insert_gossip_vote = |vote_simulator: &mut VoteSimulator, slot| {
3845            vote_simulator
3846                .latest_validator_votes_for_frozen_banks
3847                .check_add_vote(
3848                    other_vote_account,
3849                    slot,
3850                    Some(
3851                        vote_simulator
3852                            .bank_forks
3853                            .read()
3854                            .unwrap()
3855                            .get(slot)
3856                            .unwrap()
3857                            .hash(),
3858                    ),
3859                    false,
3860                );
3861        };
3862
3863        // Candidate slot 50 should *not* work
3864        insert_gossip_vote(&mut vote_simulator, 50);
3865        assert_eq!(
3866            tower.check_switch_threshold(
3867                switch_slot,
3868                &ancestors,
3869                &descendants,
3870                &vote_simulator.progress,
3871                total_stake,
3872                bank0.epoch_vote_accounts(0).unwrap(),
3873                &vote_simulator.latest_validator_votes_for_frozen_banks,
3874                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3875            ),
3876            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3877        );
3878        vote_simulator.latest_validator_votes_for_frozen_banks =
3879            LatestValidatorVotesForFrozenBanks::default();
3880
3881        // 51, 110, 111, 112, and 113 are all valid
3882        // Note: We can use 110 here since gossip votes aren't limited to leaf banks
3883        for candidate_slot in [51, 110, 111, 112, 113] {
3884            insert_gossip_vote(&mut vote_simulator, candidate_slot);
3885            assert_eq!(
3886                tower.check_switch_threshold(
3887                    switch_slot,
3888                    &ancestors,
3889                    &descendants,
3890                    &vote_simulator.progress,
3891                    total_stake,
3892                    bank0.epoch_vote_accounts(0).unwrap(),
3893                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3894                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3895                ),
3896                SwitchForkDecision::SwitchProof(Hash::default())
3897            );
3898            vote_simulator.latest_validator_votes_for_frozen_banks =
3899                LatestValidatorVotesForFrozenBanks::default();
3900        }
3901    }
3902}