solana_core/
consensus.rs

1pub mod fork_choice;
2pub mod heaviest_subtree_fork_choice;
3pub(crate) mod latest_validator_votes_for_frozen_banks;
4pub mod progress_map;
5mod tower1_14_11;
6mod tower1_7_14;
7pub mod tower_storage;
8pub(crate) mod tower_vote_state;
9pub mod tree_diff;
10pub mod vote_stake_tracker;
11
12use {
13    self::{
14        heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
15        latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
16        progress_map::{LockoutIntervals, ProgressMap},
17        tower1_14_11::Tower1_14_11,
18        tower1_7_14::Tower1_7_14,
19        tower_storage::{SavedTower, SavedTowerVersions, TowerStorage},
20        tower_vote_state::TowerVoteState,
21    },
22    crate::replay_stage::DUPLICATE_THRESHOLD,
23    chrono::prelude::*,
24    solana_clock::{Slot, UnixTimestamp},
25    solana_hash::Hash,
26    solana_instruction::Instruction,
27    solana_keypair::Keypair,
28    solana_ledger::{
29        ancestor_iterator::AncestorIterator,
30        blockstore::{self, Blockstore},
31    },
32    solana_pubkey::Pubkey,
33    solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE},
34    solana_slot_history::{Check, SlotHistory},
35    solana_vote::{vote_account::VoteAccountsHashMap, vote_transaction::VoteTransaction},
36    solana_vote_program::{
37        vote_error::VoteError,
38        vote_instruction,
39        vote_state::{
40            BlockTimestamp, Lockout, TowerSync, Vote, VoteState1_14_11, VoteStateUpdate,
41            MAX_LOCKOUT_HISTORY,
42        },
43    },
44    std::{
45        cmp::Ordering,
46        collections::{HashMap, HashSet},
47        ops::{
48            Bound::{Included, Unbounded},
49            Deref,
50        },
51    },
52    thiserror::Error,
53};
54
55#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
56pub enum ThresholdDecision {
57    #[default]
58    PassedThreshold,
59    FailedThreshold(/* vote depth */ u64, /* Observed stake */ u64),
60}
61
62impl ThresholdDecision {
63    pub fn passed(&self) -> bool {
64        matches!(self, Self::PassedThreshold)
65    }
66}
67
68#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
69#[derive(PartialEq, Eq, Clone, Debug)]
70pub enum SwitchForkDecision {
71    SwitchProof(Hash),
72    SameFork,
73    FailedSwitchThreshold(
74        /* Switch proof stake */ u64,
75        /* Total stake */ u64,
76    ),
77    FailedSwitchDuplicateRollback(Slot),
78}
79
80impl SwitchForkDecision {
81    pub fn to_vote_instruction(
82        &self,
83        vote: VoteTransaction,
84        vote_account_pubkey: &Pubkey,
85        authorized_voter_pubkey: &Pubkey,
86    ) -> Option<Instruction> {
87        match (self, vote) {
88            (SwitchForkDecision::FailedSwitchThreshold(_, total_stake), _) => {
89                assert_ne!(*total_stake, 0);
90                None
91            }
92            (SwitchForkDecision::FailedSwitchDuplicateRollback(_), _) => None,
93            (SwitchForkDecision::SameFork, VoteTransaction::Vote(v)) => Some(
94                vote_instruction::vote(vote_account_pubkey, authorized_voter_pubkey, v),
95            ),
96            (SwitchForkDecision::SameFork, VoteTransaction::VoteStateUpdate(v)) => {
97                Some(vote_instruction::update_vote_state(
98                    vote_account_pubkey,
99                    authorized_voter_pubkey,
100                    v,
101                ))
102            }
103            (SwitchForkDecision::SameFork, VoteTransaction::TowerSync(t)) => Some(
104                vote_instruction::tower_sync(vote_account_pubkey, authorized_voter_pubkey, t),
105            ),
106            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::Vote(v)) => {
107                Some(vote_instruction::vote_switch(
108                    vote_account_pubkey,
109                    authorized_voter_pubkey,
110                    v,
111                    *switch_proof_hash,
112                ))
113            }
114            (
115                SwitchForkDecision::SwitchProof(switch_proof_hash),
116                VoteTransaction::VoteStateUpdate(v),
117            ) => Some(vote_instruction::update_vote_state_switch(
118                vote_account_pubkey,
119                authorized_voter_pubkey,
120                v,
121                *switch_proof_hash,
122            )),
123            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::TowerSync(t)) => {
124                Some(vote_instruction::tower_sync_switch(
125                    vote_account_pubkey,
126                    authorized_voter_pubkey,
127                    t,
128                    *switch_proof_hash,
129                ))
130            }
131            (SwitchForkDecision::SameFork, VoteTransaction::CompactVoteStateUpdate(v)) => {
132                Some(vote_instruction::compact_update_vote_state(
133                    vote_account_pubkey,
134                    authorized_voter_pubkey,
135                    v,
136                ))
137            }
138            (
139                SwitchForkDecision::SwitchProof(switch_proof_hash),
140                VoteTransaction::CompactVoteStateUpdate(v),
141            ) => Some(vote_instruction::compact_update_vote_state_switch(
142                vote_account_pubkey,
143                authorized_voter_pubkey,
144                v,
145                *switch_proof_hash,
146            )),
147        }
148    }
149
150    pub fn can_vote(&self) -> bool {
151        match self {
152            SwitchForkDecision::FailedSwitchThreshold(_, _) => false,
153            SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false,
154            SwitchForkDecision::SameFork => true,
155            SwitchForkDecision::SwitchProof(_) => true,
156        }
157    }
158}
159
160const VOTE_THRESHOLD_DEPTH_SHALLOW: usize = 4;
161pub const VOTE_THRESHOLD_DEPTH: usize = 8;
162pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
163
164pub type Result<T> = std::result::Result<T, TowerError>;
165
166pub type Stake = u64;
167pub type VotedStakes = HashMap<Slot, Stake>;
168pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
169
170pub(crate) struct ComputedBankState {
171    pub voted_stakes: VotedStakes,
172    pub total_stake: Stake,
173    pub fork_stake: Stake,
174    // Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
175    // keyed by end of the range
176    pub lockout_intervals: LockoutIntervals,
177    pub my_latest_landed_vote: Option<Slot>,
178}
179
180#[derive(Debug, PartialEq, Clone)]
181#[allow(clippy::large_enum_variant)]
182pub enum TowerVersions {
183    V1_7_14(Tower1_7_14),
184    V1_14_11(Tower1_14_11),
185    Current(Tower),
186}
187
188impl TowerVersions {
189    pub fn new_current(tower: Tower) -> Self {
190        Self::Current(tower)
191    }
192
193    pub fn convert_to_current(self) -> Tower {
194        match self {
195            TowerVersions::V1_7_14(tower) => tower.into(),
196            TowerVersions::V1_14_11(tower) => tower.into(),
197            TowerVersions::Current(tower) => tower,
198        }
199    }
200}
201
202#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
203#[derive(PartialEq, Eq, Debug, Default, Clone, Copy)]
204pub(crate) enum BlockhashStatus {
205    /// No vote since restart
206    #[default]
207    Uninitialized,
208    /// Non voting validator
209    NonVoting,
210    /// Hot spare validator
211    HotSpare,
212    /// Successfully generated vote tx with blockhash
213    Blockhash(Hash),
214}
215
216#[derive(Clone, Debug, PartialEq)]
217pub struct Tower {
218    pub node_pubkey: Pubkey,
219    pub(crate) threshold_depth: usize,
220    threshold_size: f64,
221    pub(crate) vote_state: TowerVoteState,
222    last_vote: VoteTransaction,
223    // The blockhash used in the last vote transaction, may or may not equal the
224    // blockhash of the voted block itself, depending if the vote slot was refreshed.
225    // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in
226    //  block 10, in  which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5.
227    // For non voting validators this is NonVoting
228    last_vote_tx_blockhash: BlockhashStatus,
229    last_timestamp: BlockTimestamp,
230    // Restored last voted slot which cannot be found in SlotHistory at replayed root
231    // (This is a special field for slashing-free validator restart with edge cases).
232    // This could be emptied after some time; but left intact indefinitely for easier
233    // implementation
234    // Further, stray slot can be stale or not. `Stale` here means whether given
235    // bank_forks (=~ ledger) lacks the slot or not.
236    stray_restored_slot: Option<Slot>,
237    pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
238}
239
240impl Default for Tower {
241    fn default() -> Self {
242        let mut tower = Self {
243            node_pubkey: Pubkey::default(),
244            threshold_depth: VOTE_THRESHOLD_DEPTH,
245            threshold_size: VOTE_THRESHOLD_SIZE,
246            vote_state: TowerVoteState::default(),
247            last_vote: VoteTransaction::from(TowerSync::default()),
248            last_timestamp: BlockTimestamp::default(),
249            last_vote_tx_blockhash: BlockhashStatus::default(),
250            stray_restored_slot: Option::default(),
251            last_switch_threshold_check: Option::default(),
252        };
253        // VoteState::root_slot is ensured to be Some in Tower
254        tower.vote_state.root_slot = Some(Slot::default());
255        tower
256    }
257}
258
259// Tower1_14_11 is the persisted data format for the Tower,
260// decoupling it from VoteState::Current.
261impl From<Tower> for Tower1_14_11 {
262    fn from(tower: Tower) -> Self {
263        Self {
264            node_pubkey: tower.node_pubkey,
265            threshold_depth: tower.threshold_depth,
266            threshold_size: tower.threshold_size,
267            vote_state: VoteState1_14_11::from(tower.vote_state),
268            last_vote: tower.last_vote,
269            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
270            last_timestamp: tower.last_timestamp,
271            stray_restored_slot: tower.stray_restored_slot,
272            last_switch_threshold_check: tower.last_switch_threshold_check,
273        }
274    }
275}
276
277// Tower1_14_11 is the persisted data format for the Tower,
278// decoupling it from VoteState::Current.
279impl From<Tower1_14_11> for Tower {
280    fn from(tower: Tower1_14_11) -> Self {
281        Self {
282            node_pubkey: tower.node_pubkey,
283            threshold_depth: tower.threshold_depth,
284            threshold_size: tower.threshold_size,
285            vote_state: TowerVoteState::from(tower.vote_state),
286            last_vote: tower.last_vote,
287            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
288            last_timestamp: tower.last_timestamp,
289            stray_restored_slot: tower.stray_restored_slot,
290            last_switch_threshold_check: tower.last_switch_threshold_check,
291        }
292    }
293}
294
295impl From<Tower1_7_14> for Tower {
296    fn from(tower: Tower1_7_14) -> Self {
297        let box_last_vote = VoteTransaction::from(tower.last_vote.clone());
298
299        Self {
300            node_pubkey: tower.node_pubkey,
301            threshold_depth: tower.threshold_depth,
302            threshold_size: tower.threshold_size,
303            vote_state: TowerVoteState::from(tower.vote_state),
304            last_vote: box_last_vote,
305            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
306            last_timestamp: tower.last_timestamp,
307            stray_restored_slot: tower.stray_restored_slot,
308            last_switch_threshold_check: tower.last_switch_threshold_check,
309        }
310    }
311}
312
313impl Tower {
314    pub fn new(
315        node_pubkey: &Pubkey,
316        vote_account_pubkey: &Pubkey,
317        root: Slot,
318        bank: &Bank,
319    ) -> Self {
320        let mut tower = Tower {
321            node_pubkey: *node_pubkey,
322            ..Tower::default()
323        };
324        tower.initialize_lockouts_from_bank(vote_account_pubkey, root, bank);
325        tower
326    }
327
328    #[cfg(test)]
329    pub fn new_for_tests(threshold_depth: usize, threshold_size: f64) -> Self {
330        Self {
331            threshold_depth,
332            threshold_size,
333            ..Tower::default()
334        }
335    }
336
337    #[cfg(test)]
338    pub fn new_random(node_pubkey: Pubkey) -> Self {
339        use {rand::Rng, solana_vote_program::vote_state::VoteStateV3};
340
341        let mut rng = rand::thread_rng();
342        let root_slot = rng.gen();
343        let vote_state = VoteStateV3::new_rand_for_tests(node_pubkey, root_slot);
344        let last_vote = TowerSync::from(
345            vote_state
346                .votes
347                .iter()
348                .map(|lv| (lv.slot(), lv.confirmation_count()))
349                .collect::<Vec<_>>(),
350        );
351        Self {
352            node_pubkey,
353            vote_state: TowerVoteState::from(vote_state),
354            last_vote: VoteTransaction::from(last_vote),
355            ..Tower::default()
356        }
357    }
358
359    pub fn new_from_bankforks(
360        bank_forks: &BankForks,
361        node_pubkey: &Pubkey,
362        vote_account: &Pubkey,
363    ) -> Self {
364        let root_bank = bank_forks.root_bank();
365        let frozen_banks: Vec<_> = bank_forks
366            .frozen_banks()
367            .map(|(_slot, bank)| bank)
368            .collect();
369        let (_progress, heaviest_subtree_fork_choice) =
370            crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
371                root_bank.deref(),
372                frozen_banks,
373                node_pubkey,
374                vote_account,
375                vec![],
376            );
377        let root = root_bank.slot();
378
379        let (best_slot, best_hash) = heaviest_subtree_fork_choice.best_overall_slot();
380        let heaviest_bank = bank_forks
381            .get_with_checked_hash((best_slot, best_hash))
382            .expect(
383                "The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
384            );
385
386        Self::new(node_pubkey, vote_account, root, &heaviest_bank)
387    }
388
389    pub(crate) fn collect_vote_lockouts(
390        vote_account_pubkey: &Pubkey,
391        bank_slot: Slot,
392        vote_accounts: &VoteAccountsHashMap,
393        ancestors: &HashMap<Slot, HashSet<Slot>>,
394        get_frozen_hash: impl Fn(Slot) -> Option<Hash>,
395        latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
396    ) -> ComputedBankState {
397        let mut vote_slots = HashSet::new();
398        let mut voted_stakes = HashMap::new();
399        let mut total_stake = 0;
400
401        // Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
402        // keyed by end of the range
403        let mut lockout_intervals = LockoutIntervals::new();
404        let mut my_latest_landed_vote = None;
405        for (&key, (voted_stake, account)) in vote_accounts.iter() {
406            let voted_stake = *voted_stake;
407            if voted_stake == 0 {
408                continue;
409            }
410            trace!("{vote_account_pubkey} {key} with stake {voted_stake}");
411            let mut vote_state = TowerVoteState::from(account.vote_state_view());
412            for vote in &vote_state.votes {
413                lockout_intervals
414                    .entry(vote.last_locked_out_slot())
415                    .or_default()
416                    .push((vote.slot(), key));
417            }
418
419            if key == *vote_account_pubkey {
420                my_latest_landed_vote = vote_state.nth_recent_lockout(0).map(|l| l.slot());
421                debug!("vote state {vote_state:?}");
422                debug!(
423                    "observed slot {}",
424                    vote_state
425                        .nth_recent_lockout(0)
426                        .map(|l| l.slot())
427                        .unwrap_or(0) as i64
428                );
429                debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
430                datapoint_info!(
431                    "tower-observed",
432                    (
433                        "slot",
434                        vote_state
435                            .nth_recent_lockout(0)
436                            .map(|l| l.slot())
437                            .unwrap_or(0),
438                        i64
439                    ),
440                    ("root", vote_state.root_slot.unwrap_or(0), i64)
441                );
442            }
443            let start_root = vote_state.root_slot;
444
445            // Add the last vote to update the `heaviest_subtree_fork_choice`
446            if let Some(last_landed_voted_slot) = vote_state.last_voted_slot() {
447                latest_validator_votes_for_frozen_banks.check_add_vote(
448                    key,
449                    last_landed_voted_slot,
450                    get_frozen_hash(last_landed_voted_slot),
451                    true,
452                );
453            }
454
455            vote_state.process_next_vote_slot(bank_slot);
456
457            for vote in &vote_state.votes {
458                vote_slots.insert(vote.slot());
459            }
460
461            if start_root != vote_state.root_slot {
462                if let Some(root) = start_root {
463                    let vote =
464                        Lockout::new_with_confirmation_count(root, MAX_LOCKOUT_HISTORY as u32);
465                    trace!("ROOT: {}", vote.slot());
466                    vote_slots.insert(vote.slot());
467                }
468            }
469            if let Some(root) = vote_state.root_slot {
470                let vote = Lockout::new_with_confirmation_count(root, MAX_LOCKOUT_HISTORY as u32);
471                vote_slots.insert(vote.slot());
472            }
473
474            // The last vote in the vote stack is a simulated vote on bank_slot, which
475            // we added to the vote stack earlier in this function by calling process_vote().
476            // We don't want to update the ancestors stakes of this vote b/c it does not
477            // represent an actual vote by the validator.
478
479            // Note: It should not be possible for any vote state in this bank to have
480            // a vote for a slot >= bank_slot, so we are guaranteed that the last vote in
481            // this vote stack is the simulated vote, so this fetch should be sufficient
482            // to find the last unsimulated vote.
483            assert_eq!(
484                vote_state.nth_recent_lockout(0).map(|l| l.slot()),
485                Some(bank_slot)
486            );
487            if let Some(vote) = vote_state.nth_recent_lockout(1) {
488                // Update all the parents of this last vote with the stake of this vote account
489                Self::update_ancestor_voted_stakes(
490                    &mut voted_stakes,
491                    vote.slot(),
492                    voted_stake,
493                    ancestors,
494                );
495            }
496            total_stake += voted_stake;
497        }
498
499        // TODO: populate_ancestor_voted_stakes only adds zeros. Comment why
500        // that is necessary (if so).
501        Self::populate_ancestor_voted_stakes(&mut voted_stakes, vote_slots, ancestors);
502
503        // As commented above, since the votes at current bank_slot are
504        // simulated votes, the voted_stake for `bank_slot` is not populated.
505        // Therefore, we use the voted_stake for the parent of bank_slot as the
506        // `fork_stake` instead.
507        let fork_stake = ancestors
508            .get(&bank_slot)
509            .and_then(|ancestors| {
510                ancestors
511                    .iter()
512                    .max()
513                    .and_then(|parent| voted_stakes.get(parent))
514                    .copied()
515            })
516            .unwrap_or(0);
517
518        ComputedBankState {
519            voted_stakes,
520            total_stake,
521            fork_stake,
522            lockout_intervals,
523            my_latest_landed_vote,
524        }
525    }
526
527    #[cfg(test)]
528    fn is_slot_confirmed(
529        &self,
530        slot: Slot,
531        voted_stakes: &VotedStakes,
532        total_stake: Stake,
533    ) -> bool {
534        voted_stakes
535            .get(&slot)
536            .map(|stake| (*stake as f64 / total_stake as f64) > self.threshold_size)
537            .unwrap_or(false)
538    }
539
540    pub(crate) fn is_slot_duplicate_confirmed(
541        &self,
542        slot: Slot,
543        voted_stakes: &VotedStakes,
544        total_stake: Stake,
545    ) -> bool {
546        voted_stakes
547            .get(&slot)
548            .map(|stake| (*stake as f64 / total_stake as f64) > DUPLICATE_THRESHOLD)
549            .unwrap_or(false)
550    }
551
552    pub fn tower_slots(&self) -> Vec<Slot> {
553        self.vote_state.tower()
554    }
555
556    pub(crate) fn last_vote_tx_blockhash(&self) -> BlockhashStatus {
557        self.last_vote_tx_blockhash
558    }
559
560    pub fn refresh_last_vote_timestamp(&mut self, heaviest_slot_on_same_fork: Slot) {
561        let timestamp = if let Some(last_vote_timestamp) = self.last_vote.timestamp() {
562            // To avoid a refreshed vote tx getting caught in deduplication filters,
563            // we need to update timestamp. Increment by smallest amount to avoid skewing
564            // the Timestamp Oracle.
565            last_vote_timestamp.saturating_add(1)
566        } else {
567            // If the previous vote did not send a timestamp due to clock error,
568            // use the last good timestamp + 1
569            datapoint_info!(
570                "refresh-timestamp-missing",
571                ("heaviest-slot", heaviest_slot_on_same_fork, i64),
572                ("last-timestamp", self.last_timestamp.timestamp, i64),
573                ("last-slot", self.last_timestamp.slot, i64),
574            );
575            self.last_timestamp.timestamp.saturating_add(1)
576        };
577
578        if let Some(last_voted_slot) = self.last_vote.last_voted_slot() {
579            if heaviest_slot_on_same_fork <= last_voted_slot {
580                warn!(
581                    "Trying to refresh timestamp for vote on {last_voted_slot} using smaller \
582                     heaviest bank {heaviest_slot_on_same_fork}"
583                );
584                return;
585            }
586            self.last_timestamp = BlockTimestamp {
587                slot: last_voted_slot,
588                timestamp,
589            };
590            self.last_vote.set_timestamp(Some(timestamp));
591        } else {
592            warn!(
593                "Trying to refresh timestamp for last vote on heaviest bank on same fork \
594                 {heaviest_slot_on_same_fork}, but there is no vote to refresh"
595            );
596        }
597    }
598
599    pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) {
600        self.last_vote_tx_blockhash = BlockhashStatus::Blockhash(new_vote_tx_blockhash);
601    }
602
603    pub(crate) fn mark_last_vote_tx_blockhash_non_voting(&mut self) {
604        self.last_vote_tx_blockhash = BlockhashStatus::NonVoting;
605    }
606
607    pub(crate) fn mark_last_vote_tx_blockhash_hot_spare(&mut self) {
608        self.last_vote_tx_blockhash = BlockhashStatus::HotSpare;
609    }
610
611    pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
612        let vote_account = bank.get_vote_account(vote_account_pubkey)?;
613        vote_account.vote_state_view().last_voted_slot()
614    }
615
616    pub fn record_bank_vote(&mut self, bank: &Bank) -> Option<Slot> {
617        // Returns the new root if one is made after applying a vote for the given bank to
618        // `self.vote_state`
619        let block_id = bank.block_id().unwrap_or_else(|| {
620            // This can only happen for our leader bank
621            // Note: since the new shred format is yet to be rolled out to all clusters,
622            // this can also happen for non-leader banks. Once rolled out we can assert
623            // here that this is our leader bank.
624            Hash::default()
625        });
626        self.record_bank_vote_and_update_lockouts(
627            bank.slot(),
628            bank.hash(),
629            bank.feature_set
630                .is_active(&agave_feature_set::enable_tower_sync_ix::id()),
631            block_id,
632        )
633    }
634
635    /// If we've recently updated the vote state by applying a new vote
636    /// or syncing from a bank, generate the proper last_vote.
637    pub(crate) fn update_last_vote_from_vote_state(
638        &mut self,
639        vote_hash: Hash,
640        enable_tower_sync_ix: bool,
641        block_id: Hash,
642    ) {
643        let mut new_vote = if enable_tower_sync_ix {
644            VoteTransaction::from(TowerSync::new(
645                self.vote_state.votes.clone(),
646                self.vote_state.root_slot,
647                vote_hash,
648                block_id,
649            ))
650        } else {
651            VoteTransaction::from(VoteStateUpdate::new(
652                self.vote_state.votes.clone(),
653                self.vote_state.root_slot,
654                vote_hash,
655            ))
656        };
657
658        new_vote.set_timestamp(self.maybe_timestamp(self.last_voted_slot().unwrap_or_default()));
659        self.last_vote = new_vote;
660    }
661
662    fn record_bank_vote_and_update_lockouts(
663        &mut self,
664        vote_slot: Slot,
665        vote_hash: Hash,
666        enable_tower_sync_ix: bool,
667        block_id: Hash,
668    ) -> Option<Slot> {
669        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
670            if vote_slot <= last_voted_slot {
671                panic!(
672                    "Error while recording vote {} {} in local tower {:?}",
673                    vote_slot,
674                    vote_hash,
675                    VoteError::VoteTooOld
676                );
677            }
678        }
679
680        trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
681        let old_root = self.root();
682
683        self.vote_state.process_next_vote_slot(vote_slot);
684        self.update_last_vote_from_vote_state(vote_hash, enable_tower_sync_ix, block_id);
685
686        let new_root = self.root();
687
688        datapoint_info!(
689            "tower-vote",
690            ("latest", vote_slot, i64),
691            ("root", new_root, i64)
692        );
693        if old_root != new_root {
694            Some(new_root)
695        } else {
696            None
697        }
698    }
699
700    #[cfg(feature = "dev-context-only-utils")]
701    pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
702        self.record_bank_vote_and_update_lockouts(slot, hash, true, Hash::default())
703    }
704
705    #[cfg(feature = "dev-context-only-utils")]
706    pub fn increase_lockout(&mut self, confirmation_count_increase: u32) {
707        for vote in self.vote_state.votes.iter_mut() {
708            vote.increase_confirmation_count(confirmation_count_increase);
709        }
710    }
711
712    pub fn last_voted_slot(&self) -> Option<Slot> {
713        if self.last_vote.is_empty() {
714            None
715        } else {
716            Some(self.last_vote.slot(self.last_vote.len() - 1))
717        }
718    }
719
720    pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> {
721        Some((self.last_voted_slot()?, self.last_vote.hash()))
722    }
723
724    pub fn stray_restored_slot(&self) -> Option<Slot> {
725        self.stray_restored_slot
726    }
727
728    pub fn last_vote(&self) -> VoteTransaction {
729        self.last_vote.clone()
730    }
731
732    fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
733        if current_slot > self.last_timestamp.slot
734            || self.last_timestamp.slot == 0 && current_slot == self.last_timestamp.slot
735        {
736            let timestamp = Utc::now().timestamp();
737            if timestamp >= self.last_timestamp.timestamp {
738                self.last_timestamp = BlockTimestamp {
739                    slot: current_slot,
740                    timestamp,
741                };
742                return Some(timestamp);
743            } else {
744                datapoint_info!(
745                    "backwards-timestamp",
746                    ("slot", current_slot, i64),
747                    ("timestamp", timestamp, i64),
748                    ("last-timestamp", self.last_timestamp.timestamp, i64),
749                )
750            }
751        }
752        None
753    }
754
755    // root may be forcibly set by arbitrary replay root slot, for example from a root
756    // after replaying a snapshot.
757    // Also, tower.root() couldn't be None; initialize_lockouts() ensures that.
758    // Conceptually, every tower must have been constructed from a concrete starting point,
759    // which establishes the origin of trust (i.e. root) whether booting from genesis (slot 0) or
760    // snapshot (slot N). In other words, there should be no possibility a Tower doesn't have
761    // root, unlike young vote accounts.
762    pub fn root(&self) -> Slot {
763        self.vote_state.root_slot.unwrap()
764    }
765
766    // a slot is recent if it's newer than the last vote we have. If we haven't voted yet
767    // but have a root (hard forks situation) then compare it to the root
768    pub fn is_recent(&self, slot: Slot) -> bool {
769        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
770            if slot <= last_voted_slot {
771                return false;
772            }
773        } else if let Some(root) = self.vote_state.root_slot {
774            if slot <= root {
775                return false;
776            }
777        }
778        true
779    }
780
781    pub fn has_voted(&self, slot: Slot) -> bool {
782        for vote in &self.vote_state.votes {
783            if slot == vote.slot() {
784                return true;
785            }
786        }
787        false
788    }
789
790    pub fn is_locked_out(&self, slot: Slot, ancestors: &HashSet<Slot>) -> bool {
791        if !self.is_recent(slot) {
792            return true;
793        }
794
795        // Check if a slot is locked out by simulating adding a vote for that
796        // slot to the current lockouts to pop any expired votes. If any of the
797        // remaining voted slots are on a different fork from the checked slot,
798        // it's still locked out.
799        let mut vote_state = self.vote_state.clone();
800        vote_state.process_next_vote_slot(slot);
801        for vote in &vote_state.votes {
802            if slot != vote.slot() && !ancestors.contains(&vote.slot()) {
803                return true;
804            }
805        }
806
807        if let Some(root_slot) = vote_state.root_slot {
808            if slot != root_slot {
809                // This case should never happen because bank forks purges all
810                // non-descendants of the root every time root is set
811                assert!(
812                    ancestors.contains(&root_slot),
813                    "ancestors: {ancestors:?}, slot: {slot} root: {root_slot}"
814                );
815            }
816        }
817
818        false
819    }
820
821    /// Checks if a vote for `candidate_slot` is usable in a switching proof
822    /// from `last_voted_slot` to `switch_slot`.
823    /// We assume `candidate_slot` is not an ancestor of `last_voted_slot`.
824    ///
825    /// Returns None if `candidate_slot` or `switch_slot` is not present in `ancestors`
826    fn is_valid_switching_proof_vote(
827        &self,
828        candidate_slot: Slot,
829        last_voted_slot: Slot,
830        switch_slot: Slot,
831        ancestors: &HashMap<Slot, HashSet<Slot>>,
832        last_vote_ancestors: &HashSet<Slot>,
833    ) -> Option<bool> {
834        trace!(
835            "Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \
836             to {switch_slot}"
837        );
838        // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
839        // want to count votes on the same fork.
840        if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? {
841            return Some(false);
842        }
843
844        if last_vote_ancestors.is_empty() {
845            // If `last_vote_ancestors` is empty, this means we must have a last vote that is stray. If the `last_voted_slot`
846            // is stray, it must be descended from some earlier root than the latest root (the anchor at startup).
847            // The above check also guarentees that the candidate slot is not a descendant of this stray last vote.
848            //
849            // This gives us a fork graph:
850            //     / ------------- stray `last_voted_slot`
851            // old root
852            //     \- latest root (anchor) - ... - candidate slot
853            //                                \- switch slot
854            //
855            // Thus the common acnestor of `last_voted_slot` and `candidate_slot` is `old_root`, which the `switch_slot`
856            // descends from. Thus it is safe to use `candidate_slot` in the switching proof.
857            //
858            // Note: the calling function should have already panicked if we do not have ancestors and the last vote is not stray.
859            assert!(self.is_stray_last_vote());
860            return Some(true);
861        }
862
863        // Only consider forks that split at the common_ancestor of `switch_slot` and `last_voted_slot` or earlier.
864        // This is to prevent situations like this from being included in the switching proof:
865        //
866        //         /-- `last_voted_slot`
867        //     /--Y
868        //    X    \-- `candidate_slot`
869        //     \-- `switch_slot`
870        //
871        // The common ancestor of `last_voted_slot` and `switch_slot` is `X`. Votes for the `candidate_slot`
872        // should not count towards the switch proof since `candidate_slot` is "on the same fork" as `last_voted_slot`
873        // in relation to `switch_slot`.
874        // However these candidate slots should be allowed:
875        //
876        //             /-- Y -- `last_voted_slot`
877        //    V - W - X
878        //        \    \-- `candidate_slot` -- `switch_slot`
879        //         \    \-- `candidate_slot`
880        //          \-- `candidate_slot`
881        //
882        // As the `candidate_slot`s forked off from `X` or earlier.
883        //
884        // To differentiate, we check the common ancestor of `last_voted_slot` and `candidate_slot`.
885        // If the `switch_slot` descends from this ancestor, then the vote for `candidate_slot` can be included.
886        Self::greatest_common_ancestor(ancestors, candidate_slot, last_voted_slot)
887            .and_then(|ancestor| Self::is_descendant_slot(switch_slot, ancestor, ancestors))
888    }
889
890    /// Checks if `maybe_descendant` is a descendant of `slot`.
891    ///
892    /// Returns None if `maybe_descendant` is not present in `ancestors`
893    fn is_descendant_slot(
894        maybe_descendant: Slot,
895        slot: Slot,
896        ancestors: &HashMap<Slot, HashSet<u64>>,
897    ) -> Option<bool> {
898        ancestors
899            .get(&maybe_descendant)
900            .map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&slot))
901    }
902
903    /// Returns `Some(gca)` where `gca` is the greatest (by slot number)
904    /// common ancestor of both `slot_a` and `slot_b`.
905    ///
906    /// Returns `None` if:
907    /// * `slot_a` is not in `ancestors`
908    /// * `slot_b` is not in `ancestors`
909    /// * There is no common ancestor of slot_a and slot_b in `ancestors`
910    fn greatest_common_ancestor(
911        ancestors: &HashMap<Slot, HashSet<Slot>>,
912        slot_a: Slot,
913        slot_b: Slot,
914    ) -> Option<Slot> {
915        (ancestors.get(&slot_a)?)
916            .intersection(ancestors.get(&slot_b)?)
917            .max()
918            .copied()
919    }
920
921    #[allow(clippy::too_many_arguments)]
922    fn make_check_switch_threshold_decision(
923        &self,
924        switch_slot: Slot,
925        ancestors: &HashMap<Slot, HashSet<u64>>,
926        descendants: &HashMap<Slot, HashSet<u64>>,
927        progress: &ProgressMap,
928        total_stake: u64,
929        epoch_vote_accounts: &VoteAccountsHashMap,
930        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
931        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
932    ) -> SwitchForkDecision {
933        let Some((last_voted_slot, last_voted_hash)) = self.last_voted_slot_hash() else {
934            return SwitchForkDecision::SameFork;
935        };
936        let root = self.root();
937        let empty_ancestors = HashSet::default();
938        let empty_ancestors_due_to_minor_unsynced_ledger = || {
939            // This condition (stale stray last vote) shouldn't occur under normal validator
940            // operation, indicating something unusual happened.
941            // This condition could be introduced by manual ledger mishandling,
942            // validator SEGV, OS/HW crash, or plain No Free Space FS error.
943
944            // However, returning empty ancestors as a fallback here shouldn't result in
945            // slashing by itself (Note that we couldn't fully preclude any kind of slashing if
946            // the failure was OS or HW level).
947
948            // Firstly, lockout is ensured elsewhere.
949
950            // Also, there is no risk of optimistic conf. violation. Although empty ancestors
951            // could result in incorrect (= more than actual) locked_out_stake and
952            // false-positive SwitchProof later in this function, there should be no such a
953            // heavier fork candidate, first of all, if the last vote (or any of its
954            // unavailable ancestors) were already optimistically confirmed.
955            // The only exception is that other validator is already violating it...
956            if self.is_first_switch_check() && switch_slot < last_voted_slot {
957                // `switch < last` is needed not to warn! this message just because of using
958                // newer snapshots on validator restart
959                let message = format!(
960                    "bank_forks doesn't have corresponding data for the stray restored last \
961                     vote({last_voted_slot}), meaning some inconsistency between saved tower and \
962                     ledger."
963                );
964                warn!("{message}");
965                datapoint_warn!("tower_warn", ("warn", message, String));
966            }
967            &empty_ancestors
968        };
969
970        let suspended_decision_due_to_major_unsynced_ledger = || {
971            // This peculiar corner handling is needed mainly for a tower which is newer than
972            // blockstore. (Yeah, we tolerate it for ease of maintaining validator by operators)
973            // This condition could be introduced by manual ledger mishandling,
974            // validator SEGV, OS/HW crash, or plain No Free Space FS error.
975
976            // When we're in this clause, it basically means validator is badly running
977            // with a future tower while replaying past slots, especially problematic is
978            // last_voted_slot.
979            // So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
980            // there would be slashing because of double vote on one of last_vote_ancestors.
981            // (Well, needless to say, re-creating the duplicate block must be handled properly
982            // at the banking stage: https://github.com/solana-labs/solana/issues/8232)
983            //
984            // To be specific, the replay stage is tricked into a false perception where
985            // last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`,  stale, and
986            // stray slots (which should always be empty_ancestors).
987            //
988            // This is covered by test_future_tower_* in local_cluster
989            SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
990        };
991
992        let rollback_due_to_duplicate_ancestor = |latest_duplicate_ancestor| {
993            SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
994        };
995
996        // `heaviest_subtree_fork_choice` entries are not cleaned by duplicate block purging/rollback logic,
997        // so this is safe to check here. We return here if the last voted slot was rolled back/purged due to
998        // being a duplicate because `ancestors`/`descendants`/`progress` structures may be missing this slot due
999        // to duplicate purging. This would cause many of the `unwrap()` checks below to fail.
1000        //
1001        // TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
1002        // heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
1003        // None, but the last vote will be persisted in tower.
1004        let switch_hash = progress
1005            .get_hash(switch_slot)
1006            .expect("Slot we're trying to switch to must exist AND be frozen in progress map");
1007        if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice
1008            .latest_invalid_ancestor(&(last_voted_slot, last_voted_hash))
1009        {
1010            // We're rolling back because one of the ancestors of the last vote was a duplicate. In this
1011            // case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
1012            // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
1013            // should then have a special case continue building an alternate fork from this ancestor, NOT
1014            // the `last_voted_slot`. This is in contrast to usual SwitchFailure where ReplayStage continues to build blocks
1015            // on latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
1016            if heaviest_subtree_fork_choice.is_strict_ancestor(
1017                &(switch_slot, switch_hash),
1018                &(last_voted_slot, last_voted_hash),
1019            ) {
1020                return rollback_due_to_duplicate_ancestor(latest_duplicate_ancestor);
1021            } else if progress
1022                .get_hash(last_voted_slot)
1023                .map(|current_slot_hash| current_slot_hash != last_voted_hash)
1024                .unwrap_or(true)
1025            {
1026                // Our last vote slot was purged because it was on a duplicate fork, don't continue below
1027                // where checks may panic. We allow a freebie vote here that may violate switching
1028                // thresholds
1029                // TODO: Properly handle this case
1030                info!(
1031                    "Allowing switch vote on {:?} because last vote {:?} was rolled back",
1032                    (switch_slot, switch_hash),
1033                    (last_voted_slot, last_voted_hash)
1034                );
1035                return SwitchForkDecision::SwitchProof(Hash::default());
1036            }
1037        }
1038
1039        let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| {
1040            if self.is_stray_last_vote() {
1041                // Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
1042                // return Some(_), justifying to panic! here.
1043                // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
1044                // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
1045                // touched in that case as well.
1046                // In other words, except being stray, all other slots have been voted on while
1047                // this validator has been running, so we must be able to fetch ancestors for
1048                // all of them.
1049                empty_ancestors_due_to_minor_unsynced_ledger()
1050            } else {
1051                panic!("no ancestors found with slot: {last_voted_slot}");
1052            }
1053        });
1054
1055        let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();
1056
1057        if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
1058            // If the `switch_slot is a descendant of the last vote,
1059            // no switching proof is necessary
1060            return SwitchForkDecision::SameFork;
1061        }
1062
1063        if last_vote_ancestors.contains(&switch_slot) {
1064            if self.is_stray_last_vote() {
1065                return suspended_decision_due_to_major_unsynced_ledger();
1066            } else {
1067                panic!(
1068                    "Should never consider switching to ancestor ({switch_slot}) of last vote: \
1069                     {last_voted_slot}, ancestors({last_vote_ancestors:?})",
1070                );
1071            }
1072        }
1073
1074        // By this point, we know the `switch_slot` is on a different fork
1075        // (is neither an ancestor nor descendant of `last_vote`), so a
1076        // switching proof is necessary
1077        let switch_proof = Hash::default();
1078        let mut locked_out_stake = 0;
1079        let mut locked_out_vote_accounts = HashSet::new();
1080        for (candidate_slot, descendants) in descendants.iter() {
1081            // 1) Don't consider any banks that haven't been frozen yet
1082            //    because the needed stats are unavailable
1083            // 2) Only consider lockouts at the latest `frozen` bank
1084            //    on each fork, as that bank will contain all the
1085            //    lockout intervals for ancestors on that fork as well.
1086            // 3) Don't consider lockouts on the `last_vote` itself
1087            // 4) Don't consider lockouts on any descendants of
1088            //    `last_vote`
1089            // 5) Don't consider any banks before the root because
1090            //    all lockouts must be ancestors of `last_vote`
1091            if !progress
1092                .get_fork_stats(*candidate_slot)
1093                .map(|stats| stats.computed)
1094                .unwrap_or(false)
1095                || {
1096                    // If any of the descendants have the `computed` flag set, then there must be a more
1097                    // recent frozen bank on this fork to use, so we can ignore this one. Otherwise,
1098                    // even if this bank has descendants, if they have not yet been frozen / stats computed,
1099                    // then use this bank as a representative for the fork.
1100                    descendants.iter().any(|d| {
1101                        progress
1102                            .get_fork_stats(*d)
1103                            .map(|stats| stats.computed)
1104                            .unwrap_or(false)
1105                    })
1106                }
1107                || *candidate_slot == last_voted_slot
1108                || *candidate_slot <= root
1109                || {
1110                    !self
1111                        .is_valid_switching_proof_vote(
1112                            *candidate_slot,
1113                            last_voted_slot,
1114                            switch_slot,
1115                            ancestors,
1116                            last_vote_ancestors,
1117                        )
1118                        .expect(
1119                            "candidate_slot and switch_slot exist in descendants map, so they \
1120                             must exist in ancestors map",
1121                        )
1122                }
1123            {
1124                continue;
1125            }
1126
1127            // By the time we reach here, any ancestors of the `last_vote`,
1128            // should have been filtered out, as they all have a descendant,
1129            // namely the `last_vote` itself.
1130            assert!(!last_vote_ancestors.contains(candidate_slot));
1131
1132            // Evaluate which vote accounts in the bank are locked out
1133            // in the interval candidate_slot..last_vote, which means
1134            // finding any lockout intervals in the `lockout_intervals` tree
1135            // for this bank that contain `last_vote`.
1136            let lockout_intervals = &progress
1137                .get(candidate_slot)
1138                .unwrap()
1139                .fork_stats
1140                .lockout_intervals;
1141            // Find any locked out intervals for vote accounts in this bank with
1142            // `lockout_interval_end` >= `last_vote`, which implies they are locked out at
1143            // `last_vote` on another fork.
1144            for (_lockout_interval_end, intervals_keyed_by_end) in
1145                lockout_intervals.range((Included(last_voted_slot), Unbounded))
1146            {
1147                for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
1148                    if locked_out_vote_accounts.contains(vote_account_pubkey) {
1149                        continue;
1150                    }
1151
1152                    // Only count lockouts on slots that are:
1153                    // 1) Not ancestors of `last_vote`, meaning being on different fork
1154                    // 2) Not from before the current root as we can't determine if
1155                    // anything before the root was an ancestor of `last_vote` or not
1156                    if !last_vote_ancestors.contains(lockout_interval_start) && {
1157                        // Given a `lockout_interval_start` < root that appears in a
1158                        // bank for a `candidate_slot`, it must be that `lockout_interval_start`
1159                        // is an ancestor of the current root, because `candidate_slot` is a
1160                        // descendant of the current root
1161                        *lockout_interval_start > root
1162                    } {
1163                        let stake = epoch_vote_accounts
1164                            .get(vote_account_pubkey)
1165                            .map(|(stake, _)| *stake)
1166                            .unwrap_or(0);
1167                        locked_out_stake += stake;
1168                        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
1169                            return SwitchForkDecision::SwitchProof(switch_proof);
1170                        }
1171                        locked_out_vote_accounts.insert(vote_account_pubkey);
1172                    }
1173                }
1174            }
1175        }
1176
1177        // Check the latest votes for potentially gossip votes that haven't landed yet
1178        for (
1179            vote_account_pubkey,
1180            (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash),
1181        ) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes()
1182        {
1183            if locked_out_vote_accounts.contains(&vote_account_pubkey) {
1184                continue;
1185            }
1186
1187            if *candidate_latest_frozen_vote > last_voted_slot && {
1188                // Because `candidate_latest_frozen_vote` is the last vote made by some validator
1189                // in the cluster for a frozen bank `B` observed through gossip, we may have cleared
1190                // that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
1191                // like so:
1192                //
1193                //    |----------X ------candidate_latest_frozen_vote (frozen)
1194                // old root
1195                //    |----------new root ----last_voted_slot
1196                //
1197                // In most cases, because `last_voted_slot` must be a descendant of `root`, then
1198                // if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
1199                // directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
1200                // because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
1201                // should be safe to count towards the switching proof:
1202                //
1203                // However, there is also the possibility that `last_voted_slot` is a stray, in which
1204                // case we cannot make this conclusion as we do not know the ancestors/descendants
1205                // of strays. Hence we err on the side of caution here and ignore this vote. This
1206                // is ok because validators voting on different unrooted forks should eventually vote
1207                // on some descendant of the root, at which time they can be included in switching proofs.
1208                self.is_valid_switching_proof_vote(
1209                    *candidate_latest_frozen_vote,
1210                    last_voted_slot,
1211                    switch_slot,
1212                    ancestors,
1213                    last_vote_ancestors,
1214                )
1215                .unwrap_or(false)
1216            } {
1217                let stake = epoch_vote_accounts
1218                    .get(vote_account_pubkey)
1219                    .map(|(stake, _)| *stake)
1220                    .unwrap_or(0);
1221                locked_out_stake += stake;
1222                if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
1223                    return SwitchForkDecision::SwitchProof(switch_proof);
1224                }
1225                locked_out_vote_accounts.insert(vote_account_pubkey);
1226            }
1227        }
1228
1229        // We have not detected sufficient lockout past the last voted slot to generate
1230        // a switching proof
1231        SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
1232    }
1233
1234    #[allow(clippy::too_many_arguments)]
1235    pub(crate) fn check_switch_threshold(
1236        &mut self,
1237        switch_slot: Slot,
1238        ancestors: &HashMap<Slot, HashSet<u64>>,
1239        descendants: &HashMap<Slot, HashSet<u64>>,
1240        progress: &ProgressMap,
1241        total_stake: u64,
1242        epoch_vote_accounts: &VoteAccountsHashMap,
1243        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
1244        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
1245    ) -> SwitchForkDecision {
1246        let decision = self.make_check_switch_threshold_decision(
1247            switch_slot,
1248            ancestors,
1249            descendants,
1250            progress,
1251            total_stake,
1252            epoch_vote_accounts,
1253            latest_validator_votes_for_frozen_banks,
1254            heaviest_subtree_fork_choice,
1255        );
1256        let new_check = Some((switch_slot, decision.clone()));
1257        if new_check != self.last_switch_threshold_check {
1258            trace!("new switch threshold check: slot {switch_slot}: {decision:?}",);
1259            self.last_switch_threshold_check = new_check;
1260        }
1261        decision
1262    }
1263
1264    fn is_first_switch_check(&self) -> bool {
1265        self.last_switch_threshold_check.is_none()
1266    }
1267
1268    // Optimistically skip the stake check if casting a vote would not increase
1269    // the lockout at this threshold. This is because if you bounce back to
1270    // voting on the main fork after not voting for a while, your latest vote
1271    // might pop off a lot of the votes in the tower. The stake from these votes
1272    // would have rolled up to earlier votes in the tower, which presumably
1273    // could have helped us pass the threshold check. Worst case, we'll just
1274    // recheck later without having increased lockouts.
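    /// A minimal sketch of the bypass condition (not doc-tested; slots and
    /// confirmation counts are hypothetical): if the tower already contains a
    /// lockout with the same slot and confirmation count as `threshold_vote`,
    /// the new vote does not deepen that lockout, so the stake check is skipped.
    ///
    /// ```ignore
    /// let threshold_vote = Lockout::new_with_confirmation_count(10, 3);
    /// let tower_before = [
    ///     Lockout::new_with_confirmation_count(12, 1),
    ///     Lockout::new_with_confirmation_count(11, 2),
    ///     Lockout::new_with_confirmation_count(10, 3),
    /// ];
    /// assert!(Tower::optimistically_bypass_vote_stake_threshold_check(
    ///     tower_before.iter(),
    ///     &threshold_vote,
    /// ));
    /// ```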
1275    fn optimistically_bypass_vote_stake_threshold_check<'a>(
1276        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1277        threshold_vote: &Lockout,
1278    ) -> bool {
1279        for old_vote in tower_before_applying_vote {
1280            if old_vote.slot() == threshold_vote.slot()
1281                && old_vote.confirmation_count() == threshold_vote.confirmation_count()
1282            {
1283                return true;
1284            }
1285        }
1286        false
1287    }
1288
1289    /// Checks a single vote threshold for `slot`
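    ///
    /// A minimal sketch with hypothetical stakes (not doc-tested): 68 of 100 total
    /// stake has voted on the slot sitting `threshold_depth` back in the tower, so
    /// a 0.67 threshold passes.
    ///
    /// ```ignore
    /// let threshold_vote = Lockout::new_with_confirmation_count(10, 8);
    /// let voted_stakes: HashMap<Slot, u64> = [(10, 68)].into_iter().collect();
    /// let decision = Tower::check_vote_stake_threshold(
    ///     Some(&threshold_vote),
    ///     std::iter::empty::<&Lockout>(), // tower before applying the vote
    ///     8,                              // threshold_depth
    ///     0.67,                           // threshold_size
    ///     42,                             // slot being voted on (used for logging)
    ///     &voted_stakes,
    ///     100,                            // total_stake
    /// );
    /// assert!(decision.passed());
    /// ```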
1290    fn check_vote_stake_threshold<'a>(
1291        threshold_vote: Option<&Lockout>,
1292        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1293        threshold_depth: usize,
1294        threshold_size: f64,
1295        slot: Slot,
1296        voted_stakes: &HashMap<Slot, u64>,
1297        total_stake: u64,
1298    ) -> ThresholdDecision {
1299        let Some(threshold_vote) = threshold_vote else {
1300            // Tower isn't that deep.
1301            return ThresholdDecision::PassedThreshold;
1302        };
1303        let Some(fork_stake) = voted_stakes.get(&threshold_vote.slot()) else {
1304            // We haven't seen any votes on this fork yet, so no stake
1305            return ThresholdDecision::FailedThreshold(threshold_depth as u64, 0);
1306        };
1307
1308        let lockout = *fork_stake as f64 / total_stake as f64;
1309        trace!(
1310            "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \
1311             total_stake: {}",
1312            slot,
1313            threshold_vote.slot(),
1314            lockout,
1315            fork_stake,
1316            total_stake
1317        );
1318        if Self::optimistically_bypass_vote_stake_threshold_check(
1319            tower_before_applying_vote,
1320            threshold_vote,
1321        ) || lockout > threshold_size
1322        {
1323            return ThresholdDecision::PassedThreshold;
1324        }
1325        ThresholdDecision::FailedThreshold(threshold_depth as u64, *fork_stake)
1326    }
1327
1328    /// Performs vote threshold checks for `slot`
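    ///
    /// A minimal sketch (not doc-tested; `new_for_tests` mirrors the tests below and
    /// the stake numbers are hypothetical):
    ///
    /// ```ignore
    /// let mut tower = Tower::new_for_tests(4 /* depth */, 0.67 /* size */);
    /// for slot in 0..4 {
    ///     tower.record_vote(slot, Hash::default());
    /// }
    /// // 70 of 100 total stake has voted on every ancestor of the candidate slot.
    /// let voted_stakes: VotedStakes = (0..4).map(|slot| (slot, 70)).collect();
    /// let decisions = tower.check_vote_stake_thresholds(4, &voted_stakes, 100);
    /// // An empty vector means every configured (depth, threshold) pair passed.
    /// assert!(decisions.is_empty());
    /// ```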
1329    pub fn check_vote_stake_thresholds(
1330        &self,
1331        slot: Slot,
1332        voted_stakes: &VotedStakes,
1333        total_stake: Stake,
1334    ) -> Vec<ThresholdDecision> {
1335        let mut threshold_decisions = vec![];
1336        // Generate the vote state assuming this vote is included.
1337        let mut vote_state = self.vote_state.clone();
1338        vote_state.process_next_vote_slot(slot);
1339
1340        // Assemble all the vote thresholds and depths to check.
1341        let vote_thresholds_and_depths = vec![
1342            // The following two checks are log-only and are currently being used for experimentation
1343            // purposes. We wish to impose a shallow threshold check to prevent the frequent 8-deep
1344            // lockouts seen multiple times a day. We check both the 4th and 5th depths here to collect
1345            // metrics to determine the right depth and threshold percentage to set in the future.
1346            (VOTE_THRESHOLD_DEPTH_SHALLOW, SWITCH_FORK_THRESHOLD),
1347            (VOTE_THRESHOLD_DEPTH_SHALLOW + 1, SWITCH_FORK_THRESHOLD),
1348            (self.threshold_depth, self.threshold_size),
1349        ];
1350
1351        // Check one by one and add any failures to be returned
1352        for (threshold_depth, threshold_size) in vote_thresholds_and_depths {
1353            if let ThresholdDecision::FailedThreshold(vote_depth, stake) =
1354                Self::check_vote_stake_threshold(
1355                    vote_state.nth_recent_lockout(threshold_depth),
1356                    self.vote_state.votes.iter(),
1357                    threshold_depth,
1358                    threshold_size,
1359                    slot,
1360                    voted_stakes,
1361                    total_stake,
1362                )
1363            {
1364                threshold_decisions.push(ThresholdDecision::FailedThreshold(vote_depth, stake));
1365            }
1366        }
1367        threshold_decisions
1368    }
1369
1370    /// Ensure voted-stake entries exist for `vote_slots` and all of their ancestors
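    ///
    /// A minimal sketch with hypothetical slots (not doc-tested): entries are created
    /// with zero stake so that later accumulation can assume they exist.
    ///
    /// ```ignore
    /// let mut voted_stakes = VotedStakes::default();
    /// let ancestors: HashMap<Slot, HashSet<Slot>> = [
    ///     (2, [0, 1].into_iter().collect()),
    ///     (0, HashSet::new()),
    /// ]
    /// .into_iter()
    /// .collect();
    /// Tower::populate_ancestor_voted_stakes(&mut voted_stakes, [2], &ancestors);
    /// assert_eq!(voted_stakes.get(&1), Some(&0));
    /// ```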
1371    pub(crate) fn populate_ancestor_voted_stakes(
1372        voted_stakes: &mut VotedStakes,
1373        vote_slots: impl IntoIterator<Item = Slot>,
1374        ancestors: &HashMap<Slot, HashSet<Slot>>,
1375    ) {
1376        // If there are no ancestors, this slot must be from before the current root,
1377        // in which case the lockouts won't be calculated in bank_weight anyway, so ignore
1378        // this slot
1379        for vote_slot in vote_slots {
1380            if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
1381                voted_stakes.entry(vote_slot).or_default();
1382                for slot in slot_ancestors {
1383                    voted_stakes.entry(*slot).or_default();
1384                }
1385            }
1386        }
1387    }
1388
1389    /// Update stake for the voted slot and all of its ancestors.
1390    /// Note: the added stake is the same for every ancestor.
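    ///
    /// A minimal sketch with hypothetical slots and stake (not doc-tested):
    ///
    /// ```ignore
    /// let mut voted_stakes = VotedStakes::default();
    /// // Slot 3 has ancestors {0, 1, 2}.
    /// let ancestors: HashMap<Slot, HashSet<Slot>> =
    ///     [(3, [0, 1, 2].into_iter().collect())].into_iter().collect();
    /// Tower::update_ancestor_voted_stakes(&mut voted_stakes, 3, 100, &ancestors);
    /// // The voter's 100 lamports are credited to slot 3 and to every ancestor.
    /// assert_eq!(voted_stakes[&3], 100);
    /// assert_eq!(voted_stakes[&0], 100);
    /// ```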
1391    fn update_ancestor_voted_stakes(
1392        voted_stakes: &mut VotedStakes,
1393        voted_slot: Slot,
1394        voted_stake: u64,
1395        ancestors: &HashMap<Slot, HashSet<Slot>>,
1396    ) {
1397        // If there are no ancestors, this slot must be from
1398        // before the current root, so ignore this slot
1399        if let Some(vote_slot_ancestors) = ancestors.get(&voted_slot) {
1400            *voted_stakes.entry(voted_slot).or_default() += voted_stake;
1401            for slot in vote_slot_ancestors {
1402                *voted_stakes.entry(*slot).or_default() += voted_stake;
1403            }
1404        }
1405    }
1406
1407    fn voted_slots(&self) -> Vec<Slot> {
1408        self.vote_state
1409            .votes
1410            .iter()
1411            .map(|lockout| lockout.slot())
1412            .collect()
1413    }
1414
1415    pub fn is_stray_last_vote(&self) -> bool {
1416        self.stray_restored_slot.is_some() && self.stray_restored_slot == self.last_voted_slot()
1417    }
1418
1419    // The tower root can be older/newer if the validator booted from a newer/older snapshot, so
1420    // tower lockouts may need adjustment
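    /// A hedged sketch of the restart flow this supports (the storage, keypair, and
    /// replayed bank are hypothetical and produced elsewhere during startup):
    ///
    /// ```ignore
    /// let tower = Tower::restore(&tower_storage, &node_pubkey)?;
    /// // `replayed_root` is the root bank slot after replaying the local ledger;
    /// // `slot_history` is that bank's SlotHistory sysvar.
    /// let tower = tower.adjust_lockouts_after_replay(replayed_root, &slot_history)?;
    /// ```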
1421    pub fn adjust_lockouts_after_replay(
1422        mut self,
1423        replayed_root: Slot,
1424        slot_history: &SlotHistory,
1425    ) -> Result<Self> {
1426        // sanity assertions for roots
1427        let tower_root = self.root();
1428        info!(
1429            "adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
1430            replayed_root,
1431            self.voted_slots(),
1432            tower_root,
1433            replayed_root,
1434        );
1435        assert_eq!(slot_history.check(replayed_root), Check::Found);
1436
1437        assert!(
1438            self.last_vote == VoteTransaction::from(VoteStateUpdate::default())
1439                && self.vote_state.votes.is_empty()
1440                || self.last_vote == VoteTransaction::from(TowerSync::default())
1441                    && self.vote_state.votes.is_empty()
1442                || !self.vote_state.votes.is_empty(),
1443            "last vote: {:?} vote_state.votes: {:?}",
1444            self.last_vote,
1445            self.vote_state.votes
1446        );
1447
1448        if let Some(last_voted_slot) = self.last_voted_slot() {
1449            if tower_root <= replayed_root {
1450                // Normally, we go into this clause, possibly with the help of
1451                // reconcile_blockstore_roots_with_external_source()
1452                if slot_history.check(last_voted_slot) == Check::TooOld {
1453                    // We could try hard to anchor with other older votes, but opt to simplify the
1454                    // following logic
1455                    return Err(TowerError::TooOldTower(
1456                        last_voted_slot,
1457                        slot_history.oldest(),
1458                    ));
1459                }
1460
1461                self.adjust_lockouts_with_slot_history(slot_history)?;
1462                self.initialize_root(replayed_root);
1463            } else {
1464                // This should never occur under normal operation.
1465                // While this validator's voting is suspended this way,
1466                // suspended_decision_due_to_major_unsynced_ledger() will also be touched.
1467                let message = format!(
1468                    "For some reason, we're REPROCESSING slots which have already been voted on and \
1469                     ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
1470                );
1471                error!("{message}");
1472                datapoint_error!("tower_error", ("error", message, String));
1473
1474                // Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
1475                // using a synthesized SlotHistory.
1476
1477                let mut warped_slot_history = (*slot_history).clone();
1478                // Blockstore doesn't have the tower_root slot because
1479                // replayed_root < tower_root in this else clause, meaning the tower is from
1480                // the future from the blockstore's point of view.
1481                // Pretend the blockstore has the future tower_root so we can anchor exactly on
1482                // that slot by adding tower_root to a copy of the slot history. The added slot
1483                // will be newer than all slots in the slot history (remember tower_root >
1484                // replayed_root), satisfying the slot history invariant.
1485                // Thus, the whole process is safe because tower_root exists in both the tower
1486                // and the slot history, guaranteeing that the adjustment succeeds and that all
1487                // future votes are retained correctly while sanitizing.
1488                warped_slot_history.add(tower_root);
1489
1490                self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
1491                // Don't update the root; the future tower's root should be kept across validator
1492                // restarts so that the scary messages keep showing at restart until the next
1493                // vote.
1494            }
1495        } else {
1496            // This else clause is for a newly created tower.
1497            // initialize_lockouts_from_bank() should ensure the following invariant,
1498            // otherwise we're screwing something up.
1499            assert_eq!(tower_root, replayed_root);
1500        }
1501
1502        Ok(self)
1503    }
1504
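    /// A minimal sketch with hypothetical slots (not doc-tested), assuming `tower`
    /// holds votes for 11..=14 over root 10 while the restored SlotHistory only
    /// knows slots up to 12 on this fork:
    ///
    /// ```ignore
    /// let mut slot_history = SlotHistory::default();
    /// for slot in 0..=12 {
    ///     slot_history.add(slot);
    /// }
    /// // Slot 12 (the newest tower slot found in the history) becomes the anchor;
    /// // votes 13 and 14 are retained as divergent/future votes, while 11 and 12
    /// // (already covered by the history) are dropped.
    /// tower.adjust_lockouts_with_slot_history(&slot_history)?;
    /// ```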
1505    fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
1506        let tower_root = self.root();
1507        // retained slots will consist only of divergent slots
1508        let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
1509            Vec::with_capacity(self.vote_state.votes.len());
1510
1511        let mut still_in_future = true;
1512        let mut past_outside_history = false;
1513        let mut checked_slot = None;
1514        let mut anchored_slot = None;
1515
1516        let mut slots_in_tower = vec![tower_root];
1517        slots_in_tower.extend(self.voted_slots());
1518
1519        // iterate over votes + root (if any) in the newest => oldest order
1520        // bail out early if a bad condition is found
1521        for slot_in_tower in slots_in_tower.iter().rev() {
1522            let check = slot_history.check(*slot_in_tower);
1523
1524            if anchored_slot.is_none() && check == Check::Found {
1525                anchored_slot = Some(*slot_in_tower);
1526            } else if anchored_slot.is_some() && check == Check::NotFound {
1527                // this can't happen unless we're fed a bogus snapshot
1528                return Err(TowerError::FatallyInconsistent("diverged ancestor?"));
1529            }
1530
1531            if still_in_future && check != Check::Future {
1532                still_in_future = false;
1533            } else if !still_in_future && check == Check::Future {
1534                // really odd cases: badly ordered votes?
1535                return Err(TowerError::FatallyInconsistent("time warped?"));
1536            }
1537            if !past_outside_history && check == Check::TooOld {
1538                past_outside_history = true;
1539            } else if past_outside_history && check != Check::TooOld {
1540                // really odd cases: badly ordered votes?
1541                return Err(TowerError::FatallyInconsistent(
1542                    "not too old once after got too old?",
1543                ));
1544            }
1545
1546            if let Some(checked_slot) = checked_slot {
1547                // This is a special case: only if the tower is initialized and contains
1548                // a vote for the root can the root slot repeat, and only once
1549                let voting_for_root =
1550                    *slot_in_tower == checked_slot && *slot_in_tower == tower_root;
1551
1552                if !voting_for_root {
1553                    // Unless we've been voting since genesis, slots_in_tower must always be older than the last checked_slot,
1554                    // including all vote slots and the root slot.
1555                    assert!(
1556                        *slot_in_tower < checked_slot,
1557                        "slot_in_tower({}) < checked_slot({})",
1558                        *slot_in_tower,
1559                        checked_slot
1560                    );
1561                }
1562            }
1563
1564            checked_slot = Some(*slot_in_tower);
1565
1566            retain_flags_for_each_vote_in_reverse.push(anchored_slot.is_none());
1567        }
1568
1569        // Check for errors if not anchored
1570        info!("adjusted tower's anchored slot: {anchored_slot:?}");
1571        if anchored_slot.is_none() {
1572            // this error really shouldn't happen unless ledger/tower is corrupted
1573            return Err(TowerError::FatallyInconsistent(
1574                "no common slot for rooted tower",
1575            ));
1576        }
1577
1578        assert_eq!(
1579            slots_in_tower.len(),
1580            retain_flags_for_each_vote_in_reverse.len()
1581        );
1582        // pop for the tower root
1583        retain_flags_for_each_vote_in_reverse.pop();
1584        let mut retain_flags_for_each_vote =
1585            retain_flags_for_each_vote_in_reverse.into_iter().rev();
1586
1587        let original_votes_len = self.vote_state.votes.len();
1588        self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
1589
1590        if self.vote_state.votes.is_empty() {
1591            info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
1592            // we might not have banks for those votes so just reset.
1593            // That's because the votes may well be past replayed_root
1594            self.last_vote = VoteTransaction::from(Vote::default());
1595        } else {
1596            info!(
1597                "{} restored votes (out of {}) were on different fork or are upcoming votes on \
1598                 unrooted slots: {:?}!",
1599                self.voted_slots().len(),
1600                original_votes_len,
1601                self.voted_slots()
1602            );
1603
1604            assert_eq!(self.last_voted_slot(), self.voted_slots().last().copied());
1605            self.stray_restored_slot = self.last_vote.last_voted_slot()
1606        }
1607
1608        Ok(())
1609    }
1610
1611    fn initialize_lockouts_from_bank(
1612        &mut self,
1613        vote_account_pubkey: &Pubkey,
1614        root: Slot,
1615        bank: &Bank,
1616    ) {
1617        if let Some(vote_account) = bank.get_vote_account(vote_account_pubkey) {
1618            self.vote_state = TowerVoteState::from(vote_account.vote_state_view());
1619            self.initialize_root(root);
1620            self.initialize_lockouts(|v| v.slot() > root);
1621        } else {
1622            self.initialize_root(root);
1623            info!(
1624                "vote account({}) not found in bank (slot={})",
1625                vote_account_pubkey,
1626                bank.slot()
1627            );
1628        }
1629    }
1630
1631    fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
1632        self.vote_state.votes.retain(should_retain);
1633    }
1634
1635    // Updating root is needed to correctly restore from a newly-saved tower for the next
1636    // boot
1637    fn initialize_root(&mut self, root: Slot) {
1638        self.vote_state.root_slot = Some(root);
1639    }
1640
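    /// A hedged sketch of persisting and restoring the tower (the keypair and ledger
    /// path are hypothetical; `FileTowerStorage` is one `TowerStorage` implementation
    /// from the `tower_storage` module):
    ///
    /// ```ignore
    /// let storage = FileTowerStorage::new(ledger_path);
    /// tower.save(&storage, &node_keypair)?;
    /// let restored = Tower::restore(&storage, &node_keypair.pubkey())?;
    /// ```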
1641    pub fn save(&self, tower_storage: &dyn TowerStorage, node_keypair: &Keypair) -> Result<()> {
1642        let saved_tower = SavedTower::new(self, node_keypair)?;
1643        tower_storage.store(&SavedTowerVersions::from(saved_tower))?;
1644        Ok(())
1645    }
1646
1647    pub fn restore(tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey) -> Result<Self> {
1648        tower_storage.load(node_pubkey)
1649    }
1650}
1651
1652#[derive(Error, Debug)]
1653pub enum TowerError {
1654    #[error("IO Error: {0}")]
1655    IoError(#[from] std::io::Error),
1656
1657    #[error("Serialization Error: {0}")]
1658    SerializeError(#[from] bincode::Error),
1659
1660    #[error("The signature on the saved tower is invalid")]
1661    InvalidSignature,
1662
1663    #[error("The tower does not match this validator: {0}")]
1664    WrongTower(String),
1665
1666    #[error(
1667        "The tower is too old: newest slot in tower ({0}) << oldest slot in available history \
1668         ({1})"
1669    )]
1670    TooOldTower(Slot, Slot),
1671
1672    #[error("The tower is fatally inconsistent with blockstore: {0}")]
1673    FatallyInconsistent(&'static str),
1674
1675    #[error("The tower is useless because of new hard fork: {0}")]
1676    HardFork(Slot),
1677}
1678
1679impl TowerError {
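    /// A hedged usage sketch: treat a missing tower file as "start from a fresh
    /// tower" while propagating every other error (`tower_storage` and `node_pubkey`
    /// are hypothetical):
    ///
    /// ```ignore
    /// let tower = match Tower::restore(&tower_storage, &node_pubkey) {
    ///     Ok(tower) => tower,
    ///     Err(err) if err.is_file_missing() => Tower::default(),
    ///     Err(err) => return Err(err),
    /// };
    /// ```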
1680    pub fn is_file_missing(&self) -> bool {
1681        if let TowerError::IoError(io_err) = &self {
1682            io_err.kind() == std::io::ErrorKind::NotFound
1683        } else {
1684            false
1685        }
1686    }
1687    pub fn is_too_old(&self) -> bool {
1688        matches!(self, TowerError::TooOldTower(_, _))
1689    }
1690}
1691
1692#[derive(Debug)]
1693pub enum ExternalRootSource {
1694    Tower(Slot),
1695    HardFork(Slot),
1696}
1697
1698impl ExternalRootSource {
1699    fn root(&self) -> Slot {
1700        match self {
1701            ExternalRootSource::Tower(slot) => *slot,
1702            ExternalRootSource::HardFork(slot) => *slot,
1703        }
1704    }
1705}
1706
1707// After an untimely crash, the tower may have roots that are not reflected in the
1708// blockstore, or vice versa.
1709// That's because we don't impose any ordering guarantee or any kind of write barrier
1710// between the tower (plain old POSIX fs calls) and the blockstore (through RocksDB)
1711// when `ReplayStage::handle_votable_bank()` saves the tower before setting blockstore roots.
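/// A minimal usage sketch (the blockstore and tower root are assumed to be loaded
/// elsewhere during startup):
///
/// ```ignore
/// let mut last_blockstore_root = blockstore.max_root();
/// reconcile_blockstore_roots_with_external_source(
///     ExternalRootSource::Tower(tower_root),
///     &blockstore,
///     &mut last_blockstore_root,
/// )?;
/// ```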
1712pub fn reconcile_blockstore_roots_with_external_source(
1713    external_source: ExternalRootSource,
1714    blockstore: &Blockstore,
1715    // blockstore.max_root() might have been updated already,
1716    // so take a &mut param used both as input and as output (iff we update the root)
1717    last_blockstore_root: &mut Slot,
1718) -> blockstore::Result<()> {
1719    let external_root = external_source.root();
1720    if *last_blockstore_root < external_root {
1721        // Ensure external_root itself exists and is marked as rooted in the blockstore,
1722        // in addition to its ancestors.
1723        let new_roots: Vec<_> = AncestorIterator::new_inclusive(external_root, blockstore)
1724            .take_while(|current| match current.cmp(last_blockstore_root) {
1725                Ordering::Greater => true,
1726                Ordering::Equal => false,
1727                Ordering::Less => panic!(
1728                    "last_blockstore_root({last_blockstore_root}) is skipped while traversing \
1729                     blockstore (currently at {current}) from external root \
1730                     ({external_source:?})!?",
1731                ),
1732            })
1733            .collect();
1734        if !new_roots.is_empty() {
1735            info!(
1736                "Reconciling slots as root based on external root: {new_roots:?} (external: \
1737                 {external_source:?}, blockstore: {last_blockstore_root})"
1738            );
1739
1740            // Unfortunately, we can't supply duplicate-confirmed hashes,
1741            // because we can't guarantee that these slots can be replayed
1742            // under this code path's limited conditions (i.e. those shreds
1743            // might not be available, etc.); correctly overcoming this
1744            // limitation is also hard...
1745            blockstore.mark_slots_as_if_rooted_normally_at_startup(
1746                new_roots.into_iter().map(|root| (root, None)).collect(),
1747                false,
1748            )?;
1749
1750            // Update the caller-managed state of last root in blockstore.
1751            // Repeated calls of this function should result in a no-op for
1752            // the range of `new_roots`.
1753            *last_blockstore_root = blockstore.max_root();
1754        } else {
1755            // This indicates we're in a bad state, but still don't panic here.
1756            // That's because we might have a chance of recovering properly with a
1757            // newer snapshot.
1758            warn!(
1759                "Couldn't find any ancestor slots from external source ({external_source:?}) \
1760                 towards blockstore root ({last_blockstore_root}); blockstore pruned or only \
1761                 tower moved into new ledger or just hard fork?",
1762            );
1763        }
1764    }
1765    Ok(())
1766}
1767
1768#[cfg(test)]
1769pub mod test {
1770    use {
1771        super::*,
1772        crate::{
1773            consensus::{
1774                fork_choice::ForkChoice, heaviest_subtree_fork_choice::SlotHashKey,
1775                tower_storage::FileTowerStorage,
1776            },
1777            replay_stage::HeaviestForkFailures,
1778            vote_simulator::VoteSimulator,
1779        },
1780        itertools::Itertools,
1781        solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
1782        solana_clock::Slot,
1783        solana_hash::Hash,
1784        solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path_auto_delete},
1785        solana_pubkey::Pubkey,
1786        solana_runtime::bank::Bank,
1787        solana_signer::Signer,
1788        solana_slot_history::SlotHistory,
1789        solana_vote::vote_account::VoteAccount,
1790        solana_vote_program::vote_state::{
1791            process_slot_vote_unchecked, Vote, VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY,
1792        },
1793        std::{
1794            collections::{HashMap, VecDeque},
1795            fs::{remove_file, OpenOptions},
1796            io::{Read, Seek, SeekFrom, Write},
1797            path::PathBuf,
1798            sync::Arc,
1799        },
1800        tempfile::TempDir,
1801        trees::tr,
1802    };
1803
1804    fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> VoteAccountsHashMap {
1805        stake_votes
1806            .iter()
1807            .map(|(lamports, votes)| {
1808                let mut account = AccountSharedData::from(Account {
1809                    data: vec![0; VoteStateV3::size_of()],
1810                    lamports: *lamports,
1811                    owner: solana_vote_program::id(),
1812                    ..Account::default()
1813                });
1814                let mut vote_state = VoteStateV3::default();
1815                for slot in *votes {
1816                    process_slot_vote_unchecked(&mut vote_state, *slot);
1817                }
1818                VoteStateV3::serialize(
1819                    &VoteStateVersions::new_v3(vote_state),
1820                    account.data_as_mut_slice(),
1821                )
1822                .expect("serialize state");
1823                (
1824                    solana_pubkey::new_rand(),
1825                    (*lamports, VoteAccount::try_from(account).unwrap()),
1826                )
1827            })
1828            .collect()
1829    }
1830
1831    #[test]
1832    fn test_to_vote_instruction() {
1833        let vote = Vote::default();
1834        let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
1835        assert!(decision
1836            .to_vote_instruction(
1837                VoteTransaction::from(vote.clone()),
1838                &Pubkey::default(),
1839                &Pubkey::default()
1840            )
1841            .is_none());
1842
1843        decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
1844        assert!(decision
1845            .to_vote_instruction(
1846                VoteTransaction::from(vote.clone()),
1847                &Pubkey::default(),
1848                &Pubkey::default()
1849            )
1850            .is_none());
1851
1852        decision = SwitchForkDecision::SameFork;
1853        assert_eq!(
1854            decision.to_vote_instruction(
1855                VoteTransaction::from(vote.clone()),
1856                &Pubkey::default(),
1857                &Pubkey::default()
1858            ),
1859            Some(vote_instruction::vote(
1860                &Pubkey::default(),
1861                &Pubkey::default(),
1862                vote.clone(),
1863            ))
1864        );
1865
1866        decision = SwitchForkDecision::SwitchProof(Hash::default());
1867        assert_eq!(
1868            decision.to_vote_instruction(
1869                VoteTransaction::from(vote.clone()),
1870                &Pubkey::default(),
1871                &Pubkey::default()
1872            ),
1873            Some(vote_instruction::vote_switch(
1874                &Pubkey::default(),
1875                &Pubkey::default(),
1876                vote,
1877                Hash::default()
1878            ))
1879        );
1880    }
1881
1882    #[test]
1883    fn test_simple_votes() {
1884        // Init state
1885        let mut vote_simulator = VoteSimulator::new(1);
1886        let node_pubkey = vote_simulator.node_pubkeys[0];
1887        let mut tower = Tower::default();
1888
1889        // Create the tree of banks
1890        let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
1891
1892        // Set the voting behavior
1893        let mut cluster_votes = HashMap::new();
1894        let votes = vec![1, 2, 3, 4, 5];
1895        cluster_votes.insert(node_pubkey, votes.clone());
1896        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
1897
1898        // Simulate the votes
1899        for vote in votes {
1900            assert!(vote_simulator
1901                .simulate_vote(vote, &node_pubkey, &mut tower,)
1902                .is_empty());
1903        }
1904
1905        for i in 1..5 {
1906            assert_eq!(tower.vote_state.votes[i - 1].slot() as usize, i);
1907            assert_eq!(
1908                tower.vote_state.votes[i - 1].confirmation_count() as usize,
1909                6 - i
1910            );
1911        }
1912    }
1913
1914    #[test]
1915    fn test_switch_threshold_duplicate_rollback() {
1916        run_test_switch_threshold_duplicate_rollback(false);
1917    }
1918
1919    #[test]
1920    #[should_panic]
1921    fn test_switch_threshold_duplicate_rollback_panic() {
1922        run_test_switch_threshold_duplicate_rollback(true);
1923    }
1924
1925    fn setup_switch_test(num_accounts: usize) -> (Arc<Bank>, VoteSimulator, u64) {
1926        // Init state
1927        assert!(num_accounts > 1);
1928        let mut vote_simulator = VoteSimulator::new(num_accounts);
1929        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
1930        let total_stake = bank0.total_epoch_stake();
1931        assert_eq!(
1932            total_stake,
1933            vote_simulator.validator_keypairs.len() as u64 * 10_000
1934        );
1935
1936        // Create the tree of banks
1937        let forks = tr(0)
1938            / (tr(1)
1939                / (tr(2)
1940                    // Minor fork 1
1941                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
1942                    / (tr(43)
1943                        / (tr(44)
1944                            // Minor fork 2
1945                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
1946                            / (tr(110)))
1947                        / tr(112))));
1948
1949        // Fill the BankForks according to the above fork structure
1950        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
1951        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
1952            fork_progress.fork_stats.computed = true;
1953        }
1954
1955        (bank0, vote_simulator, total_stake)
1956    }
1957
1958    fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) {
1959        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
1960        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
1961        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
1962        let mut tower = Tower::default();
1963
1964        // Last vote is 47
1965        tower.record_vote(
1966            47,
1967            vote_simulator
1968                .bank_forks
1969                .read()
1970                .unwrap()
1971                .get(47)
1972                .unwrap()
1973                .hash(),
1974        );
1975
1976        // Trying to switch to an ancestor of the last vote should panic
1977        // unless the current vote has a duplicate ancestor
1978        let ancestor_of_voted_slot = 43;
1979        let duplicate_ancestor1 = 44;
1980        let duplicate_ancestor2 = 45;
1981        vote_simulator
1982            .tbft_structs
1983            .heaviest_subtree_fork_choice
1984            .mark_fork_invalid_candidate(&(
1985                duplicate_ancestor1,
1986                vote_simulator
1987                    .bank_forks
1988                    .read()
1989                    .unwrap()
1990                    .get(duplicate_ancestor1)
1991                    .unwrap()
1992                    .hash(),
1993            ));
1994        vote_simulator
1995            .tbft_structs
1996            .heaviest_subtree_fork_choice
1997            .mark_fork_invalid_candidate(&(
1998                duplicate_ancestor2,
1999                vote_simulator
2000                    .bank_forks
2001                    .read()
2002                    .unwrap()
2003                    .get(duplicate_ancestor2)
2004                    .unwrap()
2005                    .hash(),
2006            ));
2007        assert_eq!(
2008            tower.check_switch_threshold(
2009                ancestor_of_voted_slot,
2010                &ancestors,
2011                &descendants,
2012                &vote_simulator.progress,
2013                total_stake,
2014                bank0.epoch_vote_accounts(0).unwrap(),
2015                &vote_simulator.latest_validator_votes_for_frozen_banks,
2016                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2017            ),
2018            SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2019        );
2020        let mut confirm_ancestors = vec![duplicate_ancestor1];
2021        if should_panic {
2022            // Adding the last duplicate ancestor will
2023            // 1) Cause loop below to confirm last ancestor
2024            // 2) Check switch threshold on a vote ancestor when there
2025            // are no duplicates on that fork, which will cause a panic
2026            confirm_ancestors.push(duplicate_ancestor2);
2027        }
2028        for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
2029            vote_simulator
2030                .tbft_structs
2031                .heaviest_subtree_fork_choice
2032                .mark_fork_valid_candidate(&(
2033                    duplicate_ancestor,
2034                    vote_simulator
2035                        .bank_forks
2036                        .read()
2037                        .unwrap()
2038                        .get(duplicate_ancestor)
2039                        .unwrap()
2040                        .hash(),
2041                ));
2042            let res = tower.check_switch_threshold(
2043                ancestor_of_voted_slot,
2044                &ancestors,
2045                &descendants,
2046                &vote_simulator.progress,
2047                total_stake,
2048                bank0.epoch_vote_accounts(0).unwrap(),
2049                &vote_simulator.latest_validator_votes_for_frozen_banks,
2050                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2051            );
2052            if i == 0 {
2053                assert_eq!(
2054                    res,
2055                    SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2056                );
2057            }
2058        }
2059    }
2060
2061    #[test]
2062    fn test_switch_threshold() {
2063        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2064        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2065        let mut descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2066        let mut tower = Tower::default();
2067        let other_vote_account = vote_simulator.vote_pubkeys[1];
2068
2069        // Last vote is 47
2070        tower.record_vote(47, Hash::default());
2071
2072        // Trying to switch to a descendant of last vote should always work
2073        assert_eq!(
2074            tower.check_switch_threshold(
2075                48,
2076                &ancestors,
2077                &descendants,
2078                &vote_simulator.progress,
2079                total_stake,
2080                bank0.epoch_vote_accounts(0).unwrap(),
2081                &vote_simulator.latest_validator_votes_for_frozen_banks,
2082                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2083            ),
2084            SwitchForkDecision::SameFork
2085        );
2086
2087        // Trying to switch to another fork at 110 should fail
2088        assert_eq!(
2089            tower.check_switch_threshold(
2090                110,
2091                &ancestors,
2092                &descendants,
2093                &vote_simulator.progress,
2094                total_stake,
2095                bank0.epoch_vote_accounts(0).unwrap(),
2096                &vote_simulator.latest_validator_votes_for_frozen_banks,
2097                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2098            ),
2099            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2100        );
2101
2102        // Adding another validator lockout on a descendant of last vote should
2103        // not count toward the switch threshold
2104        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2105        assert_eq!(
2106            tower.check_switch_threshold(
2107                110,
2108                &ancestors,
2109                &descendants,
2110                &vote_simulator.progress,
2111                total_stake,
2112                bank0.epoch_vote_accounts(0).unwrap(),
2113                &vote_simulator.latest_validator_votes_for_frozen_banks,
2114                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2115            ),
2116            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2117        );
2118
2119        // Adding another validator lockout on an ancestor of last vote should
2120        // not count toward the switch threshold
2121        vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
2122        assert_eq!(
2123            tower.check_switch_threshold(
2124                110,
2125                &ancestors,
2126                &descendants,
2127                &vote_simulator.progress,
2128                total_stake,
2129                bank0.epoch_vote_accounts(0).unwrap(),
2130                &vote_simulator.latest_validator_votes_for_frozen_banks,
2131                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2132            ),
2133            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2134        );
2135
2136        // Adding another validator lockout on a different fork, but the lockout
2137        // doesn't cover the last vote, should not satisfy the switch threshold
2138        vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
2139        assert_eq!(
2140            tower.check_switch_threshold(
2141                110,
2142                &ancestors,
2143                &descendants,
2144                &vote_simulator.progress,
2145                total_stake,
2146                bank0.epoch_vote_accounts(0).unwrap(),
2147                &vote_simulator.latest_validator_votes_for_frozen_banks,
2148                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2149            ),
2150            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2151        );
2152
2153        // Adding another validator lockout on a different fork, where the lockout
2154        // covers the last vote, would count towards the switch threshold,
2155        // except that the bank is not the most recent frozen bank on its fork (14 is a
2156        // frozen/computed bank > 13 on the same fork in this case)
2157        vote_simulator.simulate_lockout_interval(13, (12, 47), &other_vote_account);
2158        assert_eq!(
2159            tower.check_switch_threshold(
2160                110,
2161                &ancestors,
2162                &descendants,
2163                &vote_simulator.progress,
2164                total_stake,
2165                bank0.epoch_vote_accounts(0).unwrap(),
2166                &vote_simulator.latest_validator_votes_for_frozen_banks,
2167                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2168            ),
2169            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2170        );
2171
2172        // Adding another validator lockout on a different fork, and the lockout
2173        // covers the last vote, should satisfy the switch threshold
2174        vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
2175        assert_eq!(
2176            tower.check_switch_threshold(
2177                110,
2178                &ancestors,
2179                &descendants,
2180                &vote_simulator.progress,
2181                total_stake,
2182                bank0.epoch_vote_accounts(0).unwrap(),
2183                &vote_simulator.latest_validator_votes_for_frozen_banks,
2184                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2185            ),
2186            SwitchForkDecision::SwitchProof(Hash::default())
2187        );
2188
2189        // Adding another unfrozen descendant of the tip of 14 should not remove
2190        // slot 14 from consideration because it is still the most recent frozen
2191        // bank on its fork
2192        descendants.get_mut(&14).unwrap().insert(10000);
2193        assert_eq!(
2194            tower.check_switch_threshold(
2195                110,
2196                &ancestors,
2197                &descendants,
2198                &vote_simulator.progress,
2199                total_stake,
2200                bank0.epoch_vote_accounts(0).unwrap(),
2201                &vote_simulator.latest_validator_votes_for_frozen_banks,
2202                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2203            ),
2204            SwitchForkDecision::SwitchProof(Hash::default())
2205        );
2206
2207        // If we set a root, then any lockout intervals below the root shouldn't
2208        // count toward the switch threshold. This means the other validator's
2209        // vote lockout no longer counts
2210        tower.vote_state.root_slot = Some(43);
2211        // Refresh ancestors and descendants for new root.
2212        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2213        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2214
2215        assert_eq!(
2216            tower.check_switch_threshold(
2217                110,
2218                &ancestors,
2219                &descendants,
2220                &vote_simulator.progress,
2221                total_stake,
2222                bank0.epoch_vote_accounts(0).unwrap(),
2223                &vote_simulator.latest_validator_votes_for_frozen_banks,
2224                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2225            ),
2226            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2227        );
2228    }
2229
2230    #[test]
2231    fn test_switch_threshold_use_gossip_votes() {
2232        let num_validators = 2;
2233        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2234        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2235        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2236        let mut tower = Tower::default();
2237        let other_vote_account = vote_simulator.vote_pubkeys[1];
2238
2239        // Last vote is 47
2240        tower.record_vote(47, Hash::default());
2241
2242        // Trying to switch to another fork at 110 should fail
2243        assert_eq!(
2244            tower.check_switch_threshold(
2245                110,
2246                &ancestors,
2247                &descendants,
2248                &vote_simulator.progress,
2249                total_stake,
2250                bank0.epoch_vote_accounts(0).unwrap(),
2251                &vote_simulator.latest_validator_votes_for_frozen_banks,
2252                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2253            ),
2254            SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
2255        );
2256
2257        // Adding a vote on the descendant shouldn't count toward the switch threshold
2258        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2259        assert_eq!(
2260            tower.check_switch_threshold(
2261                110,
2262                &ancestors,
2263                &descendants,
2264                &vote_simulator.progress,
2265                total_stake,
2266                bank0.epoch_vote_accounts(0).unwrap(),
2267                &vote_simulator.latest_validator_votes_for_frozen_banks,
2268                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2269            ),
2270            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2271        );
2272
2273        // Adding a later vote from gossip that isn't on the same fork should count toward the
2274        // switch threshold
2275        vote_simulator
2276            .latest_validator_votes_for_frozen_banks
2277            .check_add_vote(
2278                other_vote_account,
2279                112,
2280                Some(
2281                    vote_simulator
2282                        .bank_forks
2283                        .read()
2284                        .unwrap()
2285                        .get(112)
2286                        .unwrap()
2287                        .hash(),
2288                ),
2289                false,
2290            );
2291
2292        assert_eq!(
2293            tower.check_switch_threshold(
2294                110,
2295                &ancestors,
2296                &descendants,
2297                &vote_simulator.progress,
2298                total_stake,
2299                bank0.epoch_vote_accounts(0).unwrap(),
2300                &vote_simulator.latest_validator_votes_for_frozen_banks,
2301                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2302            ),
2303            SwitchForkDecision::SwitchProof(Hash::default())
2304        );
2305
2306        // If we now set a root that causes slot 112 to be purged from BankForks, then
2307        // the switch proof will now fail since that validator's vote can no longer be
2308        // included in the switching proof
2309        vote_simulator.set_root(44);
2310        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2311        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2312        assert_eq!(
2313            tower.check_switch_threshold(
2314                110,
2315                &ancestors,
2316                &descendants,
2317                &vote_simulator.progress,
2318                total_stake,
2319                bank0.epoch_vote_accounts(0).unwrap(),
2320                &vote_simulator.latest_validator_votes_for_frozen_banks,
2321                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
2322            ),
2323            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2324        );
2325    }
2326
2327    #[test]
2328    fn test_switch_threshold_votes() {
2329        // Init state
2330        let mut vote_simulator = VoteSimulator::new(4);
2331        let node_pubkey = vote_simulator.node_pubkeys[0];
2332        let mut tower = Tower::default();
2333        let forks = tr(0)
2334            / (tr(1)
2335                / (tr(2)
2336                    // Minor fork 1
2337                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2338                    / (tr(43)
2339                        / (tr(44)
2340                            // Minor fork 2
2341                            / (tr(45) / (tr(46))))
2342                        / (tr(110)))));
2343
2344        // Have two validators, each representing 20% of the stake, vote on
2345        // minor fork 2 at slots 46 and 47
2346        let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
2347        cluster_votes.insert(vote_simulator.node_pubkeys[1], vec![46]);
2348        cluster_votes.insert(vote_simulator.node_pubkeys[2], vec![47]);
2349        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2350
2351        // Vote on the first minor fork at slot 14, should succeed
2352        assert!(vote_simulator
2353            .simulate_vote(14, &node_pubkey, &mut tower,)
2354            .is_empty());
2355
2356        // The other two validators voted at slots 46, 47, which
2357        // will only both show up in slot 48, at which point
2358        // 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
2359        // on another fork, so switching should succeed
2360        let votes_to_simulate = (46..=48).collect();
2361        let results = vote_simulator.create_and_vote_new_branch(
2362            45,
2363            48,
2364            &cluster_votes,
2365            &votes_to_simulate,
2366            &node_pubkey,
2367            &mut tower,
2368        );
2369        assert_eq!(
2370            *results.get(&46).unwrap(),
2371            vec![HeaviestForkFailures::FailedSwitchThreshold(46, 0, 40000)]
2372        );
2373        assert_eq!(
2374            *results.get(&47).unwrap(),
2375            vec![HeaviestForkFailures::FailedSwitchThreshold(
2376                47, 10000, 40000
2377            )]
2378        );
2379        assert!(results.get(&48).unwrap().is_empty());
2380    }
2381
2382    #[test]
2383    fn test_double_partition() {
2384        // Init state
2385        let mut vote_simulator = VoteSimulator::new(2);
2386        let node_pubkey = vote_simulator.node_pubkeys[0];
2387        let vote_pubkey = vote_simulator.vote_pubkeys[0];
2388        let mut tower = Tower::default();
2389
2390        let num_slots_to_try = 200;
2391        // Create the tree of banks
2392        let forks = tr(0)
2393            / (tr(1)
2394                / (tr(2)
2395                    / (tr(3)
2396                        / (tr(4)
2397                            / (tr(5)
2398                                / (tr(6)
2399                                    / (tr(7)
2400                                        / (tr(8)
2401                                            / (tr(9)
2402                                                // Minor fork 1
2403                                                / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2404                                                / (tr(43)
2405                                                    / (tr(44)
2406                                                        // Minor fork 2
2407                                                        / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
2408                                                        / (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
2409
2410        // Set the successful voting behavior
2411        let mut cluster_votes = HashMap::new();
2412        let mut my_votes: Vec<Slot> = vec![];
2413        let next_unlocked_slot = 110;
2414        // Vote on the first minor fork
2415        my_votes.extend(1..=14);
2416        // Come back to the main fork
2417        my_votes.extend(43..=44);
2418        // Vote on the second minor fork
2419        my_votes.extend(45..=50);
2420        // Vote to come back to main fork
2421        my_votes.push(next_unlocked_slot);
2422        cluster_votes.insert(node_pubkey, my_votes.clone());
2423        // Make the other validator vote on the fork to pass the threshold checks
2424        let other_votes = my_votes.clone();
2425        cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
2426        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2427
2428        // Simulate the votes.
2429        for vote in &my_votes {
2430            // All these votes should be ok
2431            assert!(vote_simulator
2432                .simulate_vote(*vote, &node_pubkey, &mut tower,)
2433                .is_empty());
2434        }
2435
2436        info!("local tower: {:#?}", tower.vote_state.votes);
2437        let observed = vote_simulator
2438            .bank_forks
2439            .read()
2440            .unwrap()
2441            .get(next_unlocked_slot)
2442            .unwrap()
2443            .get_vote_account(&vote_pubkey)
2444            .unwrap();
2445        let state = observed.vote_state_view();
2446        info!("observed tower: {:#?}", state.votes_iter().collect_vec());
2447
2448        let num_slots_to_try = 200;
2449        cluster_votes
2450            .get_mut(&vote_simulator.node_pubkeys[1])
2451            .unwrap()
2452            .extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
2453        assert!(vote_simulator.can_progress_on_fork(
2454            &node_pubkey,
2455            &mut tower,
2456            next_unlocked_slot,
2457            num_slots_to_try,
2458            &mut cluster_votes,
2459        ));
2460    }
2461
2462    #[test]
2463    fn test_collect_vote_lockouts_sums() {
2464        // two accounts voting for slot 0 with 1 token staked each
2465        let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
2466        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2467            .iter()
2468            .sorted_by_key(|(pk, _)| *pk)
2469            .map(|(pubkey, _)| (*pubkey, (0, Hash::default())))
2470            .collect();
2471
2472        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
2473            .into_iter()
2474            .collect();
2475        let mut latest_validator_votes_for_frozen_banks =
2476            LatestValidatorVotesForFrozenBanks::default();
2477        let ComputedBankState {
2478            voted_stakes,
2479            total_stake,
2480            ..
2481        } = Tower::collect_vote_lockouts(
2482            &Pubkey::default(),
2483            1,
2484            &accounts,
2485            &ancestors,
2486            |_| Some(Hash::default()),
2487            &mut latest_validator_votes_for_frozen_banks,
2488        );
2489        assert_eq!(voted_stakes[&0], 2);
2490        assert_eq!(total_stake, 2);
2491        let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0);
2492        new_votes.sort();
2493        assert_eq!(new_votes, account_latest_votes);
2494    }
2495
2496    #[test]
2497    fn test_collect_vote_lockouts_root() {
2498        let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
2499        // two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked each
2500        let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
2501        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2502            .iter()
2503            .sorted_by_key(|(pk, _)| *pk)
2504            .map(|(pubkey, _)| {
2505                (
2506                    *pubkey,
2507                    ((MAX_LOCKOUT_HISTORY - 1) as Slot, Hash::default()),
2508                )
2509            })
2510            .collect();
2511        let mut tower = Tower::new_for_tests(0, 0.67);
2512        let mut ancestors = HashMap::new();
2513        for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
2514            tower.record_vote(i as u64, Hash::default());
2515            ancestors.insert(i as u64, (0..i as u64).collect());
2516        }
2517        let root = Lockout::new_with_confirmation_count(0, MAX_LOCKOUT_HISTORY as u32);
2518        let expected_bank_stake = 2;
2519        let expected_total_stake = 2;
2520        assert_eq!(tower.vote_state.root_slot, Some(0));
2521        let mut latest_validator_votes_for_frozen_banks =
2522            LatestValidatorVotesForFrozenBanks::default();
2523        let ComputedBankState {
2524            voted_stakes,
2525            fork_stake,
2526            total_stake,
2527            ..
2528        } = Tower::collect_vote_lockouts(
2529            &Pubkey::default(),
2530            MAX_LOCKOUT_HISTORY as u64,
2531            &accounts,
2532            &ancestors,
2533            |_| Some(Hash::default()),
2534            &mut latest_validator_votes_for_frozen_banks,
2535        );
2536        for i in 0..MAX_LOCKOUT_HISTORY {
2537            assert_eq!(voted_stakes[&(i as u64)], 2);
2538        }
2539
2540        // should be the sum of all voted stake on the fork
2541        assert_eq!(fork_stake, expected_bank_stake);
2542        assert_eq!(total_stake, expected_total_stake);
2543        let mut new_votes =
2544            latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root.slot());
2545        new_votes.sort();
2546        assert_eq!(new_votes, account_latest_votes);
2547    }
2548
2549    #[test]
2550    fn test_check_vote_threshold_without_votes() {
2551        let tower = Tower::new_for_tests(1, 0.67);
2552        let stakes = vec![(0, 1)].into_iter().collect();
2553        assert!(tower.check_vote_stake_thresholds(0, &stakes, 2).is_empty());
2554    }
2555
2556    #[test]
2557    fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
2558        solana_logger::setup();
2559        let mut tower = Tower::new_for_tests(4, 0.67);
2560        let mut stakes = HashMap::new();
2561        for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
2562            stakes.insert(i, 1);
2563            tower.record_vote(i, Hash::default());
2564        }
2565        assert!(!tower
2566            .check_vote_stake_thresholds(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2)
2567            .is_empty());
2568    }
2569
2570    #[test]
2571    fn test_is_slot_confirmed_not_enough_stake_failure() {
2572        let tower = Tower::new_for_tests(1, 0.67);
2573        let stakes = vec![(0, 1)].into_iter().collect();
2574        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2575    }
2576
2577    #[test]
2578    fn test_is_slot_confirmed_unknown_slot() {
2579        let tower = Tower::new_for_tests(1, 0.67);
2580        let stakes = HashMap::new();
2581        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2582    }
2583
2584    #[test]
2585    fn test_is_slot_confirmed_pass() {
2586        let tower = Tower::new_for_tests(1, 0.67);
2587        let stakes = vec![(0, 2)].into_iter().collect();
2588        assert!(tower.is_slot_confirmed(0, &stakes, 2));
2589    }
2590
2591    #[test]
2592    fn test_is_slot_duplicate_confirmed_not_enough_stake_failure() {
2593        let tower = Tower::new_for_tests(1, 0.67);
2594        let stakes = vec![(0, 52)].into_iter().collect();
2595        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2596    }
2597
2598    #[test]
2599    fn test_is_slot_duplicate_confirmed_unknown_slot() {
2600        let tower = Tower::new_for_tests(1, 0.67);
2601        let stakes = HashMap::new();
2602        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2603    }
2604
2605    #[test]
2606    fn test_is_slot_duplicate_confirmed_pass() {
2607        let tower = Tower::new_for_tests(1, 0.67);
2608        let stakes = vec![(0, 53)].into_iter().collect();
2609        assert!(tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2610    }
2611
2612    #[test]
2613    fn test_is_locked_out_empty() {
2614        let tower = Tower::new_for_tests(0, 0.67);
2615        let ancestors = HashSet::from([0]);
2616        assert!(!tower.is_locked_out(1, &ancestors));
2617    }
2618
2619    #[test]
2620    fn test_is_locked_out_root_slot_child_pass() {
2621        let mut tower = Tower::new_for_tests(0, 0.67);
2622        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2623        tower.vote_state.root_slot = Some(0);
2624        assert!(!tower.is_locked_out(1, &ancestors));
2625    }
2626
2627    #[test]
2628    fn test_is_locked_out_root_slot_sibling_fail() {
2629        let mut tower = Tower::new_for_tests(0, 0.67);
2630        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2631        tower.vote_state.root_slot = Some(0);
2632        tower.record_vote(1, Hash::default());
2633        assert!(tower.is_locked_out(2, &ancestors));
2634    }
2635
2636    #[test]
2637    fn test_check_already_voted() {
2638        let mut tower = Tower::new_for_tests(0, 0.67);
2639        tower.record_vote(0, Hash::default());
2640        assert!(tower.has_voted(0));
2641        assert!(!tower.has_voted(1));
2642    }
2643
2644    #[test]
2645    fn test_check_recent_slot() {
2646        let mut tower = Tower::new_for_tests(0, 0.67);
2647        assert!(tower.is_recent(1));
2648        assert!(tower.is_recent(32));
2649        for i in 0..64 {
2650            tower.record_vote(i, Hash::default());
2651        }
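        // After voting on slots 0..64, slots at or below the last voted slot (63) are no
        // longer considered recent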
2652        assert!(!tower.is_recent(0));
2653        assert!(!tower.is_recent(32));
2654        assert!(!tower.is_recent(63));
2655        assert!(tower.is_recent(65));
2656    }
2657
2658    #[test]
2659    fn test_is_locked_out_double_vote() {
2660        let mut tower = Tower::new_for_tests(0, 0.67);
2661        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2662        tower.record_vote(0, Hash::default());
2663        tower.record_vote(1, Hash::default());
2664        assert!(tower.is_locked_out(0, &ancestors));
2665    }
2666
2667    #[test]
2668    fn test_is_locked_out_child() {
2669        let mut tower = Tower::new_for_tests(0, 0.67);
2670        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2671        tower.record_vote(0, Hash::default());
2672        assert!(!tower.is_locked_out(1, &ancestors));
2673    }
2674
2675    #[test]
2676    fn test_is_locked_out_sibling() {
2677        let mut tower = Tower::new_for_tests(0, 0.67);
2678        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2679        tower.record_vote(0, Hash::default());
2680        tower.record_vote(1, Hash::default());
2681        assert!(tower.is_locked_out(2, &ancestors));
2682    }
2683
2684    #[test]
2685    fn test_is_locked_out_last_vote_expired() {
2686        let mut tower = Tower::new_for_tests(0, 0.67);
2687        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2688        tower.record_vote(0, Hash::default());
2689        tower.record_vote(1, Hash::default());
2690        assert!(!tower.is_locked_out(4, &ancestors));
2691        tower.record_vote(4, Hash::default());
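        // The vote for slot 1 (confirmation count 1, lockout expired after slot 3) is popped,
        // leaving only the votes for slots 0 and 4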
2692        assert_eq!(tower.vote_state.votes[0].slot(), 0);
2693        assert_eq!(tower.vote_state.votes[0].confirmation_count(), 2);
2694        assert_eq!(tower.vote_state.votes[1].slot(), 4);
2695        assert_eq!(tower.vote_state.votes[1].confirmation_count(), 1);
2696    }
2697
2698    #[test]
2699    fn test_check_vote_threshold_below_threshold() {
2700        let mut tower = Tower::new_for_tests(1, 0.67);
2701        let stakes = vec![(0, 1)].into_iter().collect();
2702        tower.record_vote(0, Hash::default());
2703        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2704    }
2705    #[test]
2706    fn test_check_vote_threshold_above_threshold() {
2707        let mut tower = Tower::new_for_tests(1, 0.67);
2708        let stakes = vec![(0, 2)].into_iter().collect();
2709        tower.record_vote(0, Hash::default());
2710        assert!(tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2711    }
2712
2713    #[test]
2714    fn test_check_vote_thresholds_above_thresholds() {
2715        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2716        let stakes = vec![
2717            (0, 3),
2718            (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 2),
2719            ((VOTE_THRESHOLD_DEPTH_SHALLOW as u64) - 1, 2),
2720        ]
2721        .into_iter()
2722        .collect();
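        // Stake is supplied at the slots examined by both the deep and the shallow threshold
        // checks, so no threshold failures are expected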
2723        for slot in 0..VOTE_THRESHOLD_DEPTH {
2724            tower.record_vote(slot as Slot, Hash::default());
2725        }
2726        assert!(tower
2727            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 4)
2728            .is_empty());
2729    }
2730
2731    #[test]
2732    fn test_check_vote_threshold_deep_below_threshold() {
2733        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2734        let stakes = vec![(0, 6), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 4)]
2735            .into_iter()
2736            .collect();
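        // The deep threshold slot (0) has only 6/10 of the stake, below the 0.67 threshold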
2737        for slot in 0..VOTE_THRESHOLD_DEPTH {
2738            tower.record_vote(slot as Slot, Hash::default());
2739        }
2740        assert!(!tower
2741            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2742            .is_empty());
2743    }
2744
2745    #[test]
2746    fn test_check_vote_threshold_shallow_below_threshold() {
2747        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2748        let stakes = vec![(0, 7), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 1)]
2749            .into_iter()
2750            .collect();
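        // Slot 0 clears the deep threshold with 7/10, but the shallow threshold slot has
        // only 1/10 of the stake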
2751        for slot in 0..VOTE_THRESHOLD_DEPTH {
2752            tower.record_vote(slot as Slot, Hash::default());
2753        }
2754        assert!(!tower
2755            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2756            .is_empty());
2757    }
2758
2759    #[test]
2760    fn test_check_vote_threshold_above_threshold_after_pop() {
2761        let mut tower = Tower::new_for_tests(1, 0.67);
2762        let stakes = vec![(0, 2)].into_iter().collect();
2763        tower.record_vote(0, Hash::default());
2764        tower.record_vote(1, Hash::default());
2765        tower.record_vote(2, Hash::default());
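        // Simulating a vote on slot 6 expires the lockouts for slots 1 and 2, so the
        // depth-1 vote is slot 0, which holds 2/2 of the stake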
2766        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2767    }
2768
2769    #[test]
2770    fn test_check_vote_threshold_above_threshold_no_stake() {
2771        let mut tower = Tower::new_for_tests(1, 0.67);
2772        let stakes = HashMap::new();
2773        tower.record_vote(0, Hash::default());
2774        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2775    }
2776
2777    #[test]
2778    fn test_check_vote_threshold_lockouts_not_updated() {
2779        solana_logger::setup();
2780        let mut tower = Tower::new_for_tests(1, 0.67);
2781        let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
2782        tower.record_vote(0, Hash::default());
2783        tower.record_vote(1, Hash::default());
2784        tower.record_vote(2, Hash::default());
2785        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2786    }
2787
2788    #[test]
2789    fn test_stake_is_updated_for_entire_branch() {
2790        let mut voted_stakes = HashMap::new();
2791        let account = AccountSharedData::from(Account {
2792            lamports: 1,
2793            ..Account::default()
2794        });
2795        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
2796        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
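        // Crediting the account's 1 lamport for a vote on slot 2 should also credit
        // slot 2's ancestors, slots 0 and 1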
2797        Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports(), &ancestors);
2798        assert_eq!(voted_stakes[&0], 1);
2799        assert_eq!(voted_stakes[&1], 1);
2800        assert_eq!(voted_stakes[&2], 1);
2801    }
2802
2803    #[test]
2804    fn test_check_vote_threshold_forks() {
2805        // Create the ancestor relationships
2806        let ancestors = (0..=(VOTE_THRESHOLD_DEPTH + 1) as u64)
2807            .map(|slot| {
2808                let slot_parents: HashSet<_> = (0..slot).collect();
2809                (slot, slot_parents)
2810            })
2811            .collect();
2812
2813        // Create votes such that
2814        // 1) 3/4 of the stake has voted on slot: VOTE_THRESHOLD_DEPTH - 2, lockout: 2
2815        // 2) 1/4 of the stake has voted on the full tower of slots 0..VOTE_THRESHOLD_DEPTH
2816        let total_stake = 4;
2817        let threshold_size = 0.67;
2818        let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
2819        let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
2820        let accounts = gen_stakes(&[
2821            (threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
2822            (total_stake - threshold_stake, &tower_votes[..]),
2823        ]);
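        // threshold_stake = ceil(4 * 0.67) = 3, so 3 units of stake vote only for slot
        // VOTE_THRESHOLD_DEPTH - 2 and the remaining 1 unit votes for the full tower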
2824
2825        // Initialize tower
2826        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, threshold_size);
2827
2828        // CASE 1: Record the first VOTE_THRESHOLD_DEPTH tower votes. We want to evaluate a
2829        // vote on slot VOTE_THRESHOLD_DEPTH. The vote at the threshold depth should be for
2830        // slot 0, which is common to all account vote states, so we should pass the
2831        // threshold check.
2832        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
2833        for vote in &tower_votes {
2834            tower.record_vote(*vote, Hash::default());
2835        }
2836        let ComputedBankState {
2837            voted_stakes,
2838            total_stake,
2839            ..
2840        } = Tower::collect_vote_lockouts(
2841            &Pubkey::default(),
2842            vote_to_evaluate,
2843            &accounts,
2844            &ancestors,
2845            |_| None,
2846            &mut LatestValidatorVotesForFrozenBanks::default(),
2847        );
2848        assert!(tower
2849            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2850            .is_empty());
2851
2852        // CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
2853        // will expire the vote in one of the vote accounts, so we should have insufficient
2854        // stake to pass the threshold
2855        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
2856        let ComputedBankState {
2857            voted_stakes,
2858            total_stake,
2859            ..
2860        } = Tower::collect_vote_lockouts(
2861            &Pubkey::default(),
2862            vote_to_evaluate,
2863            &accounts,
2864            &ancestors,
2865            |_| None,
2866            &mut LatestValidatorVotesForFrozenBanks::default(),
2867        );
2868        assert!(!tower
2869            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2870            .is_empty());
2871    }
2872
2873    fn vote_and_check_recent(num_votes: usize) {
2874        let mut tower = Tower::new_for_tests(1, 0.67);
2875        let slots = if num_votes > 0 {
2876            (0..num_votes)
2877                .map(|i| {
2878                    Lockout::new_with_confirmation_count(i as Slot, (num_votes as u32) - (i as u32))
2879                })
2880                .collect()
2881        } else {
2882            vec![]
2883        };
2884        let mut expected = TowerSync::new(
2885            VecDeque::from(slots),
2886            if num_votes > 0 { Some(0) } else { None },
2887            Hash::default(),
2888            Hash::default(),
2889        );
2890        for i in 0..num_votes {
2891            tower.record_vote(i as u64, Hash::default());
2892        }
2893
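        // The tower may attach a timestamp to its last vote, so copy it into the expected
        // TowerSync before comparing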
2894        expected.timestamp = tower.last_vote.timestamp();
2895        assert_eq!(VoteTransaction::from(expected), tower.last_vote)
2896    }
2897
2898    #[test]
2899    fn test_recent_votes_full() {
2900        vote_and_check_recent(MAX_LOCKOUT_HISTORY)
2901    }
2902
2903    #[test]
2904    fn test_recent_votes_empty() {
2905        vote_and_check_recent(0)
2906    }
2907
2908    #[test]
2909    fn test_recent_votes_exact() {
2910        vote_and_check_recent(5)
2911    }
2912
2913    #[test]
2914    fn test_maybe_timestamp() {
2915        let mut tower = Tower::default();
2916        assert!(tower.maybe_timestamp(0).is_some());
2917        assert!(tower.maybe_timestamp(1).is_some());
2918        assert!(tower.maybe_timestamp(0).is_none()); // Refuse to timestamp an older slot
2919        assert!(tower.maybe_timestamp(1).is_none()); // Refuse to timestamp the same slot twice
2920
2921        tower.last_timestamp.timestamp -= 1; // Move last_timestamp into the past
2922        assert!(tower.maybe_timestamp(2).is_some()); // slot 2 gets a timestamp
2923
2924        tower.last_timestamp.timestamp += 1_000_000; // Move last_timestamp well into the future
2925        assert!(tower.maybe_timestamp(3).is_none()); // slot 3 gets no timestamp
2926    }
2927
2928    #[test]
2929    fn test_refresh_last_vote_timestamp() {
2930        let mut tower = Tower::default();
2931
2932        // Tower has no vote or timestamp
2933        tower.last_vote.set_timestamp(None);
2934        tower.refresh_last_vote_timestamp(5);
2935        assert_eq!(tower.last_vote.timestamp(), None);
2936        assert_eq!(tower.last_timestamp.slot, 0);
2937        assert_eq!(tower.last_timestamp.timestamp, 0);
2938
2939        // Tower has a vote with no timestamp, but its last voted slot (6) is greater than the heaviest bank slot (5)
2940        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (6, 1)]));
2941        assert_eq!(tower.last_vote.timestamp(), None);
2942        tower.refresh_last_vote_timestamp(5);
2943        assert_eq!(tower.last_vote.timestamp(), None);
2944        assert_eq!(tower.last_timestamp.slot, 0);
2945        assert_eq!(tower.last_timestamp.timestamp, 0);
2946
2947        // Tower has vote with no timestamp
2948        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
2949        assert_eq!(tower.last_vote.timestamp(), None);
2950        tower.refresh_last_vote_timestamp(5);
2951        assert_eq!(tower.last_vote.timestamp(), Some(1));
2952        assert_eq!(tower.last_timestamp.slot, 2);
2953        assert_eq!(tower.last_timestamp.timestamp, 1);
2954
2955        // Vote has timestamp
2956        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
2957        tower.refresh_last_vote_timestamp(5);
2958        assert_eq!(tower.last_vote.timestamp(), Some(2));
2959        assert_eq!(tower.last_timestamp.slot, 2);
2960        assert_eq!(tower.last_timestamp.timestamp, 2);
2961    }
2962
2963    fn run_test_load_tower_snapshot<F, G>(
2964        modify_original: F,
2965        modify_serialized: G,
2966    ) -> (Tower, Result<Tower>)
2967    where
2968        F: Fn(&mut Tower, &Pubkey),
2969        G: Fn(&PathBuf),
2970    {
2971        let tower_path = TempDir::new().unwrap();
2972        let identity_keypair = Arc::new(Keypair::new());
2973        let node_pubkey = identity_keypair.pubkey();
2974
2975        // Use values that will not match the default derived from BankForks
2976        let mut tower = Tower::new_for_tests(10, 0.9);
2977
2978        let tower_storage = FileTowerStorage::new(tower_path.path().to_path_buf());
2979
2980        modify_original(&mut tower, &node_pubkey);
2981
2982        tower.save(&tower_storage, &identity_keypair).unwrap();
2983        modify_serialized(&tower_storage.filename(&node_pubkey));
2984        let loaded = Tower::restore(&tower_storage, &node_pubkey);
2985
2986        (tower, loaded)
2987    }
2988
2989    #[test]
2990    fn test_switch_threshold_across_tower_reload() {
2991        solana_logger::setup();
2992        // Init state
2993        let mut vote_simulator = VoteSimulator::new(2);
2994        let other_vote_account = vote_simulator.vote_pubkeys[1];
2995        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
2996        let total_stake = bank0.total_epoch_stake();
2997        assert_eq!(
2998            total_stake,
2999            vote_simulator.validator_keypairs.len() as u64 * 10_000
3000        );
3001
3002        // Create the tree of banks
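        //          /- 10
        // 0 - 1 - 2 - 43 - 44 - 45 - 46 - 47 - 48 - 49 - 50
        //                     \- 110 - 111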
3003        let forks = tr(0)
3004            / (tr(1)
3005                / (tr(2)
3006                    / tr(10)
3007                    / (tr(43)
3008                        / (tr(44)
3009                            // Minor fork 2
3010                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3011                            / (tr(110) / tr(111))))));
3012
3013        // Fill the BankForks according to the above fork structure
3014        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3015        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3016            fork_progress.fork_stats.computed = true;
3017        }
3018
3019        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3020        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3021        let mut tower = Tower::default();
3022
3023        tower.record_vote(43, Hash::default());
3024        tower.record_vote(44, Hash::default());
3025        tower.record_vote(45, Hash::default());
3026        tower.record_vote(46, Hash::default());
3027        tower.record_vote(47, Hash::default());
3028        tower.record_vote(48, Hash::default());
3029        tower.record_vote(49, Hash::default());
3030
3031        // Trying to switch to a descendant of last vote should always work
3032        assert_eq!(
3033            tower.check_switch_threshold(
3034                50,
3035                &ancestors,
3036                &descendants,
3037                &vote_simulator.progress,
3038                total_stake,
3039                bank0.epoch_vote_accounts(0).unwrap(),
3040                &vote_simulator.latest_validator_votes_for_frozen_banks,
3041                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3042            ),
3043            SwitchForkDecision::SameFork
3044        );
3045
3046        // Trying to switch to another fork at 110 should fail
3047        assert_eq!(
3048            tower.check_switch_threshold(
3049                110,
3050                &ancestors,
3051                &descendants,
3052                &vote_simulator.progress,
3053                total_stake,
3054                bank0.epoch_vote_accounts(0).unwrap(),
3055                &vote_simulator.latest_validator_votes_for_frozen_banks,
3056                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3057            ),
3058            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3059        );
3060
3061        vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
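        // The other validator is locked out on the 110 fork over an interval that covers our
        // last vote (49), which supplies the stake needed for a switch proof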
3062
3063        assert_eq!(
3064            tower.check_switch_threshold(
3065                110,
3066                &ancestors,
3067                &descendants,
3068                &vote_simulator.progress,
3069                total_stake,
3070                bank0.epoch_vote_accounts(0).unwrap(),
3071                &vote_simulator.latest_validator_votes_for_frozen_banks,
3072                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3073            ),
3074            SwitchForkDecision::SwitchProof(Hash::default())
3075        );
3076
3077        assert_eq!(tower.voted_slots(), vec![43, 44, 45, 46, 47, 48, 49]);
3078        {
3079            let mut tower = tower.clone();
3080            tower.record_vote(110, Hash::default());
3081            tower.record_vote(111, Hash::default());
3082            assert_eq!(tower.voted_slots(), vec![43, 110, 111]);
3083            assert_eq!(tower.vote_state.root_slot, Some(0));
3084        }
3085
3086        // Prepare simulated validator restart!
3087        let mut vote_simulator = VoteSimulator::new(2);
3088        let other_vote_account = vote_simulator.vote_pubkeys[1];
3089        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3090        let total_stake = bank0.total_epoch_stake();
3091        let forks = tr(0)
3092            / (tr(1)
3093                / (tr(2)
3094                    / tr(10)
3095                    / (tr(43)
3096                        / (tr(44)
3097                            // Minor fork 2
3098                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3099                            / (tr(110) / tr(111))))));
3100        let replayed_root_slot = 44;
3101
3102        // Fill the BankForks according to the above fork structure
3103        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3104        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3105            fork_progress.fork_stats.computed = true;
3106        }
3107
3108        // Pretend the tower is being restored after the restart!
3109        let mut slot_history = SlotHistory::default();
3110        vote_simulator.set_root(replayed_root_slot);
3111        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3112        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3113        for slot in &[0, 1, 2, 43, replayed_root_slot] {
3114            slot_history.add(*slot);
3115        }
3116        let mut tower = tower
3117            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3118            .unwrap();
3119
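        // Votes at or below the new root (43 and 44) are dropped; the stray votes on 45..=49
        // survive the reload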
3120        assert_eq!(tower.voted_slots(), vec![45, 46, 47, 48, 49]);
3121
3122        // Trying to switch to another fork at 110 should fail
3123        assert_eq!(
3124            tower.check_switch_threshold(
3125                110,
3126                &ancestors,
3127                &descendants,
3128                &vote_simulator.progress,
3129                total_stake,
3130                bank0.epoch_vote_accounts(0).unwrap(),
3131                &vote_simulator.latest_validator_votes_for_frozen_banks,
3132                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3133            ),
3134            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3135        );
3136
3137        // Add lockout_interval which should be excluded
3138        vote_simulator.simulate_lockout_interval(111, (45, 50), &other_vote_account);
3139        assert_eq!(
3140            tower.check_switch_threshold(
3141                110,
3142                &ancestors,
3143                &descendants,
3144                &vote_simulator.progress,
3145                total_stake,
3146                bank0.epoch_vote_accounts(0).unwrap(),
3147                &vote_simulator.latest_validator_votes_for_frozen_banks,
3148                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3149            ),
3150            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
3151        );
3152
3153        // Add lockout_interval which should not be excluded
3154        vote_simulator.simulate_lockout_interval(111, (110, 200), &other_vote_account);
3155        assert_eq!(
3156            tower.check_switch_threshold(
3157                110,
3158                &ancestors,
3159                &descendants,
3160                &vote_simulator.progress,
3161                total_stake,
3162                bank0.epoch_vote_accounts(0).unwrap(),
3163                &vote_simulator.latest_validator_votes_for_frozen_banks,
3164                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3165            ),
3166            SwitchForkDecision::SwitchProof(Hash::default())
3167        );
3168
3169        tower.record_vote(110, Hash::default());
3170        tower.record_vote(111, Hash::default());
3171        assert_eq!(tower.voted_slots(), vec![110, 111]);
3172        assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot));
3173    }
3174
3175    #[test]
3176    fn test_load_tower_ok() {
3177        let (tower, loaded) =
3178            run_test_load_tower_snapshot(|tower, pubkey| tower.node_pubkey = *pubkey, |_| ());
3179        let loaded = loaded.unwrap();
3180        assert_eq!(loaded, tower);
3181        assert_eq!(tower.threshold_depth, 10);
3182        assert!((tower.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3183        assert_eq!(loaded.threshold_depth, 10);
3184        assert!((loaded.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3185    }
3186
3187    #[test]
3188    fn test_load_tower_wrong_identity() {
3189        let identity_keypair = Arc::new(Keypair::new());
3190        let tower = Tower::default();
3191        let tower_storage = FileTowerStorage::default();
3192        assert_matches!(
3193            tower.save(&tower_storage, &identity_keypair),
3194            Err(TowerError::WrongTower(_))
3195        )
3196    }
3197
3198    #[test]
3199    fn test_load_tower_invalid_signature() {
3200        let (_, loaded) = run_test_load_tower_snapshot(
3201            |tower, pubkey| tower.node_pubkey = *pubkey,
3202            |path| {
3203                let mut file = OpenOptions::new()
3204                    .read(true)
3205                    .write(true)
3206                    .open(path)
3207                    .unwrap();
3208                // 4 is the offset into SavedTowerVersions for the signature
3209                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3210                let mut buf = [0u8];
3211                assert_eq!(file.read(&mut buf).unwrap(), 1);
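                // Invert one byte of the signature and write it back in place so the
                // restored tower fails signature verification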
3212                buf[0] = !buf[0];
3213                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3214                assert_eq!(file.write(&buf).unwrap(), 1);
3215            },
3216        );
3217        assert_matches!(loaded, Err(TowerError::InvalidSignature))
3218    }
3219
3220    #[test]
3221    fn test_load_tower_deser_failure() {
3222        let (_, loaded) = run_test_load_tower_snapshot(
3223            |tower, pubkey| tower.node_pubkey = *pubkey,
3224            |path| {
3225                OpenOptions::new()
3226                    .write(true)
3227                    .truncate(true)
3228                    .open(path)
3229                    .unwrap_or_else(|_| panic!("Failed to truncate file: {path:?}"));
3230            },
3231        );
3232        assert_matches!(loaded, Err(TowerError::SerializeError(_)))
3233    }
3234
3235    #[test]
3236    fn test_load_tower_missing() {
3237        let (_, loaded) = run_test_load_tower_snapshot(
3238            |tower, pubkey| tower.node_pubkey = *pubkey,
3239            |path| {
3240                remove_file(path).unwrap();
3241            },
3242        );
3243        assert_matches!(loaded, Err(TowerError::IoError(_)))
3244    }
3245
3246    #[test]
3247    fn test_reconcile_blockstore_roots_with_tower_normal() {
3248        solana_logger::setup();
3249        let ledger_path = get_tmp_ledger_path_auto_delete!();
3250        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3251
3252        let (shreds, _) = make_slot_entries(1, 0, 42);
3253        blockstore.insert_shreds(shreds, None, false).unwrap();
3254        let (shreds, _) = make_slot_entries(3, 1, 42);
3255        blockstore.insert_shreds(shreds, None, false).unwrap();
3256        let (shreds, _) = make_slot_entries(4, 1, 42);
3257        blockstore.insert_shreds(shreds, None, false).unwrap();
3258        assert!(!blockstore.is_root(0));
3259        assert!(!blockstore.is_root(1));
3260        assert!(!blockstore.is_root(3));
3261        assert!(!blockstore.is_root(4));
3262
3263        let mut tower = Tower::default();
3264        tower.vote_state.root_slot = Some(4);
3265        reconcile_blockstore_roots_with_external_source(
3266            ExternalRootSource::Tower(tower.root()),
3267            &blockstore,
3268            &mut blockstore.max_root(),
3269        )
3270        .unwrap();
3271
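        // The tower root (4) and its ancestor 1 are now marked as roots; slot 3 sits on a
        // different fork and slot 0 (the previous max root) is left untouched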
3272        assert!(!blockstore.is_root(0));
3273        assert!(blockstore.is_root(1));
3274        assert!(!blockstore.is_root(3));
3275        assert!(blockstore.is_root(4));
3276    }
3277
3278    #[test]
3279    #[should_panic(
3280        expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \
3281                    1) from external root (Tower(4))!?"
3282    )]
3283    fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() {
3284        solana_logger::setup();
3285        let ledger_path = get_tmp_ledger_path_auto_delete!();
3286        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3287
3288        let (shreds, _) = make_slot_entries(1, 0, 42);
3289        blockstore.insert_shreds(shreds, None, false).unwrap();
3290        let (shreds, _) = make_slot_entries(3, 1, 42);
3291        blockstore.insert_shreds(shreds, None, false).unwrap();
3292        let (shreds, _) = make_slot_entries(4, 1, 42);
3293        blockstore.insert_shreds(shreds, None, false).unwrap();
3294        blockstore.set_roots(std::iter::once(&3)).unwrap();
3295        assert!(!blockstore.is_root(0));
3296        assert!(!blockstore.is_root(1));
3297        assert!(blockstore.is_root(3));
3298        assert!(!blockstore.is_root(4));
3299
3300        let mut tower = Tower::default();
3301        tower.vote_state.root_slot = Some(4);
3302        reconcile_blockstore_roots_with_external_source(
3303            ExternalRootSource::Tower(tower.root()),
3304            &blockstore,
3305            &mut blockstore.max_root(),
3306        )
3307        .unwrap();
3308    }
3309
3310    #[test]
3311    fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
3312        solana_logger::setup();
3313        let ledger_path = get_tmp_ledger_path_auto_delete!();
3314        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3315
3316        let (shreds, _) = make_slot_entries(1, 0, 42);
3317        blockstore.insert_shreds(shreds, None, false).unwrap();
3318        let (shreds, _) = make_slot_entries(3, 1, 42);
3319        blockstore.insert_shreds(shreds, None, false).unwrap();
3320        assert!(!blockstore.is_root(0));
3321        assert!(!blockstore.is_root(1));
3322        assert!(!blockstore.is_root(3));
3323
3324        let mut tower = Tower::default();
3325        tower.vote_state.root_slot = Some(4);
3326        assert_eq!(blockstore.max_root(), 0);
3327        reconcile_blockstore_roots_with_external_source(
3328            ExternalRootSource::Tower(tower.root()),
3329            &blockstore,
3330            &mut blockstore.max_root(),
3331        )
3332        .unwrap();
3333        assert_eq!(blockstore.max_root(), 0);
3334    }
3335
3336    #[test]
3337    fn test_adjust_lockouts_after_replay_future_slots() {
3338        solana_logger::setup();
3339        let mut tower = Tower::new_for_tests(10, 0.9);
3340        tower.record_vote(0, Hash::default());
3341        tower.record_vote(1, Hash::default());
3342        tower.record_vote(2, Hash::default());
3343        tower.record_vote(3, Hash::default());
3344
3345        let mut slot_history = SlotHistory::default();
3346        slot_history.add(0);
3347        slot_history.add(1);
3348
3349        let replayed_root_slot = 1;
3350        tower = tower
3351            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3352            .unwrap();
3353
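        // The votes on slots 2 and 3 are ahead of the replayed root, so they are kept as
        // stray votes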
3354        assert_eq!(tower.voted_slots(), vec![2, 3]);
3355        assert_eq!(tower.root(), replayed_root_slot);
3356
3357        tower = tower
3358            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3359            .unwrap();
3360        assert_eq!(tower.voted_slots(), vec![2, 3]);
3361        assert_eq!(tower.root(), replayed_root_slot);
3362    }
3363
3364    #[test]
3365    fn test_adjust_lockouts_after_replay_not_found_slots() {
3366        let mut tower = Tower::new_for_tests(10, 0.9);
3367        tower.record_vote(0, Hash::default());
3368        tower.record_vote(1, Hash::default());
3369        tower.record_vote(2, Hash::default());
3370        tower.record_vote(3, Hash::default());
3371
3372        let mut slot_history = SlotHistory::default();
3373        slot_history.add(0);
3374        slot_history.add(1);
3375        slot_history.add(4);
3376
3377        let replayed_root_slot = 4;
3378        tower = tower
3379            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3380            .unwrap();
3381
3382        assert_eq!(tower.voted_slots(), vec![2, 3]);
3383        assert_eq!(tower.root(), replayed_root_slot);
3384    }
3385
3386    #[test]
3387    fn test_adjust_lockouts_after_replay_all_rooted_with_no_too_old() {
3388        let mut tower = Tower::new_for_tests(10, 0.9);
3389        tower.record_vote(0, Hash::default());
3390        tower.record_vote(1, Hash::default());
3391        tower.record_vote(2, Hash::default());
3392
3393        let mut slot_history = SlotHistory::default();
3394        slot_history.add(0);
3395        slot_history.add(1);
3396        slot_history.add(2);
3397        slot_history.add(3);
3398        slot_history.add(4);
3399        slot_history.add(5);
3400
3401        let replayed_root_slot = 5;
3402        tower = tower
3403            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3404            .unwrap();
3405
3406        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3407        assert_eq!(tower.root(), replayed_root_slot);
3408        assert_eq!(tower.stray_restored_slot, None);
3409    }
3410
3411    #[test]
3412    fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
3413        use solana_slot_history::MAX_ENTRIES;
3414
3415        let mut tower = Tower::new_for_tests(10, 0.9);
3416        tower.record_vote(0, Hash::default());
3417        tower.record_vote(1, Hash::default());
3418        tower.record_vote(2, Hash::default());
3419
3420        let mut slot_history = SlotHistory::default();
3421        slot_history.add(0);
3422        slot_history.add(1);
3423        slot_history.add(2);
3424        slot_history.add(MAX_ENTRIES);
3425
3426        tower = tower
3427            .adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
3428            .unwrap();
3429        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3430        assert_eq!(tower.root(), MAX_ENTRIES);
3431    }
3432
3433    #[test]
3434    fn test_adjust_lockouts_after_replay_anchored_future_slots() {
3435        let mut tower = Tower::new_for_tests(10, 0.9);
3436        tower.record_vote(0, Hash::default());
3437        tower.record_vote(1, Hash::default());
3438        tower.record_vote(2, Hash::default());
3439        tower.record_vote(3, Hash::default());
3440        tower.record_vote(4, Hash::default());
3441
3442        let mut slot_history = SlotHistory::default();
3443        slot_history.add(0);
3444        slot_history.add(1);
3445        slot_history.add(2);
3446
3447        let replayed_root_slot = 2;
3448        tower = tower
3449            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3450            .unwrap();
3451
3452        assert_eq!(tower.voted_slots(), vec![3, 4]);
3453        assert_eq!(tower.root(), replayed_root_slot);
3454    }
3455
3456    #[test]
3457    fn test_adjust_lockouts_after_replay_all_not_found() {
3458        let mut tower = Tower::new_for_tests(10, 0.9);
3459        tower.record_vote(5, Hash::default());
3460        tower.record_vote(6, Hash::default());
3461
3462        let mut slot_history = SlotHistory::default();
3463        slot_history.add(0);
3464        slot_history.add(1);
3465        slot_history.add(2);
3466        slot_history.add(7);
3467
3468        let replayed_root_slot = 7;
3469        tower = tower
3470            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3471            .unwrap();
3472
3473        assert_eq!(tower.voted_slots(), vec![5, 6]);
3474        assert_eq!(tower.root(), replayed_root_slot);
3475    }
3476
3477    #[test]
3478    fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() {
3479        let mut tower = Tower::new_for_tests(10, 0.9);
3480        tower.vote_state.root_slot = Some(4);
3481        tower.record_vote(5, Hash::default());
3482        tower.record_vote(6, Hash::default());
3483
3484        let mut slot_history = SlotHistory::default();
3485        slot_history.add(0);
3486        slot_history.add(1);
3487        slot_history.add(2);
3488        slot_history.add(7);
3489
3490        let replayed_root_slot = 7;
3491        let result = tower.adjust_lockouts_after_replay(replayed_root_slot, &slot_history);
3492
3493        assert_eq!(
3494            format!("{}", result.unwrap_err()),
3495            "The tower is fatally inconsistent with blockstore: no common slot for rooted tower"
3496        );
3497    }
3498
3499    #[test]
3500    fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() {
3501        let mut tower = Tower::new_for_tests(10, 0.9);
3502        tower.vote_state.root_slot = Some(2);
3503        tower.record_vote(3, Hash::default());
3504        tower.record_vote(4, Hash::default());
3505        tower.record_vote(5, Hash::default());
3506
3507        let mut slot_history = SlotHistory::default();
3508        slot_history.add(0);
3509        slot_history.add(1);
3510        slot_history.add(2);
3511
3512        let replayed_root_slot = 2;
3513        tower = tower
3514            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3515            .unwrap();
3516
3517        assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
3518        assert_eq!(tower.root(), replayed_root_slot);
3519    }
3520
3521    #[test]
3522    fn test_adjust_lockouts_after_replay_empty() {
3523        let mut tower = Tower::new_for_tests(10, 0.9);
3524
3525        let mut slot_history = SlotHistory::default();
3526        slot_history.add(0);
3527
3528        let replayed_root_slot = 0;
3529        tower = tower
3530            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3531            .unwrap();
3532
3533        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3534        assert_eq!(tower.root(), replayed_root_slot);
3535    }
3536
3537    #[test]
3538    fn test_adjust_lockouts_after_replay_too_old_tower() {
3539        use solana_slot_history::MAX_ENTRIES;
3540
3541        let mut tower = Tower::new_for_tests(10, 0.9);
3542        tower.record_vote(0, Hash::default());
3543
3544        let mut slot_history = SlotHistory::default();
3545        slot_history.add(0);
3546        slot_history.add(MAX_ENTRIES);
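        // Adding slot MAX_ENTRIES slides the history window forward, so slot 0 (the only
        // tower vote) is older than anything still available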
3547
3548        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3549        assert_eq!(
3550            format!("{}", result.unwrap_err()),
3551            "The tower is too old: newest slot in tower (0) << oldest slot in available history \
3552             (1)"
3553        );
3554    }
3555
3556    #[test]
3557    fn test_adjust_lockouts_after_replay_time_warped() {
3558        let mut tower = Tower::new_for_tests(10, 0.9);
3559        tower.vote_state.votes.push_back(Lockout::new(1));
3560        tower.vote_state.votes.push_back(Lockout::new(0));
3561        let vote = Vote::new(vec![0], Hash::default());
3562        tower.last_vote = VoteTransaction::from(vote);
3563
3564        let mut slot_history = SlotHistory::default();
3565        slot_history.add(0);
3566
3567        let result = tower.adjust_lockouts_after_replay(0, &slot_history);
3568        assert_eq!(
3569            format!("{}", result.unwrap_err()),
3570            "The tower is fatally inconsistent with blockstore: time warped?"
3571        );
3572    }
3573
3574    #[test]
3575    fn test_adjust_lockouts_after_replay_diverged_ancestor() {
3576        let mut tower = Tower::new_for_tests(10, 0.9);
3577        tower.vote_state.votes.push_back(Lockout::new(1));
3578        tower.vote_state.votes.push_back(Lockout::new(2));
3579        let vote = Vote::new(vec![2], Hash::default());
3580        tower.last_vote = VoteTransaction::from(vote);
3581
3582        let mut slot_history = SlotHistory::default();
3583        slot_history.add(0);
3584        slot_history.add(2);
3585
3586        let result = tower.adjust_lockouts_after_replay(2, &slot_history);
3587        assert_eq!(
3588            format!("{}", result.unwrap_err()),
3589            "The tower is fatally inconsistent with blockstore: diverged ancestor?"
3590        );
3591    }
3592
3593    #[test]
3594    fn test_adjust_lockouts_after_replay_out_of_order() {
3595        use solana_slot_history::MAX_ENTRIES;
3596
3597        let mut tower = Tower::new_for_tests(10, 0.9);
3598        tower
3599            .vote_state
3600            .votes
3601            .push_back(Lockout::new(MAX_ENTRIES - 1));
3602        tower.vote_state.votes.push_back(Lockout::new(0));
3603        tower.vote_state.votes.push_back(Lockout::new(1));
3604        let vote = Vote::new(vec![1], Hash::default());
3605        tower.last_vote = VoteTransaction::from(vote);
3606
3607        let mut slot_history = SlotHistory::default();
3608        slot_history.add(MAX_ENTRIES);
3609
3610        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3611        assert_eq!(
3612            format!("{}", result.unwrap_err()),
3613            "The tower is fatally inconsistent with blockstore: not too old once after got too \
3614             old?"
3615        );
3616    }
3617
3618    #[test]
3619    #[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
3620    fn test_adjust_lockouts_after_replay_reversed_votes() {
3621        let mut tower = Tower::new_for_tests(10, 0.9);
3622        tower.vote_state.votes.push_back(Lockout::new(2));
3623        tower.vote_state.votes.push_back(Lockout::new(1));
3624        let vote = Vote::new(vec![1], Hash::default());
3625        tower.last_vote = VoteTransaction::from(vote);
3626
3627        let mut slot_history = SlotHistory::default();
3628        slot_history.add(0);
3629        slot_history.add(2);
3630
3631        tower
3632            .adjust_lockouts_after_replay(2, &slot_history)
3633            .unwrap();
3634    }
3635
3636    #[test]
3637    #[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
3638    fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
3639        let mut tower = Tower::new_for_tests(10, 0.9);
3640        tower.vote_state.votes.push_back(Lockout::new(2));
3641        tower.vote_state.votes.push_back(Lockout::new(3));
3642        tower.vote_state.votes.push_back(Lockout::new(3));
3643        let vote = Vote::new(vec![3], Hash::default());
3644        tower.last_vote = VoteTransaction::from(vote);
3645
3646        let mut slot_history = SlotHistory::default();
3647        slot_history.add(0);
3648        slot_history.add(2);
3649
3650        tower
3651            .adjust_lockouts_after_replay(2, &slot_history)
3652            .unwrap();
3653    }
3654
3655    #[test]
3656    fn test_adjust_lockouts_after_replay_vote_on_root() {
3657        let mut tower = Tower::new_for_tests(10, 0.9);
3658        tower.vote_state.root_slot = Some(42);
3659        tower.vote_state.votes.push_back(Lockout::new(42));
3660        tower.vote_state.votes.push_back(Lockout::new(43));
3661        tower.vote_state.votes.push_back(Lockout::new(44));
3662        let vote = Vote::new(vec![44], Hash::default());
3663        tower.last_vote = VoteTransaction::from(vote);
3664
3665        let mut slot_history = SlotHistory::default();
3666        slot_history.add(42);
3667
3668        let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
3669        assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
3670    }
3671
3672    #[test]
3673    fn test_adjust_lockouts_after_replay_vote_on_genesis() {
3674        let mut tower = Tower::new_for_tests(10, 0.9);
3675        tower.vote_state.votes.push_back(Lockout::new(0));
3676        let vote = Vote::new(vec![0], Hash::default());
3677        tower.last_vote = VoteTransaction::from(vote);
3678
3679        let mut slot_history = SlotHistory::default();
3680        slot_history.add(0);
3681
3682        assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
3683    }
3684
3685    #[test]
3686    fn test_adjust_lockouts_after_replay_future_tower() {
3687        let mut tower = Tower::new_for_tests(10, 0.9);
3688        tower.vote_state.votes.push_back(Lockout::new(13));
3689        tower.vote_state.votes.push_back(Lockout::new(14));
3690        let vote = Vote::new(vec![14], Hash::default());
3691        tower.last_vote = VoteTransaction::from(vote);
3692        tower.initialize_root(12);
3693
3694        let mut slot_history = SlotHistory::default();
3695        slot_history.add(0);
3696        slot_history.add(2);
3697
3698        let tower = tower
3699            .adjust_lockouts_after_replay(2, &slot_history)
3700            .unwrap();
3701        assert_eq!(tower.root(), 12);
3702        assert_eq!(tower.voted_slots(), vec![13, 14]);
3703        assert_eq!(tower.stray_restored_slot, Some(14));
3704    }
3705
3706    #[test]
3707    fn test_default_tower_has_no_stray_last_vote() {
3708        let tower = Tower::default();
3709        assert!(!tower.is_stray_last_vote());
3710    }
3711
3712    #[test]
3713    fn test_switch_threshold_common_ancestor() {
3714        let mut vote_simulator = VoteSimulator::new(2);
3715        let other_vote_account = vote_simulator.vote_pubkeys[1];
3716        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3717        let total_stake = bank0.total_epoch_stake();
3718        assert_eq!(
3719            total_stake,
3720            vote_simulator.validator_keypairs.len() as u64 * 10_000
3721        );
3722
3723        // Create the tree of banks
3724        //                                       /- 50
3725        //          /- 51    /- 45 - 46 - 47 - 48 - 49
3726        // 0 - 1 - 2 - 43 - 44
3727        //                   \- 110 - 111 - 112
3728        //                    \- 113
3729        let forks = tr(0)
3730            / (tr(1)
3731                / (tr(2)
3732                    / tr(51)
3733                    / (tr(43)
3734                        / (tr(44)
3735                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / tr(49) / tr(50)))))
3736                            / tr(113)
3737                            / (tr(110) / tr(111) / tr(112))))));
3738        let switch_slot = 111;
3739
3740        // Fill the BankForks according to the above fork structure
3741        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3742        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3743            fork_progress.fork_stats.computed = true;
3744        }
3745
3746        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3747        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3748        let mut tower = Tower::default();
3749
3750        tower.record_vote(43, Hash::default());
3751        tower.record_vote(44, Hash::default());
3752        tower.record_vote(45, Hash::default());
3753        tower.record_vote(46, Hash::default());
3754        tower.record_vote(47, Hash::default());
3755        tower.record_vote(48, Hash::default());
3756        tower.record_vote(49, Hash::default());
3757
3758        // Candidate slot 50 should *not* work
3759        vote_simulator.simulate_lockout_interval(50, (10, 49), &other_vote_account);
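        // Slot 50 descends from our last vote (49), so its lockout interval cannot count
        // toward the switch stake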
3760        assert_eq!(
3761            tower.check_switch_threshold(
3762                switch_slot,
3763                &ancestors,
3764                &descendants,
3765                &vote_simulator.progress,
3766                total_stake,
3767                bank0.epoch_vote_accounts(0).unwrap(),
3768                &vote_simulator.latest_validator_votes_for_frozen_banks,
3769                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3770            ),
3771            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3772        );
3773        vote_simulator.clear_lockout_intervals(50);
3774
3775        // 51, 111, and 113 are all valid (112 would behave the same)
3776        for candidate_slot in [51, 111, 113] {
3777            vote_simulator.simulate_lockout_interval(candidate_slot, (10, 49), &other_vote_account);
3778            assert_eq!(
3779                tower.check_switch_threshold(
3780                    switch_slot,
3781                    &ancestors,
3782                    &descendants,
3783                    &vote_simulator.progress,
3784                    total_stake,
3785                    bank0.epoch_vote_accounts(0).unwrap(),
3786                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3787                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3788                ),
3789                SwitchForkDecision::SwitchProof(Hash::default())
3790            );
3791            vote_simulator.clear_lockout_intervals(candidate_slot);
3792        }
3793
3794        // Same checks for gossip votes
3795        let insert_gossip_vote = |vote_simulator: &mut VoteSimulator, slot| {
3796            vote_simulator
3797                .latest_validator_votes_for_frozen_banks
3798                .check_add_vote(
3799                    other_vote_account,
3800                    slot,
3801                    Some(
3802                        vote_simulator
3803                            .bank_forks
3804                            .read()
3805                            .unwrap()
3806                            .get(slot)
3807                            .unwrap()
3808                            .hash(),
3809                    ),
3810                    false,
3811                );
3812        };
3813
3814        // Candidate slot 50 should *not* work
3815        insert_gossip_vote(&mut vote_simulator, 50);
3816        assert_eq!(
3817            tower.check_switch_threshold(
3818                switch_slot,
3819                &ancestors,
3820                &descendants,
3821                &vote_simulator.progress,
3822                total_stake,
3823                bank0.epoch_vote_accounts(0).unwrap(),
3824                &vote_simulator.latest_validator_votes_for_frozen_banks,
3825                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3826            ),
3827            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3828        );
3829        vote_simulator.latest_validator_votes_for_frozen_banks =
3830            LatestValidatorVotesForFrozenBanks::default();
3831
3832        // 51, 110, 111, 112, and 113 are all valid
3833        // Note: We can use 110 here since gossip votes aren't limited to leaf banks
3834        for candidate_slot in [51, 110, 111, 112, 113] {
3835            insert_gossip_vote(&mut vote_simulator, candidate_slot);
3836            assert_eq!(
3837                tower.check_switch_threshold(
3838                    switch_slot,
3839                    &ancestors,
3840                    &descendants,
3841                    &vote_simulator.progress,
3842                    total_stake,
3843                    bank0.epoch_vote_accounts(0).unwrap(),
3844                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3845                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
3846                ),
3847                SwitchForkDecision::SwitchProof(Hash::default())
3848            );
3849            vote_simulator.latest_validator_votes_for_frozen_banks =
3850                LatestValidatorVotesForFrozenBanks::default();
3851        }
3852    }
3853}