solana_core/consensus.rs

pub mod fork_choice;
pub mod heaviest_subtree_fork_choice;
pub(crate) mod latest_validator_votes_for_frozen_banks;
pub mod progress_map;
mod tower1_14_11;
mod tower1_7_14;
pub mod tower_storage;
pub(crate) mod tower_vote_state;
pub mod tree_diff;
pub mod vote_stake_tracker;

use {
    self::{
        heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
        latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
        progress_map::{LockoutIntervals, ProgressMap},
        tower1_14_11::Tower1_14_11,
        tower1_7_14::Tower1_7_14,
        tower_storage::{SavedTower, SavedTowerVersions, TowerStorage},
        tower_vote_state::TowerVoteState,
    },
    crate::replay_stage::DUPLICATE_THRESHOLD,
    chrono::prelude::*,
    solana_clock::{Slot, UnixTimestamp},
    solana_hash::Hash,
    solana_instruction::Instruction,
    solana_keypair::Keypair,
    solana_ledger::{
        ancestor_iterator::AncestorIterator,
        blockstore::{self, Blockstore},
    },
    solana_pubkey::Pubkey,
    solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE},
    solana_slot_history::{Check, SlotHistory},
    solana_vote::{vote_account::VoteAccountsHashMap, vote_transaction::VoteTransaction},
    solana_vote_program::{
        vote_error::VoteError,
        vote_instruction,
        vote_state::{
            BlockTimestamp, Lockout, TowerSync, Vote, VoteState1_14_11, VoteStateUpdate,
            MAX_LOCKOUT_HISTORY,
        },
    },
    std::{
        cmp::Ordering,
        collections::{HashMap, HashSet},
        ops::{
            Bound::{Included, Unbounded},
            Deref,
        },
    },
    thiserror::Error,
};

#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub enum ThresholdDecision {
    #[default]
    PassedThreshold,
    FailedThreshold(/* vote depth */ u64, /* Observed stake */ u64),
}

impl ThresholdDecision {
    pub fn passed(&self) -> bool {
        matches!(self, Self::PassedThreshold)
    }
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum SwitchForkDecision {
    SwitchProof(Hash),
    SameFork,
    FailedSwitchThreshold(
        /* Switch proof stake */ u64,
        /* Total stake */ u64,
    ),
    FailedSwitchDuplicateRollback(Slot),
}

impl SwitchForkDecision {
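    /// Builds the vote instruction that corresponds to this decision, if any:
    /// `SameFork` produces the plain vote / vote-state-update / tower-sync
    /// instruction, `SwitchProof` produces the matching `*_switch` instruction
    /// carrying the proof hash, and both `Failed*` variants yield `None`
    /// because no vote may be cast.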
    pub fn to_vote_instruction(
        &self,
        vote: VoteTransaction,
        vote_account_pubkey: &Pubkey,
        authorized_voter_pubkey: &Pubkey,
    ) -> Option<Instruction> {
        match (self, vote) {
            (SwitchForkDecision::FailedSwitchThreshold(_, total_stake), _) => {
                assert_ne!(*total_stake, 0);
                None
            }
            (SwitchForkDecision::FailedSwitchDuplicateRollback(_), _) => None,
            (SwitchForkDecision::SameFork, VoteTransaction::Vote(v)) => Some(
                vote_instruction::vote(vote_account_pubkey, authorized_voter_pubkey, v),
            ),
            (SwitchForkDecision::SameFork, VoteTransaction::VoteStateUpdate(v)) => {
                Some(vote_instruction::update_vote_state(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                ))
            }
            (SwitchForkDecision::SameFork, VoteTransaction::TowerSync(t)) => Some(
                vote_instruction::tower_sync(vote_account_pubkey, authorized_voter_pubkey, t),
            ),
            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::Vote(v)) => {
                Some(vote_instruction::vote_switch(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                    *switch_proof_hash,
                ))
            }
            (
                SwitchForkDecision::SwitchProof(switch_proof_hash),
                VoteTransaction::VoteStateUpdate(v),
            ) => Some(vote_instruction::update_vote_state_switch(
                vote_account_pubkey,
                authorized_voter_pubkey,
                v,
                *switch_proof_hash,
            )),
            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::TowerSync(t)) => {
                Some(vote_instruction::tower_sync_switch(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    t,
                    *switch_proof_hash,
                ))
            }
            (SwitchForkDecision::SameFork, VoteTransaction::CompactVoteStateUpdate(v)) => {
                Some(vote_instruction::compact_update_vote_state(
                    vote_account_pubkey,
                    authorized_voter_pubkey,
                    v,
                ))
            }
            (
                SwitchForkDecision::SwitchProof(switch_proof_hash),
                VoteTransaction::CompactVoteStateUpdate(v),
            ) => Some(vote_instruction::compact_update_vote_state_switch(
                vote_account_pubkey,
                authorized_voter_pubkey,
                v,
                *switch_proof_hash,
            )),
        }
    }

    pub fn can_vote(&self) -> bool {
        match self {
            SwitchForkDecision::FailedSwitchThreshold(_, _) => false,
            SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false,
            SwitchForkDecision::SameFork => true,
            SwitchForkDecision::SwitchProof(_) => true,
        }
    }
}

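// Threshold parameters used throughout this module. As an illustration of the
// arithmetic (using `VOTE_THRESHOLD_SIZE` from `solana_runtime::commitment`,
// nominally 2/3): with `VOTE_THRESHOLD_DEPTH` = 8, a vote is only cast if the
// slot that would sit 8 deep in the resulting tower has been voted on by more
// than `threshold_size` of the epoch stake, and switching forks additionally
// requires observing more than `SWITCH_FORK_THRESHOLD` (38%) of total stake
// locked out on other forks.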
const VOTE_THRESHOLD_DEPTH_SHALLOW: usize = 4;
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;

pub type Result<T> = std::result::Result<T, TowerError>;

pub type Stake = u64;
pub type VotedStakes = HashMap<Slot, Stake>;
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;

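/// Aggregate vote and stake information computed over all vote accounts in a
/// bank by `collect_vote_lockouts`, used by replay for fork choice and
/// threshold checks. Recall that a lockout on slot `s` with confirmation count
/// `c` covers slots up through `s + 2^c` (for example, a vote on slot 100 with
/// confirmation count 3 is locked out through slot 108), which is the interval
/// end that `lockout_intervals` is keyed by.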
pub(crate) struct ComputedBankState {
    pub voted_stakes: VotedStakes,
    pub total_stake: Stake,
    pub fork_stake: Stake,
    // Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
    // keyed by end of the range
    pub lockout_intervals: LockoutIntervals,
    pub my_latest_landed_vote: Option<Slot>,
}

#[derive(Debug, PartialEq, Clone)]
#[allow(clippy::large_enum_variant)]
pub enum TowerVersions {
    V1_7_14(Tower1_7_14),
    V1_14_11(Tower1_14_11),
    Current(Tower),
}

impl TowerVersions {
    pub fn new_current(tower: Tower) -> Self {
        Self::Current(tower)
    }

    pub fn convert_to_current(self) -> Tower {
        match self {
            TowerVersions::V1_7_14(tower) => tower.into(),
            TowerVersions::V1_14_11(tower) => tower.into(),
            TowerVersions::Current(tower) => tower,
        }
    }
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(PartialEq, Eq, Debug, Default, Clone, Copy)]
pub(crate) enum BlockhashStatus {
    /// No vote since restart
    #[default]
    Uninitialized,
    /// Non voting validator
    NonVoting,
    /// Hot spare validator
    HotSpare,
    /// Successfully generated vote tx with blockhash
    Blockhash(Hash),
}

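/// The validator's local vote tower: its own lockout state plus the metadata
/// needed to build, refresh, and persist vote transactions. An illustrative
/// (not compiled) sketch of how replay-style code might drive it, assuming the
/// caller supplies `slot`, `hash`, and `ancestors`:
///
/// ```ignore
/// let mut tower = Tower::default();
/// if tower.is_recent(slot) && !tower.is_locked_out(slot, &ancestors) {
///     // record_vote is a dev-context-only helper wrapping
///     // record_bank_vote_and_update_lockouts
///     let _new_root = tower.record_vote(slot, hash);
/// }
/// ```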
#[derive(Clone, Debug, PartialEq)]
pub struct Tower {
    pub node_pubkey: Pubkey,
    pub(crate) threshold_depth: usize,
    threshold_size: f64,
    pub(crate) vote_state: TowerVoteState,
    last_vote: VoteTransaction,
    // The blockhash used in the last vote transaction. It may or may not equal the
    // blockhash of the voted block itself, depending on whether the vote slot was refreshed.
    // For instance, a vote for slot 5 may be refreshed/resubmitted for inclusion in
    // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5.
    // For non-voting validators this is `NonVoting`.
    last_vote_tx_blockhash: BlockhashStatus,
    last_timestamp: BlockTimestamp,
    // Restored last voted slot which cannot be found in SlotHistory at the replayed root
    // (this is a special field for slashing-free validator restart with edge cases).
    // It could be emptied after some time, but is left intact indefinitely for ease of
    // implementation.
    // Further, a stray slot can be stale or not; `stale` here means whether the given
    // bank_forks (=~ ledger) lacks the slot or not.
    stray_restored_slot: Option<Slot>,
    pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
}

impl Default for Tower {
    fn default() -> Self {
        let mut tower = Self {
            node_pubkey: Pubkey::default(),
            threshold_depth: VOTE_THRESHOLD_DEPTH,
            threshold_size: VOTE_THRESHOLD_SIZE,
            vote_state: TowerVoteState::default(),
            last_vote: VoteTransaction::from(TowerSync::default()),
            last_timestamp: BlockTimestamp::default(),
            last_vote_tx_blockhash: BlockhashStatus::default(),
            stray_restored_slot: Option::default(),
            last_switch_threshold_check: Option::default(),
        };
        // VoteState::root_slot is ensured to be Some in Tower
        tower.vote_state.root_slot = Some(Slot::default());
        tower
    }
}

// Tower1_14_11 is the persisted data format for the Tower,
// decoupling it from VoteState::Current.
impl From<Tower> for Tower1_14_11 {
    fn from(tower: Tower) -> Self {
        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: VoteState1_14_11::from(tower.vote_state),
            last_vote: tower.last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

// Tower1_14_11 is the persisted data format for the Tower,
// decoupling it from VoteState::Current.
impl From<Tower1_14_11> for Tower {
    fn from(tower: Tower1_14_11) -> Self {
        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: TowerVoteState::from(tower.vote_state),
            last_vote: tower.last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

impl From<Tower1_7_14> for Tower {
    fn from(tower: Tower1_7_14) -> Self {
        let box_last_vote = VoteTransaction::from(tower.last_vote.clone());

        Self {
            node_pubkey: tower.node_pubkey,
            threshold_depth: tower.threshold_depth,
            threshold_size: tower.threshold_size,
            vote_state: TowerVoteState::from(tower.vote_state),
            last_vote: box_last_vote,
            last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
            last_timestamp: tower.last_timestamp,
            stray_restored_slot: tower.stray_restored_slot,
            last_switch_threshold_check: tower.last_switch_threshold_check,
        }
    }
}

impl Tower {
    pub fn new(
        node_pubkey: &Pubkey,
        vote_account_pubkey: &Pubkey,
        root: Slot,
        bank: &Bank,
    ) -> Self {
        let mut tower = Tower {
            node_pubkey: *node_pubkey,
            ..Tower::default()
        };
        tower.initialize_lockouts_from_bank(vote_account_pubkey, root, bank);
        tower
    }

    #[cfg(test)]
    pub fn new_for_tests(threshold_depth: usize, threshold_size: f64) -> Self {
        Self {
            threshold_depth,
            threshold_size,
            ..Tower::default()
        }
    }

    #[cfg(test)]
    pub fn new_random(node_pubkey: Pubkey) -> Self {
        use {rand::Rng, solana_vote_program::vote_state::VoteState};

        let mut rng = rand::thread_rng();
        let root_slot = rng.gen();
        let vote_state = VoteState::new_rand_for_tests(node_pubkey, root_slot);
        let last_vote = TowerSync::from(
            vote_state
                .votes
                .iter()
                .map(|lv| (lv.slot(), lv.confirmation_count()))
                .collect::<Vec<_>>(),
        );
        Self {
            node_pubkey,
            vote_state: TowerVoteState::from(vote_state),
            last_vote: VoteTransaction::from(last_vote),
            ..Tower::default()
        }
    }

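    /// Builds a `Tower` for `node_pubkey` by recomputing fork choice over the
    /// frozen banks in `bank_forks` and initializing lockouts from the copy of
    /// `vote_account` found in the heaviest bank.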
    pub fn new_from_bankforks(
        bank_forks: &BankForks,
        node_pubkey: &Pubkey,
        vote_account: &Pubkey,
    ) -> Self {
        let root_bank = bank_forks.root_bank();
        let frozen_banks: Vec<_> = bank_forks
            .frozen_banks()
            .map(|(_slot, bank)| bank)
            .collect();
        let (_progress, heaviest_subtree_fork_choice) =
            crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
                root_bank.deref(),
                frozen_banks,
                node_pubkey,
                vote_account,
                vec![],
            );
        let root = root_bank.slot();

        let (best_slot, best_hash) = heaviest_subtree_fork_choice.best_overall_slot();
        let heaviest_bank = bank_forks
            .get_with_checked_hash((best_slot, best_hash))
            .expect(
                "The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
            );

        Self::new(node_pubkey, vote_account, root, &heaviest_bank)
    }

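    /// Walks every vote account in `vote_accounts`, simulating each validator's
    /// next vote landing on `bank_slot`, and aggregates the result into a
    /// `ComputedBankState`: per-slot voted stake, total stake, the stake on this
    /// fork, the lockout-interval tree used for switch proofs, and the latest
    /// landed vote of our own `vote_account_pubkey`.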
    pub(crate) fn collect_vote_lockouts(
        vote_account_pubkey: &Pubkey,
        bank_slot: Slot,
        vote_accounts: &VoteAccountsHashMap,
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        get_frozen_hash: impl Fn(Slot) -> Option<Hash>,
        latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
    ) -> ComputedBankState {
        let mut vote_slots = HashSet::new();
        let mut voted_stakes = HashMap::new();
        let mut total_stake = 0;

        // Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
        // keyed by end of the range
        let mut lockout_intervals = LockoutIntervals::new();
        let mut my_latest_landed_vote = None;
        for (&key, (voted_stake, account)) in vote_accounts.iter() {
            let voted_stake = *voted_stake;
            if voted_stake == 0 {
                continue;
            }
            trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake);
            let mut vote_state = TowerVoteState::from(account.vote_state_view());
            for vote in &vote_state.votes {
                lockout_intervals
                    .entry(vote.last_locked_out_slot())
                    .or_default()
                    .push((vote.slot(), key));
            }

            if key == *vote_account_pubkey {
                my_latest_landed_vote = vote_state.nth_recent_lockout(0).map(|l| l.slot());
                debug!("vote state {:?}", vote_state);
                debug!(
                    "observed slot {}",
                    vote_state
                        .nth_recent_lockout(0)
                        .map(|l| l.slot())
                        .unwrap_or(0) as i64
                );
                debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
                datapoint_info!(
                    "tower-observed",
                    (
                        "slot",
                        vote_state
                            .nth_recent_lockout(0)
                            .map(|l| l.slot())
                            .unwrap_or(0),
                        i64
                    ),
                    ("root", vote_state.root_slot.unwrap_or(0), i64)
                );
            }
            let start_root = vote_state.root_slot;

            // Add the last vote to update the `heaviest_subtree_fork_choice`
            if let Some(last_landed_voted_slot) = vote_state.last_voted_slot() {
                latest_validator_votes_for_frozen_banks.check_add_vote(
                    key,
                    last_landed_voted_slot,
                    get_frozen_hash(last_landed_voted_slot),
                    true,
                );
            }

            vote_state.process_next_vote_slot(bank_slot);

            for vote in &vote_state.votes {
                vote_slots.insert(vote.slot());
            }

            if start_root != vote_state.root_slot {
                if let Some(root) = start_root {
                    let vote =
                        Lockout::new_with_confirmation_count(root, MAX_LOCKOUT_HISTORY as u32);
                    trace!("ROOT: {}", vote.slot());
                    vote_slots.insert(vote.slot());
                }
            }
            if let Some(root) = vote_state.root_slot {
                let vote = Lockout::new_with_confirmation_count(root, MAX_LOCKOUT_HISTORY as u32);
                vote_slots.insert(vote.slot());
            }

            // The last vote in the vote stack is a simulated vote on bank_slot, which
            // we added to the vote stack earlier in this function by calling
            // process_next_vote_slot(). We don't want to update the ancestor stakes of
            // this vote because it does not represent an actual vote by the validator.

            // Note: It should not be possible for any vote state in this bank to have
            // a vote for a slot >= bank_slot, so we are guaranteed that the last vote in
            // this vote stack is the simulated vote, so this fetch should be sufficient
            // to find the last unsimulated vote.
            assert_eq!(
                vote_state.nth_recent_lockout(0).map(|l| l.slot()),
                Some(bank_slot)
            );
            if let Some(vote) = vote_state.nth_recent_lockout(1) {
                // Update all the parents of this last vote with the stake of this vote account
                Self::update_ancestor_voted_stakes(
                    &mut voted_stakes,
                    vote.slot(),
                    voted_stake,
                    ancestors,
                );
            }
            total_stake += voted_stake;
        }

        // TODO: populate_ancestor_voted_stakes only adds zeros. Comment why
        // that is necessary (if so).
        Self::populate_ancestor_voted_stakes(&mut voted_stakes, vote_slots, ancestors);

        // As commented above, since the votes at current bank_slot are
        // simulated votes, the voted_stake for `bank_slot` is not populated.
        // Therefore, we use the voted_stake for the parent of bank_slot as the
        // `fork_stake` instead.
        let fork_stake = ancestors
            .get(&bank_slot)
            .and_then(|ancestors| {
                ancestors
                    .iter()
                    .max()
                    .and_then(|parent| voted_stakes.get(parent))
                    .copied()
            })
            .unwrap_or(0);

        ComputedBankState {
            voted_stakes,
            total_stake,
            fork_stake,
            lockout_intervals,
            my_latest_landed_vote,
        }
    }

    #[cfg(test)]
    fn is_slot_confirmed(
        &self,
        slot: Slot,
        voted_stakes: &VotedStakes,
        total_stake: Stake,
    ) -> bool {
        voted_stakes
            .get(&slot)
            .map(|stake| (*stake as f64 / total_stake as f64) > self.threshold_size)
            .unwrap_or(false)
    }

    pub(crate) fn is_slot_duplicate_confirmed(
        &self,
        slot: Slot,
        voted_stakes: &VotedStakes,
        total_stake: Stake,
    ) -> bool {
        voted_stakes
            .get(&slot)
            .map(|stake| (*stake as f64 / total_stake as f64) > DUPLICATE_THRESHOLD)
            .unwrap_or(false)
    }

    pub fn tower_slots(&self) -> Vec<Slot> {
        self.vote_state.tower()
    }

    pub(crate) fn last_vote_tx_blockhash(&self) -> BlockhashStatus {
        self.last_vote_tx_blockhash
    }

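    /// Bumps the timestamp on `last_vote` before it is resubmitted for a newer
    /// heaviest bank on the same fork, so that the refreshed transaction is not
    /// dropped by deduplication filters while still moving the timestamp by the
    /// minimum amount.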
    pub fn refresh_last_vote_timestamp(&mut self, heaviest_slot_on_same_fork: Slot) {
        let timestamp = if let Some(last_vote_timestamp) = self.last_vote.timestamp() {
            // To avoid a refreshed vote tx getting caught in deduplication filters,
            // we need to update the timestamp. Increment by the smallest amount to avoid
            // skewing the Timestamp Oracle.
            last_vote_timestamp.saturating_add(1)
        } else {
            // If the previous vote did not send a timestamp due to clock error,
            // use the last good timestamp + 1
            datapoint_info!(
                "refresh-timestamp-missing",
                ("heaviest-slot", heaviest_slot_on_same_fork, i64),
                ("last-timestamp", self.last_timestamp.timestamp, i64),
                ("last-slot", self.last_timestamp.slot, i64),
            );
            self.last_timestamp.timestamp.saturating_add(1)
        };

        if let Some(last_voted_slot) = self.last_vote.last_voted_slot() {
            if heaviest_slot_on_same_fork <= last_voted_slot {
                warn!(
                    "Trying to refresh timestamp for vote on {last_voted_slot} \
                     using smaller heaviest bank {heaviest_slot_on_same_fork}"
                );
                return;
            }
            self.last_timestamp = BlockTimestamp {
                slot: last_voted_slot,
                timestamp,
            };
            self.last_vote.set_timestamp(Some(timestamp));
        } else {
            warn!(
                "Trying to refresh timestamp for last vote on heaviest bank on same fork \
                 {heaviest_slot_on_same_fork}, but there is no vote to refresh"
            );
        }
    }

    pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) {
        self.last_vote_tx_blockhash = BlockhashStatus::Blockhash(new_vote_tx_blockhash);
    }

    pub(crate) fn mark_last_vote_tx_blockhash_non_voting(&mut self) {
        self.last_vote_tx_blockhash = BlockhashStatus::NonVoting;
    }

    pub(crate) fn mark_last_vote_tx_blockhash_hot_spare(&mut self) {
        self.last_vote_tx_blockhash = BlockhashStatus::HotSpare;
    }

    pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
        let vote_account = bank.get_vote_account(vote_account_pubkey)?;
        vote_account.vote_state_view().last_voted_slot()
    }

    pub fn record_bank_vote(&mut self, bank: &Bank) -> Option<Slot> {
        // Returns the new root if one is made after applying a vote for the given bank to
        // `self.vote_state`
        let block_id = bank.block_id().unwrap_or_else(|| {
            // This can only happen for our leader bank
            // Note: since the new shred format is yet to be rolled out to all clusters,
            // this can also happen for non-leader banks. Once rolled out we can assert
            // here that this is our leader bank.
            Hash::default()
        });
        self.record_bank_vote_and_update_lockouts(
            bank.slot(),
            bank.hash(),
            bank.feature_set
                .is_active(&agave_feature_set::enable_tower_sync_ix::id()),
            block_id,
        )
    }

    /// If we've recently updated the vote state by applying a new vote
    /// or syncing from a bank, generate the proper last_vote.
    pub(crate) fn update_last_vote_from_vote_state(
        &mut self,
        vote_hash: Hash,
        enable_tower_sync_ix: bool,
        block_id: Hash,
    ) {
        let mut new_vote = if enable_tower_sync_ix {
            VoteTransaction::from(TowerSync::new(
                self.vote_state.votes.clone(),
                self.vote_state.root_slot,
                vote_hash,
                block_id,
            ))
        } else {
            VoteTransaction::from(VoteStateUpdate::new(
                self.vote_state.votes.clone(),
                self.vote_state.root_slot,
                vote_hash,
            ))
        };

        new_vote.set_timestamp(self.maybe_timestamp(self.last_voted_slot().unwrap_or_default()));
        self.last_vote = new_vote;
    }

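    /// Applies a vote for `vote_slot` to the local vote state, regenerates
    /// `last_vote`, and returns the new root slot if recording the vote caused
    /// the root to advance. Panics if `vote_slot` is not newer than the last
    /// voted slot.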
    fn record_bank_vote_and_update_lockouts(
        &mut self,
        vote_slot: Slot,
        vote_hash: Hash,
        enable_tower_sync_ix: bool,
        block_id: Hash,
    ) -> Option<Slot> {
        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
            if vote_slot <= last_voted_slot {
                panic!(
                    "Error while recording vote {} {} in local tower {:?}",
                    vote_slot,
                    vote_hash,
                    VoteError::VoteTooOld
                );
            }
        }

        trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
        let old_root = self.root();

        self.vote_state.process_next_vote_slot(vote_slot);
        self.update_last_vote_from_vote_state(vote_hash, enable_tower_sync_ix, block_id);

        let new_root = self.root();

        datapoint_info!(
            "tower-vote",
            ("latest", vote_slot, i64),
            ("root", new_root, i64)
        );
        if old_root != new_root {
            Some(new_root)
        } else {
            None
        }
    }

    #[cfg(feature = "dev-context-only-utils")]
    pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
        self.record_bank_vote_and_update_lockouts(slot, hash, true, Hash::default())
    }

    #[cfg(feature = "dev-context-only-utils")]
    pub fn increase_lockout(&mut self, confirmation_count_increase: u32) {
        for vote in self.vote_state.votes.iter_mut() {
            vote.increase_confirmation_count(confirmation_count_increase);
        }
    }

    pub fn last_voted_slot(&self) -> Option<Slot> {
        if self.last_vote.is_empty() {
            None
        } else {
            Some(self.last_vote.slot(self.last_vote.len() - 1))
        }
    }

    pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> {
        Some((self.last_voted_slot()?, self.last_vote.hash()))
    }

    pub fn stray_restored_slot(&self) -> Option<Slot> {
        self.stray_restored_slot
    }

    pub fn last_vote(&self) -> VoteTransaction {
        self.last_vote.clone()
    }

    fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
        if current_slot > self.last_timestamp.slot
            || self.last_timestamp.slot == 0 && current_slot == self.last_timestamp.slot
        {
            let timestamp = Utc::now().timestamp();
            if timestamp >= self.last_timestamp.timestamp {
                self.last_timestamp = BlockTimestamp {
                    slot: current_slot,
                    timestamp,
                };
                return Some(timestamp);
            } else {
                datapoint_info!(
                    "backwards-timestamp",
                    ("slot", current_slot, i64),
                    ("timestamp", timestamp, i64),
                    ("last-timestamp", self.last_timestamp.timestamp, i64),
                )
            }
        }
        None
    }

    // The root may be forcibly set to an arbitrary replay root slot, for example to a root
    // restored after replaying from a snapshot.
    // Also, tower.root() can't be None; initialize_lockouts() ensures that.
    // Conceptually, every tower must have been constructed from a concrete starting point,
    // which establishes the origin of trust (i.e. root) whether booting from genesis (slot 0) or
    // a snapshot (slot N). In other words, there should be no way for a Tower to lack a
    // root, unlike young vote accounts.
    pub fn root(&self) -> Slot {
        self.vote_state.root_slot.unwrap()
    }

    // a slot is recent if it's newer than the last vote we have. If we haven't voted yet
    // but have a root (hard forks situation) then compare it to the root
    pub fn is_recent(&self, slot: Slot) -> bool {
        if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
            if slot <= last_voted_slot {
                return false;
            }
        } else if let Some(root) = self.vote_state.root_slot {
            if slot <= root {
                return false;
            }
        }
        true
    }

    pub fn has_voted(&self, slot: Slot) -> bool {
        for vote in &self.vote_state.votes {
            if slot == vote.slot() {
                return true;
            }
        }
        false
    }

    pub fn is_locked_out(&self, slot: Slot, ancestors: &HashSet<Slot>) -> bool {
        if !self.is_recent(slot) {
            return true;
        }

        // Check if a slot is locked out by simulating adding a vote for that
        // slot to the current lockouts to pop any expired votes. If any of the
        // remaining voted slots are on a different fork from the checked slot,
        // it's still locked out.
        let mut vote_state = self.vote_state.clone();
        vote_state.process_next_vote_slot(slot);
        for vote in &vote_state.votes {
            if slot != vote.slot() && !ancestors.contains(&vote.slot()) {
                return true;
            }
        }

        if let Some(root_slot) = vote_state.root_slot {
            if slot != root_slot {
                // This case should never happen because bank forks purges all
                // non-descendants of the root every time root is set
                assert!(
                    ancestors.contains(&root_slot),
                    "ancestors: {ancestors:?}, slot: {slot} root: {root_slot}"
                );
            }
        }

        false
    }

    /// Checks if a vote for `candidate_slot` is usable in a switching proof
    /// from `last_voted_slot` to `switch_slot`.
    /// We assume `candidate_slot` is not an ancestor of `last_voted_slot`.
    ///
    /// Returns None if `candidate_slot` or `switch_slot` is not present in `ancestors`
    fn is_valid_switching_proof_vote(
        &self,
        candidate_slot: Slot,
        last_voted_slot: Slot,
        switch_slot: Slot,
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        last_vote_ancestors: &HashSet<Slot>,
    ) -> Option<bool> {
        trace!(
            "Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \
             to {switch_slot}"
        );
        // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
        // want to count votes on the same fork.
        if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? {
            return Some(false);
        }

        if last_vote_ancestors.is_empty() {
            // If `last_vote_ancestors` is empty, we must have a last vote that is stray. If the `last_voted_slot`
            // is stray, it must descend from some root earlier than the latest root (the anchor at startup).
            // The above check also guarantees that the candidate slot is not a descendant of this stray last vote.
            //
            // This gives us a fork graph:
            //     / ------------- stray `last_voted_slot`
            // old root
            //     \- latest root (anchor) - ... - candidate slot
            //                                \- switch slot
            //
            // Thus the common ancestor of `last_voted_slot` and `candidate_slot` is `old_root`, which the `switch_slot`
            // descends from. Thus it is safe to use `candidate_slot` in the switching proof.
            //
            // Note: the calling function should have already panicked if we do not have ancestors and the last vote is not stray.
            assert!(self.is_stray_last_vote());
            return Some(true);
        }

        // Only consider forks that split at the common_ancestor of `switch_slot` and `last_voted_slot` or earlier.
        // This is to prevent situations like this from being included in the switching proof:
        //
        //         /-- `last_voted_slot`
        //     /--Y
        //    X    \-- `candidate_slot`
        //     \-- `switch_slot`
        //
        // The common ancestor of `last_voted_slot` and `switch_slot` is `X`. Votes for the `candidate_slot`
        // should not count towards the switch proof since `candidate_slot` is "on the same fork" as `last_voted_slot`
        // in relation to `switch_slot`.
        // However these candidate slots should be allowed:
        //
        //             /-- Y -- `last_voted_slot`
        //    V - W - X
        //        \    \-- `candidate_slot` -- `switch_slot`
        //         \    \-- `candidate_slot`
        //          \-- `candidate_slot`
        //
        // As the `candidate_slot`s forked off from `X` or earlier.
        //
        // To differentiate, we check the common ancestor of `last_voted_slot` and `candidate_slot`.
        // If the `switch_slot` descends from this ancestor, then the vote for `candidate_slot` can be included.
        Self::greatest_common_ancestor(ancestors, candidate_slot, last_voted_slot)
            .and_then(|ancestor| Self::is_descendant_slot(switch_slot, ancestor, ancestors))
    }

    /// Checks if `maybe_descendant` is a descendant of `slot`.
    ///
    /// Returns None if `maybe_descendant` is not present in `ancestors`
    fn is_descendant_slot(
        maybe_descendant: Slot,
        slot: Slot,
        ancestors: &HashMap<Slot, HashSet<u64>>,
    ) -> Option<bool> {
        ancestors
            .get(&maybe_descendant)
            .map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&slot))
    }

    /// Returns `Some(gca)` where `gca` is the greatest (by slot number)
    /// common ancestor of both `slot_a` and `slot_b`.
    ///
    /// Returns `None` if:
    /// * `slot_a` is not in `ancestors`
    /// * `slot_b` is not in `ancestors`
    /// * There is no common ancestor of slot_a and slot_b in `ancestors`
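    ///
    /// For example, assuming `ancestors` maps 7 -> {0, 1, 3} and 8 -> {0, 1, 4},
    /// the greatest common ancestor of slots 7 and 8 is 1.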
    fn greatest_common_ancestor(
        ancestors: &HashMap<Slot, HashSet<Slot>>,
        slot_a: Slot,
        slot_b: Slot,
    ) -> Option<Slot> {
        (ancestors.get(&slot_a)?)
            .intersection(ancestors.get(&slot_b)?)
            .max()
            .copied()
    }

    #[allow(clippy::too_many_arguments)]
    fn make_check_switch_threshold_decision(
        &self,
        switch_slot: Slot,
        ancestors: &HashMap<Slot, HashSet<u64>>,
        descendants: &HashMap<Slot, HashSet<u64>>,
        progress: &ProgressMap,
        total_stake: u64,
        epoch_vote_accounts: &VoteAccountsHashMap,
        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
    ) -> SwitchForkDecision {
        let Some((last_voted_slot, last_voted_hash)) = self.last_voted_slot_hash() else {
            return SwitchForkDecision::SameFork;
        };
        let root = self.root();
        let empty_ancestors = HashSet::default();
        let empty_ancestors_due_to_minor_unsynced_ledger = || {
            // This condition (stale stray last vote) shouldn't occur under normal validator
            // operation, indicating something unusual happened.
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.

            // However, returning empty ancestors as a fallback here shouldn't result in
            // slashing by itself (Note that we couldn't fully preclude any kind of slashing if
            // the failure was OS or HW level).

            // Firstly, lockout is ensured elsewhere.

            // Also, there is no risk of optimistic conf. violation. Although empty ancestors
            // could result in incorrect (= more than actual) locked_out_stake and
            // false-positive SwitchProof later in this function, there should be no such
            // heavier fork candidate in the first place if the last vote (or any of its
            // unavailable ancestors) were already optimistically confirmed.
            // The only exception is if another validator is already violating it...
            if self.is_first_switch_check() && switch_slot < last_voted_slot {
                // The `switch < last` check is needed so we don't warn! just because a
                // newer snapshot was used on validator restart
                let message = format!(
                    "bank_forks doesn't have corresponding data for the stray restored last \
                     vote({last_voted_slot}), meaning some inconsistency between saved tower and \
                     ledger."
                );
                warn!("{}", message);
                datapoint_warn!("tower_warn", ("warn", message, String));
            }
            &empty_ancestors
        };

        let suspended_decision_due_to_major_unsynced_ledger = || {
            // This peculiar corner handling is needed mainly for a tower which is newer than
            // the blockstore. (Yeah, we tolerate it to make validator maintenance easier for
            // operators.)
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.

            // When we're in this clause, it basically means the validator is badly running
            // with a future tower while replaying past slots; the last_voted_slot is
            // especially problematic.
            // So, don't re-vote on it by returning a pseudo FailedSwitchThreshold; otherwise
            // there would be slashing due to a double vote on one of last_vote_ancestors.
            // (Well, needless to say, re-creating the duplicate block must be handled properly
            // at the banking stage: https://github.com/solana-labs/solana/issues/8232)
            //
            // To be specific, the replay stage is tricked into a false perception where
            // last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
            // stray slots (which should always be empty_ancestors).
            //
            // This is covered by test_future_tower_* in local_cluster
            SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
        };

        let rollback_due_to_duplicate_ancestor = |latest_duplicate_ancestor| {
            SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
        };

        // `heaviest_subtree_fork_choice` entries are not cleaned by duplicate block purging/rollback logic,
        // so this is safe to check here. We return here if the last voted slot was rolled back/purged due to
        // being a duplicate because `ancestors`/`descendants`/`progress` structures may be missing this slot due
        // to duplicate purging. This would cause many of the `unwrap()` checks below to fail.
        //
        // TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
        // heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
        // None, but the last vote will be persisted in tower.
        let switch_hash = progress
            .get_hash(switch_slot)
            .expect("Slot we're trying to switch to must exist AND be frozen in progress map");
        if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice
            .latest_invalid_ancestor(&(last_voted_slot, last_voted_hash))
        {
            // We're rolling back because one of the ancestors of the last vote was a duplicate. In this
            // case, it's acceptable if the switch candidate is one of the ancestors of the previous vote;
            // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
            // should then have a special case to continue building an alternate fork from this ancestor, NOT
            // the `last_voted_slot`. This is in contrast to the usual SwitchFailure, where ReplayStage continues
            // to build blocks on the latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
            if heaviest_subtree_fork_choice.is_strict_ancestor(
                &(switch_slot, switch_hash),
                &(last_voted_slot, last_voted_hash),
            ) {
                return rollback_due_to_duplicate_ancestor(latest_duplicate_ancestor);
            } else if progress
                .get_hash(last_voted_slot)
                .map(|current_slot_hash| current_slot_hash != last_voted_hash)
                .unwrap_or(true)
            {
                // Our last vote slot was purged because it was on a duplicate fork; don't continue below
                // where checks may panic. We allow a freebie vote here that may violate switching
                // thresholds.
                // TODO: Properly handle this case
                info!(
                    "Allowing switch vote on {:?} because last vote {:?} was rolled back",
                    (switch_slot, switch_hash),
                    (last_voted_slot, last_voted_hash)
                );
                return SwitchForkDecision::SwitchProof(Hash::default());
            }
        }

        let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| {
            if self.is_stray_last_vote() {
                // Unless the last vote is stray and stale, ancestors.get(last_voted_slot) must
                // return Some(_), justifying the panic! below.
                // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None
                // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
                // reached in that case either.
                // In other words, except for stray slots, all other slots have been voted on while
                // this validator has been running, so we must be able to fetch ancestors for
                // all of them.
                empty_ancestors_due_to_minor_unsynced_ledger()
            } else {
                panic!("no ancestors found with slot: {last_voted_slot}");
            }
        });

        let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();

        if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
            // If the `switch_slot` is a descendant of the last vote,
            // no switching proof is necessary
            return SwitchForkDecision::SameFork;
        }

        if last_vote_ancestors.contains(&switch_slot) {
            if self.is_stray_last_vote() {
                return suspended_decision_due_to_major_unsynced_ledger();
            } else {
                panic!(
                    "Should never consider switching to ancestor ({switch_slot}) of last vote: \
                     {last_voted_slot}, ancestors({last_vote_ancestors:?})",
                );
            }
        }

        // By this point, we know the `switch_slot` is on a different fork
        // (is neither an ancestor nor descendant of `last_vote`), so a
        // switching proof is necessary
        let switch_proof = Hash::default();
        let mut locked_out_stake = 0;
        let mut locked_out_vote_accounts = HashSet::new();
        for (candidate_slot, descendants) in descendants.iter() {
            // 1) Don't consider any banks that haven't been frozen yet
            //    because the needed stats are unavailable
            // 2) Only consider lockouts at the latest `frozen` bank
            //    on each fork, as that bank will contain all the
            //    lockout intervals for ancestors on that fork as well.
            // 3) Don't consider lockouts on the `last_vote` itself
            // 4) Don't consider lockouts on any descendants of
            //    `last_vote`
            // 5) Don't consider any banks before the root because
            //    all lockouts must be ancestors of `last_vote`
            if !progress
                .get_fork_stats(*candidate_slot)
                .map(|stats| stats.computed)
                .unwrap_or(false)
                || {
                    // If any of the descendants have the `computed` flag set, then there must be a more
                    // recent frozen bank on this fork to use, so we can ignore this one. Otherwise,
                    // even if this bank has descendants, if they have not yet been frozen / stats computed,
                    // then use this bank as a representative for the fork.
                    descendants.iter().any(|d| {
                        progress
                            .get_fork_stats(*d)
                            .map(|stats| stats.computed)
                            .unwrap_or(false)
                    })
                }
                || *candidate_slot == last_voted_slot
                || *candidate_slot <= root
                || {
                    !self
                        .is_valid_switching_proof_vote(
                            *candidate_slot,
                            last_voted_slot,
                            switch_slot,
                            ancestors,
                            last_vote_ancestors,
                        )
                        .expect(
                            "candidate_slot and switch_slot exist in descendants map, \
                             so they must exist in ancestors map",
                        )
                }
            {
                continue;
            }

            // By the time we reach here, any ancestors of the `last_vote`
            // should have been filtered out, as they all have a descendant,
            // namely the `last_vote` itself.
            assert!(!last_vote_ancestors.contains(candidate_slot));

            // Evaluate which vote accounts in the bank are locked out
            // in the interval candidate_slot..last_vote, which means
            // finding any lockout intervals in the `lockout_intervals` tree
            // for this bank that contain `last_vote`.
            let lockout_intervals = &progress
                .get(candidate_slot)
                .unwrap()
                .fork_stats
                .lockout_intervals;
            // Find any locked out intervals for vote accounts in this bank with
            // `lockout_interval_end` >= `last_vote`, which implies they are locked out at
            // `last_vote` on another fork.
            for (_lockout_interval_end, intervals_keyed_by_end) in
                lockout_intervals.range((Included(last_voted_slot), Unbounded))
            {
                for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
                    if locked_out_vote_accounts.contains(vote_account_pubkey) {
                        continue;
                    }

                    // Only count lockouts on slots that are:
                    // 1) Not ancestors of `last_vote`, meaning they are on a different fork
                    // 2) Not from before the current root as we can't determine if
                    // anything before the root was an ancestor of `last_vote` or not
                    if !last_vote_ancestors.contains(lockout_interval_start) && {
                        // Given a `lockout_interval_start` < root that appears in a
                        // bank for a `candidate_slot`, it must be that `lockout_interval_start`
                        // is an ancestor of the current root, because `candidate_slot` is a
                        // descendant of the current root
                        *lockout_interval_start > root
                    } {
                        let stake = epoch_vote_accounts
                            .get(vote_account_pubkey)
                            .map(|(stake, _)| *stake)
                            .unwrap_or(0);
                        locked_out_stake += stake;
                        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                            return SwitchForkDecision::SwitchProof(switch_proof);
                        }
                        locked_out_vote_accounts.insert(vote_account_pubkey);
                    }
                }
            }
        }

        // Check the latest votes for potential gossip votes that haven't landed yet
        for (
            vote_account_pubkey,
            (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash),
        ) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes()
        {
            if locked_out_vote_accounts.contains(&vote_account_pubkey) {
                continue;
            }

            if *candidate_latest_frozen_vote > last_voted_slot && {
                // Because `candidate_latest_frozen_vote` is the last vote made by some validator
                // in the cluster for a frozen bank `B` observed through gossip, we may have cleared
                // that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
                // like so:
                //
                //    |----------X ------candidate_latest_frozen_vote (frozen)
                // old root
                //    |----------new root ----last_voted_slot
                //
                // In most cases, because `last_voted_slot` must be a descendant of `root`, then
                // if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
                // directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
                // because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
                // should be safe to count towards the switching proof.
                //
                // However, there is also the possibility that `last_voted_slot` is a stray, in which
                // case we cannot make this conclusion as we do not know the ancestors/descendants
                // of strays. Hence we err on the side of caution here and ignore this vote. This
                // is ok because validators voting on different unrooted forks should eventually vote
                // on some descendant of the root, at which time they can be included in switching proofs.
                self.is_valid_switching_proof_vote(
                    *candidate_latest_frozen_vote,
                    last_voted_slot,
                    switch_slot,
                    ancestors,
                    last_vote_ancestors,
                )
                .unwrap_or(false)
            } {
                let stake = epoch_vote_accounts
                    .get(vote_account_pubkey)
                    .map(|(stake, _)| *stake)
                    .unwrap_or(0);
                locked_out_stake += stake;
                if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                    return SwitchForkDecision::SwitchProof(switch_proof);
                }
                locked_out_vote_accounts.insert(vote_account_pubkey);
            }
        }

        // We have not detected sufficient lockout past the last voted slot to generate
        // a switching proof
        SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
    }

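    /// Computes the switch decision for `switch_slot` relative to this tower's
    /// last vote and caches the result in `last_switch_threshold_check`,
    /// tracing whenever the decision changes.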
1234    #[allow(clippy::too_many_arguments)]
1235    pub(crate) fn check_switch_threshold(
1236        &mut self,
1237        switch_slot: Slot,
1238        ancestors: &HashMap<Slot, HashSet<u64>>,
1239        descendants: &HashMap<Slot, HashSet<u64>>,
1240        progress: &ProgressMap,
1241        total_stake: u64,
1242        epoch_vote_accounts: &VoteAccountsHashMap,
1243        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
1244        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
1245    ) -> SwitchForkDecision {
1246        let decision = self.make_check_switch_threshold_decision(
1247            switch_slot,
1248            ancestors,
1249            descendants,
1250            progress,
1251            total_stake,
1252            epoch_vote_accounts,
1253            latest_validator_votes_for_frozen_banks,
1254            heaviest_subtree_fork_choice,
1255        );
1256        let new_check = Some((switch_slot, decision.clone()));
1257        if new_check != self.last_switch_threshold_check {
1258            trace!(
1259                "new switch threshold check: slot {}: {:?}",
1260                switch_slot,
1261                decision,
1262            );
1263            self.last_switch_threshold_check = new_check;
1264        }
1265        decision
1266    }
1267
1268    fn is_first_switch_check(&self) -> bool {
1269        self.last_switch_threshold_check.is_none()
1270    }
1271
1272    // Optimistically skip the stake check if casting a vote would not increase
1273    // the lockout at this threshold. This is because if you bounce back to
1274    // voting on the main fork after not voting for a while, your latest vote
1275    // might pop off a lot of the votes in the tower. The stake from these votes
1276    // would have rolled up to earlier votes in the tower, which presumably
1277    // could have helped us pass the threshold check. Worst case, we'll just
1278    // recheck later without having increased lockouts.
1279    fn optimistically_bypass_vote_stake_threshold_check<'a>(
1280        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1281        threshold_vote: &Lockout,
1282    ) -> bool {
1283        for old_vote in tower_before_applying_vote {
1284            if old_vote.slot() == threshold_vote.slot()
1285                && old_vote.confirmation_count() == threshold_vote.confirmation_count()
1286            {
1287                return true;
1288            }
1289        }
1290        false
1291    }
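
    // Sketch of when the bypass above fires, using hypothetical tower contents: if the
    // tower before applying the new vote already holds a lockout (slot 10,
    // confirmation_count 3) and the threshold vote computed after applying the new
    // vote is also (slot 10, confirmation_count 3), the new vote does not deepen the
    // lockout at that depth and the stake check is skipped. If applying the vote would
    // instead bump that lockout to confirmation_count 4, the bypass does not apply and
    // the stake threshold is enforced as usual.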
1292
1293    /// Checks a single vote threshold for `slot`
1294    fn check_vote_stake_threshold<'a>(
1295        threshold_vote: Option<&Lockout>,
1296        tower_before_applying_vote: impl Iterator<Item = &'a Lockout>,
1297        threshold_depth: usize,
1298        threshold_size: f64,
1299        slot: Slot,
1300        voted_stakes: &HashMap<Slot, u64>,
1301        total_stake: u64,
1302    ) -> ThresholdDecision {
1303        let Some(threshold_vote) = threshold_vote else {
1304            // Tower isn't that deep.
1305            return ThresholdDecision::PassedThreshold;
1306        };
1307        let Some(fork_stake) = voted_stakes.get(&threshold_vote.slot()) else {
1308            // We haven't seen any votes on this fork yet, so no stake
1309            return ThresholdDecision::FailedThreshold(threshold_depth as u64, 0);
1310        };
1311
1312        let lockout = *fork_stake as f64 / total_stake as f64;
1313        trace!(
1314            "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \
1315             total_stake: {}",
1316            slot,
1317            threshold_vote.slot(),
1318            lockout,
1319            fork_stake,
1320            total_stake
1321        );
1322        if Self::optimistically_bypass_vote_stake_threshold_check(
1323            tower_before_applying_vote,
1324            threshold_vote,
1325        ) || lockout > threshold_size
1326        {
1327            return ThresholdDecision::PassedThreshold;
1328        }
1329        ThresholdDecision::FailedThreshold(threshold_depth as u64, *fork_stake)
1330    }
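
    // Worked example of the threshold math above (hypothetical stakes): with
    // voted_stakes[&threshold_vote.slot()] = 70 and total_stake = 100, lockout is
    // 70 / 100 = 0.70, which passes a threshold_size of 0.67 (the value used by the
    // tests below). With only 60 staked on the fork, 0.60 <= 0.67, so
    // FailedThreshold(threshold_depth, 60) is returned unless the optimistic bypass
    // applies.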
1331
1332    /// Performs vote threshold checks for `slot`
1333    pub fn check_vote_stake_thresholds(
1334        &self,
1335        slot: Slot,
1336        voted_stakes: &VotedStakes,
1337        total_stake: Stake,
1338    ) -> Vec<ThresholdDecision> {
1339        let mut threshold_decisions = vec![];
1340        // Generate the vote state assuming this vote is included.
1341        let mut vote_state = self.vote_state.clone();
1342        vote_state.process_next_vote_slot(slot);
1343
1344        // Assemble all the vote thresholds and depths to check.
1345        let vote_thresholds_and_depths = vec![
1346            // The following two checks are log-only and are currently being used for experimentation
1347            // purposes. We wish to impose a shallow threshold check to prevent the frequent 8-deep
1348            // lockouts seen multiple times a day. We check both the 4th and 5th depths here to collect
1349            // metrics to determine the right depth and threshold percentage to set in the future.
1350            (VOTE_THRESHOLD_DEPTH_SHALLOW, SWITCH_FORK_THRESHOLD),
1351            (VOTE_THRESHOLD_DEPTH_SHALLOW + 1, SWITCH_FORK_THRESHOLD),
1352            (self.threshold_depth, self.threshold_size),
1353        ];
1354
1355        // Check one by one and add any failures to be returned
1356        for (threshold_depth, threshold_size) in vote_thresholds_and_depths {
1357            if let ThresholdDecision::FailedThreshold(vote_depth, stake) =
1358                Self::check_vote_stake_threshold(
1359                    vote_state.nth_recent_lockout(threshold_depth),
1360                    self.vote_state.votes.iter(),
1361                    threshold_depth,
1362                    threshold_size,
1363                    slot,
1364                    voted_stakes,
1365                    total_stake,
1366                )
1367            {
1368                threshold_decisions.push(ThresholdDecision::FailedThreshold(vote_depth, stake));
1369            }
1370        }
1371        threshold_decisions
1372    }
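
    // To make the loop above concrete: per the comment above, the two log-only checks
    // cover the 4th and 5th depths against SWITCH_FORK_THRESHOLD, and the third entry
    // is the configured (threshold_depth, threshold_size) pair. Every failing depth
    // contributes one FailedThreshold entry to the returned vector, so an empty vector
    // means all three checks passed.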
1373
1374    /// Update lockouts for all the ancestors
1375    pub(crate) fn populate_ancestor_voted_stakes(
1376        voted_stakes: &mut VotedStakes,
1377        vote_slots: impl IntoIterator<Item = Slot>,
1378        ancestors: &HashMap<Slot, HashSet<Slot>>,
1379    ) {
1380        // If there are no ancestors, that means this slot must be from before the current root,
1381        // in which case the lockouts won't be calculated in bank_weight anyway, so ignore
1382        // this slot
1383        for vote_slot in vote_slots {
1384            if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
1385                voted_stakes.entry(vote_slot).or_default();
1386                for slot in slot_ancestors {
1387                    voted_stakes.entry(*slot).or_default();
1388                }
1389            }
1390        }
1391    }
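
    // For instance (hypothetical input): vote_slots = [7] with ancestors = {7: {5, 6}}
    // seeds zeroed voted_stakes entries for slots 7, 6 and 5, while a vote slot with
    // no entry in `ancestors` (i.e. from before the current root) is skipped.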
1392
1393    /// Update stake for all the ancestors.
1394    /// Note: the stake is the same for all the ancestors.
1395    fn update_ancestor_voted_stakes(
1396        voted_stakes: &mut VotedStakes,
1397        voted_slot: Slot,
1398        voted_stake: u64,
1399        ancestors: &HashMap<Slot, HashSet<Slot>>,
1400    ) {
1401        // If there are no ancestors, that means this slot must be from
1402        // before the current root, so ignore this slot
1403        if let Some(vote_slot_ancestors) = ancestors.get(&voted_slot) {
1404            *voted_stakes.entry(voted_slot).or_default() += voted_stake;
1405            for slot in vote_slot_ancestors {
1406                *voted_stakes.entry(*slot).or_default() += voted_stake;
1407            }
1408        }
1409    }
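
    // Small worked example for the accumulation above (hypothetical values): with
    // ancestors = {5: {2, 3, 4}} and a vote for slot 5 carrying 100 stake, the
    // voted_stakes entries for slots 5, 4, 3 and 2 each increase by 100; a voted slot
    // missing from `ancestors` (i.e. below the current root) is ignored entirely.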
1410
1411    fn voted_slots(&self) -> Vec<Slot> {
1412        self.vote_state
1413            .votes
1414            .iter()
1415            .map(|lockout| lockout.slot())
1416            .collect()
1417    }
1418
1419    pub fn is_stray_last_vote(&self) -> bool {
1420        self.stray_restored_slot.is_some() && self.stray_restored_slot == self.last_voted_slot()
1421    }
1422
1423    // The tower root can be older/newer if the validator booted from a newer/older snapshot, so
1424    // tower lockouts may need adjustment
1425    pub fn adjust_lockouts_after_replay(
1426        mut self,
1427        replayed_root: Slot,
1428        slot_history: &SlotHistory,
1429    ) -> Result<Self> {
1430        // sanity assertions for roots
1431        let tower_root = self.root();
1432        info!(
1433            "adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
1434            replayed_root,
1435            self.voted_slots(),
1436            tower_root,
1437            replayed_root,
1438        );
1439        assert_eq!(slot_history.check(replayed_root), Check::Found);
1440
1441        assert!(
1442            self.last_vote == VoteTransaction::from(VoteStateUpdate::default())
1443                && self.vote_state.votes.is_empty()
1444                || self.last_vote == VoteTransaction::from(TowerSync::default())
1445                    && self.vote_state.votes.is_empty()
1446                || !self.vote_state.votes.is_empty(),
1447            "last vote: {:?} vote_state.votes: {:?}",
1448            self.last_vote,
1449            self.vote_state.votes
1450        );
1451
1452        if let Some(last_voted_slot) = self.last_voted_slot() {
1453            if tower_root <= replayed_root {
1454                // Normally, we go into this clause, possibly with the help of
1455                // reconcile_blockstore_roots_with_external_source()
1456                if slot_history.check(last_voted_slot) == Check::TooOld {
1457                    // We could try hard to anchor with other older votes, but opt to simplify the
1458                    // following logic
1459                    return Err(TowerError::TooOldTower(
1460                        last_voted_slot,
1461                        slot_history.oldest(),
1462                    ));
1463                }
1464
1465                self.adjust_lockouts_with_slot_history(slot_history)?;
1466                self.initialize_root(replayed_root);
1467            } else {
1468                // This should never occur under normal operation.
1469                // While this validator's voting is suspended this way,
1470                // suspended_decision_due_to_major_unsynced_ledger() will also be touched.
1471                let message = format!(
1472                    "For some reason, we're REPROCESSING slots which have already been voted \
1473                     on and ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
1474                );
1475                error!("{}", message);
1476                datapoint_error!("tower_error", ("error", message, String));
1477
1478                // Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
1479                // using a synthesized SlotHistory.
1480
1481                let mut warped_slot_history = (*slot_history).clone();
1482                // Blockstore doesn't have the tower_root slot because
1483                // (replayed_root < tower_root) holds in this else clause, meaning the tower is
1484                // from the future from the blockstore's point of view.
1485                // Pretend the blockstore has the future tower_root to anchor exactly with that
1486                // slot by adding tower_root to a slot history. The added slot will be newer
1487                // than all slots in the slot history (remember tower_root > replayed_root),
1488                // satisfying the slot history invariant.
1489                // Thus, the whole process will also be safe, because tower_root exists
1490                // within both the tower and the slot history, guaranteeing the adjustment
1491                // succeeds and all future votes are retained correctly while sanitizing.
1492                warped_slot_history.add(tower_root);
1493
1494                self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
1495                // don't update the root; the future tower's root should be kept across
1496                // validator restarts to continue showing the scary messages at restart until
1497                // the next vote.
1498            }
1499        } else {
1500            // This else clause is for a newly created tower.
1501            // initialize_lockouts_from_bank() should ensure the following invariant,
1502            // otherwise we're screwing something up.
1503            assert_eq!(tower_root, replayed_root);
1504        }
1505
1506        Ok(self)
1507    }
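
    // Example of the two branches above (hypothetical slots): with tower_root = 90 and
    // replayed_root = 100, the normal branch anchors the tower against the real
    // SlotHistory and re-roots it at 100. With tower_root = 110 and
    // replayed_root = 100, the tower is "from the future", so a cloned SlotHistory
    // with 110 added is used purely for sanitization and the tower root is left
    // untouched until the next vote.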
1508
1509    fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
1510        let tower_root = self.root();
1511        // retained slots will consist only of divergent slots
1512        let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
1513            Vec::with_capacity(self.vote_state.votes.len());
1514
1515        let mut still_in_future = true;
1516        let mut past_outside_history = false;
1517        let mut checked_slot = None;
1518        let mut anchored_slot = None;
1519
1520        let mut slots_in_tower = vec![tower_root];
1521        slots_in_tower.extend(self.voted_slots());
1522
1523        // iterate over votes + root (if any) in the newest => oldest order
1524        // bail out early if a bad condition is found
1525        for slot_in_tower in slots_in_tower.iter().rev() {
1526            let check = slot_history.check(*slot_in_tower);
1527
1528            if anchored_slot.is_none() && check == Check::Found {
1529                anchored_slot = Some(*slot_in_tower);
1530            } else if anchored_slot.is_some() && check == Check::NotFound {
1531                // this can't happen unless we're fed a bogus snapshot
1532                return Err(TowerError::FatallyInconsistent("diverged ancestor?"));
1533            }
1534
1535            if still_in_future && check != Check::Future {
1536                still_in_future = false;
1537            } else if !still_in_future && check == Check::Future {
1538                // really odd cases: badly ordered votes?
1539                return Err(TowerError::FatallyInconsistent("time warped?"));
1540            }
1541            if !past_outside_history && check == Check::TooOld {
1542                past_outside_history = true;
1543            } else if past_outside_history && check != Check::TooOld {
1544                // really odd cases: badly ordered votes?
1545                return Err(TowerError::FatallyInconsistent(
1546                    "not too old once after got too old?",
1547                ));
1548            }
1549
1550            if let Some(checked_slot) = checked_slot {
1551                // This is a special case: the root slot may repeat, and only once, but only
1552                // if the tower is initialized and contains a vote for the root
1553                let voting_for_root =
1554                    *slot_in_tower == checked_slot && *slot_in_tower == tower_root;
1555
1556                if !voting_for_root {
1557                    // Unless we've been voting since genesis, slots_in_tower must always be older than the last checked_slot,
1558                    // including all vote slots and the root slot.
1559                    assert!(
1560                        *slot_in_tower < checked_slot,
1561                        "slot_in_tower({}) < checked_slot({})",
1562                        *slot_in_tower,
1563                        checked_slot
1564                    );
1565                }
1566            }
1567
1568            checked_slot = Some(*slot_in_tower);
1569
1570            retain_flags_for_each_vote_in_reverse.push(anchored_slot.is_none());
1571        }
1572
1573        // Check for errors if not anchored
1574        info!("adjusted tower's anchored slot: {:?}", anchored_slot);
1575        if anchored_slot.is_none() {
1576            // this error really shouldn't happen unless the ledger/tower is corrupted
1577            return Err(TowerError::FatallyInconsistent(
1578                "no common slot for rooted tower",
1579            ));
1580        }
1581
1582        assert_eq!(
1583            slots_in_tower.len(),
1584            retain_flags_for_each_vote_in_reverse.len()
1585        );
1586        // pop for the tower root
1587        retain_flags_for_each_vote_in_reverse.pop();
1588        let mut retain_flags_for_each_vote =
1589            retain_flags_for_each_vote_in_reverse.into_iter().rev();
1590
1591        let original_votes_len = self.vote_state.votes.len();
1592        self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
1593
1594        if self.vote_state.votes.is_empty() {
1595            info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
1596            // we might not have banks for those votes so just reset.
1597            // That's because the votes may well be past replayed_root
1598            self.last_vote = VoteTransaction::from(Vote::default());
1599        } else {
1600            info!(
1601                "{} restored votes (out of {}) were on a different fork or are upcoming votes on \
1602                 unrooted slots: {:?}!",
1603                self.voted_slots().len(),
1604                original_votes_len,
1605                self.voted_slots()
1606            );
1607
1608            assert_eq!(self.last_voted_slot(), self.voted_slots().last().copied());
1609            self.stray_restored_slot = self.last_vote.last_voted_slot()
1610        }
1611
1612        Ok(())
1613    }
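
    // Anchoring sketch for the routine above (hypothetical tower): with
    // slots_in_tower = [root = 10, 11, 12, 13] and a SlotHistory containing 10 and 11
    // but not 12 or 13, the newest-to-oldest scan anchors at 11, so the retain flags
    // keep only the divergent votes 12 and 13 and drop 11. If no slot in the tower
    // were found in the history at all, the "no common slot for rooted tower" error
    // would be returned instead.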
1614
1615    fn initialize_lockouts_from_bank(
1616        &mut self,
1617        vote_account_pubkey: &Pubkey,
1618        root: Slot,
1619        bank: &Bank,
1620    ) {
1621        if let Some(vote_account) = bank.get_vote_account(vote_account_pubkey) {
1622            self.vote_state = TowerVoteState::from(vote_account.vote_state_view());
1623            self.initialize_root(root);
1624            self.initialize_lockouts(|v| v.slot() > root);
1625        } else {
1626            self.initialize_root(root);
1627            info!(
1628                "vote account({}) not found in bank (slot={})",
1629                vote_account_pubkey,
1630                bank.slot()
1631            );
1632        }
1633    }
1634
1635    fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
1636        self.vote_state.votes.retain(should_retain);
1637    }
1638
1639    // Updating the root is needed to correctly restore from the newly saved tower on
1640    // the next boot
1641    fn initialize_root(&mut self, root: Slot) {
1642        self.vote_state.root_slot = Some(root);
1643    }
1644
1645    pub fn save(&self, tower_storage: &dyn TowerStorage, node_keypair: &Keypair) -> Result<()> {
1646        let saved_tower = SavedTower::new(self, node_keypair)?;
1647        tower_storage.store(&SavedTowerVersions::from(saved_tower))?;
1648        Ok(())
1649    }
1650
1651    pub fn restore(tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey) -> Result<Self> {
1652        tower_storage.load(node_pubkey)
1653    }
1654}
1655
1656#[derive(Error, Debug)]
1657pub enum TowerError {
1658    #[error("IO Error: {0}")]
1659    IoError(#[from] std::io::Error),
1660
1661    #[error("Serialization Error: {0}")]
1662    SerializeError(#[from] bincode::Error),
1663
1664    #[error("The signature on the saved tower is invalid")]
1665    InvalidSignature,
1666
1667    #[error("The tower does not match this validator: {0}")]
1668    WrongTower(String),
1669
1670    #[error(
1671        "The tower is too old: newest slot in tower ({0}) << oldest slot in available history \
1672         ({1})"
1673    )]
1674    TooOldTower(Slot, Slot),
1675
1676    #[error("The tower is fatally inconsistent with blockstore: {0}")]
1677    FatallyInconsistent(&'static str),
1678
1679    #[error("The tower is useless because of new hard fork: {0}")]
1680    HardFork(Slot),
1681}
1682
1683impl TowerError {
1684    pub fn is_file_missing(&self) -> bool {
1685        if let TowerError::IoError(io_err) = &self {
1686            io_err.kind() == std::io::ErrorKind::NotFound
1687        } else {
1688            false
1689        }
1690    }
1691    pub fn is_too_old(&self) -> bool {
1692        matches!(self, TowerError::TooOldTower(_, _))
1693    }
1694}
1695
1696#[derive(Debug)]
1697pub enum ExternalRootSource {
1698    Tower(Slot),
1699    HardFork(Slot),
1700}
1701
1702impl ExternalRootSource {
1703    fn root(&self) -> Slot {
1704        match self {
1705            ExternalRootSource::Tower(slot) => *slot,
1706            ExternalRootSource::HardFork(slot) => *slot,
1707        }
1708    }
1709}
1710
1711// Given an untimely crash, the tower may have roots that are not reflected in the
1712// blockstore, or the reverse.
1713// That's because we don't impose any ordering guarantees or any kind of write barriers
1714// between the tower (plain old POSIX fs calls) and the blockstore (through RocksDB), when
1715// `ReplayState::handle_votable_bank()` saves the tower before setting blockstore roots.
1716pub fn reconcile_blockstore_roots_with_external_source(
1717    external_source: ExternalRootSource,
1718    blockstore: &Blockstore,
1719    // blockstore.max_root() might have been updated already,
1720    // so take a &mut param for both input and output (output iff we update the root)
1721    last_blockstore_root: &mut Slot,
1722) -> blockstore::Result<()> {
1723    let external_root = external_source.root();
1724    if *last_blockstore_root < external_root {
1725        // Ensure external_root itself exists and is marked as rooted in the blockstore,
1726        // in addition to its ancestors.
1727        let new_roots: Vec<_> = AncestorIterator::new_inclusive(external_root, blockstore)
1728            .take_while(|current| match current.cmp(last_blockstore_root) {
1729                Ordering::Greater => true,
1730                Ordering::Equal => false,
1731                Ordering::Less => panic!(
1732                    "last_blockstore_root({last_blockstore_root}) is skipped while traversing \
1733                     blockstore (currently at {current}) from external root \
1734                     ({external_source:?})!?",
1735                ),
1736            })
1737            .collect();
1738        if !new_roots.is_empty() {
1739            info!(
1740                "Reconciling slots as root based on external root: {:?} (external: {:?}, \
1741                 blockstore: {})",
1742                new_roots, external_source, last_blockstore_root
1743            );
1744
1745            // Unfortunately, we can't supply duplicate-confirmed hashes,
1746            // because there is no guarantee that these slots can be replayed
1747            // under this code path's limited conditions (i.e. those shreds
1748            // might not be available, etc.). Also, correctly overcoming this
1749            // limitation is hard...
1750            blockstore.mark_slots_as_if_rooted_normally_at_startup(
1751                new_roots.into_iter().map(|root| (root, None)).collect(),
1752                false,
1753            )?;
1754
1755            // Update the caller-managed state of last root in blockstore.
1756            // Repeated calls of this function should result in a no-op for
1757            // the range of `new_roots`.
1758            *last_blockstore_root = blockstore.max_root();
1759        } else {
1760            // This indicates we're in a bad state, but still don't panic here.
1761            // That's because we might have a chance of recovering properly with a
1762            // newer snapshot.
1763            warn!(
1764                "Couldn't find any ancestor slots from external source ({:?}) towards blockstore \
1765                 root ({}); was the blockstore pruned, was only the tower moved into a new \
1766                 ledger, or was there just a hard fork?",
1767                external_source, last_blockstore_root,
1768            );
1769        }
1770    }
1771    Ok(())
1772}
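
// Illustrative scenario for the reconciliation above (hypothetical slots): if the
// restored tower claims root 150 while blockstore.max_root() is only 140, the ancestor
// walk from 150 collects every ancestor newer than 140 (say 150, 148, 145 and 142),
// marks them as rooted in the blockstore, and advances last_blockstore_root to the new
// max root. If 150 had no ancestors in the blockstore at all, only the warning branch
// runs and nothing is marked.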
1773
1774#[cfg(test)]
1775pub mod test {
1776    use {
1777        super::*,
1778        crate::{
1779            consensus::{
1780                fork_choice::ForkChoice, heaviest_subtree_fork_choice::SlotHashKey,
1781                tower_storage::FileTowerStorage,
1782            },
1783            replay_stage::HeaviestForkFailures,
1784            vote_simulator::VoteSimulator,
1785        },
1786        itertools::Itertools,
1787        solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
1788        solana_clock::Slot,
1789        solana_hash::Hash,
1790        solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path_auto_delete},
1791        solana_pubkey::Pubkey,
1792        solana_runtime::bank::Bank,
1793        solana_signer::Signer,
1794        solana_slot_history::SlotHistory,
1795        solana_vote::vote_account::VoteAccount,
1796        solana_vote_program::vote_state::{
1797            process_slot_vote_unchecked, Vote, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY,
1798        },
1799        std::{
1800            collections::{HashMap, VecDeque},
1801            fs::{remove_file, OpenOptions},
1802            io::{Read, Seek, SeekFrom, Write},
1803            path::PathBuf,
1804            sync::Arc,
1805        },
1806        tempfile::TempDir,
1807        trees::tr,
1808    };
1809
1810    fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> VoteAccountsHashMap {
1811        stake_votes
1812            .iter()
1813            .map(|(lamports, votes)| {
1814                let mut account = AccountSharedData::from(Account {
1815                    data: vec![0; VoteState::size_of()],
1816                    lamports: *lamports,
1817                    owner: solana_vote_program::id(),
1818                    ..Account::default()
1819                });
1820                let mut vote_state = VoteState::default();
1821                for slot in *votes {
1822                    process_slot_vote_unchecked(&mut vote_state, *slot);
1823                }
1824                VoteState::serialize(
1825                    &VoteStateVersions::new_current(vote_state),
1826                    account.data_as_mut_slice(),
1827                )
1828                .expect("serialize state");
1829                (
1830                    solana_pubkey::new_rand(),
1831                    (*lamports, VoteAccount::try_from(account).unwrap()),
1832                )
1833            })
1834            .collect()
1835    }
1836
1837    #[test]
1838    fn test_to_vote_instruction() {
1839        let vote = Vote::default();
1840        let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
1841        assert!(decision
1842            .to_vote_instruction(
1843                VoteTransaction::from(vote.clone()),
1844                &Pubkey::default(),
1845                &Pubkey::default()
1846            )
1847            .is_none());
1848
1849        decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
1850        assert!(decision
1851            .to_vote_instruction(
1852                VoteTransaction::from(vote.clone()),
1853                &Pubkey::default(),
1854                &Pubkey::default()
1855            )
1856            .is_none());
1857
1858        decision = SwitchForkDecision::SameFork;
1859        assert_eq!(
1860            decision.to_vote_instruction(
1861                VoteTransaction::from(vote.clone()),
1862                &Pubkey::default(),
1863                &Pubkey::default()
1864            ),
1865            Some(vote_instruction::vote(
1866                &Pubkey::default(),
1867                &Pubkey::default(),
1868                vote.clone(),
1869            ))
1870        );
1871
1872        decision = SwitchForkDecision::SwitchProof(Hash::default());
1873        assert_eq!(
1874            decision.to_vote_instruction(
1875                VoteTransaction::from(vote.clone()),
1876                &Pubkey::default(),
1877                &Pubkey::default()
1878            ),
1879            Some(vote_instruction::vote_switch(
1880                &Pubkey::default(),
1881                &Pubkey::default(),
1882                vote,
1883                Hash::default()
1884            ))
1885        );
1886    }
1887
1888    #[test]
1889    fn test_simple_votes() {
1890        // Init state
1891        let mut vote_simulator = VoteSimulator::new(1);
1892        let node_pubkey = vote_simulator.node_pubkeys[0];
1893        let mut tower = Tower::default();
1894
1895        // Create the tree of banks
1896        let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
1897
1898        // Set the voting behavior
1899        let mut cluster_votes = HashMap::new();
1900        let votes = vec![1, 2, 3, 4, 5];
1901        cluster_votes.insert(node_pubkey, votes.clone());
1902        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
1903
1904        // Simulate the votes
1905        for vote in votes {
1906            assert!(vote_simulator
1907                .simulate_vote(vote, &node_pubkey, &mut tower,)
1908                .is_empty());
1909        }
1910
1911        for i in 1..5 {
1912            assert_eq!(tower.vote_state.votes[i - 1].slot() as usize, i);
1913            assert_eq!(
1914                tower.vote_state.votes[i - 1].confirmation_count() as usize,
1915                6 - i
1916            );
1917        }
1918    }
1919
1920    #[test]
1921    fn test_switch_threshold_duplicate_rollback() {
1922        run_test_switch_threshold_duplicate_rollback(false);
1923    }
1924
1925    #[test]
1926    #[should_panic]
1927    fn test_switch_threshold_duplicate_rollback_panic() {
1928        run_test_switch_threshold_duplicate_rollback(true);
1929    }
1930
1931    fn setup_switch_test(num_accounts: usize) -> (Arc<Bank>, VoteSimulator, u64) {
1932        // Init state
1933        assert!(num_accounts > 1);
1934        let mut vote_simulator = VoteSimulator::new(num_accounts);
1935        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
1936        let total_stake = bank0.total_epoch_stake();
1937        assert_eq!(
1938            total_stake,
1939            vote_simulator.validator_keypairs.len() as u64 * 10_000
1940        );
1941
1942        // Create the tree of banks
1943        let forks = tr(0)
1944            / (tr(1)
1945                / (tr(2)
1946                    // Minor fork 1
1947                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
1948                    / (tr(43)
1949                        / (tr(44)
1950                            // Minor fork 2
1951                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
1952                            / (tr(110)))
1953                        / tr(112))));
1954
1955        // Fill the BankForks according to the above fork structure
1956        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
1957        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
1958            fork_progress.fork_stats.computed = true;
1959        }
1960
1961        (bank0, vote_simulator, total_stake)
1962    }
1963
1964    fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) {
1965        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
1966        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
1967        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
1968        let mut tower = Tower::default();
1969
1970        // Last vote is 47
1971        tower.record_vote(
1972            47,
1973            vote_simulator
1974                .bank_forks
1975                .read()
1976                .unwrap()
1977                .get(47)
1978                .unwrap()
1979                .hash(),
1980        );
1981
1982        // Trying to switch to an ancestor of the last vote should panic
1983        // unless the current vote has a duplicate ancestor
1984        let ancestor_of_voted_slot = 43;
1985        let duplicate_ancestor1 = 44;
1986        let duplicate_ancestor2 = 45;
1987        vote_simulator
1988            .heaviest_subtree_fork_choice
1989            .mark_fork_invalid_candidate(&(
1990                duplicate_ancestor1,
1991                vote_simulator
1992                    .bank_forks
1993                    .read()
1994                    .unwrap()
1995                    .get(duplicate_ancestor1)
1996                    .unwrap()
1997                    .hash(),
1998            ));
1999        vote_simulator
2000            .heaviest_subtree_fork_choice
2001            .mark_fork_invalid_candidate(&(
2002                duplicate_ancestor2,
2003                vote_simulator
2004                    .bank_forks
2005                    .read()
2006                    .unwrap()
2007                    .get(duplicate_ancestor2)
2008                    .unwrap()
2009                    .hash(),
2010            ));
2011        assert_eq!(
2012            tower.check_switch_threshold(
2013                ancestor_of_voted_slot,
2014                &ancestors,
2015                &descendants,
2016                &vote_simulator.progress,
2017                total_stake,
2018                bank0.epoch_vote_accounts(0).unwrap(),
2019                &vote_simulator.latest_validator_votes_for_frozen_banks,
2020                &vote_simulator.heaviest_subtree_fork_choice,
2021            ),
2022            SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2023        );
2024        let mut confirm_ancestors = vec![duplicate_ancestor1];
2025        if should_panic {
2026            // Adding the last duplicate ancestor will
2027            // 1) Cause the loop below to confirm the last ancestor
2028            // 2) Check the switch threshold on a vote ancestor when there
2029            // are no duplicates on that fork, which will cause a panic
2030            confirm_ancestors.push(duplicate_ancestor2);
2031        }
2032        for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
2033            vote_simulator
2034                .heaviest_subtree_fork_choice
2035                .mark_fork_valid_candidate(&(
2036                    duplicate_ancestor,
2037                    vote_simulator
2038                        .bank_forks
2039                        .read()
2040                        .unwrap()
2041                        .get(duplicate_ancestor)
2042                        .unwrap()
2043                        .hash(),
2044                ));
2045            let res = tower.check_switch_threshold(
2046                ancestor_of_voted_slot,
2047                &ancestors,
2048                &descendants,
2049                &vote_simulator.progress,
2050                total_stake,
2051                bank0.epoch_vote_accounts(0).unwrap(),
2052                &vote_simulator.latest_validator_votes_for_frozen_banks,
2053                &vote_simulator.heaviest_subtree_fork_choice,
2054            );
2055            if i == 0 {
2056                assert_eq!(
2057                    res,
2058                    SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
2059                );
2060            }
2061        }
2062    }
2063
2064    #[test]
2065    fn test_switch_threshold() {
2066        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2067        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2068        let mut descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2069        let mut tower = Tower::default();
2070        let other_vote_account = vote_simulator.vote_pubkeys[1];
2071
2072        // Last vote is 47
2073        tower.record_vote(47, Hash::default());
2074
2075        // Trying to switch to a descendant of last vote should always work
2076        assert_eq!(
2077            tower.check_switch_threshold(
2078                48,
2079                &ancestors,
2080                &descendants,
2081                &vote_simulator.progress,
2082                total_stake,
2083                bank0.epoch_vote_accounts(0).unwrap(),
2084                &vote_simulator.latest_validator_votes_for_frozen_banks,
2085                &vote_simulator.heaviest_subtree_fork_choice,
2086            ),
2087            SwitchForkDecision::SameFork
2088        );
2089
2090        // Trying to switch to another fork at 110 should fail
2091        assert_eq!(
2092            tower.check_switch_threshold(
2093                110,
2094                &ancestors,
2095                &descendants,
2096                &vote_simulator.progress,
2097                total_stake,
2098                bank0.epoch_vote_accounts(0).unwrap(),
2099                &vote_simulator.latest_validator_votes_for_frozen_banks,
2100                &vote_simulator.heaviest_subtree_fork_choice,
2101            ),
2102            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2103        );
2104
2105        // Adding another validator lockout on a descendant of last vote should
2106        // not count toward the switch threshold
2107        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2108        assert_eq!(
2109            tower.check_switch_threshold(
2110                110,
2111                &ancestors,
2112                &descendants,
2113                &vote_simulator.progress,
2114                total_stake,
2115                bank0.epoch_vote_accounts(0).unwrap(),
2116                &vote_simulator.latest_validator_votes_for_frozen_banks,
2117                &vote_simulator.heaviest_subtree_fork_choice,
2118            ),
2119            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2120        );
2121
2122        // Adding another validator lockout on an ancestor of last vote should
2123        // not count toward the switch threshold
2124        vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
2125        assert_eq!(
2126            tower.check_switch_threshold(
2127                110,
2128                &ancestors,
2129                &descendants,
2130                &vote_simulator.progress,
2131                total_stake,
2132                bank0.epoch_vote_accounts(0).unwrap(),
2133                &vote_simulator.latest_validator_votes_for_frozen_banks,
2134                &vote_simulator.heaviest_subtree_fork_choice,
2135            ),
2136            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2137        );
2138
2139        // Adding another validator lockout on a different fork, but the lockout
2140        // doesn't cover the last vote, should not satisfy the switch threshold
2141        vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
2142        assert_eq!(
2143            tower.check_switch_threshold(
2144                110,
2145                &ancestors,
2146                &descendants,
2147                &vote_simulator.progress,
2148                total_stake,
2149                bank0.epoch_vote_accounts(0).unwrap(),
2150                &vote_simulator.latest_validator_votes_for_frozen_banks,
2151                &vote_simulator.heaviest_subtree_fork_choice,
2152            ),
2153            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2154        );
2155
2156        // Adding another validator lockout on a different fork where the lockout
2157        // covers the last vote would count towards the switch threshold,
2158        // unless the bank is not the most recent frozen bank on the fork (14 is a
2159        // frozen/computed bank > 13 on the same fork in this case)
2160        vote_simulator.simulate_lockout_interval(13, (12, 47), &other_vote_account);
2161        assert_eq!(
2162            tower.check_switch_threshold(
2163                110,
2164                &ancestors,
2165                &descendants,
2166                &vote_simulator.progress,
2167                total_stake,
2168                bank0.epoch_vote_accounts(0).unwrap(),
2169                &vote_simulator.latest_validator_votes_for_frozen_banks,
2170                &vote_simulator.heaviest_subtree_fork_choice,
2171            ),
2172            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2173        );
2174
2175        // Adding another validator lockout on a different fork, and the lockout
2176        // covers the last vote, should satisfy the switch threshold
2177        vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
2178        assert_eq!(
2179            tower.check_switch_threshold(
2180                110,
2181                &ancestors,
2182                &descendants,
2183                &vote_simulator.progress,
2184                total_stake,
2185                bank0.epoch_vote_accounts(0).unwrap(),
2186                &vote_simulator.latest_validator_votes_for_frozen_banks,
2187                &vote_simulator.heaviest_subtree_fork_choice,
2188            ),
2189            SwitchForkDecision::SwitchProof(Hash::default())
2190        );
2191
2192        // Adding another unfrozen descendant of the tip of 14 should not remove
2193        // slot 14 from consideration because it is still the most recent frozen
2194        // bank on its fork
2195        descendants.get_mut(&14).unwrap().insert(10000);
2196        assert_eq!(
2197            tower.check_switch_threshold(
2198                110,
2199                &ancestors,
2200                &descendants,
2201                &vote_simulator.progress,
2202                total_stake,
2203                bank0.epoch_vote_accounts(0).unwrap(),
2204                &vote_simulator.latest_validator_votes_for_frozen_banks,
2205                &vote_simulator.heaviest_subtree_fork_choice,
2206            ),
2207            SwitchForkDecision::SwitchProof(Hash::default())
2208        );
2209
2210        // If we set a root, then any lockout intervals below the root shouldn't
2211        // count toward the switch threshold. This means the other validator's
2212        // vote lockout no longer counts
2213        tower.vote_state.root_slot = Some(43);
2214        // Refresh ancestors and descendants for new root.
2215        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2216        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2217
2218        assert_eq!(
2219            tower.check_switch_threshold(
2220                110,
2221                &ancestors,
2222                &descendants,
2223                &vote_simulator.progress,
2224                total_stake,
2225                bank0.epoch_vote_accounts(0).unwrap(),
2226                &vote_simulator.latest_validator_votes_for_frozen_banks,
2227                &vote_simulator.heaviest_subtree_fork_choice,
2228            ),
2229            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2230        );
2231    }
2232
2233    #[test]
2234    fn test_switch_threshold_use_gossip_votes() {
2235        let num_validators = 2;
2236        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
2237        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2238        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2239        let mut tower = Tower::default();
2240        let other_vote_account = vote_simulator.vote_pubkeys[1];
2241
2242        // Last vote is 47
2243        tower.record_vote(47, Hash::default());
2244
2245        // Trying to switch to another fork at 110 should fail
2246        assert_eq!(
2247            tower.check_switch_threshold(
2248                110,
2249                &ancestors,
2250                &descendants,
2251                &vote_simulator.progress,
2252                total_stake,
2253                bank0.epoch_vote_accounts(0).unwrap(),
2254                &vote_simulator.latest_validator_votes_for_frozen_banks,
2255                &vote_simulator.heaviest_subtree_fork_choice,
2256            ),
2257            SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
2258        );
2259
2260        // Adding a vote on the descendant shouldn't count toward the switch threshold
2261        vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
2262        assert_eq!(
2263            tower.check_switch_threshold(
2264                110,
2265                &ancestors,
2266                &descendants,
2267                &vote_simulator.progress,
2268                total_stake,
2269                bank0.epoch_vote_accounts(0).unwrap(),
2270                &vote_simulator.latest_validator_votes_for_frozen_banks,
2271                &vote_simulator.heaviest_subtree_fork_choice,
2272            ),
2273            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2274        );
2275
2276        // Adding a later vote from gossip that isn't on the same fork should count toward the
2277        // switch threshold
2278        vote_simulator
2279            .latest_validator_votes_for_frozen_banks
2280            .check_add_vote(
2281                other_vote_account,
2282                112,
2283                Some(
2284                    vote_simulator
2285                        .bank_forks
2286                        .read()
2287                        .unwrap()
2288                        .get(112)
2289                        .unwrap()
2290                        .hash(),
2291                ),
2292                false,
2293            );
2294
2295        assert_eq!(
2296            tower.check_switch_threshold(
2297                110,
2298                &ancestors,
2299                &descendants,
2300                &vote_simulator.progress,
2301                total_stake,
2302                bank0.epoch_vote_accounts(0).unwrap(),
2303                &vote_simulator.latest_validator_votes_for_frozen_banks,
2304                &vote_simulator.heaviest_subtree_fork_choice,
2305            ),
2306            SwitchForkDecision::SwitchProof(Hash::default())
2307        );
2308
2309        // If we now set a root that causes slot 112 to be purged from BankForks, then
2310        // the switch proof will now fail since that validator's vote can no longer be
2311        // included in the switching proof
2312        vote_simulator.set_root(44);
2313        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
2314        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
2315        assert_eq!(
2316            tower.check_switch_threshold(
2317                110,
2318                &ancestors,
2319                &descendants,
2320                &vote_simulator.progress,
2321                total_stake,
2322                bank0.epoch_vote_accounts(0).unwrap(),
2323                &vote_simulator.latest_validator_votes_for_frozen_banks,
2324                &vote_simulator.heaviest_subtree_fork_choice,
2325            ),
2326            SwitchForkDecision::FailedSwitchThreshold(0, 20000)
2327        );
2328    }
2329
2330    #[test]
2331    fn test_switch_threshold_votes() {
2332        // Init state
2333        let mut vote_simulator = VoteSimulator::new(4);
2334        let node_pubkey = vote_simulator.node_pubkeys[0];
2335        let mut tower = Tower::default();
2336        let forks = tr(0)
2337            / (tr(1)
2338                / (tr(2)
2339                    // Minor fork 1
2340                    / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2341                    / (tr(43)
2342                        / (tr(44)
2343                            // Minor fork 2
2344                            / (tr(45) / (tr(46))))
2345                        / (tr(110)))));
2346
2347        // Have two validators, each representing 20% of the stake, vote on
2348        // minor fork 2 at slots 46 + 47
2349        let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
2350        cluster_votes.insert(vote_simulator.node_pubkeys[1], vec![46]);
2351        cluster_votes.insert(vote_simulator.node_pubkeys[2], vec![47]);
2352        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2353
2354        // Vote on the first minor fork at slot 14, should succeed
2355        assert!(vote_simulator
2356            .simulate_vote(14, &node_pubkey, &mut tower,)
2357            .is_empty());
2358
2359        // The other two validators voted at slots 46, 47, which
2360        // will only both show up in slot 48, at which point
2361        // 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
2362        // on another fork, so switching should succeed
2363        let votes_to_simulate = (46..=48).collect();
2364        let results = vote_simulator.create_and_vote_new_branch(
2365            45,
2366            48,
2367            &cluster_votes,
2368            &votes_to_simulate,
2369            &node_pubkey,
2370            &mut tower,
2371        );
2372        assert_eq!(
2373            *results.get(&46).unwrap(),
2374            vec![HeaviestForkFailures::FailedSwitchThreshold(46, 0, 40000)]
2375        );
2376        assert_eq!(
2377            *results.get(&47).unwrap(),
2378            vec![HeaviestForkFailures::FailedSwitchThreshold(
2379                47, 10000, 40000
2380            )]
2381        );
2382        assert!(results.get(&48).unwrap().is_empty());
2383    }
2384
2385    #[test]
2386    fn test_double_partition() {
2387        // Init state
2388        let mut vote_simulator = VoteSimulator::new(2);
2389        let node_pubkey = vote_simulator.node_pubkeys[0];
2390        let vote_pubkey = vote_simulator.vote_pubkeys[0];
2391        let mut tower = Tower::default();
2392
2393        let num_slots_to_try = 200;
2394        // Create the tree of banks
2395        let forks = tr(0)
2396            / (tr(1)
2397                / (tr(2)
2398                    / (tr(3)
2399                        / (tr(4)
2400                            / (tr(5)
2401                                / (tr(6)
2402                                    / (tr(7)
2403                                        / (tr(8)
2404                                            / (tr(9)
2405                                                // Minor fork 1
2406                                                / (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
2407                                                / (tr(43)
2408                                                    / (tr(44)
2409                                                        // Minor fork 2
2410                                                        / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
2411                                                        / (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
2412
2413        // Set the successful voting behavior
2414        let mut cluster_votes = HashMap::new();
2415        let mut my_votes: Vec<Slot> = vec![];
2416        let next_unlocked_slot = 110;
2417        // Vote on the first minor fork
2418        my_votes.extend(1..=14);
2419        // Come back to the main fork
2420        my_votes.extend(43..=44);
2421        // Vote on the second minor fork
2422        my_votes.extend(45..=50);
2423        // Vote to come back to main fork
2424        my_votes.push(next_unlocked_slot);
2425        cluster_votes.insert(node_pubkey, my_votes.clone());
2426        // Make the other validator vote on this fork to pass the threshold checks
2427        let other_votes = my_votes.clone();
2428        cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
2429        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
2430
2431        // Simulate the votes.
2432        for vote in &my_votes {
2433            // All these votes should be ok
2434            assert!(vote_simulator
2435                .simulate_vote(*vote, &node_pubkey, &mut tower,)
2436                .is_empty());
2437        }
2438
2439        info!("local tower: {:#?}", tower.vote_state.votes);
2440        let observed = vote_simulator
2441            .bank_forks
2442            .read()
2443            .unwrap()
2444            .get(next_unlocked_slot)
2445            .unwrap()
2446            .get_vote_account(&vote_pubkey)
2447            .unwrap();
2448        let state = observed.vote_state_view();
2449        info!("observed tower: {:#?}", state.votes_iter().collect_vec());
2450
2451        let num_slots_to_try = 200;
2452        cluster_votes
2453            .get_mut(&vote_simulator.node_pubkeys[1])
2454            .unwrap()
2455            .extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
2456        assert!(vote_simulator.can_progress_on_fork(
2457            &node_pubkey,
2458            &mut tower,
2459            next_unlocked_slot,
2460            num_slots_to_try,
2461            &mut cluster_votes,
2462        ));
2463    }
2464
2465    #[test]
2466    fn test_collect_vote_lockouts_sums() {
2467        // two accounts voting for slot 0 with 1 token staked
2468        let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
2469        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2470            .iter()
2471            .sorted_by_key(|(pk, _)| *pk)
2472            .map(|(pubkey, _)| (*pubkey, (0, Hash::default())))
2473            .collect();
2474
2475        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
2476            .into_iter()
2477            .collect();
2478        let mut latest_validator_votes_for_frozen_banks =
2479            LatestValidatorVotesForFrozenBanks::default();
2480        let ComputedBankState {
2481            voted_stakes,
2482            total_stake,
2483            ..
2484        } = Tower::collect_vote_lockouts(
2485            &Pubkey::default(),
2486            1,
2487            &accounts,
2488            &ancestors,
2489            |_| Some(Hash::default()),
2490            &mut latest_validator_votes_for_frozen_banks,
2491        );
2492        assert_eq!(voted_stakes[&0], 2);
2493        assert_eq!(total_stake, 2);
2494        let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0);
2495        new_votes.sort();
2496        assert_eq!(new_votes, account_latest_votes);
2497    }
2498
2499    #[test]
2500    fn test_collect_vote_lockouts_root() {
2501        let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
2502        // two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
2503        let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
2504        let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
2505            .iter()
2506            .sorted_by_key(|(pk, _)| *pk)
2507            .map(|(pubkey, _)| {
2508                (
2509                    *pubkey,
2510                    ((MAX_LOCKOUT_HISTORY - 1) as Slot, Hash::default()),
2511                )
2512            })
2513            .collect();
2514        let mut tower = Tower::new_for_tests(0, 0.67);
2515        let mut ancestors = HashMap::new();
2516        for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
2517            tower.record_vote(i as u64, Hash::default());
2518            ancestors.insert(i as u64, (0..i as u64).collect());
2519        }
2520        let root = Lockout::new_with_confirmation_count(0, MAX_LOCKOUT_HISTORY as u32);
2521        let expected_bank_stake = 2;
2522        let expected_total_stake = 2;
2523        assert_eq!(tower.vote_state.root_slot, Some(0));
2524        let mut latest_validator_votes_for_frozen_banks =
2525            LatestValidatorVotesForFrozenBanks::default();
2526        let ComputedBankState {
2527            voted_stakes,
2528            fork_stake,
2529            total_stake,
2530            ..
2531        } = Tower::collect_vote_lockouts(
2532            &Pubkey::default(),
2533            MAX_LOCKOUT_HISTORY as u64,
2534            &accounts,
2535            &ancestors,
2536            |_| Some(Hash::default()),
2537            &mut latest_validator_votes_for_frozen_banks,
2538        );
2539        for i in 0..MAX_LOCKOUT_HISTORY {
2540            assert_eq!(voted_stakes[&(i as u64)], 2);
2541        }
2542
2543        // should be the sum of all voted stake on the fork
2544        assert_eq!(fork_stake, expected_bank_stake);
2545        assert_eq!(total_stake, expected_total_stake);
2546        let mut new_votes =
2547            latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root.slot());
2548        new_votes.sort();
2549        assert_eq!(new_votes, account_latest_votes);
2550    }
2551
2552    #[test]
2553    fn test_check_vote_threshold_without_votes() {
2554        let tower = Tower::new_for_tests(1, 0.67);
2555        let stakes = vec![(0, 1)].into_iter().collect();
2556        assert!(tower.check_vote_stake_thresholds(0, &stakes, 2).is_empty());
2557    }
2558
2559    #[test]
2560    fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
2561        solana_logger::setup();
2562        let mut tower = Tower::new_for_tests(4, 0.67);
2563        let mut stakes = HashMap::new();
2564        for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
2565            stakes.insert(i, 1);
2566            tower.record_vote(i, Hash::default());
2567        }
2568        assert!(!tower
2569            .check_vote_stake_thresholds(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2)
2570            .is_empty());
2571    }
2572
2573    #[test]
2574    fn test_is_slot_confirmed_not_enough_stake_failure() {
2575        let tower = Tower::new_for_tests(1, 0.67);
2576        let stakes = vec![(0, 1)].into_iter().collect();
2577        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2578    }
2579
2580    #[test]
2581    fn test_is_slot_confirmed_unknown_slot() {
2582        let tower = Tower::new_for_tests(1, 0.67);
2583        let stakes = HashMap::new();
2584        assert!(!tower.is_slot_confirmed(0, &stakes, 2));
2585    }
2586
2587    #[test]
2588    fn test_is_slot_confirmed_pass() {
2589        let tower = Tower::new_for_tests(1, 0.67);
2590        let stakes = vec![(0, 2)].into_iter().collect();
2591        assert!(tower.is_slot_confirmed(0, &stakes, 2));
2592    }
2593
2594    #[test]
2595    fn test_is_slot_duplicate_confirmed_not_enough_stake_failure() {
2596        let tower = Tower::new_for_tests(1, 0.67);
2597        let stakes = vec![(0, 52)].into_iter().collect();
2598        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2599    }
2600
2601    #[test]
2602    fn test_is_slot_duplicate_confirmed_unknown_slot() {
2603        let tower = Tower::new_for_tests(1, 0.67);
2604        let stakes = HashMap::new();
2605        assert!(!tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2606    }
2607
2608    #[test]
2609    fn test_is_slot_duplicate_confirmed_pass() {
2610        let tower = Tower::new_for_tests(1, 0.67);
2611        let stakes = vec![(0, 53)].into_iter().collect();
2612        assert!(tower.is_slot_duplicate_confirmed(0, &stakes, 100));
2613    }
2614
2615    #[test]
2616    fn test_is_locked_out_empty() {
2617        let tower = Tower::new_for_tests(0, 0.67);
2618        let ancestors = HashSet::from([0]);
2619        assert!(!tower.is_locked_out(1, &ancestors));
2620    }
2621
2622    #[test]
2623    fn test_is_locked_out_root_slot_child_pass() {
2624        let mut tower = Tower::new_for_tests(0, 0.67);
2625        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2626        tower.vote_state.root_slot = Some(0);
2627        assert!(!tower.is_locked_out(1, &ancestors));
2628    }
2629
2630    #[test]
2631    fn test_is_locked_out_root_slot_sibling_fail() {
2632        let mut tower = Tower::new_for_tests(0, 0.67);
2633        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2634        tower.vote_state.root_slot = Some(0);
2635        tower.record_vote(1, Hash::default());
2636        assert!(tower.is_locked_out(2, &ancestors));
2637    }
2638
2639    #[test]
2640    fn test_check_already_voted() {
2641        let mut tower = Tower::new_for_tests(0, 0.67);
2642        tower.record_vote(0, Hash::default());
2643        assert!(tower.has_voted(0));
2644        assert!(!tower.has_voted(1));
2645    }
2646
2647    #[test]
2648    fn test_check_recent_slot() {
2649        let mut tower = Tower::new_for_tests(0, 0.67);
2650        assert!(tower.is_recent(1));
2651        assert!(tower.is_recent(32));
2652        for i in 0..64 {
2653            tower.record_vote(i, Hash::default());
2654        }
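            // After these votes the last voted slot is 63, so slots at or below 63 are no
            // longer considered recent.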
2655        assert!(!tower.is_recent(0));
2656        assert!(!tower.is_recent(32));
2657        assert!(!tower.is_recent(63));
2658        assert!(tower.is_recent(65));
2659    }
2660
2661    #[test]
2662    fn test_is_locked_out_double_vote() {
2663        let mut tower = Tower::new_for_tests(0, 0.67);
2664        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2665        tower.record_vote(0, Hash::default());
2666        tower.record_vote(1, Hash::default());
2667        assert!(tower.is_locked_out(0, &ancestors));
2668    }
2669
2670    #[test]
2671    fn test_is_locked_out_child() {
2672        let mut tower = Tower::new_for_tests(0, 0.67);
2673        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2674        tower.record_vote(0, Hash::default());
2675        assert!(!tower.is_locked_out(1, &ancestors));
2676    }
2677
2678    #[test]
2679    fn test_is_locked_out_sibling() {
2680        let mut tower = Tower::new_for_tests(0, 0.67);
2681        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2682        tower.record_vote(0, Hash::default());
2683        tower.record_vote(1, Hash::default());
2684        assert!(tower.is_locked_out(2, &ancestors));
2685    }
2686
2687    #[test]
2688    fn test_is_locked_out_last_vote_expired() {
2689        let mut tower = Tower::new_for_tests(0, 0.67);
2690        let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
2691        tower.record_vote(0, Hash::default());
2692        tower.record_vote(1, Hash::default());
2693        assert!(!tower.is_locked_out(4, &ancestors));
2694        tower.record_vote(4, Hash::default());
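            // Voting on 4 expires the vote on slot 1 (confirmation count 1 => lockout 2,
            // locked out only through slot 3) and pops it; slot 0 keeps confirmation
            // count 2 and slot 4 is pushed on top with confirmation count 1.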
2695        assert_eq!(tower.vote_state.votes[0].slot(), 0);
2696        assert_eq!(tower.vote_state.votes[0].confirmation_count(), 2);
2697        assert_eq!(tower.vote_state.votes[1].slot(), 4);
2698        assert_eq!(tower.vote_state.votes[1].confirmation_count(), 1);
2699    }
2700
2701    #[test]
2702    fn test_check_vote_threshold_below_threshold() {
2703        let mut tower = Tower::new_for_tests(1, 0.67);
2704        let stakes = vec![(0, 1)].into_iter().collect();
2705        tower.record_vote(0, Hash::default());
2706        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2707    }
2708    #[test]
2709    fn test_check_vote_threshold_above_threshold() {
2710        let mut tower = Tower::new_for_tests(1, 0.67);
2711        let stakes = vec![(0, 2)].into_iter().collect();
2712        tower.record_vote(0, Hash::default());
2713        assert!(tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2714    }
2715
2716    #[test]
2717    fn test_check_vote_thresholds_above_thresholds() {
2718        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2719        let stakes = vec![
2720            (0, 3),
2721            (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 2),
2722            ((VOTE_THRESHOLD_DEPTH_SHALLOW as u64) - 1, 2),
2723        ]
2724        .into_iter()
2725        .collect();
2726        for slot in 0..VOTE_THRESHOLD_DEPTH {
2727            tower.record_vote(slot as Slot, Hash::default());
2728        }
2729        assert!(tower
2730            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 4)
2731            .is_empty());
2732    }
2733
2734    #[test]
2735    fn test_check_vote_threshold_deep_below_threshold() {
2736        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2737        let stakes = vec![(0, 6), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 4)]
2738            .into_iter()
2739            .collect();
2740        for slot in 0..VOTE_THRESHOLD_DEPTH {
2741            tower.record_vote(slot as Slot, Hash::default());
2742        }
2743        assert!(!tower
2744            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2745            .is_empty());
2746    }
2747
2748    #[test]
2749    fn test_check_vote_threshold_shallow_below_threshold() {
2750        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67);
2751        let stakes = vec![(0, 7), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 1)]
2752            .into_iter()
2753            .collect();
2754        for slot in 0..VOTE_THRESHOLD_DEPTH {
2755            tower.record_vote(slot as Slot, Hash::default());
2756        }
2757        assert!(!tower
2758            .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10)
2759            .is_empty());
2760    }
2761
2762    #[test]
2763    fn test_check_vote_threshold_above_threshold_after_pop() {
2764        let mut tower = Tower::new_for_tests(1, 0.67);
2765        let stakes = vec![(0, 2)].into_iter().collect();
2766        tower.record_vote(0, Hash::default());
2767        tower.record_vote(1, Hash::default());
2768        tower.record_vote(2, Hash::default());
2769        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2770    }
2771
2772    #[test]
2773    fn test_check_vote_threshold_above_threshold_no_stake() {
2774        let mut tower = Tower::new_for_tests(1, 0.67);
2775        let stakes = HashMap::new();
2776        tower.record_vote(0, Hash::default());
2777        assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).is_empty());
2778    }
2779
2780    #[test]
2781    fn test_check_vote_threshold_lockouts_not_updated() {
2782        solana_logger::setup();
2783        let mut tower = Tower::new_for_tests(1, 0.67);
2784        let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
2785        tower.record_vote(0, Hash::default());
2786        tower.record_vote(1, Hash::default());
2787        tower.record_vote(2, Hash::default());
2788        assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).is_empty());
2789    }
2790
2791    #[test]
2792    fn test_stake_is_updated_for_entire_branch() {
2793        let mut voted_stakes = HashMap::new();
2794        let account = AccountSharedData::from(Account {
2795            lamports: 1,
2796            ..Account::default()
2797        });
2798        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
2799        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
2800        Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports(), &ancestors);
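            // The account's stake is credited to the voted slot (2) and to every ancestor
            // in the provided ancestor set (0 and 1).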
2801        assert_eq!(voted_stakes[&0], 1);
2802        assert_eq!(voted_stakes[&1], 1);
2803        assert_eq!(voted_stakes[&2], 1);
2804    }
2805
2806    #[test]
2807    fn test_check_vote_threshold_forks() {
2808        // Create the ancestor relationships
2809        let ancestors = (0..=(VOTE_THRESHOLD_DEPTH + 1) as u64)
2810            .map(|slot| {
2811                let slot_parents: HashSet<_> = (0..slot).collect();
2812                (slot, slot_parents)
2813            })
2814            .collect();
2815
2816        // Create the vote accounts such that
2817        // 1) 3/4 of the stake has a single vote on slot VOTE_THRESHOLD_DEPTH - 2 (lockout: 2)
2818        // 2) 1/4 of the stake voted on slots 0..VOTE_THRESHOLD_DEPTH; its vote on slot 0 has lockout 2^VOTE_THRESHOLD_DEPTH
2819        let total_stake = 4;
2820        let threshold_size = 0.67;
2821        let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
2822        let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
2823        let accounts = gen_stakes(&[
2824            (threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
2825            (total_stake - threshold_stake, &tower_votes[..]),
2826        ]);
2827
2828        // Initialize tower
2829        let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, threshold_size);
2830
2831        // CASE 1: Record the first VOTE_THRESHOLD_DEPTH tower votes (the same slots the
2832        // second stake account voted on). We want to evaluate a vote on slot
2833        // VOTE_THRESHOLD_DEPTH. The nth most recent vote should be for slot 0, which is
2834        // common to all account vote states, so we should pass the threshold check
2835        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
2836        for vote in &tower_votes {
2837            tower.record_vote(*vote, Hash::default());
2838        }
2839        let ComputedBankState {
2840            voted_stakes,
2841            total_stake,
2842            ..
2843        } = Tower::collect_vote_lockouts(
2844            &Pubkey::default(),
2845            vote_to_evaluate,
2846            &accounts,
2847            &ancestors,
2848            |_| None,
2849            &mut LatestValidatorVotesForFrozenBanks::default(),
2850        );
2851        assert!(tower
2852            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2853            .is_empty());
2854
2855        // CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
2856        // will expire the vote in one of the vote accounts, so we should have insufficient
2857        // stake to pass the threshold
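            // (That account's single vote on slot VOTE_THRESHOLD_DEPTH - 2 has lockout 2,
            // so it is locked out only through slot VOTE_THRESHOLD_DEPTH; the remaining
            // 1/4 of the stake is below the 0.67 threshold.)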
2858        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
2859        let ComputedBankState {
2860            voted_stakes,
2861            total_stake,
2862            ..
2863        } = Tower::collect_vote_lockouts(
2864            &Pubkey::default(),
2865            vote_to_evaluate,
2866            &accounts,
2867            &ancestors,
2868            |_| None,
2869            &mut LatestValidatorVotesForFrozenBanks::default(),
2870        );
2871        assert!(!tower
2872            .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake)
2873            .is_empty());
2874    }
2875
2876    fn vote_and_check_recent(num_votes: usize) {
2877        let mut tower = Tower::new_for_tests(1, 0.67);
2878        let slots = if num_votes > 0 {
2879            (0..num_votes)
2880                .map(|i| {
2881                    Lockout::new_with_confirmation_count(i as Slot, (num_votes as u32) - (i as u32))
2882                })
2883                .collect()
2884        } else {
2885            vec![]
2886        };
2887        let mut expected = TowerSync::new(
2888            VecDeque::from(slots),
2889            if num_votes > 0 { Some(0) } else { None },
2890            Hash::default(),
2891            Hash::default(),
2892        );
2893        for i in 0..num_votes {
2894            tower.record_vote(i as u64, Hash::default());
2895        }
2896
2897        expected.timestamp = tower.last_vote.timestamp();
2898        assert_eq!(VoteTransaction::from(expected), tower.last_vote)
2899    }
2900
2901    #[test]
2902    fn test_recent_votes_full() {
2903        vote_and_check_recent(MAX_LOCKOUT_HISTORY)
2904    }
2905
2906    #[test]
2907    fn test_recent_votes_empty() {
2908        vote_and_check_recent(0)
2909    }
2910
2911    #[test]
2912    fn test_recent_votes_exact() {
2913        vote_and_check_recent(5)
2914    }
2915
2916    #[test]
2917    fn test_maybe_timestamp() {
2918        let mut tower = Tower::default();
2919        assert!(tower.maybe_timestamp(0).is_some());
2920        assert!(tower.maybe_timestamp(1).is_some());
2921        assert!(tower.maybe_timestamp(0).is_none()); // Refuse to timestamp an older slot
2922        assert!(tower.maybe_timestamp(1).is_none()); // Refuse to timestamp the same slot twice
2923
2924        tower.last_timestamp.timestamp -= 1; // Move last_timestamp into the past
2925        assert!(tower.maybe_timestamp(2).is_some()); // slot 2 gets a timestamp
2926
2927        tower.last_timestamp.timestamp += 1_000_000; // Move last_timestamp well into the future
2928        assert!(tower.maybe_timestamp(3).is_none()); // slot 3 gets no timestamp
2929    }
2930
2931    #[test]
2932    fn test_refresh_last_vote_timestamp() {
2933        let mut tower = Tower::default();
2934
2935        // Tower has no vote or timestamp
2936        tower.last_vote.set_timestamp(None);
2937        tower.refresh_last_vote_timestamp(5);
2938        assert_eq!(tower.last_vote.timestamp(), None);
2939        assert_eq!(tower.last_timestamp.slot, 0);
2940        assert_eq!(tower.last_timestamp.timestamp, 0);
2941
2942        // Tower has a vote with no timestamp, but the vote slot (6) is greater than heaviest_bank (5)
2943        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (6, 1)]));
2944        assert_eq!(tower.last_vote.timestamp(), None);
2945        tower.refresh_last_vote_timestamp(5);
2946        assert_eq!(tower.last_vote.timestamp(), None);
2947        assert_eq!(tower.last_timestamp.slot, 0);
2948        assert_eq!(tower.last_timestamp.timestamp, 0);
2949
2950        // Tower has a vote with no timestamp on a slot at or below heaviest_bank
2951        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
2952        assert_eq!(tower.last_vote.timestamp(), None);
2953        tower.refresh_last_vote_timestamp(5);
2954        assert_eq!(tower.last_vote.timestamp(), Some(1));
2955        assert_eq!(tower.last_timestamp.slot, 2);
2956        assert_eq!(tower.last_timestamp.timestamp, 1);
2957
2958        // Vote has timestamp
2959        tower.last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 3), (1, 2), (2, 1)]));
2960        tower.refresh_last_vote_timestamp(5);
2961        assert_eq!(tower.last_vote.timestamp(), Some(2));
2962        assert_eq!(tower.last_timestamp.slot, 2);
2963        assert_eq!(tower.last_timestamp.timestamp, 2);
2964    }
2965
2966    fn run_test_load_tower_snapshot<F, G>(
2967        modify_original: F,
2968        modify_serialized: G,
2969    ) -> (Tower, Result<Tower>)
2970    where
2971        F: Fn(&mut Tower, &Pubkey),
2972        G: Fn(&PathBuf),
2973    {
2974        let tower_path = TempDir::new().unwrap();
2975        let identity_keypair = Arc::new(Keypair::new());
2976        let node_pubkey = identity_keypair.pubkey();
2977
2978        // Use values that will not match the default derived from BankForks
2979        let mut tower = Tower::new_for_tests(10, 0.9);
2980
2981        let tower_storage = FileTowerStorage::new(tower_path.path().to_path_buf());
2982
2983        modify_original(&mut tower, &node_pubkey);
2984
2985        tower.save(&tower_storage, &identity_keypair).unwrap();
2986        modify_serialized(&tower_storage.filename(&node_pubkey));
2987        let loaded = Tower::restore(&tower_storage, &node_pubkey);
2988
2989        (tower, loaded)
2990    }
2991
2992    #[test]
2993    fn test_switch_threshold_across_tower_reload() {
2994        solana_logger::setup();
2995        // Init state
2996        let mut vote_simulator = VoteSimulator::new(2);
2997        let other_vote_account = vote_simulator.vote_pubkeys[1];
2998        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
2999        let total_stake = bank0.total_epoch_stake();
3000        assert_eq!(
3001            total_stake,
3002            vote_simulator.validator_keypairs.len() as u64 * 10_000
3003        );
3004
3005        // Create the tree of banks
3006        let forks = tr(0)
3007            / (tr(1)
3008                / (tr(2)
3009                    / tr(10)
3010                    / (tr(43)
3011                        / (tr(44)
3012                            // Minor fork 2
3013                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3014                            / (tr(110) / tr(111))))));
3015
3016        // Fill the BankForks according to the above fork structure
3017        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3018        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3019            fork_progress.fork_stats.computed = true;
3020        }
3021
3022        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3023        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3024        let mut tower = Tower::default();
3025
3026        tower.record_vote(43, Hash::default());
3027        tower.record_vote(44, Hash::default());
3028        tower.record_vote(45, Hash::default());
3029        tower.record_vote(46, Hash::default());
3030        tower.record_vote(47, Hash::default());
3031        tower.record_vote(48, Hash::default());
3032        tower.record_vote(49, Hash::default());
3033
3034        // Trying to switch to a descendant of last vote should always work
3035        assert_eq!(
3036            tower.check_switch_threshold(
3037                50,
3038                &ancestors,
3039                &descendants,
3040                &vote_simulator.progress,
3041                total_stake,
3042                bank0.epoch_vote_accounts(0).unwrap(),
3043                &vote_simulator.latest_validator_votes_for_frozen_banks,
3044                &vote_simulator.heaviest_subtree_fork_choice,
3045            ),
3046            SwitchForkDecision::SameFork
3047        );
3048
3049        // Trying to switch to another fork at 110 should fail
3050        assert_eq!(
3051            tower.check_switch_threshold(
3052                110,
3053                &ancestors,
3054                &descendants,
3055                &vote_simulator.progress,
3056                total_stake,
3057                bank0.epoch_vote_accounts(0).unwrap(),
3058                &vote_simulator.latest_validator_votes_for_frozen_banks,
3059                &vote_simulator.heaviest_subtree_fork_choice,
3060            ),
3061            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3062        );
3063
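            // The other validator holds half of the 20_000 total stake (above
            // SWITCH_FORK_THRESHOLD); give it a lockout on the 110/111 fork that covers
            // our last vote (49), which makes the switch proof below succeed.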
3064        vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
3065
3066        assert_eq!(
3067            tower.check_switch_threshold(
3068                110,
3069                &ancestors,
3070                &descendants,
3071                &vote_simulator.progress,
3072                total_stake,
3073                bank0.epoch_vote_accounts(0).unwrap(),
3074                &vote_simulator.latest_validator_votes_for_frozen_banks,
3075                &vote_simulator.heaviest_subtree_fork_choice,
3076            ),
3077            SwitchForkDecision::SwitchProof(Hash::default())
3078        );
3079
3080        assert_eq!(tower.voted_slots(), vec![43, 44, 45, 46, 47, 48, 49]);
3081        {
3082            let mut tower = tower.clone();
3083            tower.record_vote(110, Hash::default());
3084            tower.record_vote(111, Hash::default());
3085            assert_eq!(tower.voted_slots(), vec![43, 110, 111]);
3086            assert_eq!(tower.vote_state.root_slot, Some(0));
3087        }
3088
3089        // Prepare simulated validator restart!
3090        let mut vote_simulator = VoteSimulator::new(2);
3091        let other_vote_account = vote_simulator.vote_pubkeys[1];
3092        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3093        let total_stake = bank0.total_epoch_stake();
3094        let forks = tr(0)
3095            / (tr(1)
3096                / (tr(2)
3097                    / tr(10)
3098                    / (tr(43)
3099                        / (tr(44)
3100                            // Minor fork 2
3101                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
3102                            / (tr(110) / tr(111))))));
3103        let replayed_root_slot = 44;
3104
3105        // Fill the BankForks according to the above fork structure
3106        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3107        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3108            fork_progress.fork_stats.computed = true;
3109        }
3110
3111        // Pretend the validator restarted: reconcile the stale tower with the new root and slot history
3112        let mut slot_history = SlotHistory::default();
3113        vote_simulator.set_root(replayed_root_slot);
3114        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3115        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3116        for slot in &[0, 1, 2, 43, replayed_root_slot] {
3117            slot_history.add(*slot);
3118        }
3119        let mut tower = tower
3120            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3121            .unwrap();
3122
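            // Votes at or below the new root (43 and 44) were reconciled away; the rest of
            // the tower survives.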
3123        assert_eq!(tower.voted_slots(), vec![45, 46, 47, 48, 49]);
3124
3125        // Trying to switch to another fork at 110 should fail
3126        assert_eq!(
3127            tower.check_switch_threshold(
3128                110,
3129                &ancestors,
3130                &descendants,
3131                &vote_simulator.progress,
3132                total_stake,
3133                bank0.epoch_vote_accounts(0).unwrap(),
3134                &vote_simulator.latest_validator_votes_for_frozen_banks,
3135                &vote_simulator.heaviest_subtree_fork_choice,
3136            ),
3137            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3138        );
3139
3140        // Add a lockout interval whose start (45) is an ancestor of our last vote, so it should be excluded
3141        vote_simulator.simulate_lockout_interval(111, (45, 50), &other_vote_account);
3142        assert_eq!(
3143            tower.check_switch_threshold(
3144                110,
3145                &ancestors,
3146                &descendants,
3147                &vote_simulator.progress,
3148                total_stake,
3149                bank0.epoch_vote_accounts(0).unwrap(),
3150                &vote_simulator.latest_validator_votes_for_frozen_banks,
3151                &vote_simulator.heaviest_subtree_fork_choice,
3152            ),
3153            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3154        );
3155
3156        // Add a lockout interval starting on the other fork (110), so it should be counted
3157        vote_simulator.simulate_lockout_interval(111, (110, 200), &other_vote_account);
3158        assert_eq!(
3159            tower.check_switch_threshold(
3160                110,
3161                &ancestors,
3162                &descendants,
3163                &vote_simulator.progress,
3164                total_stake,
3165                bank0.epoch_vote_accounts(0).unwrap(),
3166                &vote_simulator.latest_validator_votes_for_frozen_banks,
3167                &vote_simulator.heaviest_subtree_fork_choice,
3168            ),
3169            SwitchForkDecision::SwitchProof(Hash::default())
3170        );
3171
3172        tower.record_vote(110, Hash::default());
3173        tower.record_vote(111, Hash::default());
3174        assert_eq!(tower.voted_slots(), vec![110, 111]);
3175        assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot));
3176    }
3177
3178    #[test]
3179    fn test_load_tower_ok() {
3180        let (tower, loaded) =
3181            run_test_load_tower_snapshot(|tower, pubkey| tower.node_pubkey = *pubkey, |_| ());
3182        let loaded = loaded.unwrap();
3183        assert_eq!(loaded, tower);
3184        assert_eq!(tower.threshold_depth, 10);
3185        assert!((tower.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3186        assert_eq!(loaded.threshold_depth, 10);
3187        assert!((loaded.threshold_size - 0.9_f64).abs() < f64::EPSILON);
3188    }
3189
3190    #[test]
3191    fn test_load_tower_wrong_identity() {
3192        let identity_keypair = Arc::new(Keypair::new());
3193        let tower = Tower::default();
3194        let tower_storage = FileTowerStorage::default();
3195        assert_matches!(
3196            tower.save(&tower_storage, &identity_keypair),
3197            Err(TowerError::WrongTower(_))
3198        )
3199    }
3200
3201    #[test]
3202    fn test_load_tower_invalid_signature() {
3203        let (_, loaded) = run_test_load_tower_snapshot(
3204            |tower, pubkey| tower.node_pubkey = *pubkey,
3205            |path| {
3206                let mut file = OpenOptions::new()
3207                    .read(true)
3208                    .write(true)
3209                    .open(path)
3210                    .unwrap();
3211                // 4 is the offset into SavedTowerVersions for the signature
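                    // Flipping one byte there corrupts the signature, so Tower::restore
                    // rejects the file.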
3212                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3213                let mut buf = [0u8];
3214                assert_eq!(file.read(&mut buf).unwrap(), 1);
3215                buf[0] = !buf[0];
3216                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
3217                assert_eq!(file.write(&buf).unwrap(), 1);
3218            },
3219        );
3220        assert_matches!(loaded, Err(TowerError::InvalidSignature))
3221    }
3222
3223    #[test]
3224    fn test_load_tower_deser_failure() {
3225        let (_, loaded) = run_test_load_tower_snapshot(
3226            |tower, pubkey| tower.node_pubkey = *pubkey,
3227            |path| {
3228                OpenOptions::new()
3229                    .write(true)
3230                    .truncate(true)
3231                    .open(path)
3232                    .unwrap_or_else(|_| panic!("Failed to truncate file: {path:?}"));
3233            },
3234        );
3235        assert_matches!(loaded, Err(TowerError::SerializeError(_)))
3236    }
3237
3238    #[test]
3239    fn test_load_tower_missing() {
3240        let (_, loaded) = run_test_load_tower_snapshot(
3241            |tower, pubkey| tower.node_pubkey = *pubkey,
3242            |path| {
3243                remove_file(path).unwrap();
3244            },
3245        );
3246        assert_matches!(loaded, Err(TowerError::IoError(_)))
3247    }
3248
3249    #[test]
3250    fn test_reconcile_blockstore_roots_with_tower_normal() {
3251        solana_logger::setup();
3252        let ledger_path = get_tmp_ledger_path_auto_delete!();
3253        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3254
3255        let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
3256        blockstore.insert_shreds(shreds, None, false).unwrap();
3257        let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
3258        blockstore.insert_shreds(shreds, None, false).unwrap();
3259        let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true);
3260        blockstore.insert_shreds(shreds, None, false).unwrap();
3261        assert!(!blockstore.is_root(0));
3262        assert!(!blockstore.is_root(1));
3263        assert!(!blockstore.is_root(3));
3264        assert!(!blockstore.is_root(4));
3265
3266        let mut tower = Tower::default();
3267        tower.vote_state.root_slot = Some(4);
3268        reconcile_blockstore_roots_with_external_source(
3269            ExternalRootSource::Tower(tower.root()),
3270            &blockstore,
3271            &mut blockstore.max_root(),
3272        )
3273        .unwrap();
3274
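            // The tower root (4) and its ancestor 1 are newly marked as roots; slot 3 sits
            // on a different fork and slot 0 (the previous max root) is left untouched.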
3275        assert!(!blockstore.is_root(0));
3276        assert!(blockstore.is_root(1));
3277        assert!(!blockstore.is_root(3));
3278        assert!(blockstore.is_root(4));
3279    }
3280
3281    #[test]
3282    #[should_panic(
3283        expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \
3284                    1) from external root (Tower(4))!?"
3285    )]
3286    fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() {
3287        solana_logger::setup();
3288        let ledger_path = get_tmp_ledger_path_auto_delete!();
3289        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3290
3291        let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
3292        blockstore.insert_shreds(shreds, None, false).unwrap();
3293        let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
3294        blockstore.insert_shreds(shreds, None, false).unwrap();
3295        let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true);
3296        blockstore.insert_shreds(shreds, None, false).unwrap();
3297        blockstore.set_roots(std::iter::once(&3)).unwrap();
3298        assert!(!blockstore.is_root(0));
3299        assert!(!blockstore.is_root(1));
3300        assert!(blockstore.is_root(3));
3301        assert!(!blockstore.is_root(4));
3302
3303        let mut tower = Tower::default();
3304        tower.vote_state.root_slot = Some(4);
3305        reconcile_blockstore_roots_with_external_source(
3306            ExternalRootSource::Tower(tower.root()),
3307            &blockstore,
3308            &mut blockstore.max_root(),
3309        )
3310        .unwrap();
3311    }
3312
3313    #[test]
3314    fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
3315        solana_logger::setup();
3316        let ledger_path = get_tmp_ledger_path_auto_delete!();
3317        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3318
3319        let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
3320        blockstore.insert_shreds(shreds, None, false).unwrap();
3321        let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
3322        blockstore.insert_shreds(shreds, None, false).unwrap();
3323        assert!(!blockstore.is_root(0));
3324        assert!(!blockstore.is_root(1));
3325        assert!(!blockstore.is_root(3));
3326
3327        let mut tower = Tower::default();
3328        tower.vote_state.root_slot = Some(4);
3329        assert_eq!(blockstore.max_root(), 0);
3330        reconcile_blockstore_roots_with_external_source(
3331            ExternalRootSource::Tower(tower.root()),
3332            &blockstore,
3333            &mut blockstore.max_root(),
3334        )
3335        .unwrap();
3336        assert_eq!(blockstore.max_root(), 0);
3337    }
3338
3339    #[test]
3340    fn test_adjust_lockouts_after_replay_future_slots() {
3341        solana_logger::setup();
3342        let mut tower = Tower::new_for_tests(10, 0.9);
3343        tower.record_vote(0, Hash::default());
3344        tower.record_vote(1, Hash::default());
3345        tower.record_vote(2, Hash::default());
3346        tower.record_vote(3, Hash::default());
3347
3348        let mut slot_history = SlotHistory::default();
3349        slot_history.add(0);
3350        slot_history.add(1);
3351
3352        let replayed_root_slot = 1;
3353        tower = tower
3354            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3355            .unwrap();
3356
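            // Slots 0 and 1 are already rooted in the slot history, so they are dropped
            // from the tower; the not-yet-rooted votes on 2 and 3 survive.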
3357        assert_eq!(tower.voted_slots(), vec![2, 3]);
3358        assert_eq!(tower.root(), replayed_root_slot);
3359
3360        tower = tower
3361            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3362            .unwrap();
3363        assert_eq!(tower.voted_slots(), vec![2, 3]);
3364        assert_eq!(tower.root(), replayed_root_slot);
3365    }
3366
3367    #[test]
3368    fn test_adjust_lockouts_after_replay_not_found_slots() {
3369        let mut tower = Tower::new_for_tests(10, 0.9);
3370        tower.record_vote(0, Hash::default());
3371        tower.record_vote(1, Hash::default());
3372        tower.record_vote(2, Hash::default());
3373        tower.record_vote(3, Hash::default());
3374
3375        let mut slot_history = SlotHistory::default();
3376        slot_history.add(0);
3377        slot_history.add(1);
3378        slot_history.add(4);
3379
3380        let replayed_root_slot = 4;
3381        tower = tower
3382            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3383            .unwrap();
3384
3385        assert_eq!(tower.voted_slots(), vec![2, 3]);
3386        assert_eq!(tower.root(), replayed_root_slot);
3387    }
3388
3389    #[test]
3390    fn test_adjust_lockouts_after_replay_all_rooted_with_no_too_old() {
3391        let mut tower = Tower::new_for_tests(10, 0.9);
3392        tower.record_vote(0, Hash::default());
3393        tower.record_vote(1, Hash::default());
3394        tower.record_vote(2, Hash::default());
3395
3396        let mut slot_history = SlotHistory::default();
3397        slot_history.add(0);
3398        slot_history.add(1);
3399        slot_history.add(2);
3400        slot_history.add(3);
3401        slot_history.add(4);
3402        slot_history.add(5);
3403
3404        let replayed_root_slot = 5;
3405        tower = tower
3406            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3407            .unwrap();
3408
3409        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3410        assert_eq!(tower.root(), replayed_root_slot);
3411        assert_eq!(tower.stray_restored_slot, None);
3412    }
3413
3414    #[test]
3415    fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
3416        use solana_slot_history::MAX_ENTRIES;
3417
3418        let mut tower = Tower::new_for_tests(10, 0.9);
3419        tower.record_vote(0, Hash::default());
3420        tower.record_vote(1, Hash::default());
3421        tower.record_vote(2, Hash::default());
3422
3423        let mut slot_history = SlotHistory::default();
3424        slot_history.add(0);
3425        slot_history.add(1);
3426        slot_history.add(2);
3427        slot_history.add(MAX_ENTRIES);
3428
3429        tower = tower
3430            .adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
3431            .unwrap();
3432        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3433        assert_eq!(tower.root(), MAX_ENTRIES);
3434    }
3435
3436    #[test]
3437    fn test_adjust_lockouts_after_replay_anchored_future_slots() {
3438        let mut tower = Tower::new_for_tests(10, 0.9);
3439        tower.record_vote(0, Hash::default());
3440        tower.record_vote(1, Hash::default());
3441        tower.record_vote(2, Hash::default());
3442        tower.record_vote(3, Hash::default());
3443        tower.record_vote(4, Hash::default());
3444
3445        let mut slot_history = SlotHistory::default();
3446        slot_history.add(0);
3447        slot_history.add(1);
3448        slot_history.add(2);
3449
3450        let replayed_root_slot = 2;
3451        tower = tower
3452            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3453            .unwrap();
3454
3455        assert_eq!(tower.voted_slots(), vec![3, 4]);
3456        assert_eq!(tower.root(), replayed_root_slot);
3457    }
3458
3459    #[test]
3460    fn test_adjust_lockouts_after_replay_all_not_found() {
3461        let mut tower = Tower::new_for_tests(10, 0.9);
3462        tower.record_vote(5, Hash::default());
3463        tower.record_vote(6, Hash::default());
3464
3465        let mut slot_history = SlotHistory::default();
3466        slot_history.add(0);
3467        slot_history.add(1);
3468        slot_history.add(2);
3469        slot_history.add(7);
3470
3471        let replayed_root_slot = 7;
3472        tower = tower
3473            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3474            .unwrap();
3475
3476        assert_eq!(tower.voted_slots(), vec![5, 6]);
3477        assert_eq!(tower.root(), replayed_root_slot);
3478    }
3479
3480    #[test]
3481    fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() {
3482        let mut tower = Tower::new_for_tests(10, 0.9);
3483        tower.vote_state.root_slot = Some(4);
3484        tower.record_vote(5, Hash::default());
3485        tower.record_vote(6, Hash::default());
3486
3487        let mut slot_history = SlotHistory::default();
3488        slot_history.add(0);
3489        slot_history.add(1);
3490        slot_history.add(2);
3491        slot_history.add(7);
3492
3493        let replayed_root_slot = 7;
3494        let result = tower.adjust_lockouts_after_replay(replayed_root_slot, &slot_history);
3495
3496        assert_eq!(
3497            format!("{}", result.unwrap_err()),
3498            "The tower is fatally inconsistent with blockstore: no common slot for rooted tower"
3499        );
3500    }
3501
3502    #[test]
3503    fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() {
3504        let mut tower = Tower::new_for_tests(10, 0.9);
3505        tower.vote_state.root_slot = Some(2);
3506        tower.record_vote(3, Hash::default());
3507        tower.record_vote(4, Hash::default());
3508        tower.record_vote(5, Hash::default());
3509
3510        let mut slot_history = SlotHistory::default();
3511        slot_history.add(0);
3512        slot_history.add(1);
3513        slot_history.add(2);
3514
3515        let replayed_root_slot = 2;
3516        tower = tower
3517            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3518            .unwrap();
3519
3520        assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
3521        assert_eq!(tower.root(), replayed_root_slot);
3522    }
3523
3524    #[test]
3525    fn test_adjust_lockouts_after_replay_empty() {
3526        let mut tower = Tower::new_for_tests(10, 0.9);
3527
3528        let mut slot_history = SlotHistory::default();
3529        slot_history.add(0);
3530
3531        let replayed_root_slot = 0;
3532        tower = tower
3533            .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
3534            .unwrap();
3535
3536        assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
3537        assert_eq!(tower.root(), replayed_root_slot);
3538    }
3539
3540    #[test]
3541    fn test_adjust_lockouts_after_replay_too_old_tower() {
3542        use solana_slot_history::MAX_ENTRIES;
3543
3544        let mut tower = Tower::new_for_tests(10, 0.9);
3545        tower.record_vote(0, Hash::default());
3546
3547        let mut slot_history = SlotHistory::default();
3548        slot_history.add(0);
3549        slot_history.add(MAX_ENTRIES);
3550
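            // SlotHistory only covers the most recent MAX_ENTRIES slots, so adding slot
            // MAX_ENTRIES evicts slot 0; the tower's only vote is now older than anything
            // the history can confirm.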
3551        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3552        assert_eq!(
3553            format!("{}", result.unwrap_err()),
3554            "The tower is too old: newest slot in tower (0) << oldest slot in available history \
3555             (1)"
3556        );
3557    }
3558
3559    #[test]
3560    fn test_adjust_lockouts_after_replay_time_warped() {
3561        let mut tower = Tower::new_for_tests(10, 0.9);
3562        tower.vote_state.votes.push_back(Lockout::new(1));
3563        tower.vote_state.votes.push_back(Lockout::new(0));
3564        let vote = Vote::new(vec![0], Hash::default());
3565        tower.last_vote = VoteTransaction::from(vote);
3566
3567        let mut slot_history = SlotHistory::default();
3568        slot_history.add(0);
3569
3570        let result = tower.adjust_lockouts_after_replay(0, &slot_history);
3571        assert_eq!(
3572            format!("{}", result.unwrap_err()),
3573            "The tower is fatally inconsistent with blockstore: time warped?"
3574        );
3575    }
3576
3577    #[test]
3578    fn test_adjust_lockouts_after_replay_diverged_ancestor() {
3579        let mut tower = Tower::new_for_tests(10, 0.9);
3580        tower.vote_state.votes.push_back(Lockout::new(1));
3581        tower.vote_state.votes.push_back(Lockout::new(2));
3582        let vote = Vote::new(vec![2], Hash::default());
3583        tower.last_vote = VoteTransaction::from(vote);
3584
3585        let mut slot_history = SlotHistory::default();
3586        slot_history.add(0);
3587        slot_history.add(2);
3588
3589        let result = tower.adjust_lockouts_after_replay(2, &slot_history);
3590        assert_eq!(
3591            format!("{}", result.unwrap_err()),
3592            "The tower is fatally inconsistent with blockstore: diverged ancestor?"
3593        );
3594    }
3595
3596    #[test]
3597    fn test_adjust_lockouts_after_replay_out_of_order() {
3598        use solana_slot_history::MAX_ENTRIES;
3599
3600        let mut tower = Tower::new_for_tests(10, 0.9);
3601        tower
3602            .vote_state
3603            .votes
3604            .push_back(Lockout::new(MAX_ENTRIES - 1));
3605        tower.vote_state.votes.push_back(Lockout::new(0));
3606        tower.vote_state.votes.push_back(Lockout::new(1));
3607        let vote = Vote::new(vec![1], Hash::default());
3608        tower.last_vote = VoteTransaction::from(vote);
3609
3610        let mut slot_history = SlotHistory::default();
3611        slot_history.add(MAX_ENTRIES);
3612
3613        let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
3614        assert_eq!(
3615            format!("{}", result.unwrap_err()),
3616            "The tower is fatally inconsistent with blockstore: not too old once after got too \
3617             old?"
3618        );
3619    }
3620
3621    #[test]
3622    #[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
3623    fn test_adjust_lockouts_after_replay_reversed_votes() {
3624        let mut tower = Tower::new_for_tests(10, 0.9);
3625        tower.vote_state.votes.push_back(Lockout::new(2));
3626        tower.vote_state.votes.push_back(Lockout::new(1));
3627        let vote = Vote::new(vec![1], Hash::default());
3628        tower.last_vote = VoteTransaction::from(vote);
3629
3630        let mut slot_history = SlotHistory::default();
3631        slot_history.add(0);
3632        slot_history.add(2);
3633
3634        tower
3635            .adjust_lockouts_after_replay(2, &slot_history)
3636            .unwrap();
3637    }
3638
3639    #[test]
3640    #[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
3641    fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
3642        let mut tower = Tower::new_for_tests(10, 0.9);
3643        tower.vote_state.votes.push_back(Lockout::new(2));
3644        tower.vote_state.votes.push_back(Lockout::new(3));
3645        tower.vote_state.votes.push_back(Lockout::new(3));
3646        let vote = Vote::new(vec![3], Hash::default());
3647        tower.last_vote = VoteTransaction::from(vote);
3648
3649        let mut slot_history = SlotHistory::default();
3650        slot_history.add(0);
3651        slot_history.add(2);
3652
3653        tower
3654            .adjust_lockouts_after_replay(2, &slot_history)
3655            .unwrap();
3656    }
3657
3658    #[test]
3659    fn test_adjust_lockouts_after_replay_vote_on_root() {
3660        let mut tower = Tower::new_for_tests(10, 0.9);
3661        tower.vote_state.root_slot = Some(42);
3662        tower.vote_state.votes.push_back(Lockout::new(42));
3663        tower.vote_state.votes.push_back(Lockout::new(43));
3664        tower.vote_state.votes.push_back(Lockout::new(44));
3665        let vote = Vote::new(vec![44], Hash::default());
3666        tower.last_vote = VoteTransaction::from(vote);
3667
3668        let mut slot_history = SlotHistory::default();
3669        slot_history.add(42);
3670
3671        let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
3672        assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
3673    }
3674
3675    #[test]
3676    fn test_adjust_lockouts_after_replay_vote_on_genesis() {
3677        let mut tower = Tower::new_for_tests(10, 0.9);
3678        tower.vote_state.votes.push_back(Lockout::new(0));
3679        let vote = Vote::new(vec![0], Hash::default());
3680        tower.last_vote = VoteTransaction::from(vote);
3681
3682        let mut slot_history = SlotHistory::default();
3683        slot_history.add(0);
3684
3685        assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
3686    }
3687
3688    #[test]
3689    fn test_adjust_lockouts_after_replay_future_tower() {
3690        let mut tower = Tower::new_for_tests(10, 0.9);
3691        tower.vote_state.votes.push_back(Lockout::new(13));
3692        tower.vote_state.votes.push_back(Lockout::new(14));
3693        let vote = Vote::new(vec![14], Hash::default());
3694        tower.last_vote = VoteTransaction::from(vote);
3695        tower.initialize_root(12);
3696
3697        let mut slot_history = SlotHistory::default();
3698        slot_history.add(0);
3699        slot_history.add(2);
3700
3701        let tower = tower
3702            .adjust_lockouts_after_replay(2, &slot_history)
3703            .unwrap();
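            // The whole tower (root 12, votes 13 and 14) is ahead of the replayed root, so
            // it is restored intact and marked as stray.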
3704        assert_eq!(tower.root(), 12);
3705        assert_eq!(tower.voted_slots(), vec![13, 14]);
3706        assert_eq!(tower.stray_restored_slot, Some(14));
3707    }
3708
3709    #[test]
3710    fn test_default_tower_has_no_stray_last_vote() {
3711        let tower = Tower::default();
3712        assert!(!tower.is_stray_last_vote());
3713    }
3714
3715    #[test]
3716    fn test_switch_threshold_common_ancestor() {
3717        let mut vote_simulator = VoteSimulator::new(2);
3718        let other_vote_account = vote_simulator.vote_pubkeys[1];
3719        let bank0 = vote_simulator.bank_forks.read().unwrap().get(0).unwrap();
3720        let total_stake = bank0.total_epoch_stake();
3721        assert_eq!(
3722            total_stake,
3723            vote_simulator.validator_keypairs.len() as u64 * 10_000
3724        );
3725
3726        // Create the tree of banks
3727        //                                       /- 50
3728        //          /- 51    /- 45 - 46 - 47 - 48 - 49
3729        // 0 - 1 - 2 - 43 - 44 - 113
3730        //                   \- 110 - 111
3731        //                         \- 112
3732        let forks = tr(0)
3733            / (tr(1)
3734                / (tr(2)
3735                    / tr(51)
3736                    / (tr(43)
3737                        / (tr(44)
3738                            / (tr(45) / (tr(46) / (tr(47) / (tr(48) / tr(49) / tr(50)))))
3739                            / tr(113)
3740                            / (tr(110) / tr(111) / tr(112))))));
3741        let switch_slot = 111;
3742
3743        // Fill the BankForks according to the above fork structure
3744        vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);
3745        for (_, fork_progress) in vote_simulator.progress.iter_mut() {
3746            fork_progress.fork_stats.computed = true;
3747        }
3748
3749        let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
3750        let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
3751        let mut tower = Tower::default();
3752
3753        tower.record_vote(43, Hash::default());
3754        tower.record_vote(44, Hash::default());
3755        tower.record_vote(45, Hash::default());
3756        tower.record_vote(46, Hash::default());
3757        tower.record_vote(47, Hash::default());
3758        tower.record_vote(48, Hash::default());
3759        tower.record_vote(49, Hash::default());
3760
3761        // Candidate slot 50 should *not* work
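            // (A lockout recorded on 50 sits on the last-vote side of the fork point with
            // 111, so it should not count toward switching; this is the common-ancestor
            // handling the test exercises.)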
3762        vote_simulator.simulate_lockout_interval(50, (10, 49), &other_vote_account);
3763        assert_eq!(
3764            tower.check_switch_threshold(
3765                switch_slot,
3766                &ancestors,
3767                &descendants,
3768                &vote_simulator.progress,
3769                total_stake,
3770                bank0.epoch_vote_accounts(0).unwrap(),
3771                &vote_simulator.latest_validator_votes_for_frozen_banks,
3772                &vote_simulator.heaviest_subtree_fork_choice,
3773            ),
3774            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3775        );
3776        vote_simulator.clear_lockout_intervals(50);
3777
3778        // 51, 111, 112, and 113 are all valid; exercise a representative subset of them
3779        for candidate_slot in [51, 111, 113] {
3780            vote_simulator.simulate_lockout_interval(candidate_slot, (10, 49), &other_vote_account);
3781            assert_eq!(
3782                tower.check_switch_threshold(
3783                    switch_slot,
3784                    &ancestors,
3785                    &descendants,
3786                    &vote_simulator.progress,
3787                    total_stake,
3788                    bank0.epoch_vote_accounts(0).unwrap(),
3789                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3790                    &vote_simulator.heaviest_subtree_fork_choice,
3791                ),
3792                SwitchForkDecision::SwitchProof(Hash::default())
3793            );
3794            vote_simulator.clear_lockout_intervals(candidate_slot);
3795        }
3796
3797        // Same checks for gossip votes
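            // (check_add_vote's final argument is is_replay_vote; passing false records
            // these as gossip votes.)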
3798        let insert_gossip_vote = |vote_simulator: &mut VoteSimulator, slot| {
3799            vote_simulator
3800                .latest_validator_votes_for_frozen_banks
3801                .check_add_vote(
3802                    other_vote_account,
3803                    slot,
3804                    Some(
3805                        vote_simulator
3806                            .bank_forks
3807                            .read()
3808                            .unwrap()
3809                            .get(slot)
3810                            .unwrap()
3811                            .hash(),
3812                    ),
3813                    false,
3814                );
3815        };
3816
3817        // Candidate slot 50 should *not* work
3818        insert_gossip_vote(&mut vote_simulator, 50);
3819        assert_eq!(
3820            tower.check_switch_threshold(
3821                switch_slot,
3822                &ancestors,
3823                &descendants,
3824                &vote_simulator.progress,
3825                total_stake,
3826                bank0.epoch_vote_accounts(0).unwrap(),
3827                &vote_simulator.latest_validator_votes_for_frozen_banks,
3828                &vote_simulator.heaviest_subtree_fork_choice,
3829            ),
3830            SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
3831        );
3832        vote_simulator.latest_validator_votes_for_frozen_banks =
3833            LatestValidatorVotesForFrozenBanks::default();
3834
3835        // 51, 110, 111, 112, and 113 are all valid
3836        // Note: We can use 110 here since gossip votes aren't limited to leaf banks
3837        for candidate_slot in [51, 110, 111, 112, 113] {
3838            insert_gossip_vote(&mut vote_simulator, candidate_slot);
3839            assert_eq!(
3840                tower.check_switch_threshold(
3841                    switch_slot,
3842                    &ancestors,
3843                    &descendants,
3844                    &vote_simulator.progress,
3845                    total_stake,
3846                    bank0.epoch_vote_accounts(0).unwrap(),
3847                    &vote_simulator.latest_validator_votes_for_frozen_banks,
3848                    &vote_simulator.heaviest_subtree_fork_choice,
3849                ),
3850                SwitchForkDecision::SwitchProof(Hash::default())
3851            );
3852            vote_simulator.latest_validator_votes_for_frozen_banks =
3853                LatestValidatorVotesForFrozenBanks::default();
3854        }
3855    }
3856}